1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "MCTargetDesc/AArch64TargetStreamer.h"
13 #include "Utils/AArch64BaseInfo.h"
14 #include "llvm/ADT/APInt.h"
15 #include "llvm/ADT/STLExtras.h"
16 #include "llvm/ADT/SmallVector.h"
17 #include "llvm/ADT/StringSwitch.h"
18 #include "llvm/ADT/Twine.h"
19 #include "llvm/MC/MCContext.h"
20 #include "llvm/MC/MCExpr.h"
21 #include "llvm/MC/MCInst.h"
22 #include "llvm/MC/MCObjectFileInfo.h"
23 #include "llvm/MC/MCParser/MCAsmLexer.h"
24 #include "llvm/MC/MCParser/MCAsmParser.h"
25 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
26 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
27 #include "llvm/MC/MCRegisterInfo.h"
28 #include "llvm/MC/MCStreamer.h"
29 #include "llvm/MC/MCSubtargetInfo.h"
30 #include "llvm/MC/MCSymbol.h"
31 #include "llvm/Support/ErrorHandling.h"
32 #include "llvm/Support/SourceMgr.h"
33 #include "llvm/Support/TargetParser.h"
34 #include "llvm/Support/TargetRegistry.h"
35 #include "llvm/Support/raw_ostream.h"
43 class AArch64AsmParser : public MCTargetAsmParser {
45 StringRef Mnemonic; ///< Instruction mnemonic.
47 // Map of register aliases registers via the .req directive.
48 StringMap<std::pair<bool, unsigned> > RegisterReqs;
50 AArch64TargetStreamer &getTargetStreamer() {
51 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
52 return static_cast<AArch64TargetStreamer &>(TS);
55 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
57 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
58 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
59 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
60 unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
61 int tryParseRegister();
62 int tryMatchVectorRegister(StringRef &Kind, bool expected);
63 bool parseRegister(OperandVector &Operands);
64 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
65 bool parseVectorList(OperandVector &Operands);
66 bool parseOperand(OperandVector &Operands, bool isCondCode,
69 void Warning(SMLoc L, const Twine &Msg) { getParser().Warning(L, Msg); }
70 bool Error(SMLoc L, const Twine &Msg) { return getParser().Error(L, Msg); }
71 bool showMatchError(SMLoc Loc, unsigned ErrCode);
73 bool parseDirectiveArch(SMLoc L);
74 bool parseDirectiveCPU(SMLoc L);
75 bool parseDirectiveWord(unsigned Size, SMLoc L);
76 bool parseDirectiveInst(SMLoc L);
78 bool parseDirectiveTLSDescCall(SMLoc L);
80 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
81 bool parseDirectiveLtorg(SMLoc L);
83 bool parseDirectiveReq(StringRef Name, SMLoc L);
84 bool parseDirectiveUnreq(SMLoc L);
86 bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
87 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
88 OperandVector &Operands, MCStreamer &Out,
90 bool MatchingInlineAsm) override;
91 /// @name Auto-generated Match Functions
94 #define GET_ASSEMBLER_HEADER
95 #include "AArch64GenAsmMatcher.inc"
99 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
100 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
101 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
102 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
103 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
104 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
105 OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
106 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
107 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
108 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
109 OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
110 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
111 bool tryParseVectorRegister(OperandVector &Operands);
112 OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
115 enum AArch64MatchResultTy {
116 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
117 #define GET_OPERAND_DIAGNOSTIC_TYPES
118 #include "AArch64GenAsmMatcher.inc"
120 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
121 const MCInstrInfo &MII, const MCTargetOptions &Options)
122 : MCTargetAsmParser(Options, STI) {
123 MCAsmParserExtension::Initialize(Parser);
124 MCStreamer &S = getParser().getStreamer();
125 if (S.getTargetStreamer() == nullptr)
126 new AArch64TargetStreamer(S);
128 // Initialize the set of available features.
129 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
132 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
133 SMLoc NameLoc, OperandVector &Operands) override;
134 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
135 bool ParseDirective(AsmToken DirectiveID) override;
136 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
137 unsigned Kind) override;
139 static bool classifySymbolRef(const MCExpr *Expr,
140 AArch64MCExpr::VariantKind &ELFRefKind,
141 MCSymbolRefExpr::VariantKind &DarwinRefKind,
144 } // end anonymous namespace
148 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
150 class AArch64Operand : public MCParsedAsmOperand {
169 SMLoc StartLoc, EndLoc;
174 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
182 struct VectorListOp {
185 unsigned NumElements;
186 unsigned ElementKind;
189 struct VectorIndexOp {
197 struct ShiftedImmOp {
199 unsigned ShiftAmount;
203 AArch64CC::CondCode Code;
207 unsigned Val; // Encoded 8-bit representation.
211 unsigned Val; // Not the enum since not all values have names.
221 uint32_t PStateField;
240 struct ShiftExtendOp {
241 AArch64_AM::ShiftExtendType Type;
243 bool HasExplicitAmount;
253 struct VectorListOp VectorList;
254 struct VectorIndexOp VectorIndex;
256 struct ShiftedImmOp ShiftedImm;
257 struct CondCodeOp CondCode;
258 struct FPImmOp FPImm;
259 struct BarrierOp Barrier;
260 struct SysRegOp SysReg;
261 struct SysCRImmOp SysCRImm;
262 struct PrefetchOp Prefetch;
263 struct PSBHintOp PSBHint;
264 struct ShiftExtendOp ShiftExtend;
267 // Keep the MCContext around as the MCExprs may need manipulated during
268 // the add<>Operands() calls.
272 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
274 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
276 StartLoc = o.StartLoc;
286 ShiftedImm = o.ShiftedImm;
289 CondCode = o.CondCode;
301 VectorList = o.VectorList;
304 VectorIndex = o.VectorIndex;
310 SysCRImm = o.SysCRImm;
313 Prefetch = o.Prefetch;
319 ShiftExtend = o.ShiftExtend;
324 /// getStartLoc - Get the location of the first token of this operand.
325 SMLoc getStartLoc() const override { return StartLoc; }
326 /// getEndLoc - Get the location of the last token of this operand.
327 SMLoc getEndLoc() const override { return EndLoc; }
329 StringRef getToken() const {
330 assert(Kind == k_Token && "Invalid access!");
331 return StringRef(Tok.Data, Tok.Length);
334 bool isTokenSuffix() const {
335 assert(Kind == k_Token && "Invalid access!");
339 const MCExpr *getImm() const {
340 assert(Kind == k_Immediate && "Invalid access!");
344 const MCExpr *getShiftedImmVal() const {
345 assert(Kind == k_ShiftedImm && "Invalid access!");
346 return ShiftedImm.Val;
349 unsigned getShiftedImmShift() const {
350 assert(Kind == k_ShiftedImm && "Invalid access!");
351 return ShiftedImm.ShiftAmount;
354 AArch64CC::CondCode getCondCode() const {
355 assert(Kind == k_CondCode && "Invalid access!");
356 return CondCode.Code;
359 unsigned getFPImm() const {
360 assert(Kind == k_FPImm && "Invalid access!");
364 unsigned getBarrier() const {
365 assert(Kind == k_Barrier && "Invalid access!");
369 StringRef getBarrierName() const {
370 assert(Kind == k_Barrier && "Invalid access!");
371 return StringRef(Barrier.Data, Barrier.Length);
374 unsigned getReg() const override {
375 assert(Kind == k_Register && "Invalid access!");
379 unsigned getVectorListStart() const {
380 assert(Kind == k_VectorList && "Invalid access!");
381 return VectorList.RegNum;
384 unsigned getVectorListCount() const {
385 assert(Kind == k_VectorList && "Invalid access!");
386 return VectorList.Count;
389 unsigned getVectorIndex() const {
390 assert(Kind == k_VectorIndex && "Invalid access!");
391 return VectorIndex.Val;
394 StringRef getSysReg() const {
395 assert(Kind == k_SysReg && "Invalid access!");
396 return StringRef(SysReg.Data, SysReg.Length);
399 unsigned getSysCR() const {
400 assert(Kind == k_SysCR && "Invalid access!");
404 unsigned getPrefetch() const {
405 assert(Kind == k_Prefetch && "Invalid access!");
409 unsigned getPSBHint() const {
410 assert(Kind == k_PSBHint && "Invalid access!");
414 StringRef getPSBHintName() const {
415 assert(Kind == k_PSBHint && "Invalid access!");
416 return StringRef(PSBHint.Data, PSBHint.Length);
419 StringRef getPrefetchName() const {
420 assert(Kind == k_Prefetch && "Invalid access!");
421 return StringRef(Prefetch.Data, Prefetch.Length);
424 AArch64_AM::ShiftExtendType getShiftExtendType() const {
425 assert(Kind == k_ShiftExtend && "Invalid access!");
426 return ShiftExtend.Type;
429 unsigned getShiftExtendAmount() const {
430 assert(Kind == k_ShiftExtend && "Invalid access!");
431 return ShiftExtend.Amount;
434 bool hasShiftExtendAmount() const {
435 assert(Kind == k_ShiftExtend && "Invalid access!");
436 return ShiftExtend.HasExplicitAmount;
439 bool isImm() const override { return Kind == k_Immediate; }
440 bool isMem() const override { return false; }
441 bool isSImm9() const {
444 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
447 int64_t Val = MCE->getValue();
448 return (Val >= -256 && Val < 256);
450 bool isSImm7s4() const {
453 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
456 int64_t Val = MCE->getValue();
457 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
459 bool isSImm7s8() const {
462 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
465 int64_t Val = MCE->getValue();
466 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
468 bool isSImm7s16() const {
471 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
474 int64_t Val = MCE->getValue();
475 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
478 bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
479 AArch64MCExpr::VariantKind ELFRefKind;
480 MCSymbolRefExpr::VariantKind DarwinRefKind;
482 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
484 // If we don't understand the expression, assume the best and
485 // let the fixup and relocation code deal with it.
489 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
490 ELFRefKind == AArch64MCExpr::VK_LO12 ||
491 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
492 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
493 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
494 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
495 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
496 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
497 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
498 // Note that we don't range-check the addend. It's adjusted modulo page
499 // size when converted, so there is no "out of range" condition when using
501 return Addend >= 0 && (Addend % Scale) == 0;
502 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
503 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
504 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
511 template <int Scale> bool isUImm12Offset() const {
515 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
517 return isSymbolicUImm12Offset(getImm(), Scale);
519 int64_t Val = MCE->getValue();
520 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
523 bool isImm0_1() const {
526 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
529 int64_t Val = MCE->getValue();
530 return (Val >= 0 && Val < 2);
532 bool isImm0_7() const {
535 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
538 int64_t Val = MCE->getValue();
539 return (Val >= 0 && Val < 8);
541 bool isImm1_8() const {
544 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
547 int64_t Val = MCE->getValue();
548 return (Val > 0 && Val < 9);
550 bool isImm0_15() const {
553 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
556 int64_t Val = MCE->getValue();
557 return (Val >= 0 && Val < 16);
559 bool isImm1_16() const {
562 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
565 int64_t Val = MCE->getValue();
566 return (Val > 0 && Val < 17);
568 bool isImm0_31() const {
571 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
574 int64_t Val = MCE->getValue();
575 return (Val >= 0 && Val < 32);
577 bool isImm1_31() const {
580 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
583 int64_t Val = MCE->getValue();
584 return (Val >= 1 && Val < 32);
586 bool isImm1_32() const {
589 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
592 int64_t Val = MCE->getValue();
593 return (Val >= 1 && Val < 33);
595 bool isImm0_63() const {
598 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
601 int64_t Val = MCE->getValue();
602 return (Val >= 0 && Val < 64);
604 bool isImm1_63() const {
607 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
610 int64_t Val = MCE->getValue();
611 return (Val >= 1 && Val < 64);
613 bool isImm1_64() const {
616 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
619 int64_t Val = MCE->getValue();
620 return (Val >= 1 && Val < 65);
622 bool isImm0_127() const {
625 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
628 int64_t Val = MCE->getValue();
629 return (Val >= 0 && Val < 128);
631 bool isImm0_255() const {
634 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
637 int64_t Val = MCE->getValue();
638 return (Val >= 0 && Val < 256);
640 bool isImm0_65535() const {
643 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
646 int64_t Val = MCE->getValue();
647 return (Val >= 0 && Val < 65536);
649 bool isImm32_63() const {
652 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
655 int64_t Val = MCE->getValue();
656 return (Val >= 32 && Val < 64);
658 bool isLogicalImm32() const {
661 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
664 int64_t Val = MCE->getValue();
665 if (Val >> 32 != 0 && Val >> 32 != ~0LL)
668 return AArch64_AM::isLogicalImmediate(Val, 32);
670 bool isLogicalImm64() const {
673 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
676 return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
678 bool isLogicalImm32Not() const {
681 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
684 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
685 return AArch64_AM::isLogicalImmediate(Val, 32);
687 bool isLogicalImm64Not() const {
690 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
693 return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
695 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
696 bool isAddSubImm() const {
697 if (!isShiftedImm() && !isImm())
702 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
703 if (isShiftedImm()) {
704 unsigned Shift = ShiftedImm.ShiftAmount;
705 Expr = ShiftedImm.Val;
706 if (Shift != 0 && Shift != 12)
712 AArch64MCExpr::VariantKind ELFRefKind;
713 MCSymbolRefExpr::VariantKind DarwinRefKind;
715 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
716 DarwinRefKind, Addend)) {
717 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
718 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
719 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
720 || ELFRefKind == AArch64MCExpr::VK_LO12
721 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
722 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
723 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
724 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
725 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
726 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
727 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
730 // Otherwise it should be a real immediate in range:
731 const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
732 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
734 bool isAddSubImmNeg() const {
735 if (!isShiftedImm() && !isImm())
740 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
741 if (isShiftedImm()) {
742 unsigned Shift = ShiftedImm.ShiftAmount;
743 Expr = ShiftedImm.Val;
744 if (Shift != 0 && Shift != 12)
749 // Otherwise it should be a real negative immediate in range:
750 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
751 return CE != nullptr && CE->getValue() < 0 && -CE->getValue() <= 0xfff;
753 bool isCondCode() const { return Kind == k_CondCode; }
754 bool isSIMDImmType10() const {
757 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
760 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
762 bool isBranchTarget26() const {
765 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
768 int64_t Val = MCE->getValue();
771 return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
773 bool isPCRelLabel19() const {
776 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
779 int64_t Val = MCE->getValue();
782 return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
784 bool isBranchTarget14() const {
787 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
790 int64_t Val = MCE->getValue();
793 return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
797 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
801 AArch64MCExpr::VariantKind ELFRefKind;
802 MCSymbolRefExpr::VariantKind DarwinRefKind;
804 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
805 DarwinRefKind, Addend)) {
808 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
811 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
812 if (ELFRefKind == AllowedModifiers[i])
819 bool isMovZSymbolG3() const {
820 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
823 bool isMovZSymbolG2() const {
824 return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
825 AArch64MCExpr::VK_TPREL_G2,
826 AArch64MCExpr::VK_DTPREL_G2});
829 bool isMovZSymbolG1() const {
830 return isMovWSymbol({
831 AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
832 AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
833 AArch64MCExpr::VK_DTPREL_G1,
837 bool isMovZSymbolG0() const {
838 return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
839 AArch64MCExpr::VK_TPREL_G0,
840 AArch64MCExpr::VK_DTPREL_G0});
843 bool isMovKSymbolG3() const {
844 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
847 bool isMovKSymbolG2() const {
848 return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
851 bool isMovKSymbolG1() const {
852 return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
853 AArch64MCExpr::VK_TPREL_G1_NC,
854 AArch64MCExpr::VK_DTPREL_G1_NC});
857 bool isMovKSymbolG0() const {
859 {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
860 AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
863 template<int RegWidth, int Shift>
864 bool isMOVZMovAlias() const {
865 if (!isImm()) return false;
867 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
868 if (!CE) return false;
869 uint64_t Value = CE->getValue();
871 return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
874 template<int RegWidth, int Shift>
875 bool isMOVNMovAlias() const {
876 if (!isImm()) return false;
878 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
879 if (!CE) return false;
880 uint64_t Value = CE->getValue();
882 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
885 bool isFPImm() const { return Kind == k_FPImm; }
886 bool isBarrier() const { return Kind == k_Barrier; }
887 bool isSysReg() const { return Kind == k_SysReg; }
888 bool isMRSSystemRegister() const {
889 if (!isSysReg()) return false;
891 return SysReg.MRSReg != -1U;
893 bool isMSRSystemRegister() const {
894 if (!isSysReg()) return false;
895 return SysReg.MSRReg != -1U;
897 bool isSystemPStateFieldWithImm0_1() const {
898 if (!isSysReg()) return false;
899 return (SysReg.PStateField == AArch64PState::PAN ||
900 SysReg.PStateField == AArch64PState::UAO);
902 bool isSystemPStateFieldWithImm0_15() const {
903 if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
904 return SysReg.PStateField != -1U;
906 bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
907 bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
908 bool isVectorRegLo() const {
909 return Kind == k_Register && Reg.isVector &&
910 AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
913 bool isGPR32as64() const {
914 return Kind == k_Register && !Reg.isVector &&
915 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
917 bool isWSeqPair() const {
918 return Kind == k_Register && !Reg.isVector &&
919 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
922 bool isXSeqPair() const {
923 return Kind == k_Register && !Reg.isVector &&
924 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
928 bool isGPR64sp0() const {
929 return Kind == k_Register && !Reg.isVector &&
930 AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
933 /// Is this a vector list with the type implicit (presumably attached to the
934 /// instruction itself)?
935 template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
936 return Kind == k_VectorList && VectorList.Count == NumRegs &&
937 !VectorList.ElementKind;
940 template <unsigned NumRegs, unsigned NumElements, char ElementKind>
941 bool isTypedVectorList() const {
942 if (Kind != k_VectorList)
944 if (VectorList.Count != NumRegs)
946 if (VectorList.ElementKind != ElementKind)
948 return VectorList.NumElements == NumElements;
951 bool isVectorIndex1() const {
952 return Kind == k_VectorIndex && VectorIndex.Val == 1;
954 bool isVectorIndexB() const {
955 return Kind == k_VectorIndex && VectorIndex.Val < 16;
957 bool isVectorIndexH() const {
958 return Kind == k_VectorIndex && VectorIndex.Val < 8;
960 bool isVectorIndexS() const {
961 return Kind == k_VectorIndex && VectorIndex.Val < 4;
963 bool isVectorIndexD() const {
964 return Kind == k_VectorIndex && VectorIndex.Val < 2;
966 bool isToken() const override { return Kind == k_Token; }
967 bool isTokenEqual(StringRef Str) const {
968 return Kind == k_Token && getToken() == Str;
970 bool isSysCR() const { return Kind == k_SysCR; }
971 bool isPrefetch() const { return Kind == k_Prefetch; }
972 bool isPSBHint() const { return Kind == k_PSBHint; }
973 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
974 bool isShifter() const {
975 if (!isShiftExtend())
978 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
979 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
980 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
981 ST == AArch64_AM::MSL);
983 bool isExtend() const {
984 if (!isShiftExtend())
987 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
988 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
989 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
990 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
991 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
992 ET == AArch64_AM::LSL) &&
993 getShiftExtendAmount() <= 4;
996 bool isExtend64() const {
999 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
1000 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1001 return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
1003 bool isExtendLSL64() const {
1006 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1007 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1008 ET == AArch64_AM::LSL) &&
1009 getShiftExtendAmount() <= 4;
1012 template<int Width> bool isMemXExtend() const {
1015 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1016 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1017 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1018 getShiftExtendAmount() == 0);
1021 template<int Width> bool isMemWExtend() const {
1024 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1025 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1026 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1027 getShiftExtendAmount() == 0);
1030 template <unsigned width>
1031 bool isArithmeticShifter() const {
1035 // An arithmetic shifter is LSL, LSR, or ASR.
1036 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1037 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1038 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1041 template <unsigned width>
1042 bool isLogicalShifter() const {
1046 // A logical shifter is LSL, LSR, ASR or ROR.
1047 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1048 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1049 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1050 getShiftExtendAmount() < width;
1053 bool isMovImm32Shifter() const {
1057 // A MOVi shifter is LSL of 0, 16, 32, or 48.
1058 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1059 if (ST != AArch64_AM::LSL)
1061 uint64_t Val = getShiftExtendAmount();
1062 return (Val == 0 || Val == 16);
1065 bool isMovImm64Shifter() const {
1069 // A MOVi shifter is LSL of 0 or 16.
1070 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1071 if (ST != AArch64_AM::LSL)
1073 uint64_t Val = getShiftExtendAmount();
1074 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1077 bool isLogicalVecShifter() const {
1081 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1082 unsigned Shift = getShiftExtendAmount();
1083 return getShiftExtendType() == AArch64_AM::LSL &&
1084 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1087 bool isLogicalVecHalfWordShifter() const {
1088 if (!isLogicalVecShifter())
1091 // A logical vector shifter is a left shift by 0 or 8.
1092 unsigned Shift = getShiftExtendAmount();
1093 return getShiftExtendType() == AArch64_AM::LSL &&
1094 (Shift == 0 || Shift == 8);
1097 bool isMoveVecShifter() const {
1098 if (!isShiftExtend())
1101 // A logical vector shifter is a left shift by 8 or 16.
1102 unsigned Shift = getShiftExtendAmount();
1103 return getShiftExtendType() == AArch64_AM::MSL &&
1104 (Shift == 8 || Shift == 16);
1107 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1108 // to LDUR/STUR when the offset is not legal for the former but is for
1109 // the latter. As such, in addition to checking for being a legal unscaled
1110 // address, also check that it is not a legal scaled address. This avoids
1111 // ambiguity in the matcher.
1113 bool isSImm9OffsetFB() const {
1114 return isSImm9() && !isUImm12Offset<Width / 8>();
1117 bool isAdrpLabel() const {
1118 // Validation was handled during parsing, so we just sanity check that
1119 // something didn't go haywire.
1123 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1124 int64_t Val = CE->getValue();
1125 int64_t Min = - (4096 * (1LL << (21 - 1)));
1126 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1127 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1133 bool isAdrLabel() const {
1134 // Validation was handled during parsing, so we just sanity check that
1135 // something didn't go haywire.
1139 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1140 int64_t Val = CE->getValue();
1141 int64_t Min = - (1LL << (21 - 1));
1142 int64_t Max = ((1LL << (21 - 1)) - 1);
1143 return Val >= Min && Val <= Max;
1149 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1150 // Add as immediates when possible. Null MCExpr = 0.
1152 Inst.addOperand(MCOperand::createImm(0));
1153 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1154 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1156 Inst.addOperand(MCOperand::createExpr(Expr));
1159 void addRegOperands(MCInst &Inst, unsigned N) const {
1160 assert(N == 1 && "Invalid number of operands!");
1161 Inst.addOperand(MCOperand::createReg(getReg()));
1164 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1165 assert(N == 1 && "Invalid number of operands!");
1167 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1169 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1170 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1171 RI->getEncodingValue(getReg()));
1173 Inst.addOperand(MCOperand::createReg(Reg));
1176 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1177 assert(N == 1 && "Invalid number of operands!");
1179 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1180 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1183 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1184 assert(N == 1 && "Invalid number of operands!");
1186 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1187 Inst.addOperand(MCOperand::createReg(getReg()));
1190 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1191 assert(N == 1 && "Invalid number of operands!");
1192 Inst.addOperand(MCOperand::createReg(getReg()));
1195 template <unsigned NumRegs>
1196 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1197 assert(N == 1 && "Invalid number of operands!");
1198 static const unsigned FirstRegs[] = { AArch64::D0,
1201 AArch64::D0_D1_D2_D3 };
1202 unsigned FirstReg = FirstRegs[NumRegs - 1];
1205 MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1208 template <unsigned NumRegs>
1209 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1210 assert(N == 1 && "Invalid number of operands!");
1211 static const unsigned FirstRegs[] = { AArch64::Q0,
1214 AArch64::Q0_Q1_Q2_Q3 };
1215 unsigned FirstReg = FirstRegs[NumRegs - 1];
1218 MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1221 void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
1222 assert(N == 1 && "Invalid number of operands!");
1223 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1226 void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1227 assert(N == 1 && "Invalid number of operands!");
1228 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1231 void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1232 assert(N == 1 && "Invalid number of operands!");
1233 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1236 void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1237 assert(N == 1 && "Invalid number of operands!");
1238 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1241 void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1242 assert(N == 1 && "Invalid number of operands!");
1243 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1246 void addImmOperands(MCInst &Inst, unsigned N) const {
1247 assert(N == 1 && "Invalid number of operands!");
1248 // If this is a pageoff symrefexpr with an addend, adjust the addend
1249 // to be only the page-offset portion. Otherwise, just add the expr
1251 addExpr(Inst, getImm());
1254 void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
1255 assert(N == 2 && "Invalid number of operands!");
1256 if (isShiftedImm()) {
1257 addExpr(Inst, getShiftedImmVal());
1258 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1260 addExpr(Inst, getImm());
1261 Inst.addOperand(MCOperand::createImm(0));
1265 void addAddSubImmNegOperands(MCInst &Inst, unsigned N) const {
1266 assert(N == 2 && "Invalid number of operands!");
1268 const MCExpr *MCE = isShiftedImm() ? getShiftedImmVal() : getImm();
1269 const MCConstantExpr *CE = cast<MCConstantExpr>(MCE);
1270 int64_t Val = -CE->getValue();
1271 unsigned ShiftAmt = isShiftedImm() ? ShiftedImm.ShiftAmount : 0;
1273 Inst.addOperand(MCOperand::createImm(Val));
1274 Inst.addOperand(MCOperand::createImm(ShiftAmt));
1277 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1278 assert(N == 1 && "Invalid number of operands!");
1279 Inst.addOperand(MCOperand::createImm(getCondCode()));
1282 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1283 assert(N == 1 && "Invalid number of operands!");
1284 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1286 addExpr(Inst, getImm());
1288 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1291 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1292 addImmOperands(Inst, N);
1296 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1297 assert(N == 1 && "Invalid number of operands!");
1298 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1301 Inst.addOperand(MCOperand::createExpr(getImm()));
1304 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1307 void addSImm9Operands(MCInst &Inst, unsigned N) const {
1308 assert(N == 1 && "Invalid number of operands!");
1309 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1310 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1313 void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1314 assert(N == 1 && "Invalid number of operands!");
1315 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1316 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 4));
1319 void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1320 assert(N == 1 && "Invalid number of operands!");
1321 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1322 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 8));
1325 void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1326 assert(N == 1 && "Invalid number of operands!");
1327 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1328 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 16));
1331 void addImm0_1Operands(MCInst &Inst, unsigned N) const {
1332 assert(N == 1 && "Invalid number of operands!");
1333 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1334 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1337 void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1338 assert(N == 1 && "Invalid number of operands!");
1339 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1340 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1343 void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1344 assert(N == 1 && "Invalid number of operands!");
1345 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1346 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1349 void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1350 assert(N == 1 && "Invalid number of operands!");
1351 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1352 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1355 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1356 assert(N == 1 && "Invalid number of operands!");
1357 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1358 assert(MCE && "Invalid constant immediate operand!");
1359 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1362 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1363 assert(N == 1 && "Invalid number of operands!");
1364 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1365 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1368 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1369 assert(N == 1 && "Invalid number of operands!");
1370 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1371 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1374 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1375 assert(N == 1 && "Invalid number of operands!");
1376 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1377 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1380 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1381 assert(N == 1 && "Invalid number of operands!");
1382 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1383 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1386 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1387 assert(N == 1 && "Invalid number of operands!");
1388 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1389 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1392 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1393 assert(N == 1 && "Invalid number of operands!");
1394 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1395 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1398 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1399 assert(N == 1 && "Invalid number of operands!");
1400 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1401 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1404 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1405 assert(N == 1 && "Invalid number of operands!");
1406 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1407 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1410 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1411 assert(N == 1 && "Invalid number of operands!");
1412 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1413 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1416 void addImm32_63Operands(MCInst &Inst, unsigned N) const {
1417 assert(N == 1 && "Invalid number of operands!");
1418 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1419 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1422 void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1423 assert(N == 1 && "Invalid number of operands!");
1424 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1426 AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
1427 Inst.addOperand(MCOperand::createImm(encoding));
1430 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1431 assert(N == 1 && "Invalid number of operands!");
1432 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1433 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1434 Inst.addOperand(MCOperand::createImm(encoding));
1437 void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
1438 assert(N == 1 && "Invalid number of operands!");
1439 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1440 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
1441 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
1442 Inst.addOperand(MCOperand::createImm(encoding));
1445 void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
1446 assert(N == 1 && "Invalid number of operands!");
1447 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1449 AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
1450 Inst.addOperand(MCOperand::createImm(encoding));
1453 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1454 assert(N == 1 && "Invalid number of operands!");
1455 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1456 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1457 Inst.addOperand(MCOperand::createImm(encoding));
1460 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1461 // Branch operands don't encode the low bits, so shift them off
1462 // here. If it's a label, however, just put it on directly as there's
1463 // not enough information now to do anything.
1464 assert(N == 1 && "Invalid number of operands!");
1465 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1467 addExpr(Inst, getImm());
1470 assert(MCE && "Invalid constant immediate operand!");
1471 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1474 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1475 // Branch operands don't encode the low bits, so shift them off
1476 // here. If it's a label, however, just put it on directly as there's
1477 // not enough information now to do anything.
1478 assert(N == 1 && "Invalid number of operands!");
1479 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1481 addExpr(Inst, getImm());
1484 assert(MCE && "Invalid constant immediate operand!");
1485 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1488 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1489 // Branch operands don't encode the low bits, so shift them off
1490 // here. If it's a label, however, just put it on directly as there's
1491 // not enough information now to do anything.
1492 assert(N == 1 && "Invalid number of operands!");
1493 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1495 addExpr(Inst, getImm());
1498 assert(MCE && "Invalid constant immediate operand!");
1499 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1502 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1503 assert(N == 1 && "Invalid number of operands!");
1504 Inst.addOperand(MCOperand::createImm(getFPImm()));
1507 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1508 assert(N == 1 && "Invalid number of operands!");
1509 Inst.addOperand(MCOperand::createImm(getBarrier()));
1512 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1513 assert(N == 1 && "Invalid number of operands!");
1515 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1518 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1519 assert(N == 1 && "Invalid number of operands!");
1521 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1524 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1525 assert(N == 1 && "Invalid number of operands!");
1527 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1530 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1531 assert(N == 1 && "Invalid number of operands!");
1533 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1536 void addSysCROperands(MCInst &Inst, unsigned N) const {
1537 assert(N == 1 && "Invalid number of operands!");
1538 Inst.addOperand(MCOperand::createImm(getSysCR()));
1541 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1542 assert(N == 1 && "Invalid number of operands!");
1543 Inst.addOperand(MCOperand::createImm(getPrefetch()));
1546 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
1547 assert(N == 1 && "Invalid number of operands!");
1548 Inst.addOperand(MCOperand::createImm(getPSBHint()));
1551 void addShifterOperands(MCInst &Inst, unsigned N) const {
1552 assert(N == 1 && "Invalid number of operands!");
1554 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1555 Inst.addOperand(MCOperand::createImm(Imm));
1558 void addExtendOperands(MCInst &Inst, unsigned N) const {
1559 assert(N == 1 && "Invalid number of operands!");
1560 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1561 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1562 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1563 Inst.addOperand(MCOperand::createImm(Imm));
1566 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1567 assert(N == 1 && "Invalid number of operands!");
1568 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1569 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1570 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1571 Inst.addOperand(MCOperand::createImm(Imm));
1574 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1575 assert(N == 2 && "Invalid number of operands!");
1576 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1577 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1578 Inst.addOperand(MCOperand::createImm(IsSigned));
1579 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1582 // For 8-bit load/store instructions with a register offset, both the
1583 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1584 // they're disambiguated by whether the shift was explicit or implicit rather
1586 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1587 assert(N == 2 && "Invalid number of operands!");
1588 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1589 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1590 Inst.addOperand(MCOperand::createImm(IsSigned));
1591 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1595 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1596 assert(N == 1 && "Invalid number of operands!");
1598 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1599 uint64_t Value = CE->getValue();
1600 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1604 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1605 assert(N == 1 && "Invalid number of operands!");
1607 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1608 uint64_t Value = CE->getValue();
1609 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1612 void print(raw_ostream &OS) const override;
1614 static std::unique_ptr<AArch64Operand>
1615 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1616 auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1617 Op->Tok.Data = Str.data();
1618 Op->Tok.Length = Str.size();
1619 Op->Tok.IsSuffix = IsSuffix;
1625 static std::unique_ptr<AArch64Operand>
1626 CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
1627 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1628 Op->Reg.RegNum = RegNum;
1629 Op->Reg.isVector = isVector;
1635 static std::unique_ptr<AArch64Operand>
1636 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1637 char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
1638 auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1639 Op->VectorList.RegNum = RegNum;
1640 Op->VectorList.Count = Count;
1641 Op->VectorList.NumElements = NumElements;
1642 Op->VectorList.ElementKind = ElementKind;
1648 static std::unique_ptr<AArch64Operand>
1649 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1650 auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1651 Op->VectorIndex.Val = Idx;
1657 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1658 SMLoc E, MCContext &Ctx) {
1659 auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1666 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1667 unsigned ShiftAmount,
1670 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1671 Op->ShiftedImm .Val = Val;
1672 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1678 static std::unique_ptr<AArch64Operand>
1679 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1680 auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1681 Op->CondCode.Code = Code;
1687 static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
1689 auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1690 Op->FPImm.Val = Val;
1696 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1700 auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1701 Op->Barrier.Val = Val;
1702 Op->Barrier.Data = Str.data();
1703 Op->Barrier.Length = Str.size();
1709 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1712 uint32_t PStateField,
1714 auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1715 Op->SysReg.Data = Str.data();
1716 Op->SysReg.Length = Str.size();
1717 Op->SysReg.MRSReg = MRSReg;
1718 Op->SysReg.MSRReg = MSRReg;
1719 Op->SysReg.PStateField = PStateField;
1725 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1726 SMLoc E, MCContext &Ctx) {
1727 auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1728 Op->SysCRImm.Val = Val;
1734 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1738 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1739 Op->Prefetch.Val = Val;
1740 Op->Barrier.Data = Str.data();
1741 Op->Barrier.Length = Str.size();
1747 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
1751 auto Op = make_unique<AArch64Operand>(k_PSBHint, Ctx);
1752 Op->PSBHint.Val = Val;
1753 Op->PSBHint.Data = Str.data();
1754 Op->PSBHint.Length = Str.size();
1760 static std::unique_ptr<AArch64Operand>
1761 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1762 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1763 auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1764 Op->ShiftExtend.Type = ShOp;
1765 Op->ShiftExtend.Amount = Val;
1766 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1773 } // end anonymous namespace.
1775 void AArch64Operand::print(raw_ostream &OS) const {
1778 OS << "<fpimm " << getFPImm() << "("
1779 << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
1782 StringRef Name = getBarrierName();
1784 OS << "<barrier " << Name << ">";
1786 OS << "<barrier invalid #" << getBarrier() << ">";
1792 case k_ShiftedImm: {
1793 unsigned Shift = getShiftedImmShift();
1794 OS << "<shiftedimm ";
1795 OS << *getShiftedImmVal();
1796 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1800 OS << "<condcode " << getCondCode() << ">";
1803 OS << "<register " << getReg() << ">";
1805 case k_VectorList: {
1806 OS << "<vectorlist ";
1807 unsigned Reg = getVectorListStart();
1808 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1809 OS << Reg + i << " ";
1814 OS << "<vectorindex " << getVectorIndex() << ">";
1817 OS << "<sysreg: " << getSysReg() << '>';
1820 OS << "'" << getToken() << "'";
1823 OS << "c" << getSysCR();
1826 StringRef Name = getPrefetchName();
1828 OS << "<prfop " << Name << ">";
1830 OS << "<prfop invalid #" << getPrefetch() << ">";
1834 OS << getPSBHintName();
1837 case k_ShiftExtend: {
1838 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1839 << getShiftExtendAmount();
1840 if (!hasShiftExtendAmount())
1848 /// @name Auto-generated Match Functions
1851 static unsigned MatchRegisterName(StringRef Name);
1855 static unsigned matchVectorRegName(StringRef Name) {
1856 return StringSwitch<unsigned>(Name.lower())
1857 .Case("v0", AArch64::Q0)
1858 .Case("v1", AArch64::Q1)
1859 .Case("v2", AArch64::Q2)
1860 .Case("v3", AArch64::Q3)
1861 .Case("v4", AArch64::Q4)
1862 .Case("v5", AArch64::Q5)
1863 .Case("v6", AArch64::Q6)
1864 .Case("v7", AArch64::Q7)
1865 .Case("v8", AArch64::Q8)
1866 .Case("v9", AArch64::Q9)
1867 .Case("v10", AArch64::Q10)
1868 .Case("v11", AArch64::Q11)
1869 .Case("v12", AArch64::Q12)
1870 .Case("v13", AArch64::Q13)
1871 .Case("v14", AArch64::Q14)
1872 .Case("v15", AArch64::Q15)
1873 .Case("v16", AArch64::Q16)
1874 .Case("v17", AArch64::Q17)
1875 .Case("v18", AArch64::Q18)
1876 .Case("v19", AArch64::Q19)
1877 .Case("v20", AArch64::Q20)
1878 .Case("v21", AArch64::Q21)
1879 .Case("v22", AArch64::Q22)
1880 .Case("v23", AArch64::Q23)
1881 .Case("v24", AArch64::Q24)
1882 .Case("v25", AArch64::Q25)
1883 .Case("v26", AArch64::Q26)
1884 .Case("v27", AArch64::Q27)
1885 .Case("v28", AArch64::Q28)
1886 .Case("v29", AArch64::Q29)
1887 .Case("v30", AArch64::Q30)
1888 .Case("v31", AArch64::Q31)
1892 static bool isValidVectorKind(StringRef Name) {
1893 return StringSwitch<bool>(Name.lower())
1903 // Accept the width neutral ones, too, for verbose syntax. If those
1904 // aren't used in the right places, the token operand won't match so
1905 // all will work out.
1910 // Needed for fp16 scalar pairwise reductions
1915 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1916 char &ElementKind) {
1917 assert(isValidVectorKind(Name));
1919 ElementKind = Name.lower()[Name.size() - 1];
1922 if (Name.size() == 2)
1925 // Parse the lane count
1926 Name = Name.drop_front();
1927 while (isdigit(Name.front())) {
1928 NumElements = 10 * NumElements + (Name.front() - '0');
1929 Name = Name.drop_front();
1933 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1935 StartLoc = getLoc();
1936 RegNo = tryParseRegister();
1937 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1938 return (RegNo == (unsigned)-1);
1941 // Matches a register name or register alias previously defined by '.req'
1942 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
1944 unsigned RegNum = isVector ? matchVectorRegName(Name)
1945 : MatchRegisterName(Name);
1948 // Check for aliases registered via .req. Canonicalize to lower case.
1949 // That's more consistent since register names are case insensitive, and
1950 // it's how the original entry was passed in from MC/MCParser/AsmParser.
1951 auto Entry = RegisterReqs.find(Name.lower());
1952 if (Entry == RegisterReqs.end())
1954 // set RegNum if the match is the right kind of register
1955 if (isVector == Entry->getValue().first)
1956 RegNum = Entry->getValue().second;
1961 /// tryParseRegister - Try to parse a register name. The token must be an
1962 /// Identifier when called, and if it is a register name the token is eaten and
1963 /// the register is added to the operand list.
1964 int AArch64AsmParser::tryParseRegister() {
1965 MCAsmParser &Parser = getParser();
1966 const AsmToken &Tok = Parser.getTok();
1967 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
1969 std::string lowerCase = Tok.getString().lower();
1970 unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
1971 // Also handle a few aliases of registers.
1973 RegNum = StringSwitch<unsigned>(lowerCase)
1974 .Case("fp", AArch64::FP)
1975 .Case("lr", AArch64::LR)
1976 .Case("x31", AArch64::XZR)
1977 .Case("w31", AArch64::WZR)
1983 Parser.Lex(); // Eat identifier token.
1987 /// tryMatchVectorRegister - Try to parse a vector register name with optional
1988 /// kind specifier. If it is a register specifier, eat the token and return it.
1989 int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
1990 MCAsmParser &Parser = getParser();
1991 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1992 TokError("vector register expected");
1996 StringRef Name = Parser.getTok().getString();
1997 // If there is a kind specifier, it's separated from the register name by
1999 size_t Start = 0, Next = Name.find('.');
2000 StringRef Head = Name.slice(Start, Next);
2001 unsigned RegNum = matchRegisterNameAlias(Head, true);
2004 if (Next != StringRef::npos) {
2005 Kind = Name.slice(Next, StringRef::npos);
2006 if (!isValidVectorKind(Kind)) {
2007 TokError("invalid vector kind qualifier");
2011 Parser.Lex(); // Eat the register token.
2016 TokError("vector register expected");
2020 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2021 AArch64AsmParser::OperandMatchResultTy
2022 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2023 MCAsmParser &Parser = getParser();
2026 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2027 Error(S, "Expected cN operand where 0 <= N <= 15");
2028 return MatchOperand_ParseFail;
2031 StringRef Tok = Parser.getTok().getIdentifier();
2032 if (Tok[0] != 'c' && Tok[0] != 'C') {
2033 Error(S, "Expected cN operand where 0 <= N <= 15");
2034 return MatchOperand_ParseFail;
2038 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2039 if (BadNum || CRNum > 15) {
2040 Error(S, "Expected cN operand where 0 <= N <= 15");
2041 return MatchOperand_ParseFail;
2044 Parser.Lex(); // Eat identifier token.
2046 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2047 return MatchOperand_Success;
2050 /// tryParsePrefetch - Try to parse a prefetch operand.
2051 AArch64AsmParser::OperandMatchResultTy
2052 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2053 MCAsmParser &Parser = getParser();
2055 const AsmToken &Tok = Parser.getTok();
2056 // Either an identifier for named values or a 5-bit immediate.
2057 bool Hash = Tok.is(AsmToken::Hash);
2058 if (Hash || Tok.is(AsmToken::Integer)) {
2060 Parser.Lex(); // Eat hash token.
2061 const MCExpr *ImmVal;
2062 if (getParser().parseExpression(ImmVal))
2063 return MatchOperand_ParseFail;
2065 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2067 TokError("immediate value expected for prefetch operand");
2068 return MatchOperand_ParseFail;
2070 unsigned prfop = MCE->getValue();
2072 TokError("prefetch operand out of range, [0,31] expected");
2073 return MatchOperand_ParseFail;
2076 auto PRFM = AArch64PRFM::lookupPRFMByEncoding(MCE->getValue());
2077 Operands.push_back(AArch64Operand::CreatePrefetch(
2078 prfop, PRFM ? PRFM->Name : "", S, getContext()));
2079 return MatchOperand_Success;
2082 if (Tok.isNot(AsmToken::Identifier)) {
2083 TokError("pre-fetch hint expected");
2084 return MatchOperand_ParseFail;
2087 auto PRFM = AArch64PRFM::lookupPRFMByName(Tok.getString());
2089 TokError("pre-fetch hint expected");
2090 return MatchOperand_ParseFail;
2093 Parser.Lex(); // Eat identifier token.
2094 Operands.push_back(AArch64Operand::CreatePrefetch(
2095 PRFM->Encoding, Tok.getString(), S, getContext()));
2096 return MatchOperand_Success;
2099 /// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
2100 AArch64AsmParser::OperandMatchResultTy
2101 AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2102 MCAsmParser &Parser = getParser();
2104 const AsmToken &Tok = Parser.getTok();
2105 if (Tok.isNot(AsmToken::Identifier)) {
2106 TokError("invalid operand for instruction");
2107 return MatchOperand_ParseFail;
2110 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2112 TokError("invalid operand for instruction");
2113 return MatchOperand_ParseFail;
2116 Parser.Lex(); // Eat identifier token.
2117 Operands.push_back(AArch64Operand::CreatePSBHint(
2118 PSB->Encoding, Tok.getString(), S, getContext()));
2119 return MatchOperand_Success;
2122 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2124 AArch64AsmParser::OperandMatchResultTy
2125 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2126 MCAsmParser &Parser = getParser();
2130 if (Parser.getTok().is(AsmToken::Hash)) {
2131 Parser.Lex(); // Eat hash token.
2134 if (parseSymbolicImmVal(Expr))
2135 return MatchOperand_ParseFail;
2137 AArch64MCExpr::VariantKind ELFRefKind;
2138 MCSymbolRefExpr::VariantKind DarwinRefKind;
2140 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2141 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2142 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2143 // No modifier was specified at all; this is the syntax for an ELF basic
2144 // ADRP relocation (unfortunately).
2146 AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2147 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2148 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2150 Error(S, "gotpage label reference not allowed an addend");
2151 return MatchOperand_ParseFail;
2152 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2153 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2154 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2155 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2156 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2157 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2158 // The operand must be an @page or @gotpage qualified symbolref.
2159 Error(S, "page or gotpage label reference expected");
2160 return MatchOperand_ParseFail;
2164 // We have either a label reference possibly with addend or an immediate. The
2165 // addend is a raw value here. The linker will adjust it to only reference the
2167 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2168 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2170 return MatchOperand_Success;
2173 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2175 AArch64AsmParser::OperandMatchResultTy
2176 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2177 MCAsmParser &Parser = getParser();
2181 if (Parser.getTok().is(AsmToken::Hash)) {
2182 Parser.Lex(); // Eat hash token.
2185 if (getParser().parseExpression(Expr))
2186 return MatchOperand_ParseFail;
2188 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2189 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2191 return MatchOperand_Success;
2194 /// tryParseFPImm - A floating point immediate expression operand.
2195 AArch64AsmParser::OperandMatchResultTy
2196 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2197 MCAsmParser &Parser = getParser();
2201 if (Parser.getTok().is(AsmToken::Hash)) {
2202 Parser.Lex(); // Eat '#'
2206 // Handle negation, as that still comes through as a separate token.
2207 bool isNegative = false;
2208 if (Parser.getTok().is(AsmToken::Minus)) {
2212 const AsmToken &Tok = Parser.getTok();
2213 if (Tok.is(AsmToken::Real)) {
2214 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2216 RealVal.changeSign();
2218 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2219 int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2220 Parser.Lex(); // Eat the token.
2221 // Check for out of range values. As an exception, we let Zero through,
2222 // as we handle that special case in post-processing before matching in
2223 // order to use the zero register for it.
2224 if (Val == -1 && !RealVal.isPosZero()) {
2225 TokError("expected compatible register or floating-point constant");
2226 return MatchOperand_ParseFail;
2228 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2229 return MatchOperand_Success;
2231 if (Tok.is(AsmToken::Integer)) {
2233 if (!isNegative && Tok.getString().startswith("0x")) {
2234 Val = Tok.getIntVal();
2235 if (Val > 255 || Val < 0) {
2236 TokError("encoded floating point value out of range");
2237 return MatchOperand_ParseFail;
2240 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2241 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2242 // If we had a '-' in front, toggle the sign bit.
2243 IntVal ^= (uint64_t)isNegative << 63;
2244 Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2246 Parser.Lex(); // Eat the token.
2247 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2248 return MatchOperand_Success;
2252 return MatchOperand_NoMatch;
2254 TokError("invalid floating point immediate");
2255 return MatchOperand_ParseFail;
2258 /// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
2259 AArch64AsmParser::OperandMatchResultTy
2260 AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
2261 MCAsmParser &Parser = getParser();
2264 if (Parser.getTok().is(AsmToken::Hash))
2265 Parser.Lex(); // Eat '#'
2266 else if (Parser.getTok().isNot(AsmToken::Integer))
2267 // Operand should start from # or should be integer, emit error otherwise.
2268 return MatchOperand_NoMatch;
2271 if (parseSymbolicImmVal(Imm))
2272 return MatchOperand_ParseFail;
2273 else if (Parser.getTok().isNot(AsmToken::Comma)) {
2274 uint64_t ShiftAmount = 0;
2275 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
2277 int64_t Val = MCE->getValue();
2278 if (Val > 0xfff && (Val & 0xfff) == 0) {
2279 Imm = MCConstantExpr::create(Val >> 12, getContext());
2283 SMLoc E = Parser.getTok().getLoc();
2284 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
2286 return MatchOperand_Success;
2292 // The optional operand must be "lsl #N" where N is non-negative.
2293 if (!Parser.getTok().is(AsmToken::Identifier) ||
2294 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2295 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2296 return MatchOperand_ParseFail;
2302 if (Parser.getTok().is(AsmToken::Hash)) {
2306 if (Parser.getTok().isNot(AsmToken::Integer)) {
2307 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2308 return MatchOperand_ParseFail;
2311 int64_t ShiftAmount = Parser.getTok().getIntVal();
2313 if (ShiftAmount < 0) {
2314 Error(Parser.getTok().getLoc(), "positive shift amount required");
2315 return MatchOperand_ParseFail;
2317 Parser.Lex(); // Eat the number
2319 SMLoc E = Parser.getTok().getLoc();
2320 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2321 S, E, getContext()));
2322 return MatchOperand_Success;
2325 /// parseCondCodeString - Parse a Condition Code string.
2326 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2327 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2328 .Case("eq", AArch64CC::EQ)
2329 .Case("ne", AArch64CC::NE)
2330 .Case("cs", AArch64CC::HS)
2331 .Case("hs", AArch64CC::HS)
2332 .Case("cc", AArch64CC::LO)
2333 .Case("lo", AArch64CC::LO)
2334 .Case("mi", AArch64CC::MI)
2335 .Case("pl", AArch64CC::PL)
2336 .Case("vs", AArch64CC::VS)
2337 .Case("vc", AArch64CC::VC)
2338 .Case("hi", AArch64CC::HI)
2339 .Case("ls", AArch64CC::LS)
2340 .Case("ge", AArch64CC::GE)
2341 .Case("lt", AArch64CC::LT)
2342 .Case("gt", AArch64CC::GT)
2343 .Case("le", AArch64CC::LE)
2344 .Case("al", AArch64CC::AL)
2345 .Case("nv", AArch64CC::NV)
2346 .Default(AArch64CC::Invalid);
2350 /// parseCondCode - Parse a Condition Code operand.
2351 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2352 bool invertCondCode) {
2353 MCAsmParser &Parser = getParser();
2355 const AsmToken &Tok = Parser.getTok();
2356 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2358 StringRef Cond = Tok.getString();
2359 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2360 if (CC == AArch64CC::Invalid)
2361 return TokError("invalid condition code");
2362 Parser.Lex(); // Eat identifier token.
2364 if (invertCondCode) {
2365 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2366 return TokError("condition codes AL and NV are invalid for this instruction");
2367 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2371 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2375 /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2376 /// them if present.
2377 AArch64AsmParser::OperandMatchResultTy
2378 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2379 MCAsmParser &Parser = getParser();
2380 const AsmToken &Tok = Parser.getTok();
2381 std::string LowerID = Tok.getString().lower();
2382 AArch64_AM::ShiftExtendType ShOp =
2383 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2384 .Case("lsl", AArch64_AM::LSL)
2385 .Case("lsr", AArch64_AM::LSR)
2386 .Case("asr", AArch64_AM::ASR)
2387 .Case("ror", AArch64_AM::ROR)
2388 .Case("msl", AArch64_AM::MSL)
2389 .Case("uxtb", AArch64_AM::UXTB)
2390 .Case("uxth", AArch64_AM::UXTH)
2391 .Case("uxtw", AArch64_AM::UXTW)
2392 .Case("uxtx", AArch64_AM::UXTX)
2393 .Case("sxtb", AArch64_AM::SXTB)
2394 .Case("sxth", AArch64_AM::SXTH)
2395 .Case("sxtw", AArch64_AM::SXTW)
2396 .Case("sxtx", AArch64_AM::SXTX)
2397 .Default(AArch64_AM::InvalidShiftExtend);
2399 if (ShOp == AArch64_AM::InvalidShiftExtend)
2400 return MatchOperand_NoMatch;
2402 SMLoc S = Tok.getLoc();
2405 bool Hash = getLexer().is(AsmToken::Hash);
2406 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2407 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2408 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2409 ShOp == AArch64_AM::MSL) {
2410 // We expect a number here.
2411 TokError("expected #imm after shift specifier");
2412 return MatchOperand_ParseFail;
2415 // "extend" type operatoins don't need an immediate, #0 is implicit.
2416 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2418 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2419 return MatchOperand_Success;
2423 Parser.Lex(); // Eat the '#'.
2425 // Make sure we do actually have a number or a parenthesized expression.
2426 SMLoc E = Parser.getTok().getLoc();
2427 if (!Parser.getTok().is(AsmToken::Integer) &&
2428 !Parser.getTok().is(AsmToken::LParen)) {
2429 Error(E, "expected integer shift amount");
2430 return MatchOperand_ParseFail;
2433 const MCExpr *ImmVal;
2434 if (getParser().parseExpression(ImmVal))
2435 return MatchOperand_ParseFail;
2437 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2439 Error(E, "expected constant '#imm' after shift specifier");
2440 return MatchOperand_ParseFail;
2443 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2444 Operands.push_back(AArch64Operand::CreateShiftExtend(
2445 ShOp, MCE->getValue(), true, S, E, getContext()));
2446 return MatchOperand_Success;
2449 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2450 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
2451 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2452 OperandVector &Operands) {
2453 if (Name.find('.') != StringRef::npos)
2454 return TokError("invalid operand");
2458 AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2460 MCAsmParser &Parser = getParser();
2461 const AsmToken &Tok = Parser.getTok();
2462 StringRef Op = Tok.getString();
2463 SMLoc S = Tok.getLoc();
2465 const MCExpr *Expr = nullptr;
2467 #define SYS_ALIAS(op1, Cn, Cm, op2) \
2469 Expr = MCConstantExpr::create(op1, getContext()); \
2470 Operands.push_back( \
2471 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2472 Operands.push_back( \
2473 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
2474 Operands.push_back( \
2475 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
2476 Expr = MCConstantExpr::create(op2, getContext()); \
2477 Operands.push_back( \
2478 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2481 if (Mnemonic == "ic") {
2482 if (!Op.compare_lower("ialluis")) {
2483 // SYS #0, C7, C1, #0
2484 SYS_ALIAS(0, 7, 1, 0);
2485 } else if (!Op.compare_lower("iallu")) {
2486 // SYS #0, C7, C5, #0
2487 SYS_ALIAS(0, 7, 5, 0);
2488 } else if (!Op.compare_lower("ivau")) {
2489 // SYS #3, C7, C5, #1
2490 SYS_ALIAS(3, 7, 5, 1);
2492 return TokError("invalid operand for IC instruction");
2494 } else if (Mnemonic == "dc") {
2495 if (!Op.compare_lower("zva")) {
2496 // SYS #3, C7, C4, #1
2497 SYS_ALIAS(3, 7, 4, 1);
2498 } else if (!Op.compare_lower("ivac")) {
2499 // SYS #3, C7, C6, #1
2500 SYS_ALIAS(0, 7, 6, 1);
2501 } else if (!Op.compare_lower("isw")) {
2502 // SYS #0, C7, C6, #2
2503 SYS_ALIAS(0, 7, 6, 2);
2504 } else if (!Op.compare_lower("cvac")) {
2505 // SYS #3, C7, C10, #1
2506 SYS_ALIAS(3, 7, 10, 1);
2507 } else if (!Op.compare_lower("csw")) {
2508 // SYS #0, C7, C10, #2
2509 SYS_ALIAS(0, 7, 10, 2);
2510 } else if (!Op.compare_lower("cvau")) {
2511 // SYS #3, C7, C11, #1
2512 SYS_ALIAS(3, 7, 11, 1);
2513 } else if (!Op.compare_lower("civac")) {
2514 // SYS #3, C7, C14, #1
2515 SYS_ALIAS(3, 7, 14, 1);
2516 } else if (!Op.compare_lower("cisw")) {
2517 // SYS #0, C7, C14, #2
2518 SYS_ALIAS(0, 7, 14, 2);
2519 } else if (!Op.compare_lower("cvap")) {
2520 if (getSTI().getFeatureBits()[AArch64::HasV8_2aOps]) {
2521 // SYS #3, C7, C12, #1
2522 SYS_ALIAS(3, 7, 12, 1);
2524 return TokError("DC CVAP requires ARMv8.2a");
2527 return TokError("invalid operand for DC instruction");
2529 } else if (Mnemonic == "at") {
2530 if (!Op.compare_lower("s1e1r")) {
2531 // SYS #0, C7, C8, #0
2532 SYS_ALIAS(0, 7, 8, 0);
2533 } else if (!Op.compare_lower("s1e2r")) {
2534 // SYS #4, C7, C8, #0
2535 SYS_ALIAS(4, 7, 8, 0);
2536 } else if (!Op.compare_lower("s1e3r")) {
2537 // SYS #6, C7, C8, #0
2538 SYS_ALIAS(6, 7, 8, 0);
2539 } else if (!Op.compare_lower("s1e1w")) {
2540 // SYS #0, C7, C8, #1
2541 SYS_ALIAS(0, 7, 8, 1);
2542 } else if (!Op.compare_lower("s1e2w")) {
2543 // SYS #4, C7, C8, #1
2544 SYS_ALIAS(4, 7, 8, 1);
2545 } else if (!Op.compare_lower("s1e3w")) {
2546 // SYS #6, C7, C8, #1
2547 SYS_ALIAS(6, 7, 8, 1);
2548 } else if (!Op.compare_lower("s1e0r")) {
2549 // SYS #0, C7, C8, #3
2550 SYS_ALIAS(0, 7, 8, 2);
2551 } else if (!Op.compare_lower("s1e0w")) {
2552 // SYS #0, C7, C8, #3
2553 SYS_ALIAS(0, 7, 8, 3);
2554 } else if (!Op.compare_lower("s12e1r")) {
2555 // SYS #4, C7, C8, #4
2556 SYS_ALIAS(4, 7, 8, 4);
2557 } else if (!Op.compare_lower("s12e1w")) {
2558 // SYS #4, C7, C8, #5
2559 SYS_ALIAS(4, 7, 8, 5);
2560 } else if (!Op.compare_lower("s12e0r")) {
2561 // SYS #4, C7, C8, #6
2562 SYS_ALIAS(4, 7, 8, 6);
2563 } else if (!Op.compare_lower("s12e0w")) {
2564 // SYS #4, C7, C8, #7
2565 SYS_ALIAS(4, 7, 8, 7);
2566 } else if (!Op.compare_lower("s1e1rp")) {
2567 if (getSTI().getFeatureBits()[AArch64::HasV8_2aOps]) {
2568 // SYS #0, C7, C9, #0
2569 SYS_ALIAS(0, 7, 9, 0);
2571 return TokError("AT S1E1RP requires ARMv8.2a");
2573 } else if (!Op.compare_lower("s1e1wp")) {
2574 if (getSTI().getFeatureBits()[AArch64::HasV8_2aOps]) {
2575 // SYS #0, C7, C9, #1
2576 SYS_ALIAS(0, 7, 9, 1);
2578 return TokError("AT S1E1WP requires ARMv8.2a");
2581 return TokError("invalid operand for AT instruction");
2583 } else if (Mnemonic == "tlbi") {
2584 if (!Op.compare_lower("vmalle1is")) {
2585 // SYS #0, C8, C3, #0
2586 SYS_ALIAS(0, 8, 3, 0);
2587 } else if (!Op.compare_lower("alle2is")) {
2588 // SYS #4, C8, C3, #0
2589 SYS_ALIAS(4, 8, 3, 0);
2590 } else if (!Op.compare_lower("alle3is")) {
2591 // SYS #6, C8, C3, #0
2592 SYS_ALIAS(6, 8, 3, 0);
2593 } else if (!Op.compare_lower("vae1is")) {
2594 // SYS #0, C8, C3, #1
2595 SYS_ALIAS(0, 8, 3, 1);
2596 } else if (!Op.compare_lower("vae2is")) {
2597 // SYS #4, C8, C3, #1
2598 SYS_ALIAS(4, 8, 3, 1);
2599 } else if (!Op.compare_lower("vae3is")) {
2600 // SYS #6, C8, C3, #1
2601 SYS_ALIAS(6, 8, 3, 1);
2602 } else if (!Op.compare_lower("aside1is")) {
2603 // SYS #0, C8, C3, #2
2604 SYS_ALIAS(0, 8, 3, 2);
2605 } else if (!Op.compare_lower("vaae1is")) {
2606 // SYS #0, C8, C3, #3
2607 SYS_ALIAS(0, 8, 3, 3);
2608 } else if (!Op.compare_lower("alle1is")) {
2609 // SYS #4, C8, C3, #4
2610 SYS_ALIAS(4, 8, 3, 4);
2611 } else if (!Op.compare_lower("vale1is")) {
2612 // SYS #0, C8, C3, #5
2613 SYS_ALIAS(0, 8, 3, 5);
2614 } else if (!Op.compare_lower("vaale1is")) {
2615 // SYS #0, C8, C3, #7
2616 SYS_ALIAS(0, 8, 3, 7);
2617 } else if (!Op.compare_lower("vmalle1")) {
2618 // SYS #0, C8, C7, #0
2619 SYS_ALIAS(0, 8, 7, 0);
2620 } else if (!Op.compare_lower("alle2")) {
2621 // SYS #4, C8, C7, #0
2622 SYS_ALIAS(4, 8, 7, 0);
2623 } else if (!Op.compare_lower("vale2is")) {
2624 // SYS #4, C8, C3, #5
2625 SYS_ALIAS(4, 8, 3, 5);
2626 } else if (!Op.compare_lower("vale3is")) {
2627 // SYS #6, C8, C3, #5
2628 SYS_ALIAS(6, 8, 3, 5);
2629 } else if (!Op.compare_lower("alle3")) {
2630 // SYS #6, C8, C7, #0
2631 SYS_ALIAS(6, 8, 7, 0);
2632 } else if (!Op.compare_lower("vae1")) {
2633 // SYS #0, C8, C7, #1
2634 SYS_ALIAS(0, 8, 7, 1);
2635 } else if (!Op.compare_lower("vae2")) {
2636 // SYS #4, C8, C7, #1
2637 SYS_ALIAS(4, 8, 7, 1);
2638 } else if (!Op.compare_lower("vae3")) {
2639 // SYS #6, C8, C7, #1
2640 SYS_ALIAS(6, 8, 7, 1);
2641 } else if (!Op.compare_lower("aside1")) {
2642 // SYS #0, C8, C7, #2
2643 SYS_ALIAS(0, 8, 7, 2);
2644 } else if (!Op.compare_lower("vaae1")) {
2645 // SYS #0, C8, C7, #3
2646 SYS_ALIAS(0, 8, 7, 3);
2647 } else if (!Op.compare_lower("alle1")) {
2648 // SYS #4, C8, C7, #4
2649 SYS_ALIAS(4, 8, 7, 4);
2650 } else if (!Op.compare_lower("vale1")) {
2651 // SYS #0, C8, C7, #5
2652 SYS_ALIAS(0, 8, 7, 5);
2653 } else if (!Op.compare_lower("vale2")) {
2654 // SYS #4, C8, C7, #5
2655 SYS_ALIAS(4, 8, 7, 5);
2656 } else if (!Op.compare_lower("vale3")) {
2657 // SYS #6, C8, C7, #5
2658 SYS_ALIAS(6, 8, 7, 5);
2659 } else if (!Op.compare_lower("vaale1")) {
2660 // SYS #0, C8, C7, #7
2661 SYS_ALIAS(0, 8, 7, 7);
2662 } else if (!Op.compare_lower("ipas2e1")) {
2663 // SYS #4, C8, C4, #1
2664 SYS_ALIAS(4, 8, 4, 1);
2665 } else if (!Op.compare_lower("ipas2le1")) {
2666 // SYS #4, C8, C4, #5
2667 SYS_ALIAS(4, 8, 4, 5);
2668 } else if (!Op.compare_lower("ipas2e1is")) {
2669 // SYS #4, C8, C4, #1
2670 SYS_ALIAS(4, 8, 0, 1);
2671 } else if (!Op.compare_lower("ipas2le1is")) {
2672 // SYS #4, C8, C4, #5
2673 SYS_ALIAS(4, 8, 0, 5);
2674 } else if (!Op.compare_lower("vmalls12e1")) {
2675 // SYS #4, C8, C7, #6
2676 SYS_ALIAS(4, 8, 7, 6);
2677 } else if (!Op.compare_lower("vmalls12e1is")) {
2678 // SYS #4, C8, C3, #6
2679 SYS_ALIAS(4, 8, 3, 6);
2681 return TokError("invalid operand for TLBI instruction");
2687 Parser.Lex(); // Eat operand.
2689 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2690 bool HasRegister = false;
2692 // Check for the optional register operand.
2693 if (getLexer().is(AsmToken::Comma)) {
2694 Parser.Lex(); // Eat comma.
2696 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2697 return TokError("expected register operand");
2702 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2703 Parser.eatToEndOfStatement();
2704 return TokError("unexpected token in argument list");
2707 if (ExpectRegister && !HasRegister) {
2708 return TokError("specified " + Mnemonic + " op requires a register");
2710 else if (!ExpectRegister && HasRegister) {
2711 return TokError("specified " + Mnemonic + " op does not use a register");
2714 Parser.Lex(); // Consume the EndOfStatement
2718 AArch64AsmParser::OperandMatchResultTy
2719 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2720 MCAsmParser &Parser = getParser();
2721 const AsmToken &Tok = Parser.getTok();
2723 // Can be either a #imm style literal or an option name
2724 bool Hash = Tok.is(AsmToken::Hash);
2725 if (Hash || Tok.is(AsmToken::Integer)) {
2726 // Immediate operand.
2728 Parser.Lex(); // Eat the '#'
2729 const MCExpr *ImmVal;
2730 SMLoc ExprLoc = getLoc();
2731 if (getParser().parseExpression(ImmVal))
2732 return MatchOperand_ParseFail;
2733 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2735 Error(ExprLoc, "immediate value expected for barrier operand");
2736 return MatchOperand_ParseFail;
2738 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2739 Error(ExprLoc, "barrier operand out of range");
2740 return MatchOperand_ParseFail;
2742 auto DB = AArch64DB::lookupDBByEncoding(MCE->getValue());
2743 Operands.push_back(AArch64Operand::CreateBarrier(
2744 MCE->getValue(), DB ? DB->Name : "", ExprLoc, getContext()));
2745 return MatchOperand_Success;
2748 if (Tok.isNot(AsmToken::Identifier)) {
2749 TokError("invalid operand for instruction");
2750 return MatchOperand_ParseFail;
2753 auto DB = AArch64DB::lookupDBByName(Tok.getString());
2755 TokError("invalid barrier option name");
2756 return MatchOperand_ParseFail;
2759 // The only valid named option for ISB is 'sy'
2760 if (Mnemonic == "isb" && DB->Encoding != AArch64DB::sy) {
2761 TokError("'sy' or #imm operand expected");
2762 return MatchOperand_ParseFail;
2765 Operands.push_back(AArch64Operand::CreateBarrier(
2766 DB->Encoding, Tok.getString(), getLoc(), getContext()));
2767 Parser.Lex(); // Consume the option
2769 return MatchOperand_Success;
2772 AArch64AsmParser::OperandMatchResultTy
2773 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2774 MCAsmParser &Parser = getParser();
2775 const AsmToken &Tok = Parser.getTok();
2777 if (Tok.isNot(AsmToken::Identifier))
2778 return MatchOperand_NoMatch;
2781 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
2782 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
2783 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
2784 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
2786 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
2788 auto PState = AArch64PState::lookupPStateByName(Tok.getString());
2789 unsigned PStateImm = -1;
2790 if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
2791 PStateImm = PState->Encoding;
2794 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
2795 PStateImm, getContext()));
2796 Parser.Lex(); // Eat identifier
2798 return MatchOperand_Success;
2801 /// tryParseVectorRegister - Parse a vector register operand.
2802 bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
2803 MCAsmParser &Parser = getParser();
2804 if (Parser.getTok().isNot(AsmToken::Identifier))
2808 // Check for a vector register specifier first.
2810 int64_t Reg = tryMatchVectorRegister(Kind, false);
2814 AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2815 // If there was an explicit qualifier, that goes on as a literal text
2819 AArch64Operand::CreateToken(Kind, false, S, getContext()));
2821 // If there is an index specifier following the register, parse that too.
2822 if (Parser.getTok().is(AsmToken::LBrac)) {
2823 SMLoc SIdx = getLoc();
2824 Parser.Lex(); // Eat left bracket token.
2826 const MCExpr *ImmVal;
2827 if (getParser().parseExpression(ImmVal))
2829 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2831 TokError("immediate value expected for vector index");
2836 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2837 Error(E, "']' expected");
2841 Parser.Lex(); // Eat right bracket token.
2843 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2850 /// parseRegister - Parse a non-vector register operand.
2851 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
2852 MCAsmParser &Parser = getParser();
2854 // Try for a vector register.
2855 if (!tryParseVectorRegister(Operands))
2858 // Try for a scalar register.
2859 int64_t Reg = tryParseRegister();
2863 AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2865 // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2866 // as a string token in the instruction itself.
2867 if (getLexer().getKind() == AsmToken::LBrac) {
2868 SMLoc LBracS = getLoc();
2870 const AsmToken &Tok = Parser.getTok();
2871 if (Tok.is(AsmToken::Integer)) {
2872 SMLoc IntS = getLoc();
2873 int64_t Val = Tok.getIntVal();
2876 if (getLexer().getKind() == AsmToken::RBrac) {
2877 SMLoc RBracS = getLoc();
2880 AArch64Operand::CreateToken("[", false, LBracS, getContext()));
2882 AArch64Operand::CreateToken("1", false, IntS, getContext()));
2884 AArch64Operand::CreateToken("]", false, RBracS, getContext()));
2894 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2895 MCAsmParser &Parser = getParser();
2896 bool HasELFModifier = false;
2897 AArch64MCExpr::VariantKind RefKind;
2899 if (Parser.getTok().is(AsmToken::Colon)) {
2900 Parser.Lex(); // Eat ':"
2901 HasELFModifier = true;
2903 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2904 Error(Parser.getTok().getLoc(),
2905 "expect relocation specifier in operand after ':'");
2909 std::string LowerCase = Parser.getTok().getIdentifier().lower();
2910 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2911 .Case("lo12", AArch64MCExpr::VK_LO12)
2912 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2913 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2914 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2915 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2916 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2917 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2918 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2919 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2920 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2921 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2922 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2923 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2924 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2925 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2926 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2927 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2928 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
2929 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
2930 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
2931 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
2932 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
2933 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
2934 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
2935 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
2936 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
2937 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
2938 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
2939 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
2940 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
2941 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
2942 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
2943 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
2944 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
2945 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
2946 .Default(AArch64MCExpr::VK_INVALID);
2948 if (RefKind == AArch64MCExpr::VK_INVALID) {
2949 Error(Parser.getTok().getLoc(),
2950 "expect relocation specifier in operand after ':'");
2954 Parser.Lex(); // Eat identifier
2956 if (Parser.getTok().isNot(AsmToken::Colon)) {
2957 Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
2960 Parser.Lex(); // Eat ':'
2963 if (getParser().parseExpression(ImmVal))
2967 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
2972 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
2973 bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
2974 MCAsmParser &Parser = getParser();
2975 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
2977 Parser.Lex(); // Eat left bracket token.
2979 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
2982 int64_t PrevReg = FirstReg;
2985 if (Parser.getTok().is(AsmToken::Minus)) {
2986 Parser.Lex(); // Eat the minus.
2988 SMLoc Loc = getLoc();
2990 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2993 // Any Kind suffices must match on all regs in the list.
2994 if (Kind != NextKind)
2995 return Error(Loc, "mismatched register size suffix");
2997 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
2999 if (Space == 0 || Space > 3) {
3000 return Error(Loc, "invalid number of vectors");
3006 while (Parser.getTok().is(AsmToken::Comma)) {
3007 Parser.Lex(); // Eat the comma token.
3009 SMLoc Loc = getLoc();
3011 int64_t Reg = tryMatchVectorRegister(NextKind, true);
3014 // Any Kind suffices must match on all regs in the list.
3015 if (Kind != NextKind)
3016 return Error(Loc, "mismatched register size suffix");
3018 // Registers must be incremental (with wraparound at 31)
3019 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
3020 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
3021 return Error(Loc, "registers must be sequential");
3028 if (Parser.getTok().isNot(AsmToken::RCurly))
3029 return Error(getLoc(), "'}' expected");
3030 Parser.Lex(); // Eat the '}' token.
3033 return Error(S, "invalid number of vectors");
3035 unsigned NumElements = 0;
3036 char ElementKind = 0;
3038 parseValidVectorKind(Kind, NumElements, ElementKind);
3040 Operands.push_back(AArch64Operand::CreateVectorList(
3041 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
3043 // If there is an index specifier following the list, parse that too.
3044 if (Parser.getTok().is(AsmToken::LBrac)) {
3045 SMLoc SIdx = getLoc();
3046 Parser.Lex(); // Eat left bracket token.
3048 const MCExpr *ImmVal;
3049 if (getParser().parseExpression(ImmVal))
3051 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3053 TokError("immediate value expected for vector index");
3058 if (Parser.getTok().isNot(AsmToken::RBrac)) {
3059 Error(E, "']' expected");
3063 Parser.Lex(); // Eat right bracket token.
3065 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
3071 AArch64AsmParser::OperandMatchResultTy
3072 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
3073 MCAsmParser &Parser = getParser();
3074 const AsmToken &Tok = Parser.getTok();
3075 if (!Tok.is(AsmToken::Identifier))
3076 return MatchOperand_NoMatch;
3078 unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);
3080 MCContext &Ctx = getContext();
3081 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
3082 if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
3083 return MatchOperand_NoMatch;
3086 Parser.Lex(); // Eat register
3088 if (Parser.getTok().isNot(AsmToken::Comma)) {
3090 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
3091 return MatchOperand_Success;
3093 Parser.Lex(); // Eat comma.
3095 if (Parser.getTok().is(AsmToken::Hash))
3096 Parser.Lex(); // Eat hash
3098 if (Parser.getTok().isNot(AsmToken::Integer)) {
3099 Error(getLoc(), "index must be absent or #0");
3100 return MatchOperand_ParseFail;
3103 const MCExpr *ImmVal;
3104 if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
3105 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
3106 Error(getLoc(), "index must be absent or #0");
3107 return MatchOperand_ParseFail;
3111 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
3112 return MatchOperand_Success;
3115 /// parseOperand - Parse a arm instruction operand. For now this parses the
3116 /// operand regardless of the mnemonic.
3117 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3118 bool invertCondCode) {
3119 MCAsmParser &Parser = getParser();
3120 // Check if the current operand has a custom associated parser, if so, try to
3121 // custom parse the operand, or fallback to the general approach.
3122 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
3123 if (ResTy == MatchOperand_Success)
3125 // If there wasn't a custom match, try the generic matcher below. Otherwise,
3126 // there was a match, but an error occurred, in which case, just return that
3127 // the operand parsing failed.
3128 if (ResTy == MatchOperand_ParseFail)
3131 // Nothing custom, so do general case parsing.
3133 switch (getLexer().getKind()) {
3137 if (parseSymbolicImmVal(Expr))
3138 return Error(S, "invalid operand");
3140 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3141 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3144 case AsmToken::LBrac: {
3145 SMLoc Loc = Parser.getTok().getLoc();
3146 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3148 Parser.Lex(); // Eat '['
3150 // There's no comma after a '[', so we can parse the next operand
3152 return parseOperand(Operands, false, false);
3154 case AsmToken::LCurly:
3155 return parseVectorList(Operands);
3156 case AsmToken::Identifier: {
3157 // If we're expecting a Condition Code operand, then just parse that.
3159 return parseCondCode(Operands, invertCondCode);
3161 // If it's a register name, parse it.
3162 if (!parseRegister(Operands))
3165 // This could be an optional "shift" or "extend" operand.
3166 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3167 // We can only continue if no tokens were eaten.
3168 if (GotShift != MatchOperand_NoMatch)
3171 // This was not a register so parse other operands that start with an
3172 // identifier (like labels) as expressions and create them as immediates.
3173 const MCExpr *IdVal;
3175 if (getParser().parseExpression(IdVal))
3178 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3179 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3182 case AsmToken::Integer:
3183 case AsmToken::Real:
3184 case AsmToken::Hash: {
3185 // #42 -> immediate.
3187 if (getLexer().is(AsmToken::Hash))
3190 // Parse a negative sign
3191 bool isNegative = false;
3192 if (Parser.getTok().is(AsmToken::Minus)) {
3194 // We need to consume this token only when we have a Real, otherwise
3195 // we let parseSymbolicImmVal take care of it
3196 if (Parser.getLexer().peekTok().is(AsmToken::Real))
3200 // The only Real that should come through here is a literal #0.0 for
3201 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3202 // so convert the value.
3203 const AsmToken &Tok = Parser.getTok();
3204 if (Tok.is(AsmToken::Real)) {
3205 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3206 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3207 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3208 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3209 Mnemonic != "fcmlt")
3210 return TokError("unexpected floating point literal");
3211 else if (IntVal != 0 || isNegative)
3212 return TokError("expected floating-point constant #0.0");
3213 Parser.Lex(); // Eat the token.
3216 AArch64Operand::CreateToken("#0", false, S, getContext()));
3218 AArch64Operand::CreateToken(".0", false, S, getContext()));
3222 const MCExpr *ImmVal;
3223 if (parseSymbolicImmVal(ImmVal))
3226 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3227 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3230 case AsmToken::Equal: {
3231 SMLoc Loc = Parser.getTok().getLoc();
3232 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3233 return Error(Loc, "unexpected token in operand");
3234 Parser.Lex(); // Eat '='
3235 const MCExpr *SubExprVal;
3236 if (getParser().parseExpression(SubExprVal))
3239 if (Operands.size() < 2 ||
3240 !static_cast<AArch64Operand &>(*Operands[1]).isReg())
3241 return Error(Loc, "Only valid when first operand is register");
3244 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3245 Operands[1]->getReg());
3247 MCContext& Ctx = getContext();
3248 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3249 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3250 if (isa<MCConstantExpr>(SubExprVal)) {
3251 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
3252 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3253 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3257 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3258 Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3259 Operands.push_back(AArch64Operand::CreateImm(
3260 MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
3262 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3263 ShiftAmt, true, S, E, Ctx));
3266 APInt Simm = APInt(64, Imm << ShiftAmt);
3267 // check if the immediate is an unsigned or signed 32-bit int for W regs
3268 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3269 return Error(Loc, "Immediate too large for register");
3271 // If it is a label or an imm that cannot fit in a movz, put it into CP.
3272 const MCExpr *CPLoc =
3273 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
3274 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3280 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
3282 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3283 StringRef Name, SMLoc NameLoc,
3284 OperandVector &Operands) {
3285 MCAsmParser &Parser = getParser();
3286 Name = StringSwitch<StringRef>(Name.lower())
3287 .Case("beq", "b.eq")
3288 .Case("bne", "b.ne")
3289 .Case("bhs", "b.hs")
3290 .Case("bcs", "b.cs")
3291 .Case("blo", "b.lo")
3292 .Case("bcc", "b.cc")
3293 .Case("bmi", "b.mi")
3294 .Case("bpl", "b.pl")
3295 .Case("bvs", "b.vs")
3296 .Case("bvc", "b.vc")
3297 .Case("bhi", "b.hi")
3298 .Case("bls", "b.ls")
3299 .Case("bge", "b.ge")
3300 .Case("blt", "b.lt")
3301 .Case("bgt", "b.gt")
3302 .Case("ble", "b.le")
3303 .Case("bal", "b.al")
3304 .Case("bnv", "b.nv")
3307 // First check for the AArch64-specific .req directive.
3308 if (Parser.getTok().is(AsmToken::Identifier) &&
3309 Parser.getTok().getIdentifier() == ".req") {
3310 parseDirectiveReq(Name, NameLoc);
3311 // We always return 'error' for this, as we're done with this
3312 // statement and don't need to match the 'instruction."
3316 // Create the leading tokens for the mnemonic, split by '.' characters.
3317 size_t Start = 0, Next = Name.find('.');
3318 StringRef Head = Name.slice(Start, Next);
3320 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3321 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
3322 bool IsError = parseSysAlias(Head, NameLoc, Operands);
3323 if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
3324 Parser.eatToEndOfStatement();
3329 AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3332 // Handle condition codes for a branch mnemonic
3333 if (Head == "b" && Next != StringRef::npos) {
3335 Next = Name.find('.', Start + 1);
3336 Head = Name.slice(Start + 1, Next);
3338 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3339 (Head.data() - Name.data()));
3340 AArch64CC::CondCode CC = parseCondCodeString(Head);
3341 if (CC == AArch64CC::Invalid)
3342 return Error(SuffixLoc, "invalid condition code");
3344 AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3346 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3349 // Add the remaining tokens in the mnemonic.
3350 while (Next != StringRef::npos) {
3352 Next = Name.find('.', Start + 1);
3353 Head = Name.slice(Start, Next);
3354 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3355 (Head.data() - Name.data()) + 1);
3357 AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3360 // Conditional compare instructions have a Condition Code operand, which needs
3361 // to be parsed and an immediate operand created.
3362 bool condCodeFourthOperand =
3363 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3364 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3365 Head == "csinc" || Head == "csinv" || Head == "csneg");
3367 // These instructions are aliases to some of the conditional select
3368 // instructions. However, the condition code is inverted in the aliased
3371 // FIXME: Is this the correct way to handle these? Or should the parser
3372 // generate the aliased instructions directly?
3373 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3374 bool condCodeThirdOperand =
3375 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3377 // Read the remaining operands.
3378 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3379 // Read the first operand.
3380 if (parseOperand(Operands, false, false)) {
3381 Parser.eatToEndOfStatement();
3386 while (getLexer().is(AsmToken::Comma)) {
3387 Parser.Lex(); // Eat the comma.
3389 // Parse and remember the operand.
3390 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3391 (N == 3 && condCodeThirdOperand) ||
3392 (N == 2 && condCodeSecondOperand),
3393 condCodeSecondOperand || condCodeThirdOperand)) {
3394 Parser.eatToEndOfStatement();
3398 // After successfully parsing some operands there are two special cases to
3399 // consider (i.e. notional operands not separated by commas). Both are due
3400 // to memory specifiers:
3401 // + An RBrac will end an address for load/store/prefetch
3402 // + An '!' will indicate a pre-indexed operation.
3404 // It's someone else's responsibility to make sure these tokens are sane
3405 // in the given context!
3406 if (Parser.getTok().is(AsmToken::RBrac)) {
3407 SMLoc Loc = Parser.getTok().getLoc();
3408 Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
3413 if (Parser.getTok().is(AsmToken::Exclaim)) {
3414 SMLoc Loc = Parser.getTok().getLoc();
3415 Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
3424 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3425 SMLoc Loc = Parser.getTok().getLoc();
3426 Parser.eatToEndOfStatement();
3427 return Error(Loc, "unexpected token in argument list");
3430 Parser.Lex(); // Consume the EndOfStatement
3434 // FIXME: This entire function is a giant hack to provide us with decent
3435 // operand range validation/diagnostics until TableGen/MC can be extended
3436 // to support autogeneration of this kind of validation.
3437 bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3438 SmallVectorImpl<SMLoc> &Loc) {
3439 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3440 // Check for indexed addressing modes w/ the base register being the
3441 // same as a destination/source register or pair load where
3442 // the Rt == Rt2. All of those are undefined behaviour.
3443 switch (Inst.getOpcode()) {
3444 case AArch64::LDPSWpre:
3445 case AArch64::LDPWpost:
3446 case AArch64::LDPWpre:
3447 case AArch64::LDPXpost:
3448 case AArch64::LDPXpre: {
3449 unsigned Rt = Inst.getOperand(1).getReg();
3450 unsigned Rt2 = Inst.getOperand(2).getReg();
3451 unsigned Rn = Inst.getOperand(3).getReg();
3452 if (RI->isSubRegisterEq(Rn, Rt))
3453 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3454 "is also a destination");
3455 if (RI->isSubRegisterEq(Rn, Rt2))
3456 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3457 "is also a destination");
3460 case AArch64::LDPDi:
3461 case AArch64::LDPQi:
3462 case AArch64::LDPSi:
3463 case AArch64::LDPSWi:
3464 case AArch64::LDPWi:
3465 case AArch64::LDPXi: {
3466 unsigned Rt = Inst.getOperand(0).getReg();
3467 unsigned Rt2 = Inst.getOperand(1).getReg();
3469 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3472 case AArch64::LDPDpost:
3473 case AArch64::LDPDpre:
3474 case AArch64::LDPQpost:
3475 case AArch64::LDPQpre:
3476 case AArch64::LDPSpost:
3477 case AArch64::LDPSpre:
3478 case AArch64::LDPSWpost: {
3479 unsigned Rt = Inst.getOperand(1).getReg();
3480 unsigned Rt2 = Inst.getOperand(2).getReg();
3482 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3485 case AArch64::STPDpost:
3486 case AArch64::STPDpre:
3487 case AArch64::STPQpost:
3488 case AArch64::STPQpre:
3489 case AArch64::STPSpost:
3490 case AArch64::STPSpre:
3491 case AArch64::STPWpost:
3492 case AArch64::STPWpre:
3493 case AArch64::STPXpost:
3494 case AArch64::STPXpre: {
3495 unsigned Rt = Inst.getOperand(1).getReg();
3496 unsigned Rt2 = Inst.getOperand(2).getReg();
3497 unsigned Rn = Inst.getOperand(3).getReg();
3498 if (RI->isSubRegisterEq(Rn, Rt))
3499 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3500 "is also a source");
3501 if (RI->isSubRegisterEq(Rn, Rt2))
3502 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3503 "is also a source");
3506 case AArch64::LDRBBpre:
3507 case AArch64::LDRBpre:
3508 case AArch64::LDRHHpre:
3509 case AArch64::LDRHpre:
3510 case AArch64::LDRSBWpre:
3511 case AArch64::LDRSBXpre:
3512 case AArch64::LDRSHWpre:
3513 case AArch64::LDRSHXpre:
3514 case AArch64::LDRSWpre:
3515 case AArch64::LDRWpre:
3516 case AArch64::LDRXpre:
3517 case AArch64::LDRBBpost:
3518 case AArch64::LDRBpost:
3519 case AArch64::LDRHHpost:
3520 case AArch64::LDRHpost:
3521 case AArch64::LDRSBWpost:
3522 case AArch64::LDRSBXpost:
3523 case AArch64::LDRSHWpost:
3524 case AArch64::LDRSHXpost:
3525 case AArch64::LDRSWpost:
3526 case AArch64::LDRWpost:
3527 case AArch64::LDRXpost: {
3528 unsigned Rt = Inst.getOperand(1).getReg();
3529 unsigned Rn = Inst.getOperand(2).getReg();
3530 if (RI->isSubRegisterEq(Rn, Rt))
3531 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3532 "is also a source");
3535 case AArch64::STRBBpost:
3536 case AArch64::STRBpost:
3537 case AArch64::STRHHpost:
3538 case AArch64::STRHpost:
3539 case AArch64::STRWpost:
3540 case AArch64::STRXpost:
3541 case AArch64::STRBBpre:
3542 case AArch64::STRBpre:
3543 case AArch64::STRHHpre:
3544 case AArch64::STRHpre:
3545 case AArch64::STRWpre:
3546 case AArch64::STRXpre: {
3547 unsigned Rt = Inst.getOperand(1).getReg();
3548 unsigned Rn = Inst.getOperand(2).getReg();
3549 if (RI->isSubRegisterEq(Rn, Rt))
3550 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3551 "is also a source");
3556 // Now check immediate ranges. Separate from the above as there is overlap
3557 // in the instructions being checked and this keeps the nested conditionals
3559 switch (Inst.getOpcode()) {
3560 case AArch64::ADDSWri:
3561 case AArch64::ADDSXri:
3562 case AArch64::ADDWri:
3563 case AArch64::ADDXri:
3564 case AArch64::SUBSWri:
3565 case AArch64::SUBSXri:
3566 case AArch64::SUBWri:
3567 case AArch64::SUBXri: {
3568 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3569 // some slight duplication here.
3570 if (Inst.getOperand(2).isExpr()) {
3571 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3572 AArch64MCExpr::VariantKind ELFRefKind;
3573 MCSymbolRefExpr::VariantKind DarwinRefKind;
3575 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3576 return Error(Loc[2], "invalid immediate expression");
3579 // Only allow these with ADDXri.
3580 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3581 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3582 Inst.getOpcode() == AArch64::ADDXri)
3585 // Only allow these with ADDXri/ADDWri
3586 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3587 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3588 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3589 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3590 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3591 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3592 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3593 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
3594 (Inst.getOpcode() == AArch64::ADDXri ||
3595 Inst.getOpcode() == AArch64::ADDWri))
3598 // Don't allow expressions in the immediate field otherwise
3599 return Error(Loc[2], "invalid immediate expression");
3608 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
3610 case Match_MissingFeature:
3612 "instruction requires a CPU feature not currently enabled");
3613 case Match_InvalidOperand:
3614 return Error(Loc, "invalid operand for instruction");
3615 case Match_InvalidSuffix:
3616 return Error(Loc, "invalid type suffix for instruction");
3617 case Match_InvalidCondCode:
3618 return Error(Loc, "expected AArch64 condition code");
3619 case Match_AddSubRegExtendSmall:
3621 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3622 case Match_AddSubRegExtendLarge:
3624 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3625 case Match_AddSubSecondSource:
3627 "expected compatible register, symbol or integer in range [0, 4095]");
3628 case Match_LogicalSecondSource:
3629 return Error(Loc, "expected compatible register or logical immediate");
3630 case Match_InvalidMovImm32Shift:
3631 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3632 case Match_InvalidMovImm64Shift:
3633 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3634 case Match_AddSubRegShift32:
3636 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3637 case Match_AddSubRegShift64:
3639 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3640 case Match_InvalidFPImm:
3642 "expected compatible register or floating-point constant");
3643 case Match_InvalidMemoryIndexedSImm9:
3644 return Error(Loc, "index must be an integer in range [-256, 255].");
3645 case Match_InvalidMemoryIndexed4SImm7:
3646 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3647 case Match_InvalidMemoryIndexed8SImm7:
3648 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3649 case Match_InvalidMemoryIndexed16SImm7:
3650 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
3651 case Match_InvalidMemoryWExtend8:
3653 "expected 'uxtw' or 'sxtw' with optional shift of #0");
3654 case Match_InvalidMemoryWExtend16:
3656 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3657 case Match_InvalidMemoryWExtend32:
3659 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3660 case Match_InvalidMemoryWExtend64:
3662 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3663 case Match_InvalidMemoryWExtend128:
3665 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3666 case Match_InvalidMemoryXExtend8:
3668 "expected 'lsl' or 'sxtx' with optional shift of #0");
3669 case Match_InvalidMemoryXExtend16:
3671 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3672 case Match_InvalidMemoryXExtend32:
3674 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3675 case Match_InvalidMemoryXExtend64:
3677 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3678 case Match_InvalidMemoryXExtend128:
3680 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
3681 case Match_InvalidMemoryIndexed1:
3682 return Error(Loc, "index must be an integer in range [0, 4095].");
3683 case Match_InvalidMemoryIndexed2:
3684 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3685 case Match_InvalidMemoryIndexed4:
3686 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3687 case Match_InvalidMemoryIndexed8:
3688 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3689 case Match_InvalidMemoryIndexed16:
3690 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
3691 case Match_InvalidImm0_1:
3692 return Error(Loc, "immediate must be an integer in range [0, 1].");
3693 case Match_InvalidImm0_7:
3694 return Error(Loc, "immediate must be an integer in range [0, 7].");
3695 case Match_InvalidImm0_15:
3696 return Error(Loc, "immediate must be an integer in range [0, 15].");
3697 case Match_InvalidImm0_31:
3698 return Error(Loc, "immediate must be an integer in range [0, 31].");
3699 case Match_InvalidImm0_63:
3700 return Error(Loc, "immediate must be an integer in range [0, 63].");
3701 case Match_InvalidImm0_127:
3702 return Error(Loc, "immediate must be an integer in range [0, 127].");
3703 case Match_InvalidImm0_65535:
3704 return Error(Loc, "immediate must be an integer in range [0, 65535].");
3705 case Match_InvalidImm1_8:
3706 return Error(Loc, "immediate must be an integer in range [1, 8].");
3707 case Match_InvalidImm1_16:
3708 return Error(Loc, "immediate must be an integer in range [1, 16].");
3709 case Match_InvalidImm1_32:
3710 return Error(Loc, "immediate must be an integer in range [1, 32].");
3711 case Match_InvalidImm1_64:
3712 return Error(Loc, "immediate must be an integer in range [1, 64].");
3713 case Match_InvalidIndex1:
3714 return Error(Loc, "expected lane specifier '[1]'");
3715 case Match_InvalidIndexB:
3716 return Error(Loc, "vector lane must be an integer in range [0, 15].");
3717 case Match_InvalidIndexH:
3718 return Error(Loc, "vector lane must be an integer in range [0, 7].");
3719 case Match_InvalidIndexS:
3720 return Error(Loc, "vector lane must be an integer in range [0, 3].");
3721 case Match_InvalidIndexD:
3722 return Error(Loc, "vector lane must be an integer in range [0, 1].");
3723 case Match_InvalidLabel:
3724 return Error(Loc, "expected label or encodable integer pc offset");
3726 return Error(Loc, "expected readable system register");
3728 return Error(Loc, "expected writable system register or pstate");
3729 case Match_MnemonicFail:
3730 return Error(Loc, "unrecognized instruction mnemonic");
3732 llvm_unreachable("unexpected error code!");
3736 static const char *getSubtargetFeatureName(uint64_t Val);
3738 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3739 OperandVector &Operands,
3741 uint64_t &ErrorInfo,
3742 bool MatchingInlineAsm) {
3743 assert(!Operands.empty() && "Unexpect empty operand list!");
3744 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3745 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3747 StringRef Tok = Op.getToken();
3748 unsigned NumOperands = Operands.size();
3750 if (NumOperands == 4 && Tok == "lsl") {
3751 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3752 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3753 if (Op2.isReg() && Op3.isImm()) {
3754 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3756 uint64_t Op3Val = Op3CE->getValue();
3757 uint64_t NewOp3Val = 0;
3758 uint64_t NewOp4Val = 0;
3759 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3761 NewOp3Val = (32 - Op3Val) & 0x1f;
3762 NewOp4Val = 31 - Op3Val;
3764 NewOp3Val = (64 - Op3Val) & 0x3f;
3765 NewOp4Val = 63 - Op3Val;
3768 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
3769 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
3771 Operands[0] = AArch64Operand::CreateToken(
3772 "ubfm", false, Op.getStartLoc(), getContext());
3773 Operands.push_back(AArch64Operand::CreateImm(
3774 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3775 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3776 Op3.getEndLoc(), getContext());
3779 } else if (NumOperands == 4 && Tok == "bfc") {
3780 // FIXME: Horrible hack to handle BFC->BFM alias.
3781 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3782 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
3783 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
3785 if (Op1.isReg() && LSBOp.isImm() && WidthOp.isImm()) {
3786 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
3787 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
3789 if (LSBCE && WidthCE) {
3790 uint64_t LSB = LSBCE->getValue();
3791 uint64_t Width = WidthCE->getValue();
3793 uint64_t RegWidth = 0;
3794 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3800 if (LSB >= RegWidth)
3801 return Error(LSBOp.getStartLoc(),
3802 "expected integer in range [0, 31]");
3803 if (Width < 1 || Width > RegWidth)
3804 return Error(WidthOp.getStartLoc(),
3805 "expected integer in range [1, 32]");
3809 ImmR = (32 - LSB) & 0x1f;
3811 ImmR = (64 - LSB) & 0x3f;
3813 uint64_t ImmS = Width - 1;
3815 if (ImmR != 0 && ImmS >= ImmR)
3816 return Error(WidthOp.getStartLoc(),
3817 "requested insert overflows register");
3819 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
3820 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
3821 Operands[0] = AArch64Operand::CreateToken(
3822 "bfm", false, Op.getStartLoc(), getContext());
3823 Operands[2] = AArch64Operand::CreateReg(
3824 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, false, SMLoc(),
3825 SMLoc(), getContext());
3826 Operands[3] = AArch64Operand::CreateImm(
3827 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
3828 Operands.emplace_back(
3829 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
3830 WidthOp.getEndLoc(), getContext()));
3833 } else if (NumOperands == 5) {
3834 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3835 // UBFIZ -> UBFM aliases.
3836 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3837 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3838 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3839 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3841 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3842 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3843 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3845 if (Op3CE && Op4CE) {
3846 uint64_t Op3Val = Op3CE->getValue();
3847 uint64_t Op4Val = Op4CE->getValue();
3849 uint64_t RegWidth = 0;
3850 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3856 if (Op3Val >= RegWidth)
3857 return Error(Op3.getStartLoc(),
3858 "expected integer in range [0, 31]");
3859 if (Op4Val < 1 || Op4Val > RegWidth)
3860 return Error(Op4.getStartLoc(),
3861 "expected integer in range [1, 32]");
3863 uint64_t NewOp3Val = 0;
3865 NewOp3Val = (32 - Op3Val) & 0x1f;
3867 NewOp3Val = (64 - Op3Val) & 0x3f;
3869 uint64_t NewOp4Val = Op4Val - 1;
3871 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3872 return Error(Op4.getStartLoc(),
3873 "requested insert overflows register");
3875 const MCExpr *NewOp3 =
3876 MCConstantExpr::create(NewOp3Val, getContext());
3877 const MCExpr *NewOp4 =
3878 MCConstantExpr::create(NewOp4Val, getContext());
3879 Operands[3] = AArch64Operand::CreateImm(
3880 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
3881 Operands[4] = AArch64Operand::CreateImm(
3882 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3884 Operands[0] = AArch64Operand::CreateToken(
3885 "bfm", false, Op.getStartLoc(), getContext());
3886 else if (Tok == "sbfiz")
3887 Operands[0] = AArch64Operand::CreateToken(
3888 "sbfm", false, Op.getStartLoc(), getContext());
3889 else if (Tok == "ubfiz")
3890 Operands[0] = AArch64Operand::CreateToken(
3891 "ubfm", false, Op.getStartLoc(), getContext());
3893 llvm_unreachable("No valid mnemonic for alias?");
3897 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3898 // UBFX -> UBFM aliases.
3899 } else if (NumOperands == 5 &&
3900 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
3901 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3902 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3903 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3905 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3906 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3907 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3909 if (Op3CE && Op4CE) {
3910 uint64_t Op3Val = Op3CE->getValue();
3911 uint64_t Op4Val = Op4CE->getValue();
3913 uint64_t RegWidth = 0;
3914 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3920 if (Op3Val >= RegWidth)
3921 return Error(Op3.getStartLoc(),
3922 "expected integer in range [0, 31]");
3923 if (Op4Val < 1 || Op4Val > RegWidth)
3924 return Error(Op4.getStartLoc(),
3925 "expected integer in range [1, 32]");
3927 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3929 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
3930 return Error(Op4.getStartLoc(),
3931 "requested extract overflows register");
3933 const MCExpr *NewOp4 =
3934 MCConstantExpr::create(NewOp4Val, getContext());
3935 Operands[4] = AArch64Operand::CreateImm(
3936 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3938 Operands[0] = AArch64Operand::CreateToken(
3939 "bfm", false, Op.getStartLoc(), getContext());
3940 else if (Tok == "sbfx")
3941 Operands[0] = AArch64Operand::CreateToken(
3942 "sbfm", false, Op.getStartLoc(), getContext());
3943 else if (Tok == "ubfx")
3944 Operands[0] = AArch64Operand::CreateToken(
3945 "ubfm", false, Op.getStartLoc(), getContext());
3947 llvm_unreachable("No valid mnemonic for alias?");
3952 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3953 // InstAlias can't quite handle this since the reg classes aren't
3955 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3956 // The source register can be Wn here, but the matcher expects a
3957 // GPR64. Twiddle it here if necessary.
3958 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3960 unsigned Reg = getXRegFromWReg(Op.getReg());
3961 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3962 Op.getEndLoc(), getContext());
3965 // FIXME: Likewise for sxt[bh] with a Xd dst operand
3966 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
3967 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3969 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3971 // The source register can be Wn here, but the matcher expects a
3972 // GPR64. Twiddle it here if necessary.
3973 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3975 unsigned Reg = getXRegFromWReg(Op.getReg());
3976 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3977 Op.getEndLoc(), getContext());
3981 // FIXME: Likewise for uxt[bh] with a Xd dst operand
3982 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
3983 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3985 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3987 // The source register can be Wn here, but the matcher expects a
3988 // GPR32. Twiddle it here if necessary.
3989 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3991 unsigned Reg = getWRegFromXReg(Op.getReg());
3992 Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3993 Op.getEndLoc(), getContext());
3998 // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
3999 if (NumOperands == 3 && Tok == "fmov") {
4000 AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
4001 AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
4002 if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
4004 !AArch64MCRegisterClasses[AArch64::FPR64RegClassID].contains(
4008 Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
4009 Op.getEndLoc(), getContext());
4014 // First try to match against the secondary set of tables containing the
4015 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
4016 unsigned MatchResult =
4017 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
4019 // If that fails, try against the alternate table containing long-form NEON:
4020 // "fadd v0.2s, v1.2s, v2.2s"
4021 if (MatchResult != Match_Success) {
4022 // But first, save the short-form match result: we can use it in case the
4023 // long-form match also fails.
4024 auto ShortFormNEONErrorInfo = ErrorInfo;
4025 auto ShortFormNEONMatchResult = MatchResult;
4028 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
4030 // Now, both matches failed, and the long-form match failed on the mnemonic
4031 // suffix token operand. The short-form match failure is probably more
4032 // relevant: use it instead.
4033 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
4034 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
4035 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
4036 MatchResult = ShortFormNEONMatchResult;
4037 ErrorInfo = ShortFormNEONErrorInfo;
4042 switch (MatchResult) {
4043 case Match_Success: {
4044 // Perform range checking and other semantic validations
4045 SmallVector<SMLoc, 8> OperandLocs;
4046 NumOperands = Operands.size();
4047 for (unsigned i = 1; i < NumOperands; ++i)
4048 OperandLocs.push_back(Operands[i]->getStartLoc());
4049 if (validateInstruction(Inst, OperandLocs))
4053 Out.EmitInstruction(Inst, getSTI());
4056 case Match_MissingFeature: {
4057 assert(ErrorInfo && "Unknown missing feature!");
4058 // Special case the error message for the very common case where only
4059 // a single subtarget feature is missing (neon, e.g.).
4060 std::string Msg = "instruction requires:";
4062 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
4063 if (ErrorInfo & Mask) {
4065 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
4069 return Error(IDLoc, Msg);
4071 case Match_MnemonicFail:
4072 return showMatchError(IDLoc, MatchResult);
4073 case Match_InvalidOperand: {
4074 SMLoc ErrorLoc = IDLoc;
4076 if (ErrorInfo != ~0ULL) {
4077 if (ErrorInfo >= Operands.size())
4078 return Error(IDLoc, "too few operands for instruction");
4080 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4081 if (ErrorLoc == SMLoc())
4084 // If the match failed on a suffix token operand, tweak the diagnostic
4086 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
4087 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
4088 MatchResult = Match_InvalidSuffix;
4090 return showMatchError(ErrorLoc, MatchResult);
4092 case Match_InvalidMemoryIndexed1:
4093 case Match_InvalidMemoryIndexed2:
4094 case Match_InvalidMemoryIndexed4:
4095 case Match_InvalidMemoryIndexed8:
4096 case Match_InvalidMemoryIndexed16:
4097 case Match_InvalidCondCode:
4098 case Match_AddSubRegExtendSmall:
4099 case Match_AddSubRegExtendLarge:
4100 case Match_AddSubSecondSource:
4101 case Match_LogicalSecondSource:
4102 case Match_AddSubRegShift32:
4103 case Match_AddSubRegShift64:
4104 case Match_InvalidMovImm32Shift:
4105 case Match_InvalidMovImm64Shift:
4106 case Match_InvalidFPImm:
4107 case Match_InvalidMemoryWExtend8:
4108 case Match_InvalidMemoryWExtend16:
4109 case Match_InvalidMemoryWExtend32:
4110 case Match_InvalidMemoryWExtend64:
4111 case Match_InvalidMemoryWExtend128:
4112 case Match_InvalidMemoryXExtend8:
4113 case Match_InvalidMemoryXExtend16:
4114 case Match_InvalidMemoryXExtend32:
4115 case Match_InvalidMemoryXExtend64:
4116 case Match_InvalidMemoryXExtend128:
4117 case Match_InvalidMemoryIndexed4SImm7:
4118 case Match_InvalidMemoryIndexed8SImm7:
4119 case Match_InvalidMemoryIndexed16SImm7:
4120 case Match_InvalidMemoryIndexedSImm9:
4121 case Match_InvalidImm0_1:
4122 case Match_InvalidImm0_7:
4123 case Match_InvalidImm0_15:
4124 case Match_InvalidImm0_31:
4125 case Match_InvalidImm0_63:
4126 case Match_InvalidImm0_127:
4127 case Match_InvalidImm0_65535:
4128 case Match_InvalidImm1_8:
4129 case Match_InvalidImm1_16:
4130 case Match_InvalidImm1_32:
4131 case Match_InvalidImm1_64:
4132 case Match_InvalidIndex1:
4133 case Match_InvalidIndexB:
4134 case Match_InvalidIndexH:
4135 case Match_InvalidIndexS:
4136 case Match_InvalidIndexD:
4137 case Match_InvalidLabel:
4140 if (ErrorInfo >= Operands.size())
4141 return Error(IDLoc, "too few operands for instruction");
4142 // Any time we get here, there's nothing fancy to do. Just get the
4143 // operand SMLoc and display the diagnostic.
4144 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4145 if (ErrorLoc == SMLoc())
4147 return showMatchError(ErrorLoc, MatchResult);
4151 llvm_unreachable("Implement any new match types added!");
4154 /// ParseDirective parses the arm specific directives
4155 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
4156 const MCObjectFileInfo::Environment Format =
4157 getContext().getObjectFileInfo()->getObjectFileType();
4158 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
4159 bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
4161 StringRef IDVal = DirectiveID.getIdentifier();
4162 SMLoc Loc = DirectiveID.getLoc();
4163 if (IDVal == ".arch")
4164 return parseDirectiveArch(Loc);
4165 if (IDVal == ".cpu")
4166 return parseDirectiveCPU(Loc);
4167 if (IDVal == ".hword")
4168 return parseDirectiveWord(2, Loc);
4169 if (IDVal == ".word")
4170 return parseDirectiveWord(4, Loc);
4171 if (IDVal == ".xword")
4172 return parseDirectiveWord(8, Loc);
4173 if (IDVal == ".tlsdesccall")
4174 return parseDirectiveTLSDescCall(Loc);
4175 if (IDVal == ".ltorg" || IDVal == ".pool")
4176 return parseDirectiveLtorg(Loc);
4177 if (IDVal == ".unreq")
4178 return parseDirectiveUnreq(Loc);
4180 if (!IsMachO && !IsCOFF) {
4181 if (IDVal == ".inst")
4182 return parseDirectiveInst(Loc);
4185 return parseDirectiveLOH(IDVal, Loc);
4188 static const struct {
4190 const FeatureBitset Features;
4191 } ExtensionMap[] = {
4192 { "crc", {AArch64::FeatureCRC} },
4193 { "crypto", {AArch64::FeatureCrypto} },
4194 { "fp", {AArch64::FeatureFPARMv8} },
4195 { "simd", {AArch64::FeatureNEON} },
4197 // FIXME: Unsupported extensions
4205 /// parseDirectiveArch
4207 bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
4208 SMLoc ArchLoc = getLoc();
4210 StringRef Arch, ExtensionString;
4211 std::tie(Arch, ExtensionString) =
4212 getParser().parseStringToEndOfStatement().trim().split('+');
4214 unsigned ID = AArch64::parseArch(Arch);
4215 if (ID == ARM::AK_INVALID) {
4216 Error(ArchLoc, "unknown arch name");
4220 MCSubtargetInfo &STI = copySTI();
4221 STI.setDefaultFeatures("", "");
4222 if (!ExtensionString.empty())
4223 STI.setDefaultFeatures("", ("+" + ExtensionString).str());
4224 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
4229 /// parseDirectiveCPU
4231 bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
4232 SMLoc CPULoc = getLoc();
4234 StringRef CPU, ExtensionString;
4235 std::tie(CPU, ExtensionString) =
4236 getParser().parseStringToEndOfStatement().trim().split('+');
4238 SmallVector<StringRef, 4> RequestedExtensions;
4239 if (!ExtensionString.empty())
4240 ExtensionString.split(RequestedExtensions, '+');
4242 // FIXME This is using tablegen data, but should be moved to ARMTargetParser
4243 // once that is tablegen'ed
4244 if (!getSTI().isCPUStringValid(CPU)) {
4245 Error(CPULoc, "unknown CPU name");
4249 MCSubtargetInfo &STI = copySTI();
4250 STI.setDefaultFeatures(CPU, "");
4252 FeatureBitset Features = STI.getFeatureBits();
4253 for (auto Name : RequestedExtensions) {
4254 bool EnableFeature = true;
4256 if (Name.startswith_lower("no")) {
4257 EnableFeature = false;
4258 Name = Name.substr(2);
4261 for (const auto &Extension : ExtensionMap) {
4262 if (Extension.Name != Name)
4265 if (Extension.Features.none())
4266 report_fatal_error("unsupported architectural extension: " + Name);
4268 FeatureBitset ToggleFeatures = EnableFeature
4269 ? (~Features & Extension.Features)
4270 : ( Features & Extension.Features);
4272 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
4273 setAvailableFeatures(Features);
4281 /// parseDirectiveWord
4282 /// ::= .word [ expression (, expression)* ]
4283 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
4284 MCAsmParser &Parser = getParser();
4285 if (getLexer().isNot(AsmToken::EndOfStatement)) {
4287 const MCExpr *Value;
4288 if (getParser().parseExpression(Value))
4291 getParser().getStreamer().EmitValue(Value, Size, L);
4293 if (getLexer().is(AsmToken::EndOfStatement))
4296 // FIXME: Improve diagnostic.
4297 if (getLexer().isNot(AsmToken::Comma))
4298 return Error(L, "unexpected token in directive");
4307 /// parseDirectiveInst
4308 /// ::= .inst opcode [, ...]
4309 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
4310 MCAsmParser &Parser = getParser();
4311 if (getLexer().is(AsmToken::EndOfStatement)) {
4312 Parser.eatToEndOfStatement();
4313 Error(Loc, "expected expression following directive");
4320 if (getParser().parseExpression(Expr)) {
4321 Error(Loc, "expected expression");
4325 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4327 Error(Loc, "expected constant expression");
4331 getTargetStreamer().emitInst(Value->getValue());
4333 if (getLexer().is(AsmToken::EndOfStatement))
4336 if (getLexer().isNot(AsmToken::Comma)) {
4337 Error(Loc, "unexpected token in directive");
4341 Parser.Lex(); // Eat comma.
4348 // parseDirectiveTLSDescCall:
4349 // ::= .tlsdesccall symbol
4350 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4352 if (getParser().parseIdentifier(Name))
4353 return Error(L, "expected symbol after directive");
4355 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
4356 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
4357 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
4360 Inst.setOpcode(AArch64::TLSDESCCALL);
4361 Inst.addOperand(MCOperand::createExpr(Expr));
4363 getParser().getStreamer().EmitInstruction(Inst, getSTI());
4367 /// ::= .loh <lohName | lohId> label1, ..., labelN
4368 /// The number of arguments depends on the loh identifier.
4369 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
4370 if (IDVal != MCLOHDirectiveName())
4373 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4374 if (getParser().getTok().isNot(AsmToken::Integer))
4375 return TokError("expected an identifier or a number in directive");
4376 // We successfully get a numeric value for the identifier.
4377 // Check if it is valid.
4378 int64_t Id = getParser().getTok().getIntVal();
4379 if (Id <= -1U && !isValidMCLOHType(Id))
4380 return TokError("invalid numeric identifier in directive");
4381 Kind = (MCLOHType)Id;
4383 StringRef Name = getTok().getIdentifier();
4384 // We successfully parse an identifier.
4385 // Check if it is a recognized one.
4386 int Id = MCLOHNameToId(Name);
4389 return TokError("invalid identifier in directive");
4390 Kind = (MCLOHType)Id;
4392 // Consume the identifier.
4394 // Get the number of arguments of this LOH.
4395 int NbArgs = MCLOHIdToNbArgs(Kind);
4397 assert(NbArgs != -1 && "Invalid number of arguments");
4399 SmallVector<MCSymbol *, 3> Args;
4400 for (int Idx = 0; Idx < NbArgs; ++Idx) {
4402 if (getParser().parseIdentifier(Name))
4403 return TokError("expected identifier in directive");
4404 Args.push_back(getContext().getOrCreateSymbol(Name));
4406 if (Idx + 1 == NbArgs)
4408 if (getLexer().isNot(AsmToken::Comma))
4409 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4412 if (getLexer().isNot(AsmToken::EndOfStatement))
4413 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4415 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4419 /// parseDirectiveLtorg
4420 /// ::= .ltorg | .pool
4421 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
4422 getTargetStreamer().emitCurrentConstantPool();
4426 /// parseDirectiveReq
4427 /// ::= name .req registername
4428 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
4429 MCAsmParser &Parser = getParser();
4430 Parser.Lex(); // Eat the '.req' token.
4431 SMLoc SRegLoc = getLoc();
4432 unsigned RegNum = tryParseRegister();
4433 bool IsVector = false;
4435 if (RegNum == static_cast<unsigned>(-1)) {
4437 RegNum = tryMatchVectorRegister(Kind, false);
4438 if (!Kind.empty()) {
4439 Error(SRegLoc, "vector register without type specifier expected");
4445 if (RegNum == static_cast<unsigned>(-1)) {
4446 Parser.eatToEndOfStatement();
4447 Error(SRegLoc, "register name or alias expected");
4451 // Shouldn't be anything else.
4452 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
4453 Error(Parser.getTok().getLoc(), "unexpected input in .req directive");
4454 Parser.eatToEndOfStatement();
4458 Parser.Lex(); // Consume the EndOfStatement
4460 auto pair = std::make_pair(IsVector, RegNum);
4461 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
4462 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
4467 /// parseDirectiveUneq
4468 /// ::= .unreq registername
4469 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
4470 MCAsmParser &Parser = getParser();
4471 if (Parser.getTok().isNot(AsmToken::Identifier)) {
4472 Error(Parser.getTok().getLoc(), "unexpected input in .unreq directive.");
4473 Parser.eatToEndOfStatement();
4476 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
4477 Parser.Lex(); // Eat the identifier.
4482 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
4483 AArch64MCExpr::VariantKind &ELFRefKind,
4484 MCSymbolRefExpr::VariantKind &DarwinRefKind,
4486 ELFRefKind = AArch64MCExpr::VK_INVALID;
4487 DarwinRefKind = MCSymbolRefExpr::VK_None;
4490 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
4491 ELFRefKind = AE->getKind();
4492 Expr = AE->getSubExpr();
4495 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4497 // It's a simple symbol reference with no addend.
4498 DarwinRefKind = SE->getKind();
4502 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4506 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4509 DarwinRefKind = SE->getKind();
4511 if (BE->getOpcode() != MCBinaryExpr::Add &&
4512 BE->getOpcode() != MCBinaryExpr::Sub)
4515 // See if the addend is is a constant, otherwise there's more going
4516 // on here than we can deal with.
4517 auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
4521 Addend = AddendExpr->getValue();
4522 if (BE->getOpcode() == MCBinaryExpr::Sub)
4525 // It's some symbol reference + a constant addend, but really
4526 // shouldn't use both Darwin and ELF syntax.
4527 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
4528 DarwinRefKind == MCSymbolRefExpr::VK_None;
4531 /// Force static initialization.
4532 extern "C" void LLVMInitializeAArch64AsmParser() {
4533 RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
4534 RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
4535 RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64Target);
4538 #define GET_REGISTER_MATCHER
4539 #define GET_SUBTARGET_FEATURE_NAME
4540 #define GET_MATCHER_IMPLEMENTATION
4541 #include "AArch64GenAsmMatcher.inc"
4543 // Define this matcher function after the auto-generated include so we
4544 // have the match class enum definitions.
4545 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
4547 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
4548 // If the kind is a token for a literal immediate, check if our asm
4549 // operand matches. This is for InstAliases which have a fixed-value
4550 // immediate in the syntax.
4551 int64_t ExpectedVal;
4554 return Match_InvalidOperand;
4596 return Match_InvalidOperand;
4597 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
4599 return Match_InvalidOperand;
4600 if (CE->getValue() == ExpectedVal)
4601 return Match_Success;
4602 return Match_InvalidOperand;
4606 AArch64AsmParser::OperandMatchResultTy
4607 AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
4611 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4612 Error(S, "expected register");
4613 return MatchOperand_ParseFail;
4616 int FirstReg = tryParseRegister();
4617 if (FirstReg == -1) {
4618 return MatchOperand_ParseFail;
4620 const MCRegisterClass &WRegClass =
4621 AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
4622 const MCRegisterClass &XRegClass =
4623 AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
4625 bool isXReg = XRegClass.contains(FirstReg),
4626 isWReg = WRegClass.contains(FirstReg);
4627 if (!isXReg && !isWReg) {
4628 Error(S, "expected first even register of a "
4629 "consecutive same-size even/odd register pair");
4630 return MatchOperand_ParseFail;
4633 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4634 unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
4636 if (FirstEncoding & 0x1) {
4637 Error(S, "expected first even register of a "
4638 "consecutive same-size even/odd register pair");
4639 return MatchOperand_ParseFail;
4643 if (getParser().getTok().isNot(AsmToken::Comma)) {
4644 Error(M, "expected comma");
4645 return MatchOperand_ParseFail;
4651 int SecondReg = tryParseRegister();
4652 if (SecondReg ==-1) {
4653 return MatchOperand_ParseFail;
4656 if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
4657 (isXReg && !XRegClass.contains(SecondReg)) ||
4658 (isWReg && !WRegClass.contains(SecondReg))) {
4659 Error(E,"expected second odd register of a "
4660 "consecutive same-size even/odd register pair");
4661 return MatchOperand_ParseFail;
4666 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
4667 &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
4669 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
4670 &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
4673 Operands.push_back(AArch64Operand::CreateReg(Pair, false, S, getLoc(),
4676 return MatchOperand_Success;