1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "MCTargetDesc/AArch64MCTargetDesc.h"
13 #include "MCTargetDesc/AArch64TargetStreamer.h"
14 #include "Utils/AArch64BaseInfo.h"
15 #include "llvm/ADT/APFloat.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/SmallVector.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/StringExtras.h"
21 #include "llvm/ADT/StringMap.h"
22 #include "llvm/ADT/StringRef.h"
23 #include "llvm/ADT/StringSwitch.h"
24 #include "llvm/ADT/Twine.h"
25 #include "llvm/MC/MCContext.h"
26 #include "llvm/MC/MCExpr.h"
27 #include "llvm/MC/MCInst.h"
28 #include "llvm/MC/MCLinkerOptimizationHint.h"
29 #include "llvm/MC/MCObjectFileInfo.h"
30 #include "llvm/MC/MCParser/MCAsmLexer.h"
31 #include "llvm/MC/MCParser/MCAsmParser.h"
32 #include "llvm/MC/MCParser/MCAsmParserExtension.h"
33 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
34 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
35 #include "llvm/MC/MCRegisterInfo.h"
36 #include "llvm/MC/MCStreamer.h"
37 #include "llvm/MC/MCSubtargetInfo.h"
38 #include "llvm/MC/MCSymbol.h"
39 #include "llvm/MC/MCTargetOptions.h"
40 #include "llvm/MC/SubtargetFeature.h"
41 #include "llvm/Support/Casting.h"
42 #include "llvm/Support/Compiler.h"
43 #include "llvm/Support/ErrorHandling.h"
44 #include "llvm/Support/MathExtras.h"
45 #include "llvm/Support/SMLoc.h"
46 #include "llvm/Support/TargetParser.h"
47 #include "llvm/Support/TargetRegistry.h"
48 #include "llvm/Support/raw_ostream.h"
// Target-specific assembly parser for AArch64. Declares the directive and
// operand parsing entry points plus MatchAndEmitInstruction, which drives
// the tablegen-generated matcher included below.
// NOTE(review): this excerpt is missing interleaved lines (access specifiers,
// some members) — verify against the full file before relying on layout.
62 class AArch64AsmParser : public MCTargetAsmParser {
64 StringRef Mnemonic; ///< Instruction mnemonic.
66 // Map of register aliases registers via the .req directive.
67 StringMap<std::pair<bool, unsigned>> RegisterReqs;
// Returns the AArch64 target streamer; the constructor below installs one on
// the MCStreamer if none is present, so the static_cast is safe afterwards.
69 AArch64TargetStreamer &getTargetStreamer() {
70 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
71 return static_cast<AArch64TargetStreamer &>(TS);
// Location of the token currently under the lexer cursor.
74 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
// Hand-written parsing helpers for registers, condition codes, immediates
// and vector lists.
76 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
77 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
78 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
79 unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
80 int tryParseRegister();
81 int tryMatchVectorRegister(StringRef &Kind, bool expected);
82 bool parseRegister(OperandVector &Operands);
83 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
84 bool parseVectorList(OperandVector &Operands);
85 bool parseOperand(OperandVector &Operands, bool isCondCode,
88 bool showMatchError(SMLoc Loc, unsigned ErrCode);
// Assembler directive handlers (.arch, .cpu, .word, .inst, .req, ...).
90 bool parseDirectiveArch(SMLoc L);
91 bool parseDirectiveCPU(SMLoc L);
92 bool parseDirectiveWord(unsigned Size, SMLoc L);
93 bool parseDirectiveInst(SMLoc L);
95 bool parseDirectiveTLSDescCall(SMLoc L);
97 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
98 bool parseDirectiveLtorg(SMLoc L);
100 bool parseDirectiveReq(StringRef Name, SMLoc L);
101 bool parseDirectiveUnreq(SMLoc L);
103 bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
104 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
105 OperandVector &Operands, MCStreamer &Out,
107 bool MatchingInlineAsm) override;
108 /// @name Auto-generated Match Functions
111 #define GET_ASSEMBLER_HEADER
112 #include "AArch64GenAsmMatcher.inc"
// tryParse* helpers for individual operand classes; each may fail without
// committing to a parse.
116 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
117 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
118 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
119 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
120 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
121 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
122 OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
123 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
124 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
125 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
126 OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
127 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
128 bool tryParseVectorRegister(OperandVector &Operands);
129 OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
// Target-specific match results, extended by tablegen-generated
// operand-diagnostic codes.
132 enum AArch64MatchResultTy {
133 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
134 #define GET_OPERAND_DIAGNOSTIC_TYPES
135 #include "AArch64GenAsmMatcher.inc"
// Constructor: records whether the ILP32 ABI was selected, installs a target
// streamer if the MCStreamer lacks one, and caches the available features.
139 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
140 const MCInstrInfo &MII, const MCTargetOptions &Options)
141 : MCTargetAsmParser(Options, STI) {
142 IsILP32 = Options.getABIName() == "ilp32";
143 MCAsmParserExtension::Initialize(Parser);
144 MCStreamer &S = getParser().getStreamer();
145 if (S.getTargetStreamer() == nullptr)
146 new AArch64TargetStreamer(S);
148 // Initialize the set of available features.
149 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
// MCTargetAsmParser overrides implemented out of line.
152 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
153 SMLoc NameLoc, OperandVector &Operands) override;
154 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
155 bool ParseDirective(AsmToken DirectiveID) override;
156 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
157 unsigned Kind) override;
// Classifies a symbolic expression into its ELF/Darwin modifier kind plus a
// constant addend; shared with AArch64Operand's predicates below.
159 static bool classifySymbolRef(const MCExpr *Expr,
160 AArch64MCExpr::VariantKind &ELFRefKind,
161 MCSymbolRefExpr::VariantKind &DarwinRefKind,
165 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
167 class AArch64Operand : public MCParsedAsmOperand {
// Source range of this operand within the assembly input.
186 SMLoc StartLoc, EndLoc;
191 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
// Per-kind payload structs; only the one matching the discriminant is valid
// at any time (see the accessors below, which assert on Kind).
199 struct VectorListOp {
202 unsigned NumElements;
203 unsigned ElementKind;
206 struct VectorIndexOp {
214 struct ShiftedImmOp {
216 unsigned ShiftAmount;
220 AArch64CC::CondCode Code;
224 unsigned Val; // Encoded 8-bit representation.
230 unsigned Val; // Not the enum since not all values have names.
238 uint32_t PStateField;
257 struct ShiftExtendOp {
258 AArch64_AM::ShiftExtendType Type;
260 bool HasExplicitAmount;
270 struct VectorListOp VectorList;
271 struct VectorIndexOp VectorIndex;
273 struct ShiftedImmOp ShiftedImm;
274 struct CondCodeOp CondCode;
275 struct FPImmOp FPImm;
276 struct BarrierOp Barrier;
277 struct SysRegOp SysReg;
278 struct SysCRImmOp SysCRImm;
279 struct PrefetchOp Prefetch;
280 struct PSBHintOp PSBHint;
281 struct ShiftExtendOp ShiftExtend;
284 // Keep the MCContext around as the MCExprs may need manipulated during
285 // the add<>Operands() calls.
289 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
// Copy constructor: copies the location info and the payload corresponding
// to the source operand's kind (dispatch lines elided in this excerpt).
291 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
293 StartLoc = o.StartLoc;
303 ShiftedImm = o.ShiftedImm;
306 CondCode = o.CondCode;
318 VectorList = o.VectorList;
321 VectorIndex = o.VectorIndex;
327 SysCRImm = o.SysCRImm;
330 Prefetch = o.Prefetch;
336 ShiftExtend = o.ShiftExtend;
341 /// getStartLoc - Get the location of the first token of this operand.
342 SMLoc getStartLoc() const override { return StartLoc; }
343 /// getEndLoc - Get the location of the last token of this operand.
344 SMLoc getEndLoc() const override { return EndLoc; }
// Kind-checked accessors: each asserts that this operand holds the matching
// payload before reading it, so a misuse fails loudly in asserts builds.
346 StringRef getToken() const {
347 assert(Kind == k_Token && "Invalid access!");
348 return StringRef(Tok.Data, Tok.Length);
351 bool isTokenSuffix() const {
352 assert(Kind == k_Token && "Invalid access!");
356 const MCExpr *getImm() const {
357 assert(Kind == k_Immediate && "Invalid access!");
361 const MCExpr *getShiftedImmVal() const {
362 assert(Kind == k_ShiftedImm && "Invalid access!");
363 return ShiftedImm.Val;
366 unsigned getShiftedImmShift() const {
367 assert(Kind == k_ShiftedImm && "Invalid access!");
368 return ShiftedImm.ShiftAmount;
371 AArch64CC::CondCode getCondCode() const {
372 assert(Kind == k_CondCode && "Invalid access!");
373 return CondCode.Code;
376 unsigned getFPImm() const {
377 assert(Kind == k_FPImm && "Invalid access!");
381 unsigned getBarrier() const {
382 assert(Kind == k_Barrier && "Invalid access!");
386 StringRef getBarrierName() const {
387 assert(Kind == k_Barrier && "Invalid access!");
388 return StringRef(Barrier.Data, Barrier.Length);
391 unsigned getReg() const override {
392 assert(Kind == k_Register && "Invalid access!");
396 unsigned getVectorListStart() const {
397 assert(Kind == k_VectorList && "Invalid access!");
398 return VectorList.RegNum;
401 unsigned getVectorListCount() const {
402 assert(Kind == k_VectorList && "Invalid access!");
403 return VectorList.Count;
406 unsigned getVectorIndex() const {
407 assert(Kind == k_VectorIndex && "Invalid access!");
408 return VectorIndex.Val;
411 StringRef getSysReg() const {
412 assert(Kind == k_SysReg && "Invalid access!");
413 return StringRef(SysReg.Data, SysReg.Length);
416 unsigned getSysCR() const {
417 assert(Kind == k_SysCR && "Invalid access!");
421 unsigned getPrefetch() const {
422 assert(Kind == k_Prefetch && "Invalid access!");
426 unsigned getPSBHint() const {
427 assert(Kind == k_PSBHint && "Invalid access!");
431 StringRef getPSBHintName() const {
432 assert(Kind == k_PSBHint && "Invalid access!");
433 return StringRef(PSBHint.Data, PSBHint.Length);
436 StringRef getPrefetchName() const {
437 assert(Kind == k_Prefetch && "Invalid access!");
438 return StringRef(Prefetch.Data, Prefetch.Length);
441 AArch64_AM::ShiftExtendType getShiftExtendType() const {
442 assert(Kind == k_ShiftExtend && "Invalid access!");
443 return ShiftExtend.Type;
446 unsigned getShiftExtendAmount() const {
447 assert(Kind == k_ShiftExtend && "Invalid access!");
448 return ShiftExtend.Amount;
451 bool hasShiftExtendAmount() const {
452 assert(Kind == k_ShiftExtend && "Invalid access!");
453 return ShiftExtend.HasExplicitAmount;
456 bool isImm() const override { return Kind == k_Immediate; }
457 bool isMem() const override { return false; }
// Range predicates used by the generated matcher. Each requires a constant
// immediate (non-constant guard lines are elided in this excerpt) and checks
// the encodable range for the corresponding instruction field.
// isSImm9: signed 9-bit offset, [-256, 255].
458 bool isSImm9() const {
461 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
464 int64_t Val = MCE->getValue();
465 return (Val >= -256 && Val < 256);
// isSImm7s4/s8/s16: signed 7-bit field scaled by 4/8/16 (LDP/STP offsets).
467 bool isSImm7s4() const {
470 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
473 int64_t Val = MCE->getValue();
474 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
476 bool isSImm7s8() const {
479 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
482 int64_t Val = MCE->getValue();
483 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
485 bool isSImm7s16() const {
488 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
491 int64_t Val = MCE->getValue();
492 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
// Accepts a symbolic expression as a scaled unsigned 12-bit offset when its
// modifier is a page-offset style relocation (:lo12: and friends).
495 bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
496 AArch64MCExpr::VariantKind ELFRefKind;
497 MCSymbolRefExpr::VariantKind DarwinRefKind;
499 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
501 // If we don't understand the expression, assume the best and
502 // let the fixup and relocation code deal with it.
506 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
507 ELFRefKind == AArch64MCExpr::VK_LO12 ||
508 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
509 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
510 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
511 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
512 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
513 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
514 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
515 // Note that we don't range-check the addend. It's adjusted modulo page
516 // size when converted, so there is no "out of range" condition when using
518 return Addend >= 0 && (Addend % Scale) == 0;
519 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
520 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
521 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
// Unsigned 12-bit offset scaled by the access size; non-constant immediates
// defer to the symbolic check above.
528 template <int Scale> bool isUImm12Offset() const {
532 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
534 return isSymbolicUImm12Offset(getImm(), Scale);
536 int64_t Val = MCE->getValue();
537 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
// isImmA_B: constant immediate in the inclusive range [A, B].
540 bool isImm0_1() const {
543 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
546 int64_t Val = MCE->getValue();
547 return (Val >= 0 && Val < 2);
550 bool isImm0_7() const {
553 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
556 int64_t Val = MCE->getValue();
557 return (Val >= 0 && Val < 8);
560 bool isImm1_8() const {
563 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
566 int64_t Val = MCE->getValue();
567 return (Val > 0 && Val < 9);
570 bool isImm0_15() const {
573 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
576 int64_t Val = MCE->getValue();
577 return (Val >= 0 && Val < 16);
580 bool isImm1_16() const {
583 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
586 int64_t Val = MCE->getValue();
587 return (Val > 0 && Val < 17);
590 bool isImm0_31() const {
593 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
596 int64_t Val = MCE->getValue();
597 return (Val >= 0 && Val < 32);
600 bool isImm1_31() const {
603 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
606 int64_t Val = MCE->getValue();
607 return (Val >= 1 && Val < 32);
610 bool isImm1_32() const {
613 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
616 int64_t Val = MCE->getValue();
617 return (Val >= 1 && Val < 33);
620 bool isImm0_63() const {
623 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
626 int64_t Val = MCE->getValue();
627 return (Val >= 0 && Val < 64);
630 bool isImm1_63() const {
633 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
636 int64_t Val = MCE->getValue();
637 return (Val >= 1 && Val < 64);
640 bool isImm1_64() const {
643 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
646 int64_t Val = MCE->getValue();
647 return (Val >= 1 && Val < 65);
650 bool isImm0_127() const {
653 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
656 int64_t Val = MCE->getValue();
657 return (Val >= 0 && Val < 128);
660 bool isImm0_255() const {
663 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
666 int64_t Val = MCE->getValue();
667 return (Val >= 0 && Val < 256);
670 bool isImm0_65535() const {
673 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
676 int64_t Val = MCE->getValue();
677 return (Val >= 0 && Val < 65536);
680 bool isImm32_63() const {
683 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
686 int64_t Val = MCE->getValue();
687 return (Val >= 32 && Val < 64);
// Logical-immediate checks delegate the bitmask-encodability test to
// AArch64_AM::isLogicalImmediate for the given register width.
690 bool isLogicalImm32() const {
693 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
696 int64_t Val = MCE->getValue();
// Reject values that are neither zero- nor sign-extended 32-bit quantities.
697 if (Val >> 32 != 0 && Val >> 32 != ~0LL)
700 return AArch64_AM::isLogicalImmediate(Val, 32);
703 bool isLogicalImm64() const {
706 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
709 return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
// "Not" variants test the bitwise complement (for BIC-style aliases).
712 bool isLogicalImm32Not() const {
715 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
718 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
719 return AArch64_AM::isLogicalImmediate(Val, 32);
722 bool isLogicalImm64Not() const {
725 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
728 return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
731 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
// isAddSubImm: accepts either a plain immediate or a shifted immediate with
// shift 0 or 12 — the only shifts ADD/SUB (immediate) encode. Symbolic
// operands are accepted when their modifier is a low-12-bits style
// relocation; constants must fit the unsigned 12-bit field.
733 bool isAddSubImm() const {
734 if (!isShiftedImm() && !isImm())
739 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
740 if (isShiftedImm()) {
741 unsigned Shift = ShiftedImm.ShiftAmount;
742 Expr = ShiftedImm.Val;
743 if (Shift != 0 && Shift != 12)
749 AArch64MCExpr::VariantKind ELFRefKind;
750 MCSymbolRefExpr::VariantKind DarwinRefKind;
752 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
753 DarwinRefKind, Addend)) {
754 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
755 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
756 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
757 || ELFRefKind == AArch64MCExpr::VK_LO12
758 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
759 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
760 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
761 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
762 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
763 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
764 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
767 // If it's a constant, it should be a real immediate in range:
768 if (auto *CE = dyn_cast<MCConstantExpr>(Expr))
769 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
771 // If it's an expression, we hope for the best and let the fixup/relocation
772 // code deal with it.
// isAddSubImmNeg: negative constant whose negation fits the 12-bit field —
// lets "add x0, x1, #-5" match as a SUB alias (see addAddSubImmNegOperands).
776 bool isAddSubImmNeg() const {
777 if (!isShiftedImm() && !isImm())
782 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
783 if (isShiftedImm()) {
784 unsigned Shift = ShiftedImm.ShiftAmount;
785 Expr = ShiftedImm.Val;
786 if (Shift != 0 && Shift != 12)
791 // Otherwise it should be a real negative immediate in range:
792 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
793 return CE != nullptr && CE->getValue() < 0 && -CE->getValue() <= 0xfff;
796 bool isCondCode() const { return Kind == k_CondCode; }
798 bool isSIMDImmType10() const {
801 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
804 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
// PC-relative branch targets: byte offsets bounded by the signed 26/19/14-bit
// word-scaled fields (alignment guards are elided in this excerpt).
807 bool isBranchTarget26() const {
810 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
813 int64_t Val = MCE->getValue();
816 return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
819 bool isPCRelLabel19() const {
822 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
825 int64_t Val = MCE->getValue();
828 return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
831 bool isBranchTarget14() const {
834 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
837 int64_t Val = MCE->getValue();
840 return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
// True when the immediate is a symbolic reference whose ELF modifier is one
// of AllowedModifiers (Darwin modifiers are rejected here).
844 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
848 AArch64MCExpr::VariantKind ELFRefKind;
849 MCSymbolRefExpr::VariantKind DarwinRefKind;
851 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
852 DarwinRefKind, Addend)) {
855 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
858 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
859 if (ELFRefKind == AllowedModifiers[i])
// Per-halfword MOVZ/MOVK modifier sets (:abs_gN:, TLS variants). The _NC
// ("no check") forms are only legal on MOVK.
866 bool isMovZSymbolG3() const {
867 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
870 bool isMovZSymbolG2() const {
871 return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
872 AArch64MCExpr::VK_TPREL_G2,
873 AArch64MCExpr::VK_DTPREL_G2});
876 bool isMovZSymbolG1() const {
877 return isMovWSymbol({
878 AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
879 AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
880 AArch64MCExpr::VK_DTPREL_G1,
884 bool isMovZSymbolG0() const {
885 return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
886 AArch64MCExpr::VK_TPREL_G0,
887 AArch64MCExpr::VK_DTPREL_G0});
890 bool isMovKSymbolG3() const {
891 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
894 bool isMovKSymbolG2() const {
895 return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
898 bool isMovKSymbolG1() const {
899 return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
900 AArch64MCExpr::VK_TPREL_G1_NC,
901 AArch64MCExpr::VK_DTPREL_G1_NC});
904 bool isMovKSymbolG0() const {
906 {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
907 AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
// MOV-alias forms: a constant representable as a single MOVZ/MOVN at the
// given shift for the given register width.
910 template<int RegWidth, int Shift>
911 bool isMOVZMovAlias() const {
912 if (!isImm()) return false;
914 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
915 if (!CE) return false;
916 uint64_t Value = CE->getValue();
918 return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
921 template<int RegWidth, int Shift>
922 bool isMOVNMovAlias() const {
923 if (!isImm()) return false;
925 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
926 if (!CE) return false;
927 uint64_t Value = CE->getValue();
929 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
932 bool isFPImm() const { return Kind == k_FPImm; }
933 bool isBarrier() const { return Kind == k_Barrier; }
934 bool isSysReg() const { return Kind == k_SysReg; }
// A system register named operand is MRS- or MSR-capable when the parser
// resolved the corresponding encoding (-1U marks "no encoding").
936 bool isMRSSystemRegister() const {
937 if (!isSysReg()) return false;
939 return SysReg.MRSReg != -1U;
942 bool isMSRSystemRegister() const {
943 if (!isSysReg()) return false;
944 return SysReg.MSRReg != -1U;
// PAN and UAO take a 1-bit immediate; all other PState fields take 0-15.
947 bool isSystemPStateFieldWithImm0_1() const {
948 if (!isSysReg()) return false;
949 return (SysReg.PStateField == AArch64PState::PAN ||
950 SysReg.PStateField == AArch64PState::UAO);
953 bool isSystemPStateFieldWithImm0_15() const {
954 if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
955 return SysReg.PStateField != -1U;
// Register-class predicates: scalar vs. vector, plus special classes used by
// CASP sequential pairs and the W-register view of an X register.
958 bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
959 bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
961 bool isVectorRegLo() const {
962 return Kind == k_Register && Reg.isVector &&
963 AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
967 bool isGPR32as64() const {
968 return Kind == k_Register && !Reg.isVector &&
969 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
972 bool isWSeqPair() const {
973 return Kind == k_Register && !Reg.isVector &&
974 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
978 bool isXSeqPair() const {
979 return Kind == k_Register && !Reg.isVector &&
980 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
984 bool isGPR64sp0() const {
985 return Kind == k_Register && !Reg.isVector &&
986 AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
989 /// Is this a vector list with the type implicit (presumably attached to the
990 /// instruction itself)?
991 template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
992 return Kind == k_VectorList && VectorList.Count == NumRegs &&
993 !VectorList.ElementKind;
// Typed vector list: register count, element count and element kind must all
// match the instruction's expectation.
996 template <unsigned NumRegs, unsigned NumElements, char ElementKind>
997 bool isTypedVectorList() const {
998 if (Kind != k_VectorList)
1000 if (VectorList.Count != NumRegs)
1002 if (VectorList.ElementKind != ElementKind)
1004 return VectorList.NumElements == NumElements;
// Lane-index predicates: the bound is the lane count for the element size
// (B: 16 lanes, H: 8, S: 4, D: 2).
1007 bool isVectorIndex1() const {
1008 return Kind == k_VectorIndex && VectorIndex.Val == 1;
1011 bool isVectorIndexB() const {
1012 return Kind == k_VectorIndex && VectorIndex.Val < 16;
1015 bool isVectorIndexH() const {
1016 return Kind == k_VectorIndex && VectorIndex.Val < 8;
1019 bool isVectorIndexS() const {
1020 return Kind == k_VectorIndex && VectorIndex.Val < 4;
1023 bool isVectorIndexD() const {
1024 return Kind == k_VectorIndex && VectorIndex.Val < 2;
1027 bool isToken() const override { return Kind == k_Token; }
1029 bool isTokenEqual(StringRef Str) const {
1030 return Kind == k_Token && getToken() == Str;
1032 bool isSysCR() const { return Kind == k_SysCR; }
1033 bool isPrefetch() const { return Kind == k_Prefetch; }
1034 bool isPSBHint() const { return Kind == k_PSBHint; }
1035 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
// Shift/extend operand predicates, matched against the shift kinds and
// amounts each instruction class encodes.
1036 bool isShifter() const {
1037 if (!isShiftExtend())
1040 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1041 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1042 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1043 ST == AArch64_AM::MSL);
1045 bool isExtend() const {
1046 if (!isShiftExtend())
1049 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1050 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1051 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1052 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1053 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1054 ET == AArch64_AM::LSL) &&
1055 getShiftExtendAmount() <= 4;
1058 bool isExtend64() const {
1061 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
1062 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1063 return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
1066 bool isExtendLSL64() const {
1069 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1070 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1071 ET == AArch64_AM::LSL) &&
1072 getShiftExtendAmount() <= 4;
// Register-offset addressing extends: the amount must be 0 or log2 of the
// access size in bytes.
1075 template<int Width> bool isMemXExtend() const {
1078 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1079 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1080 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1081 getShiftExtendAmount() == 0);
1084 template<int Width> bool isMemWExtend() const {
1087 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1088 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1089 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1090 getShiftExtendAmount() == 0);
1093 template <unsigned width>
1094 bool isArithmeticShifter() const {
1098 // An arithmetic shifter is LSL, LSR, or ASR.
1099 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1100 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1101 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1104 template <unsigned width>
1105 bool isLogicalShifter() const {
1109 // A logical shifter is LSL, LSR, ASR or ROR.
1110 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1111 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1112 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1113 getShiftExtendAmount() < width;
1116 bool isMovImm32Shifter() const {
1120 // A 32-bit MOVi shifter is LSL of 0 or 16, matching the check below.
1121 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1122 if (ST != AArch64_AM::LSL)
1124 uint64_t Val = getShiftExtendAmount();
1125 return (Val == 0 || Val == 16);
1128 bool isMovImm64Shifter() const {
1132 // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
1133 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1134 if (ST != AArch64_AM::LSL)
1136 uint64_t Val = getShiftExtendAmount();
1137 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1140 bool isLogicalVecShifter() const {
1144 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1145 unsigned Shift = getShiftExtendAmount();
1146 return getShiftExtendType() == AArch64_AM::LSL &&
1147 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1150 bool isLogicalVecHalfWordShifter() const {
1151 if (!isLogicalVecShifter())
1154 // A logical vector shifter is a left shift by 0 or 8.
1155 unsigned Shift = getShiftExtendAmount();
1156 return getShiftExtendType() == AArch64_AM::LSL &&
1157 (Shift == 0 || Shift == 8);
1160 bool isMoveVecShifter() const {
1161 if (!isShiftExtend())
1164 // A move vector shifter is an MSL left shift by 8 or 16.
1165 unsigned Shift = getShiftExtendAmount();
1166 return getShiftExtendType() == AArch64_AM::MSL &&
1167 (Shift == 8 || Shift == 16);
1170 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1171 // to LDUR/STUR when the offset is not legal for the former but is for
1172 // the latter. As such, in addition to checking for being a legal unscaled
1173 // address, also check that it is not a legal scaled address. This avoids
1174 // ambiguity in the matcher.
1176 bool isSImm9OffsetFB() const {
1177 return isSImm9() && !isUImm12Offset<Width / 8>();
// ADRP target: constant offsets must be page-aligned and within the signed
// 21-bit page-scaled range; symbolic targets were validated at parse time.
1180 bool isAdrpLabel() const {
1181 // Validation was handled during parsing, so we just sanity check that
1182 // something didn't go haywire.
1186 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1187 int64_t Val = CE->getValue();
1188 int64_t Min = - (4096 * (1LL << (21 - 1)));
1189 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1190 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
// ADR target: signed 21-bit byte offset.
1196 bool isAdrLabel() const {
1197 // Validation was handled during parsing, so we just sanity check that
1198 // something didn't go haywire.
1202 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1203 int64_t Val = CE->getValue();
1204 int64_t Min = - (1LL << (21 - 1));
1205 int64_t Max = ((1LL << (21 - 1)) - 1);
1206 return Val >= Min && Val <= Max;
1212 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1213 // Add as immediates when possible. Null MCExpr = 0.
1215 Inst.addOperand(MCOperand::createImm(0));
1216 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1217 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1219 Inst.addOperand(MCOperand::createExpr(Expr));
1222 void addRegOperands(MCInst &Inst, unsigned N) const {
1223 assert(N == 1 && "Invalid number of operands!");
1224 Inst.addOperand(MCOperand::createReg(getReg()));
1227 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1228 assert(N == 1 && "Invalid number of operands!");
1230 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1232 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1233 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1234 RI->getEncodingValue(getReg()));
1236 Inst.addOperand(MCOperand::createReg(Reg));
1239 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1240 assert(N == 1 && "Invalid number of operands!");
1242 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1243 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1246 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1247 assert(N == 1 && "Invalid number of operands!");
1249 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1250 Inst.addOperand(MCOperand::createReg(getReg()));
1253 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1254 assert(N == 1 && "Invalid number of operands!");
1255 Inst.addOperand(MCOperand::createReg(getReg()));
1258 template <unsigned NumRegs>
1259 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1260 assert(N == 1 && "Invalid number of operands!");
1261 static const unsigned FirstRegs[] = { AArch64::D0,
1264 AArch64::D0_D1_D2_D3 };
1265 unsigned FirstReg = FirstRegs[NumRegs - 1];
1268 MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1271 template <unsigned NumRegs>
1272 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1273 assert(N == 1 && "Invalid number of operands!");
1274 static const unsigned FirstRegs[] = { AArch64::Q0,
1277 AArch64::Q0_Q1_Q2_Q3 };
1278 unsigned FirstReg = FirstRegs[NumRegs - 1];
1281 MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1284 void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
1285 assert(N == 1 && "Invalid number of operands!");
1286 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1289 void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1290 assert(N == 1 && "Invalid number of operands!");
1291 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1294 void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1295 assert(N == 1 && "Invalid number of operands!");
1296 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1299 void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1300 assert(N == 1 && "Invalid number of operands!");
1301 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1304 void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1305 assert(N == 1 && "Invalid number of operands!");
1306 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
// Emit a generic immediate operand; symbolic expressions are routed through
// addExpr() so fixups/relocations can be attached later.
1309 void addImmOperands(MCInst &Inst, unsigned N) const {
1310 assert(N == 1 && "Invalid number of operands!");
1311 // If this is a pageoff symrefexpr with an addend, adjust the addend
1312 // to be only the page-offset portion. Otherwise, just add the expr
1314 addExpr(Inst, getImm());
// ADD/SUB immediate: two MC operands, the value and an LSL shift amount.
// An unshifted immediate gets an explicit shift of 0.
1317 void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
1318 assert(N == 2 && "Invalid number of operands!");
1319 if (isShiftedImm()) {
1320 addExpr(Inst, getShiftedImmVal());
1321 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1323 addExpr(Inst, getImm());
1324 Inst.addOperand(MCOperand::createImm(0));
// Negated ADD/SUB immediate (e.g. matching "sub" as "add #-imm"). The
// matcher predicate guarantees a constant here, so cast<> is safe.
1328 void addAddSubImmNegOperands(MCInst &Inst, unsigned N) const {
1329 assert(N == 2 && "Invalid number of operands!");
1331 const MCExpr *MCE = isShiftedImm() ? getShiftedImmVal() : getImm();
1332 const MCConstantExpr *CE = cast<MCConstantExpr>(MCE);
1333 int64_t Val = -CE->getValue();
1334 unsigned ShiftAmt = isShiftedImm() ? ShiftedImm.ShiftAmount : 0;
1336 Inst.addOperand(MCOperand::createImm(Val));
1337 Inst.addOperand(MCOperand::createImm(ShiftAmt));
// Emit the AArch64CC condition code as an immediate.
1340 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1341 assert(N == 1 && "Invalid number of operands!");
1342 Inst.addOperand(MCOperand::createImm(getCondCode()));
// ADRP label: a constant immediate is a byte offset, so drop the low 12
// bits (page offset) before emitting; symbolic targets go out as-is.
1345 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1346 assert(N == 1 && "Invalid number of operands!");
1347 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1349 addExpr(Inst, getImm());
1351 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
// ADR label: no page scaling needed, forwards to the generic imm path.
1354 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1355 addImmOperands(Inst, N);
// Unsigned 12-bit load/store offset, scaled by the access size. 'Scale' is
// presumably a template parameter of this method — not visible in this
// excerpt; TODO confirm against the full source.
1359 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1360 assert(N == 1 && "Invalid number of operands!");
1361 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1364 Inst.addOperand(MCOperand::createExpr(getImm()));
1367 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
// Signed 9-bit offset: emitted unscaled.
1370 void addSImm9Operands(MCInst &Inst, unsigned N) const {
1371 assert(N == 1 && "Invalid number of operands!");
1372 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1373 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
// Signed 7-bit offsets for load/store-pair, scaled by 4/8/16 bytes
// according to the register width of the pair.
1376 void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1377 assert(N == 1 && "Invalid number of operands!");
1378 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1379 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 4));
1382 void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1383 assert(N == 1 && "Invalid number of operands!");
1384 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1385 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 8));
1388 void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1389 assert(N == 1 && "Invalid number of operands!");
1390 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1391 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 16));
// The addImmA_BOperands family emits a constant immediate verbatim; the
// A_B range in the name is enforced by the corresponding isImmA_B matcher
// predicate before these are called, so the cast<MCConstantExpr> is safe.
1394 void addImm0_1Operands(MCInst &Inst, unsigned N) const {
1395 assert(N == 1 && "Invalid number of operands!");
1396 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1397 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1400 void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1401 assert(N == 1 && "Invalid number of operands!");
1402 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1403 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1406 void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1407 assert(N == 1 && "Invalid number of operands!");
1408 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1409 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1412 void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1413 assert(N == 1 && "Invalid number of operands!");
1414 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1415 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1418 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1419 assert(N == 1 && "Invalid number of operands!");
1420 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
// NOTE(review): this assert is redundant — cast<> already asserts non-null;
// it would only be meaningful with dyn_cast<>. Harmless, but inconsistent
// with the sibling methods.
1421 assert(MCE && "Invalid constant immediate operand!");
1422 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1425 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1426 assert(N == 1 && "Invalid number of operands!");
1427 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1428 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1431 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1432 assert(N == 1 && "Invalid number of operands!");
1433 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1434 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1437 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1438 assert(N == 1 && "Invalid number of operands!");
1439 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1440 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1443 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1444 assert(N == 1 && "Invalid number of operands!");
1445 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1446 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1449 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1450 assert(N == 1 && "Invalid number of operands!");
1451 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1452 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1455 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1456 assert(N == 1 && "Invalid number of operands!");
1457 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1458 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1461 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1462 assert(N == 1 && "Invalid number of operands!");
1463 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1464 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1467 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1468 assert(N == 1 && "Invalid number of operands!");
1469 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1470 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1473 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1474 assert(N == 1 && "Invalid number of operands!");
1475 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1476 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1479 void addImm32_63Operands(MCInst &Inst, unsigned N) const {
1480 assert(N == 1 && "Invalid number of operands!");
1481 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1482 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
// Logical (bitmask) immediates are stored in the instruction in the packed
// N:immr:imms encoding, not as raw values — encode before emitting.
1485 void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1486 assert(N == 1 && "Invalid number of operands!");
1487 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
// Mask to 32 bits so sign-extended values encode as their 32-bit pattern.
1489 AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
1490 Inst.addOperand(MCOperand::createImm(encoding));
1493 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1494 assert(N == 1 && "Invalid number of operands!");
1495 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1496 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1497 Inst.addOperand(MCOperand::createImm(encoding));
// "Not" variants encode the bitwise complement (used for aliases such as
// BIC matched as AND with inverted immediate).
1500 void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
1501 assert(N == 1 && "Invalid number of operands!");
1502 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1503 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
1504 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
1505 Inst.addOperand(MCOperand::createImm(encoding));
1508 void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
1509 assert(N == 1 && "Invalid number of operands!");
1510 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1512 AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
1513 Inst.addOperand(MCOperand::createImm(encoding));
// AdvSIMD modified-immediate "type 10" (64-bit byte mask, e.g. for MOVI).
1516 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1517 assert(N == 1 && "Invalid number of operands!");
1518 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1519 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1520 Inst.addOperand(MCOperand::createImm(encoding));
// Branch/label targets: instruction encodings drop the two low bits of the
// byte offset (all targets are 4-byte aligned), hence the '>> 2' below.
1523 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1524 // Branch operands don't encode the low bits, so shift them off
1525 // here. If it's a label, however, just put it on directly as there's
1526 // not enough information now to do anything.
1527 assert(N == 1 && "Invalid number of operands!");
1528 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1530 addExpr(Inst, getImm());
1533 assert(MCE && "Invalid constant immediate operand!");
1534 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
// 19-bit PC-relative target (conditional branches, LDR literal).
1537 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1538 // Branch operands don't encode the low bits, so shift them off
1539 // here. If it's a label, however, just put it on directly as there's
1540 // not enough information now to do anything.
1541 assert(N == 1 && "Invalid number of operands!");
1542 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1544 addExpr(Inst, getImm());
1547 assert(MCE && "Invalid constant immediate operand!");
1548 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
// 14-bit target (TBZ/TBNZ).
1551 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1552 // Branch operands don't encode the low bits, so shift them off
1553 // here. If it's a label, however, just put it on directly as there's
1554 // not enough information now to do anything.
1555 assert(N == 1 && "Invalid number of operands!");
1556 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1558 addExpr(Inst, getImm());
1561 assert(MCE && "Invalid constant immediate operand!");
1562 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
// Emit the already-encoded 8-bit FP immediate (FMOV encoding).
1565 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1566 assert(N == 1 && "Invalid number of operands!");
1567 Inst.addOperand(MCOperand::createImm(getFPImm()));
// Barrier option (DMB/DSB/ISB) as a raw 4-bit value.
1570 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1571 assert(N == 1 && "Invalid number of operands!");
1572 Inst.addOperand(MCOperand::createImm(getBarrier()));
// System registers carry separate MRS (read) and MSR (write) encodings;
// emit whichever matches the instruction being built.
1575 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1576 assert(N == 1 && "Invalid number of operands!");
1578 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1581 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1582 assert(N == 1 && "Invalid number of operands!");
1584 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
// PSTATE fields for MSR-immediate; the 0_1 / 0_15 split mirrors the two
// immediate widths allowed by the architecture.
1587 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1588 assert(N == 1 && "Invalid number of operands!");
1590 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1593 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1594 assert(N == 1 && "Invalid number of operands!");
1596 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
// System-instruction Cn operand (SYS/SYSL, AT, TLBI, ...).
1599 void addSysCROperands(MCInst &Inst, unsigned N) const {
1600 assert(N == 1 && "Invalid number of operands!");
1601 Inst.addOperand(MCOperand::createImm(getSysCR()));
// PRFM prefetch-operation encoding.
1604 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1605 assert(N == 1 && "Invalid number of operands!");
1606 Inst.addOperand(MCOperand::createImm(getPrefetch()));
// PSB (profiling synchronization barrier) hint encoding.
1609 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
1610 assert(N == 1 && "Invalid number of operands!");
1611 Inst.addOperand(MCOperand::createImm(getPSBHint()));
// Pack shift type + amount into the single immediate the MC layer expects.
1614 void addShifterOperands(MCInst &Inst, unsigned N) const {
1615 assert(N == 1 && "Invalid number of operands!");
1617 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1618 Inst.addOperand(MCOperand::createImm(Imm));
// Arithmetic extend for 32-bit forms: a plain "lsl" is canonicalized to
// UXTW before encoding.
1621 void addExtendOperands(MCInst &Inst, unsigned N) const {
1622 assert(N == 1 && "Invalid number of operands!");
1623 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1624 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1625 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1626 Inst.addOperand(MCOperand::createImm(Imm));
// Same, but for 64-bit forms "lsl" canonicalizes to UXTX.
1629 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1630 assert(N == 1 && "Invalid number of operands!");
1631 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1632 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1633 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1634 Inst.addOperand(MCOperand::createImm(Imm));
// Register-offset memory extend: two operands — a signedness flag and a
// "do shift" flag (non-zero amount).
1637 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1638 assert(N == 2 && "Invalid number of operands!");
1639 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1640 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1641 Inst.addOperand(MCOperand::createImm(IsSigned));
1642 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1645 // For 8-bit load/store instructions with a register offset, both the
1646 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1647 // they're disambiguated by whether the shift was explicit or implicit rather
// ... than by its value (continuation of the comment above; the second flag
// is "was an amount written in the source", not "is the amount non-zero").
1649 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1650 assert(N == 2 && "Invalid number of operands!");
1651 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1652 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1653 Inst.addOperand(MCOperand::createImm(IsSigned));
1654 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
// MOVZ alias: emit the 16-bit chunk selected by 'Shift'. 'Shift' is
// presumably a template parameter of this method (not visible in this
// excerpt) — TODO confirm against the full source.
1658 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1659 assert(N == 1 && "Invalid number of operands!");
1661 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1662 uint64_t Value = CE->getValue();
1663 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
// MOVN alias: same chunk selection but on the complemented value.
1667 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1668 assert(N == 1 && "Invalid number of operands!");
1670 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1671 uint64_t Value = CE->getValue();
1672 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1675 void print(raw_ostream &OS) const override;
// Factory helpers: each builds a heap-allocated AArch64Operand of one
// discriminated kind and fills in the corresponding union member.
// Build a bare token operand (mnemonic suffixes, punctuation, etc.).
1677 static std::unique_ptr<AArch64Operand>
1678 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1679 auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
// Stores pointer+length into Str's underlying storage — caller must keep
// the source buffer alive for the operand's lifetime.
1680 Op->Tok.Data = Str.data();
1681 Op->Tok.Length = Str.size();
1682 Op->Tok.IsSuffix = IsSuffix;
// Build a register operand; isVector distinguishes V-register names.
1688 static std::unique_ptr<AArch64Operand>
1689 CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
1690 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1691 Op->Reg.RegNum = RegNum;
1692 Op->Reg.isVector = isVector;
// Build a vector register list (e.g. "{ v0.4s, v1.4s }"): start register,
// register count, lane count and element-kind suffix character.
1698 static std::unique_ptr<AArch64Operand>
1699 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1700 char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
1701 auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1702 Op->VectorList.RegNum = RegNum;
1703 Op->VectorList.Count = Count;
1704 Op->VectorList.NumElements = NumElements;
1705 Op->VectorList.ElementKind = ElementKind;
// Build a vector lane-index operand ("[idx]").
1711 static std::unique_ptr<AArch64Operand>
1712 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1713 auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1714 Op->VectorIndex.Val = Idx;
// Build an immediate operand wrapping an arbitrary MCExpr.
1720 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1721 SMLoc E, MCContext &Ctx) {
1722 auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
// Build an immediate paired with an LSL shift amount (ADD/SUB #imm, lsl #N).
1729 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1730 unsigned ShiftAmount,
1733 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1734 Op->ShiftedImm .Val = Val;
1735 Op->ShiftedImm.ShiftAmount = ShiftAmount;
// Build a condition-code operand (EQ, NE, ...).
1741 static std::unique_ptr<AArch64Operand>
1742 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1743 auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1744 Op->CondCode.Code = Code;
// Build an FP-immediate operand; Val is the already-encoded 8-bit form
// (or -1 handled specially by the caller for 0.0).
1750 static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
1752 auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1753 Op->FPImm.Val = Val;
// Build a barrier-option operand, keeping the spelled name for printing.
1759 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1763 auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1764 Op->Barrier.Val = Val;
1765 Op->Barrier.Data = Str.data();
1766 Op->Barrier.Length = Str.size();
// Build a system-register operand carrying all three possible encodings
// (MRS read, MSR write, PSTATE field) plus the spelled name.
1772 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1775 uint32_t PStateField,
1777 auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1778 Op->SysReg.Data = Str.data();
1779 Op->SysReg.Length = Str.size();
1780 Op->SysReg.MRSReg = MRSReg;
1781 Op->SysReg.MSRReg = MSRReg;
1782 Op->SysReg.PStateField = PStateField;
// Build a system-instruction Cn operand.
1788 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1789 SMLoc E, MCContext &Ctx) {
1790 auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1791 Op->SysCRImm.Val = Val;
// Build a prefetch-operation (PRFM) operand.
1797 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1801 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1802 Op->Prefetch.Val = Val;
// NOTE(review): these write Barrier.Data/Length on a k_Prefetch operand —
// looks like a copy-paste from CreateBarrier that only works because the
// union members overlay; should presumably be Op->Prefetch.Data/Length.
// Verify against getPrefetchName() before changing.
1803 Op->Barrier.Data = Str.data();
1804 Op->Barrier.Length = Str.size();
// Build a PSB hint operand, keeping the spelled name for printing.
1810 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
1814 auto Op = make_unique<AArch64Operand>(k_PSBHint, Ctx);
1815 Op->PSBHint.Val = Val;
1816 Op->PSBHint.Data = Str.data();
1817 Op->PSBHint.Length = Str.size();
// Build a shift/extend operand; HasExplicitAmount records whether an
// amount was written in the source (needed to disambiguate 8-bit
// load/store shift-0 variants, see addMemExtend8Operands).
1823 static std::unique_ptr<AArch64Operand>
1824 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1825 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1826 auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1827 Op->ShiftExtend.Type = ShOp;
1828 Op->ShiftExtend.Amount = Val;
1829 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1836 } // end anonymous namespace.
// Debug dump of an operand: switches over the discriminator kind and prints
// a human-readable "<kind value>" form. (Several case labels fall between
// the lines visible in this excerpt.)
1838 void AArch64Operand::print(raw_ostream &OS) const {
1841 OS << "<fpimm " << getFPImm() << "("
1842 << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
// Barrier: print the symbolic name when one exists, else the raw encoding.
1845 StringRef Name = getBarrierName();
1847 OS << "<barrier " << Name << ">";
1849 OS << "<barrier invalid #" << getBarrier() << ">";
1855 case k_ShiftedImm: {
1856 unsigned Shift = getShiftedImmShift();
1857 OS << "<shiftedimm ";
1858 OS << *getShiftedImmVal();
1859 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1863 OS << "<condcode " << getCondCode() << ">";
1866 OS << "<register " << getReg() << ">";
1868 case k_VectorList: {
1869 OS << "<vectorlist ";
// Lists are stored as (start register, count); print each member.
1870 unsigned Reg = getVectorListStart();
1871 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1872 OS << Reg + i << " ";
1877 OS << "<vectorindex " << getVectorIndex() << ">";
1880 OS << "<sysreg: " << getSysReg() << '>';
1883 OS << "'" << getToken() << "'";
1886 OS << "c" << getSysCR();
// Prefetch: symbolic name when known, raw encoding otherwise.
1889 StringRef Name = getPrefetchName();
1891 OS << "<prfop " << Name << ">";
1893 OS << "<prfop invalid #" << getPrefetch() << ">";
1897 OS << getPSBHintName();
1900 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1901 << getShiftExtendAmount();
1902 if (!hasShiftExtendAmount())
1909 /// @name Auto-generated Match Functions
1912 static unsigned MatchRegisterName(StringRef Name);
// Map a "vN" vector register spelling (case-insensitive) to the MC Q-register
// number; non-matching names fall through to the switch's default (not
// visible in this excerpt, presumably returning 0 / no-register).
1916 static unsigned matchVectorRegName(StringRef Name) {
1917 return StringSwitch<unsigned>(Name.lower())
1918 .Case("v0", AArch64::Q0)
1919 .Case("v1", AArch64::Q1)
1920 .Case("v2", AArch64::Q2)
1921 .Case("v3", AArch64::Q3)
1922 .Case("v4", AArch64::Q4)
1923 .Case("v5", AArch64::Q5)
1924 .Case("v6", AArch64::Q6)
1925 .Case("v7", AArch64::Q7)
1926 .Case("v8", AArch64::Q8)
1927 .Case("v9", AArch64::Q9)
1928 .Case("v10", AArch64::Q10)
1929 .Case("v11", AArch64::Q11)
1930 .Case("v12", AArch64::Q12)
1931 .Case("v13", AArch64::Q13)
1932 .Case("v14", AArch64::Q14)
1933 .Case("v15", AArch64::Q15)
1934 .Case("v16", AArch64::Q16)
1935 .Case("v17", AArch64::Q17)
1936 .Case("v18", AArch64::Q18)
1937 .Case("v19", AArch64::Q19)
1938 .Case("v20", AArch64::Q20)
1939 .Case("v21", AArch64::Q21)
1940 .Case("v22", AArch64::Q22)
1941 .Case("v23", AArch64::Q23)
1942 .Case("v24", AArch64::Q24)
1943 .Case("v25", AArch64::Q25)
1944 .Case("v26", AArch64::Q26)
1945 .Case("v27", AArch64::Q27)
1946 .Case("v28", AArch64::Q28)
1947 .Case("v29", AArch64::Q29)
1948 .Case("v30", AArch64::Q30)
1949 .Case("v31", AArch64::Q31)
// Return true if Name is a recognized vector arrangement suffix such as
// ".8b" or ".4s" (the individual .Case entries fall between the lines
// visible in this excerpt).
1953 static bool isValidVectorKind(StringRef Name) {
1954 return StringSwitch<bool>(Name.lower())
1964 // Accept the width neutral ones, too, for verbose syntax. If those
1965 // aren't used in the right places, the token operand won't match so
1966 // all will work out.
1971 // Needed for fp16 scalar pairwise reductions
// Split a validated vector-kind suffix (e.g. ".4s") into its lane count and
// element-kind character. Width-neutral kinds (".b" etc.) leave
// NumElements at its earlier-assigned value (assignment not visible here).
1976 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1977 char &ElementKind) {
1978 assert(isValidVectorKind(Name));
// Element kind is always the last character of the suffix.
1980 ElementKind = Name.lower()[Name.size() - 1];
// A two-character suffix (".b" / ".h" / ...) has no lane count.
1983 if (Name.size() == 2)
1986 // Parse the lane count
1987 Name = Name.drop_front();
1988 while (isdigit(Name.front())) {
1989 NumElements = 10 * NumElements + (Name.front() - '0');
1990 Name = Name.drop_front();
// MCTargetAsmParser hook: parse a register at the current token. Returns
// true (= error per the MC convention) when no register was recognized.
1994 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1996 StartLoc = getLoc();
1997 RegNo = tryParseRegister();
// End location is the last character consumed, hence the -1.
1998 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1999 return (RegNo == (unsigned)-1);
2002 // Matches a register name or register alias previously defined by '.req'
2003 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
// First try the real register tables (vector vs. general chosen by flag).
2005 unsigned RegNum = isVector ? matchVectorRegName(Name)
2006 : MatchRegisterName(Name);
2009 // Check for aliases registered via .req. Canonicalize to lower case.
2010 // That's more consistent since register names are case insensitive, and
2011 // it's how the original entry was passed in from MC/MCParser/AsmParser.
2012 auto Entry = RegisterReqs.find(Name.lower())
2013 if (Entry == RegisterReqs.end())
2015 // set RegNum if the match is the right kind of register
2016 if (isVector == Entry->getValue().first)
2017 RegNum = Entry->getValue().second;
2022 /// tryParseRegister - Try to parse a register name. The token must be an
2023 /// Identifier when called, and if it is a register name the token is eaten and
2024 /// the register is added to the operand list.
2025 int AArch64AsmParser::tryParseRegister() {
2026 MCAsmParser &Parser = getParser();
2027 const AsmToken &Tok = Parser.getTok();
// Only identifiers can be register names.
2028 if (Tok.isNot(AsmToken::Identifier))
2031 std::string lowerCase = Tok.getString().lower();
2032 unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
2033 // Also handle a few aliases of registers.
// fp/lr are ABI names; x31/w31 spell the zero registers.
2035 RegNum = StringSwitch<unsigned>(lowerCase)
2036 .Case("fp", AArch64::FP)
2037 .Case("lr", AArch64::LR)
2038 .Case("x31", AArch64::XZR)
2039 .Case("w31", AArch64::WZR)
2045 Parser.Lex(); // Eat identifier token.
2049 /// tryMatchVectorRegister - Try to parse a vector register name with optional
2050 /// kind specifier. If it is a register specifier, eat the token and return it.
2051 int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
2052 MCAsmParser &Parser = getParser();
2053 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2054 TokError("vector register expected");
2058 StringRef Name = Parser.getTok().getString();
2059 // If there is a kind specifier, it's separated from the register name by
// ... a '.' (e.g. "v0.4s"); Head is the bare register portion.
2061 size_t Start = 0, Next = Name.find('.');
2062 StringRef Head = Name.slice(Start, Next);
2063 unsigned RegNum = matchRegisterNameAlias(Head, true);
// Validate the arrangement suffix, if one was written.
2066 if (Next != StringRef::npos) {
2067 Kind = Name.slice(Next, StringRef::npos);
2068 if (!isValidVectorKind(Kind)) {
2069 TokError("invalid vector kind qualifier");
2073 Parser.Lex(); // Eat the register token.
// Reached only when no vector register matched and one was expected.
2078 TokError("vector register expected");
2082 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2083 OperandMatchResultTy
2084 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2085 MCAsmParser &Parser = getParser();
2088 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2089 Error(S, "Expected cN operand where 0 <= N <= 15");
2090 return MatchOperand_ParseFail;
// Must be spelled "cN" or "CN".
2093 StringRef Tok = Parser.getTok().getIdentifier();
2094 if (Tok[0] != 'c' && Tok[0] != 'C') {
2095 Error(S, "Expected cN operand where 0 <= N <= 15");
2096 return MatchOperand_ParseFail;
// Parse the decimal N and range-check it to the 4-bit CRn/CRm field.
2100 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2101 if (BadNum || CRNum > 15) {
2102 Error(S, "Expected cN operand where 0 <= N <= 15");
2103 return MatchOperand_ParseFail;
2106 Parser.Lex(); // Eat identifier token.
2108 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2109 return MatchOperand_Success;
2112 /// tryParsePrefetch - Try to parse a prefetch operand.
2113 OperandMatchResultTy
2114 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2115 MCAsmParser &Parser = getParser();
2117 const AsmToken &Tok = Parser.getTok();
2118 // Either an identifier for named values or a 5-bit immediate.
2119 // Eat optional hash.
2120 if (parseOptionalToken(AsmToken::Hash) ||
2121 Tok.is(AsmToken::Integer)) {
// Immediate form: must fold to a constant in [0, 31].
2122 const MCExpr *ImmVal;
2123 if (getParser().parseExpression(ImmVal))
2124 return MatchOperand_ParseFail;
2126 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2128 TokError("immediate value expected for prefetch operand");
2129 return MatchOperand_ParseFail;
2131 unsigned prfop = MCE->getValue();
2133 TokError("prefetch operand out of range, [0,31] expected");
2134 return MatchOperand_ParseFail;
// Recover the symbolic name, if the encoding has one, for printing.
2137 auto PRFM = AArch64PRFM::lookupPRFMByEncoding(MCE->getValue());
2138 Operands.push_back(AArch64Operand::CreatePrefetch(
2139 prfop, PRFM ? PRFM->Name : "", S, getContext()));
2140 return MatchOperand_Success;
// Named form: look the identifier up in the PRFM table.
2143 if (Tok.isNot(AsmToken::Identifier)) {
2144 TokError("pre-fetch hint expected");
2145 return MatchOperand_ParseFail;
2148 auto PRFM = AArch64PRFM::lookupPRFMByName(Tok.getString());
2150 TokError("pre-fetch hint expected");
2151 return MatchOperand_ParseFail;
2154 Parser.Lex(); // Eat identifier token.
2155 Operands.push_back(AArch64Operand::CreatePrefetch(
2156 PRFM->Encoding, Tok.getString(), S, getContext()));
2157 return MatchOperand_Success;
2160 /// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
2161 OperandMatchResultTy
2162 AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2163 MCAsmParser &Parser = getParser();
2165 const AsmToken &Tok = Parser.getTok();
// PSB hints are named-only (e.g. "csync"); no immediate form.
2166 if (Tok.isNot(AsmToken::Identifier)) {
2167 TokError("invalid operand for instruction");
2168 return MatchOperand_ParseFail;
2171 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2173 TokError("invalid operand for instruction");
2174 return MatchOperand_ParseFail;
2177 Parser.Lex(); // Eat identifier token.
2178 Operands.push_back(AArch64Operand::CreatePSBHint(
2179 PSB->Encoding, Tok.getString(), S, getContext()));
2180 return MatchOperand_Success;
2183 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2185 OperandMatchResultTy
2186 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2187 MCAsmParser &Parser = getParser();
// Leading '#' before the label/immediate is optional.
2191 if (Parser.getTok().is(AsmToken::Hash)) {
2192 Parser.Lex(); // Eat hash token.
2195 if (parseSymbolicImmVal(Expr))
2196 return MatchOperand_ParseFail;
// Only certain relocation modifiers make sense on an ADRP target; classify
// the expression and validate or default the modifier.
2198 AArch64MCExpr::VariantKind ELFRefKind;
2199 MCSymbolRefExpr::VariantKind DarwinRefKind;
2201 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2202 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2203 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2204 // No modifier was specified at all; this is the syntax for an ELF basic
2205 // ADRP relocation (unfortunately).
2207 AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2208 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2209 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2211 Error(S, "gotpage label reference not allowed an addend");
2212 return MatchOperand_ParseFail;
2213 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2214 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2215 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2216 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2217 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2218 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2219 // The operand must be an @page or @gotpage qualified symbolref.
2220 Error(S, "page or gotpage label reference expected");
2221 return MatchOperand_ParseFail;
2225 // We have either a label reference possibly with addend or an immediate. The
2226 // addend is a raw value here. The linker will adjust it to only reference the
2228 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2229 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2231 return MatchOperand_Success;
2234 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2236 OperandMatchResultTy
2237 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
// ADR needs no relocation-modifier validation (unlike ADRP): any expression
// is accepted, with an optional leading '#'.
2241 parseOptionalToken(AsmToken::Hash);
2242 if (getParser().parseExpression(Expr))
2243 return MatchOperand_ParseFail;
2245 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2246 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2248 return MatchOperand_Success;
2251 /// tryParseFPImm - A floating point immediate expression operand.
2252 OperandMatchResultTy
2253 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2254 MCAsmParser &Parser = getParser();
2257 bool Hash = parseOptionalToken(AsmToken::Hash);
2259 // Handle negation, as that still comes through as a separate token.
2260 bool isNegative = parseOptionalToken(AsmToken::Minus);
2262 const AsmToken &Tok = Parser.getTok();
// Real literal: encode into the 8-bit FMOV immediate form.
2263 if (Tok.is(AsmToken::Real)) {
2264 APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
2266 RealVal.changeSign();
2268 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
// getFP64Imm returns -1 if the value is not representable.
2269 int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2270 Parser.Lex(); // Eat the token.
2271 // Check for out of range values. As an exception, we let Zero through,
2272 // as we handle that special case in post-processing before matching in
2273 // order to use the zero register for it.
2274 if (Val == -1 && !RealVal.isPosZero()) {
2275 TokError("expected compatible register or floating-point constant");
2276 return MatchOperand_ParseFail;
2278 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2279 return MatchOperand_Success;
// Integer token: either a raw 0x-encoded 8-bit immediate, or an integral
// value reinterpreted as a double and encoded.
2281 if (Tok.is(AsmToken::Integer)) {
2283 if (!isNegative && Tok.getString().startswith("0x")) {
2284 Val = Tok.getIntVal();
2285 if (Val > 255 || Val < 0) {
2286 TokError("encoded floating point value out of range");
2287 return MatchOperand_ParseFail;
2290 APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
2291 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2292 // If we had a '-' in front, toggle the sign bit.
2293 IntVal ^= (uint64_t)isNegative << 63;
2294 Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2296 Parser.Lex(); // Eat the token.
2297 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2298 return MatchOperand_Success;
// No '#' and no numeric token: not an FP immediate at all.
2302 return MatchOperand_NoMatch;
2304 TokError("invalid floating point immediate");
2305 return MatchOperand_ParseFail;
2308 /// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
2309 OperandMatchResultTy
2310 AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
2311 MCAsmParser &Parser = getParser();
2314 if (Parser.getTok().is(AsmToken::Hash))
2315 Parser.Lex(); // Eat '#'
2316 else if (Parser.getTok().isNot(AsmToken::Integer))
2317 // Operand should start from # or should be integer, emit error otherwise.
2318 return MatchOperand_NoMatch;
2321 if (parseSymbolicImmVal(Imm))
2322 return MatchOperand_ParseFail;
// No trailing ", lsl #N": emit with shift 0, but canonicalize large
// page-aligned constants into the shifted-by-12 form the encoding needs.
2323 else if (Parser.getTok().isNot(AsmToken::Comma)) {
2324 uint64_t ShiftAmount = 0;
2325 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
2327 int64_t Val = MCE->getValue();
2328 if (Val > 0xfff && (Val & 0xfff) == 0) {
2329 Imm = MCConstantExpr::create(Val >> 12, getContext());
2333 SMLoc E = Parser.getTok().getLoc();
2334 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
2336 return MatchOperand_Success;
2342 // The optional operand must be "lsl #N" where N is non-negative.
2343 if (!Parser.getTok().is(AsmToken::Identifier) ||
2344 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2345 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2346 return MatchOperand_ParseFail;
// '#' before the shift amount is optional.
2352 parseOptionalToken(AsmToken::Hash);
2354 if (Parser.getTok().isNot(AsmToken::Integer)) {
2355 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2356 return MatchOperand_ParseFail;
2359 int64_t ShiftAmount = Parser.getTok().getIntVal();
2361 if (ShiftAmount < 0) {
2362 Error(Parser.getTok().getLoc(), "positive shift amount required");
2363 return MatchOperand_ParseFail;
2365 Parser.Lex(); // Eat the number
2367 SMLoc E = Parser.getTok().getLoc();
2368 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2369 S, E, getContext()));
2370 return MatchOperand_Success;
2373 /// parseCondCodeString - Parse a Condition Code string.
// Maps a (lower-cased) condition mnemonic to its AArch64CC value; returns
// AArch64CC::Invalid for unknown strings.  Note "cs"/"hs" and "cc"/"lo"
// are architectural aliases and deliberately map to the same codes.
2374 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2375 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2376 .Case("eq", AArch64CC::EQ)
2377 .Case("ne", AArch64CC::NE)
2378 .Case("cs", AArch64CC::HS)
2379 .Case("hs", AArch64CC::HS)
2380 .Case("cc", AArch64CC::LO)
2381 .Case("lo", AArch64CC::LO)
2382 .Case("mi", AArch64CC::MI)
2383 .Case("pl", AArch64CC::PL)
2384 .Case("vs", AArch64CC::VS)
2385 .Case("vc", AArch64CC::VC)
2386 .Case("hi", AArch64CC::HI)
2387 .Case("ls", AArch64CC::LS)
2388 .Case("ge", AArch64CC::GE)
2389 .Case("lt", AArch64CC::LT)
2390 .Case("gt", AArch64CC::GT)
2391 .Case("le", AArch64CC::LE)
2392 .Case("al", AArch64CC::AL)
2393 .Case("nv", AArch64CC::NV)
2394 .Default(AArch64CC::Invalid);
2398 /// parseCondCode - Parse a Condition Code operand.
// Consumes the identifier token and appends a CondCode operand.  When
// invertCondCode is set (used by aliases whose condition is encoded
// inverted), the parsed code is inverted; AL and NV have no meaningful
// inverse for those aliases and are rejected.
2399 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2400 bool invertCondCode) {
2401 MCAsmParser &Parser = getParser();
2403 const AsmToken &Tok = Parser.getTok();
2404 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2406 StringRef Cond = Tok.getString();
2407 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2408 if (CC == AArch64CC::Invalid)
2409 return TokError("invalid condition code");
2410 Parser.Lex(); // Eat identifier token.
2412 if (invertCondCode) {
2413 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2414 return TokError("condition codes AL and NV are invalid for this instruction")
2415 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2419 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2423 /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2424 /// them if present.
// Recognizes both shift operators (lsl/lsr/asr/ror/msl) and extend
// operators (uxtb..sxtx).  Shifts require an explicit immediate; extends
// may omit it, in which case #0 is implied.
2425 OperandMatchResultTy
2426 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2427 MCAsmParser &Parser = getParser();
2428 const AsmToken &Tok = Parser.getTok();
2429 std::string LowerID = Tok.getString().lower();
2430 AArch64_AM::ShiftExtendType ShOp =
2431 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2432 .Case("lsl", AArch64_AM::LSL)
2433 .Case("lsr", AArch64_AM::LSR)
2434 .Case("asr", AArch64_AM::ASR)
2435 .Case("ror", AArch64_AM::ROR)
2436 .Case("msl", AArch64_AM::MSL)
2437 .Case("uxtb", AArch64_AM::UXTB)
2438 .Case("uxth", AArch64_AM::UXTH)
2439 .Case("uxtw", AArch64_AM::UXTW)
2440 .Case("uxtx", AArch64_AM::UXTX)
2441 .Case("sxtb", AArch64_AM::SXTB)
2442 .Case("sxth", AArch64_AM::SXTH)
2443 .Case("sxtw", AArch64_AM::SXTW)
2444 .Case("sxtx", AArch64_AM::SXTX)
2445 .Default(AArch64_AM::InvalidShiftExtend)
2447 if (ShOp == AArch64_AM::InvalidShiftExtend)
2448 return MatchOperand_NoMatch;
2450 SMLoc S = Tok.getLoc();
2453 bool Hash = parseOptionalToken(AsmToken::Hash);
2455 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2456 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2457 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2458 ShOp == AArch64_AM::MSL) {
2459 // We expect a number here.
2460 TokError("expected #imm after shift specifier");
2461 return MatchOperand_ParseFail;
2464 // "extend" type operations don't need an immediate, #0 is implicit.
2465 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2467 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2468 return MatchOperand_Success;
2471 // Make sure we do actually have a number, identifier or a parenthesized
2473 SMLoc E = Parser.getTok().getLoc();
2474 if (!Parser.getTok().is(AsmToken::Integer) &&
2475 !Parser.getTok().is(AsmToken::LParen) &&
2476 !Parser.getTok().is(AsmToken::Identifier)) {
2477 Error(E, "expected integer shift amount");
2478 return MatchOperand_ParseFail;
// The shift amount may be an arbitrary expression, but it must fold to a
// constant here.
2481 const MCExpr *ImmVal;
2482 if (getParser().parseExpression(ImmVal))
2483 return MatchOperand_ParseFail;
2485 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2487 Error(E, "expected constant '#imm' after shift specifier");
2488 return MatchOperand_ParseFail;
2491 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2492 Operands.push_back(AArch64Operand::CreateShiftExtend(
2493 ShOp, MCE->getValue(), true, S, E, getContext()));
2494 return MatchOperand_Success;
2497 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2498 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
// Each alias expands to "sys #op1, Cn, Cm, #op2" via SYS_ALIAS; the
// comments alongside each case record the encoding being emitted and must
// match the SYS_ALIAS arguments.
2499 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2500 OperandVector &Operands) {
2501 if (Name.find('.') != StringRef::npos)
2502 return TokError("invalid operand");
2506 AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2508 MCAsmParser &Parser = getParser();
2509 const AsmToken &Tok = Parser.getTok();
2510 StringRef Op = Tok.getString();
2511 SMLoc S = Tok.getLoc();
2513 const MCExpr *Expr = nullptr;
// Pushes the four SYS operands (op1 imm, Cn, Cm, op2 imm) for one alias.
2515 #define SYS_ALIAS(op1, Cn, Cm, op2) \
2517 Expr = MCConstantExpr::create(op1, getContext()); \
2518 Operands.push_back( \
2519 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2520 Operands.push_back( \
2521 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
2522 Operands.push_back( \
2523 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
2524 Expr = MCConstantExpr::create(op2, getContext()); \
2525 Operands.push_back( \
2526 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2529 if (Mnemonic == "ic") {
2530 if (!Op.compare_lower("ialluis")) {
2531 // SYS #0, C7, C1, #0
2532 SYS_ALIAS(0, 7, 1, 0);
2533 } else if (!Op.compare_lower("iallu")) {
2534 // SYS #0, C7, C5, #0
2535 SYS_ALIAS(0, 7, 5, 0);
2536 } else if (!Op.compare_lower("ivau")) {
2537 // SYS #3, C7, C5, #1
2538 SYS_ALIAS(3, 7, 5, 1);
2540 return TokError("invalid operand for IC instruction");
2542 } else if (Mnemonic == "dc") {
2543 if (!Op.compare_lower("zva")) {
2544 // SYS #3, C7, C4, #1
2545 SYS_ALIAS(3, 7, 4, 1);
2546 } else if (!Op.compare_lower("ivac")) {
2547 // SYS #0, C7, C6, #1
2548 SYS_ALIAS(0, 7, 6, 1);
2549 } else if (!Op.compare_lower("isw")) {
2550 // SYS #0, C7, C6, #2
2551 SYS_ALIAS(0, 7, 6, 2);
2552 } else if (!Op.compare_lower("cvac")) {
2553 // SYS #3, C7, C10, #1
2554 SYS_ALIAS(3, 7, 10, 1);
2555 } else if (!Op.compare_lower("csw")) {
2556 // SYS #0, C7, C10, #2
2557 SYS_ALIAS(0, 7, 10, 2);
2558 } else if (!Op.compare_lower("cvau")) {
2559 // SYS #3, C7, C11, #1
2560 SYS_ALIAS(3, 7, 11, 1);
2561 } else if (!Op.compare_lower("civac")) {
2562 // SYS #3, C7, C14, #1
2563 SYS_ALIAS(3, 7, 14, 1);
2564 } else if (!Op.compare_lower("cisw")) {
2565 // SYS #0, C7, C14, #2
2566 SYS_ALIAS(0, 7, 14, 2);
2567 } else if (!Op.compare_lower("cvap")) {
2568 if (getSTI().getFeatureBits()[AArch64::HasV8_2aOps]) {
2569 // SYS #3, C7, C12, #1
2570 SYS_ALIAS(3, 7, 12, 1);
2572 return TokError("DC CVAP requires ARMv8.2a");
2575 return TokError("invalid operand for DC instruction");
2577 } else if (Mnemonic == "at") {
2578 if (!Op.compare_lower("s1e1r")) {
2579 // SYS #0, C7, C8, #0
2580 SYS_ALIAS(0, 7, 8, 0);
2581 } else if (!Op.compare_lower("s1e2r")) {
2582 // SYS #4, C7, C8, #0
2583 SYS_ALIAS(4, 7, 8, 0);
2584 } else if (!Op.compare_lower("s1e3r")) {
2585 // SYS #6, C7, C8, #0
2586 SYS_ALIAS(6, 7, 8, 0);
2587 } else if (!Op.compare_lower("s1e1w")) {
2588 // SYS #0, C7, C8, #1
2589 SYS_ALIAS(0, 7, 8, 1);
2590 } else if (!Op.compare_lower("s1e2w")) {
2591 // SYS #4, C7, C8, #1
2592 SYS_ALIAS(4, 7, 8, 1);
2593 } else if (!Op.compare_lower("s1e3w")) {
2594 // SYS #6, C7, C8, #1
2595 SYS_ALIAS(6, 7, 8, 1);
2596 } else if (!Op.compare_lower("s1e0r")) {
2597 // SYS #0, C7, C8, #2
2598 SYS_ALIAS(0, 7, 8, 2);
2599 } else if (!Op.compare_lower("s1e0w")) {
2600 // SYS #0, C7, C8, #3
2601 SYS_ALIAS(0, 7, 8, 3);
2602 } else if (!Op.compare_lower("s12e1r")) {
2603 // SYS #4, C7, C8, #4
2604 SYS_ALIAS(4, 7, 8, 4);
2605 } else if (!Op.compare_lower("s12e1w")) {
2606 // SYS #4, C7, C8, #5
2607 SYS_ALIAS(4, 7, 8, 5);
2608 } else if (!Op.compare_lower("s12e0r")) {
2609 // SYS #4, C7, C8, #6
2610 SYS_ALIAS(4, 7, 8, 6);
2611 } else if (!Op.compare_lower("s12e0w")) {
2612 // SYS #4, C7, C8, #7
2613 SYS_ALIAS(4, 7, 8, 7);
2614 } else if (!Op.compare_lower("s1e1rp")) {
2615 if (getSTI().getFeatureBits()[AArch64::HasV8_2aOps]) {
2616 // SYS #0, C7, C9, #0
2617 SYS_ALIAS(0, 7, 9, 0);
2619 return TokError("AT S1E1RP requires ARMv8.2a");
2621 } else if (!Op.compare_lower("s1e1wp")) {
2622 if (getSTI().getFeatureBits()[AArch64::HasV8_2aOps]) {
2623 // SYS #0, C7, C9, #1
2624 SYS_ALIAS(0, 7, 9, 1);
2626 return TokError("AT S1E1WP requires ARMv8.2a");
2629 return TokError("invalid operand for AT instruction");
2631 } else if (Mnemonic == "tlbi") {
2632 if (!Op.compare_lower("vmalle1is")) {
2633 // SYS #0, C8, C3, #0
2634 SYS_ALIAS(0, 8, 3, 0);
2635 } else if (!Op.compare_lower("alle2is")) {
2636 // SYS #4, C8, C3, #0
2637 SYS_ALIAS(4, 8, 3, 0);
2638 } else if (!Op.compare_lower("alle3is")) {
2639 // SYS #6, C8, C3, #0
2640 SYS_ALIAS(6, 8, 3, 0);
2641 } else if (!Op.compare_lower("vae1is")) {
2642 // SYS #0, C8, C3, #1
2643 SYS_ALIAS(0, 8, 3, 1);
2644 } else if (!Op.compare_lower("vae2is")) {
2645 // SYS #4, C8, C3, #1
2646 SYS_ALIAS(4, 8, 3, 1);
2647 } else if (!Op.compare_lower("vae3is")) {
2648 // SYS #6, C8, C3, #1
2649 SYS_ALIAS(6, 8, 3, 1);
2650 } else if (!Op.compare_lower("aside1is")) {
2651 // SYS #0, C8, C3, #2
2652 SYS_ALIAS(0, 8, 3, 2);
2653 } else if (!Op.compare_lower("vaae1is")) {
2654 // SYS #0, C8, C3, #3
2655 SYS_ALIAS(0, 8, 3, 3);
2656 } else if (!Op.compare_lower("alle1is")) {
2657 // SYS #4, C8, C3, #4
2658 SYS_ALIAS(4, 8, 3, 4);
2659 } else if (!Op.compare_lower("vale1is")) {
2660 // SYS #0, C8, C3, #5
2661 SYS_ALIAS(0, 8, 3, 5);
2662 } else if (!Op.compare_lower("vaale1is")) {
2663 // SYS #0, C8, C3, #7
2664 SYS_ALIAS(0, 8, 3, 7);
2665 } else if (!Op.compare_lower("vmalle1")) {
2666 // SYS #0, C8, C7, #0
2667 SYS_ALIAS(0, 8, 7, 0);
2668 } else if (!Op.compare_lower("alle2")) {
2669 // SYS #4, C8, C7, #0
2670 SYS_ALIAS(4, 8, 7, 0);
2671 } else if (!Op.compare_lower("vale2is")) {
2672 // SYS #4, C8, C3, #5
2673 SYS_ALIAS(4, 8, 3, 5);
2674 } else if (!Op.compare_lower("vale3is")) {
2675 // SYS #6, C8, C3, #5
2676 SYS_ALIAS(6, 8, 3, 5);
2677 } else if (!Op.compare_lower("alle3")) {
2678 // SYS #6, C8, C7, #0
2679 SYS_ALIAS(6, 8, 7, 0);
2680 } else if (!Op.compare_lower("vae1")) {
2681 // SYS #0, C8, C7, #1
2682 SYS_ALIAS(0, 8, 7, 1);
2683 } else if (!Op.compare_lower("vae2")) {
2684 // SYS #4, C8, C7, #1
2685 SYS_ALIAS(4, 8, 7, 1);
2686 } else if (!Op.compare_lower("vae3")) {
2687 // SYS #6, C8, C7, #1
2688 SYS_ALIAS(6, 8, 7, 1);
2689 } else if (!Op.compare_lower("aside1")) {
2690 // SYS #0, C8, C7, #2
2691 SYS_ALIAS(0, 8, 7, 2);
2692 } else if (!Op.compare_lower("vaae1")) {
2693 // SYS #0, C8, C7, #3
2694 SYS_ALIAS(0, 8, 7, 3);
2695 } else if (!Op.compare_lower("alle1")) {
2696 // SYS #4, C8, C7, #4
2697 SYS_ALIAS(4, 8, 7, 4);
2698 } else if (!Op.compare_lower("vale1")) {
2699 // SYS #0, C8, C7, #5
2700 SYS_ALIAS(0, 8, 7, 5);
2701 } else if (!Op.compare_lower("vale2")) {
2702 // SYS #4, C8, C7, #5
2703 SYS_ALIAS(4, 8, 7, 5);
2704 } else if (!Op.compare_lower("vale3")) {
2705 // SYS #6, C8, C7, #5
2706 SYS_ALIAS(6, 8, 7, 5);
2707 } else if (!Op.compare_lower("vaale1")) {
2708 // SYS #0, C8, C7, #7
2709 SYS_ALIAS(0, 8, 7, 7);
2710 } else if (!Op.compare_lower("ipas2e1")) {
2711 // SYS #4, C8, C4, #1
2712 SYS_ALIAS(4, 8, 4, 1);
2713 } else if (!Op.compare_lower("ipas2le1")) {
2714 // SYS #4, C8, C4, #5
2715 SYS_ALIAS(4, 8, 4, 5);
2716 } else if (!Op.compare_lower("ipas2e1is")) {
2717 // SYS #4, C8, C0, #1
2718 SYS_ALIAS(4, 8, 0, 1);
2719 } else if (!Op.compare_lower("ipas2le1is")) {
2720 // SYS #4, C8, C0, #5
2721 SYS_ALIAS(4, 8, 0, 5);
2722 } else if (!Op.compare_lower("vmalls12e1")) {
2723 // SYS #4, C8, C7, #6
2724 SYS_ALIAS(4, 8, 7, 6);
2725 } else if (!Op.compare_lower("vmalls12e1is")) {
2726 // SYS #4, C8, C3, #6
2727 SYS_ALIAS(4, 8, 3, 6);
2729 return TokError("invalid operand for TLBI instruction");
2735 Parser.Lex(); // Eat operand.
// Ops whose name contains "all" operate on everything and take no
// register; everything else requires one (e.g. "dc ivac, x0").
2737 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2738 bool HasRegister = false;
2740 // Check for the optional register operand.
2741 if (parseOptionalToken(AsmToken::Comma)) {
2742 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2743 return TokError("expected register operand");
2747 if (ExpectRegister && !HasRegister) {
2748 return TokError("specified " + Mnemonic + " op requires a register");
2750 else if (!ExpectRegister && HasRegister) {
2751 return TokError("specified " + Mnemonic + " op does not use a register");
2754 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
// Parse the operand of a barrier instruction (DMB/DSB/ISB): either an
// immediate in [0, 15] or a named option (e.g. "sy", "ish").  ISB only
// accepts "sy" as a named option.
2760 OperandMatchResultTy
2761 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2762 MCAsmParser &Parser = getParser();
2763 const AsmToken &Tok = Parser.getTok();
2765 // Can be either a #imm style literal or an option name
2766 if (parseOptionalToken(AsmToken::Hash) ||
2767 Tok.is(AsmToken::Integer)) {
2768 // Immediate operand.
2769 const MCExpr *ImmVal;
2770 SMLoc ExprLoc = getLoc();
2771 if (getParser().parseExpression(ImmVal))
2772 return MatchOperand_ParseFail;
2773 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2775 Error(ExprLoc, "immediate value expected for barrier operand");
2776 return MatchOperand_ParseFail;
2778 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2779 Error(ExprLoc, "barrier operand out of range");
2780 return MatchOperand_ParseFail;
// Attach the canonical name if this encoding has one, for diagnostics
// and printing.
2782 auto DB = AArch64DB::lookupDBByEncoding(MCE->getValue());
2783 Operands.push_back(AArch64Operand::CreateBarrier(
2784 MCE->getValue(), DB ? DB->Name : "", ExprLoc, getContext()));
2785 return MatchOperand_Success;
2788 if (Tok.isNot(AsmToken::Identifier)) {
2789 TokError("invalid operand for instruction");
2790 return MatchOperand_ParseFail;
2793 auto DB = AArch64DB::lookupDBByName(Tok.getString());
2795 TokError("invalid barrier option name");
2796 return MatchOperand_ParseFail;
2799 // The only valid named option for ISB is 'sy'
2800 if (Mnemonic == "isb" && DB->Encoding != AArch64DB::sy) {
2801 TokError("'sy' or #imm operand expected");
2802 return MatchOperand_ParseFail;
2805 Operands.push_back(AArch64Operand::CreateBarrier(
2806 DB->Encoding, Tok.getString(), getLoc(), getContext()));
2807 Parser.Lex(); // Consume the option
2809 return MatchOperand_Success;
// Parse a system-register operand (for MRS/MSR).  A register known to the
// sysreg table records separate MRS/MSR encodings depending on whether it
// is readable/writeable (-1 meaning "not valid in that direction");
// otherwise the generic Sn_n_Cn_Cn_n form is parsed.
2812 OperandMatchResultTy
2813 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2814 MCAsmParser &Parser = getParser();
2815 const AsmToken &Tok = Parser.getTok();
2817 if (Tok.isNot(AsmToken::Identifier))
2818 return MatchOperand_NoMatch;
2821 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
2822 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
2823 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
2824 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
2826 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
// The same identifier may also name a PState field (for MSR pstate ops).
2828 auto PState = AArch64PState::lookupPStateByName(Tok.getString());
2829 unsigned PStateImm = -1;
2830 if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
2831 PStateImm = PState->Encoding;
2834 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
2835 PStateImm, getContext()));
2836 Parser.Lex(); // Eat identifier
2838 return MatchOperand_Success;
2841 /// tryParseVectorRegister - Parse a vector register operand.
// Matches a vector register, its optional layout suffix (kept as a
// literal token, e.g. ".4s"), and an optional "[index]" element specifier.
2842 bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
2843 MCAsmParser &Parser = getParser();
2844 if (Parser.getTok().isNot(AsmToken::Identifier))
2848 // Check for a vector register specifier first.
2850 int64_t Reg = tryMatchVectorRegister(Kind, false);
2854 AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2855 // If there was an explicit qualifier, that goes on as a literal text
2859 AArch64Operand::CreateToken(Kind, false, S, getContext()));
2861 // If there is an index specifier following the register, parse that too.
2862 SMLoc SIdx = getLoc();
2863 if (parseOptionalToken(AsmToken::LBrac)) {
2864 const MCExpr *ImmVal;
2865 if (getParser().parseExpression(ImmVal))
2867 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2869 TokError("immediate value expected for vector index");
2875 if (parseToken(AsmToken::RBrac, "']' expected"))
2878 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2885 /// parseRegister - Parse a non-vector register operand.
// Tries a vector register first (so V-register syntax is not misparsed),
// then falls back to a scalar register, including the special "[1]"
// string-token suffix used by a few instructions.
2886 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
2887 MCAsmParser &Parser = getParser();
2889 // Try for a vector register.
2890 if (!tryParseVectorRegister(Operands))
2893 // Try for a scalar register.
2894 int64_t Reg = tryParseRegister();
2898 AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2900 // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2901 // as a string token in the instruction itself.
2902 SMLoc LBracS = getLoc();
2903 const AsmToken &Tok = Parser.getTok();
2904 if (parseOptionalToken(AsmToken::LBrac)) {
2905 if (Tok.is(AsmToken::Integer)) {
2906 SMLoc IntS = getLoc();
2907 int64_t Val = Tok.getIntVal();
2910 SMLoc RBracS = getLoc();
2911 if (parseOptionalToken(AsmToken::RBrac)) {
// Emit "[", "1", "]" as literal tokens so the matcher can see them.
2913 AArch64Operand::CreateToken("[", false, LBracS, getContext()));
2915 AArch64Operand::CreateToken("1", false, IntS, getContext()));
2917 AArch64Operand::CreateToken("]", false, RBracS, getContext()));
// Parse an immediate that may carry an ELF relocation specifier of the
// form ":modifier:expr" (e.g. ":lo12:sym").  On success ImmVal holds the
// (possibly AArch64MCExpr-wrapped) expression.
2927 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2928 MCAsmParser &Parser = getParser();
2929 bool HasELFModifier = false;
2930 AArch64MCExpr::VariantKind RefKind;
2932 if (parseOptionalToken(AsmToken::Colon)) {
2933 HasELFModifier = true;
2935 if (Parser.getTok().isNot(AsmToken::Identifier))
2936 return TokError("expect relocation specifier in operand after ':'");
2938 std::string LowerCase = Parser.getTok().getIdentifier().lower();
2939 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2940 .Case("lo12", AArch64MCExpr::VK_LO12)
2941 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2942 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2943 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2944 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2945 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2946 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2947 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2948 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2949 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2950 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2951 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2952 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2953 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2954 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2955 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2956 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2957 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
2958 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
2959 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
2960 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
2961 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
2962 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
2963 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
2964 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
2965 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
2966 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
2967 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
2968 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
2969 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
2970 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
2971 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
2972 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
2973 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
2974 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
2975 .Default(AArch64MCExpr::VK_INVALID);
2977 if (RefKind == AArch64MCExpr::VK_INVALID)
2978 return TokError("expect relocation specifier in operand after ':'");
2980 Parser.Lex(); // Eat identifier
2982 if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
2986 if (getParser().parseExpression(ImmVal))
// Wrap the expression so the fixup emission later knows the variant.
2990 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
2995 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
// Accepts "{ Vn.T - Vm.T }" (range form) or "{ Vn.T, Vn+1.T, ... }"
// (enumerated form, sequential with wraparound at 31), optionally followed
// by an element index "[i]".
2996 bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
2997 MCAsmParser &Parser = getParser();
2998 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
3000 Parser.Lex(); // Eat left bracket token.
3002 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
3005 int64_t PrevReg = FirstReg;
3008 if (parseOptionalToken(AsmToken::Minus)) {
3009 SMLoc Loc = getLoc();
3011 int64_t Reg = tryMatchVectorRegister(NextKind, true);
3014 // Any Kind suffices must match on all regs in the list.
3015 if (Kind != NextKind)
3016 return Error(Loc, "mismatched register size suffix");
// Register ranges wrap modulo 32 (e.g. { v30.4s - v1.4s } is legal).
3018 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
3020 if (Space == 0 || Space > 3) {
3021 return Error(Loc, "invalid number of vectors");
3027 while (parseOptionalToken(AsmToken::Comma)) {
3028 SMLoc Loc = getLoc();
3030 int64_t Reg = tryMatchVectorRegister(NextKind, true);
3033 // Any Kind suffices must match on all regs in the list.
3034 if (Kind != NextKind)
3035 return Error(Loc, "mismatched register size suffix");
3037 // Registers must be incremental (with wraparound at 31)
3038 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
3039 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
3040 return Error(Loc, "registers must be sequential");
3047 if (parseToken(AsmToken::RCurly, "'}' expected"))
3051 return Error(S, "invalid number of vectors");
3053 unsigned NumElements = 0;
3054 char ElementKind = 0;
3056 parseValidVectorKind(Kind, NumElements, ElementKind);
3058 Operands.push_back(AArch64Operand::CreateVectorList(
3059 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
3061 // If there is an index specifier following the list, parse that too.
3062 SMLoc SIdx = getLoc();
3063 if (parseOptionalToken(AsmToken::LBrac)) { // Eat left bracket token.
3064 const MCExpr *ImmVal;
3065 if (getParser().parseExpression(ImmVal))
3067 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3069 TokError("immediate value expected for vector index");
3074 if (parseToken(AsmToken::RBrac, "']' expected"))
3077 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
// Parse a GPR64sp register that may be followed by ", #0" (used by
// exclusive/ordered memory ops where only a zero offset is permitted).
// Any index other than an absent one or a literal 0 is an error.
3083 OperandMatchResultTy
3084 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
3085 MCAsmParser &Parser = getParser();
3086 const AsmToken &Tok = Parser.getTok();
3087 if (!Tok.is(AsmToken::Identifier))
3088 return MatchOperand_NoMatch;
3090 unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);
3092 MCContext &Ctx = getContext();
3093 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
3094 if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
3095 return MatchOperand_NoMatch;
3098 Parser.Lex(); // Eat register
3100 if (!parseOptionalToken(AsmToken::Comma)) {
3102 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
3103 return MatchOperand_Success;
3106 parseOptionalToken(AsmToken::Hash);
3108 if (Parser.getTok().isNot(AsmToken::Integer)) {
3109 Error(getLoc(), "index must be absent or #0");
3110 return MatchOperand_ParseFail;
3113 const MCExpr *ImmVal;
3114 if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
3115 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
3116 Error(getLoc(), "index must be absent or #0");
3117 return MatchOperand_ParseFail;
3121 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
3122 return MatchOperand_Success;
3125 /// parseOperand - Parse a arm instruction operand. For now this parses the
3126 /// operand regardless of the mnemonic.
// Dispatches on the leading token: custom (tablegen) parsers first, then
// '[' memory syntax, '{' vector lists, identifiers (cond codes, registers,
// shifts, labels), immediates, and the "ldr reg, =value" pseudo.
3127 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3128 bool invertCondCode) {
3129 MCAsmParser &Parser = getParser();
3130 // Check if the current operand has a custom associated parser, if so, try to
3131 // custom parse the operand, or fallback to the general approach.
3132 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
3133 if (ResTy == MatchOperand_Success)
3135 // If there wasn't a custom match, try the generic matcher below. Otherwise,
3136 // there was a match, but an error occurred, in which case, just return that
3137 // the operand parsing failed.
3138 if (ResTy == MatchOperand_ParseFail)
3141 // Nothing custom, so do general case parsing.
3143 switch (getLexer().getKind()) {
3147 if (parseSymbolicImmVal(Expr))
3148 return Error(S, "invalid operand");
3150 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3151 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3154 case AsmToken::LBrac: {
3155 SMLoc Loc = Parser.getTok().getLoc();
3156 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3158 Parser.Lex(); // Eat '['
3160 // There's no comma after a '[', so we can parse the next operand
3162 return parseOperand(Operands, false, false);
3164 case AsmToken::LCurly:
3165 return parseVectorList(Operands);
3166 case AsmToken::Identifier: {
3167 // If we're expecting a Condition Code operand, then just parse that.
3169 return parseCondCode(Operands, invertCondCode);
3171 // If it's a register name, parse it.
3172 if (!parseRegister(Operands))
3175 // This could be an optional "shift" or "extend" operand.
3176 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3177 // We can only continue if no tokens were eaten.
3178 if (GotShift != MatchOperand_NoMatch)
3181 // This was not a register so parse other operands that start with an
3182 // identifier (like labels) as expressions and create them as immediates.
3183 const MCExpr *IdVal;
3185 if (getParser().parseExpression(IdVal))
3187 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3188 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3191 case AsmToken::Integer:
3192 case AsmToken::Real:
3193 case AsmToken::Hash: {
3194 // #42 -> immediate.
3197 parseOptionalToken(AsmToken::Hash);
3199 // Parse a negative sign
3200 bool isNegative = false;
3201 if (Parser.getTok().is(AsmToken::Minus)) {
3203 // We need to consume this token only when we have a Real, otherwise
3204 // we let parseSymbolicImmVal take care of it
3205 if (Parser.getLexer().peekTok().is(AsmToken::Real))
3209 // The only Real that should come through here is a literal #0.0 for
3210 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3211 // so convert the value.
3212 const AsmToken &Tok = Parser.getTok();
3213 if (Tok.is(AsmToken::Real)) {
3214 APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
3215 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3216 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3217 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3218 Mnemonic != "fcmlt")
3219 return TokError("unexpected floating point literal");
3220 else if (IntVal != 0 || isNegative)
3221 return TokError("expected floating-point constant #0.0")
3222 Parser.Lex(); // Eat the token.
// "#0.0" is represented as the two literal tokens "#0" and ".0".
3225 AArch64Operand::CreateToken("#0", false, S, getContext()));
3227 AArch64Operand::CreateToken(".0", false, S, getContext()));
3231 const MCExpr *ImmVal;
3232 if (parseSymbolicImmVal(ImmVal))
3235 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3236 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3239 case AsmToken::Equal: {
3240 SMLoc Loc = getLoc();
3241 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3242 return TokError("unexpected token in operand");
3243 Parser.Lex(); // Eat '='
3244 const MCExpr *SubExprVal;
3245 if (getParser().parseExpression(SubExprVal))
3248 if (Operands.size() < 2 ||
3249 !static_cast<AArch64Operand &>(*Operands[1]).isReg())
3250 return Error(Loc, "Only valid when first operand is register");
3253 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3254 Operands[1]->getReg());
3256 MCContext& Ctx = getContext();
3257 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3258 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3259 if (isa<MCConstantExpr>(SubExprVal)) {
3260 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
// Try to normalize the constant to a MOVZ-encodable (imm16, shift)
// pair; the shift must be a multiple of 16.
3261 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3262 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3266 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3267 Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3268 Operands.push_back(AArch64Operand::CreateImm(
3269 MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
3271 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3272 ShiftAmt, true, S, E, Ctx));
3275 APInt Simm = APInt(64, Imm << ShiftAmt);
3276 // check if the immediate is an unsigned or signed 32-bit int for W regs
3277 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3278 return Error(Loc, "Immediate too large for register");
3280 // If it is a label or an imm that cannot fit in a movz, put it into CP.
3281 const MCExpr *CPLoc =
3282 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
3283 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3289 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
// Parses one statement starting at the mnemonic: canonicalizes short-form
// conditional branches, handles the `.req` directive and SYS aliases, splits
// the mnemonic on '.' into token operands, then reads the comma-separated
// operand list (including the trailing ']' and '!' memory-specifier tokens).
// NOTE(review): this listing is elided — several original lines (e.g. the
// Operands.push_back(...) calls that pair with the CreateToken/CreateCondCode
// expressions below, the operand counter N, and closing braces) are not shown.
3291 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3292                                         StringRef Name, SMLoc NameLoc,
3293                                         OperandVector &Operands) {
3294   MCAsmParser &Parser = getParser();
// Rewrite legacy condition-suffixed branch mnemonics ("beq") into the
// canonical dotted form ("b.eq") that the match tables expect.
3295   Name = StringSwitch<StringRef>(Name.lower())
3296              .Case("beq", "b.eq")
3297              .Case("bne", "b.ne")
3298              .Case("bhs", "b.hs")
3299              .Case("bcs", "b.cs")
3300              .Case("blo", "b.lo")
3301              .Case("bcc", "b.cc")
3302              .Case("bmi", "b.mi")
3303              .Case("bpl", "b.pl")
3304              .Case("bvs", "b.vs")
3305              .Case("bvc", "b.vc")
3306              .Case("bhi", "b.hi")
3307              .Case("bls", "b.ls")
3308              .Case("bge", "b.ge")
3309              .Case("blt", "b.lt")
3310              .Case("bgt", "b.gt")
3311              .Case("ble", "b.le")
3312              .Case("bal", "b.al")
3313              .Case("bnv", "b.nv")
3316   // First check for the AArch64-specific .req directive.
3317   if (Parser.getTok().is(AsmToken::Identifier) &&
3318       Parser.getTok().getIdentifier() == ".req") {
3319     parseDirectiveReq(Name, NameLoc);
3320     // We always return 'error' for this, as we're done with this
3321     // statement and don't need to match the 'instruction'.
3325   // Create the leading tokens for the mnemonic, split by '.' characters.
3326   size_t Start = 0, Next = Name.find('.');
3327   StringRef Head = Name.slice(Start, Next);
3329   // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3330   if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi")
3331     return parseSysAlias(Head, NameLoc, Operands);
3334       AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3337   // Handle condition codes for a branch mnemonic
3338   if (Head == "b" && Next != StringRef::npos) {
3340     Next = Name.find('.', Start + 1);
3341     Head = Name.slice(Start + 1, Next);
// Point the suffix diagnostic location at the condition-code text itself,
// not at the start of the mnemonic.
3343     SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3344                                             (Head.data() - Name.data()));
3345     AArch64CC::CondCode CC = parseCondCodeString(Head);
3346     if (CC == AArch64CC::Invalid)
3347       return Error(SuffixLoc, "invalid condition code");
3349         AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3351         AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3354   // Add the remaining tokens in the mnemonic.
3355   while (Next != StringRef::npos) {
3357     Next = Name.find('.', Start + 1);
3358     Head = Name.slice(Start, Next);
3359     SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3360                                             (Head.data() - Name.data()) + 1);
3362         AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3365   // Conditional compare instructions have a Condition Code operand, which needs
3366   // to be parsed and an immediate operand created.
3367   bool condCodeFourthOperand =
3368       (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3369        Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3370        Head == "csinc" || Head == "csinv" || Head == "csneg");
3372   // These instructions are aliases to some of the conditional select
3373   // instructions. However, the condition code is inverted in the aliased
3376   // FIXME: Is this the correct way to handle these? Or should the parser
3377   // generate the aliased instructions directly?
3378   bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3379   bool condCodeThirdOperand =
3380       (Head == "cinc" || Head == "cinv" || Head == "cneg");
3382   // Read the remaining operands.
3383   if (getLexer().isNot(AsmToken::EndOfStatement)) {
3384     // Read the first operand.
3385     if (parseOperand(Operands, false, false)) {
// NOTE(review): the operand counter N used below is declared/incremented on
// elided lines — confirm against the full source.
3390     while (parseOptionalToken(AsmToken::Comma)) {
3391       // Parse and remember the operand.
3392       if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3393                                      (N == 3 && condCodeThirdOperand) ||
3394                                      (N == 2 && condCodeSecondOperand),
3395                        condCodeSecondOperand || condCodeThirdOperand)) {
3399     // After successfully parsing some operands there are two special cases to
3400     // consider (i.e. notional operands not separated by commas). Both are due
3401     // to memory specifiers:
3402     //  + An RBrac will end an address for load/store/prefetch
3403     //  + An '!' will indicate a pre-indexed operation.
3405     // It's someone else's responsibility to make sure these tokens are sane
3406     // in the given context!
3408     SMLoc RLoc = Parser.getTok().getLoc();
3409     if (parseOptionalToken(AsmToken::RBrac))
3411           AArch64Operand::CreateToken("]", false, RLoc, getContext()));
3412     SMLoc ELoc = Parser.getTok().getLoc();
3413     if (parseOptionalToken(AsmToken::Exclaim))
3415           AArch64Operand::CreateToken("!", false, ELoc, getContext()));
3421   if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3427 // FIXME: This entire function is a giant hack to provide us with decent
3428 // operand range validation/diagnostics until TableGen/MC can be extended
3429 // to support autogeneration of this kind of validation.
// Post-match semantic validation: rejects architecturally "unpredictable"
// register combinations (writeback base aliasing Rt/Rt2, LDP with Rt==Rt2)
// and symbol-reference immediates that only specific ADD forms may carry.
// Returns true (with a diagnostic at the relevant operand Loc) on failure.
3430 bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3431                                            SmallVectorImpl<SMLoc> &Loc) {
3432   const MCRegisterInfo *RI = getContext().getRegisterInfo();
3433   // Check for indexed addressing modes w/ the base register being the
3434   // same as a destination/source register or pair load where
3435   // the Rt == Rt2. All of those are undefined behaviour.
3436   switch (Inst.getOpcode()) {
3437   case AArch64::LDPSWpre:
3438   case AArch64::LDPWpost:
3439   case AArch64::LDPWpre:
3440   case AArch64::LDPXpost:
3441   case AArch64::LDPXpre: {
// Pre/post-indexed LDP: operand 0 is the writeback def, so Rt/Rt2/Rn are
// operands 1-3. isSubRegisterEq also catches W/X sub-register aliasing.
3442     unsigned Rt = Inst.getOperand(1).getReg();
3443     unsigned Rt2 = Inst.getOperand(2).getReg();
3444     unsigned Rn = Inst.getOperand(3).getReg();
3445     if (RI->isSubRegisterEq(Rn, Rt))
3446       return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3447                            "is also a destination");
3448     if (RI->isSubRegisterEq(Rn, Rt2))
3449       return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3450                            "is also a destination");
3453   case AArch64::LDPDi:
3454   case AArch64::LDPQi:
3455   case AArch64::LDPSi:
3456   case AArch64::LDPSWi:
3457   case AArch64::LDPWi:
3458   case AArch64::LDPXi: {
// Plain (non-writeback) LDP: no writeback def, so Rt/Rt2 are operands 0-1.
// NOTE(review): the guarding `if (Rt == Rt2)` line is elided in this listing.
3459     unsigned Rt = Inst.getOperand(0).getReg();
3460     unsigned Rt2 = Inst.getOperand(1).getReg();
3462       return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3465   case AArch64::LDPDpost:
3466   case AArch64::LDPDpre:
3467   case AArch64::LDPQpost:
3468   case AArch64::LDPQpre:
3469   case AArch64::LDPSpost:
3470   case AArch64::LDPSpre:
3471   case AArch64::LDPSWpost: {
// FP/vector pre/post LDP plus LDPSWpost: only the Rt==Rt2 check applies
// (FP registers cannot alias the GPR writeback base).
3472     unsigned Rt = Inst.getOperand(1).getReg();
3473     unsigned Rt2 = Inst.getOperand(2).getReg();
3475       return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3478   case AArch64::STPDpost:
3479   case AArch64::STPDpre:
3480   case AArch64::STPQpost:
3481   case AArch64::STPQpre:
3482   case AArch64::STPSpost:
3483   case AArch64::STPSpre:
3484   case AArch64::STPWpost:
3485   case AArch64::STPWpre:
3486   case AArch64::STPXpost:
3487   case AArch64::STPXpre: {
3488     unsigned Rt = Inst.getOperand(1).getReg();
3489     unsigned Rt2 = Inst.getOperand(2).getReg();
3490     unsigned Rn = Inst.getOperand(3).getReg();
3491     if (RI->isSubRegisterEq(Rn, Rt))
3492       return Error(Loc[0], "unpredictable STP instruction, writeback base "
3493                            "is also a source");
3494     if (RI->isSubRegisterEq(Rn, Rt2))
3495       return Error(Loc[1], "unpredictable STP instruction, writeback base "
3496                            "is also a source");
3499   case AArch64::LDRBBpre:
3500   case AArch64::LDRBpre:
3501   case AArch64::LDRHHpre:
3502   case AArch64::LDRHpre:
3503   case AArch64::LDRSBWpre:
3504   case AArch64::LDRSBXpre:
3505   case AArch64::LDRSHWpre:
3506   case AArch64::LDRSHXpre:
3507   case AArch64::LDRSWpre:
3508   case AArch64::LDRWpre:
3509   case AArch64::LDRXpre:
3510   case AArch64::LDRBBpost:
3511   case AArch64::LDRBpost:
3512   case AArch64::LDRHHpost:
3513   case AArch64::LDRHpost:
3514   case AArch64::LDRSBWpost:
3515   case AArch64::LDRSBXpost:
3516   case AArch64::LDRSHWpost:
3517   case AArch64::LDRSHXpost:
3518   case AArch64::LDRSWpost:
3519   case AArch64::LDRWpost:
3520   case AArch64::LDRXpost: {
// Single-register writeback loads: Rt is operand 1, base Rn operand 2.
3521     unsigned Rt = Inst.getOperand(1).getReg();
3522     unsigned Rn = Inst.getOperand(2).getReg();
3523     if (RI->isSubRegisterEq(Rn, Rt))
3524       return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3525                            "is also a source");
3528   case AArch64::STRBBpost:
3529   case AArch64::STRBpost:
3530   case AArch64::STRHHpost:
3531   case AArch64::STRHpost:
3532   case AArch64::STRWpost:
3533   case AArch64::STRXpost:
3534   case AArch64::STRBBpre:
3535   case AArch64::STRBpre:
3536   case AArch64::STRHHpre:
3537   case AArch64::STRHpre:
3538   case AArch64::STRWpre:
3539   case AArch64::STRXpre: {
3540     unsigned Rt = Inst.getOperand(1).getReg();
3541     unsigned Rn = Inst.getOperand(2).getReg();
3542     if (RI->isSubRegisterEq(Rn, Rt))
3543       return Error(Loc[0], "unpredictable STR instruction, writeback base "
3544                            "is also a source");
3549   // Now check immediate ranges. Separate from the above as there is overlap
3550   // in the instructions being checked and this keeps the nested conditionals
3552   switch (Inst.getOpcode()) {
3553   case AArch64::ADDSWri:
3554   case AArch64::ADDSXri:
3555   case AArch64::ADDWri:
3556   case AArch64::ADDXri:
3557   case AArch64::SUBSWri:
3558   case AArch64::SUBSXri:
3559   case AArch64::SUBWri:
3560   case AArch64::SUBXri: {
3561     // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3562     // some slight duplication here.
3563     if (Inst.getOperand(2).isExpr()) {
3564       const MCExpr *Expr = Inst.getOperand(2).getExpr();
3565       AArch64MCExpr::VariantKind ELFRefKind;
3566       MCSymbolRefExpr::VariantKind DarwinRefKind;
// NOTE(review): the Addend declaration is on an elided line.
3568       if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3570         // Only allow these with ADDXri.
3571         if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3572              DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3573             Inst.getOpcode() == AArch64::ADDXri)
3576         // Only allow these with ADDXri/ADDWri
3577         if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3578              ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3579              ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3580              ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3581              ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3582              ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3583              ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3584              ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
3585             (Inst.getOpcode() == AArch64::ADDXri ||
3586              Inst.getOpcode() == AArch64::ADDWri))
3589         // Don't allow symbol refs in the immediate field otherwise
3590         // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
3591         // operands of the original instruction (i.e. 'add w0, w1, borked' vs
3592         // 'cmp w0, 'borked')
3593         return Error(Loc.back(), "invalid immediate expression");
3595       // We don't validate more complex expressions here
// Maps a matcher failure code to a human-readable diagnostic at Loc.
// Always returns true (Error's conventional "parse failed" result), so
// callers can `return showMatchError(...)` directly.
3604 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
3606   case Match_MissingFeature:
3608                  "instruction requires a CPU feature not currently enabled");
3609   case Match_InvalidOperand:
3610     return Error(Loc, "invalid operand for instruction");
3611   case Match_InvalidSuffix:
3612     return Error(Loc, "invalid type suffix for instruction");
3613   case Match_InvalidCondCode:
3614     return Error(Loc, "expected AArch64 condition code");
3615   case Match_AddSubRegExtendSmall:
3617       "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3618   case Match_AddSubRegExtendLarge:
3620       "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3621   case Match_AddSubSecondSource:
3623       "expected compatible register, symbol or integer in range [0, 4095]");
3624   case Match_LogicalSecondSource:
3625     return Error(Loc, "expected compatible register or logical immediate");
3626   case Match_InvalidMovImm32Shift:
3627     return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3628   case Match_InvalidMovImm64Shift:
3629     return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3630   case Match_AddSubRegShift32:
3632       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3633   case Match_AddSubRegShift64:
3635       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3636   case Match_InvalidFPImm:
3638         "expected compatible register or floating-point constant");
3639   case Match_InvalidMemoryIndexedSImm9:
3640     return Error(Loc, "index must be an integer in range [-256, 255].");
3641   case Match_InvalidMemoryIndexed4SImm7:
3642     return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3643   case Match_InvalidMemoryIndexed8SImm7:
3644     return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3645   case Match_InvalidMemoryIndexed16SImm7:
3646     return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
3647   case Match_InvalidMemoryWExtend8:
3649                  "expected 'uxtw' or 'sxtw' with optional shift of #0");
3650   case Match_InvalidMemoryWExtend16:
3652                  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3653   case Match_InvalidMemoryWExtend32:
3655                  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3656   case Match_InvalidMemoryWExtend64:
3658                  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3659   case Match_InvalidMemoryWExtend128:
3661                  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3662   case Match_InvalidMemoryXExtend8:
3664                  "expected 'lsl' or 'sxtx' with optional shift of #0");
3665   case Match_InvalidMemoryXExtend16:
3667                  "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3668   case Match_InvalidMemoryXExtend32:
3670                  "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3671   case Match_InvalidMemoryXExtend64:
3673                  "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3674   case Match_InvalidMemoryXExtend128:
3676                  "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
3677   case Match_InvalidMemoryIndexed1:
3678     return Error(Loc, "index must be an integer in range [0, 4095].");
3679   case Match_InvalidMemoryIndexed2:
3680     return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3681   case Match_InvalidMemoryIndexed4:
3682     return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3683   case Match_InvalidMemoryIndexed8:
3684     return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3685   case Match_InvalidMemoryIndexed16:
3686     return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
3687   case Match_InvalidImm0_1:
3688     return Error(Loc, "immediate must be an integer in range [0, 1].");
3689   case Match_InvalidImm0_7:
3690     return Error(Loc, "immediate must be an integer in range [0, 7].");
3691   case Match_InvalidImm0_15:
3692     return Error(Loc, "immediate must be an integer in range [0, 15].");
3693   case Match_InvalidImm0_31:
3694     return Error(Loc, "immediate must be an integer in range [0, 31].");
3695   case Match_InvalidImm0_63:
3696     return Error(Loc, "immediate must be an integer in range [0, 63].");
3697   case Match_InvalidImm0_127:
3698     return Error(Loc, "immediate must be an integer in range [0, 127].");
3699   case Match_InvalidImm0_65535:
3700     return Error(Loc, "immediate must be an integer in range [0, 65535].");
3701   case Match_InvalidImm1_8:
3702     return Error(Loc, "immediate must be an integer in range [1, 8].");
3703   case Match_InvalidImm1_16:
3704     return Error(Loc, "immediate must be an integer in range [1, 16].");
3705   case Match_InvalidImm1_32:
3706     return Error(Loc, "immediate must be an integer in range [1, 32].");
3707   case Match_InvalidImm1_64:
3708     return Error(Loc, "immediate must be an integer in range [1, 64].");
3709   case Match_InvalidIndex1:
3710     return Error(Loc, "expected lane specifier '[1]'");
3711   case Match_InvalidIndexB:
3712     return Error(Loc, "vector lane must be an integer in range [0, 15].");
3713   case Match_InvalidIndexH:
3714     return Error(Loc, "vector lane must be an integer in range [0, 7].");
3715   case Match_InvalidIndexS:
3716     return Error(Loc, "vector lane must be an integer in range [0, 3].");
3717   case Match_InvalidIndexD:
3718     return Error(Loc, "vector lane must be an integer in range [0, 1].");
3719   case Match_InvalidLabel:
3720     return Error(Loc, "expected label or encodable integer pc offset");
// NOTE(review): the case labels for the next two returns (system-register
// read/write match failures) are on elided lines — confirm in full source.
3722     return Error(Loc, "expected readable system register");
3724     return Error(Loc, "expected writable system register or pstate");
3725   case Match_MnemonicFail:
3726     return Error(Loc, "unrecognized instruction mnemonic");
3728   llvm_unreachable("unexpected error code!");
3732 static const char *getSubtargetFeatureName(uint64_t Val);
// Top-level match-and-emit entry point. First rewrites a set of aliases the
// TableGen matcher cannot express (lsl->ubfm, bfc/bfi/bfxil->bfm families,
// sxt/uxt register-width twiddles, fmov #0.0), then runs the generated
// matcher — short-form NEON table first, long-form second — validates the
// result, and either emits the MCInst or reports a diagnostic.
// NOTE(review): this listing is elided; several guard conditions, braces and
// statements between the visible lines are missing from view.
3734 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3735                                                OperandVector &Operands,
3737                                                uint64_t &ErrorInfo,
3738                                                bool MatchingInlineAsm) {
3739   assert(!Operands.empty() && "Unexpect empty operand list!");
3740   AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3741   assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3743   StringRef Tok = Op.getToken();
3744   unsigned NumOperands = Operands.size();
// "lsl Rd, Rn, #imm" is an alias for "ubfm" with transformed immediates.
3746   if (NumOperands == 4 && Tok == "lsl") {
3747     AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3748     AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3749     if (Op2.isReg() && Op3.isImm()) {
3750       const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3752         uint64_t Op3Val = Op3CE->getValue();
3753         uint64_t NewOp3Val = 0;
3754         uint64_t NewOp4Val = 0;
// immr/imms depend on register width: 32-bit GPRs mask with 0x1f, else 0x3f.
3755         if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3757           NewOp3Val = (32 - Op3Val) & 0x1f;
3758           NewOp4Val = 31 - Op3Val;
3760           NewOp3Val = (64 - Op3Val) & 0x3f;
3761           NewOp4Val = 63 - Op3Val;
3764         const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
3765         const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
3767         Operands[0] = AArch64Operand::CreateToken(
3768             "ubfm", false, Op.getStartLoc(), getContext());
3769         Operands.push_back(AArch64Operand::CreateImm(
3770             NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3771         Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3772                                                 Op3.getEndLoc(), getContext());
3775   } else if (NumOperands == 4 && Tok == "bfc") {
3776     // FIXME: Horrible hack to handle BFC->BFM alias.
3777     AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3778     AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
3779     AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
3781     if (Op1.isReg() && LSBOp.isImm() && WidthOp.isImm()) {
3782       const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
3783       const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
3785       if (LSBCE && WidthCE) {
3786         uint64_t LSB = LSBCE->getValue();
3787         uint64_t Width = WidthCE->getValue();
3789         uint64_t RegWidth = 0;
// NOTE(review): the RegWidth = 32/64 assignments are on elided lines.
3790         if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3796         if (LSB >= RegWidth)
3797           return Error(LSBOp.getStartLoc(),
3798                        "expected integer in range [0, 31]");
3799         if (Width < 1 || Width > RegWidth)
3800           return Error(WidthOp.getStartLoc(),
3801                        "expected integer in range [1, 32]");
3805           ImmR = (32 - LSB) & 0x1f;
3807           ImmR = (64 - LSB) & 0x3f;
3809         uint64_t ImmS = Width - 1;
3811         if (ImmR != 0 && ImmS >= ImmR)
3812           return Error(WidthOp.getStartLoc(),
3813                        "requested insert overflows register");
3815         const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
3816         const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
3817         Operands[0] = AArch64Operand::CreateToken(
3818               "bfm", false, Op.getStartLoc(), getContext());
// BFC has no source register; BFM's source becomes the zero register.
3819         Operands[2] = AArch64Operand::CreateReg(
3820             RegWidth == 32 ? AArch64::WZR : AArch64::XZR, false, SMLoc(),
3821             SMLoc(), getContext());
3822         Operands[3] = AArch64Operand::CreateImm(
3823             ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
3824         Operands.emplace_back(
3825             AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
3826                                       WidthOp.getEndLoc(), getContext()));
3829   } else if (NumOperands == 5) {
3830     // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3831     // UBFIZ -> UBFM aliases.
3832     if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3833       AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3834       AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3835       AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3837       if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3838         const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3839         const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3841         if (Op3CE && Op4CE) {
3842           uint64_t Op3Val = Op3CE->getValue();
3843           uint64_t Op4Val = Op4CE->getValue();
3845           uint64_t RegWidth = 0;
3846           if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3852           if (Op3Val >= RegWidth)
3853             return Error(Op3.getStartLoc(),
3854                          "expected integer in range [0, 31]");
3855           if (Op4Val < 1 || Op4Val > RegWidth)
3856             return Error(Op4.getStartLoc(),
3857                          "expected integer in range [1, 32]");
3859           uint64_t NewOp3Val = 0;
3861             NewOp3Val = (32 - Op3Val) & 0x1f;
3863             NewOp3Val = (64 - Op3Val) & 0x3f;
3865           uint64_t NewOp4Val = Op4Val - 1;
3867           if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3868             return Error(Op4.getStartLoc(),
3869                          "requested insert overflows register");
3871           const MCExpr *NewOp3 =
3872               MCConstantExpr::create(NewOp3Val, getContext());
3873           const MCExpr *NewOp4 =
3874               MCConstantExpr::create(NewOp4Val, getContext());
3875           Operands[3] = AArch64Operand::CreateImm(
3876               NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
3877           Operands[4] = AArch64Operand::CreateImm(
3878               NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
// NOTE(review): the `if (Tok == "bfi")` guard for the next rewrite is elided.
3880             Operands[0] = AArch64Operand::CreateToken(
3881                 "bfm", false, Op.getStartLoc(), getContext());
3882           else if (Tok == "sbfiz")
3883             Operands[0] = AArch64Operand::CreateToken(
3884                 "sbfm", false, Op.getStartLoc(), getContext());
3885           else if (Tok == "ubfiz")
3886             Operands[0] = AArch64Operand::CreateToken(
3887                 "ubfm", false, Op.getStartLoc(), getContext());
3889             llvm_unreachable("No valid mnemonic for alias?");
3893     // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3894     // UBFX -> UBFM aliases.
3895   } else if (NumOperands == 5 &&
3896              (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
3897     AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3898     AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3899     AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3901     if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3902       const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3903       const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3905       if (Op3CE && Op4CE) {
3906         uint64_t Op3Val = Op3CE->getValue();
3907         uint64_t Op4Val = Op4CE->getValue();
3909         uint64_t RegWidth = 0;
3910         if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3916         if (Op3Val >= RegWidth)
3917           return Error(Op3.getStartLoc(),
3918                        "expected integer in range [0, 31]");
3919         if (Op4Val < 1 || Op4Val > RegWidth)
3920           return Error(Op4.getStartLoc(),
3921                        "expected integer in range [1, 32]");
// Extract form: imms = lsb + width - 1; also catches unsigned wraparound.
3923         uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3925         if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
3926           return Error(Op4.getStartLoc(),
3927                        "requested extract overflows register");
3929         const MCExpr *NewOp4 =
3930             MCConstantExpr::create(NewOp4Val, getContext());
3931         Operands[4] = AArch64Operand::CreateImm(
3932             NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3934           Operands[0] = AArch64Operand::CreateToken(
3935               "bfm", false, Op.getStartLoc(), getContext());
3936         else if (Tok == "sbfx")
3937           Operands[0] = AArch64Operand::CreateToken(
3938               "sbfm", false, Op.getStartLoc(), getContext());
3939         else if (Tok == "ubfx")
3940           Operands[0] = AArch64Operand::CreateToken(
3941               "ubfm", false, Op.getStartLoc(), getContext());
3943           llvm_unreachable("No valid mnemonic for alias?");
3948   // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3949   // InstAlias can't quite handle this since the reg classes aren't
3951   if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3952     // The source register can be Wn here, but the matcher expects a
3953     // GPR64. Twiddle it here if necessary.
3954     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3956       unsigned Reg = getXRegFromWReg(Op.getReg());
3957       Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3958                                               Op.getEndLoc(), getContext());
3961   // FIXME: Likewise for sxt[bh] with a Xd dst operand
3962   else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
3963     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3965         AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3967       // The source register can be Wn here, but the matcher expects a
3968       // GPR64. Twiddle it here if necessary.
3969       AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3971         unsigned Reg = getXRegFromWReg(Op.getReg());
3972         Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3973                                                 Op.getEndLoc(), getContext());
3977   // FIXME: Likewise for uxt[bh] with a Xd dst operand
3978   else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
3979     AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3981         AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3983       // The source register can be Wn here, but the matcher expects a
3984       // GPR32. Twiddle it here if necessary.
3985       AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3987         unsigned Reg = getWRegFromXReg(Op.getReg());
3988         Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3989                                                 Op.getEndLoc(), getContext());
3994   // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
3995   if (NumOperands == 3 && Tok == "fmov") {
3996     AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
3997     AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
// getFPImm() == (unsigned)-1 is the sentinel used for the #0.0 literal here.
3998     if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
4000           !AArch64MCRegisterClasses[AArch64::FPR64RegClassID].contains(
// NOTE(review): the zreg selection (WZR vs XZR) is on elided lines.
4004       Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
4005                                               Op.getEndLoc(), getContext());
4010   // First try to match against the secondary set of tables containing the
4011   // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
4012   unsigned MatchResult =
4013       MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
4015   // If that fails, try against the alternate table containing long-form NEON:
4016   // "fadd v0.2s, v1.2s, v2.2s"
4017   if (MatchResult != Match_Success) {
4018     // But first, save the short-form match result: we can use it in case the
4019     // long-form match also fails.
4020     auto ShortFormNEONErrorInfo = ErrorInfo;
4021     auto ShortFormNEONMatchResult = MatchResult;
4024         MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
4026     // Now, both matches failed, and the long-form match failed on the mnemonic
4027     // suffix token operand. The short-form match failure is probably more
4028     // relevant: use it instead.
4029     if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
4030         Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
4031         ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
4032       MatchResult = ShortFormNEONMatchResult;
4033       ErrorInfo = ShortFormNEONErrorInfo;
4037   switch (MatchResult) {
4038   case Match_Success: {
4039     // Perform range checking and other semantic validations
4040     SmallVector<SMLoc, 8> OperandLocs;
4041     NumOperands = Operands.size();
// Skip index 0 (the mnemonic token): validateInstruction's Loc[i] entries
// correspond to real operands.
4042     for (unsigned i = 1; i < NumOperands; ++i)
4043       OperandLocs.push_back(Operands[i]->getStartLoc());
4044     if (validateInstruction(Inst, OperandLocs))
4048     Out.EmitInstruction(Inst, getSTI());
4051   case Match_MissingFeature: {
4052     assert(ErrorInfo && "Unknown missing feature!");
4053     // Special case the error message for the very common case where only
4054     // a single subtarget feature is missing (neon, e.g.).
4055     std::string Msg = "instruction requires:";
// Walk each bit of ErrorInfo and append the name of every missing feature.
// NOTE(review): the Mask declaration and its shift are on elided lines.
4057     for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
4058       if (ErrorInfo & Mask) {
4060         Msg += getSubtargetFeatureName(ErrorInfo & Mask);
4064     return Error(IDLoc, Msg);
4066   case Match_MnemonicFail:
4067     return showMatchError(IDLoc, MatchResult);
4068   case Match_InvalidOperand: {
4069     SMLoc ErrorLoc = IDLoc;
4071     if (ErrorInfo != ~0ULL) {
4072       if (ErrorInfo >= Operands.size())
4073         return Error(IDLoc, "too few operands for instruction",
4074                      SMRange(IDLoc, getTok().getLoc()));
4076       ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4077       if (ErrorLoc == SMLoc())
4080     // If the match failed on a suffix token operand, tweak the diagnostic
4082     if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
4083         ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
4084       MatchResult = Match_InvalidSuffix;
4086     return showMatchError(ErrorLoc, MatchResult);
4088   case Match_InvalidMemoryIndexed1:
4089   case Match_InvalidMemoryIndexed2:
4090   case Match_InvalidMemoryIndexed4:
4091   case Match_InvalidMemoryIndexed8:
4092   case Match_InvalidMemoryIndexed16:
4093   case Match_InvalidCondCode:
4094   case Match_AddSubRegExtendSmall:
4095   case Match_AddSubRegExtendLarge:
4096   case Match_AddSubSecondSource:
4097   case Match_LogicalSecondSource:
4098   case Match_AddSubRegShift32:
4099   case Match_AddSubRegShift64:
4100   case Match_InvalidMovImm32Shift:
4101   case Match_InvalidMovImm64Shift:
4102   case Match_InvalidFPImm:
4103   case Match_InvalidMemoryWExtend8:
4104   case Match_InvalidMemoryWExtend16:
4105   case Match_InvalidMemoryWExtend32:
4106   case Match_InvalidMemoryWExtend64:
4107   case Match_InvalidMemoryWExtend128:
4108   case Match_InvalidMemoryXExtend8:
4109   case Match_InvalidMemoryXExtend16:
4110   case Match_InvalidMemoryXExtend32:
4111   case Match_InvalidMemoryXExtend64:
4112   case Match_InvalidMemoryXExtend128:
4113   case Match_InvalidMemoryIndexed4SImm7:
4114   case Match_InvalidMemoryIndexed8SImm7:
4115   case Match_InvalidMemoryIndexed16SImm7:
4116   case Match_InvalidMemoryIndexedSImm9:
4117   case Match_InvalidImm0_1:
4118   case Match_InvalidImm0_7:
4119   case Match_InvalidImm0_15:
4120   case Match_InvalidImm0_31:
4121   case Match_InvalidImm0_63:
4122   case Match_InvalidImm0_127:
4123   case Match_InvalidImm0_65535:
4124   case Match_InvalidImm1_8:
4125   case Match_InvalidImm1_16:
4126   case Match_InvalidImm1_32:
4127   case Match_InvalidImm1_64:
4128   case Match_InvalidIndex1:
4129   case Match_InvalidIndexB:
4130   case Match_InvalidIndexH:
4131   case Match_InvalidIndexS:
4132   case Match_InvalidIndexD:
4133   case Match_InvalidLabel:
4136     if (ErrorInfo >= Operands.size())
4137       return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
4138     // Any time we get here, there's nothing fancy to do. Just get the
4139     // operand SMLoc and display the diagnostic.
4140     SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4141     if (ErrorLoc == SMLoc())
4143     return showMatchError(ErrorLoc, MatchResult);
4147   llvm_unreachable("Implement any new match types added!");
4150 /// ParseDirective parses the arm specific directives
// Dispatches AArch64-specific assembler directives to their handlers.
// `.inst` is only accepted on non-MachO/non-COFF (i.e. ELF) targets, and the
// MachO LOH directive is matched by its streamer-provided name.
4151 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
4152   const MCObjectFileInfo::Environment Format =
4153       getContext().getObjectFileInfo()->getObjectFileType();
4154   bool IsMachO = Format == MCObjectFileInfo::IsMachO;
4155   bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
4157   StringRef IDVal = DirectiveID.getIdentifier();
4158   SMLoc Loc = DirectiveID.getLoc();
4159   if (IDVal == ".arch")
4160     parseDirectiveArch(Loc);
4161   else if (IDVal == ".cpu")
4162     parseDirectiveCPU(Loc);
4163   else if (IDVal == ".hword")
4164     parseDirectiveWord(2, Loc);
4165   else if (IDVal == ".word")
4166     parseDirectiveWord(4, Loc);
4167   else if (IDVal == ".xword")
4168     parseDirectiveWord(8, Loc);
4169   else if (IDVal == ".tlsdesccall")
4170     parseDirectiveTLSDescCall(Loc);
4171   else if (IDVal == ".ltorg" || IDVal == ".pool")
4172     parseDirectiveLtorg(Loc);
4173   else if (IDVal == ".unreq")
4174     parseDirectiveUnreq(Loc);
4175   else if (!IsMachO && !IsCOFF) {
4176     if (IDVal == ".inst")
4177       parseDirectiveInst(Loc);
4180   } else if (IDVal == MCLOHDirectiveName())
4181     parseDirectiveLOH(IDVal, Loc);
// Table mapping `.arch`/`.cpu` extension names (e.g. "+crc", "+nolse") to the
// subtarget feature bits they toggle. An empty Features set marks an
// extension the parser recognizes but does not yet support.
4187 static const struct {
4189   const FeatureBitset Features;
4190 } ExtensionMap[] = {
4191   { "crc", {AArch64::FeatureCRC} },
4192   { "crypto", {AArch64::FeatureCrypto} },
4193   { "fp", {AArch64::FeatureFPARMv8} },
4194   { "simd", {AArch64::FeatureNEON} },
4195   { "ras", {AArch64::FeatureRAS} },
4196   { "lse", {AArch64::FeatureLSE} },
4198   // FIXME: Unsupported extensions
4205 /// parseDirectiveArch
// Handles `.arch <name>[+ext[+ext...]]`: validates the architecture name,
// resets the subtarget to that architecture's default features for a
// "generic" CPU, then applies each requested "+ext"/"+noext" toggle via
// ExtensionMap.
4207 bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
4208   SMLoc ArchLoc = getLoc();
4210   StringRef Arch, ExtensionString;
// Split "name+ext1+ext2" at the first '+'; the rest is re-split below.
4211   std::tie(Arch, ExtensionString) =
4212       getParser().parseStringToEndOfStatement().trim().split('+');
4214   unsigned ID = AArch64::parseArch(Arch);
4215   if (ID == static_cast<unsigned>(AArch64::ArchKind::AK_INVALID))
4216     return Error(ArchLoc, "unknown arch name");
4218   if (parseToken(AsmToken::EndOfStatement))
4221   // Get the architecture and extension features.
4222   std::vector<StringRef> AArch64Features;
4223   AArch64::getArchFeatures(ID, AArch64Features);
4224   AArch64::getExtensionFeatures(AArch64::getDefaultExtensions("generic", ID),
4227   MCSubtargetInfo &STI = copySTI();
4228   std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
4229   STI.setDefaultFeatures("generic", join(ArchFeatures.begin(), ArchFeatures.end(), ","));
4231   SmallVector<StringRef, 4> RequestedExtensions;
4232   if (!ExtensionString.empty())
4233     ExtensionString.split(RequestedExtensions, '+');
4235   FeatureBitset Features = STI.getFeatureBits();
4236   for (auto Name : RequestedExtensions) {
4237     bool EnableFeature = true;
// A "no" prefix (e.g. "nocrc") disables the extension instead.
4239     if (Name.startswith_lower("no")) {
4240       EnableFeature = false;
4241       Name = Name.substr(2);
4244     for (const auto &Extension : ExtensionMap) {
4245       if (Extension.Name != Name)
4248       if (Extension.Features.none())
4249         report_fatal_error("unsupported architectural extension: " + Name);
// Only flip the bits that actually change state, so ToggleFeature moves
// the subtarget exactly to the requested configuration.
4251       FeatureBitset ToggleFeatures = EnableFeature
4252                                          ? (~Features & Extension.Features)
4253                                          : ( Features & Extension.Features);
4255           ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
4256       setAvailableFeatures(Features);
4263 /// parseDirectiveCPU
4265 bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
4266 SMLoc CPULoc = getLoc();
4268 StringRef CPU, ExtensionString;
4269 std::tie(CPU, ExtensionString) =
4270 getParser().parseStringToEndOfStatement().trim().split('+');
4272 if (parseToken(AsmToken::EndOfStatement))
4275 SmallVector<StringRef, 4> RequestedExtensions;
4276 if (!ExtensionString.empty())
4277 ExtensionString.split(RequestedExtensions, '+');
4279 // FIXME This is using tablegen data, but should be moved to ARMTargetParser
4280 // once that is tablegen'ed
4281 if (!getSTI().isCPUStringValid(CPU)) {
4282 Error(CPULoc, "unknown CPU name");
4286 MCSubtargetInfo &STI = copySTI();
4287 STI.setDefaultFeatures(CPU, "");
4289 FeatureBitset Features = STI.getFeatureBits();
4290 for (auto Name : RequestedExtensions) {
4291 bool EnableFeature = true;
4293 if (Name.startswith_lower("no")) {
4294 EnableFeature = false;
4295 Name = Name.substr(2);
4298 for (const auto &Extension : ExtensionMap) {
4299 if (Extension.Name != Name)
4302 if (Extension.Features.none())
4303 report_fatal_error("unsupported architectural extension: " + Name);
4305 FeatureBitset ToggleFeatures = EnableFeature
4306 ? (~Features & Extension.Features)
4307 : ( Features & Extension.Features);
4309 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
4310 setAvailableFeatures(Features);
4318 /// parseDirectiveWord
4319 /// ::= .word [ expression (, expression)* ]
4320 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
4321 auto parseOp = [&]() -> bool {
4322 const MCExpr *Value;
4323 if (getParser().parseExpression(Value))
4325 getParser().getStreamer().EmitValue(Value, Size, L);
4329 if (parseMany(parseOp))
4334 /// parseDirectiveInst
4335 /// ::= .inst opcode [, ...]
4336 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
4337 if (getLexer().is(AsmToken::EndOfStatement))
4338 return Error(Loc, "expected expression following '.inst' directive");
4340 auto parseOp = [&]() -> bool {
4343 if (check(getParser().parseExpression(Expr), L, "expected expression"))
4345 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4346 if (check(!Value, L, "expected constant expression"))
4348 getTargetStreamer().emitInst(Value->getValue());
4352 if (parseMany(parseOp))
4353 return addErrorSuffix(" in '.inst' directive");
4357 // parseDirectiveTLSDescCall:
4358 // ::= .tlsdesccall symbol
4359 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4361 if (check(getParser().parseIdentifier(Name), L,
4362 "expected symbol after directive") ||
4363 parseToken(AsmToken::EndOfStatement))
4366 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
4367 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
4368 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
4371 Inst.setOpcode(AArch64::TLSDESCCALL);
4372 Inst.addOperand(MCOperand::createExpr(Expr));
4374 getParser().getStreamer().EmitInstruction(Inst, getSTI());
4378 /// ::= .loh <lohName | lohId> label1, ..., labelN
4379 /// The number of arguments depends on the loh identifier.
4380 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
4382 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4383 if (getParser().getTok().isNot(AsmToken::Integer))
4384 return TokError("expected an identifier or a number in directive");
4385 // We successfully get a numeric value for the identifier.
4386 // Check if it is valid.
4387 int64_t Id = getParser().getTok().getIntVal();
4388 if (Id <= -1U && !isValidMCLOHType(Id))
4389 return TokError("invalid numeric identifier in directive");
4390 Kind = (MCLOHType)Id;
4392 StringRef Name = getTok().getIdentifier();
4393 // We successfully parse an identifier.
4394 // Check if it is a recognized one.
4395 int Id = MCLOHNameToId(Name);
4398 return TokError("invalid identifier in directive");
4399 Kind = (MCLOHType)Id;
4401 // Consume the identifier.
4403 // Get the number of arguments of this LOH.
4404 int NbArgs = MCLOHIdToNbArgs(Kind);
4406 assert(NbArgs != -1 && "Invalid number of arguments");
4408 SmallVector<MCSymbol *, 3> Args;
4409 for (int Idx = 0; Idx < NbArgs; ++Idx) {
4411 if (getParser().parseIdentifier(Name))
4412 return TokError("expected identifier in directive");
4413 Args.push_back(getContext().getOrCreateSymbol(Name));
4415 if (Idx + 1 == NbArgs)
4417 if (parseToken(AsmToken::Comma,
4418 "unexpected token in '" + Twine(IDVal) + "' directive"))
4421 if (parseToken(AsmToken::EndOfStatement,
4422 "unexpected token in '" + Twine(IDVal) + "' directive"))
4425 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4429 /// parseDirectiveLtorg
4430 /// ::= .ltorg | .pool
4431 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
4432 if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
4434 getTargetStreamer().emitCurrentConstantPool();
4438 /// parseDirectiveReq
4439 /// ::= name .req registername
4440 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
4441 MCAsmParser &Parser = getParser();
4442 Parser.Lex(); // Eat the '.req' token.
4443 SMLoc SRegLoc = getLoc();
4444 unsigned RegNum = tryParseRegister();
4445 bool IsVector = false;
4447 if (RegNum == static_cast<unsigned>(-1)) {
4449 RegNum = tryMatchVectorRegister(Kind, false);
4451 return Error(SRegLoc, "vector register without type specifier expected");
4455 if (RegNum == static_cast<unsigned>(-1))
4456 return Error(SRegLoc, "register name or alias expected");
4458 // Shouldn't be anything else.
4459 if (parseToken(AsmToken::EndOfStatement,
4460 "unexpected input in .req directive"))
4463 auto pair = std::make_pair(IsVector, RegNum);
4464 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
4465 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
4470 /// parseDirectiveUneq
4471 /// ::= .unreq registername
4472 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
4473 MCAsmParser &Parser = getParser();
4474 if (getTok().isNot(AsmToken::Identifier))
4475 return TokError("unexpected input in .unreq directive.");
4476 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
4477 Parser.Lex(); // Eat the identifier.
4478 if (parseToken(AsmToken::EndOfStatement))
4479 return addErrorSuffix("in '.unreq' directive");
4484 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
4485 AArch64MCExpr::VariantKind &ELFRefKind,
4486 MCSymbolRefExpr::VariantKind &DarwinRefKind,
4488 ELFRefKind = AArch64MCExpr::VK_INVALID;
4489 DarwinRefKind = MCSymbolRefExpr::VK_None;
4492 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
4493 ELFRefKind = AE->getKind();
4494 Expr = AE->getSubExpr();
4497 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4499 // It's a simple symbol reference with no addend.
4500 DarwinRefKind = SE->getKind();
4504 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4508 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4511 DarwinRefKind = SE->getKind();
4513 if (BE->getOpcode() != MCBinaryExpr::Add &&
4514 BE->getOpcode() != MCBinaryExpr::Sub)
4517 // See if the addend is is a constant, otherwise there's more going
4518 // on here than we can deal with.
4519 auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
4523 Addend = AddendExpr->getValue();
4524 if (BE->getOpcode() == MCBinaryExpr::Sub)
4527 // It's some symbol reference + a constant addend, but really
4528 // shouldn't use both Darwin and ELF syntax.
4529 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
4530 DarwinRefKind == MCSymbolRefExpr::VK_None;
4533 /// Force static initialization.
4534 extern "C" void LLVMInitializeAArch64AsmParser() {
4535 RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
4536 RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
4537 RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
4540 #define GET_REGISTER_MATCHER
4541 #define GET_SUBTARGET_FEATURE_NAME
4542 #define GET_MATCHER_IMPLEMENTATION
4543 #include "AArch64GenAsmMatcher.inc"
4545 // Define this matcher function after the auto-generated include so we
4546 // have the match class enum definitions.
// NOTE(review): the embedded numbering jumps 4547→4549 and 4556→4598 below.
// This listing is missing the second parameter line (`unsigned Kind)`) and
// the entire `switch (Kind)` body that assigns ExpectedVal for each
// fixed-immediate token class (generated MCK_… enumerators). Restore those
// lines from upstream before building; the surviving code is kept
// byte-identical here.
4547 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
4549 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
4550 // If the kind is a token for a literal immediate, check if our asm
4551 // operand matches. This is for InstAliases which have a fixed-value
4552 // immediate in the syntax.
4553 int64_t ExpectedVal;
// `default:` arm of the missing switch: unknown kinds never match.
4556 return Match_InvalidOperand;
// Post-switch checks: the operand must be an immediate...
4598 return Match_InvalidOperand;
4599 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
// ...and must fold to a constant...
4601 return Match_InvalidOperand;
// ...whose value equals the one the token class demands.
4602 if (CE->getValue() == ExpectedVal)
4603 return Match_Success;
4604 return Match_InvalidOperand;
4607 OperandMatchResultTy
4608 AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
4612 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4613 Error(S, "expected register");
4614 return MatchOperand_ParseFail;
4617 int FirstReg = tryParseRegister();
4618 if (FirstReg == -1) {
4619 return MatchOperand_ParseFail;
4621 const MCRegisterClass &WRegClass =
4622 AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
4623 const MCRegisterClass &XRegClass =
4624 AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
4626 bool isXReg = XRegClass.contains(FirstReg),
4627 isWReg = WRegClass.contains(FirstReg);
4628 if (!isXReg && !isWReg) {
4629 Error(S, "expected first even register of a "
4630 "consecutive same-size even/odd register pair");
4631 return MatchOperand_ParseFail;
4634 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4635 unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
4637 if (FirstEncoding & 0x1) {
4638 Error(S, "expected first even register of a "
4639 "consecutive same-size even/odd register pair");
4640 return MatchOperand_ParseFail;
4644 if (getParser().getTok().isNot(AsmToken::Comma)) {
4645 Error(M, "expected comma");
4646 return MatchOperand_ParseFail;
4652 int SecondReg = tryParseRegister();
4653 if (SecondReg ==-1) {
4654 return MatchOperand_ParseFail;
4657 if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
4658 (isXReg && !XRegClass.contains(SecondReg)) ||
4659 (isWReg && !WRegClass.contains(SecondReg))) {
4660 Error(E,"expected second odd register of a "
4661 "consecutive same-size even/odd register pair");
4662 return MatchOperand_ParseFail;
4667 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
4668 &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
4670 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
4671 &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
4674 Operands.push_back(AArch64Operand::CreateReg(Pair, false, S, getLoc(),
4677 return MatchOperand_Success;