1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "MCTargetDesc/AArch64MCTargetDesc.h"
13 #include "MCTargetDesc/AArch64TargetStreamer.h"
14 #include "AArch64InstrInfo.h"
15 #include "Utils/AArch64BaseInfo.h"
16 #include "llvm/ADT/APFloat.h"
17 #include "llvm/ADT/APInt.h"
18 #include "llvm/ADT/ArrayRef.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/ADT/StringExtras.h"
22 #include "llvm/ADT/StringMap.h"
23 #include "llvm/ADT/StringRef.h"
24 #include "llvm/ADT/StringSwitch.h"
25 #include "llvm/ADT/Twine.h"
26 #include "llvm/MC/MCContext.h"
27 #include "llvm/MC/MCExpr.h"
28 #include "llvm/MC/MCInst.h"
29 #include "llvm/MC/MCLinkerOptimizationHint.h"
30 #include "llvm/MC/MCObjectFileInfo.h"
31 #include "llvm/MC/MCParser/MCAsmLexer.h"
32 #include "llvm/MC/MCParser/MCAsmParser.h"
33 #include "llvm/MC/MCParser/MCAsmParserExtension.h"
34 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
35 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
36 #include "llvm/MC/MCRegisterInfo.h"
37 #include "llvm/MC/MCStreamer.h"
38 #include "llvm/MC/MCSubtargetInfo.h"
39 #include "llvm/MC/MCSymbol.h"
40 #include "llvm/MC/MCTargetOptions.h"
41 #include "llvm/MC/SubtargetFeature.h"
42 #include "llvm/Support/Casting.h"
43 #include "llvm/Support/Compiler.h"
44 #include "llvm/Support/ErrorHandling.h"
45 #include "llvm/Support/MathExtras.h"
46 #include "llvm/Support/SMLoc.h"
47 #include "llvm/Support/TargetParser.h"
48 #include "llvm/Support/TargetRegistry.h"
49 #include "llvm/Support/raw_ostream.h"
// NOTE(review): this chunk is a corrupted extraction — original line numbers are
// fused into the text and many lines are missing. Code left byte-identical.
// Describes how a parsed register must relate to the register class expected by
// the matcher (used below as a template default, EqualsReg; other enumerators
// are not visible in this chunk — TODO confirm against the full file).
70 enum RegConstraintEqualityTy {
// Target asm parser for AArch64: turns assembly text into MCInsts.
76 class AArch64AsmParser : public MCTargetAsmParser {
78   StringRef Mnemonic; ///< Instruction mnemonic.
80   // Map of register aliases registers via the .req directive.
81   StringMap<std::pair<RegKind, unsigned>> RegisterReqs;
// Summarises a preceding MOVPRFX instruction so the next instruction can be
// checked for compatibility (destination, predication, element size).
// NOTE(review): several lines of this function (struct header, breaks/return)
// are missing from this chunk.
85   static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
87     switch (Inst.getOpcode()) {
// Unpredicated movprfx: only the destination register is recorded.
88     case AArch64::MOVPRFX_ZZ:
90       Prefix.Dst = Inst.getOperand(0).getReg();
// Merging-predicated movprfx (ZPmZ): predicate is operand 2.
92     case AArch64::MOVPRFX_ZPmZ_B:
93     case AArch64::MOVPRFX_ZPmZ_H:
94     case AArch64::MOVPRFX_ZPmZ_S:
95     case AArch64::MOVPRFX_ZPmZ_D:
97       Prefix.Predicated = true;
// Element size comes from the instruction's TSFlags bits.
98       Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
99       assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
100              "No destructive element size set for movprfx");
101       Prefix.Dst = Inst.getOperand(0).getReg();
102       Prefix.Pg = Inst.getOperand(2).getReg();
// Zeroing-predicated movprfx (ZPzZ): predicate is operand 1.
104     case AArch64::MOVPRFX_ZPzZ_B:
105     case AArch64::MOVPRFX_ZPzZ_H:
106     case AArch64::MOVPRFX_ZPzZ_S:
107     case AArch64::MOVPRFX_ZPzZ_D:
108       Prefix.Active = true;
109       Prefix.Predicated = true;
110       Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
111       assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
112              "No destructive element size set for movprfx");
113       Prefix.Dst = Inst.getOperand(0).getReg();
114       Prefix.Pg = Inst.getOperand(1).getReg();
// Default-constructed state: no active movprfx pending.
123     PrefixInfo() : Active(false), Predicated(false) {}
124     bool isActive() const { return Active; }
125     bool isPredicated() const { return Predicated; }
126     unsigned getElementSize() const {
130     unsigned getDstReg() const { return Dst; }
131     unsigned getPgReg() const {
139     unsigned ElementSize;
// Streamer is created in the constructor below, so the cast is safe — TODO
// confirm (guard at "if (S.getTargetStreamer() == nullptr)").
144   AArch64TargetStreamer &getTargetStreamer() {
145     MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
146     return static_cast<AArch64TargetStreamer &>(TS);
// Location of the token currently being parsed (for diagnostics).
149   SMLoc getLoc() const { return getParser().getTok().getLoc(); }
// --- Instruction/operand parsing helpers (definitions elsewhere in file) ---
// SYS-alias instructions (IC/DC/AT/TLBI style mnemonics).
151   bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
152   void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
// Condition-code string -> enum; parseCondCode can invert (e.g. for csel aliases).
153   AArch64CC::CondCode parseCondCodeString(StringRef Cond);
154   bool parseCondCode(OperandVector &Operands, bool invertCondCode);
// Resolves register names including .req aliases and ABI aliases.
155   unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
156   bool parseRegister(OperandVector &Operands);
// Immediates that may carry relocation modifiers (:lo12: etc.).
157   bool parseSymbolicImmVal(const MCExpr *&ImmVal);
158   bool parseNeonVectorList(OperandVector &Operands);
159   bool parseOptionalMulOperand(OperandVector &Operands);
160   bool parseOperand(OperandVector &Operands, bool isCondCode,
161                     bool invertCondCode);
// Converts a matcher error code into a user-facing diagnostic.
163   bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
164                       OperandVector &Operands);
// --- Directive handlers (.arch, .cpu, .inst, .tlsdesccall, LOH, .ltorg,
// .req/.unreq) ---
166   bool parseDirectiveArch(SMLoc L);
167   bool parseDirectiveCPU(SMLoc L);
168   bool parseDirectiveInst(SMLoc L);
170   bool parseDirectiveTLSDescCall(SMLoc L);
172   bool parseDirectiveLOH(StringRef LOH, SMLoc L);
173   bool parseDirectiveLtorg(SMLoc L);
175   bool parseDirectiveReq(StringRef Name, SMLoc L);
176   bool parseDirectiveUnreq(SMLoc L);
// Post-match semantic checks (e.g. movprfx pairing) before emission.
178   bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
179                            SmallVectorImpl<SMLoc> &Loc);
180   bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
181                                OperandVector &Operands, MCStreamer &Out,
183                                bool MatchingInlineAsm) override;
184 /// @name Auto-generated Match Functions
// TableGen-generated matcher declarations.
187 #define GET_ASSEMBLER_HEADER
188 #include "AArch64GenAsmMatcher.inc"
// --- Custom operand parsers invoked by the generated matcher. Each returns
// Match/NoMatch/ParseFail via OperandMatchResultTy. ---
192   OperandMatchResultTy tryParseScalarRegister(unsigned &Reg);
193   OperandMatchResultTy tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
195   OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
196   OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
197   OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
198   OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
199   OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
// SVE prefetch hints use a different name table than scalar PRFM hints.
200   template <bool IsSVEPrefetch = false>
201   OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
202   OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
203   OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
204   OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
// AddFPZeroAsLiteral: whether "0.0" is accepted and stored as a literal token.
205   template<bool AddFPZeroAsLiteral>
206   OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
207   OperandMatchResultTy tryParseImmWithOptionalShift(OperandVector &Operands);
208   OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
209   bool tryParseNeonVectorRegister(OperandVector &Operands);
210   OperandMatchResultTy tryParseVectorIndex(OperandVector &Operands);
211   OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
// EqTy relaxes the register-equality constraint for GPR64as32/GPR32as64 forms.
212   template <bool ParseShiftExtend,
213             RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
214   OperandMatchResultTy tryParseGPROperand(OperandVector &Operands);
215   template <bool ParseShiftExtend, bool ParseSuffix>
216   OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
217   OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
218   template <RegKind VectorKind>
219   OperandMatchResultTy tryParseVectorList(OperandVector &Operands,
220                                           bool ExpectMatch = false);
221   OperandMatchResultTy tryParseSVEPattern(OperandVector &Operands);
// Target-specific match-result codes, appended after the generic ones.
224   enum AArch64MatchResultTy {
225     Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
226 #define GET_OPERAND_DIAGNOSTIC_TYPES
227 #include "AArch64GenAsmMatcher.inc"
// Constructor: records ABI (ilp32), ensures a target streamer exists, aliases
// the AArch64 data directives, and computes the available feature set.
231   AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
232                    const MCInstrInfo &MII, const MCTargetOptions &Options)
233     : MCTargetAsmParser(Options, STI, MII) {
234     IsILP32 = Options.getABIName() == "ilp32";
235     MCAsmParserExtension::Initialize(Parser);
236     MCStreamer &S = getParser().getStreamer();
// Lazily attach a target streamer so getTargetStreamer()'s cast is valid.
237     if (S.getTargetStreamer() == nullptr)
238       new AArch64TargetStreamer(S);
240     // Alias .hword/.word/xword to the target-independent .2byte/.4byte/.8byte
241     // directives as they have the same form and semantics:
242     ///  ::= (.hword | .word | .xword ) [ expression (, expression)* ]
243     Parser.addAliasForDirective(".hword", ".2byte");
244     Parser.addAliasForDirective(".word", ".4byte");
245     Parser.addAliasForDirective(".xword", ".8byte");
247     // Initialize the set of available features.
248     setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
// --- MCTargetAsmParser interface overrides ---
// Custom register-equality check (needed for the GPR64as32/GPR32as64 cases).
251   bool regsEqual(const MCParsedAsmOperand &Op1,
252                  const MCParsedAsmOperand &Op2) const override;
253   bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
254                         SMLoc NameLoc, OperandVector &Operands) override;
255   bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
256   bool ParseDirective(AsmToken DirectiveID) override;
257   unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
258                                       unsigned Kind) override;
// Decomposes a symbolic expression into its ELF/Darwin relocation modifier
// and addend; shared with AArch64Operand's predicates below.
260   static bool classifySymbolRef(const MCExpr *Expr,
261                                 AArch64MCExpr::VariantKind &ELFRefKind,
262                                 MCSymbolRefExpr::VariantKind &DarwinRefKind,
266 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
// Tagged union: Kind selects which of the struct members below is live.
// NOTE(review): several member-struct bodies are truncated in this chunk.
268 class AArch64Operand : public MCParsedAsmOperand {
287   SMLoc StartLoc, EndLoc;
292     bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
295     // Separate shift/extend operand.
296     struct ShiftExtendOp {
297       AArch64_AM::ShiftExtendType Type;
299       bool HasExplicitAmount;
307     // The register may be allowed as a different register class,
308     // e.g. for GPR64as32 or GPR32as64.
309     RegConstraintEqualityTy EqualityTy;
311     // In some cases the shift/extend needs to be explicitly parsed together
312     // with the register, rather than as a separate operand. This is needed
313     // for addressing modes where the instruction as a whole dictates the
314     // scaling/extend, rather than specific bits in the instruction.
315     // By parsing them as a single operand, we avoid the need to pass an
316     // extra operand in all CodeGen patterns (because all operands need to
317     // have an associated value), and we avoid the need to update TableGen to
318     // accept operands that have no associated bits in the instruction.
320     // An added benefit of parsing them together is that the assembler
321     // can give a sensible diagnostic if the scaling is not correct.
323     // The default is 'lsl #0' (HasExplicitAmount = false) if no
324     // ShiftExtend is specified.
325     ShiftExtendOp ShiftExtend;
// Register list (e.g. NEON/SVE multi-register operands).
328   struct VectorListOp {
331     unsigned NumElements;
332     unsigned ElementWidth;
333     RegKind RegisterKind;
336   struct VectorIndexOp {
// Immediate with a left-shift amount (e.g. "#1, lsl #12").
344   struct ShiftedImmOp {
346     unsigned ShiftAmount;
350     AArch64CC::CondCode Code;
354     uint64_t Val; // APFloat value bitcasted to uint64_t.
355     bool IsExact; // describes whether parsed value was exact.
361     unsigned Val; // Not the enum since not all values have names.
369     uint32_t PStateField;
// One member per operand kind; exactly one is active, selected by Kind.
395     struct VectorListOp VectorList;
396     struct VectorIndexOp VectorIndex;
398     struct ShiftedImmOp ShiftedImm;
399     struct CondCodeOp CondCode;
400     struct FPImmOp FPImm;
401     struct BarrierOp Barrier;
402     struct SysRegOp SysReg;
403     struct SysCRImmOp SysCRImm;
404     struct PrefetchOp Prefetch;
405     struct PSBHintOp PSBHint;
406     struct ShiftExtendOp ShiftExtend;
409   // Keep the MCContext around as the MCExprs may need manipulated during
410   // the add<>Operands() calls.
// Kind-tagged construction; copy ctor copies only the member that is live
// for o.Kind (the selecting switch lines are missing from this chunk).
414   AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
416   AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
418     StartLoc = o.StartLoc;
428       ShiftedImm = o.ShiftedImm;
431       CondCode = o.CondCode;
443       VectorList = o.VectorList;
446       VectorIndex = o.VectorIndex;
452       SysCRImm = o.SysCRImm;
455       Prefetch = o.Prefetch;
461       ShiftExtend = o.ShiftExtend;
466   /// getStartLoc - Get the location of the first token of this operand.
467   SMLoc getStartLoc() const override { return StartLoc; }
468   /// getEndLoc - Get the location of the last token of this operand.
469   SMLoc getEndLoc() const override { return EndLoc; }
// --- Kind-checked accessors: each asserts the union member it reads is live ---
471   StringRef getToken() const {
472     assert(Kind == k_Token && "Invalid access!");
473     return StringRef(Tok.Data, Tok.Length);
476   bool isTokenSuffix() const {
477     assert(Kind == k_Token && "Invalid access!");
481   const MCExpr *getImm() const {
482     assert(Kind == k_Immediate && "Invalid access!");
486   const MCExpr *getShiftedImmVal() const {
487     assert(Kind == k_ShiftedImm && "Invalid access!");
488     return ShiftedImm.Val;
491   unsigned getShiftedImmShift() const {
492     assert(Kind == k_ShiftedImm && "Invalid access!");
493     return ShiftedImm.ShiftAmount;
496   AArch64CC::CondCode getCondCode() const {
497     assert(Kind == k_CondCode && "Invalid access!");
498     return CondCode.Code;
// Reconstructs the APFloat from the stored bit pattern (see FPImmOp::Val).
501   APFloat getFPImm() const {
502     assert (Kind == k_FPImm && "Invalid access!");
503     return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
506   bool getFPImmIsExact() const {
507     assert (Kind == k_FPImm && "Invalid access!");
508     return FPImm.IsExact;
511   unsigned getBarrier() const {
512     assert(Kind == k_Barrier && "Invalid access!");
516   StringRef getBarrierName() const {
517     assert(Kind == k_Barrier && "Invalid access!");
518     return StringRef(Barrier.Data, Barrier.Length);
521   unsigned getReg() const override {
522     assert(Kind == k_Register && "Invalid access!");
526   RegConstraintEqualityTy getRegEqualityTy() const {
527     assert(Kind == k_Register && "Invalid access!");
528     return Reg.EqualityTy;
531   unsigned getVectorListStart() const {
532     assert(Kind == k_VectorList && "Invalid access!");
533     return VectorList.RegNum;
536   unsigned getVectorListCount() const {
537     assert(Kind == k_VectorList && "Invalid access!");
538     return VectorList.Count;
541   unsigned getVectorIndex() const {
542     assert(Kind == k_VectorIndex && "Invalid access!");
543     return VectorIndex.Val;
546   StringRef getSysReg() const {
547     assert(Kind == k_SysReg && "Invalid access!");
548     return StringRef(SysReg.Data, SysReg.Length);
551   unsigned getSysCR() const {
552     assert(Kind == k_SysCR && "Invalid access!");
556   unsigned getPrefetch() const {
557     assert(Kind == k_Prefetch && "Invalid access!");
561   unsigned getPSBHint() const {
562     assert(Kind == k_PSBHint && "Invalid access!");
566   StringRef getPSBHintName() const {
567     assert(Kind == k_PSBHint && "Invalid access!");
568     return StringRef(PSBHint.Data, PSBHint.Length);
571   StringRef getPrefetchName() const {
572     assert(Kind == k_Prefetch && "Invalid access!");
573     return StringRef(Prefetch.Data, Prefetch.Length);
// Shift/extend info may live either on a standalone k_ShiftExtend operand or
// be folded into a k_Register operand (see Reg.ShiftExtend above).
576   AArch64_AM::ShiftExtendType getShiftExtendType() const {
577     if (Kind == k_ShiftExtend)
578       return ShiftExtend.Type;
579     if (Kind == k_Register)
580       return Reg.ShiftExtend.Type;
581     llvm_unreachable("Invalid access!");
584   unsigned getShiftExtendAmount() const {
585     if (Kind == k_ShiftExtend)
586       return ShiftExtend.Amount;
587     if (Kind == k_Register)
588       return Reg.ShiftExtend.Amount;
589     llvm_unreachable("Invalid access!");
592   bool hasShiftExtendAmount() const {
593     if (Kind == k_ShiftExtend)
594       return ShiftExtend.HasExplicitAmount;
595     if (Kind == k_Register)
596       return Reg.ShiftExtend.HasExplicitAmount;
597     llvm_unreachable("Invalid access!");
600   bool isImm() const override { return Kind == k_Immediate; }
// Memory operands are modelled as register+shift/extend, never as k_Mem.
601   bool isMem() const override { return false; }
// Unsigned 6-bit immediate: constant in [0, 64).
603   bool isUImm6() const {
606     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
609     int64_t Val = MCE->getValue();
610     return (Val >= 0 && Val < 64);
// Width-bit signed immediate == signed, scale-1 scaled immediate.
613   template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }
615   template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
616     return isImmScaled<Bits, Scale>(true);
619   template <int Bits, int Scale> DiagnosticPredicate isUImmScaled() const {
620     return isImmScaled<Bits, Scale>(false);
// Shared range check: value must be a multiple of Scale within the signed or
// unsigned Bits-bit range (the unsigned MinVal assignment is missing here).
623   template <int Bits, int Scale>
624   DiagnosticPredicate isImmScaled(bool Signed) const {
626       return DiagnosticPredicateTy::NoMatch;
628     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
630       return DiagnosticPredicateTy::NoMatch;
632     int64_t MinVal, MaxVal;
634       int64_t Shift = Bits - 1;
635       MinVal = (int64_t(1) << Shift) * -Scale;
636       MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
639       MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
642     int64_t Val = MCE->getValue();
643     if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
644       return DiagnosticPredicateTy::Match;
// NearMatch: right operand kind but out of range — enables a better diagnostic.
646     return DiagnosticPredicateTy::NearMatch;
// SVE predicate-pattern immediate: constant in [0, 32).
649   DiagnosticPredicate isSVEPattern() const {
651       return DiagnosticPredicateTy::NoMatch;
652     auto *MCE = dyn_cast<MCConstantExpr>(getImm());
654       return DiagnosticPredicateTy::NoMatch;
655     int64_t Val = MCE->getValue();
656     if (Val >= 0 && Val < 32)
657       return DiagnosticPredicateTy::Match;
658     return DiagnosticPredicateTy::NearMatch;
// True if Expr is a relocation-modified symbol usable as a scaled unsigned
// 12-bit load/store offset (the :lo12:-family of modifiers).
661   bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
662     AArch64MCExpr::VariantKind ELFRefKind;
663     MCSymbolRefExpr::VariantKind DarwinRefKind;
665     if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
667       // If we don't understand the expression, assume the best and
668       // let the fixup and relocation code deal with it.
672     if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
673         ELFRefKind == AArch64MCExpr::VK_LO12 ||
674         ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
675         ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
676         ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
677         ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
678         ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
679         ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
680         ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
681         ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
682         ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) {
683       // Note that we don't range-check the addend. It's adjusted modulo page
684       // size when converted, so there is no "out of range" condition when using
686       return Addend >= 0 && (Addend % Scale) == 0;
687     } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
688                DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
689       // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
// Unsigned 12-bit offset, scaled by the access size: constant case requires
// Val/Scale in [0, 0x1000); non-constant falls back to the symbolic check.
696   template <int Scale> bool isUImm12Offset() const {
700     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
702       return isSymbolicUImm12Offset(getImm(), Scale);
704     int64_t Val = MCE->getValue();
705     return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
// Constant immediate in the closed range [N, M].
708   template <int N, int M>
709   bool isImmInRange() const {
712     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
715     int64_t Val = MCE->getValue();
716     return (Val >= N && Val <= M);
719   // NOTE: Also used for isLogicalImmNot as anything that can be represented as
720   // a logical immediate can always be represented when inverted.
721   template <typename T>
722   bool isLogicalImm() const {
725     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
// Value must fit in T whether read as signed or unsigned.
729     int64_t Val = MCE->getValue();
730     int64_t SVal = typename std::make_signed<T>::type(Val);
731     int64_t UVal = typename std::make_unsigned<T>::type(Val);
732     if (Val != SVal && Val != UVal)
735     return AArch64_AM::isLogicalImmediate(UVal, sizeof(T) * 8);
738   bool isShiftedImm() const { return Kind == k_ShiftedImm; }
740   /// Returns the immediate value as a pair of (imm, shift) if the immediate is
741   /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
742   /// immediate that can be shifted by 'Shift'.
743   template <unsigned Width>
744   Optional<std::pair<int64_t, unsigned> > getShiftedVal() const {
745     if (isShiftedImm() && Width == getShiftedImmShift())
746       if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
747         return std::make_pair(CE->getValue(), Width);
750     if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
751       int64_t Val = CE->getValue();
// Fold an unshifted constant into shifted form when its low Width bits are 0.
752       if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
753         return std::make_pair(Val >> Width, Width);
755       return std::make_pair(Val, 0u);
// ADD/SUB immediate: symbolic :lo12:-style modifier, or a constant that fits
// 12 bits after an optional 'lsl #12'.
761   bool isAddSubImm() const {
762     if (!isShiftedImm() && !isImm())
767     // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
768     if (isShiftedImm()) {
769       unsigned Shift = ShiftedImm.ShiftAmount;
770       Expr = ShiftedImm.Val;
771       if (Shift != 0 && Shift != 12)
777     AArch64MCExpr::VariantKind ELFRefKind;
778     MCSymbolRefExpr::VariantKind DarwinRefKind;
780     if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
781                                             DarwinRefKind, Addend)) {
782       return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
783           || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
784           || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
785           || ELFRefKind == AArch64MCExpr::VK_LO12
786           || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
787           || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
788           || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
789           || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
790           || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
791           || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
792           || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12
793           || ELFRefKind == AArch64MCExpr::VK_SECREL_HI12
794           || ELFRefKind == AArch64MCExpr::VK_SECREL_LO12;
797     // If it's a constant, it should be a real immediate in range.
798     if (auto ShiftedVal = getShiftedVal<12>())
799       return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
801     // If it's an expression, we hope for the best and let the fixup/relocation
802     // code deal with it.
// Negative ADD/SUB immediate: magnitude must fit 12 bits (for add<->sub alias).
806   bool isAddSubImmNeg() const {
807     if (!isShiftedImm() && !isImm())
810     // Otherwise it should be a real negative immediate in range.
811     if (auto ShiftedVal = getShiftedVal<12>())
812       return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
817   // Signed value in the range -128 to +127. For element widths of
818   // 16 bits or higher it may also be a signed multiple of 256 in the
819   // range -32768 to +32512.
820   // For element-width of 8 bits a range of -128 to 255 is accepted,
821   // since a copy of a byte can be either signed/unsigned.
822   template <typename T>
823   DiagnosticPredicate isSVECpyImm() const {
824     if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
825       return DiagnosticPredicateTy::NoMatch;
828         std::is_same<int8_t, typename std::make_signed<T>::type>::value;
// Byte elements cannot take the 'lsl #8' form (hence the IsByte exclusion).
829     if (auto ShiftedImm = getShiftedVal<8>())
830       if (!(IsByte && ShiftedImm->second) &&
831           AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
832                                      << ShiftedImm->second))
833         return DiagnosticPredicateTy::Match;
835     return DiagnosticPredicateTy::NearMatch;
838   // Unsigned value in the range 0 to 255. For element widths of
839   // 16 bits or higher it may also be a signed multiple of 256 in the
841   template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
842     if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
843       return DiagnosticPredicateTy::NoMatch;
846        std::is_same<int8_t, typename std::make_signed<T>::type>::value;
847     if (auto ShiftedImm = getShiftedVal<8>())
848       if (!(IsByte && ShiftedImm->second) &&
849           AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
850                                         << ShiftedImm->second))
851         return DiagnosticPredicateTy::Match;
853     return DiagnosticPredicateTy::NearMatch;
// Prefer DUP (cpy) encoding over the logical-immediate form when both apply.
856   template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
857     if (isLogicalImm<T>() && !isSVECpyImm<T>())
858       return DiagnosticPredicateTy::Match;
859     return DiagnosticPredicateTy::NoMatch;
862   bool isCondCode() const { return Kind == k_CondCode; }
// FMOV-style 8-bit encoded SIMD modified immediate (type 10).
864   bool isSIMDImmType10() const {
867     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
870     return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
// N-bit, 4-byte-aligned PC-relative branch displacement.
874   bool isBranchTarget() const {
877     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
880     int64_t Val = MCE->getValue();
883     assert(N > 0 && "Branch target immediate cannot be 0 bits!");
884     return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
// True if the immediate is a symbol carrying one of the listed ELF MOVW
// relocation modifiers (Darwin modifiers are rejected here).
888   isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
892     AArch64MCExpr::VariantKind ELFRefKind;
893     MCSymbolRefExpr::VariantKind DarwinRefKind;
895     if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
896                                              DarwinRefKind, Addend)) {
899     if (DarwinRefKind != MCSymbolRefExpr::VK_None)
902     for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
903       if (ELFRefKind == AllowedModifiers[i])
// --- Per-half-word MOVZ/MOVK modifier sets (G0..G3, signed/NC variants) ---
910   bool isMovZSymbolG3() const {
911     return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
914   bool isMovZSymbolG2() const {
915     return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
916                          AArch64MCExpr::VK_TPREL_G2,
917                          AArch64MCExpr::VK_DTPREL_G2});
920   bool isMovZSymbolG1() const {
921     return isMovWSymbol({
922         AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
923         AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
924         AArch64MCExpr::VK_DTPREL_G1,
928   bool isMovZSymbolG0() const {
929     return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
930                          AArch64MCExpr::VK_TPREL_G0,
931                          AArch64MCExpr::VK_DTPREL_G0});
934   bool isMovKSymbolG3() const {
935     return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
938   bool isMovKSymbolG2() const {
939     return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
942   bool isMovKSymbolG1() const {
943     return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
944                          AArch64MCExpr::VK_TPREL_G1_NC,
945                          AArch64MCExpr::VK_DTPREL_G1_NC});
948   bool isMovKSymbolG0() const {
950         {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
951          AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
// Constant that a MOV alias can encode as MOVZ/MOVN at the given shift.
954   template<int RegWidth, int Shift>
955   bool isMOVZMovAlias() const {
956     if (!isImm()) return false;
958     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
959     if (!CE) return false;
960     uint64_t Value = CE->getValue();
962     return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
965   template<int RegWidth, int Shift>
966   bool isMOVNMovAlias() const {
967     if (!isImm()) return false;
969     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
970     if (!CE) return false;
971     uint64_t Value = CE->getValue();
973     return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
// FMOV-encodable FP immediate (getFP64Imm returns -1 for unencodable values).
976   bool isFPImm() const {
977     return Kind == k_FPImm &&
978            AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
981   bool isBarrier() const { return Kind == k_Barrier; }
982   bool isSysReg() const { return Kind == k_SysReg; }
// MRS/MSR registers are looked up separately; -1U marks "not valid for this use".
984   bool isMRSSystemRegister() const {
985     if (!isSysReg()) return false;
987     return SysReg.MRSReg != -1U;
990   bool isMSRSystemRegister() const {
991     if (!isSysReg()) return false;
992     return SysReg.MSRReg != -1U;
// PAN/DIT/UAO take a 1-bit immediate; all other PState fields take 4 bits.
995   bool isSystemPStateFieldWithImm0_1() const {
996     if (!isSysReg()) return false;
997     return (SysReg.PStateField == AArch64PState::PAN ||
998             SysReg.PStateField == AArch64PState::DIT ||
999             SysReg.PStateField == AArch64PState::UAO);
1002   bool isSystemPStateFieldWithImm0_15() const {
1003     if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
1004     return SysReg.PStateField != -1U;
1007   bool isReg() const override {
1008     return Kind == k_Register;
1011   bool isScalarReg() const {
1012     return Kind == k_Register && Reg.Kind == RegKind::Scalar;
1015   bool isNeonVectorReg() const {
1016     return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
// "Lo" = restricted to V0-V15 (FPR128_lo), required by some by-element forms.
1019   bool isNeonVectorRegLo() const {
1020     return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1021            AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
// Maps the register class to the SVE RegKind (data Z vs. predicate P) then
// checks class membership.
1025   template <unsigned Class> bool isSVEVectorReg() const {
1028     case AArch64::ZPRRegClassID:
1029     case AArch64::ZPR_3bRegClassID:
1030     case AArch64::ZPR_4bRegClassID:
1031       RK = RegKind::SVEDataVector;
1033     case AArch64::PPRRegClassID:
1034     case AArch64::PPR_3bRegClassID:
1035       RK = RegKind::SVEPredicateVector;
1038       llvm_unreachable("Unsupport register class");
1041     return (Kind == k_Register && Reg.Kind == RK) &&
1042            AArch64MCRegisterClasses[Class].contains(getReg());
// Scalar FP register that will be encoded as a Z register.
1045   template <unsigned Class> bool isFPRasZPR() const {
1046     return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1047            AArch64MCRegisterClasses[Class].contains(getReg());
// SVE predicate register with a specific element width (0 = any width).
1050   template <int ElementWidth, unsigned Class>
1051   DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
1052     if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
1053       return DiagnosticPredicateTy::NoMatch;
1055     if (isSVEVectorReg<Class>() &&
1056            (ElementWidth == 0 || Reg.ElementWidth == ElementWidth))
1057       return DiagnosticPredicateTy::Match;
1059     return DiagnosticPredicateTy::NearMatch;
// SVE data (Z) register with a specific element width (0 = any width).
1062   template <int ElementWidth, unsigned Class>
1063   DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
1064     if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
1065       return DiagnosticPredicateTy::NoMatch;
1067     if (isSVEVectorReg<Class>() &&
1068         (ElementWidth == 0 || Reg.ElementWidth == ElementWidth))
1069       return DiagnosticPredicateTy::Match;
1071     return DiagnosticPredicateTy::NearMatch;
// As above, plus the shift/extend attached to the register must match the
// addressing mode's expected scaling.
1074   template <int ElementWidth, unsigned Class,
1075             AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
1076             bool ShiftWidthAlwaysSame>
1077   DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
1078     auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
1079     if (!VectorMatch.isMatch())
1080       return DiagnosticPredicateTy::NoMatch;
1082     // Give a more specific diagnostic when the user has explicitly typed in
1083     // a shift-amount that does not match what is expected, but for which
1084     // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
1085     bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
1086     if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
1087                         ShiftExtendTy == AArch64_AM::SXTW) &&
1088         !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
1089       return DiagnosticPredicateTy::NoMatch;
1091     if (MatchShift && ShiftExtendTy == getShiftExtendType())
1092       return DiagnosticPredicateTy::Match;
1094     return DiagnosticPredicateTy::NearMatch;
// W register written where the instruction encodes an X register, and vice
// versa (used by instructions whose operand is implicitly the other width).
1097   bool isGPR32as64() const {
1098     return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1099       AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
1102   bool isGPR64as32() const {
1103     return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1104       AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
// Even/odd register pairs used by CASP-style instructions.
1107   bool isWSeqPair() const {
1108     return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1109            AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
1113   bool isXSeqPair() const {
1114     return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1115            AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
// Complex-rotation immediate: multiple-of-Angle (+Remainder), at most 270.
1119   template<int64_t Angle, int64_t Remainder>
1120   DiagnosticPredicate isComplexRotation() const {
1121     if (!isImm()) return DiagnosticPredicateTy::NoMatch;
1123     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1124     if (!CE) return DiagnosticPredicateTy::NoMatch;
1125     uint64_t Value = CE->getValue();
1127     if (Value % Angle == Remainder && Value <= 270)
1128       return DiagnosticPredicateTy::Match;
1129     return DiagnosticPredicateTy::NearMatch;
1132   template <unsigned RegClassID> bool isGPR64() const {
1133     return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1134            AArch64MCRegisterClasses[RegClassID].contains(getReg());
// X register with an 'lsl #log2(ExtWidth/8)' scaling attached.
1137   template <unsigned RegClassID, int ExtWidth>
1138   DiagnosticPredicate isGPR64WithShiftExtend() const {
1139     if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
1140       return DiagnosticPredicateTy::NoMatch;
1142     if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
1143         getShiftExtendAmount() == Log2_32(ExtWidth / 8))
1144       return DiagnosticPredicateTy::Match;
1145     return DiagnosticPredicateTy::NearMatch;
1148 /// Is this a vector list with the type implicit (presumably attached to the
1149 /// instruction itself)?
1150 template <RegKind VectorKind, unsigned NumRegs>
1151 bool isImplicitlyTypedVectorList() const {
1152 return Kind == k_VectorList && VectorList.Count == NumRegs &&
1153 VectorList.NumElements == 0 &&
1154 VectorList.RegisterKind == VectorKind;
1157 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1158 unsigned ElementWidth>
1159 bool isTypedVectorList() const {
1160 if (Kind != k_VectorList)
1162 if (VectorList.Count != NumRegs)
1164 if (VectorList.RegisterKind != VectorKind)
1166 if (VectorList.ElementWidth != ElementWidth)
1168 return VectorList.NumElements == NumElements;
1171 template <int Min, int Max>
1172 DiagnosticPredicate isVectorIndex() const {
1173 if (Kind != k_VectorIndex)
1174 return DiagnosticPredicateTy::NoMatch;
1175 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1176 return DiagnosticPredicateTy::Match;
1177 return DiagnosticPredicateTy::NearMatch;
// Simple operand-kind predicates used by the auto-generated matcher.
1180 bool isToken() const override { return Kind == k_Token; }
1182 bool isTokenEqual(StringRef Str) const {
1183 return Kind == k_Token && getToken() == Str;
1185 bool isSysCR() const { return Kind == k_SysCR; }
1186 bool isPrefetch() const { return Kind == k_Prefetch; }
1187 bool isPSBHint() const { return Kind == k_PSBHint; }
1188 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
/// True iff this is a shift-extend operand using one of the plain shift
/// operators (LSL/LSR/ASR/ROR/MSL), i.e. not a register extend.
1189 bool isShifter() const {
1190 if (!isShiftExtend())
1193 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1194 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1195 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1196 ST == AArch64_AM::MSL);
/// Match iff this FP immediate bit-for-bit equals the value named by the
/// ImmEnum entry in the exact-FP-immediate table; NearMatch for other FPImms.
1199 template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
1200 if (Kind != k_FPImm)
1201 return DiagnosticPredicateTy::NoMatch;
1203 if (getFPImmIsExact()) {
1204 // Lookup the immediate from table of supported immediates.
1205 auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1206 assert(Desc && "Unknown enum value");
1208 // Calculate its FP value.
1209 APFloat RealVal(APFloat::IEEEdouble());
1210 if (RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero) !=
1212 llvm_unreachable("FP immediate is not exact");
1214 if (getFPImm().bitwiseIsEqual(RealVal))
1215 return DiagnosticPredicateTy::Match;
1218 return DiagnosticPredicateTy::NearMatch;
/// Two-candidate variant: Match if the immediate equals either table entry.
/// The assignments to Res preserve NoMatch/NearMatch info from the callee.
1221 template <unsigned ImmA, unsigned ImmB>
1222 DiagnosticPredicate isExactFPImm() const {
1223 DiagnosticPredicate Res = DiagnosticPredicateTy::NoMatch;
1224 if ((Res = isExactFPImm<ImmA>()))
1225 return DiagnosticPredicateTy::Match;
1226 if ((Res = isExactFPImm<ImmB>()))
1227 return DiagnosticPredicateTy::Match;
/// True iff this is a register-extend operand (UXTB..SXTX, or LSL which is
/// an alias in this context) with an extend amount of at most 4.
1231 bool isExtend() const {
1232 if (!isShiftExtend())
1235 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1236 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1237 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1238 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1239 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1240 ET == AArch64_AM::LSL) &&
1241 getShiftExtendAmount() <= 4;
1244 bool isExtend64() const {
1247 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
1248 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1249 return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
/// Extend variants valid for a 64-bit source register: UXTX/SXTX/LSL,
/// again limited to an amount of at most 4.
1252 bool isExtendLSL64() const {
1255 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1256 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1257 ET == AArch64_AM::LSL) &&
1258 getShiftExtendAmount() <= 4;
/// Memory-offset extend with a 64-bit index register: LSL or SXTX, scaled
/// by log2(Width/8) or unscaled (amount 0).
1261 template<int Width> bool isMemXExtend() const {
1264 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1265 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1266 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1267 getShiftExtendAmount() == 0);
/// Memory-offset extend with a 32-bit index register: UXTW or SXTW, scaled
/// by log2(Width/8) or unscaled (amount 0).
1270 template<int Width> bool isMemWExtend() const {
1273 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1274 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1275 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1276 getShiftExtendAmount() == 0);
1279 template <unsigned width>
1280 bool isArithmeticShifter() const {
1284 // An arithmetic shifter is LSL, LSR, or ASR.
1285 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1286 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1287 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1290 template <unsigned width>
1291 bool isLogicalShifter() const {
1295 // A logical shifter is LSL, LSR, ASR or ROR.
1296 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1297 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1298 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1299 getShiftExtendAmount() < width;
1302 bool isMovImm32Shifter() const {
1306 // A 32-bit MOVi shifter is LSL of 0 or 16.
1307 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1308 if (ST != AArch64_AM::LSL)
1310 uint64_t Val = getShiftExtendAmount();
1311 return (Val == 0 || Val == 16);
1314 bool isMovImm64Shifter() const {
1318 // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
1319 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1320 if (ST != AArch64_AM::LSL)
1322 uint64_t Val = getShiftExtendAmount();
1323 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1326 bool isLogicalVecShifter() const {
1330 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1331 unsigned Shift = getShiftExtendAmount();
1332 return getShiftExtendType() == AArch64_AM::LSL &&
1333 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1336 bool isLogicalVecHalfWordShifter() const {
1337 if (!isLogicalVecShifter())
1340 // A logical vector half-word shifter is a left shift by 0 or 8.
1341 unsigned Shift = getShiftExtendAmount();
1342 return getShiftExtendType() == AArch64_AM::LSL &&
1343 (Shift == 0 || Shift == 8);
1346 bool isMoveVecShifter() const {
1347 if (!isShiftExtend())
1350 // A move vector shifter is an MSL left shift by 8 or 16.
1351 unsigned Shift = getShiftExtendAmount();
1352 return getShiftExtendType() == AArch64_AM::MSL &&
1353 (Shift == 8 || Shift == 16);
1356 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1357 // to LDUR/STUR when the offset is not legal for the former but is for
1358 // the latter. As such, in addition to checking for being a legal unscaled
1359 // address, also check that it is not a legal scaled address. This avoids
1360 // ambiguity in the matcher.
1362 bool isSImm9OffsetFB() const {
1363 return isSImm<9>() && !isUImm12Offset<Width / 8>();
/// ADRP target: a page-aligned (multiple of 4096) offset within the signed
/// 21-bit page range, or a non-constant expression (validated at parse time).
1366 bool isAdrpLabel() const {
1367 // Validation was handled during parsing, so we just sanity check that
1368 // something didn't go haywire.
1372 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1373 int64_t Val = CE->getValue();
1374 int64_t Min = - (4096 * (1LL << (21 - 1)));
1375 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1376 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
/// ADR target: a byte offset within the signed 21-bit range, or a
/// non-constant expression (validated at parse time).
1382 bool isAdrLabel() const {
1383 // Validation was handled during parsing, so we just sanity check that
1384 // something didn't go haywire.
1388 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1389 int64_t Val = CE->getValue();
1390 int64_t Min = - (1LL << (21 - 1));
1391 int64_t Max = ((1LL << (21 - 1)) - 1);
1392 return Val >= Min && Val <= Max;
/// Append Expr to Inst: constants become immediate operands (null -> 0),
/// anything else is added as an expression operand.
1398 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1399 // Add as immediates when possible. Null MCExpr = 0.
1401 Inst.addOperand(MCOperand::createImm(0));
1402 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1403 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1405 Inst.addOperand(MCOperand::createExpr(Expr));
1408 void addRegOperands(MCInst &Inst, unsigned N) const {
1409 assert(N == 1 && "Invalid number of operands!");
1410 Inst.addOperand(MCOperand::createReg(getReg()));
/// Add the W register with the same encoding as this X register (maps a
/// GPR64 to the corresponding GPR32 via encoding value).
1413 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1414 assert(N == 1 && "Invalid number of operands!");
1416 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1418 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1419 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1420 RI->getEncodingValue(getReg()));
1422 Inst.addOperand(MCOperand::createReg(Reg));
/// Inverse of the above: add the X register corresponding to this W register.
1425 void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
1426 assert(N == 1 && "Invalid number of operands!");
1428 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));
1430 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1431 uint32_t Reg = RI->getRegClass(AArch64::GPR64RegClassID).getRegister(
1432 RI->getEncodingValue(getReg()));
1434 Inst.addOperand(MCOperand::createReg(Reg));
/// Add the SVE Z register with the same index as this FP register of the
/// given element width (B0/H0/S0/D0/Q0 base selected by Width).
1437 template <int Width>
1438 void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1441 case 8: Base = AArch64::B0; break;
1442 case 16: Base = AArch64::H0; break;
1443 case 32: Base = AArch64::S0; break;
1444 case 64: Base = AArch64::D0; break;
1445 case 128: Base = AArch64::Q0; break;
1447 llvm_unreachable("Unsupported width");
1449 Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
/// Add the D register aliasing the low half of this Q register.
1452 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1453 assert(N == 1 && "Invalid number of operands!");
1455 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1456 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1459 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1460 assert(N == 1 && "Invalid number of operands!");
1462 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1463 Inst.addOperand(MCOperand::createReg(getReg()));
1466 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1467 assert(N == 1 && "Invalid number of operands!");
1468 Inst.addOperand(MCOperand::createReg(getReg()));
// Row selector for the FirstRegs table in addVectorListOperands below.
1471 enum VecListIndexType {
1472 VecListIdx_DReg = 0,
1473 VecListIdx_QReg = 1,
1474 VecListIdx_ZReg = 2,
/// Add the register-tuple operand for a vector list. Column 0 of FirstRegs
/// holds the base single register of the parsed kind; columns 1..4 hold the
/// first tuple register for lists of that many registers.
1477 template <VecListIndexType RegTy, unsigned NumRegs>
1478 void addVectorListOperands(MCInst &Inst, unsigned N) const {
1479 assert(N == 1 && "Invalid number of operands!");
1480 static const unsigned FirstRegs[][5] = {
1481 /* DReg */ { AArch64::Q0,
1482 AArch64::D0, AArch64::D0_D1,
1483 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1484 /* QReg */ { AArch64::Q0,
1485 AArch64::Q0, AArch64::Q0_Q1,
1486 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1487 /* ZReg */ { AArch64::Z0,
1488 AArch64::Z0, AArch64::Z0_Z1,
1489 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
1492 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
1493 " NumRegs must be <= 4 for ZRegs");
1495 unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1496 Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
1497 FirstRegs[(unsigned)RegTy][0]));
1500 void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1501 assert(N == 1 && "Invalid number of operands!");
1502 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
/// Encode which of the two permitted exact FP immediates was written:
/// emits 1 if the value matches the ImmIs1 table entry, else 0.
1505 template <unsigned ImmIs0, unsigned ImmIs1>
1506 void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1507 assert(N == 1 && "Invalid number of operands!");
1508 assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
1509 Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
1512 void addImmOperands(MCInst &Inst, unsigned N) const {
1513 assert(N == 1 && "Invalid number of operands!");
1514 // If this is a pageoff symrefexpr with an addend, adjust the addend
1515 // to be only the page-offset portion. Otherwise, just add the expr
1517 addExpr(Inst, getImm());
/// Emit (value, shift) operand pair: prefer a representation shifted by
/// Shift, fall back to an explicitly written shifted-imm, else shift 0.
1520 template <int Shift>
1521 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1522 assert(N == 2 && "Invalid number of operands!");
1523 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1524 Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1525 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1526 } else if (isShiftedImm()) {
1527 addExpr(Inst, getShiftedImmVal());
1528 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1530 addExpr(Inst, getImm());
1531 Inst.addOperand(MCOperand::createImm(0));
/// As above but negates the value (e.g. for SUB aliases of ADD).
1535 template <int Shift>
1536 void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1537 assert(N == 2 && "Invalid number of operands!");
1538 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1539 Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
1540 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1542 llvm_unreachable("Not a shifted negative immediate");
1545 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1546 assert(N == 1 && "Invalid number of operands!");
1547 Inst.addOperand(MCOperand::createImm(getCondCode()));
/// Constant ADRP targets are emitted in pages (>> 12); symbolic targets
/// are passed through for the fixup machinery.
1550 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1551 assert(N == 1 && "Invalid number of operands!");
1552 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1554 addExpr(Inst, getImm());
1556 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1559 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1560 addImmOperands(Inst, N);
/// Unsigned 12-bit offset, divided by the access size (Scale) on emission.
1564 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1565 assert(N == 1 && "Invalid number of operands!");
1566 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1569 Inst.addOperand(MCOperand::createExpr(getImm()));
1572 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1575 void addUImm6Operands(MCInst &Inst, unsigned N) const {
1576 assert(N == 1 && "Invalid number of operands!");
1577 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1578 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1581 template <int Scale>
1582 void addImmScaledOperands(MCInst &Inst, unsigned N) const {
1583 assert(N == 1 && "Invalid number of operands!");
1584 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1585 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
/// Encode a bitmask-immediate of width sizeof(T)*8 into the hardware
/// (N:immr:imms) logical-immediate encoding.
1588 template <typename T>
1589 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
1590 assert(N == 1 && "Invalid number of operands!");
1591 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1592 typename std::make_unsigned<T>::type Val = MCE->getValue();
1593 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1594 Inst.addOperand(MCOperand::createImm(encoding));
/// Same, but encodes the bitwise complement (for BIC-style aliases).
1597 template <typename T>
1598 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
1599 assert(N == 1 && "Invalid number of operands!");
1600 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1601 typename std::make_unsigned<T>::type Val = ~MCE->getValue();
1602 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
1603 Inst.addOperand(MCOperand::createImm(encoding));
1606 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1607 assert(N == 1 && "Invalid number of operands!");
1608 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1609 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1610 Inst.addOperand(MCOperand::createImm(encoding));
1613 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1614 // Branch operands don't encode the low bits, so shift them off
1615 // here. If it's a label, however, just put it on directly as there's
1616 // not enough information now to do anything.
1617 assert(N == 1 && "Invalid number of operands!");
1618 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1620 addExpr(Inst, getImm());
1623 assert(MCE && "Invalid constant immediate operand!");
1624 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1627 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1628 // Branch operands don't encode the low bits, so shift them off
1629 // here. If it's a label, however, just put it on directly as there's
1630 // not enough information now to do anything.
1631 assert(N == 1 && "Invalid number of operands!");
1632 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1634 addExpr(Inst, getImm());
1637 assert(MCE && "Invalid constant immediate operand!");
1638 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1641 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1642 // Branch operands don't encode the low bits, so shift them off
1643 // here. If it's a label, however, just put it on directly as there's
1644 // not enough information now to do anything.
1645 assert(N == 1 && "Invalid number of operands!");
1646 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1648 addExpr(Inst, getImm());
1651 assert(MCE && "Invalid constant immediate operand!");
1652 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1655 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1656 assert(N == 1 && "Invalid number of operands!");
1657 Inst.addOperand(MCOperand::createImm(
1658 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
1661 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1662 assert(N == 1 && "Invalid number of operands!");
1663 Inst.addOperand(MCOperand::createImm(getBarrier()))
1666 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1667 assert(N == 1 && "Invalid number of operands!");
1669 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1672 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1673 assert(N == 1 && "Invalid number of operands!");
1675 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1678 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1679 assert(N == 1 && "Invalid number of operands!");
1681 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1684 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1685 assert(N == 1 && "Invalid number of operands!");
1687 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1690 void addSysCROperands(MCInst &Inst, unsigned N) const {
1691 assert(N == 1 && "Invalid number of operands!");
1692 Inst.addOperand(MCOperand::createImm(getSysCR()));
1695 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1696 assert(N == 1 && "Invalid number of operands!");
1697 Inst.addOperand(MCOperand::createImm(getPrefetch()));
1700 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
1701 assert(N == 1 && "Invalid number of operands!");
1702 Inst.addOperand(MCOperand::createImm(getPSBHint()));
1705 void addShifterOperands(MCInst &Inst, unsigned N) const {
1706 assert(N == 1 && "Invalid number of operands!");
1708 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1709 Inst.addOperand(MCOperand::createImm(Imm));
/// LSL written on a 32-bit arithmetic extend means UXTW.
1712 void addExtendOperands(MCInst &Inst, unsigned N) const {
1713 assert(N == 1 && "Invalid number of operands!");
1714 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1715 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1716 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1717 Inst.addOperand(MCOperand::createImm(Imm));
/// LSL written on a 64-bit arithmetic extend means UXTX.
1720 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1721 assert(N == 1 && "Invalid number of operands!");
1722 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1723 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1724 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1725 Inst.addOperand(MCOperand::createImm(Imm));
/// Emits (signed?, shifted?) flag pair for register-offset loads/stores.
1728 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1729 assert(N == 2 && "Invalid number of operands!");
1730 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1731 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1732 Inst.addOperand(MCOperand::createImm(IsSigned));
1733 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1736 // For 8-bit load/store instructions with a register offset, both the
1737 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1738 // they're disambiguated by whether the shift was explicit or implicit rather
1740 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1741 assert(N == 2 && "Invalid number of operands!");
1742 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1743 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1744 Inst.addOperand(MCOperand::createImm(IsSigned));
1745 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
/// MOVZ alias: emit the 16-bit chunk of the constant selected by Shift.
1749 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1750 assert(N == 1 && "Invalid number of operands!");
1752 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1753 uint64_t Value = CE->getValue();
1754 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
/// MOVN alias: emit the selected 16-bit chunk of the complemented constant.
1758 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1759 assert(N == 1 && "Invalid number of operands!");
1761 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1762 uint64_t Value = CE->getValue();
1763 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
/// Complex rotation in multiples of 90 degrees -> index 0..3.
1766 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
1767 assert(N == 1 && "Invalid number of operands!");
1768 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1769 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
/// Complex rotation of 90 or 270 degrees -> index 0 or 1.
1772 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
1773 assert(N == 1 && "Invalid number of operands!");
1774 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1775 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
1778 void print(raw_ostream &OS) const override;
// Static factory methods: each builds an AArch64Operand of one Kind with the
// corresponding union member filled in.
1780 static std::unique_ptr<AArch64Operand>
1781 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1782 auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1783 Op->Tok.Data = Str.data();
1784 Op->Tok.Length = Str.size();
1785 Op->Tok.IsSuffix = IsSuffix;
1791 static std::unique_ptr<AArch64Operand>
1792 CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
1793 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
1794 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1795 unsigned ShiftAmount = 0,
1796 unsigned HasExplicitAmount = false) {
1797 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1798 Op->Reg.RegNum = RegNum;
1799 Op->Reg.Kind = Kind;
1800 Op->Reg.ElementWidth = 0;
1801 Op->Reg.EqualityTy = EqTy;
1802 Op->Reg.ShiftExtend.Type = ExtTy;
1803 Op->Reg.ShiftExtend.Amount = ShiftAmount;
1804 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1810 static std::unique_ptr<AArch64Operand>
1811 CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
1812 SMLoc S, SMLoc E, MCContext &Ctx,
1813 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
1814 unsigned ShiftAmount = 0,
1815 unsigned HasExplicitAmount = false) {
1816 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
1817 Kind == RegKind::SVEPredicateVector) &&
1818 "Invalid vector kind");
1819 auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
1821 Op->Reg.ElementWidth = ElementWidth;
1825 static std::unique_ptr<AArch64Operand>
1826 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1827 unsigned ElementWidth, RegKind RegisterKind, SMLoc S, SMLoc E,
1829 auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1830 Op->VectorList.RegNum = RegNum;
1831 Op->VectorList.Count = Count;
1832 Op->VectorList.NumElements = NumElements;
1833 Op->VectorList.ElementWidth = ElementWidth;
1834 Op->VectorList.RegisterKind = RegisterKind;
1840 static std::unique_ptr<AArch64Operand>
1841 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1842 auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1843 Op->VectorIndex.Val = Idx;
1849 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1850 SMLoc E, MCContext &Ctx) {
1851 auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1858 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1859 unsigned ShiftAmount,
1862 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1863 Op->ShiftedImm .Val = Val;
1864 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1870 static std::unique_ptr<AArch64Operand>
1871 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1872 auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1873 Op->CondCode.Code = Code;
1879 static std::unique_ptr<AArch64Operand>
1880 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
1881 auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1882 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
1883 Op->FPImm.IsExact = IsExact;
1889 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1893 auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1894 Op->Barrier.Val = Val;
1895 Op->Barrier.Data = Str.data();
1896 Op->Barrier.Length = Str.size();
1902 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1905 uint32_t PStateField,
1907 auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1908 Op->SysReg.Data = Str.data();
1909 Op->SysReg.Length = Str.size();
1910 Op->SysReg.MRSReg = MRSReg;
1911 Op->SysReg.MSRReg = MSRReg;
1912 Op->SysReg.PStateField = PStateField;
1918 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1919 SMLoc E, MCContext &Ctx) {
1920 auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1921 Op->SysCRImm.Val = Val;
/// Build a k_Prefetch operand carrying the prfop value and its spelling.
1927 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1931 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1932 Op->Prefetch.Val = Val;
// Previously wrote Op->Barrier.Data/Length; that only worked because
// PrefetchOp and BarrierOp share the same layout inside the union. Write
// the member that matches the operand kind we just constructed.
1933 Op->Prefetch.Data = Str.data();
1934 Op->Prefetch.Length = Str.size();
1940 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
1944 auto Op = make_unique<AArch64Operand>(k_PSBHint, Ctx);
1945 Op->PSBHint.Val = Val;
1946 Op->PSBHint.Data = Str.data();
1947 Op->PSBHint.Length = Str.size();
1953 static std::unique_ptr<AArch64Operand>
1954 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1955 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1956 auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1957 Op->ShiftExtend.Type = ShOp;
1958 Op->ShiftExtend.Amount = Val;
1959 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1966 } // end anonymous namespace.
// Debug/diagnostic dump of an operand, dispatching on its Kind.
1968 void AArch64Operand::print(raw_ostream &OS) const {
1971 OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
1972 if (!getFPImmIsExact())
1977 StringRef Name = getBarrierName();
1979 OS << "<barrier " << Name << ">";
1981 OS << "<barrier invalid #" << getBarrier() << ">";
1987 case k_ShiftedImm: {
1988 unsigned Shift = getShiftedImmShift();
1989 OS << "<shiftedimm ";
1990 OS << *getShiftedImmVal();
1991 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1995 OS << "<condcode " << getCondCode() << ">";
1997 case k_VectorList: {
1998 OS << "<vectorlist ";
1999 unsigned Reg = getVectorListStart();
2000 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2001 OS << Reg + i << " ";
2006 OS << "<vectorindex " << getVectorIndex() << ">";
2009 OS << "<sysreg: " << getSysReg() << '>';
2012 OS << "'" << getToken() << "'";
2015 OS << "c" << getSysCR();
2018 StringRef Name = getPrefetchName();
2020 OS << "<prfop " << Name << ">";
2022 OS << "<prfop invalid #" << getPrefetch() << ">";
2026 OS << getPSBHintName();
2029 OS << "<register " << getReg() << ">";
2030 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2034 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2035 << getShiftExtendAmount();
2036 if (!hasShiftExtendAmount())
2043 /// @name Auto-generated Match Functions
2046 static unsigned MatchRegisterName(StringRef Name);
/// Map a NEON vector register spelling ("v0".."v31", case-insensitive) to
/// the corresponding Q register number.
2050 static unsigned MatchNeonVectorRegName(StringRef Name) {
2051 return StringSwitch<unsigned>(Name.lower())
2052 .Case("v0", AArch64::Q0)
2053 .Case("v1", AArch64::Q1)
2054 .Case("v2", AArch64::Q2)
2055 .Case("v3", AArch64::Q3)
2056 .Case("v4", AArch64::Q4)
2057 .Case("v5", AArch64::Q5)
2058 .Case("v6", AArch64::Q6)
2059 .Case("v7", AArch64::Q7)
2060 .Case("v8", AArch64::Q8)
2061 .Case("v9", AArch64::Q9)
2062 .Case("v10", AArch64::Q10)
2063 .Case("v11", AArch64::Q11)
2064 .Case("v12", AArch64::Q12)
2065 .Case("v13", AArch64::Q13)
2066 .Case("v14", AArch64::Q14)
2067 .Case("v15", AArch64::Q15)
2068 .Case("v16", AArch64::Q16)
2069 .Case("v17", AArch64::Q17)
2070 .Case("v18", AArch64::Q18)
2071 .Case("v19", AArch64::Q19)
2072 .Case("v20", AArch64::Q20)
2073 .Case("v21", AArch64::Q21)
2074 .Case("v22", AArch64::Q22)
2075 .Case("v23", AArch64::Q23)
2076 .Case("v24", AArch64::Q24)
2077 .Case("v25", AArch64::Q25)
2078 .Case("v26", AArch64::Q26)
2079 .Case("v27", AArch64::Q27)
2080 .Case("v28", AArch64::Q28)
2081 .Case("v29", AArch64::Q29)
2082 .Case("v30", AArch64::Q30)
2083 .Case("v31", AArch64::Q31)
2087 /// Returns an optional pair of (#elements, element-width) if Suffix
2088 /// is a valid vector kind. Where the number of elements in a vector
2089 /// or the vector width is implicit or explicitly unknown (but still a
2090 /// valid suffix kind), 0 is used.
2091 static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2092 RegKind VectorKind) {
2093 std::pair<int, int> Res = {-1, -1};
2095 switch (VectorKind) {
2096 case RegKind::NeonVector:
2098 StringSwitch<std::pair<int, int>>(Suffix.lower())
2100 .Case(".1d", {1, 64})
2101 .Case(".1q", {1, 128})
2102 // '.2h' needed for fp16 scalar pairwise reductions
2103 .Case(".2h", {2, 16})
2104 .Case(".2s", {2, 32})
2105 .Case(".2d", {2, 64})
2106 // '.4b' is another special case for the ARMv8.2a dot product
2108 .Case(".4b", {4, 8})
2109 .Case(".4h", {4, 16})
2110 .Case(".4s", {4, 32})
2111 .Case(".8b", {8, 8})
2112 .Case(".8h", {8, 16})
2113 .Case(".16b", {16, 8})
2114 // Accept the width neutral ones, too, for verbose syntax. If those
2115 // aren't used in the right places, the token operand won't match so
2116 // all will work out.
2118 .Case(".h", {0, 16})
2119 .Case(".s", {0, 32})
2120 .Case(".d", {0, 64})
2123 case RegKind::SVEPredicateVector:
2124 case RegKind::SVEDataVector:
2125 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2128 .Case(".h", {0, 16})
2129 .Case(".s", {0, 32})
2130 .Case(".d", {0, 64})
2131 .Case(".q", {0, 128})
2135 llvm_unreachable("Unsupported RegKind");
2138 if (Res == std::make_pair(-1, -1))
2139 return Optional<std::pair<int, int>>();
2141 return Optional<std::pair<int, int>>(Res);
/// True iff Suffix is a recognized vector-type suffix for the given kind.
2144 static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2145 return parseVectorKind(Suffix, VectorKind).hasValue();
/// Map an SVE data vector register spelling ("z0".."z31", case-insensitive)
/// to the corresponding Z register number.
2148 static unsigned matchSVEDataVectorRegName(StringRef Name) {
2149 return StringSwitch<unsigned>(Name.lower())
2150 .Case("z0", AArch64::Z0)
2151 .Case("z1", AArch64::Z1)
2152 .Case("z2", AArch64::Z2)
2153 .Case("z3", AArch64::Z3)
2154 .Case("z4", AArch64::Z4)
2155 .Case("z5", AArch64::Z5)
2156 .Case("z6", AArch64::Z6)
2157 .Case("z7", AArch64::Z7)
2158 .Case("z8", AArch64::Z8)
2159 .Case("z9", AArch64::Z9)
2160 .Case("z10", AArch64::Z10)
2161 .Case("z11", AArch64::Z11)
2162 .Case("z12", AArch64::Z12)
2163 .Case("z13", AArch64::Z13)
2164 .Case("z14", AArch64::Z14)
2165 .Case("z15", AArch64::Z15)
2166 .Case("z16", AArch64::Z16)
2167 .Case("z17", AArch64::Z17)
2168 .Case("z18", AArch64::Z18)
2169 .Case("z19", AArch64::Z19)
2170 .Case("z20", AArch64::Z20)
2171 .Case("z21", AArch64::Z21)
2172 .Case("z22", AArch64::Z22)
2173 .Case("z23", AArch64::Z23)
2174 .Case("z24", AArch64::Z24)
2175 .Case("z25", AArch64::Z25)
2176 .Case("z26", AArch64::Z26)
2177 .Case("z27", AArch64::Z27)
2178 .Case("z28", AArch64::Z28)
2179 .Case("z29", AArch64::Z29)
2180 .Case("z30", AArch64::Z30)
2181 .Case("z31", AArch64::Z31)
/// Map an SVE predicate register spelling ("p0".."p15", case-insensitive)
/// to the corresponding P register number.
2185 static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2186 return StringSwitch<unsigned>(Name.lower())
2187 .Case("p0", AArch64::P0)
2188 .Case("p1", AArch64::P1)
2189 .Case("p2", AArch64::P2)
2190 .Case("p3", AArch64::P3)
2191 .Case("p4", AArch64::P4)
2192 .Case("p5", AArch64::P5)
2193 .Case("p6", AArch64::P6)
2194 .Case("p7", AArch64::P7)
2195 .Case("p8", AArch64::P8)
2196 .Case("p9", AArch64::P9)
2197 .Case("p10", AArch64::P10)
2198 .Case("p11", AArch64::P11)
2199 .Case("p12", AArch64::P12)
2200 .Case("p13", AArch64::P13)
2201 .Case("p14", AArch64::P14)
2202 .Case("p15", AArch64::P15)
// MCTargetAsmParser hook: parse a scalar register; returns true on failure
// (the MC parser convention).
2206 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
2208 StartLoc = getLoc();
2209 auto Res = tryParseScalarRegister(RegNo);
2210 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2211 return Res != MatchOperand_Success;
2214 // Matches a register name or register alias previously defined by '.req'
// Returns 0 when the name matches a register of a different kind than the
// one requested, so the caller can report a kind mismatch.
2215 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2217 unsigned RegNum = 0;
2218 if ((RegNum = matchSVEDataVectorRegName(Name)))
2219 return Kind == RegKind::SVEDataVector ? RegNum : 0;
2221 if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2222 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2224 if ((RegNum = MatchNeonVectorRegName(Name)))
2225 return Kind == RegKind::NeonVector ? RegNum : 0;
2227 // The parsed register must be of RegKind Scalar
2228 if ((RegNum = MatchRegisterName(Name)))
2229 return Kind == RegKind::Scalar ? RegNum : 0;
2232 // Handle a few common aliases of registers.
2233 if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2234 .Case("fp", AArch64::FP)
2235 .Case("lr", AArch64::LR)
2236 .Case("x31", AArch64::XZR)
2237 .Case("w31", AArch64::WZR)
2239 return Kind == RegKind::Scalar ? RegNum : 0;
2241 // Check for aliases registered via .req. Canonicalize to lower case.
2242 // That's more consistent since register names are case insensitive, and
2243 // it's how the original entry was passed in from MC/MCParser/AsmParser.
2244 auto Entry = RegisterReqs.find(Name.lower());
2245 if (Entry == RegisterReqs.end())
2248 // set RegNum if the match is the right kind of register
2249 if (Kind == Entry->getValue().first)
2250 RegNum = Entry->getValue().second;
2255 /// tryParseScalarRegister - Try to parse a register name. The token must be an
2256 /// Identifier when called, and if it is a register name the token is eaten and
2257 /// the register is added to the operand list.
2258 OperandMatchResultTy
2259 AArch64AsmParser::tryParseScalarRegister(unsigned &RegNum) {
2260 MCAsmParser &Parser = getParser();
2261 const AsmToken &Tok = Parser.getTok();
2262 if (Tok.isNot(AsmToken::Identifier))
2263 return MatchOperand_NoMatch;
2265 std::string lowerCase = Tok.getString().lower();
2266 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2268 return MatchOperand_NoMatch;
2271 Parser.Lex(); // Eat identifier token.
2272 return MatchOperand_Success;
2275 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2276 OperandMatchResultTy
2277 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2278 MCAsmParser &Parser = getParser();
2281 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2282 Error(S, "Expected cN operand where 0 <= N <= 15");
2283 return MatchOperand_ParseFail;
2286 StringRef Tok = Parser.getTok().getIdentifier();
2287 if (Tok[0] != 'c' && Tok[0] != 'C') {
2288 Error(S, "Expected cN operand where 0 <= N <= 15");
2289 return MatchOperand_ParseFail;
2293 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2294 if (BadNum || CRNum > 15) {
2295 Error(S, "Expected cN operand where 0 <= N <= 15");
2296 return MatchOperand_ParseFail;
2299 Parser.Lex(); // Eat identifier token.
2301 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2302 return MatchOperand_Success;
2305 /// tryParsePrefetch - Try to parse a prefetch operand.
2306 template <bool IsSVEPrefetch>
2307 OperandMatchResultTy
2308 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2309 MCAsmParser &Parser = getParser();
2311 const AsmToken &Tok = Parser.getTok();
2313 auto LookupByName = [](StringRef N) {
2314 if (IsSVEPrefetch) {
2315 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
2316 return Optional<unsigned>(Res->Encoding);
2317 } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
2318 return Optional<unsigned>(Res->Encoding);
2319 return Optional<unsigned>();
2322 auto LookupByEncoding = [](unsigned E) {
2323 if (IsSVEPrefetch) {
2324 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
2325 return Optional<StringRef>(Res->Name);
2326 } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
2327 return Optional<StringRef>(Res->Name);
2328 return Optional<StringRef>();
2330 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
2332 // Either an identifier for named values or a 5-bit immediate.
2333 // Eat optional hash.
2334 if (parseOptionalToken(AsmToken::Hash) ||
2335 Tok.is(AsmToken::Integer)) {
2336 const MCExpr *ImmVal;
2337 if (getParser().parseExpression(ImmVal))
2338 return MatchOperand_ParseFail;
2340 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2342 TokError("immediate value expected for prefetch operand");
2343 return MatchOperand_ParseFail;
2345 unsigned prfop = MCE->getValue();
2346 if (prfop > MaxVal) {
2347 TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
2349 return MatchOperand_ParseFail;
2352 auto PRFM = LookupByEncoding(MCE->getValue());
2353 Operands.push_back(AArch64Operand::CreatePrefetch(
2354 prfop, PRFM.getValueOr(""), S, getContext()));
2355 return MatchOperand_Success;
2358 if (Tok.isNot(AsmToken::Identifier)) {
2359 TokError("prefetch hint expected");
2360 return MatchOperand_ParseFail;
2363 auto PRFM = LookupByName(Tok.getString());
2365 TokError("prefetch hint expected");
2366 return MatchOperand_ParseFail;
2369 Parser.Lex(); // Eat identifier token.
2370 Operands.push_back(AArch64Operand::CreatePrefetch(
2371 *PRFM, Tok.getString(), S, getContext()));
2372 return MatchOperand_Success;
2375 /// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
2376 OperandMatchResultTy
2377 AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2378 MCAsmParser &Parser = getParser();
2380 const AsmToken &Tok = Parser.getTok();
2381 if (Tok.isNot(AsmToken::Identifier)) {
2382 TokError("invalid operand for instruction");
2383 return MatchOperand_ParseFail;
2386 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2388 TokError("invalid operand for instruction");
2389 return MatchOperand_ParseFail;
2392 Parser.Lex(); // Eat identifier token.
2393 Operands.push_back(AArch64Operand::CreatePSBHint(
2394 PSB->Encoding, Tok.getString(), S, getContext()));
2395 return MatchOperand_Success;
2398 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2400 OperandMatchResultTy
2401 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2402 MCAsmParser &Parser = getParser();
2406 if (Parser.getTok().is(AsmToken::Hash)) {
2407 Parser.Lex(); // Eat hash token.
2410 if (parseSymbolicImmVal(Expr))
2411 return MatchOperand_ParseFail;
2413 AArch64MCExpr::VariantKind ELFRefKind;
2414 MCSymbolRefExpr::VariantKind DarwinRefKind;
2416 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2417 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2418 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2419 // No modifier was specified at all; this is the syntax for an ELF basic
2420 // ADRP relocation (unfortunately).
2422 AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2423 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2424 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2426 Error(S, "gotpage label reference not allowed an addend");
2427 return MatchOperand_ParseFail;
2428 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2429 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2430 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2431 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2432 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2433 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2434 // The operand must be an @page or @gotpage qualified symbolref.
2435 Error(S, "page or gotpage label reference expected");
2436 return MatchOperand_ParseFail;
2440 // We have either a label reference possibly with addend or an immediate. The
2441 // addend is a raw value here. The linker will adjust it to only reference the
2443 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2444 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2446 return MatchOperand_Success;
2449 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2451 OperandMatchResultTy
2452 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2456 const AsmToken &Tok = getParser().getTok();
2457 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
2458 if (getParser().parseExpression(Expr))
2459 return MatchOperand_ParseFail;
2461 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2462 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2464 return MatchOperand_Success;
2466 return MatchOperand_NoMatch;
2469 /// tryParseFPImm - A floating point immediate expression operand.
2470 template<bool AddFPZeroAsLiteral>
2471 OperandMatchResultTy
2472 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2473 MCAsmParser &Parser = getParser();
2476 bool Hash = parseOptionalToken(AsmToken::Hash);
2478 // Handle negation, as that still comes through as a separate token.
2479 bool isNegative = parseOptionalToken(AsmToken::Minus);
2481 const AsmToken &Tok = Parser.getTok();
2482 if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
2484 return MatchOperand_NoMatch;
2485 TokError("invalid floating point immediate");
2486 return MatchOperand_ParseFail;
2489 // Parse hexadecimal representation.
2490 if (Tok.is(AsmToken::Integer) && Tok.getString().startswith("0x")) {
2491 if (Tok.getIntVal() > 255 || isNegative) {
2492 TokError("encoded floating point value out of range");
2493 return MatchOperand_ParseFail;
2496 APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
2498 AArch64Operand::CreateFPImm(F, true, S, getContext()));
2500 // Parse FP representation.
2501 APFloat RealVal(APFloat::IEEEdouble());
2503 RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
2505 RealVal.changeSign();
2507 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
2509 AArch64Operand::CreateToken("#0", false, S, getContext()));
2511 AArch64Operand::CreateToken(".0", false, S, getContext()));
2513 Operands.push_back(AArch64Operand::CreateFPImm(
2514 RealVal, Status == APFloat::opOK, S, getContext()));
2517 Parser.Lex(); // Eat the token.
2519 return MatchOperand_Success;
2522 /// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
2523 /// a shift suffix, for example '#1, lsl #12'.
2524 OperandMatchResultTy
2525 AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
2526 MCAsmParser &Parser = getParser();
2529 if (Parser.getTok().is(AsmToken::Hash))
2530 Parser.Lex(); // Eat '#'
2531 else if (Parser.getTok().isNot(AsmToken::Integer))
2532 // Operand should start from # or should be integer, emit error otherwise.
2533 return MatchOperand_NoMatch;
2536 if (parseSymbolicImmVal(Imm))
2537 return MatchOperand_ParseFail;
2538 else if (Parser.getTok().isNot(AsmToken::Comma)) {
2539 SMLoc E = Parser.getTok().getLoc();
2541 AArch64Operand::CreateImm(Imm, S, E, getContext()));
2542 return MatchOperand_Success;
2548 // The optional operand must be "lsl #N" where N is non-negative.
2549 if (!Parser.getTok().is(AsmToken::Identifier) ||
2550 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2551 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2552 return MatchOperand_ParseFail;
2558 parseOptionalToken(AsmToken::Hash);
2560 if (Parser.getTok().isNot(AsmToken::Integer)) {
2561 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2562 return MatchOperand_ParseFail;
2565 int64_t ShiftAmount = Parser.getTok().getIntVal();
2567 if (ShiftAmount < 0) {
2568 Error(Parser.getTok().getLoc(), "positive shift amount required");
2569 return MatchOperand_ParseFail;
2571 Parser.Lex(); // Eat the number
2573 // Just in case the optional lsl #0 is used for immediates other than zero.
2574 if (ShiftAmount == 0 && Imm != 0) {
2575 SMLoc E = Parser.getTok().getLoc();
2576 Operands.push_back(AArch64Operand::CreateImm(Imm, S, E, getContext()));
2577 return MatchOperand_Success;
2580 SMLoc E = Parser.getTok().getLoc();
2581 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2582 S, E, getContext()));
2583 return MatchOperand_Success;
2586 /// parseCondCodeString - Parse a Condition Code string.
2587 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2588 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2589 .Case("eq", AArch64CC::EQ)
2590 .Case("ne", AArch64CC::NE)
2591 .Case("cs", AArch64CC::HS)
2592 .Case("hs", AArch64CC::HS)
2593 .Case("cc", AArch64CC::LO)
2594 .Case("lo", AArch64CC::LO)
2595 .Case("mi", AArch64CC::MI)
2596 .Case("pl", AArch64CC::PL)
2597 .Case("vs", AArch64CC::VS)
2598 .Case("vc", AArch64CC::VC)
2599 .Case("hi", AArch64CC::HI)
2600 .Case("ls", AArch64CC::LS)
2601 .Case("ge", AArch64CC::GE)
2602 .Case("lt", AArch64CC::LT)
2603 .Case("gt", AArch64CC::GT)
2604 .Case("le", AArch64CC::LE)
2605 .Case("al", AArch64CC::AL)
2606 .Case("nv", AArch64CC::NV)
2607 .Default(AArch64CC::Invalid);
2609 if (CC == AArch64CC::Invalid &&
2610 getSTI().getFeatureBits()[AArch64::FeatureSVE])
2611 CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2612 .Case("none", AArch64CC::EQ)
2613 .Case("any", AArch64CC::NE)
2614 .Case("nlast", AArch64CC::HS)
2615 .Case("last", AArch64CC::LO)
2616 .Case("first", AArch64CC::MI)
2617 .Case("nfrst", AArch64CC::PL)
2618 .Case("pmore", AArch64CC::HI)
2619 .Case("plast", AArch64CC::LS)
2620 .Case("tcont", AArch64CC::GE)
2621 .Case("tstop", AArch64CC::LT)
2622 .Default(AArch64CC::Invalid);
2627 /// parseCondCode - Parse a Condition Code operand.
2628 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2629 bool invertCondCode) {
2630 MCAsmParser &Parser = getParser();
2632 const AsmToken &Tok = Parser.getTok();
2633 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2635 StringRef Cond = Tok.getString();
2636 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2637 if (CC == AArch64CC::Invalid)
2638 return TokError("invalid condition code");
2639 Parser.Lex(); // Eat identifier token.
2641 if (invertCondCode) {
2642 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2643 return TokError("condition codes AL and NV are invalid for this instruction");
2644 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2648 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2652 /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2653 /// them if present.
2654 OperandMatchResultTy
2655 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2656 MCAsmParser &Parser = getParser();
2657 const AsmToken &Tok = Parser.getTok();
2658 std::string LowerID = Tok.getString().lower();
2659 AArch64_AM::ShiftExtendType ShOp =
2660 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2661 .Case("lsl", AArch64_AM::LSL)
2662 .Case("lsr", AArch64_AM::LSR)
2663 .Case("asr", AArch64_AM::ASR)
2664 .Case("ror", AArch64_AM::ROR)
2665 .Case("msl", AArch64_AM::MSL)
2666 .Case("uxtb", AArch64_AM::UXTB)
2667 .Case("uxth", AArch64_AM::UXTH)
2668 .Case("uxtw", AArch64_AM::UXTW)
2669 .Case("uxtx", AArch64_AM::UXTX)
2670 .Case("sxtb", AArch64_AM::SXTB)
2671 .Case("sxth", AArch64_AM::SXTH)
2672 .Case("sxtw", AArch64_AM::SXTW)
2673 .Case("sxtx", AArch64_AM::SXTX)
2674 .Default(AArch64_AM::InvalidShiftExtend);
2676 if (ShOp == AArch64_AM::InvalidShiftExtend)
2677 return MatchOperand_NoMatch;
2679 SMLoc S = Tok.getLoc();
2682 bool Hash = parseOptionalToken(AsmToken::Hash);
2684 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2685 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2686 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2687 ShOp == AArch64_AM::MSL) {
2688 // We expect a number here.
2689 TokError("expected #imm after shift specifier");
2690 return MatchOperand_ParseFail;
2693 // "extend" type operations don't need an immediate, #0 is implicit.
2694 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2696 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2697 return MatchOperand_Success;
2700 // Make sure we do actually have a number, identifier or a parenthesized
2702 SMLoc E = Parser.getTok().getLoc();
2703 if (!Parser.getTok().is(AsmToken::Integer) &&
2704 !Parser.getTok().is(AsmToken::LParen) &&
2705 !Parser.getTok().is(AsmToken::Identifier)) {
2706 Error(E, "expected integer shift amount");
2707 return MatchOperand_ParseFail;
2710 const MCExpr *ImmVal;
2711 if (getParser().parseExpression(ImmVal))
2712 return MatchOperand_ParseFail;
2714 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2716 Error(E, "expected constant '#imm' after shift specifier");
2717 return MatchOperand_ParseFail;
2720 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2721 Operands.push_back(AArch64Operand::CreateShiftExtend(
2722 ShOp, MCE->getValue(), true, S, E, getContext()));
2723 return MatchOperand_Success;
2726 static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
2727 if (FBS[AArch64::HasV8_1aOps])
2729 else if (FBS[AArch64::HasV8_2aOps])
2731 else if (FBS[AArch64::HasV8_3aOps])
2733 else if (FBS[AArch64::HasV8_4aOps])
2739 void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
2741 const uint16_t Op2 = Encoding & 7;
2742 const uint16_t Cm = (Encoding & 0x78) >> 3;
2743 const uint16_t Cn = (Encoding & 0x780) >> 7;
2744 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
2746 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
2749 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2751 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
2753 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
2754 Expr = MCConstantExpr::create(Op2, getContext());
2756 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2759 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2760 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
2761 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2762 OperandVector &Operands) {
2763 if (Name.find('.') != StringRef::npos)
2764 return TokError("invalid operand");
2768 AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2770 MCAsmParser &Parser = getParser();
2771 const AsmToken &Tok = Parser.getTok();
2772 StringRef Op = Tok.getString();
2773 SMLoc S = Tok.getLoc();
2775 if (Mnemonic == "ic") {
2776 const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
2778 return TokError("invalid operand for IC instruction");
2779 else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
2780 std::string Str("IC " + std::string(IC->Name) + " requires ");
2781 setRequiredFeatureString(IC->getRequiredFeatures(), Str);
2782 return TokError(Str.c_str());
2784 createSysAlias(IC->Encoding, Operands, S);
2785 } else if (Mnemonic == "dc") {
2786 const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
2788 return TokError("invalid operand for DC instruction");
2789 else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
2790 std::string Str("DC " + std::string(DC->Name) + " requires ");
2791 setRequiredFeatureString(DC->getRequiredFeatures(), Str);
2792 return TokError(Str.c_str());
2794 createSysAlias(DC->Encoding, Operands, S);
2795 } else if (Mnemonic == "at") {
2796 const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
2798 return TokError("invalid operand for AT instruction");
2799 else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
2800 std::string Str("AT " + std::string(AT->Name) + " requires ");
2801 setRequiredFeatureString(AT->getRequiredFeatures(), Str);
2802 return TokError(Str.c_str());
2804 createSysAlias(AT->Encoding, Operands, S);
2805 } else if (Mnemonic == "tlbi") {
2806 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
2808 return TokError("invalid operand for TLBI instruction");
2809 else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
2810 std::string Str("TLBI " + std::string(TLBI->Name) + " requires ");
2811 setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
2812 return TokError(Str.c_str());
2814 createSysAlias(TLBI->Encoding, Operands, S);
2817 Parser.Lex(); // Eat operand.
2819 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2820 bool HasRegister = false;
2822 // Check for the optional register operand.
2823 if (parseOptionalToken(AsmToken::Comma)) {
2824 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2825 return TokError("expected register operand");
2829 if (ExpectRegister && !HasRegister)
2830 return TokError("specified " + Mnemonic + " op requires a register");
2831 else if (!ExpectRegister && HasRegister)
2832 return TokError("specified " + Mnemonic + " op does not use a register");
2834 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
2840 OperandMatchResultTy
2841 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2842 MCAsmParser &Parser = getParser();
2843 const AsmToken &Tok = Parser.getTok();
2845 if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier)) {
2846 TokError("'csync' operand expected");
2847 return MatchOperand_ParseFail;
2848 // Can be either a #imm style literal or an option name
2849 } else if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
2850 // Immediate operand.
2851 const MCExpr *ImmVal;
2852 SMLoc ExprLoc = getLoc();
2853 if (getParser().parseExpression(ImmVal))
2854 return MatchOperand_ParseFail;
2855 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2857 Error(ExprLoc, "immediate value expected for barrier operand");
2858 return MatchOperand_ParseFail;
2860 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2861 Error(ExprLoc, "barrier operand out of range");
2862 return MatchOperand_ParseFail;
2864 auto DB = AArch64DB::lookupDBByEncoding(MCE->getValue());
2865 Operands.push_back(AArch64Operand::CreateBarrier(
2866 MCE->getValue(), DB ? DB->Name : "", ExprLoc, getContext()));
2867 return MatchOperand_Success;
2870 if (Tok.isNot(AsmToken::Identifier)) {
2871 TokError("invalid operand for instruction");
2872 return MatchOperand_ParseFail;
2875 auto TSB = AArch64TSB::lookupTSBByName(Tok.getString());
2876 // The only valid named option for ISB is 'sy'
2877 auto DB = AArch64DB::lookupDBByName(Tok.getString());
2878 if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
2879 TokError("'sy' or #imm operand expected");
2880 return MatchOperand_ParseFail;
2881 // The only valid named option for TSB is 'csync'
2882 } else if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
2883 TokError("'csync' operand expected");
2884 return MatchOperand_ParseFail;
2885 } else if (!DB && !TSB) {
2886 TokError("invalid barrier option name");
2887 return MatchOperand_ParseFail;
2890 Operands.push_back(AArch64Operand::CreateBarrier(
2891 DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(), getContext()));
2892 Parser.Lex(); // Consume the option
2894 return MatchOperand_Success;
2897 OperandMatchResultTy
2898 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2899 MCAsmParser &Parser = getParser();
2900 const AsmToken &Tok = Parser.getTok();
2902 if (Tok.isNot(AsmToken::Identifier))
2903 return MatchOperand_NoMatch;
2906 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
2907 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
2908 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
2909 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
2911 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
2913 auto PState = AArch64PState::lookupPStateByName(Tok.getString());
2914 unsigned PStateImm = -1;
2915 if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
2916 PStateImm = PState->Encoding;
2919 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
2920 PStateImm, getContext()));
2921 Parser.Lex(); // Eat identifier
2923 return MatchOperand_Success;
2926 /// tryParseNeonVectorRegister - Parse a vector register operand.
2927 bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
2928 MCAsmParser &Parser = getParser();
2929 if (Parser.getTok().isNot(AsmToken::Identifier))
2933 // Check for a vector register specifier first.
2936 OperandMatchResultTy Res =
2937 tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
2938 if (Res != MatchOperand_Success)
2941 const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
2945 unsigned ElementWidth = KindRes->second;
2947 AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
2948 S, getLoc(), getContext()));
2950 // If there was an explicit qualifier, that goes on as a literal text
2954 AArch64Operand::CreateToken(Kind, false, S, getContext()));
2956 return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
2959 OperandMatchResultTy
2960 AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
2961 SMLoc SIdx = getLoc();
2962 if (parseOptionalToken(AsmToken::LBrac)) {
2963 const MCExpr *ImmVal;
2964 if (getParser().parseExpression(ImmVal))
2965 return MatchOperand_NoMatch;
2966 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2968 TokError("immediate value expected for vector index");
2969 return MatchOperand_ParseFail;;
2974 if (parseToken(AsmToken::RBrac, "']' expected"))
2975 return MatchOperand_ParseFail;;
2977 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2979 return MatchOperand_Success;
2982 return MatchOperand_NoMatch;
2985 // tryParseVectorRegister - Try to parse a vector register name with
2986 // optional kind specifier. If it is a register specifier, eat the token
2988 OperandMatchResultTy
2989 AArch64AsmParser::tryParseVectorRegister(unsigned &Reg, StringRef &Kind,
2990 RegKind MatchKind) {
2991 MCAsmParser &Parser = getParser();
2992 const AsmToken &Tok = Parser.getTok();
2994 if (Tok.isNot(AsmToken::Identifier))
2995 return MatchOperand_NoMatch;
2997 StringRef Name = Tok.getString();
2998 // If there is a kind specifier, it's separated from the register name by
3000 size_t Start = 0, Next = Name.find('.');
3001 StringRef Head = Name.slice(Start, Next);
3002 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
3005 if (Next != StringRef::npos) {
3006 Kind = Name.slice(Next, StringRef::npos);
3007 if (!isValidVectorKind(Kind, MatchKind)) {
3008 TokError("invalid vector kind qualifier");
3009 return MatchOperand_ParseFail;
3012 Parser.Lex(); // Eat the register token.
3015 return MatchOperand_Success;
3018 return MatchOperand_NoMatch;
3021 /// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
3022 OperandMatchResultTy
3023 AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
3024 // Check for a SVE predicate register specifier first.
3025 const SMLoc S = getLoc();
3028 auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
3029 if (Res != MatchOperand_Success)
3032 const auto &KindRes = parseVectorKind(Kind, RegKind::SVEPredicateVector);
3034 return MatchOperand_NoMatch;
3036 unsigned ElementWidth = KindRes->second;
3037 Operands.push_back(AArch64Operand::CreateVectorReg(
3038 RegNum, RegKind::SVEPredicateVector, ElementWidth, S,
3039 getLoc(), getContext()));
3041 // Not all predicates are followed by a '/m' or '/z'.
3042 MCAsmParser &Parser = getParser();
3043 if (Parser.getTok().isNot(AsmToken::Slash))
3044 return MatchOperand_Success;
3046 // But when they do they shouldn't have an element type suffix.
3047 if (!Kind.empty()) {
3048 Error(S, "not expecting size suffix");
3049 return MatchOperand_ParseFail;
3052 // Add a literal slash as operand
3054 AArch64Operand::CreateToken("/" , false, getLoc(), getContext()));
3056 Parser.Lex(); // Eat the slash.
3058 // Zeroing or merging?
3059 auto Pred = Parser.getTok().getString().lower();
3060 if (Pred != "z" && Pred != "m") {
3061 Error(getLoc(), "expecting 'm' or 'z' predication");
3062 return MatchOperand_ParseFail;
3065 // Add zero/merge token.
3066 const char *ZM = Pred == "z" ? "z" : "m";
3068 AArch64Operand::CreateToken(ZM, false, getLoc(), getContext()));
3070 Parser.Lex(); // Eat zero/merge token.
3071 return MatchOperand_Success;
3074 /// parseRegister - Parse a register operand.
3075 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
3076 // Try for a Neon vector register.
3077 if (!tryParseNeonVectorRegister(Operands))
3080 // Otherwise try for a scalar register.
3081 if (tryParseGPROperand<false>(Operands) == MatchOperand_Success)
3087 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
3088 MCAsmParser &Parser = getParser();
3089 bool HasELFModifier = false;
3090 AArch64MCExpr::VariantKind RefKind;
3092 if (parseOptionalToken(AsmToken::Colon)) {
3093 HasELFModifier = true;
3095 if (Parser.getTok().isNot(AsmToken::Identifier))
3096 return TokError("expect relocation specifier in operand after ':'");
3098 std::string LowerCase = Parser.getTok().getIdentifier().lower();
3099 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
3100 .Case("lo12", AArch64MCExpr::VK_LO12)
3101 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
3102 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
3103 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
3104 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
3105 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
3106 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
3107 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
3108 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
3109 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
3110 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
3111 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
3112 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
3113 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
3114 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
3115 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
3116 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
3117 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
3118 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
3119 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
3120 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
3121 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
3122 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
3123 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
3124 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
3125 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
3126 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
3127 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
3128 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
3129 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
3130 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
3131 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
3132 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
3133 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
3134 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
3135 .Case("secrel_lo12", AArch64MCExpr::VK_SECREL_LO12)
3136 .Case("secrel_hi12", AArch64MCExpr::VK_SECREL_HI12)
3137 .Default(AArch64MCExpr::VK_INVALID);
3139 if (RefKind == AArch64MCExpr::VK_INVALID)
3140 return TokError("expect relocation specifier in operand after ':'");
3142 Parser.Lex(); // Eat identifier
3144 if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
3148 if (getParser().parseExpression(ImmVal))
3152 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
// tryParseVectorList - Parse a brace-delimited vector register list such as
// "{ v0.8b, v1.8b }" or the range form "{ v0.8b - v3.8b }", for the register
// kind selected by the VectorKind template parameter (e.g. Neon or SVE).
// Returns MatchOperand_NoMatch when the input does not begin with '{' so
// other operand parsers can try, MatchOperand_ParseFail after a diagnosed
// error, and MatchOperand_Success after appending a VectorList operand.
3157 template <RegKind VectorKind>
3158 OperandMatchResultTy
3159 AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
3161 MCAsmParser &Parser = getParser();
// A vector list must start with '{'; anything else is simply "no match".
3162 if (!Parser.getTok().is(AsmToken::LCurly))
3163 return MatchOperand_NoMatch;
3165 // Wrapper around parse function
// Helper lambda: parse one vector register plus its arrangement suffix into
// Reg/Kind. NoMatchIsError promotes "no match" to a diagnosed failure for
// registers after the first, where a vector register is mandatory.
3166 auto ParseVector = [this, &Parser](unsigned &Reg, StringRef &Kind, SMLoc Loc,
3167 bool NoMatchIsError) {
3168 auto RegTok = Parser.getTok();
3169 auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
3170 if (ParseRes == MatchOperand_Success) {
// tryParseVectorRegister only succeeds with a suffix this re-parse accepts,
// so a failure here would indicate an internal inconsistency.
3171 if (parseVectorKind(Kind, VectorKind))
3173 llvm_unreachable("Expected a valid vector kind")
3176 if (RegTok.isNot(AsmToken::Identifier) ||
3177 ParseRes == MatchOperand_ParseFail ||
3178 (ParseRes == MatchOperand_NoMatch && NoMatchIsError)) {
3179 Error(Loc, "vector register expected");
3180 return MatchOperand_ParseFail;
3183 return MatchOperand_NoMatch;
// Remember the '{' token so it can be pushed back when this list kind does
// not match (see UnLex below).
3187 auto LCurly = Parser.getTok();
3188 Parser.Lex(); // Eat left bracket token.
3192 auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
3194 // Put back the original left bracket if there was no match, so that
3195 // different types of list-operands can be matched (e.g. SVE, Neon).
3196 if (ParseRes == MatchOperand_NoMatch)
3197 Parser.getLexer().UnLex(LCurly);
3199 if (ParseRes != MatchOperand_Success)
3202 int64_t PrevReg = FirstReg;
// Range form: "{ vN.T - vM.T }".
3205 if (parseOptionalToken(AsmToken::Minus)) {
3206 SMLoc Loc = getLoc();
3210 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
3211 if (ParseRes != MatchOperand_Success)
3214 // Any Kind suffices must match on all regs in the list.
3215 if (Kind != NextKind) {
3216 Error(Loc, "mismatched register size suffix");
3217 return MatchOperand_ParseFail;
// Distance between the two endpoints, computed modulo 32 so a range may
// wrap around (e.g. v31 - v1).
3220 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
3222 if (Space == 0 || Space > 3) {
3223 Error(Loc, "invalid number of vectors");
3224 return MatchOperand_ParseFail;
// Comma-separated form: "{ vN.T, vM.T, ... }".
3230 while (parseOptionalToken(AsmToken::Comma)) {
3231 SMLoc Loc = getLoc();
3234 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
3235 if (ParseRes != MatchOperand_Success)
3238 // Any Kind suffices must match on all regs in the list.
3239 if (Kind != NextKind) {
3240 Error(Loc, "mismatched register size suffix");
3241 return MatchOperand_ParseFail;
3244 // Registers must be incremental (with wraparound at 31)
3245 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
3246 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) {
3247 Error(Loc, "registers must be sequential");
3248 return MatchOperand_ParseFail;
3256 if (parseToken(AsmToken::RCurly, "'}' expected"))
3257 return MatchOperand_ParseFail;
3260 Error(S, "invalid number of vectors");
3261 return MatchOperand_ParseFail;
// Translate the textual arrangement suffix (if any) into an element count
// and element width for the operand we create.
3264 unsigned NumElements = 0;
3265 unsigned ElementWidth = 0;
3266 if (!Kind.empty()) {
3267 if (const auto &VK = parseVectorKind(Kind, VectorKind))
3268 std::tie(NumElements, ElementWidth) = *VK;
3271 Operands.push_back(AArch64Operand::CreateVectorList(
3272 FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(),
3275 return MatchOperand_Success;
3278 /// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
// Delegates to tryParseVectorList for Neon registers; the 'true' argument is
// forwarded so a malformed list is treated as a hard error rather than
// "no match". Afterwards an optional trailing vector index (e.g. "[3]") is
// allowed. Returns true on failure, false on success (MCAsmParser convention).
3279 bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
3280 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
3281 if (ParseRes != MatchOperand_Success)
// The index is optional: only an explicit parse failure is an error here;
// NoMatch just means there was no '[' following the list.
3284 return tryParseVectorIndex(Operands) == MatchOperand_ParseFail;
// tryParseGPR64sp0Operand - Parse a scalar register optionally followed by
// ", #0" (with the '#' itself optional). When the immediate is present it
// must be the constant zero; it is validated and then discarded — only the
// register operand is pushed onto Operands.
3287 OperandMatchResultTy
3288 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
3289 SMLoc StartLoc = getLoc();
3292 OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
3293 if (Res != MatchOperand_Success)
// No comma means no trailing immediate: emit the bare register operand.
3296 if (!parseOptionalToken(AsmToken::Comma)) {
3297 Operands.push_back(AArch64Operand::CreateReg(
3298 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3299 return MatchOperand_Success;
// The '#' prefix on the immediate is optional.
3302 parseOptionalToken(AsmToken::Hash);
3304 if (getParser().getTok().isNot(AsmToken::Integer)) {
3305 Error(getLoc(), "index must be absent or #0");
3306 return MatchOperand_ParseFail;
// The immediate must evaluate to the constant 0; anything else is rejected
// with the same diagnostic.
3309 const MCExpr *ImmVal;
3310 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
3311 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
3312 Error(getLoc(), "index must be absent or #0");
3313 return MatchOperand_ParseFail;
3316 Operands.push_back(AArch64Operand::CreateReg(
3317 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3318 return MatchOperand_Success;
// tryParseGPROperand - Parse a scalar GPR operand, optionally followed by a
// shift/extend decoration (e.g. ", lsl #2") when the ParseShiftExtend
// template parameter is true. EqTy records the register-equality constraint
// (see RegConstraintEqualityTy) attached to the created register operand so
// tied-operand checking can compare W/X forms.
3321 template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
3322 OperandMatchResultTy
3323 AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
3324 SMLoc StartLoc = getLoc();
3327 OperandMatchResultTy Res = tryParseScalarRegister(RegNum);
3328 if (Res != MatchOperand_Success)
3331 // No shift/extend is the default.
3332 if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
3333 Operands.push_back(AArch64Operand::CreateReg(
3334 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
3335 return MatchOperand_Success;
// Parse the shift/extend into a temporary operand, then fold its fields
// into a single register operand rather than pushing it separately.
3342 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
3343 Res = tryParseOptionalShiftExtend(ExtOpnd);
3344 if (Res != MatchOperand_Success)
3347 auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
3348 Operands.push_back(AArch64Operand::CreateReg(
3349 RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
3350 Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
3351 Ext->hasShiftExtendAmount()));
3353 return MatchOperand_Success;
// parseOptionalMulOperand - Parse the optional SVE "mul vl" / "mul #<imm>"
// decoration that follows an immediate. The "mul"/"vl" words are pushed as
// token operands (they must appear verbatim in the tablegen asm string);
// a "mul #<imm>" form pushes an immediate operand instead.
// Returns true on failure, false on success (MCAsmParser convention).
3356 bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
3357 MCAsmParser &Parser = getParser();
3359 // Some SVE instructions have a decoration after the immediate, i.e.
3360 // "mul vl". We parse them here and add tokens, which must be present in the
3361 // asm string in the tablegen instruction.
// Peek ahead: only commit to this parse when the current token is "mul"
// (case-insensitive) AND the next token is either "vl" or a '#'.
3362 bool NextIsVL = Parser.getLexer().peekTok().getString().equals_lower("vl");
3363 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
3364 if (!Parser.getTok().getString().equals_lower("mul") ||
3365 !(NextIsVL || NextIsHash))
3369 AArch64Operand::CreateToken("mul", false, getLoc(), getContext()));
3370 Parser.Lex(); // Eat the "mul"
3374 AArch64Operand::CreateToken("vl", false, getLoc(), getContext()));
3375 Parser.Lex(); // Eat the "vl"
3380 Parser.Lex(); // Eat the #
3383 // Parse immediate operand.
// Only a constant expression is accepted after "mul #"; a non-constant
// falls through to the diagnostic below.
3384 const MCExpr *ImmVal;
3385 if (!Parser.parseExpression(ImmVal))
3386 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
3387 Operands.push_back(AArch64Operand::CreateImm(
3388 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
3390 return MatchOperand_Success;
3394 return Error(getLoc(), "expected 'vl' or '#<imm>'");
3397 /// parseOperand - Parse an AArch64 instruction operand.  For now this parses
3398 /// the operand regardless of the mnemonic.
// isCondCode: the caller expects a condition-code operand at this position.
// invertCondCode: invert the parsed condition (for aliases like cinc/cset).
// Returns true on failure, false on success (MCAsmParser convention).
3399 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3400 bool invertCondCode) {
3401 MCAsmParser &Parser = getParser();
// First give tablegen-generated custom operand parsers a chance.
3403 OperandMatchResultTy ResTy =
3404 MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);
3406 // Check if the current operand has a custom associated parser, if so, try to
3407 // custom parse the operand, or fallback to the general approach.
3408 if (ResTy == MatchOperand_Success)
3410 // If there wasn't a custom match, try the generic matcher below. Otherwise,
3411 // there was a match, but an error occurred, in which case, just return that
3412 // the operand parsing failed.
3413 if (ResTy == MatchOperand_ParseFail)
3416 // Nothing custom, so do general case parsing.
// Dispatch on the first token of the operand.
3418 switch (getLexer().getKind()) {
3422 if (parseSymbolicImmVal(Expr))
3423 return Error(S, "invalid operand");
3425 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3426 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
// '[' starts a memory operand; push it as a token and recurse for the
// operand that follows it.
3429 case AsmToken::LBrac: {
3430 SMLoc Loc = Parser.getTok().getLoc();
3431 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3433 Parser.Lex(); // Eat '['
3435 // There's no comma after a '[', so we can parse the next operand
3437 return parseOperand(Operands, false, false);
// '{' starts a vector register list.
3439 case AsmToken::LCurly:
3440 return parseNeonVectorList(Operands);
3441 case AsmToken::Identifier: {
3442 // If we're expecting a Condition Code operand, then just parse that.
3444 return parseCondCode(Operands, invertCondCode);
3446 // If it's a register name, parse it.
3447 if (!parseRegister(Operands))
3450 // See if this is a "mul vl" decoration or "mul #<int>" operand used
3451 // by SVE instructions.
3452 if (!parseOptionalMulOperand(Operands))
3455 // This could be an optional "shift" or "extend" operand.
3456 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3457 // We can only continue if no tokens were eaten.
3458 if (GotShift != MatchOperand_NoMatch)
3461 // This was not a register so parse other operands that start with an
3462 // identifier (like labels) as expressions and create them as immediates.
3463 const MCExpr *IdVal;
3465 if (getParser().parseExpression(IdVal))
3467 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3468 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3471 case AsmToken::Integer:
3472 case AsmToken::Real:
3473 case AsmToken::Hash: {
3474 // #42 -> immediate.
3477 parseOptionalToken(AsmToken::Hash);
3479 // Parse a negative sign
3480 bool isNegative = false;
3481 if (Parser.getTok().is(AsmToken::Minus)) {
3483 // We need to consume this token only when we have a Real, otherwise
3484 // we let parseSymbolicImmVal take care of it
3485 if (Parser.getLexer().peekTok().is(AsmToken::Real))
3489 // The only Real that should come through here is a literal #0.0 for
3490 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3491 // so convert the value.
3492 const AsmToken &Tok = Parser.getTok();
3493 if (Tok.is(AsmToken::Real)) {
3494 APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
3495 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
// Only the floating-point compare mnemonics may take a #0.0 literal here,
// and the value must be exactly +0.0.
3496 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3497 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3498 Mnemonic != "fcmlt" && Mnemonic != "fcmne")
3499 return TokError("unexpected floating point literal");
3500 else if (IntVal != 0 || isNegative)
3501 return TokError("expected floating-point constant #0.0");
3502 Parser.Lex(); // Eat the token.
// Emit "#0" and ".0" as raw tokens, matching the asm string in tablegen.
3505 AArch64Operand::CreateToken("#0", false, S, getContext()));
3507 AArch64Operand::CreateToken(".0", false, S, getContext()));
// Not a Real: parse a (possibly relocation-decorated) immediate expression.
3511 const MCExpr *ImmVal;
3512 if (parseSymbolicImmVal(ImmVal))
3515 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3516 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
// '=' introduces the "ldr r0, =val" pseudo-instruction.
3519 case AsmToken::Equal: {
3520 SMLoc Loc = getLoc();
3521 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3522 return TokError("unexpected token in operand");
3523 Parser.Lex(); // Eat '='
3524 const MCExpr *SubExprVal;
3525 if (getParser().parseExpression(SubExprVal))
// The ldr pseudo requires the first operand to be a scalar register.
3528 if (Operands.size() < 2 ||
3529 !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
3530 return Error(Loc, "Only valid when first operand is register");
3533 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3534 Operands[1]->getReg());
3536 MCContext& Ctx = getContext();
3537 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3538 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3539 if (isa<MCConstantExpr>(SubExprVal)) {
3540 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
// Strip trailing 16-bit-aligned zeros so the value fits a movz with an
// LSL shift; X regs allow shifts up to 48, W regs only 16.
3541 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3542 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3546 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
// Rewrite the mnemonic token itself from "ldr" to "movz".
3547 Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3548 Operands.push_back(AArch64Operand::CreateImm(
3549 MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
3551 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3552 ShiftAmt, true, S, E, Ctx));
3555 APInt Simm = APInt(64, Imm << ShiftAmt);
3556 // check if the immediate is an unsigned or signed 32-bit int for W regs
3557 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3558 return Error(Loc, "Immediate too large for register");
3560 // If it is a label or an imm that cannot fit in a movz, put it into CP.
// Constant-pool entry size follows the register width: 8 bytes for X,
// 4 bytes for W.
3561 const MCExpr *CPLoc =
3562 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
3563 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
// regsEqual - Compare two register operands for tied-operand checking,
// honoring the per-operand RegConstraintEqualityTy: EqualsReg means plain
// identity (delegated to the base class), while EqualsSuperReg/EqualsSubReg
// accept the 64-bit (X) or 32-bit (W) counterpart of the other register.
3569 bool AArch64AsmParser::regsEqual(const MCParsedAsmOperand &Op1,
3570 const MCParsedAsmOperand &Op2) const {
3571 auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
3572 auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
// Plain equality on both sides: fall back to the generic comparison.
3573 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
3574 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
3575 return MCTargetAsmParser::regsEqual(Op1, Op2);
// The W<->X mapping below is only defined for scalar registers.
3577 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
3578 "Testing equality of non-scalar registers not supported");
3580 // Check if a registers match their sub/super register classes.
3581 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
3582 return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
3583 if (AOp1.getRegEqualityTy() == EqualsSubReg)
3584 return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
3585 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
3586 return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
3587 if (AOp2.getRegEqualityTy() == EqualsSubReg)
3588 return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
3593 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
// operands. Splits the mnemonic on '.' into tokens, handles the ".req"
// directive, branch condition-code suffixes, and SYS aliases, then parses
// the comma-separated operand list. Returns true on failure (MCAsmParser
// convention).
3595 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3596 StringRef Name, SMLoc NameLoc,
3597 OperandVector &Operands) {
3598 MCAsmParser &Parser = getParser();
// Canonicalize legacy short-form conditional branches ("beq") to the
// dotted "b.cond" spelling before any further splitting.
3599 Name = StringSwitch<StringRef>(Name.lower())
3600 .Case("beq", "b.eq")
3601 .Case("bne", "b.ne")
3602 .Case("bhs", "b.hs")
3603 .Case("bcs", "b.cs")
3604 .Case("blo", "b.lo")
3605 .Case("bcc", "b.cc")
3606 .Case("bmi", "b.mi")
3607 .Case("bpl", "b.pl")
3608 .Case("bvs", "b.vs")
3609 .Case("bvc", "b.vc")
3610 .Case("bhi", "b.hi")
3611 .Case("bls", "b.ls")
3612 .Case("bge", "b.ge")
3613 .Case("blt", "b.lt")
3614 .Case("bgt", "b.gt")
3615 .Case("ble", "b.le")
3616 .Case("bal", "b.al")
3617 .Case("bnv", "b.nv")
3620 // First check for the AArch64-specific .req directive.
3621 if (Parser.getTok().is(AsmToken::Identifier) &&
3622 Parser.getTok().getIdentifier() == ".req") {
3623 parseDirectiveReq(Name, NameLoc);
3624 // We always return 'error' for this, as we're done with this
3625 // statement and don't need to match the 'instruction."
3629 // Create the leading tokens for the mnemonic, split by '.' characters.
3630 size_t Start = 0, Next = Name.find('.');
3631 StringRef Head = Name.slice(Start, Next);
3633 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3634 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi")
3635 return parseSysAlias(Head, NameLoc, Operands);
3638 AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3641 // Handle condition codes for a branch mnemonic
3642 if (Head == "b" && Next != StringRef::npos) {
3644 Next = Name.find('.', Start + 1);
3645 Head = Name.slice(Start + 1, Next);
// Point the diagnostic at the suffix within the original mnemonic text.
3647 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3648 (Head.data() - Name.data()));
3649 AArch64CC::CondCode CC = parseCondCodeString(Head);
3650 if (CC == AArch64CC::Invalid)
3651 return Error(SuffixLoc, "invalid condition code");
3653 AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3655 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3658 // Add the remaining tokens in the mnemonic.
3659 while (Next != StringRef::npos) {
3661 Next = Name.find('.', Start + 1);
3662 Head = Name.slice(Start, Next);
3663 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3664 (Head.data() - Name.data()) + 1);
3666 AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3669 // Conditional compare instructions have a Condition Code operand, which needs
3670 // to be parsed and an immediate operand created.
3671 bool condCodeFourthOperand =
3672 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3673 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3674 Head == "csinc" || Head == "csinv" || Head == "csneg");
3676 // These instructions are aliases to some of the conditional select
3677 // instructions. However, the condition code is inverted in the aliased
3680 // FIXME: Is this the correct way to handle these? Or should the parser
3681 // generate the aliased instructions directly?
3682 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3683 bool condCodeThirdOperand =
3684 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3686 // Read the remaining operands.
3687 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3688 // Read the first operand.
3689 if (parseOperand(Operands, false, false)) {
// Parse the rest, telling parseOperand which positions expect a condition
// code (and whether it must be inverted) based on the flags above.
3694 while (parseOptionalToken(AsmToken::Comma)) {
3695 // Parse and remember the operand.
3696 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3697 (N == 3 && condCodeThirdOperand) ||
3698 (N == 2 && condCodeSecondOperand),
3699 condCodeSecondOperand || condCodeThirdOperand)) {
3703 // After successfully parsing some operands there are two special cases to
3704 // consider (i.e. notional operands not separated by commas). Both are due
3705 // to memory specifiers:
3706 // + An RBrac will end an address for load/store/prefetch
3707 // + An '!' will indicate a pre-indexed operation.
3709 // It's someone else's responsibility to make sure these tokens are sane
3710 // in the given context!
3712 SMLoc RLoc = Parser.getTok().getLoc();
3713 if (parseOptionalToken(AsmToken::RBrac))
3715 AArch64Operand::CreateToken("]", false, RLoc, getContext()));
3716 SMLoc ELoc = Parser.getTok().getLoc();
3717 if (parseOptionalToken(AsmToken::Exclaim))
3719 AArch64Operand::CreateToken("!", false, ELoc, getContext()));
3725 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
// isMatchingOrAlias - Return true if Reg is the SVE Z register ZReg or any
// of its aliased views (B/H/S/D/Q forms with the same register index).
// Works by mapping Reg's index within each scalar/vector bank onto the
// Z-register bank and comparing against ZReg.
3731 static inline bool isMatchingOrAlias(unsigned ZReg, unsigned Reg) {
3732 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
3733 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
3734 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
3735 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
3736 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
3737 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
3738 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
3741 // FIXME: This entire function is a giant hack to provide us with decent
3742 // operand range validation/diagnostics until TableGen/MC can be extended
3743 // to support autogeneration of this kind of validation.
// validateInstruction - Semantic checks that matching alone cannot express:
// movprfx-prefix legality, unpredictable writeback/register-reuse forms of
// load/store instructions, and symbolic-immediate restrictions on ADD/SUB.
// Loc holds the source locations of the instruction's parsed operands.
// Returns true (with a diagnostic) on failure.
3744 bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
3745 SmallVectorImpl<SMLoc> &Loc) {
3746 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3747 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
3749 // A prefix only applies to the instruction following it. Here we extract
3750 // prefix information for the next instruction before validating the current
3751 // one so that in the case of failure we don't erronously continue using the
3753 PrefixInfo Prefix = NextPrefix;
3754 NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
3756 // Before validating the instruction in isolation we run through the rules
3757 // applicable when it follows a prefix instruction.
3758 // NOTE: brk & hlt can be prefixed but require no additional validation.
3759 if (Prefix.isActive() &&
3760 (Inst.getOpcode() != AArch64::BRK) &&
3761 (Inst.getOpcode() != AArch64::HLT)) {
3763 // Prefixed intructions must have a destructive operand.
3764 if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
3765 AArch64::NotDestructive)
3766 return Error(IDLoc, "instruction is unpredictable when following a"
3767 " movprfx, suggest replacing movprfx with mov");
3769 // Destination operands must match.
3770 if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
3771 return Error(Loc[0], "instruction is unpredictable when following a"
3772 " movprfx writing to a different destination");
3774 // Destination operand must not be used in any other location.
// Tied operands are exempt: they are the destructive use of the
// destination itself.
3775 for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
3776 if (Inst.getOperand(i).isReg() &&
3777 (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
3778 isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
3779 return Error(Loc[0], "instruction is unpredictable when following a"
3780 " movprfx and destination also used as non-destructive"
3784 auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
3785 if (Prefix.isPredicated()) {
3788 // Find the instructions general predicate.
3789 for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
3790 if (Inst.getOperand(i).isReg() &&
3791 PPRRegClass.contains(Inst.getOperand(i).getReg())) {
3796 // Instruction must be predicated if the movprfx is predicated.
3798 (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
3799 return Error(IDLoc, "instruction is unpredictable when following a"
3800 " predicated movprfx, suggest using unpredicated movprfx");
3802 // Instruction must use same general predicate as the movprfx.
3803 if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
3804 return Error(IDLoc, "instruction is unpredictable when following a"
3805 " predicated movprfx using a different general predicate");
3807 // Instruction element type must match the movprfx.
3808 if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
3809 return Error(IDLoc, "instruction is unpredictable when following a"
3810 " predicated movprfx with a different element size");
3814 // Check for indexed addressing modes w/ the base register being the
3815 // same as a destination/source register or pair load where
3816 // the Rt == Rt2. All of those are undefined behaviour.
3817 switch (Inst.getOpcode()) {
// Pre/post-indexed LDP: writeback base must not overlap either dest.
3818 case AArch64::LDPSWpre:
3819 case AArch64::LDPWpost:
3820 case AArch64::LDPWpre:
3821 case AArch64::LDPXpost:
3822 case AArch64::LDPXpre: {
3823 unsigned Rt = Inst.getOperand(1).getReg();
3824 unsigned Rt2 = Inst.getOperand(2).getReg();
3825 unsigned Rn = Inst.getOperand(3).getReg();
3826 if (RI->isSubRegisterEq(Rn, Rt))
3827 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3828 "is also a destination");
3829 if (RI->isSubRegisterEq(Rn, Rt2))
3830 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3831 "is also a destination");
// Non-writeback LDP: the two destinations must differ.
3834 case AArch64::LDPDi:
3835 case AArch64::LDPQi:
3836 case AArch64::LDPSi:
3837 case AArch64::LDPSWi:
3838 case AArch64::LDPWi:
3839 case AArch64::LDPXi: {
3840 unsigned Rt = Inst.getOperand(0).getReg();
3841 unsigned Rt2 = Inst.getOperand(1).getReg();
3843 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
// FP/SIMD pre/post-indexed LDP: same Rt != Rt2 rule (operands shifted by
// the writeback result at index 0).
3846 case AArch64::LDPDpost:
3847 case AArch64::LDPDpre:
3848 case AArch64::LDPQpost:
3849 case AArch64::LDPQpre:
3850 case AArch64::LDPSpost:
3851 case AArch64::LDPSpre:
3852 case AArch64::LDPSWpost: {
3853 unsigned Rt = Inst.getOperand(1).getReg();
3854 unsigned Rt2 = Inst.getOperand(2).getReg();
3856 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
// Pre/post-indexed STP: writeback base must not overlap either source.
3859 case AArch64::STPDpost:
3860 case AArch64::STPDpre:
3861 case AArch64::STPQpost:
3862 case AArch64::STPQpre:
3863 case AArch64::STPSpost:
3864 case AArch64::STPSpre:
3865 case AArch64::STPWpost:
3866 case AArch64::STPWpre:
3867 case AArch64::STPXpost:
3868 case AArch64::STPXpre: {
3869 unsigned Rt = Inst.getOperand(1).getReg();
3870 unsigned Rt2 = Inst.getOperand(2).getReg();
3871 unsigned Rn = Inst.getOperand(3).getReg();
3872 if (RI->isSubRegisterEq(Rn, Rt))
3873 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3874 "is also a source");
3875 if (RI->isSubRegisterEq(Rn, Rt2))
3876 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3877 "is also a source");
// Pre/post-indexed single-register LDR: base must not overlap Rt.
3880 case AArch64::LDRBBpre:
3881 case AArch64::LDRBpre:
3882 case AArch64::LDRHHpre:
3883 case AArch64::LDRHpre:
3884 case AArch64::LDRSBWpre:
3885 case AArch64::LDRSBXpre:
3886 case AArch64::LDRSHWpre:
3887 case AArch64::LDRSHXpre:
3888 case AArch64::LDRSWpre:
3889 case AArch64::LDRWpre:
3890 case AArch64::LDRXpre:
3891 case AArch64::LDRBBpost:
3892 case AArch64::LDRBpost:
3893 case AArch64::LDRHHpost:
3894 case AArch64::LDRHpost:
3895 case AArch64::LDRSBWpost:
3896 case AArch64::LDRSBXpost:
3897 case AArch64::LDRSHWpost:
3898 case AArch64::LDRSHXpost:
3899 case AArch64::LDRSWpost:
3900 case AArch64::LDRWpost:
3901 case AArch64::LDRXpost: {
3902 unsigned Rt = Inst.getOperand(1).getReg();
3903 unsigned Rn = Inst.getOperand(2).getReg();
3904 if (RI->isSubRegisterEq(Rn, Rt))
3905 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3906 "is also a source");
// Pre/post-indexed single-register STR: base must not overlap Rt.
3909 case AArch64::STRBBpost:
3910 case AArch64::STRBpost:
3911 case AArch64::STRHHpost:
3912 case AArch64::STRHpost:
3913 case AArch64::STRWpost:
3914 case AArch64::STRXpost:
3915 case AArch64::STRBBpre:
3916 case AArch64::STRBpre:
3917 case AArch64::STRHHpre:
3918 case AArch64::STRHpre:
3919 case AArch64::STRWpre:
3920 case AArch64::STRXpre: {
3921 unsigned Rt = Inst.getOperand(1).getReg();
3922 unsigned Rn = Inst.getOperand(2).getReg();
3923 if (RI->isSubRegisterEq(Rn, Rt))
3924 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3925 "is also a source");
// Exclusive stores: the status register must not overlap the data register
// or (except when it is SP) the base register.
3928 case AArch64::STXRB:
3929 case AArch64::STXRH:
3930 case AArch64::STXRW:
3931 case AArch64::STXRX:
3932 case AArch64::STLXRB:
3933 case AArch64::STLXRH:
3934 case AArch64::STLXRW:
3935 case AArch64::STLXRX: {
3936 unsigned Rs = Inst.getOperand(0).getReg();
3937 unsigned Rt = Inst.getOperand(1).getReg();
3938 unsigned Rn = Inst.getOperand(2).getReg();
3939 if (RI->isSubRegisterEq(Rt, Rs) ||
3940 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
3941 return Error(Loc[0],
3942 "unpredictable STXR instruction, status is also a source");
// Exclusive pair stores: same rule, with two data registers to check.
3945 case AArch64::STXPW:
3946 case AArch64::STXPX:
3947 case AArch64::STLXPW:
3948 case AArch64::STLXPX: {
3949 unsigned Rs = Inst.getOperand(0).getReg();
3950 unsigned Rt1 = Inst.getOperand(1).getReg();
3951 unsigned Rt2 = Inst.getOperand(2).getReg();
3952 unsigned Rn = Inst.getOperand(3).getReg();
3953 if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
3954 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
3955 return Error(Loc[0],
3956 "unpredictable STXP instruction, status is also a source");
3962 // Now check immediate ranges. Separate from the above as there is overlap
3963 // in the instructions being checked and this keeps the nested conditionals
3965 switch (Inst.getOpcode()) {
3966 case AArch64::ADDSWri:
3967 case AArch64::ADDSXri:
3968 case AArch64::ADDWri:
3969 case AArch64::ADDXri:
3970 case AArch64::SUBSWri:
3971 case AArch64::SUBSXri:
3972 case AArch64::SUBWri:
3973 case AArch64::SUBXri: {
3974 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3975 // some slight duplication here.
3976 if (Inst.getOperand(2).isExpr()) {
3977 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3978 AArch64MCExpr::VariantKind ELFRefKind;
3979 MCSymbolRefExpr::VariantKind DarwinRefKind;
3981 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3983 // Only allow these with ADDXri.
3984 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3985 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3986 Inst.getOpcode() == AArch64::ADDXri)
3989 // Only allow these with ADDXri/ADDWri
3990 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3991 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3992 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3993 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3994 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3995 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3996 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3997 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12 ||
3998 ELFRefKind == AArch64MCExpr::VK_SECREL_LO12 ||
3999 ELFRefKind == AArch64MCExpr::VK_SECREL_HI12) &&
4000 (Inst.getOpcode() == AArch64::ADDXri ||
4001 Inst.getOpcode() == AArch64::ADDWri))
4004 // Don't allow symbol refs in the immediate field otherwise
4005 // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
4006 // operands of the original instruction (i.e. 'add w0, w1, borked' vs
4007 // 'cmp w0, 'borked')
4008 return Error(Loc.back(), "invalid immediate expression");
4010 // We don't validate more complex expressions here
4019 static std::string AArch64MnemonicSpellCheck(StringRef S, uint64_t FBS,
4020 unsigned VariantID = 0);
4022 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
4024 OperandVector &Operands) {
4026 case Match_InvalidTiedOperand: {
4027 RegConstraintEqualityTy EqTy =
4028 static_cast<const AArch64Operand &>(*Operands[ErrorInfo])
4029 .getRegEqualityTy();
4031 case RegConstraintEqualityTy::EqualsSubReg:
4032 return Error(Loc, "operand must be 64-bit form of destination register");
4033 case RegConstraintEqualityTy::EqualsSuperReg:
4034 return Error(Loc, "operand must be 32-bit form of destination register");
4035 case RegConstraintEqualityTy::EqualsReg:
4036 return Error(Loc, "operand must match destination register");
4038 llvm_unreachable("Unknown RegConstraintEqualityTy");
4040 case Match_MissingFeature:
4042 "instruction requires a CPU feature not currently enabled");
4043 case Match_InvalidOperand:
4044 return Error(Loc, "invalid operand for instruction");
4045 case Match_InvalidSuffix:
4046 return Error(Loc, "invalid type suffix for instruction");
4047 case Match_InvalidCondCode:
4048 return Error(Loc, "expected AArch64 condition code");
4049 case Match_AddSubRegExtendSmall:
4051 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
4052 case Match_AddSubRegExtendLarge:
4054 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
4055 case Match_AddSubSecondSource:
4057 "expected compatible register, symbol or integer in range [0, 4095]");
4058 case Match_LogicalSecondSource:
4059 return Error(Loc, "expected compatible register or logical immediate");
4060 case Match_InvalidMovImm32Shift:
4061 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
4062 case Match_InvalidMovImm64Shift:
4063 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
4064 case Match_AddSubRegShift32:
4066 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
4067 case Match_AddSubRegShift64:
4069 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
4070 case Match_InvalidFPImm:
4072 "expected compatible register or floating-point constant");
4073 case Match_InvalidMemoryIndexedSImm6:
4074 return Error(Loc, "index must be an integer in range [-32, 31].");
4075 case Match_InvalidMemoryIndexedSImm5:
4076 return Error(Loc, "index must be an integer in range [-16, 15].");
4077 case Match_InvalidMemoryIndexed1SImm4:
4078 return Error(Loc, "index must be an integer in range [-8, 7].");
4079 case Match_InvalidMemoryIndexed2SImm4:
4080 return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
4081 case Match_InvalidMemoryIndexed3SImm4:
4082 return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
4083 case Match_InvalidMemoryIndexed4SImm4:
4084 return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
4085 case Match_InvalidMemoryIndexed16SImm4:
4086 return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
4087 case Match_InvalidMemoryIndexed1SImm6:
4088 return Error(Loc, "index must be an integer in range [-32, 31].");
4089 case Match_InvalidMemoryIndexedSImm8:
4090 return Error(Loc, "index must be an integer in range [-128, 127].");
4091 case Match_InvalidMemoryIndexedSImm9:
4092 return Error(Loc, "index must be an integer in range [-256, 255].");
4093 case Match_InvalidMemoryIndexed8SImm10:
4094 return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
4095 case Match_InvalidMemoryIndexed4SImm7:
4096 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
4097 case Match_InvalidMemoryIndexed8SImm7:
4098 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
4099 case Match_InvalidMemoryIndexed16SImm7:
4100 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
4101 case Match_InvalidMemoryIndexed8UImm5:
4102 return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
4103 case Match_InvalidMemoryIndexed4UImm5:
4104 return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
4105 case Match_InvalidMemoryIndexed2UImm5:
4106 return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
4107 case Match_InvalidMemoryIndexed8UImm6:
4108 return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
4109 case Match_InvalidMemoryIndexed4UImm6:
4110 return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
4111 case Match_InvalidMemoryIndexed2UImm6:
4112 return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
4113 case Match_InvalidMemoryIndexed1UImm6:
4114 return Error(Loc, "index must be in range [0, 63].");
4115 case Match_InvalidMemoryWExtend8:
4117 "expected 'uxtw' or 'sxtw' with optional shift of #0");
4118 case Match_InvalidMemoryWExtend16:
4120 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
4121 case Match_InvalidMemoryWExtend32:
4123 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
4124 case Match_InvalidMemoryWExtend64:
4126 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
4127 case Match_InvalidMemoryWExtend128:
4129 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
4130 case Match_InvalidMemoryXExtend8:
4132 "expected 'lsl' or 'sxtx' with optional shift of #0");
4133 case Match_InvalidMemoryXExtend16:
4135 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
4136 case Match_InvalidMemoryXExtend32:
4138 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
4139 case Match_InvalidMemoryXExtend64:
4141 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
4142 case Match_InvalidMemoryXExtend128:
4144 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
4145 case Match_InvalidMemoryIndexed1:
4146 return Error(Loc, "index must be an integer in range [0, 4095].");
4147 case Match_InvalidMemoryIndexed2:
4148 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
4149 case Match_InvalidMemoryIndexed4:
4150 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
4151 case Match_InvalidMemoryIndexed8:
4152 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
4153 case Match_InvalidMemoryIndexed16:
4154 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
4155 case Match_InvalidImm0_1:
4156 return Error(Loc, "immediate must be an integer in range [0, 1].");
4157 case Match_InvalidImm0_7:
4158 return Error(Loc, "immediate must be an integer in range [0, 7].");
4159 case Match_InvalidImm0_15:
4160 return Error(Loc, "immediate must be an integer in range [0, 15].");
4161 case Match_InvalidImm0_31:
4162 return Error(Loc, "immediate must be an integer in range [0, 31].");
4163 case Match_InvalidImm0_63:
4164 return Error(Loc, "immediate must be an integer in range [0, 63].");
4165 case Match_InvalidImm0_127:
4166 return Error(Loc, "immediate must be an integer in range [0, 127].");
4167 case Match_InvalidImm0_255:
4168 return Error(Loc, "immediate must be an integer in range [0, 255].");
4169 case Match_InvalidImm0_65535:
4170 return Error(Loc, "immediate must be an integer in range [0, 65535].");
4171 case Match_InvalidImm1_8:
4172 return Error(Loc, "immediate must be an integer in range [1, 8].");
4173 case Match_InvalidImm1_16:
4174 return Error(Loc, "immediate must be an integer in range [1, 16].");
4175 case Match_InvalidImm1_32:
4176 return Error(Loc, "immediate must be an integer in range [1, 32].");
4177 case Match_InvalidImm1_64:
4178 return Error(Loc, "immediate must be an integer in range [1, 64].");
4179 case Match_InvalidSVEAddSubImm8:
4180 return Error(Loc, "immediate must be an integer in range [0, 255]"
4181 " with a shift amount of 0");
4182 case Match_InvalidSVEAddSubImm16:
4183 case Match_InvalidSVEAddSubImm32:
4184 case Match_InvalidSVEAddSubImm64:
4185 return Error(Loc, "immediate must be an integer in range [0, 255] or a "
4186 "multiple of 256 in range [256, 65280]");
4187 case Match_InvalidSVECpyImm8:
4188 return Error(Loc, "immediate must be an integer in range [-128, 255]"
4189 " with a shift amount of 0");
4190 case Match_InvalidSVECpyImm16:
4191 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
4192 "multiple of 256 in range [-32768, 65280]");
4193 case Match_InvalidSVECpyImm32:
4194 case Match_InvalidSVECpyImm64:
4195 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
4196 "multiple of 256 in range [-32768, 32512]");
4197 case Match_InvalidIndexRange1_1:
4198 return Error(Loc, "expected lane specifier '[1]'");
4199 case Match_InvalidIndexRange0_15:
4200 return Error(Loc, "vector lane must be an integer in range [0, 15].");
4201 case Match_InvalidIndexRange0_7:
4202 return Error(Loc, "vector lane must be an integer in range [0, 7].");
4203 case Match_InvalidIndexRange0_3:
4204 return Error(Loc, "vector lane must be an integer in range [0, 3].");
4205 case Match_InvalidIndexRange0_1:
4206 return Error(Loc, "vector lane must be an integer in range [0, 1].");
4207 case Match_InvalidSVEIndexRange0_63:
4208 return Error(Loc, "vector lane must be an integer in range [0, 63].");
4209 case Match_InvalidSVEIndexRange0_31:
4210 return Error(Loc, "vector lane must be an integer in range [0, 31].");
4211 case Match_InvalidSVEIndexRange0_15:
4212 return Error(Loc, "vector lane must be an integer in range [0, 15].");
4213 case Match_InvalidSVEIndexRange0_7:
4214 return Error(Loc, "vector lane must be an integer in range [0, 7].");
4215 case Match_InvalidSVEIndexRange0_3:
4216 return Error(Loc, "vector lane must be an integer in range [0, 3].");
4217 case Match_InvalidLabel:
4218 return Error(Loc, "expected label or encodable integer pc offset");
4220 return Error(Loc, "expected readable system register");
4222 return Error(Loc, "expected writable system register or pstate");
4223 case Match_InvalidComplexRotationEven:
4224 return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
4225 case Match_InvalidComplexRotationOdd:
4226 return Error(Loc, "complex rotation must be 90 or 270.");
4227 case Match_MnemonicFail: {
4228 std::string Suggestion = AArch64MnemonicSpellCheck(
4229 ((AArch64Operand &)*Operands[0]).getToken(),
4230 ComputeAvailableFeatures(STI->getFeatureBits()));
4231 return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
4233 case Match_InvalidGPR64shifted8:
4234 return Error(Loc, "register must be x0..x30 or xzr, without shift");
4235 case Match_InvalidGPR64shifted16:
4236 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
4237 case Match_InvalidGPR64shifted32:
4238 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
4239 case Match_InvalidGPR64shifted64:
4240 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
4241 case Match_InvalidGPR64NoXZRshifted8:
4242 return Error(Loc, "register must be x0..x30 without shift");
4243 case Match_InvalidGPR64NoXZRshifted16:
4244 return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
4245 case Match_InvalidGPR64NoXZRshifted32:
4246 return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
4247 case Match_InvalidGPR64NoXZRshifted64:
4248 return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
4249 case Match_InvalidZPR32UXTW8:
4250 case Match_InvalidZPR32SXTW8:
4251 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
4252 case Match_InvalidZPR32UXTW16:
4253 case Match_InvalidZPR32SXTW16:
4254 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
4255 case Match_InvalidZPR32UXTW32:
4256 case Match_InvalidZPR32SXTW32:
4257 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
4258 case Match_InvalidZPR32UXTW64:
4259 case Match_InvalidZPR32SXTW64:
4260 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
4261 case Match_InvalidZPR64UXTW8:
4262 case Match_InvalidZPR64SXTW8:
4263 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
4264 case Match_InvalidZPR64UXTW16:
4265 case Match_InvalidZPR64SXTW16:
4266 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
4267 case Match_InvalidZPR64UXTW32:
4268 case Match_InvalidZPR64SXTW32:
4269 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
4270 case Match_InvalidZPR64UXTW64:
4271 case Match_InvalidZPR64SXTW64:
4272 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
4273 case Match_InvalidZPR32LSL8:
4274 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
4275 case Match_InvalidZPR32LSL16:
4276 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
4277 case Match_InvalidZPR32LSL32:
4278 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
4279 case Match_InvalidZPR32LSL64:
4280 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
4281 case Match_InvalidZPR64LSL8:
4282 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
4283 case Match_InvalidZPR64LSL16:
4284 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
4285 case Match_InvalidZPR64LSL32:
4286 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
4287 case Match_InvalidZPR64LSL64:
4288 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
4289 case Match_InvalidZPR0:
4290 return Error(Loc, "expected register without element width sufix");
4291 case Match_InvalidZPR8:
4292 case Match_InvalidZPR16:
4293 case Match_InvalidZPR32:
4294 case Match_InvalidZPR64:
4295 case Match_InvalidZPR128:
4296 return Error(Loc, "invalid element width");
4297 case Match_InvalidZPR_3b8:
4298 return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
4299 case Match_InvalidZPR_3b16:
4300 return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
4301 case Match_InvalidZPR_3b32:
4302 return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
4303 case Match_InvalidZPR_4b16:
4304 return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
4305 case Match_InvalidZPR_4b32:
4306 return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
4307 case Match_InvalidZPR_4b64:
4308 return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
4309 case Match_InvalidSVEPattern:
4310 return Error(Loc, "invalid predicate pattern");
4311 case Match_InvalidSVEPredicateAnyReg:
4312 case Match_InvalidSVEPredicateBReg:
4313 case Match_InvalidSVEPredicateHReg:
4314 case Match_InvalidSVEPredicateSReg:
4315 case Match_InvalidSVEPredicateDReg:
4316 return Error(Loc, "invalid predicate register.");
4317 case Match_InvalidSVEPredicate3bAnyReg:
4318 case Match_InvalidSVEPredicate3bBReg:
4319 case Match_InvalidSVEPredicate3bHReg:
4320 case Match_InvalidSVEPredicate3bSReg:
4321 case Match_InvalidSVEPredicate3bDReg:
4322 return Error(Loc, "restricted predicate has range [0, 7].");
4323 case Match_InvalidSVEExactFPImmOperandHalfOne:
4324 return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
4325 case Match_InvalidSVEExactFPImmOperandHalfTwo:
4326 return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
4327 case Match_InvalidSVEExactFPImmOperandZeroOne:
4328 return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
4330 llvm_unreachable("unexpected error code!");
// Maps a single subtarget-feature bit to its user-facing name for the
// "instruction requires:" diagnostic below. NOTE(review): presumably
// provided by TableGen-generated code included elsewhere in this file —
// confirm against upstream.
4334 static const char *getSubtargetFeatureName(uint64_t Val);
// Match the already-parsed operand list against the instruction tables and
// either emit the MCInst or produce a diagnostic.
//
// Visible structure: (1) rewrite several aliases the TableGen matcher cannot
// express (lsl -> ubfm, bfc -> bfm, bfi/sbfiz/ubfiz -> bfm/sbfm/ubfm,
// bfxil/sbfx/ubfx -> bfm/sbfm/ubfm, a movi.2d #0 CPU workaround, and
// W/X register "twiddling" for sxtw/uxtw/sxt[bh]/uxt[bh]); (2) try the
// short-form NEON table first, then the long-form table; (3) dispatch on the
// match result.
//
// NOTE(review): the embedded original line numbers in this chunk are
// non-contiguous, so closing braces, else-branches and some declarations have
// been elided from this view; verify control flow against upstream LLVM's
// AArch64AsmParser.cpp before editing.
4336 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
4337 OperandVector &Operands,
4339 uint64_t &ErrorInfo,
4340 bool MatchingInlineAsm) {
4341 assert(!Operands.empty() && "Unexpect empty operand list!");
4342 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
4343 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
4345 StringRef Tok = Op.getToken();
4346 unsigned NumOperands = Operands.size();
// "lsl Rd, Rn, #imm" is rewritten to "ubfm" with immr=(width-imm)&mask and
// imms=width-1-imm; the 32- vs 64-bit arithmetic below selects by register
// class (the 64-bit else-branch's condition line is elided here).
4348 if (NumOperands == 4 && Tok == "lsl") {
4349 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4350 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4351 if (Op2.isScalarReg() && Op3.isImm()) {
4352 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4354 uint64_t Op3Val = Op3CE->getValue();
4355 uint64_t NewOp3Val = 0;
4356 uint64_t NewOp4Val = 0;
4357 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
4359 NewOp3Val = (32 - Op3Val) & 0x1f;
4360 NewOp4Val = 31 - Op3Val;
4362 NewOp3Val = (64 - Op3Val) & 0x3f;
4363 NewOp4Val = 63 - Op3Val;
4366 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
4367 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
4369 Operands[0] = AArch64Operand::CreateToken(
4370 "ubfm", false, Op.getStartLoc(), getContext());
4371 Operands.push_back(AArch64Operand::CreateImm(
4372 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
4373 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
4374 Op3.getEndLoc(), getContext())
4377 } else if (NumOperands == 4 && Tok == "bfc") {
4378 // FIXME: Horrible hack to handle BFC->BFM alias.
4379 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4380 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
4381 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
4383 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
4384 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
4385 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
4387 if (LSBCE && WidthCE) {
4388 uint64_t LSB = LSBCE->getValue();
4389 uint64_t Width = WidthCE->getValue();
// RegWidth is set to 32 or 64 on lines elided from this chunk (the
// register-class checks between 4392 and 4398).
4391 uint64_t RegWidth = 0;
4392 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4398 if (LSB >= RegWidth)
4399 return Error(LSBOp.getStartLoc(),
4400 "expected integer in range [0, 31]");
4401 if (Width < 1 || Width > RegWidth)
4402 return Error(WidthOp.getStartLoc(),
4403 "expected integer in range [1, 32]");
// NOTE(review): ImmR's declaration and the 32/64-bit branch conditions
// (lines 4404-4410) are elided here.
4407 ImmR = (32 - LSB) & 0x1f;
4409 ImmR = (64 - LSB) & 0x3f;
4411 uint64_t ImmS = Width - 1;
4413 if (ImmR != 0 && ImmS >= ImmR)
4414 return Error(WidthOp.getStartLoc(),
4415 "requested insert overflows register");
4417 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
4418 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
// bfc Rd, #lsb, #width  ==>  bfm Rd, zr, #ImmR, #ImmS (zr supplies the
// zeroed source register).
4419 Operands[0] = AArch64Operand::CreateToken(
4420 "bfm", false, Op.getStartLoc(), getContext());
4421 Operands[2] = AArch64Operand::CreateReg(
4422 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
4423 SMLoc(), SMLoc(), getContext());
4424 Operands[3] = AArch64Operand::CreateImm(
4425 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
4426 Operands.emplace_back(
4427 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
4428 WidthOp.getEndLoc(), getContext()));
4431 } else if (NumOperands == 5) {
4432 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
4433 // UBFIZ -> UBFM aliases.
4434 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
4435 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4436 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4437 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4439 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4440 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4441 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4443 if (Op3CE && Op4CE) {
4444 uint64_t Op3Val = Op3CE->getValue();
4445 uint64_t Op4Val = Op4CE->getValue();
4447 uint64_t RegWidth = 0;
4448 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4454 if (Op3Val >= RegWidth)
4455 return Error(Op3.getStartLoc(),
4456 "expected integer in range [0, 31]");
4457 if (Op4Val < 1 || Op4Val > RegWidth)
4458 return Error(Op4.getStartLoc(),
4459 "expected integer in range [1, 32]");
// Convert (lsb, width) to the BFM-family (immr, imms) encoding.
4461 uint64_t NewOp3Val = 0;
4463 NewOp3Val = (32 - Op3Val) & 0x1f;
4465 NewOp3Val = (64 - Op3Val) & 0x3f;
4467 uint64_t NewOp4Val = Op4Val - 1;
4469 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
4470 return Error(Op4.getStartLoc(),
4471 "requested insert overflows register");
4473 const MCExpr *NewOp3 =
4474 MCConstantExpr::create(NewOp3Val, getContext());
4475 const MCExpr *NewOp4 =
4476 MCConstantExpr::create(NewOp4Val, getContext());
4477 Operands[3] = AArch64Operand::CreateImm(
4478 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
4479 Operands[4] = AArch64Operand::CreateImm(
4480 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
// NOTE(review): the 'if (Tok == "bfi")' guard line (4481) is elided.
4482 Operands[0] = AArch64Operand::CreateToken(
4483 "bfm", false, Op.getStartLoc(), getContext());
4484 else if (Tok == "sbfiz")
4485 Operands[0] = AArch64Operand::CreateToken(
4486 "sbfm", false, Op.getStartLoc(), getContext());
4487 else if (Tok == "ubfiz")
4488 Operands[0] = AArch64Operand::CreateToken(
4489 "ubfm", false, Op.getStartLoc(), getContext());
4491 llvm_unreachable("No valid mnemonic for alias?");
4495 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
4496 // UBFX -> UBFM aliases.
4497 } else if (NumOperands == 5 &&
4498 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
4499 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4500 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4501 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
4503 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
4504 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
4505 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
4507 if (Op3CE && Op4CE) {
4508 uint64_t Op3Val = Op3CE->getValue();
4509 uint64_t Op4Val = Op4CE->getValue();
4511 uint64_t RegWidth = 0;
4512 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4518 if (Op3Val >= RegWidth)
4519 return Error(Op3.getStartLoc(),
4520 "expected integer in range [0, 31]");
4521 if (Op4Val < 1 || Op4Val > RegWidth)
4522 return Error(Op4.getStartLoc(),
4523 "expected integer in range [1, 32]");
// For the extract forms, imms = lsb + width - 1; immr stays as the lsb.
4525 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
4527 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
4528 return Error(Op4.getStartLoc(),
4529 "requested extract overflows register");
4531 const MCExpr *NewOp4 =
4532 MCConstantExpr::create(NewOp4Val, getContext());
4533 Operands[4] = AArch64Operand::CreateImm(
4534 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
// NOTE(review): the 'if (Tok == "bfxil")' guard line (4535) is elided.
4536 Operands[0] = AArch64Operand::CreateToken(
4537 "bfm", false, Op.getStartLoc(), getContext());
4538 else if (Tok == "sbfx")
4539 Operands[0] = AArch64Operand::CreateToken(
4540 "sbfm", false, Op.getStartLoc(), getContext());
4541 else if (Tok == "ubfx")
4542 Operands[0] = AArch64Operand::CreateToken(
4543 "ubfm", false, Op.getStartLoc(), getContext());
4545 llvm_unreachable("No valid mnemonic for alias?");
4551 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
4552 // instruction for FP registers correctly in some rare circumstances. Convert
4553 // it to a safe instruction and warn (because silently changing someone's
4554 // assembly is rude).
4555 if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
4556 NumOperands == 4 && Tok == "movi") {
4557 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
4558 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
4559 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
4560 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
4561 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
4562 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
4563 if (Suffix.lower() == ".2d" &&
4564 cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
4565 Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
4566 " correctly on this CPU, converting to equivalent movi.16b");
4567 // Switch the suffix to .16b.
4568 unsigned Idx = Op1.isToken() ? 1 : 2;
4569 Operands[Idx] = AArch64Operand::CreateToken(".16b", false, IDLoc,
4575 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
4576 // InstAlias can't quite handle this since the reg classes aren't
4578 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
4579 // The source register can be Wn here, but the matcher expects a
4580 // GPR64. Twiddle it here if necessary.
4581 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4582 if (Op.isScalarReg()) {
4583 unsigned Reg = getXRegFromWReg(Op.getReg());
4584 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4585 Op.getStartLoc(), Op.getEndLoc(),
4589 // FIXME: Likewise for sxt[bh] with a Xd dst operand
4590 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
4591 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4592 if (Op.isScalarReg() &&
4593 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4595 // The source register can be Wn here, but the matcher expects a
4596 // GPR64. Twiddle it here if necessary.
4597 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
4598 if (Op.isScalarReg()) {
4599 unsigned Reg = getXRegFromWReg(Op.getReg());
4600 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4602 Op.getEndLoc(), getContext());
4606 // FIXME: Likewise for uxt[bh] with a Xd dst operand
4607 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
4608 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4609 if (Op.isScalarReg() &&
4610 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4612 // The source register can be Wn here, but the matcher expects a
4613 // GPR32. Twiddle it here if necessary.
4614 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
4615 if (Op.isScalarReg()) {
4616 unsigned Reg = getWRegFromXReg(Op.getReg());
4617 Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
4619 Op.getEndLoc(), getContext());
4625 // First try to match against the secondary set of tables containing the
4626 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
// NOTE(review): the declaration of 'Inst' (an MCInst) is on an elided line.
4627 unsigned MatchResult =
4628 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
4630 // If that fails, try against the alternate table containing long-form NEON:
4631 // "fadd v0.2s, v1.2s, v2.2s"
4632 if (MatchResult != Match_Success) {
4633 // But first, save the short-form match result: we can use it in case the
4634 // long-form match also fails.
4635 auto ShortFormNEONErrorInfo = ErrorInfo;
4636 auto ShortFormNEONMatchResult = MatchResult;
4639 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
4641 // Now, both matches failed, and the long-form match failed on the mnemonic
4642 // suffix token operand. The short-form match failure is probably more
4643 // relevant: use it instead.
4644 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
4645 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
4646 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
4647 MatchResult = ShortFormNEONMatchResult;
4648 ErrorInfo = ShortFormNEONErrorInfo;
4652 switch (MatchResult) {
4653 case Match_Success: {
4654 // Perform range checking and other semantic validations
4655 SmallVector<SMLoc, 8> OperandLocs;
4656 NumOperands = Operands.size();
4657 for (unsigned i = 1; i < NumOperands; ++i)
4658 OperandLocs.push_back(Operands[i]->getStartLoc());
4659 if (validateInstruction(Inst, IDLoc, OperandLocs))
4663 Out.EmitInstruction(Inst, getSTI());
4666 case Match_MissingFeature: {
4667 assert(ErrorInfo && "Unknown missing feature!");
4668 // Special case the error message for the very common case where only
4669 // a single subtarget feature is missing (neon, e.g.).
4670 std::string Msg = "instruction requires:";
// NOTE(review): 'Mask' is declared on an elided line (4671); the loop walks
// each feature bit recorded in ErrorInfo and appends its name.
4672 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
4673 if (ErrorInfo & Mask) {
4675 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
4679 return Error(IDLoc, Msg);
4681 case Match_MnemonicFail:
4682 return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
4683 case Match_InvalidOperand: {
4684 SMLoc ErrorLoc = IDLoc;
4686 if (ErrorInfo != ~0ULL) {
4687 if (ErrorInfo >= Operands.size())
4688 return Error(IDLoc, "too few operands for instruction",
4689 SMRange(IDLoc, getTok().getLoc()));
4691 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4692 if (ErrorLoc == SMLoc())
4695 // If the match failed on a suffix token operand, tweak the diagnostic
4697 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
4698 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
4699 MatchResult = Match_InvalidSuffix;
4701 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
// All of the diagnostic-only match kinds below share one handler: locate the
// offending operand and forward to showMatchError.
4703 case Match_InvalidTiedOperand:
4704 case Match_InvalidMemoryIndexed1:
4705 case Match_InvalidMemoryIndexed2:
4706 case Match_InvalidMemoryIndexed4:
4707 case Match_InvalidMemoryIndexed8:
4708 case Match_InvalidMemoryIndexed16:
4709 case Match_InvalidCondCode:
4710 case Match_AddSubRegExtendSmall:
4711 case Match_AddSubRegExtendLarge:
4712 case Match_AddSubSecondSource:
4713 case Match_LogicalSecondSource:
4714 case Match_AddSubRegShift32:
4715 case Match_AddSubRegShift64:
4716 case Match_InvalidMovImm32Shift:
4717 case Match_InvalidMovImm64Shift:
4718 case Match_InvalidFPImm:
4719 case Match_InvalidMemoryWExtend8:
4720 case Match_InvalidMemoryWExtend16:
4721 case Match_InvalidMemoryWExtend32:
4722 case Match_InvalidMemoryWExtend64:
4723 case Match_InvalidMemoryWExtend128:
4724 case Match_InvalidMemoryXExtend8:
4725 case Match_InvalidMemoryXExtend16:
4726 case Match_InvalidMemoryXExtend32:
4727 case Match_InvalidMemoryXExtend64:
4728 case Match_InvalidMemoryXExtend128:
4729 case Match_InvalidMemoryIndexed1SImm4:
4730 case Match_InvalidMemoryIndexed2SImm4:
4731 case Match_InvalidMemoryIndexed3SImm4:
4732 case Match_InvalidMemoryIndexed4SImm4:
4733 case Match_InvalidMemoryIndexed1SImm6:
4734 case Match_InvalidMemoryIndexed16SImm4:
4735 case Match_InvalidMemoryIndexed4SImm7:
4736 case Match_InvalidMemoryIndexed8SImm7:
4737 case Match_InvalidMemoryIndexed16SImm7:
4738 case Match_InvalidMemoryIndexed8UImm5:
4739 case Match_InvalidMemoryIndexed4UImm5:
4740 case Match_InvalidMemoryIndexed2UImm5:
4741 case Match_InvalidMemoryIndexed1UImm6:
4742 case Match_InvalidMemoryIndexed2UImm6:
4743 case Match_InvalidMemoryIndexed4UImm6:
4744 case Match_InvalidMemoryIndexed8UImm6:
4745 case Match_InvalidMemoryIndexedSImm6:
4746 case Match_InvalidMemoryIndexedSImm5:
4747 case Match_InvalidMemoryIndexedSImm8:
4748 case Match_InvalidMemoryIndexedSImm9:
4749 case Match_InvalidMemoryIndexed8SImm10:
4750 case Match_InvalidImm0_1:
4751 case Match_InvalidImm0_7:
4752 case Match_InvalidImm0_15:
4753 case Match_InvalidImm0_31:
4754 case Match_InvalidImm0_63:
4755 case Match_InvalidImm0_127:
4756 case Match_InvalidImm0_255:
4757 case Match_InvalidImm0_65535:
4758 case Match_InvalidImm1_8:
4759 case Match_InvalidImm1_16:
4760 case Match_InvalidImm1_32:
4761 case Match_InvalidImm1_64:
4762 case Match_InvalidSVEAddSubImm8:
4763 case Match_InvalidSVEAddSubImm16:
4764 case Match_InvalidSVEAddSubImm32:
4765 case Match_InvalidSVEAddSubImm64:
4766 case Match_InvalidSVECpyImm8:
4767 case Match_InvalidSVECpyImm16:
4768 case Match_InvalidSVECpyImm32:
4769 case Match_InvalidSVECpyImm64:
4770 case Match_InvalidIndexRange1_1:
4771 case Match_InvalidIndexRange0_15:
4772 case Match_InvalidIndexRange0_7:
4773 case Match_InvalidIndexRange0_3:
4774 case Match_InvalidIndexRange0_1:
4775 case Match_InvalidSVEIndexRange0_63:
4776 case Match_InvalidSVEIndexRange0_31:
4777 case Match_InvalidSVEIndexRange0_15:
4778 case Match_InvalidSVEIndexRange0_7:
4779 case Match_InvalidSVEIndexRange0_3:
4780 case Match_InvalidLabel:
4781 case Match_InvalidComplexRotationEven:
4782 case Match_InvalidComplexRotationOdd:
4783 case Match_InvalidGPR64shifted8:
4784 case Match_InvalidGPR64shifted16:
4785 case Match_InvalidGPR64shifted32:
4786 case Match_InvalidGPR64shifted64:
4787 case Match_InvalidGPR64NoXZRshifted8:
4788 case Match_InvalidGPR64NoXZRshifted16:
4789 case Match_InvalidGPR64NoXZRshifted32:
4790 case Match_InvalidGPR64NoXZRshifted64:
4791 case Match_InvalidZPR32UXTW8:
4792 case Match_InvalidZPR32UXTW16:
4793 case Match_InvalidZPR32UXTW32:
4794 case Match_InvalidZPR32UXTW64:
4795 case Match_InvalidZPR32SXTW8:
4796 case Match_InvalidZPR32SXTW16:
4797 case Match_InvalidZPR32SXTW32:
4798 case Match_InvalidZPR32SXTW64:
4799 case Match_InvalidZPR64UXTW8:
4800 case Match_InvalidZPR64SXTW8:
4801 case Match_InvalidZPR64UXTW16:
4802 case Match_InvalidZPR64SXTW16:
4803 case Match_InvalidZPR64UXTW32:
4804 case Match_InvalidZPR64SXTW32:
4805 case Match_InvalidZPR64UXTW64:
4806 case Match_InvalidZPR64SXTW64:
4807 case Match_InvalidZPR32LSL8:
4808 case Match_InvalidZPR32LSL16:
4809 case Match_InvalidZPR32LSL32:
4810 case Match_InvalidZPR32LSL64:
4811 case Match_InvalidZPR64LSL8:
4812 case Match_InvalidZPR64LSL16:
4813 case Match_InvalidZPR64LSL32:
4814 case Match_InvalidZPR64LSL64:
4815 case Match_InvalidZPR0:
4816 case Match_InvalidZPR8:
4817 case Match_InvalidZPR16:
4818 case Match_InvalidZPR32:
4819 case Match_InvalidZPR64:
4820 case Match_InvalidZPR128:
4821 case Match_InvalidZPR_3b8:
4822 case Match_InvalidZPR_3b16:
4823 case Match_InvalidZPR_3b32:
4824 case Match_InvalidZPR_4b16:
4825 case Match_InvalidZPR_4b32:
4826 case Match_InvalidZPR_4b64:
4827 case Match_InvalidSVEPredicateAnyReg:
4828 case Match_InvalidSVEPattern:
4829 case Match_InvalidSVEPredicateBReg:
4830 case Match_InvalidSVEPredicateHReg:
4831 case Match_InvalidSVEPredicateSReg:
4832 case Match_InvalidSVEPredicateDReg:
4833 case Match_InvalidSVEPredicate3bAnyReg:
4834 case Match_InvalidSVEPredicate3bBReg:
4835 case Match_InvalidSVEPredicate3bHReg:
4836 case Match_InvalidSVEPredicate3bSReg:
4837 case Match_InvalidSVEPredicate3bDReg:
4838 case Match_InvalidSVEExactFPImmOperandHalfOne:
4839 case Match_InvalidSVEExactFPImmOperandHalfTwo:
4840 case Match_InvalidSVEExactFPImmOperandZeroOne:
// NOTE(review): the case's opening brace (lines 4841-4842) is elided here.
4843 if (ErrorInfo >= Operands.size())
4844 return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
4845 // Any time we get here, there's nothing fancy to do. Just get the
4846 // operand SMLoc and display the diagnostic.
4847 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4848 if (ErrorLoc == SMLoc())
4850 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
4854 llvm_unreachable("Implement any new match types added!");
4857 /// ParseDirective parses the arm specific directives
// Dispatches a leading '.' identifier to the matching parseDirective* helper
// (.arch, .cpu, .tlsdesccall, .ltorg/.pool, .unreq, .inst), plus the
// MachO-specific linker-optimization-hint directive.
// NOTE(review): the tail of this function (the else-branch guarding the LOH
// check and the final return, lines 4877 and 4880+) is elided in this chunk;
// confirm against upstream before editing.
4858 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
4859 const MCObjectFileInfo::Environment Format =
4860 getContext().getObjectFileInfo()->getObjectFileType();
// IsMachO gates object-format-specific directives below.
4861 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
4863 StringRef IDVal = DirectiveID.getIdentifier();
4864 SMLoc Loc = DirectiveID.getLoc();
4865 if (IDVal == ".arch")
4866 parseDirectiveArch(Loc);
4867 else if (IDVal == ".cpu")
4868 parseDirectiveCPU(Loc);
4869 else if (IDVal == ".tlsdesccall")
4870 parseDirectiveTLSDescCall(Loc);
4871 else if (IDVal == ".ltorg" || IDVal == ".pool")
4872 parseDirectiveLtorg(Loc);
4873 else if (IDVal == ".unreq")
4874 parseDirectiveUnreq(Loc);
4875 else if (IDVal == ".inst")
4876 parseDirectiveInst(Loc);
4878 if (IDVal == MCLOHDirectiveName())
4879 parseDirectiveLOH(IDVal, Loc);
// Table mapping extension names accepted after '+' in .arch/.cpu directives
// to the subtarget feature bits they enable.
// NOTE(review): the first struct field (the name, line 4888) and the tail of
// the initializer list (after "lse", line 4901+) are elided in this chunk.
4887 static const struct {
4889 const FeatureBitset Features;
4890 } ExtensionMap[] = {
4891 { "crc", {AArch64::FeatureCRC} },
4892 { "sm4", {AArch64::FeatureSM4} },
4893 { "sha3", {AArch64::FeatureSHA3} },
4894 { "sha2", {AArch64::FeatureSHA2} },
4895 { "aes", {AArch64::FeatureAES} },
4896 { "crypto", {AArch64::FeatureCrypto} },
4897 { "fp", {AArch64::FeatureFPARMv8} },
4898 { "simd", {AArch64::FeatureNEON} },
4899 { "ras", {AArch64::FeatureRAS} },
4900 { "lse", {AArch64::FeatureLSE} },
4902 // FIXME: Unsupported extensions
// Expands the umbrella "crypto"/"nocrypto" extension request into the
// concrete sub-extensions it implies for the given architecture: sha2+aes
// for v8.1-v8.3 (and, per the comment below, the pre-v8.1 default), plus
// sm4+sha3 for v8.4; the "no" variants are appended symmetrically.
// "nocrypto" wins when both are requested.
// NOTE(review): both 'switch (ArchKind)' headers, the default/fall-through
// labels and the 'break;' statements (e.g. lines 4920, 4928, 4938) are
// elided from this chunk; confirm the switch structure against upstream.
4909 static void ExpandCryptoAEK(AArch64::ArchKind ArchKind,
4910 SmallVector<StringRef, 4> &RequestedExtensions) {
4911 const bool NoCrypto =
4912 (std::find(RequestedExtensions.begin(), RequestedExtensions.end(),
4913 "nocrypto") != std::end(RequestedExtensions));
// NOTE(review): the declaration 'const bool Crypto =' (line 4914) is elided.
4915 (std::find(RequestedExtensions.begin(), RequestedExtensions.end(),
4916 "crypto") != std::end(RequestedExtensions));
4918 if (!NoCrypto && Crypto) {
4921 // Map 'generic' (and others) to sha2 and aes, because
4922 // that was the traditional meaning of crypto.
4923 case AArch64::ArchKind::ARMV8_1A:
4924 case AArch64::ArchKind::ARMV8_2A:
4925 case AArch64::ArchKind::ARMV8_3A:
4926 RequestedExtensions.push_back("sha2");
4927 RequestedExtensions.push_back("aes");
4929 case AArch64::ArchKind::ARMV8_4A:
4930 RequestedExtensions.push_back("sm4");
4931 RequestedExtensions.push_back("sha3");
4932 RequestedExtensions.push_back("sha2");
4933 RequestedExtensions.push_back("aes");
4936 } else if (NoCrypto) {
4939 // Map 'generic' (and others) to sha2 and aes, because
4940 // that was the traditional meaning of crypto.
4941 case AArch64::ArchKind::ARMV8_1A:
4942 case AArch64::ArchKind::ARMV8_2A:
4943 case AArch64::ArchKind::ARMV8_3A:
4944 RequestedExtensions.push_back("nosha2");
4945 RequestedExtensions.push_back("noaes");
4947 case AArch64::ArchKind::ARMV8_4A:
4948 RequestedExtensions.push_back("nosm4");
4949 RequestedExtensions.push_back("nosha3");
4950 RequestedExtensions.push_back("nosha2");
4951 RequestedExtensions.push_back("noaes");
4957 /// parseDirectiveArch
4959 bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
4960 SMLoc ArchLoc = getLoc();
4962 StringRef Arch, ExtensionString;
4963 std::tie(Arch, ExtensionString) =
4964 getParser().parseStringToEndOfStatement().trim().split('+');
4966 AArch64::ArchKind ID = AArch64::parseArch(Arch);
4967 if (ID == AArch64::ArchKind::INVALID)
4968 return Error(ArchLoc, "unknown arch name");
4970 if (parseToken(AsmToken::EndOfStatement))
4973 // Get the architecture and extension features.
4974 std::vector<StringRef> AArch64Features;
4975 AArch64::getArchFeatures(ID, AArch64Features);
4976 AArch64::getExtensionFeatures(AArch64::getDefaultExtensions("generic", ID),
4979 MCSubtargetInfo &STI = copySTI();
4980 std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
4981 STI.setDefaultFeatures("generic", join(ArchFeatures.begin(), ArchFeatures.end(), ","));
4983 SmallVector<StringRef, 4> RequestedExtensions;
4984 if (!ExtensionString.empty())
4985 ExtensionString.split(RequestedExtensions, '+');
4987 ExpandCryptoAEK(ID, RequestedExtensions);
4989 FeatureBitset Features = STI.getFeatureBits();
4990 for (auto Name : RequestedExtensions) {
4991 bool EnableFeature = true;
4993 if (Name.startswith_lower("no")) {
4994 EnableFeature = false;
4995 Name = Name.substr(2);
4998 for (const auto &Extension : ExtensionMap) {
4999 if (Extension.Name != Name)
5002 if (Extension.Features.none())
5003 report_fatal_error("unsupported architectural extension: " + Name);
5005 FeatureBitset ToggleFeatures = EnableFeature
5006 ? (~Features & Extension.Features)
5007 : ( Features & Extension.Features);
5009 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5010 setAvailableFeatures(Features);
5017 static SMLoc incrementLoc(SMLoc L, int Offset) {
5018 return SMLoc::getFromPointer(L.getPointer() + Offset);
5021 /// parseDirectiveCPU
5023 bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
5024 SMLoc CurLoc = getLoc();
5026 StringRef CPU, ExtensionString;
5027 std::tie(CPU, ExtensionString) =
5028 getParser().parseStringToEndOfStatement().trim().split('+');
5030 if (parseToken(AsmToken::EndOfStatement))
5033 SmallVector<StringRef, 4> RequestedExtensions;
5034 if (!ExtensionString.empty())
5035 ExtensionString.split(RequestedExtensions, '+');
5037 // FIXME This is using tablegen data, but should be moved to ARMTargetParser
5038 // once that is tablegen'ed
5039 if (!getSTI().isCPUStringValid(CPU)) {
5040 Error(CurLoc, "unknown CPU name");
5044 MCSubtargetInfo &STI = copySTI();
5045 STI.setDefaultFeatures(CPU, "");
5046 CurLoc = incrementLoc(CurLoc, CPU.size());
5048 ExpandCryptoAEK(llvm::AArch64::getCPUArchKind(CPU), RequestedExtensions);
5050 FeatureBitset Features = STI.getFeatureBits();
5051 for (auto Name : RequestedExtensions) {
5052 // Advance source location past '+'.
5053 CurLoc = incrementLoc(CurLoc, 1);
5055 bool EnableFeature = true;
5057 if (Name.startswith_lower("no")) {
5058 EnableFeature = false;
5059 Name = Name.substr(2);
5062 bool FoundExtension = false;
5063 for (const auto &Extension : ExtensionMap) {
5064 if (Extension.Name != Name)
5067 if (Extension.Features.none())
5068 report_fatal_error("unsupported architectural extension: " + Name);
5070 FeatureBitset ToggleFeatures = EnableFeature
5071 ? (~Features & Extension.Features)
5072 : ( Features & Extension.Features);
5074 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
5075 setAvailableFeatures(Features);
5076 FoundExtension = true;
5081 if (!FoundExtension)
5082 Error(CurLoc, "unsupported architectural extension");
5084 CurLoc = incrementLoc(CurLoc, Name.size());
5089 /// parseDirectiveInst
5090 /// ::= .inst opcode [, ...]
5091 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
5092 if (getLexer().is(AsmToken::EndOfStatement))
5093 return Error(Loc, "expected expression following '.inst' directive");
5095 auto parseOp = [&]() -> bool {
5098 if (check(getParser().parseExpression(Expr), L, "expected expression"))
5100 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
5101 if (check(!Value, L, "expected constant expression"))
5103 getTargetStreamer().emitInst(Value->getValue());
5107 if (parseMany(parseOp))
5108 return addErrorSuffix(" in '.inst' directive");
5112 // parseDirectiveTLSDescCall:
5113 // ::= .tlsdesccall symbol
5114 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
5116 if (check(getParser().parseIdentifier(Name), L,
5117 "expected symbol after directive") ||
5118 parseToken(AsmToken::EndOfStatement))
5121 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
5122 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
5123 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
5126 Inst.setOpcode(AArch64::TLSDESCCALL);
5127 Inst.addOperand(MCOperand::createExpr(Expr));
5129 getParser().getStreamer().EmitInstruction(Inst, getSTI());
5133 /// ::= .loh <lohName | lohId> label1, ..., labelN
5134 /// The number of arguments depends on the loh identifier.
5135 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
5137 if (getParser().getTok().isNot(AsmToken::Identifier)) {
5138 if (getParser().getTok().isNot(AsmToken::Integer))
5139 return TokError("expected an identifier or a number in directive");
5140 // We successfully get a numeric value for the identifier.
5141 // Check if it is valid.
5142 int64_t Id = getParser().getTok().getIntVal();
5143 if (Id <= -1U && !isValidMCLOHType(Id))
5144 return TokError("invalid numeric identifier in directive");
5145 Kind = (MCLOHType)Id;
5147 StringRef Name = getTok().getIdentifier();
5148 // We successfully parse an identifier.
5149 // Check if it is a recognized one.
5150 int Id = MCLOHNameToId(Name);
5153 return TokError("invalid identifier in directive");
5154 Kind = (MCLOHType)Id;
5156 // Consume the identifier.
5158 // Get the number of arguments of this LOH.
5159 int NbArgs = MCLOHIdToNbArgs(Kind);
5161 assert(NbArgs != -1 && "Invalid number of arguments");
5163 SmallVector<MCSymbol *, 3> Args;
5164 for (int Idx = 0; Idx < NbArgs; ++Idx) {
5166 if (getParser().parseIdentifier(Name))
5167 return TokError("expected identifier in directive");
5168 Args.push_back(getContext().getOrCreateSymbol(Name));
5170 if (Idx + 1 == NbArgs)
5172 if (parseToken(AsmToken::Comma,
5173 "unexpected token in '" + Twine(IDVal) + "' directive"))
5176 if (parseToken(AsmToken::EndOfStatement,
5177 "unexpected token in '" + Twine(IDVal) + "' directive"))
5180 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
5184 /// parseDirectiveLtorg
5185 /// ::= .ltorg | .pool
5186 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
5187 if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
5189 getTargetStreamer().emitCurrentConstantPool();
5193 /// parseDirectiveReq
5194 /// ::= name .req registername
5195 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
5196 MCAsmParser &Parser = getParser();
5197 Parser.Lex(); // Eat the '.req' token.
5198 SMLoc SRegLoc = getLoc();
5199 RegKind RegisterKind = RegKind::Scalar;
5201 OperandMatchResultTy ParseRes = tryParseScalarRegister(RegNum);
5203 if (ParseRes != MatchOperand_Success) {
5205 RegisterKind = RegKind::NeonVector;
5206 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
5208 if (ParseRes == MatchOperand_ParseFail)
5211 if (ParseRes == MatchOperand_Success && !Kind.empty())
5212 return Error(SRegLoc, "vector register without type specifier expected");
5215 if (ParseRes != MatchOperand_Success) {
5217 RegisterKind = RegKind::SVEDataVector;
5219 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
5221 if (ParseRes == MatchOperand_ParseFail)
5224 if (ParseRes == MatchOperand_Success && !Kind.empty())
5225 return Error(SRegLoc,
5226 "sve vector register without type specifier expected");
5229 if (ParseRes != MatchOperand_Success) {
5231 RegisterKind = RegKind::SVEPredicateVector;
5232 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
5234 if (ParseRes == MatchOperand_ParseFail)
5237 if (ParseRes == MatchOperand_Success && !Kind.empty())
5238 return Error(SRegLoc,
5239 "sve predicate register without type specifier expected");
5242 if (ParseRes != MatchOperand_Success)
5243 return Error(SRegLoc, "register name or alias expected");
5245 // Shouldn't be anything else.
5246 if (parseToken(AsmToken::EndOfStatement,
5247 "unexpected input in .req directive"))
5250 auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
5251 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
5252 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
5257 /// parseDirectiveUneq
5258 /// ::= .unreq registername
5259 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
5260 MCAsmParser &Parser = getParser();
5261 if (getTok().isNot(AsmToken::Identifier))
5262 return TokError("unexpected input in .unreq directive.");
5263 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
5264 Parser.Lex(); // Eat the identifier.
5265 if (parseToken(AsmToken::EndOfStatement))
5266 return addErrorSuffix("in '.unreq' directive");
5271 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
5272 AArch64MCExpr::VariantKind &ELFRefKind,
5273 MCSymbolRefExpr::VariantKind &DarwinRefKind,
5275 ELFRefKind = AArch64MCExpr::VK_INVALID;
5276 DarwinRefKind = MCSymbolRefExpr::VK_None;
5279 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
5280 ELFRefKind = AE->getKind();
5281 Expr = AE->getSubExpr();
5284 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
5286 // It's a simple symbol reference with no addend.
5287 DarwinRefKind = SE->getKind();
5291 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
5295 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
5298 DarwinRefKind = SE->getKind();
5300 if (BE->getOpcode() != MCBinaryExpr::Add &&
5301 BE->getOpcode() != MCBinaryExpr::Sub)
5304 // See if the addend is a constant, otherwise there's more going
5305 // on here than we can deal with.
5306 auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
5310 Addend = AddendExpr->getValue();
5311 if (BE->getOpcode() == MCBinaryExpr::Sub)
5314 // It's some symbol reference + a constant addend, but really
5315 // shouldn't use both Darwin and ELF syntax.
5316 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
5317 DarwinRefKind == MCSymbolRefExpr::VK_None;
5320 /// Force static initialization.
5321 extern "C" void LLVMInitializeAArch64AsmParser() {
5322 RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
5323 RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
5324 RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
5327 #define GET_REGISTER_MATCHER
5328 #define GET_SUBTARGET_FEATURE_NAME
5329 #define GET_MATCHER_IMPLEMENTATION
5330 #define GET_MNEMONIC_SPELL_CHECKER
5331 #include "AArch64GenAsmMatcher.inc"
// Define this matcher function after the auto-generated include so we
// have the match class enum definitions.
//
// Custom hook run by the generated matcher: accepts an operand for a
// literal-immediate match class only if it is a constant immediate whose
// value equals the one the match class demands.
unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
  AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
  // If the kind is a token for a literal immediate, check if our asm
  // operand matches. This is for InstAliases which have a fixed-value
  // immediate in the syntax.
  int64_t ExpectedVal;
  // NOTE(review): the switch over 'Kind' that assigns ExpectedVal for each
  // generated literal-immediate match class (and the tail of the function
  // signature above) appears to be missing from this listing — confirm
  // against the original file before relying on the control flow below.
    return Match_InvalidOperand;
    return Match_InvalidOperand;
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
    return Match_InvalidOperand;
  // Accept only when the parsed constant equals the class's fixed value.
  if (CE->getValue() == ExpectedVal)
    return Match_Success;
  return Match_InvalidOperand;
5395 OperandMatchResultTy
5396 AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
5400 if (getParser().getTok().isNot(AsmToken::Identifier)) {
5401 Error(S, "expected register");
5402 return MatchOperand_ParseFail;
5406 OperandMatchResultTy Res = tryParseScalarRegister(FirstReg);
5407 if (Res != MatchOperand_Success)
5408 return MatchOperand_ParseFail;
5410 const MCRegisterClass &WRegClass =
5411 AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
5412 const MCRegisterClass &XRegClass =
5413 AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
5415 bool isXReg = XRegClass.contains(FirstReg),
5416 isWReg = WRegClass.contains(FirstReg);
5417 if (!isXReg && !isWReg) {
5418 Error(S, "expected first even register of a "
5419 "consecutive same-size even/odd register pair");
5420 return MatchOperand_ParseFail;
5423 const MCRegisterInfo *RI = getContext().getRegisterInfo();
5424 unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
5426 if (FirstEncoding & 0x1) {
5427 Error(S, "expected first even register of a "
5428 "consecutive same-size even/odd register pair");
5429 return MatchOperand_ParseFail;
5432 if (getParser().getTok().isNot(AsmToken::Comma)) {
5433 Error(getLoc(), "expected comma");
5434 return MatchOperand_ParseFail;
5441 Res = tryParseScalarRegister(SecondReg);
5442 if (Res != MatchOperand_Success)
5443 return MatchOperand_ParseFail;
5445 if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
5446 (isXReg && !XRegClass.contains(SecondReg)) ||
5447 (isWReg && !WRegClass.contains(SecondReg))) {
5448 Error(E,"expected second odd register of a "
5449 "consecutive same-size even/odd register pair");
5450 return MatchOperand_ParseFail;
5455 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
5456 &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
5458 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
5459 &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
5462 Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
5463 getLoc(), getContext()));
5465 return MatchOperand_Success;
5468 template <bool ParseShiftExtend, bool ParseSuffix>
5469 OperandMatchResultTy
5470 AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
5471 const SMLoc S = getLoc();
5472 // Check for a SVE vector register specifier first.
5476 OperandMatchResultTy Res =
5477 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
5479 if (Res != MatchOperand_Success)
5482 if (ParseSuffix && Kind.empty())
5483 return MatchOperand_NoMatch;
5485 const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
5487 return MatchOperand_NoMatch;
5489 unsigned ElementWidth = KindRes->second;
5491 // No shift/extend is the default.
5492 if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
5493 Operands.push_back(AArch64Operand::CreateVectorReg(
5494 RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));
5496 OperandMatchResultTy Res = tryParseVectorIndex(Operands);
5497 if (Res == MatchOperand_ParseFail)
5498 return MatchOperand_ParseFail;
5499 return MatchOperand_Success;
5506 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
5507 Res = tryParseOptionalShiftExtend(ExtOpnd);
5508 if (Res != MatchOperand_Success)
5511 auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
5512 Operands.push_back(AArch64Operand::CreateVectorReg(
5513 RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
5514 getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
5515 Ext->hasShiftExtendAmount()));
5517 return MatchOperand_Success;
5520 OperandMatchResultTy
5521 AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
5522 MCAsmParser &Parser = getParser();
5524 SMLoc SS = getLoc();
5525 const AsmToken &TokE = Parser.getTok();
5526 bool IsHash = TokE.is(AsmToken::Hash);
5528 if (!IsHash && TokE.isNot(AsmToken::Identifier))
5529 return MatchOperand_NoMatch;
5533 Parser.Lex(); // Eat hash
5535 // Parse the immediate operand.
5536 const MCExpr *ImmVal;
5538 if (Parser.parseExpression(ImmVal))
5539 return MatchOperand_ParseFail;
5541 auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
5543 return MatchOperand_ParseFail;
5545 Pattern = MCE->getValue();
5547 // Parse the pattern
5548 auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
5550 return MatchOperand_NoMatch;
5553 Pattern = Pat->Encoding;
5554 assert(Pattern >= 0 && Pattern < 32);
5558 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
5559 SS, getLoc(), getContext()));
5561 return MatchOperand_Success;