1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "MCTargetDesc/AArch64MCTargetDesc.h"
13 #include "MCTargetDesc/AArch64TargetStreamer.h"
14 #include "Utils/AArch64BaseInfo.h"
15 #include "llvm/ADT/APFloat.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/STLExtras.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/ADT/StringExtras.h"
21 #include "llvm/ADT/StringMap.h"
22 #include "llvm/ADT/StringRef.h"
23 #include "llvm/ADT/StringSwitch.h"
24 #include "llvm/ADT/Twine.h"
25 #include "llvm/MC/MCContext.h"
26 #include "llvm/MC/MCExpr.h"
27 #include "llvm/MC/MCInst.h"
28 #include "llvm/MC/MCLinkerOptimizationHint.h"
29 #include "llvm/MC/MCObjectFileInfo.h"
30 #include "llvm/MC/MCParser/MCAsmLexer.h"
31 #include "llvm/MC/MCParser/MCAsmParser.h"
32 #include "llvm/MC/MCParser/MCAsmParserExtension.h"
33 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
34 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
35 #include "llvm/MC/MCRegisterInfo.h"
36 #include "llvm/MC/MCStreamer.h"
37 #include "llvm/MC/MCSubtargetInfo.h"
38 #include "llvm/MC/MCSymbol.h"
39 #include "llvm/MC/MCTargetOptions.h"
40 #include "llvm/MC/SubtargetFeature.h"
41 #include "llvm/Support/Casting.h"
42 #include "llvm/Support/Compiler.h"
43 #include "llvm/Support/ErrorHandling.h"
44 #include "llvm/Support/MathExtras.h"
45 #include "llvm/Support/SMLoc.h"
46 #include "llvm/Support/TargetParser.h"
47 #include "llvm/Support/TargetRegistry.h"
48 #include "llvm/Support/raw_ostream.h"
// NOTE(review): this chunk is a numbered listing with many original lines
// elided (the leading integer on each line is the original file line number);
// several closing braces and statements are not visible here.
69 class AArch64AsmParser : public MCTargetAsmParser {
71 StringRef Mnemonic; ///< Instruction mnemonic.
73 // Map of register aliases registered via the .req directive.
74 StringMap<std::pair<RegKind, unsigned>> RegisterReqs;
// Returns the AArch64-specific target streamer. Safe to static_cast because
// the constructor below installs an AArch64TargetStreamer when none exists.
76 AArch64TargetStreamer &getTargetStreamer() {
77 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
78 return static_cast<AArch64TargetStreamer &>(TS);
// Location of the lexer's current token, used for diagnostics.
81 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
// Parsing helpers for SYS-class aliases (IC/DC/AT/TLBI), condition codes,
// registers, symbolic immediates and vector lists.
83 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
84 void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
85 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
86 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
87 unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
88 int tryParseRegister();
89 int tryMatchVectorRegister(StringRef &Kind, bool expected);
90 bool parseRegister(OperandVector &Operands);
91 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
92 bool parseVectorList(OperandVector &Operands);
93 bool parseOperand(OperandVector &Operands, bool isCondCode,
96 bool showMatchError(SMLoc Loc, unsigned ErrCode, OperandVector &Operands);
// Assembler directive handlers (.arch, .cpu, .word/.inst, TLS, LOH, .req...).
98 bool parseDirectiveArch(SMLoc L);
99 bool parseDirectiveCPU(SMLoc L);
100 bool parseDirectiveWord(unsigned Size, SMLoc L);
101 bool parseDirectiveInst(SMLoc L);
103 bool parseDirectiveTLSDescCall(SMLoc L);
105 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
106 bool parseDirectiveLtorg(SMLoc L);
108 bool parseDirectiveReq(StringRef Name, SMLoc L);
109 bool parseDirectiveUnreq(SMLoc L);
// Post-parse semantic validation and the final match/emit hook required by
// MCTargetAsmParser.
111 bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
112 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
113 OperandVector &Operands, MCStreamer &Out,
115 bool MatchingInlineAsm) override;
116 /// @name Auto-generated Match Functions
119 #define GET_ASSEMBLER_HEADER
120 #include "AArch64GenAsmMatcher.inc"
// Custom operand parsers invoked by the generated matcher via the
// "ParserMethod" operand attribute.
124 OperandMatchResultTy tryParseSVERegister(int &Reg, StringRef &Kind,
126 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
127 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
128 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
129 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
130 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
131 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
132 OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
133 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
134 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
135 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
136 OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
137 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
138 bool tryParseNeonVectorRegister(OperandVector &Operands);
139 OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
140 template <bool ParseSuffix>
141 OperandMatchResultTy tryParseSVEDataVector(OperandVector &Operands);
142 OperandMatchResultTy tryParseSVEPredicateVector(OperandVector &Operands);
// Target-specific match results, extending the generated diagnostics.
145 enum AArch64MatchResultTy {
146 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
147 #define GET_OPERAND_DIAGNOSTIC_TYPES
148 #include "AArch64GenAsmMatcher.inc"
// Constructor: records the ILP32 ABI flag, installs a default target
// streamer if the MCStreamer does not already have one, and seeds the
// available-feature bits from the subtarget.
152 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
153 const MCInstrInfo &MII, const MCTargetOptions &Options)
154 : MCTargetAsmParser(Options, STI, MII) {
155 IsILP32 = Options.getABIName() == "ilp32";
156 MCAsmParserExtension::Initialize(Parser);
157 MCStreamer &S = getParser().getStreamer();
158 if (S.getTargetStreamer() == nullptr)
159 new AArch64TargetStreamer(S);
161 // Initialize the set of available features.
162 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
// MCTargetAsmParser interface overrides.
165 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
166 SMLoc NameLoc, OperandVector &Operands) override;
167 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
168 bool ParseDirective(AsmToken DirectiveID) override;
169 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
170 unsigned Kind) override;
// Classifies a symbolic expression into its ELF/Darwin relocation variant
// plus a constant addend; used by the operand predicates below.
172 static bool classifySymbolRef(const MCExpr *Expr,
173 AArch64MCExpr::VariantKind &ELFRefKind,
174 MCSymbolRefExpr::VariantKind &DarwinRefKind,
178 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
180 class AArch64Operand : public MCParsedAsmOperand {
// Source range of the operand text, for diagnostics.
199 SMLoc StartLoc, EndLoc;
204 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
// Per-kind payload structs; exactly one is active, selected by Kind.
// (Several members of these structs are elided from this listing.)
214 struct VectorListOp {
217 unsigned NumElements;
218 unsigned ElementKind;
221 struct VectorIndexOp {
229 struct ShiftedImmOp {
231 unsigned ShiftAmount;
235 AArch64CC::CondCode Code;
239 unsigned Val; // Encoded 8-bit representation.
245 unsigned Val; // Not the enum since not all values have names.
253 uint32_t PStateField;
272 struct ShiftExtendOp {
273 AArch64_AM::ShiftExtendType Type;
275 bool HasExplicitAmount;
// The payload members themselves (union-style storage).
285 struct VectorListOp VectorList;
286 struct VectorIndexOp VectorIndex;
288 struct ShiftedImmOp ShiftedImm;
289 struct CondCodeOp CondCode;
290 struct FPImmOp FPImm;
291 struct BarrierOp Barrier;
292 struct SysRegOp SysReg;
293 struct SysCRImmOp SysCRImm;
294 struct PrefetchOp Prefetch;
295 struct PSBHintOp PSBHint;
296 struct ShiftExtendOp ShiftExtend;
299 // Keep the MCContext around as the MCExprs may need to be manipulated during
300 // the add<>Operands() calls.
304 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
// Copy constructor: copies only the payload member that matches the source
// operand's Kind (the surrounding switch is elided from this listing).
306 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
308 StartLoc = o.StartLoc;
318 ShiftedImm = o.ShiftedImm;
321 CondCode = o.CondCode;
333 VectorList = o.VectorList;
336 VectorIndex = o.VectorIndex;
342 SysCRImm = o.SysCRImm;
345 Prefetch = o.Prefetch;
351 ShiftExtend = o.ShiftExtend;
// Checked accessors: each asserts that the operand currently holds the
// matching Kind before reading the corresponding payload member.
356 /// getStartLoc - Get the location of the first token of this operand.
357 SMLoc getStartLoc() const override { return StartLoc; }
358 /// getEndLoc - Get the location of the last token of this operand.
359 SMLoc getEndLoc() const override { return EndLoc; }
361 StringRef getToken() const {
362 assert(Kind == k_Token && "Invalid access!");
363 return StringRef(Tok.Data, Tok.Length);
366 bool isTokenSuffix() const {
367 assert(Kind == k_Token && "Invalid access!");
371 const MCExpr *getImm() const {
372 assert(Kind == k_Immediate && "Invalid access!");
376 const MCExpr *getShiftedImmVal() const {
377 assert(Kind == k_ShiftedImm && "Invalid access!");
378 return ShiftedImm.Val;
381 unsigned getShiftedImmShift() const {
382 assert(Kind == k_ShiftedImm && "Invalid access!");
383 return ShiftedImm.ShiftAmount;
386 AArch64CC::CondCode getCondCode() const {
387 assert(Kind == k_CondCode && "Invalid access!");
388 return CondCode.Code;
391 unsigned getFPImm() const {
392 assert(Kind == k_FPImm && "Invalid access!");
396 unsigned getBarrier() const {
397 assert(Kind == k_Barrier && "Invalid access!");
401 StringRef getBarrierName() const {
402 assert(Kind == k_Barrier && "Invalid access!");
403 return StringRef(Barrier.Data, Barrier.Length);
406 unsigned getReg() const override {
407 assert(Kind == k_Register && "Invalid access!");
// First register of a vector list and the number of registers in it.
411 unsigned getVectorListStart() const {
412 assert(Kind == k_VectorList && "Invalid access!");
413 return VectorList.RegNum;
416 unsigned getVectorListCount() const {
417 assert(Kind == k_VectorList && "Invalid access!");
418 return VectorList.Count;
421 unsigned getVectorIndex() const {
422 assert(Kind == k_VectorIndex && "Invalid access!");
423 return VectorIndex.Val;
426 StringRef getSysReg() const {
427 assert(Kind == k_SysReg && "Invalid access!");
428 return StringRef(SysReg.Data, SysReg.Length);
431 unsigned getSysCR() const {
432 assert(Kind == k_SysCR && "Invalid access!");
436 unsigned getPrefetch() const {
437 assert(Kind == k_Prefetch && "Invalid access!");
441 unsigned getPSBHint() const {
442 assert(Kind == k_PSBHint && "Invalid access!");
446 StringRef getPSBHintName() const {
447 assert(Kind == k_PSBHint && "Invalid access!");
448 return StringRef(PSBHint.Data, PSBHint.Length);
451 StringRef getPrefetchName() const {
452 assert(Kind == k_Prefetch && "Invalid access!");
453 return StringRef(Prefetch.Data, Prefetch.Length);
456 AArch64_AM::ShiftExtendType getShiftExtendType() const {
457 assert(Kind == k_ShiftExtend && "Invalid access!");
458 return ShiftExtend.Type;
461 unsigned getShiftExtendAmount() const {
462 assert(Kind == k_ShiftExtend && "Invalid access!");
463 return ShiftExtend.Amount;
// True when the shift/extend amount was written explicitly in the source.
466 bool hasShiftExtendAmount() const {
467 assert(Kind == k_ShiftExtend && "Invalid access!");
468 return ShiftExtend.HasExplicitAmount;
471 bool isImm() const override { return Kind == k_Immediate; }
472 bool isMem() const override { return false; }
// Signed 9-bit immediate: [-256, 255].
473 bool isSImm9() const {
476 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
479 int64_t Val = MCE->getValue();
480 return (Val >= -256 && Val < 256);
// Signed 10-bit immediate scaled by 8: multiples of 8 in [-4096, 4088].
482 bool isSImm10s8() const {
485 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
488 int64_t Val = MCE->getValue();
489 return (Val >= -4096 && Val < 4089 && (Val & 7) == 0);
// Signed 7-bit immediate scaled by 4: multiples of 4 in [-256, 252].
491 bool isSImm7s4() const {
494 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
497 int64_t Val = MCE->getValue();
498 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
// Signed 7-bit immediate scaled by 8: multiples of 8 in [-512, 504].
500 bool isSImm7s8() const {
503 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
506 int64_t Val = MCE->getValue();
507 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
// Signed 7-bit immediate scaled by 16: multiples of 16 in [-1024, 1008].
509 bool isSImm7s16() const {
512 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
515 int64_t Val = MCE->getValue();
516 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
// Checks whether a symbolic expression is usable as a scaled unsigned 12-bit
// load/store offset (e.g. :lo12: and page-offset relocation variants).
519 bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
520 AArch64MCExpr::VariantKind ELFRefKind;
521 MCSymbolRefExpr::VariantKind DarwinRefKind;
523 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
525 // If we don't understand the expression, assume the best and
526 // let the fixup and relocation code deal with it.
// Any LO12-style relocation variant: only the alignment of the addend is
// checked here; the page-offset truncation happens at fixup time.
530 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
531 ELFRefKind == AArch64MCExpr::VK_LO12 ||
532 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
533 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
534 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
535 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
536 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
537 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
538 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
539 // Note that we don't range-check the addend. It's adjusted modulo page
540 // size when converted, so there is no "out of range" condition when using
542 return Addend >= 0 && (Addend % Scale) == 0;
543 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
544 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
545 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
// Unsigned 12-bit offset, scaled by the template argument (the access size
// in bytes); symbolic expressions defer to isSymbolicUImm12Offset above.
552 template <int Scale> bool isUImm12Offset() const {
556 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
558 return isSymbolicUImm12Offset(getImm(), Scale);
560 int64_t Val = MCE->getValue();
561 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
// Generic inclusive-range check [N, M] on a constant immediate.
564 template <int N, int M>
565 bool isImmInRange() const {
568 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
571 int64_t Val = MCE->getValue();
572 return (Val >= N && Val <= M);
// 32-bit logical immediate: upper half must be all-zero or all-one
// (sign-extension of the low 32 bits), then defer to the AM encoder check.
575 bool isLogicalImm32() const {
578 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
581 int64_t Val = MCE->getValue();
582 if (Val >> 32 != 0 && Val >> 32 != ~0LL)
585 return AArch64_AM::isLogicalImmediate(Val, 32);
588 bool isLogicalImm64() const {
591 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
594 return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
// "Not" variants: the inverted value must be a valid logical immediate
// (used by aliases like BIC that encode the complement).
597 bool isLogicalImm32Not() const {
600 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
603 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
604 return AArch64_AM::isLogicalImmediate(Val, 32);
607 bool isLogicalImm64Not() const {
610 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
613 return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
616 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
// ADD/SUB immediate: either a plain or LSL-#0/#12 shifted immediate in
// [0, 0xfff], or a relocatable low-12-bits symbol reference.
618 bool isAddSubImm() const {
619 if (!isShiftedImm() && !isImm())
624 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
625 if (isShiftedImm()) {
626 unsigned Shift = ShiftedImm.ShiftAmount;
627 Expr = ShiftedImm.Val;
628 if (Shift != 0 && Shift != 12)
634 AArch64MCExpr::VariantKind ELFRefKind;
635 MCSymbolRefExpr::VariantKind DarwinRefKind;
637 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
638 DarwinRefKind, Addend)) {
639 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
640 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
641 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
642 || ELFRefKind == AArch64MCExpr::VK_LO12
643 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
644 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
645 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
646 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
647 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
648 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
649 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
652 // If it's a constant, it should be a real immediate in range:
653 if (auto *CE = dyn_cast<MCConstantExpr>(Expr))
654 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
656 // If it's an expression, we hope for the best and let the fixup/relocation
657 // code deal with it.
// Negative ADD/SUB immediate, used for the SUB<->ADD alias: value must be a
// constant in [-0xfff, -1] (after optional lsl #0/#12).
661 bool isAddSubImmNeg() const {
662 if (!isShiftedImm() && !isImm())
667 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
668 if (isShiftedImm()) {
669 unsigned Shift = ShiftedImm.ShiftAmount;
670 Expr = ShiftedImm.Val;
671 if (Shift != 0 && Shift != 12)
676 // Otherwise it should be a real negative immediate in range:
677 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
678 return CE != nullptr && CE->getValue() < 0 && -CE->getValue() <= 0xfff;
681 bool isCondCode() const { return Kind == k_CondCode; }
// FMOV-style 64-bit vector immediate (AdvSIMD modified-immediate type 10).
683 bool isSIMDImmType10() const {
686 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
689 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
// Branch target: a word-aligned signed offset fitting in N bits after the
// implicit <<2 (N is presumably a template parameter elided from this
// listing — TODO confirm against the full source).
693 bool isBranchTarget() const {
696 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
699 int64_t Val = MCE->getValue();
702 assert(N > 0 && "Branch target immediate cannot be 0 bits!");
703 return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
// True when the immediate is a symbol reference whose ELF relocation variant
// is one of the allowed MOVW (:abs_gN:/:tprel_gN:/...) modifiers. Darwin
// modifiers are never valid here.
707 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
711 AArch64MCExpr::VariantKind ELFRefKind;
712 MCSymbolRefExpr::VariantKind DarwinRefKind;
714 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
715 DarwinRefKind, Addend)) {
718 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
721 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
722 if (ELFRefKind == AllowedModifiers[i])
// Per-halfword MOVZ modifiers (G3 = bits 63:48 ... G0 = bits 15:0).
729 bool isMovZSymbolG3() const {
730 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
733 bool isMovZSymbolG2() const {
734 return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
735 AArch64MCExpr::VK_TPREL_G2,
736 AArch64MCExpr::VK_DTPREL_G2});
739 bool isMovZSymbolG1() const {
740 return isMovWSymbol({
741 AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
742 AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
743 AArch64MCExpr::VK_DTPREL_G1,
747 bool isMovZSymbolG0() const {
748 return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
749 AArch64MCExpr::VK_TPREL_G0,
750 AArch64MCExpr::VK_DTPREL_G0});
// MOVK variants only accept the no-check (_NC) modifiers, except G3 which
// has no overflow to check.
753 bool isMovKSymbolG3() const {
754 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
757 bool isMovKSymbolG2() const {
758 return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
761 bool isMovKSymbolG1() const {
762 return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
763 AArch64MCExpr::VK_TPREL_G1_NC,
764 AArch64MCExpr::VK_DTPREL_G1_NC});
767 bool isMovKSymbolG0() const {
769 {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
770 AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
// MOV-alias checks: is this constant representable as a single MOVZ/MOVN
// with the given shift, for the given register width?
773 template<int RegWidth, int Shift>
774 bool isMOVZMovAlias() const {
775 if (!isImm()) return false;
777 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
778 if (!CE) return false;
779 uint64_t Value = CE->getValue();
781 return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
784 template<int RegWidth, int Shift>
785 bool isMOVNMovAlias() const {
786 if (!isImm()) return false;
788 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
789 if (!CE) return false;
790 uint64_t Value = CE->getValue();
792 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
795 bool isFPImm() const { return Kind == k_FPImm; }
796 bool isBarrier() const { return Kind == k_Barrier; }
797 bool isSysReg() const { return Kind == k_SysReg; }
// System-register predicates: -1U marks "no encoding" for the given
// direction (readable for MRS, writable for MSR).
799 bool isMRSSystemRegister() const {
800 if (!isSysReg()) return false;
802 return SysReg.MRSReg != -1U;
805 bool isMSRSystemRegister() const {
806 if (!isSysReg()) return false;
807 return SysReg.MSRReg != -1U;
// PAN and UAO take a 1-bit immediate; all other PState fields take 0-15.
810 bool isSystemPStateFieldWithImm0_1() const {
811 if (!isSysReg()) return false;
812 return (SysReg.PStateField == AArch64PState::PAN ||
813 SysReg.PStateField == AArch64PState::UAO);
816 bool isSystemPStateFieldWithImm0_15() const {
817 if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
818 return SysReg.PStateField != -1U;
821 bool isReg() const override {
822 return Kind == k_Register;
825 bool isScalarReg() const {
826 return Kind == k_Register && Reg.Kind == RegKind::Scalar;
829 bool isNeonVectorReg() const {
830 return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
// "Lo" = restricted to V0-V15 (FPR128_lo), required by some by-element forms.
833 bool isNeonVectorRegLo() const {
834 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
835 AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
// SVE register predicate, parameterized by register class (Z data vectors
// or P predicate registers).
839 template <unsigned Class> bool isSVEVectorReg() const {
842 case AArch64::ZPRRegClassID:
843 RK = RegKind::SVEDataVector;
845 case AArch64::PPRRegClassID:
846 case AArch64::PPR_3bRegClassID:
847 RK = RegKind::SVEPredicateVector;
850 llvm_unreachable("Unsupport register class");
853 return (Kind == k_Register && Reg.Kind == RK) &&
854 AArch64MCRegisterClasses[Class].contains(getReg());
// ElementWidth == -1 means "any element width".
857 template <int ElementWidth, unsigned Class>
858 bool isSVEVectorRegOfWidth() const {
859 return isSVEVectorReg<Class>() &&
860 (ElementWidth == -1 || Reg.ElementWidth == ElementWidth);
// A 64-bit GPR written with a W-register name (for aliases that accept it).
863 bool isGPR32as64() const {
864 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
865 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
// Even/odd register pairs used by CASP and friends.
868 bool isWSeqPair() const {
869 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
870 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
874 bool isXSeqPair() const {
875 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
876 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
880 bool isGPR64sp0() const {
881 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
882 AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
// FCADD/FCMLA rotation immediate: Value % Angle == Remainder, capped at 270.
885 template<int64_t Angle, int64_t Remainder>
886 bool isComplexRotation() const {
887 if (!isImm()) return false;
889 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
890 if (!CE) return false;
891 uint64_t Value = CE->getValue();
893 return (Value % Angle == Remainder && Value <= 270);
896 /// Is this a vector list with the type implicit (presumably attached to the
897 /// instruction itself)?
898 template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
899 return Kind == k_VectorList && VectorList.Count == NumRegs &&
900 !VectorList.ElementKind;
// Fully-typed vector list, e.g. "{ v0.4s, v1.4s }".
903 template <unsigned NumRegs, unsigned NumElements, char ElementKind>
904 bool isTypedVectorList() const {
905 if (Kind != k_VectorList)
907 if (VectorList.Count != NumRegs)
909 if (VectorList.ElementKind != ElementKind)
911 return VectorList.NumElements == NumElements;
// Lane-index predicates; the bound is the number of lanes for the element
// size (B=16, H=8, S=4, D=2).
914 bool isVectorIndex1() const {
915 return Kind == k_VectorIndex && VectorIndex.Val == 1;
918 bool isVectorIndexB() const {
919 return Kind == k_VectorIndex && VectorIndex.Val < 16;
922 bool isVectorIndexH() const {
923 return Kind == k_VectorIndex && VectorIndex.Val < 8;
926 bool isVectorIndexS() const {
927 return Kind == k_VectorIndex && VectorIndex.Val < 4;
930 bool isVectorIndexD() const {
931 return Kind == k_VectorIndex && VectorIndex.Val < 2;
934 bool isToken() const override { return Kind == k_Token; }
936 bool isTokenEqual(StringRef Str) const {
937 return Kind == k_Token && getToken() == Str;
939 bool isSysCR() const { return Kind == k_SysCR; }
940 bool isPrefetch() const { return Kind == k_Prefetch; }
941 bool isPSBHint() const { return Kind == k_PSBHint; }
942 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
// A register shifter: LSL/LSR/ASR/ROR/MSL (any amount; callers constrain it).
943 bool isShifter() const {
944 if (!isShiftExtend())
947 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
948 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
949 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
950 ST == AArch64_AM::MSL);
// A register extend: UXTB/H/W/X, SXTB/H/W/X, or LSL, with amount 0-4.
952 bool isExtend() const {
953 if (!isShiftExtend())
956 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
957 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
958 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
959 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
960 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
961 ET == AArch64_AM::LSL) &&
962 getShiftExtendAmount() <= 4;
// Extend valid for a 32-bit source register (excludes the 64-bit-only
// UXTX/SXTX forms).
965 bool isExtend64() const {
968 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
969 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
970 return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
973 bool isExtendLSL64() const {
976 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
977 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
978 ET == AArch64_AM::LSL) &&
979 getShiftExtendAmount() <= 4;
// Memory-operand extends: the amount must be 0 or log2 of the access size
// in bytes (Width is the access width in bits).
982 template<int Width> bool isMemXExtend() const {
985 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
986 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
987 (getShiftExtendAmount() == Log2_32(Width / 8) ||
988 getShiftExtendAmount() == 0);
991 template<int Width> bool isMemWExtend() const {
994 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
995 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
996 (getShiftExtendAmount() == Log2_32(Width / 8) ||
997 getShiftExtendAmount() == 0);
1000 template <unsigned width>
1001 bool isArithmeticShifter() const {
1005 // An arithmetic shifter is LSL, LSR, or ASR.
1006 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1007 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1008 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1011 template <unsigned width>
1012 bool isLogicalShifter() const {
1016 // A logical shifter is LSL, LSR, ASR or ROR.
1017 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1018 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1019 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1020 getShiftExtendAmount() < width;
1023 bool isMovImm32Shifter() const {
1027 // A 32-bit MOVi shifter is LSL of 0 or 16.
1028 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1029 if (ST != AArch64_AM::LSL)
1031 uint64_t Val = getShiftExtendAmount();
1032 return (Val == 0 || Val == 16);
1035 bool isMovImm64Shifter() const {
1039 // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
1040 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1041 if (ST != AArch64_AM::LSL)
1043 uint64_t Val = getShiftExtendAmount();
1044 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1047 bool isLogicalVecShifter() const {
1051 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1052 unsigned Shift = getShiftExtendAmount();
1053 return getShiftExtendType() == AArch64_AM::LSL &&
1054 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1057 bool isLogicalVecHalfWordShifter() const {
1058 if (!isLogicalVecShifter())
1061 // A logical vector halfword shifter is a left shift by 0 or 8.
1062 unsigned Shift = getShiftExtendAmount();
1063 return getShiftExtendType() == AArch64_AM::LSL &&
1064 (Shift == 0 || Shift == 8);
1067 bool isMoveVecShifter() const {
1068 if (!isShiftExtend())
1071 // A move vector shifter is an MSL left shift of 8 or 16.
1072 unsigned Shift = getShiftExtendAmount();
1073 return getShiftExtendType() == AArch64_AM::MSL &&
1074 (Shift == 8 || Shift == 16);
1077 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1078 // to LDUR/STUR when the offset is not legal for the former but is for
1079 // the latter. As such, in addition to checking for being a legal unscaled
1080 // address, also check that it is not a legal scaled address. This avoids
1081 // ambiguity in the matcher.
1083 bool isSImm9OffsetFB() const {
1084 return isSImm9() && !isUImm12Offset<Width / 8>();
// ADRP target: a page-aligned constant within +/-4GiB (21-bit page count),
// or any non-constant expression (validated during parsing).
1087 bool isAdrpLabel() const {
1088 // Validation was handled during parsing, so we just sanity check that
1089 // something didn't go haywire.
1093 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1094 int64_t Val = CE->getValue();
1095 int64_t Min = - (4096 * (1LL << (21 - 1)));
1096 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1097 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
// ADR target: a signed 21-bit byte offset.
1103 bool isAdrLabel() const {
1104 // Validation was handled during parsing, so we just sanity check that
1105 // something didn't go haywire.
1109 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1110 int64_t Val = CE->getValue();
1111 int64_t Min = - (1LL << (21 - 1));
1112 int64_t Max = ((1LL << (21 - 1)) - 1);
1113 return Val >= Min && Val <= Max;
// Helper for the add*Operands methods: fold constant expressions to
// immediate MCOperands, pass everything else through as an expression.
1119 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1120 // Add as immediates when possible. Null MCExpr = 0.
1122 Inst.addOperand(MCOperand::createImm(0));
1123 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1124 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1126 Inst.addOperand(MCOperand::createExpr(Expr));
1129 void addRegOperands(MCInst &Inst, unsigned N) const {
1130 assert(N == 1 && "Invalid number of operands!");
1131 Inst.addOperand(MCOperand::createReg(getReg()));
// Translate a 64-bit GPR to its 32-bit counterpart by shared encoding value.
1134 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1135 assert(N == 1 && "Invalid number of operands!");
1137 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1139 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1140 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1141 RI->getEncodingValue(getReg()));
1143 Inst.addOperand(MCOperand::createReg(Reg));
// Map Qn -> Dn; relies on D0..D31 and Q0..Q31 being numbered contiguously
// in the same order within the generated register enum.
1146 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1147 assert(N == 1 && "Invalid number of operands!");
1149 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1150 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1153 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1154 assert(N == 1 && "Invalid number of operands!");
1156 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1157 Inst.addOperand(MCOperand::createReg(getReg()));
1160 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1161 assert(N == 1 && "Invalid number of operands!");
1162 Inst.addOperand(MCOperand::createReg(getReg()));
// Vector-list emitters: FirstRegs[NumRegs-1] is the first register of the
// tuple class of that arity; the list start is rebased from Q0.
// (Some middle entries of FirstRegs are elided from this listing.)
1165 template <unsigned NumRegs>
1166 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1167 assert(N == 1 && "Invalid number of operands!");
1168 static const unsigned FirstRegs[] = { AArch64::D0,
1171 AArch64::D0_D1_D2_D3 };
1172 unsigned FirstReg = FirstRegs[NumRegs - 1];
1175 MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1178 template <unsigned NumRegs>
1179 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1180 assert(N == 1 && "Invalid number of operands!");
1181 static const unsigned FirstRegs[] = { AArch64::Q0,
1184 AArch64::Q0_Q1_Q2_Q3 };
1185 unsigned FirstReg = FirstRegs[NumRegs - 1];
1188 MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
// Lane-index emitters: the raw index is the immediate.
1191 void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
1192 assert(N == 1 && "Invalid number of operands!");
1193 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1196 void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1197 assert(N == 1 && "Invalid number of operands!");
1198 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1201 void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1202 assert(N == 1 && "Invalid number of operands!");
1203 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1206 void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1207 assert(N == 1 && "Invalid number of operands!");
1208 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1211 void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1212 assert(N == 1 && "Invalid number of operands!");
1213 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1216 void addImmOperands(MCInst &Inst, unsigned N) const {
1217 assert(N == 1 && "Invalid number of operands!");
1218 // If this is a pageoff symrefexpr with an addend, adjust the addend
1219 // to be only the page-offset portion. Otherwise, just add the expr
1221 addExpr(Inst, getImm());
// ADD/SUB immediates emit two MCOperands: the value and the LSL amount.
1224 void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
1225 assert(N == 2 && "Invalid number of operands!");
1226 if (isShiftedImm()) {
1227 addExpr(Inst, getShiftedImmVal());
1228 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1230 addExpr(Inst, getImm());
1231 Inst.addOperand(MCOperand::createImm(0));
// Negated form: isAddSubImmNeg guarantees a constant, so the cast is safe.
1235 void addAddSubImmNegOperands(MCInst &Inst, unsigned N) const {
1236 assert(N == 2 && "Invalid number of operands!");
1238 const MCExpr *MCE = isShiftedImm() ? getShiftedImmVal() : getImm();
1239 const MCConstantExpr *CE = cast<MCConstantExpr>(MCE);
1240 int64_t Val = -CE->getValue();
1241 unsigned ShiftAmt = isShiftedImm() ? ShiftedImm.ShiftAmount : 0;
1243 Inst.addOperand(MCOperand::createImm(Val));
1244 Inst.addOperand(MCOperand::createImm(ShiftAmt));
1247 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1248 assert(N == 1 && "Invalid number of operands!");
1249 Inst.addOperand(MCOperand::createImm(getCondCode()));
// ADRP: constant targets are encoded as a page count (byte offset >> 12).
1252 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1253 assert(N == 1 && "Invalid number of operands!");
1254 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1256 addExpr(Inst, getImm());
1258 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1261 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1262 addImmOperands(Inst, N);
// Scaled-offset emitters: constant byte offsets are divided down to the
// instruction's unit (access size for uimm12, fixed scale for simm7/simm10).
1266 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1267 assert(N == 1 && "Invalid number of operands!");
1268 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1271 Inst.addOperand(MCOperand::createExpr(getImm()));
1274 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1277 void addSImm9Operands(MCInst &Inst, unsigned N) const {
1278 assert(N == 1 && "Invalid number of operands!");
1279 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1280 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1283 void addSImm10s8Operands(MCInst &Inst, unsigned N) const {
1284 assert(N == 1 && "Invalid number of operands!");
1285 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1286 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 8));
1289 void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1290 assert(N == 1 && "Invalid number of operands!");
1291 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1292 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 4));
1295 void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1296 assert(N == 1 && "Invalid number of operands!");
1297 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1298 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 8));
1301 void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1302 assert(N == 1 && "Invalid number of operands!");
1303 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1304 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 16));
1307 void addImm0_1Operands(MCInst &Inst, unsigned N) const {
1308 assert(N == 1 && "Invalid number of operands!");
1309 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1310 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1313 void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1314 assert(N == 1 && "Invalid number of operands!");
1315 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1316 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1319 void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1320 assert(N == 1 && "Invalid number of operands!");
1321 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1322 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1325 void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1326 assert(N == 1 && "Invalid number of operands!");
1327 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1328 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1331 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1332 assert(N == 1 && "Invalid number of operands!");
1333 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1334 assert(MCE && "Invalid constant immediate operand!");
1335 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1338 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1339 assert(N == 1 && "Invalid number of operands!");
1340 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1341 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1344 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1345 assert(N == 1 && "Invalid number of operands!");
1346 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1347 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1350 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1351 assert(N == 1 && "Invalid number of operands!");
1352 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1353 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1356 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1357 assert(N == 1 && "Invalid number of operands!");
1358 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1359 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1362 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1363 assert(N == 1 && "Invalid number of operands!");
1364 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1365 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1368 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1369 assert(N == 1 && "Invalid number of operands!");
1370 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1371 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1374 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1375 assert(N == 1 && "Invalid number of operands!");
1376 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1377 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1380 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1381 assert(N == 1 && "Invalid number of operands!");
1382 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1383 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1386 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1387 assert(N == 1 && "Invalid number of operands!");
1388 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1389 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1392 void addImm32_63Operands(MCInst &Inst, unsigned N) const {
1393 assert(N == 1 && "Invalid number of operands!");
1394 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1395 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1398 void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1399 assert(N == 1 && "Invalid number of operands!");
1400 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1402 AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
1403 Inst.addOperand(MCOperand::createImm(encoding));
1406 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1407 assert(N == 1 && "Invalid number of operands!");
1408 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1409 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1410 Inst.addOperand(MCOperand::createImm(encoding));
1413 void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
1414 assert(N == 1 && "Invalid number of operands!");
1415 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1416 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
1417 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
1418 Inst.addOperand(MCOperand::createImm(encoding));
1421 void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
1422 assert(N == 1 && "Invalid number of operands!");
1423 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1425 AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
1426 Inst.addOperand(MCOperand::createImm(encoding));
1429 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1430 assert(N == 1 && "Invalid number of operands!");
1431 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1432 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1433 Inst.addOperand(MCOperand::createImm(encoding));
1436 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1437 // Branch operands don't encode the low bits, so shift them off
1438 // here. If it's a label, however, just put it on directly as there's
1439 // not enough information now to do anything.
1440 assert(N == 1 && "Invalid number of operands!");
1441 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1443 addExpr(Inst, getImm());
1446 assert(MCE && "Invalid constant immediate operand!");
1447 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1450 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1451 // Branch operands don't encode the low bits, so shift them off
1452 // here. If it's a label, however, just put it on directly as there's
1453 // not enough information now to do anything.
1454 assert(N == 1 && "Invalid number of operands!");
1455 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1457 addExpr(Inst, getImm());
1460 assert(MCE && "Invalid constant immediate operand!");
1461 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1464 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1465 // Branch operands don't encode the low bits, so shift them off
1466 // here. If it's a label, however, just put it on directly as there's
1467 // not enough information now to do anything.
1468 assert(N == 1 && "Invalid number of operands!");
1469 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1471 addExpr(Inst, getImm());
1474 assert(MCE && "Invalid constant immediate operand!");
1475 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1478 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1479 assert(N == 1 && "Invalid number of operands!");
1480 Inst.addOperand(MCOperand::createImm(getFPImm()));
1483 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1484 assert(N == 1 && "Invalid number of operands!");
1485 Inst.addOperand(MCOperand::createImm(getBarrier()));
1488 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1489 assert(N == 1 && "Invalid number of operands!");
1491 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1494 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1495 assert(N == 1 && "Invalid number of operands!");
1497 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1500 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1501 assert(N == 1 && "Invalid number of operands!");
1503 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1506 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1507 assert(N == 1 && "Invalid number of operands!");
1509 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1512 void addSysCROperands(MCInst &Inst, unsigned N) const {
1513 assert(N == 1 && "Invalid number of operands!");
1514 Inst.addOperand(MCOperand::createImm(getSysCR()));
1517 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1518 assert(N == 1 && "Invalid number of operands!");
1519 Inst.addOperand(MCOperand::createImm(getPrefetch()));
1522 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
1523 assert(N == 1 && "Invalid number of operands!");
1524 Inst.addOperand(MCOperand::createImm(getPSBHint()));
1527 void addShifterOperands(MCInst &Inst, unsigned N) const {
1528 assert(N == 1 && "Invalid number of operands!");
1530 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1531 Inst.addOperand(MCOperand::createImm(Imm));
1534 void addExtendOperands(MCInst &Inst, unsigned N) const {
1535 assert(N == 1 && "Invalid number of operands!");
1536 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1537 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1538 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1539 Inst.addOperand(MCOperand::createImm(Imm));
1542 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1543 assert(N == 1 && "Invalid number of operands!");
1544 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1545 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1546 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1547 Inst.addOperand(MCOperand::createImm(Imm));
1550 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1551 assert(N == 2 && "Invalid number of operands!");
1552 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1553 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1554 Inst.addOperand(MCOperand::createImm(IsSigned));
1555 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1558 // For 8-bit load/store instructions with a register offset, both the
1559 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1560 // they're disambiguated by whether the shift was explicit or implicit rather
1562 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1563 assert(N == 2 && "Invalid number of operands!");
1564 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1565 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1566 Inst.addOperand(MCOperand::createImm(IsSigned));
1567 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1571 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1572 assert(N == 1 && "Invalid number of operands!");
1574 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1575 uint64_t Value = CE->getValue();
1576 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1580 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1581 assert(N == 1 && "Invalid number of operands!");
1583 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1584 uint64_t Value = CE->getValue();
1585 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1588 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
1589 assert(N == 1 && "Invalid number of operands!");
1590 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1591 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
1594 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
1595 assert(N == 1 && "Invalid number of operands!");
1596 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1597 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
/// Debug-print this operand; implementation is out-of-line below.
1600 void print(raw_ostream &OS) const override;
1602 static std::unique_ptr<AArch64Operand>
1603 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1604 auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1605 Op->Tok.Data = Str.data();
1606 Op->Tok.Length = Str.size();
1607 Op->Tok.IsSuffix = IsSuffix;
1613 static std::unique_ptr<AArch64Operand>
1614 CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx) {
1615 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1616 Op->Reg.RegNum = RegNum;
1617 Op->Reg.Kind = Kind;
1623 static std::unique_ptr<AArch64Operand>
1624 CreateReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
1625 SMLoc S, SMLoc E, MCContext &Ctx) {
1626 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1627 Op->Reg.RegNum = RegNum;
1628 Op->Reg.ElementWidth = ElementWidth;
1629 Op->Reg.Kind = Kind;
1635 static std::unique_ptr<AArch64Operand>
1636 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1637 char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
1638 auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1639 Op->VectorList.RegNum = RegNum;
1640 Op->VectorList.Count = Count;
1641 Op->VectorList.NumElements = NumElements;
1642 Op->VectorList.ElementKind = ElementKind;
1648 static std::unique_ptr<AArch64Operand>
1649 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1650 auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1651 Op->VectorIndex.Val = Idx;
1657 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1658 SMLoc E, MCContext &Ctx) {
1659 auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1666 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1667 unsigned ShiftAmount,
1670 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1671 Op->ShiftedImm .Val = Val;
1672 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1678 static std::unique_ptr<AArch64Operand>
1679 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1680 auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1681 Op->CondCode.Code = Code;
1687 static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
1689 auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1690 Op->FPImm.Val = Val;
1696 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1700 auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1701 Op->Barrier.Val = Val;
1702 Op->Barrier.Data = Str.data();
1703 Op->Barrier.Length = Str.size();
1709 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1712 uint32_t PStateField,
1714 auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1715 Op->SysReg.Data = Str.data();
1716 Op->SysReg.Length = Str.size();
1717 Op->SysReg.MRSReg = MRSReg;
1718 Op->SysReg.MSRReg = MSRReg;
1719 Op->SysReg.PStateField = PStateField;
1725 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1726 SMLoc E, MCContext &Ctx) {
1727 auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1728 Op->SysCRImm.Val = Val;
1734 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1738 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1739 Op->Prefetch.Val = Val;
1740 Op->Barrier.Data = Str.data();
1741 Op->Barrier.Length = Str.size();
1747 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
1751 auto Op = make_unique<AArch64Operand>(k_PSBHint, Ctx);
1752 Op->PSBHint.Val = Val;
1753 Op->PSBHint.Data = Str.data();
1754 Op->PSBHint.Length = Str.size();
1760 static std::unique_ptr<AArch64Operand>
1761 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1762 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1763 auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1764 Op->ShiftExtend.Type = ShOp;
1765 Op->ShiftExtend.Amount = Val;
1766 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1773 } // end anonymous namespace.
1775 void AArch64Operand::print(raw_ostream &OS) const {
1778 OS << "<fpimm " << getFPImm() << "("
1779 << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
1782 StringRef Name = getBarrierName();
1784 OS << "<barrier " << Name << ">";
1786 OS << "<barrier invalid #" << getBarrier() << ">";
1792 case k_ShiftedImm: {
1793 unsigned Shift = getShiftedImmShift();
1794 OS << "<shiftedimm ";
1795 OS << *getShiftedImmVal();
1796 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1800 OS << "<condcode " << getCondCode() << ">";
1803 OS << "<register " << getReg() << ">";
1805 case k_VectorList: {
1806 OS << "<vectorlist ";
1807 unsigned Reg = getVectorListStart();
1808 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1809 OS << Reg + i << " ";
1814 OS << "<vectorindex " << getVectorIndex() << ">";
1817 OS << "<sysreg: " << getSysReg() << '>';
1820 OS << "'" << getToken() << "'";
1823 OS << "c" << getSysCR();
1826 StringRef Name = getPrefetchName();
1828 OS << "<prfop " << Name << ">";
1830 OS << "<prfop invalid #" << getPrefetch() << ">";
1834 OS << getPSBHintName();
1837 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1838 << getShiftExtendAmount();
1839 if (!hasShiftExtendAmount())
1846 /// @name Auto-generated Match Functions
1849 static unsigned MatchRegisterName(StringRef Name);
1853 static unsigned MatchNeonVectorRegName(StringRef Name) {
1854 return StringSwitch<unsigned>(Name.lower())
1855 .Case("v0", AArch64::Q0)
1856 .Case("v1", AArch64::Q1)
1857 .Case("v2", AArch64::Q2)
1858 .Case("v3", AArch64::Q3)
1859 .Case("v4", AArch64::Q4)
1860 .Case("v5", AArch64::Q5)
1861 .Case("v6", AArch64::Q6)
1862 .Case("v7", AArch64::Q7)
1863 .Case("v8", AArch64::Q8)
1864 .Case("v9", AArch64::Q9)
1865 .Case("v10", AArch64::Q10)
1866 .Case("v11", AArch64::Q11)
1867 .Case("v12", AArch64::Q12)
1868 .Case("v13", AArch64::Q13)
1869 .Case("v14", AArch64::Q14)
1870 .Case("v15", AArch64::Q15)
1871 .Case("v16", AArch64::Q16)
1872 .Case("v17", AArch64::Q17)
1873 .Case("v18", AArch64::Q18)
1874 .Case("v19", AArch64::Q19)
1875 .Case("v20", AArch64::Q20)
1876 .Case("v21", AArch64::Q21)
1877 .Case("v22", AArch64::Q22)
1878 .Case("v23", AArch64::Q23)
1879 .Case("v24", AArch64::Q24)
1880 .Case("v25", AArch64::Q25)
1881 .Case("v26", AArch64::Q26)
1882 .Case("v27", AArch64::Q27)
1883 .Case("v28", AArch64::Q28)
1884 .Case("v29", AArch64::Q29)
1885 .Case("v30", AArch64::Q30)
1886 .Case("v31", AArch64::Q31)
1890 static bool isValidVectorKind(StringRef Name) {
1891 return StringSwitch<bool>(Name.lower())
1901 // Accept the width neutral ones, too, for verbose syntax. If those
1902 // aren't used in the right places, the token operand won't match so
1903 // all will work out.
1908 // Needed for fp16 scalar pairwise reductions
1910 // another special case for the ARMv8.2a dot product operand
1915 static unsigned matchSVEDataVectorRegName(StringRef Name) {
1916 return StringSwitch<unsigned>(Name.lower())
1917 .Case("z0", AArch64::Z0)
1918 .Case("z1", AArch64::Z1)
1919 .Case("z2", AArch64::Z2)
1920 .Case("z3", AArch64::Z3)
1921 .Case("z4", AArch64::Z4)
1922 .Case("z5", AArch64::Z5)
1923 .Case("z6", AArch64::Z6)
1924 .Case("z7", AArch64::Z7)
1925 .Case("z8", AArch64::Z8)
1926 .Case("z9", AArch64::Z9)
1927 .Case("z10", AArch64::Z10)
1928 .Case("z11", AArch64::Z11)
1929 .Case("z12", AArch64::Z12)
1930 .Case("z13", AArch64::Z13)
1931 .Case("z14", AArch64::Z14)
1932 .Case("z15", AArch64::Z15)
1933 .Case("z16", AArch64::Z16)
1934 .Case("z17", AArch64::Z17)
1935 .Case("z18", AArch64::Z18)
1936 .Case("z19", AArch64::Z19)
1937 .Case("z20", AArch64::Z20)
1938 .Case("z21", AArch64::Z21)
1939 .Case("z22", AArch64::Z22)
1940 .Case("z23", AArch64::Z23)
1941 .Case("z24", AArch64::Z24)
1942 .Case("z25", AArch64::Z25)
1943 .Case("z26", AArch64::Z26)
1944 .Case("z27", AArch64::Z27)
1945 .Case("z28", AArch64::Z28)
1946 .Case("z29", AArch64::Z29)
1947 .Case("z30", AArch64::Z30)
1948 .Case("z31", AArch64::Z31)
1952 static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
1953 return StringSwitch<unsigned>(Name.lower())
1954 .Case("p0", AArch64::P0)
1955 .Case("p1", AArch64::P1)
1956 .Case("p2", AArch64::P2)
1957 .Case("p3", AArch64::P3)
1958 .Case("p4", AArch64::P4)
1959 .Case("p5", AArch64::P5)
1960 .Case("p6", AArch64::P6)
1961 .Case("p7", AArch64::P7)
1962 .Case("p8", AArch64::P8)
1963 .Case("p9", AArch64::P9)
1964 .Case("p10", AArch64::P10)
1965 .Case("p11", AArch64::P11)
1966 .Case("p12", AArch64::P12)
1967 .Case("p13", AArch64::P13)
1968 .Case("p14", AArch64::P14)
1969 .Case("p15", AArch64::P15)
1973 static bool isValidSVEKind(StringRef Name) {
1974 return StringSwitch<bool>(Name.lower())
1983 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1984 char &ElementKind) {
1985 assert(isValidVectorKind(Name));
1987 ElementKind = Name.lower()[Name.size() - 1];
1990 if (Name.size() == 2)
1993 // Parse the lane count
1994 Name = Name.drop_front();
1995 while (isdigit(Name.front())) {
1996 NumElements = 10 * NumElements + (Name.front() - '0');
1997 Name = Name.drop_front();
2001 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
2003 StartLoc = getLoc();
2004 RegNo = tryParseRegister();
2005 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2006 return (RegNo == (unsigned)-1);
2009 // Matches a register name or register alias previously defined by '.req'
2010 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2012 unsigned RegNum = 0;
2013 if ((RegNum = matchSVEDataVectorRegName(Name)))
2014 return Kind == RegKind::SVEDataVector ? RegNum : 0;
2016 if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2017 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2019 if ((RegNum = MatchNeonVectorRegName(Name)))
2020 return Kind == RegKind::NeonVector ? RegNum : 0;
2022 // The parsed register must be of RegKind Scalar
2023 if ((RegNum = MatchRegisterName(Name)))
2024 return Kind == RegKind::Scalar ? RegNum : 0;
2027 // Check for aliases registered via .req. Canonicalize to lower case.
2028 // That's more consistent since register names are case insensitive, and
2029 // it's how the original entry was passed in from MC/MCParser/AsmParser.
2030 auto Entry = RegisterReqs.find(Name.lower());
2031 if (Entry == RegisterReqs.end())
2034 // set RegNum if the match is the right kind of register
2035 if (Kind == Entry->getValue().first)
2036 RegNum = Entry->getValue().second;
2041 /// tryParseRegister - Try to parse a register name. The token must be an
2042 /// Identifier when called, and if it is a register name the token is eaten and
2043 /// the register is added to the operand list.
2044 int AArch64AsmParser::tryParseRegister() {
2045 MCAsmParser &Parser = getParser();
2046 const AsmToken &Tok = Parser.getTok();
2047 if (Tok.isNot(AsmToken::Identifier))
2050 std::string lowerCase = Tok.getString().lower();
2051 unsigned RegNum = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2053 // Also handle a few aliases of registers.
2055 RegNum = StringSwitch<unsigned>(lowerCase)
2056 .Case("fp", AArch64::FP)
2057 .Case("lr", AArch64::LR)
2058 .Case("x31", AArch64::XZR)
2059 .Case("w31", AArch64::WZR)
2065 Parser.Lex(); // Eat identifier token.
2069 /// tryMatchVectorRegister - Try to parse a vector register name with optional
2070 /// kind specifier. If it is a register specifier, eat the token and return it.
2071 int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
2072 MCAsmParser &Parser = getParser();
2073 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2074 TokError("vector register expected");
2078 StringRef Name = Parser.getTok().getString();
2079 // If there is a kind specifier, it's separated from the register name by
2081 size_t Start = 0, Next = Name.find('.');
2082 StringRef Head = Name.slice(Start, Next);
2083 unsigned RegNum = matchRegisterNameAlias(Head, RegKind::NeonVector);
2086 if (Next != StringRef::npos) {
2087 Kind = Name.slice(Next, StringRef::npos);
2088 if (!isValidVectorKind(Kind)) {
2089 TokError("invalid vector kind qualifier");
2093 Parser.Lex(); // Eat the register token.
2098 TokError("vector register expected");
2102 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2103 OperandMatchResultTy
2104 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2105 MCAsmParser &Parser = getParser();
2108 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2109 Error(S, "Expected cN operand where 0 <= N <= 15");
2110 return MatchOperand_ParseFail;
2113 StringRef Tok = Parser.getTok().getIdentifier();
2114 if (Tok[0] != 'c' && Tok[0] != 'C') {
2115 Error(S, "Expected cN operand where 0 <= N <= 15");
2116 return MatchOperand_ParseFail;
2120 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2121 if (BadNum || CRNum > 15) {
2122 Error(S, "Expected cN operand where 0 <= N <= 15");
2123 return MatchOperand_ParseFail;
2126 Parser.Lex(); // Eat identifier token.
2128 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2129 return MatchOperand_Success;
2132 /// tryParsePrefetch - Try to parse a prefetch operand.
2133 OperandMatchResultTy
2134 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2135 MCAsmParser &Parser = getParser();
2137 const AsmToken &Tok = Parser.getTok();
2138 // Either an identifier for named values or a 5-bit immediate.
2139 // Eat optional hash.
2140 if (parseOptionalToken(AsmToken::Hash) ||
2141 Tok.is(AsmToken::Integer)) {
2142 const MCExpr *ImmVal;
2143 if (getParser().parseExpression(ImmVal))
2144 return MatchOperand_ParseFail;
2146 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2148 TokError("immediate value expected for prefetch operand");
2149 return MatchOperand_ParseFail;
2151 unsigned prfop = MCE->getValue();
2153 TokError("prefetch operand out of range, [0,31] expected");
2154 return MatchOperand_ParseFail;
2157 auto PRFM = AArch64PRFM::lookupPRFMByEncoding(MCE->getValue());
2158 Operands.push_back(AArch64Operand::CreatePrefetch(
2159 prfop, PRFM ? PRFM->Name : "", S, getContext()));
2160 return MatchOperand_Success;
2163 if (Tok.isNot(AsmToken::Identifier)) {
2164 TokError("pre-fetch hint expected");
2165 return MatchOperand_ParseFail;
2168 auto PRFM = AArch64PRFM::lookupPRFMByName(Tok.getString());
2170 TokError("pre-fetch hint expected");
2171 return MatchOperand_ParseFail;
2174 Parser.Lex(); // Eat identifier token.
2175 Operands.push_back(AArch64Operand::CreatePrefetch(
2176 PRFM->Encoding, Tok.getString(), S, getContext()));
2177 return MatchOperand_Success;
2180 /// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
2181 OperandMatchResultTy
2182 AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2183 MCAsmParser &Parser = getParser();
2185 const AsmToken &Tok = Parser.getTok();
2186 if (Tok.isNot(AsmToken::Identifier)) {
2187 TokError("invalid operand for instruction");
2188 return MatchOperand_ParseFail;
2191 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2193 TokError("invalid operand for instruction");
2194 return MatchOperand_ParseFail;
2197 Parser.Lex(); // Eat identifier token.
2198 Operands.push_back(AArch64Operand::CreatePSBHint(
2199 PSB->Encoding, Tok.getString(), S, getContext()));
2200 return MatchOperand_Success;
2203 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2205 OperandMatchResultTy
2206 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2207 MCAsmParser &Parser = getParser();
2211 if (Parser.getTok().is(AsmToken::Hash)) {
2212 Parser.Lex(); // Eat hash token.
2215 if (parseSymbolicImmVal(Expr))
2216 return MatchOperand_ParseFail;
2218 AArch64MCExpr::VariantKind ELFRefKind;
2219 MCSymbolRefExpr::VariantKind DarwinRefKind;
2221 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2222 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2223 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2224 // No modifier was specified at all; this is the syntax for an ELF basic
2225 // ADRP relocation (unfortunately).
2227 AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2228 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2229 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2231 Error(S, "gotpage label reference not allowed an addend");
2232 return MatchOperand_ParseFail;
2233 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2234 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2235 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2236 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2237 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2238 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2239 // The operand must be an @page or @gotpage qualified symbolref.
2240 Error(S, "page or gotpage label reference expected");
2241 return MatchOperand_ParseFail;
2245 // We have either a label reference possibly with addend or an immediate. The
2246 // addend is a raw value here. The linker will adjust it to only reference the
2248 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2249 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2251 return MatchOperand_Success;
2254 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2256 OperandMatchResultTy
2257 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2261 parseOptionalToken(AsmToken::Hash);
2262 if (getParser().parseExpression(Expr))
2263 return MatchOperand_ParseFail;
2265 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2266 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2268 return MatchOperand_Success;
2271 /// tryParseFPImm - A floating point immediate expression operand.
/// Accepts either an already-encoded "0x.." byte value (0..255), or a real
/// literal which must be representable as an AArch64 FP modified immediate.
/// Positive zero is special-cased: it is pushed as the raw tokens "#0"/".0"
/// so it can match register/alias forms instead of an FPImm.
2272 OperandMatchResultTy
2273 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2274 MCAsmParser &Parser = getParser();
// Remember whether a '#' prefix was present (presumably used to decide
// between NoMatch and a hard error in the elided tail -- TODO confirm).
2277 bool Hash = parseOptionalToken(AsmToken::Hash);
2279 // Handle negation, as that still comes through as a separate token.
2280 bool isNegative = parseOptionalToken(AsmToken::Minus);
2282 const AsmToken &Tok = Parser.getTok();
2283 if (Tok.is(AsmToken::Real) || Tok.is(AsmToken::Integer)) {
// A non-negative hex integer is treated as the pre-encoded 8-bit FP
// immediate and must fit in one byte.
2285 if (Tok.is(AsmToken::Integer) && !isNegative && Tok.getString().startswith("0x")) {
2286 Val = Tok.getIntVal();
2287 if (Val > 255 || Val < 0) {
2288 TokError("encoded floating point value out of range");
2289 return MatchOperand_ParseFail;
// Otherwise interpret the token text as an IEEE double and try to encode
// it as an FP immediate; getFP64Imm yields -1 when no encoding exists.
2292 APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
2294 RealVal.changeSign();
2296 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2297 Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2299 // Check for out of range values. As an exception we let Zero through,
2300 // but as tokens instead of an FPImm so that it can be matched by the
2301 // appropriate alias if one exists.
2302 if (RealVal.isPosZero()) {
2303 Parser.Lex(); // Eat the token.
2304 Operands.push_back(AArch64Operand::CreateToken("#0", false, S, getContext()));
2305 Operands.push_back(AArch64Operand::CreateToken(".0", false, S, getContext()));
2306 return MatchOperand_Success;
2307 } else if (Val == -1) {
2308 TokError("expected compatible register or floating-point constant");
2309 return MatchOperand_ParseFail;
2312 Parser.Lex(); // Eat the token.
2313 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2314 return MatchOperand_Success;
// Current token is neither Real nor Integer: report no match (when no '#'
// was seen) or a malformed FP immediate.
2318 return MatchOperand_NoMatch;
2320 TokError("invalid floating point immediate");
2321 return MatchOperand_ParseFail;
2324 /// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
/// Grammar: ['#'] imm [',' 'lsl' ['#'] N]. A plain constant of the form
/// (K << 12) with K's low 12 bits clear is canonicalized into K with an
/// implicit LSL #12 so it fits the 12-bit immediate encoding.
2325 OperandMatchResultTy
2326 AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
2327 MCAsmParser &Parser = getParser();
2330 if (Parser.getTok().is(AsmToken::Hash))
2331 Parser.Lex(); // Eat '#'
2332 else if (Parser.getTok().isNot(AsmToken::Integer))
2333 // Operand should start from # or should be integer, emit error otherwise.
2334 return MatchOperand_NoMatch;
// The immediate may carry an ELF relocation modifier (e.g. :lo12:).
2337 if (parseSymbolicImmVal(Imm))
2338 return MatchOperand_ParseFail;
2339 else if (Parser.getTok().isNot(AsmToken::Comma)) {
// No explicit shift present: default to shift 0, folding the implicit
// LSL #12 case for large page-aligned constants.
2340 uint64_t ShiftAmount = 0;
2341 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
2343 int64_t Val = MCE->getValue();
2344 if (Val > 0xfff && (Val & 0xfff) == 0) {
2345 Imm = MCConstantExpr::create(Val >> 12, getContext());
2349 SMLoc E = Parser.getTok().getLoc();
2350 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
2352 return MatchOperand_Success;
2358 // The optional operand must be "lsl #N" where N is non-negative.
2359 if (!Parser.getTok().is(AsmToken::Identifier) ||
2360 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2361 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2362 return MatchOperand_ParseFail;
// '#' before the shift amount is optional.
2368 parseOptionalToken(AsmToken::Hash);
2370 if (Parser.getTok().isNot(AsmToken::Integer)) {
2371 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2372 return MatchOperand_ParseFail;
2375 int64_t ShiftAmount = Parser.getTok().getIntVal();
2377 if (ShiftAmount < 0) {
2378 Error(Parser.getTok().getLoc(), "positive shift amount required");
2379 return MatchOperand_ParseFail;
2381 Parser.Lex(); // Eat the number
// Only non-negativity is checked here; presumably the legal values (0/12)
// are enforced later during operand matching -- TODO confirm.
2383 SMLoc E = Parser.getTok().getLoc();
2384 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2385 S, E, getContext()));
2386 return MatchOperand_Success;
2389 /// parseCondCodeString - Parse a Condition Code string.
/// Case-insensitive; returns AArch64CC::Invalid when Cond is not a known
/// condition mnemonic. Note that "cs"/"hs" and "cc"/"lo" are architectural
/// aliases and intentionally map to the same codes (HS and LO).
2390 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2391 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2392 .Case("eq", AArch64CC::EQ)
2393 .Case("ne", AArch64CC::NE)
2394 .Case("cs", AArch64CC::HS)
2395 .Case("hs", AArch64CC::HS)
2396 .Case("cc", AArch64CC::LO)
2397 .Case("lo", AArch64CC::LO)
2398 .Case("mi", AArch64CC::MI)
2399 .Case("pl", AArch64CC::PL)
2400 .Case("vs", AArch64CC::VS)
2401 .Case("vc", AArch64CC::VC)
2402 .Case("hi", AArch64CC::HI)
2403 .Case("ls", AArch64CC::LS)
2404 .Case("ge", AArch64CC::GE)
2405 .Case("lt", AArch64CC::LT)
2406 .Case("gt", AArch64CC::GT)
2407 .Case("le", AArch64CC::LE)
2408 .Case("al", AArch64CC::AL)
2409 .Case("nv", AArch64CC::NV)
2410 .Default(AArch64CC::Invalid);
2414 /// parseCondCode - Parse a Condition Code operand.
/// Expects the current token to be an identifier. When invertCondCode is
/// set (used by aliases whose condition is logically inverted, e.g.
/// cset/cinc), the parsed code is inverted; AL and NV cannot be inverted
/// and are rejected with an error.
2415 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2416 bool invertCondCode) {
2417 MCAsmParser &Parser = getParser();
2419 const AsmToken &Tok = Parser.getTok();
2420 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2422 StringRef Cond = Tok.getString();
2423 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2424 if (CC == AArch64CC::Invalid)
2425 return TokError("invalid condition code");
2426 Parser.Lex(); // Eat identifier token.
2428 if (invertCondCode) {
2429 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2430 return TokError("condition codes AL and NV are invalid for this instruction")\
;
2431 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
// Push the (possibly inverted) condition code operand.
2435 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2439 /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2440 /// them if present.
/// Recognizes both shift operators (lsl/lsr/asr/ror/msl), which require an
/// immediate amount, and extend operators (uxtb..sxtx), for which the
/// immediate is optional and defaults to an implicit #0.
2441 OperandMatchResultTy
2442 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2443 MCAsmParser &Parser = getParser();
2444 const AsmToken &Tok = Parser.getTok();
2445 std::string LowerID = Tok.getString().lower();
2446 AArch64_AM::ShiftExtendType ShOp =
2447 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2448 .Case("lsl", AArch64_AM::LSL)
2449 .Case("lsr", AArch64_AM::LSR)
2450 .Case("asr", AArch64_AM::ASR)
2451 .Case("ror", AArch64_AM::ROR)
2452 .Case("msl", AArch64_AM::MSL)
2453 .Case("uxtb", AArch64_AM::UXTB)
2454 .Case("uxth", AArch64_AM::UXTH)
2455 .Case("uxtw", AArch64_AM::UXTW)
2456 .Case("uxtx", AArch64_AM::UXTX)
2457 .Case("sxtb", AArch64_AM::SXTB)
2458 .Case("sxth", AArch64_AM::SXTH)
2459 .Case("sxtw", AArch64_AM::SXTW)
2460 .Case("sxtx", AArch64_AM::SXTX)
2461 .Default(AArch64_AM::InvalidShiftExtend)\
;
2463 if (ShOp == AArch64_AM::InvalidShiftExtend)
2464 return MatchOperand_NoMatch;
2466 SMLoc S = Tok.getLoc();
// Optional '#' before the amount.
2469 bool Hash = parseOptionalToken(AsmToken::Hash);
2471 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2472 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2473 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2474 ShOp == AArch64_AM::MSL) {
2475 // We expect a number here.
2476 TokError("expected #imm after shift specifier");
2477 return MatchOperand_ParseFail;
2480 // "extend" type operations don't need an immediate, #0 is implicit.
2481 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2483 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2484 return MatchOperand_Success;
2487 // Make sure we do actually have a number, identifier or a parenthesized
// expression before handing the stream to the expression parser.
2489 SMLoc E = Parser.getTok().getLoc();
2490 if (!Parser.getTok().is(AsmToken::Integer) &&
2491 !Parser.getTok().is(AsmToken::LParen) &&
2492 !Parser.getTok().is(AsmToken::Identifier)) {
2493 Error(E, "expected integer shift amount");
2494 return MatchOperand_ParseFail;
2497 const MCExpr *ImmVal;
2498 if (getParser().parseExpression(ImmVal))
2499 return MatchOperand_ParseFail;
// Only constant amounts are allowed (symbolic expressions are rejected).
2501 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2503 Error(E, "expected constant '#imm' after shift specifier");
2504 return MatchOperand_ParseFail;
2507 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2508 Operands.push_back(AArch64Operand::CreateShiftExtend(
2509 ShOp, MCE->getValue(), true, S, E, getContext()));
2510 return MatchOperand_Success;
// Append the name of the architecture extension required by FBS to the
// in-progress diagnostic string Str. (The appended text itself comes from
// the branch bodies; only v8.1a/v8.2a are distinguished here.)
2513 static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
2514 if (FBS[AArch64::HasV8_1aOps])
2516 else if (FBS[AArch64::HasV8_2aOps])
/// createSysAlias - Expand a packed SYS-alias encoding into the explicit
/// operands of a SYS instruction. Field layout within Encoding:
///   op2 = bits[2:0], Cm = bits[6:3], Cn = bits[10:7], op1 = bits[13:11].
/// Operands are pushed in SYS order: op1, Cn, Cm, op2.
2522 void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
2524 const uint16_t Op2 = Encoding & 7;
2525 const uint16_t Cm = (Encoding & 0x78) >> 3;
2526 const uint16_t Cn = (Encoding & 0x780) >> 7;
2527 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
2529 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
2532 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2534 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
2536 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
2537 Expr = MCConstantExpr::create(Op2, getContext());
2539 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2542 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2543 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
/// Each alias is looked up in its generated table; unknown names and names
/// gated behind missing subtarget features produce diagnostics. A trailing
/// register operand is required unless the op name contains "all".
2544 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2545 OperandVector &Operands) {
// A '.'-qualified mnemonic can never be one of these aliases.
2546 if (Name.find('.') != StringRef::npos)
2547 return TokError("invalid operand");
// All four aliases lower to a leading "sys" token.
2551 AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2553 MCAsmParser &Parser = getParser();
2554 const AsmToken &Tok = Parser.getTok();
2555 StringRef Op = Tok.getString();
2556 SMLoc S = Tok.getLoc();
2558 if (Mnemonic == "ic") {
2559 const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
2561 return TokError("invalid operand for IC instruction");
2562 else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
// Build a "<op> requires <feature>" diagnostic.
2563 std::string Str("IC " + std::string(IC->Name) + " requires ");
2564 setRequiredFeatureString(IC->getRequiredFeatures(), Str);
2565 return TokError(Str.c_str());
2567 createSysAlias(IC->Encoding, Operands, S);
2568 } else if (Mnemonic == "dc") {
2569 const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
2571 return TokError("invalid operand for DC instruction");
2572 else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
2573 std::string Str("DC " + std::string(DC->Name) + " requires ");
2574 setRequiredFeatureString(DC->getRequiredFeatures(), Str);
2575 return TokError(Str.c_str());
2577 createSysAlias(DC->Encoding, Operands, S);
2578 } else if (Mnemonic == "at") {
2579 const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
2581 return TokError("invalid operand for AT instruction");
2582 else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
2583 std::string Str("AT " + std::string(AT->Name) + " requires ");
2584 setRequiredFeatureString(AT->getRequiredFeatures(), Str);
2585 return TokError(Str.c_str());
2587 createSysAlias(AT->Encoding, Operands, S);
2588 } else if (Mnemonic == "tlbi") {
2589 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
2591 return TokError("invalid operand for TLBI instruction");
2592 else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
2593 std::string Str("TLBI " + std::string(TLBI->Name) + " requires ");
2594 setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
2595 return TokError(Str.c_str());
2597 createSysAlias(TLBI->Encoding, Operands, S);
2600 Parser.Lex(); // Eat operand.
// Ops whose name contains "all" (e.g. TLBI *ALL*) take no register.
2602 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2603 bool HasRegister = false;
2605 // Check for the optional register operand.
2606 if (parseOptionalToken(AsmToken::Comma)) {
2607 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2608 return TokError("expected register operand");
// Diagnose a missing or superfluous register against the op's form.
2612 if (ExpectRegister && !HasRegister)
2613 return TokError("specified " + Mnemonic + " op requires a register");
2614 else if (!ExpectRegister && HasRegister)
2615 return TokError("specified " + Mnemonic + " op does not use a register");
2617 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
/// tryParseBarrierOperand - Parse the operand of a barrier instruction
/// (DSB/DMB/ISB). Accepts either a '#'-style immediate in [0, 15] or a
/// named barrier option; for ISB the only valid named option is "sy".
2623 OperandMatchResultTy
2624 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2625 MCAsmParser &Parser = getParser();
2626 const AsmToken &Tok = Parser.getTok();
2628 // Can be either a #imm style literal or an option name
2629 if (parseOptionalToken(AsmToken::Hash) ||
2630 Tok.is(AsmToken::Integer)) {
2631 // Immediate operand.
2632 const MCExpr *ImmVal;
2633 SMLoc ExprLoc = getLoc();
2634 if (getParser().parseExpression(ImmVal))
2635 return MatchOperand_ParseFail;
2636 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2638 Error(ExprLoc, "immediate value expected for barrier operand");
2639 return MatchOperand_ParseFail;
// The barrier option field is 4 bits wide.
2641 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2642 Error(ExprLoc, "barrier operand out of range");
2643 return MatchOperand_ParseFail;
// Attach the canonical option name, if this encoding has one, for
// printing purposes.
2645 auto DB = AArch64DB::lookupDBByEncoding(MCE->getValue());
2646 Operands.push_back(AArch64Operand::CreateBarrier(
2647 MCE->getValue(), DB ? DB->Name : "", ExprLoc, getContext()));
2648 return MatchOperand_Success;
2651 if (Tok.isNot(AsmToken::Identifier)) {
2652 TokError("invalid operand for instruction");
2653 return MatchOperand_ParseFail;
2656 // The only valid named option for ISB is 'sy'
2657 auto DB = AArch64DB::lookupDBByName(Tok.getString());
2658 if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
2659 TokError("'sy' or #imm operand expected");
2660 return MatchOperand_ParseFail;
// Unknown option name for DSB/DMB.
2662 TokError("invalid barrier option name");
2663 return MatchOperand_ParseFail;
2666 Operands.push_back(AArch64Operand::CreateBarrier(
2667 DB->Encoding, Tok.getString(), getLoc(), getContext()));
2668 Parser.Lex(); // Consume the option
2670 return MatchOperand_Success;
/// tryParseSysReg - Parse a system register operand for MRS/MSR. Named
/// registers are looked up in the generated table; a register may be
/// readable-only or writeable-only, encoded via separate MRS/MSR encodings
/// (-1 marks the inapplicable direction). Unknown names fall back to the
/// generic "S<op0>_<op1>_C<n>_C<m>_<op2>" spelling, and MSR pstate-field
/// names are resolved as well.
2673 OperandMatchResultTy
2674 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2675 MCAsmParser &Parser = getParser();
2676 const AsmToken &Tok = Parser.getTok();
2678 if (Tok.isNot(AsmToken::Identifier))
2679 return MatchOperand_NoMatch;
2682 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
2683 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
2684 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
2685 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
// Not a known named register: try the generic S... spelling.
2687 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
2689 auto PState = AArch64PState::lookupPStateByName(Tok.getString());
2690 unsigned PStateImm = -1;
2691 if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
2692 PStateImm = PState->Encoding;
// A single operand carries all three possible interpretations; matching
// picks the one appropriate for the instruction.
2695 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
2696 PStateImm, getContext()));
2697 Parser.Lex(); // Eat identifier
2699 return MatchOperand_Success;
2702 /// tryParseNeonVectorRegister - Parse a vector register operand.
/// Returns false on success. Handles an optional ".<kind>" qualifier (kept
/// as a literal token) and an optional "[index]" suffix.
2703 bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
2704 MCAsmParser &Parser = getParser();
2705 if (Parser.getTok().isNot(AsmToken::Identifier))
2709 // Check for a vector register specifier first.
2711 int64_t Reg = tryMatchVectorRegister(Kind, false);
2715 AArch64Operand::CreateReg(Reg, RegKind::NeonVector, S, getLoc(),
2718 // If there was an explicit qualifier, that goes on as a literal text
// operand (e.g. ".8b") so the matcher can check the arrangement.
2722 AArch64Operand::CreateToken(Kind, false, S, getContext()));
2724 // If there is an index specifier following the register, parse that too.
2725 SMLoc SIdx = getLoc();
2726 if (parseOptionalToken(AsmToken::LBrac)) {
2727 const MCExpr *ImmVal;
2728 if (getParser().parseExpression(ImmVal))
// The lane index must be a plain constant.
2730 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2732 TokError("immediate value expected for vector index");
2738 if (parseToken(AsmToken::RBrac, "']' expected"))
2741 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2748 // tryParseSVEDataVectorRegister - Try to parse a SVE vector register name with
2749 // optional kind specifier. If it is a register specifier, eat the token
// and return the register number and kind through the out-parameters.
2751 OperandMatchResultTy
2752 AArch64AsmParser::tryParseSVERegister(int &Reg, StringRef &Kind,
2753 RegKind MatchKind) {
2754 MCAsmParser &Parser = getParser();
2755 const AsmToken &Tok = Parser.getTok();
2757 if (Tok.isNot(AsmToken::Identifier))
2758 return MatchOperand_NoMatch;
2760 StringRef Name = Tok.getString();
2761 // If there is a kind specifier, it's separated from the register name by
// a '.' (e.g. "z0.s", "p1.b").
2763 size_t Start = 0, Next = Name.find('.');
2764 StringRef Head = Name.slice(Start, Next);
2765 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
2768 if (Next != StringRef::npos) {
// Kind keeps the leading '.' -- note isValidSVEKind expects that form.
2769 Kind = Name.slice(Next, StringRef::npos);
2770 if (!isValidSVEKind(Kind)) {
2771 TokError("invalid sve vector kind qualifier");
2772 return MatchOperand_ParseFail;
2775 Parser.Lex(); // Eat the register token.
2778 return MatchOperand_Success;
2781 return MatchOperand_NoMatch;
2784 /// tryParseSVEPredicateVector - Parse a SVE predicate register operand.
/// Parses "p<n>[.<kind>]" and records the element width implied by the
/// kind suffix on the created register operand.
2785 OperandMatchResultTy
2786 AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
2787 // Check for a SVE predicate register specifier first.
2788 const SMLoc S = getLoc();
2791 auto Res = tryParseSVERegister(RegNum, Kind, RegKind::SVEPredicateVector);
2792 if (Res != MatchOperand_Success)
// Map the kind suffix to an element width; unrecognized suffixes yield
// no match (cases elided in this excerpt).
2795 unsigned ElementWidth = StringSwitch<unsigned>(Kind.lower())
2805 return MatchOperand_NoMatch;
2808 AArch64Operand::CreateReg(RegNum, RegKind::SVEPredicateVector,
2809 ElementWidth, S, getLoc(), getContext()));
2811 return MatchOperand_Success;
2814 /// parseRegister - Parse a non-vector register operand.
/// Returns false on success. Tries a NEON vector register first, then a
/// scalar register.
2815 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
2817 // Try for a vector (neon) register.
2818 if (!tryParseNeonVectorRegister(Operands))
2821 // Try for a scalar register.
2822 int64_t Reg = tryParseRegister();
2825 Operands.push_back(AArch64Operand::CreateReg(Reg, RegKind::Scalar, S,
2826 getLoc(), getContext()));
/// parseSymbolicImmVal - Parse an immediate expression with an optional
/// leading ELF relocation specifier of the form ":<modifier>:expr" (e.g.
/// ":lo12:sym"). Returns true on error. When a modifier is present the
/// resulting expression is wrapped in an AArch64MCExpr carrying the
/// variant kind.
2831 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2832 MCAsmParser &Parser = getParser();
2833 bool HasELFModifier = false;
2834 AArch64MCExpr::VariantKind RefKind;
2836 if (parseOptionalToken(AsmToken::Colon)) {
2837 HasELFModifier = true;
2839 if (Parser.getTok().isNot(AsmToken::Identifier))
2840 return TokError("expect relocation specifier in operand after ':'");
// Modifier names are matched case-insensitively.
2842 std::string LowerCase = Parser.getTok().getIdentifier().lower();
2843 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2844 .Case("lo12", AArch64MCExpr::VK_LO12)
2845 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2846 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2847 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2848 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2849 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2850 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2851 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2852 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2853 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2854 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2855 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2856 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2857 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2858 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2859 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2860 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2861 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
2862 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
2863 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
2864 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
2865 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
2866 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
2867 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
2868 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
2869 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
2870 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
2871 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
2872 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
2873 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
2874 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
2875 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
2876 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
2877 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
2878 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
2879 .Default(AArch64MCExpr::VK_INVALID);
2881 if (RefKind == AArch64MCExpr::VK_INVALID)
2882 return TokError("expect relocation specifier in operand after ':'");
2884 Parser.Lex(); // Eat identifier
2886 if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
// Parse the underlying expression itself.
2890 if (getParser().parseExpression(ImmVal))
// Wrap with the modifier so the relocation kind survives to emission.
2894 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
2899 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
/// Lists are written "{v0.8b - v3.8b}" (range form) or "{v0.8b, v1.8b}"
/// (comma form). All elements must share the same kind suffix; register
/// numbers must be sequential modulo 32. An optional "[index]" may follow
/// the closing brace. Returns true on error.
2900 bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
2901 MCAsmParser &Parser = getParser();
2902 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
2904 Parser.Lex(); // Eat left bracket token.
2906 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
2909 int64_t PrevReg = FirstReg;
// Range form: "{vA.K - vB.K}".
2912 if (parseOptionalToken(AsmToken::Minus)) {
2913 SMLoc Loc = getLoc();
2915 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2918 // Any Kind suffices must match on all regs in the list.
2919 if (Kind != NextKind)
2920 return Error(Loc, "mismatched register size suffix");
// Number of registers between the endpoints, wrapping at 32.
2922 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
2924 if (Space == 0 || Space > 3) {
2925 return Error(Loc, "invalid number of vectors");
// Comma form: each subsequent register must follow the previous one.
2931 while (parseOptionalToken(AsmToken::Comma)) {
2932 SMLoc Loc = getLoc();
2934 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2937 // Any Kind suffices must match on all regs in the list.
2938 if (Kind != NextKind)
2939 return Error(Loc, "mismatched register size suffix");
2941 // Registers must be incremental (with wraparound at 31)
2942 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
2943 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
2944 return Error(Loc, "registers must be sequential");
2951 if (parseToken(AsmToken::RCurly, "'}' expected"))
// Count of registers in the list is validated (1..4 presumably -- check
// elided here).
2955 return Error(S, "invalid number of vectors");
2957 unsigned NumElements = 0;
2958 char ElementKind = 0;
// Decode the ".<count><kind>" suffix into element count and kind letter.
2960 parseValidVectorKind(Kind, NumElements, ElementKind);
2962 Operands.push_back(AArch64Operand::CreateVectorList(
2963 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
2965 // If there is an index specifier following the list, parse that too.
2966 SMLoc SIdx = getLoc();
2967 if (parseOptionalToken(AsmToken::LBrac)) { // Eat left bracket token.
2968 const MCExpr *ImmVal;
2969 if (getParser().parseExpression(ImmVal))
2971 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2973 TokError("immediate value expected for vector index");
2978 if (parseToken(AsmToken::RBrac, "']' expected"))
2981 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
/// tryParseGPR64sp0Operand - Parse a GPR64sp register that may carry an
/// optional ", #0" suffix (used by instructions like STXP's base form,
/// where only an explicit zero index is legal).
2987 OperandMatchResultTy
2988 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
2989 MCAsmParser &Parser = getParser();
2990 const AsmToken &Tok = Parser.getTok();
2991 if (!Tok.is(AsmToken::Identifier))
2992 return MatchOperand_NoMatch;
2994 unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), RegKind::Scalar);
2996 MCContext &Ctx = getContext();
2997 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
// Only registers in the GPR64sp class (X0-X30, SP) qualify.
2998 if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
2999 return MatchOperand_NoMatch;
3002 Parser.Lex(); // Eat register
// No comma: plain register operand, no index to validate.
3004 if (!parseOptionalToken(AsmToken::Comma)) {
3006 AArch64Operand::CreateReg(RegNum, RegKind::Scalar, S, getLoc(), Ctx));
3007 return MatchOperand_Success;
3010 parseOptionalToken(AsmToken::Hash);
3012 if (Parser.getTok().isNot(AsmToken::Integer)) {
3013 Error(getLoc(), "index must be absent or #0");
3014 return MatchOperand_ParseFail;
// The only accepted explicit index is the constant 0.
3017 const MCExpr *ImmVal;
3018 if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
3019 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
3020 Error(getLoc(), "index must be absent or #0");
3021 return MatchOperand_ParseFail;
3025 AArch64Operand::CreateReg(RegNum, RegKind::Scalar, S, getLoc(), Ctx));
3026 return MatchOperand_Success;
3029 /// parseOperand - Parse a arm instruction operand. For now this parses the
3030 /// operand regardless of the mnemonic.
/// First defers to the tablegen'd custom operand parsers; otherwise
/// dispatches on the leading token kind. Returns true on error.
3031 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3032 bool invertCondCode) {
3033 MCAsmParser &Parser = getParser();
3035 OperandMatchResultTy ResTy =
3036 MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/ true);
3038 // Check if the current operand has a custom associated parser, if so, try to
3039 // custom parse the operand, or fallback to the general approach.
3040 if (ResTy == MatchOperand_Success)
3042 // If there wasn't a custom match, try the generic matcher below. Otherwise,
3043 // there was a match, but an error occurred, in which case, just return that
3044 // the operand parsing failed.
3045 if (ResTy == MatchOperand_ParseFail)
3048 // Nothing custom, so do general case parsing.
3050 switch (getLexer().getKind()) {
// Symbolic immediate (possibly with :modifier:) -- e.g. a bare label.
3054 if (parseSymbolicImmVal(Expr))
3055 return Error(S, "invalid operand");
3057 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3058 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3061 case AsmToken::LBrac: {
// '[' opens a memory operand; the bracket is kept as a literal token.
3062 SMLoc Loc = Parser.getTok().getLoc();
3063 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3065 Parser.Lex(); // Eat '['
3067 // There's no comma after a '[', so we can parse the next operand
// immediately (recursively).
3069 return parseOperand(Operands, false, false);
3071 case AsmToken::LCurly:
3072 return parseVectorList(Operands);
3073 case AsmToken::Identifier: {
3074 // If we're expecting a Condition Code operand, then just parse that.
3076 return parseCondCode(Operands, invertCondCode);
3078 // If it's a register name, parse it.
3079 if (!parseRegister(Operands))
3082 // This could be an optional "shift" or "extend" operand.
3083 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3084 // We can only continue if no tokens were eaten.
3085 if (GotShift != MatchOperand_NoMatch)
3088 // This was not a register so parse other operands that start with an
3089 // identifier (like labels) as expressions and create them as immediates.
3090 const MCExpr *IdVal;
3092 if (getParser().parseExpression(IdVal))
3094 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3095 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3098 case AsmToken::Integer:
3099 case AsmToken::Real:
3100 case AsmToken::Hash: {
3101 // #42 -> immediate.
3104 parseOptionalToken(AsmToken::Hash);
3106 // Parse a negative sign
3107 bool isNegative = false;
3108 if (Parser.getTok().is(AsmToken::Minus)) {
3110 // We need to consume this token only when we have a Real, otherwise
3111 // we let parseSymbolicImmVal take care of it
3112 if (Parser.getLexer().peekTok().is(AsmToken::Real))
3116 // The only Real that should come through here is a literal #0.0 for
3117 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3118 // so convert the value.
3119 const AsmToken &Tok = Parser.getTok();
3120 if (Tok.is(AsmToken::Real)) {
3121 APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
3122 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3123 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3124 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3125 Mnemonic != "fcmlt")
3126 return TokError("unexpected floating point literal");
3127 else if (IntVal != 0 || isNegative)
3128 return TokError("expected floating-point constant #0.0");
3129 Parser.Lex(); // Eat the token.
// Push "#0"/".0" as raw tokens so the fcmp zero-form alias matches.
3132 AArch64Operand::CreateToken("#0", false, S, getContext()));
3134 AArch64Operand::CreateToken(".0", false, S, getContext()));
// Plain integer (possibly :modifier:-qualified) immediate.
3138 const MCExpr *ImmVal;
3139 if (parseSymbolicImmVal(ImmVal))
3142 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3143 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3146 case AsmToken::Equal: {
3147 SMLoc Loc = getLoc();
3148 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3149 return TokError("unexpected token in operand");
3150 Parser.Lex(); // Eat '='
3151 const MCExpr *SubExprVal;
3152 if (getParser().parseExpression(SubExprVal))
// The '=' pseudo form requires the first operand to be a GPR.
3155 if (Operands.size() < 2 ||
3156 !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
3157 return Error(Loc, "Only valid when first operand is register");
3160 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3161 Operands[1]->getReg());
3163 MCContext& Ctx = getContext();
3164 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3165 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3166 if (isa<MCConstantExpr>(SubExprVal)) {
3167 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
// Strip trailing zero 16-bit chunks; X regs allow shifts up to 48,
// W regs only up to 16.
3168 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3169 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3173 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
// Rewrite "ldr rN, =imm" into "movz rN, #imm, lsl #shift".
3174 Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3175 Operands.push_back(AArch64Operand::CreateImm(
3176 MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
3178 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3179 ShiftAmt, true, S, E, Ctx));
3182 APInt Simm = APInt(64, Imm << ShiftAmt);
3183 // check if the immediate is an unsigned or signed 32-bit int for W regs
3184 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3185 return Error(Loc, "Immediate too large for register");
3187 // If it is a label or an imm that cannot fit in a movz, put it into CP.
3188 const MCExpr *CPLoc =
3189 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
3190 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3196 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands. Normalizes "bcc"-style shorthands to "b.cc", handles the
/// .req directive and the SYS aliases, splits the mnemonic at '.' into
/// suffix tokens, and parses the comma-separated operand list (including
/// the trailing ']' / '!' memory-operand tokens).
3198 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3199 StringRef Name, SMLoc NameLoc,
3200 OperandVector &Operands) {
3201 MCAsmParser &Parser = getParser();
// Apply the combined-mnemonic branch aliases: "beq" -> "b.eq" etc.
3202 Name = StringSwitch<StringRef>(Name.lower())
3203 .Case("beq", "b.eq")
3204 .Case("bne", "b.ne")
3205 .Case("bhs", "b.hs")
3206 .Case("bcs", "b.cs")
3207 .Case("blo", "b.lo")
3208 .Case("bcc", "b.cc")
3209 .Case("bmi", "b.mi")
3210 .Case("bpl", "b.pl")
3211 .Case("bvs", "b.vs")
3212 .Case("bvc", "b.vc")
3213 .Case("bhi", "b.hi")
3214 .Case("bls", "b.ls")
3215 .Case("bge", "b.ge")
3216 .Case("blt", "b.lt")
3217 .Case("bgt", "b.gt")
3218 .Case("ble", "b.le")
3219 .Case("bal", "b.al")
3220 .Case("bnv", "b.nv")
3223 // First check for the AArch64-specific .req directive.
3224 if (Parser.getTok().is(AsmToken::Identifier) &&
3225 Parser.getTok().getIdentifier() == ".req") {
3226 parseDirectiveReq(Name, NameLoc);
3227 // We always return 'error' for this, as we're done with this
3228 // statement and don't need to match the 'instruction."
3232 // Create the leading tokens for the mnemonic, split by '.' characters.
3233 size_t Start = 0, Next = Name.find('.');
3234 StringRef Head = Name.slice(Start, Next);
3236 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3237 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi")
3238 return parseSysAlias(Head, NameLoc, Operands);
// The head of the mnemonic becomes the first operand token.
3241 AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3244 // Handle condition codes for a branch mnemonic
3245 if (Head == "b" && Next != StringRef::npos) {
3247 Next = Name.find('.', Start + 1);
3248 Head = Name.slice(Start + 1, Next);
// Point diagnostics at the suffix, not the whole mnemonic.
3250 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3251 (Head.data() - Name.data()));
3252 AArch64CC::CondCode CC = parseCondCodeString(Head);
3253 if (CC == AArch64CC::Invalid)
3254 return Error(SuffixLoc, "invalid condition code");
// The '.' and the condition code are separate operands for matching.
3256 AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3258 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3261 // Add the remaining tokens in the mnemonic.
3262 while (Next != StringRef::npos) {
3264 Next = Name.find('.', Start + 1);
3265 Head = Name.slice(Start, Next);
3266 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3267 (Head.data() - Name.data()) + 1);
3269 AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3272 // Conditional compare instructions have a Condition Code operand, which needs
3273 // to be parsed and an immediate operand created.
3274 bool condCodeFourthOperand =
3275 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3276 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3277 Head == "csinc" || Head == "csinv" || Head == "csneg");
3279 // These instructions are aliases to some of the conditional select
3280 // instructions. However, the condition code is inverted in the aliased
// instruction.
3283 // FIXME: Is this the correct way to handle these? Or should the parser
3284 // generate the aliased instructions directly?
3285 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3286 bool condCodeThirdOperand =
3287 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3289 // Read the remaining operands.
3290 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3291 // Read the first operand.
3292 if (parseOperand(Operands, false, false)) {
// Subsequent operands: operand position N decides whether a condition
// code is expected and whether it must be inverted.
3297 while (parseOptionalToken(AsmToken::Comma)) {
3298 // Parse and remember the operand.
3299 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3300 (N == 3 && condCodeThirdOperand) ||
3301 (N == 2 && condCodeSecondOperand),
3302 condCodeSecondOperand || condCodeThirdOperand)) {
3306 // After successfully parsing some operands there are two special cases to
3307 // consider (i.e. notional operands not separated by commas). Both are due
3308 // to memory specifiers:
3309 // + An RBrac will end an address for load/store/prefetch
3310 // + An '!' will indicate a pre-indexed operation.
3312 // It's someone else's responsibility to make sure these tokens are sane
3313 // in the given context!
3315 SMLoc RLoc = Parser.getTok().getLoc();
3316 if (parseOptionalToken(AsmToken::RBrac))
3318 AArch64Operand::CreateToken("]", false, RLoc, getContext()));
3319 SMLoc ELoc = Parser.getTok().getLoc();
3320 if (parseOptionalToken(AsmToken::Exclaim))
3322 AArch64Operand::CreateToken("!", false, ELoc, getContext()));
3328 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3334 // FIXME: This entire function is a giant hack to provide us with decent
3335 // operand range validation/diagnostics until TableGen/MC can be extended
3336 // to support autogeneration of this kind of validation.
// Post-match semantic validation: rejects architecturally-unpredictable
// register overlap in load/store (pair) instructions with writeback, Rt==Rt2
// in register pairs, and symbol-reference kinds that are illegal as ADD/SUB
// immediates. \p Loc holds the start location of each parsed operand (Loc[0]
// is the first operand after the mnemonic; see the caller, which fills it
// from Operands[1..]). Returns true after emitting a diagnostic.
3337 bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3338 SmallVectorImpl<SMLoc> &Loc) {
3339 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3340 // Check for indexed addressing modes w/ the base register being the
3341 // same as a destination/source register or pair load where
3342 // the Rt == Rt2. All of those are undefined behaviour.
3343 switch (Inst.getOpcode()) {
// Pre/post-indexed GPR LDP: the writeback base (Rn) must not overlap either
// destination. isSubRegisterEq also catches W/X sub-register aliasing
// (e.g. w0 inside x0), not just exact equality.
3344 case AArch64::LDPSWpre:
3345 case AArch64::LDPWpost:
3346 case AArch64::LDPWpre:
3347 case AArch64::LDPXpost:
3348 case AArch64::LDPXpre: {
// NOTE(review): Rt/Rt2/Rn are read from operands 1-3; operand 0 is
// presumably the writeback result register — confirm against the .td file.
3349 unsigned Rt = Inst.getOperand(1).getReg();
3350 unsigned Rt2 = Inst.getOperand(2).getReg();
3351 unsigned Rn = Inst.getOperand(3).getReg();
3352 if (RI->isSubRegisterEq(Rn, Rt))
3353 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3354 "is also a destination")
3355 if (RI->isSubRegisterEq(Rn, Rt2))
3356 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3357 "is also a destination")
// Signed-offset (no writeback) pair loads: loading both halves of the pair
// into the same register is unpredictable.
3360 case AArch64::LDPDi:
3361 case AArch64::LDPQi:
3362 case AArch64::LDPSi:
3363 case AArch64::LDPSWi:
3364 case AArch64::LDPWi:
3365 case AArch64::LDPXi: {
3366 unsigned Rt = Inst.getOperand(0).getReg();
3367 unsigned Rt2 = Inst.getOperand(1).getReg();
3369 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
// Remaining writeback pair-load forms (FP/vector and LDPSWpost): same
// Rt2==Rt restriction, with operands shifted by one for the writeback def.
3372 case AArch64::LDPDpost:
3373 case AArch64::LDPDpre:
3374 case AArch64::LDPQpost:
3375 case AArch64::LDPQpre:
3376 case AArch64::LDPSpost:
3377 case AArch64::LDPSpre:
3378 case AArch64::LDPSWpost: {
3379 unsigned Rt = Inst.getOperand(1).getReg();
3380 unsigned Rt2 = Inst.getOperand(2).getReg();
3382 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
// Pre/post-indexed STP: storing a register that aliases the writeback base
// is unpredictable (the stored value is ambiguous).
3385 case AArch64::STPDpost:
3386 case AArch64::STPDpre:
3387 case AArch64::STPQpost:
3388 case AArch64::STPQpre:
3389 case AArch64::STPSpost:
3390 case AArch64::STPSpre:
3391 case AArch64::STPWpost:
3392 case AArch64::STPWpre:
3393 case AArch64::STPXpost:
3394 case AArch64::STPXpre: {
3395 unsigned Rt = Inst.getOperand(1).getReg();
3396 unsigned Rt2 = Inst.getOperand(2).getReg();
3397 unsigned Rn = Inst.getOperand(3).getReg();
3398 if (RI->isSubRegisterEq(Rn, Rt))
3399 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3400 "is also a source")
3401 if (RI->isSubRegisterEq(Rn, Rt2))
3402 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3403 "is also a source")
// Single-register loads with writeback: the base may not overlap the
// transfer register.
3406 case AArch64::LDRBBpre:
3407 case AArch64::LDRBpre:
3408 case AArch64::LDRHHpre:
3409 case AArch64::LDRHpre:
3410 case AArch64::LDRSBWpre:
3411 case AArch64::LDRSBXpre:
3412 case AArch64::LDRSHWpre:
3413 case AArch64::LDRSHXpre:
3414 case AArch64::LDRSWpre:
3415 case AArch64::LDRWpre:
3416 case AArch64::LDRXpre:
3417 case AArch64::LDRBBpost:
3418 case AArch64::LDRBpost:
3419 case AArch64::LDRHHpost:
3420 case AArch64::LDRHpost:
3421 case AArch64::LDRSBWpost:
3422 case AArch64::LDRSBXpost:
3423 case AArch64::LDRSHWpost:
3424 case AArch64::LDRSHXpost:
3425 case AArch64::LDRSWpost:
3426 case AArch64::LDRWpost:
3427 case AArch64::LDRXpost: {
3428 unsigned Rt = Inst.getOperand(1).getReg();
3429 unsigned Rn = Inst.getOperand(2).getReg();
3430 if (RI->isSubRegisterEq(Rn, Rt))
3431 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3432 "is also a source")
// Single-register stores with writeback: same overlap restriction.
3435 case AArch64::STRBBpost:
3436 case AArch64::STRBpost:
3437 case AArch64::STRHHpost:
3438 case AArch64::STRHpost:
3439 case AArch64::STRWpost:
3440 case AArch64::STRXpost:
3441 case AArch64::STRBBpre:
3442 case AArch64::STRBpre:
3443 case AArch64::STRHHpre:
3444 case AArch64::STRHpre:
3445 case AArch64::STRWpre:
3446 case AArch64::STRXpre: {
3447 unsigned Rt = Inst.getOperand(1).getReg();
3448 unsigned Rn = Inst.getOperand(2).getReg();
3449 if (RI->isSubRegisterEq(Rn, Rt))
3450 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3451 "is also a source")
3456 // Now check immediate ranges. Separate from the above as there is overlap
3457 // in the instructions being checked and this keeps the nested conditionals
3459 switch (Inst.getOpcode()) {
// ADD/SUB with immediate: restrict which relocation/symbol-reference kinds
// may appear in the immediate slot.
3460 case AArch64::ADDSWri:
3461 case AArch64::ADDSXri:
3462 case AArch64::ADDWri:
3463 case AArch64::ADDXri:
3464 case AArch64::SUBSWri:
3465 case AArch64::SUBSXri:
3466 case AArch64::SUBWri:
3467 case AArch64::SUBXri: {
3468 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3469 // some slight duplication here.
3470 if (Inst.getOperand(2).isExpr()) {
3471 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3472 AArch64MCExpr::VariantKind ELFRefKind;
3473 MCSymbolRefExpr::VariantKind DarwinRefKind;
3475 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3477 // Only allow these with ADDXri.
3478 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3479 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3480 Inst.getOpcode() == AArch64::ADDXri)
3483 // Only allow these with ADDXri/ADDWri
3484 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3485 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3486 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3487 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3488 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3489 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3490 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3491 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
3492 (Inst.getOpcode() == AArch64::ADDXri ||
3493 Inst.getOpcode() == AArch64::ADDWri))
3496 // Don't allow symbol refs in the immediate field otherwise
3497 // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
3498 // operands of the original instruction (i.e. 'add w0, w1, borked' vs
3499 // 'cmp w0, 'borked')
3500 return Error(Loc.back(), "invalid immediate expression");
3502 // We don't validate more complex expressions here
// Forward declaration of the mnemonic spell checker used to append a
// "did you mean ...?" suggestion when matching fails with Match_MnemonicFail
// (see showMatchError below). FBS is the available-feature bitset so only
// currently-enabled instructions are suggested.
3511 static std::string AArch64MnemonicSpellCheck(StringRef S, uint64_t FBS,
3512 unsigned VariantID = 0);
// Translate a matcher error code (Match_*) into a user-facing diagnostic
// anchored at \p Loc. Every case returns via Error(), so callers can simply
// 'return showMatchError(...)'. Operands is only consulted for
// Match_MnemonicFail, where the mnemonic token feeds the spell checker.
3514 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
3515 OperandVector &Operands) {
3517 case Match_MissingFeature:
3519 "instruction requires a CPU feature not currently enabled");
3520 case Match_InvalidOperand:
3521 return Error(Loc, "invalid operand for instruction");
3522 case Match_InvalidSuffix:
3523 return Error(Loc, "invalid type suffix for instruction");
3524 case Match_InvalidCondCode:
3525 return Error(Loc, "expected AArch64 condition code");
3526 case Match_AddSubRegExtendSmall:
3528 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3529 case Match_AddSubRegExtendLarge:
3531 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3532 case Match_AddSubSecondSource:
3534 "expected compatible register, symbol or integer in range [0, 4095]");
3535 case Match_LogicalSecondSource:
3536 return Error(Loc, "expected compatible register or logical immediate");
3537 case Match_InvalidMovImm32Shift:
3538 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3539 case Match_InvalidMovImm64Shift:
3540 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3541 case Match_AddSubRegShift32:
3543 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3544 case Match_AddSubRegShift64:
3546 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3547 case Match_InvalidFPImm:
3549 "expected compatible register or floating-point constant");
// Signed scaled/unscaled memory-offset diagnostics.
3550 case Match_InvalidMemoryIndexedSImm9:
3551 return Error(Loc, "index must be an integer in range [-256, 255].");
3552 case Match_InvalidMemoryIndexedSImm10:
3553 return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
3554 case Match_InvalidMemoryIndexed4SImm7:
3555 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3556 case Match_InvalidMemoryIndexed8SImm7:
3557 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3558 case Match_InvalidMemoryIndexed16SImm7:
3559 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
// Register-offset extend diagnostics, per transfer size (W then X forms).
3560 case Match_InvalidMemoryWExtend8:
3562 "expected 'uxtw' or 'sxtw' with optional shift of #0");
3563 case Match_InvalidMemoryWExtend16:
3565 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3566 case Match_InvalidMemoryWExtend32:
3568 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3569 case Match_InvalidMemoryWExtend64:
3571 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3572 case Match_InvalidMemoryWExtend128:
3574 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3575 case Match_InvalidMemoryXExtend8:
3577 "expected 'lsl' or 'sxtx' with optional shift of #0");
3578 case Match_InvalidMemoryXExtend16:
3580 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3581 case Match_InvalidMemoryXExtend32:
3583 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3584 case Match_InvalidMemoryXExtend64:
3586 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3587 case Match_InvalidMemoryXExtend128:
3589 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
// Unsigned scaled memory-offset diagnostics.
3590 case Match_InvalidMemoryIndexed1:
3591 return Error(Loc, "index must be an integer in range [0, 4095].");
3592 case Match_InvalidMemoryIndexed2:
3593 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3594 case Match_InvalidMemoryIndexed4:
3595 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3596 case Match_InvalidMemoryIndexed8:
3597 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3598 case Match_InvalidMemoryIndexed16:
3599 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
// Plain immediate-range diagnostics.
3600 case Match_InvalidImm0_1:
3601 return Error(Loc, "immediate must be an integer in range [0, 1].");
3602 case Match_InvalidImm0_7:
3603 return Error(Loc, "immediate must be an integer in range [0, 7].");
3604 case Match_InvalidImm0_15:
3605 return Error(Loc, "immediate must be an integer in range [0, 15].");
3606 case Match_InvalidImm0_31:
3607 return Error(Loc, "immediate must be an integer in range [0, 31].");
3608 case Match_InvalidImm0_63:
3609 return Error(Loc, "immediate must be an integer in range [0, 63].");
3610 case Match_InvalidImm0_127:
3611 return Error(Loc, "immediate must be an integer in range [0, 127].");
3612 case Match_InvalidImm0_255:
3613 return Error(Loc, "immediate must be an integer in range [0, 255].");
3614 case Match_InvalidImm0_65535:
3615 return Error(Loc, "immediate must be an integer in range [0, 65535].");
3616 case Match_InvalidImm1_8:
3617 return Error(Loc, "immediate must be an integer in range [1, 8].");
3618 case Match_InvalidImm1_16:
3619 return Error(Loc, "immediate must be an integer in range [1, 16].");
3620 case Match_InvalidImm1_32:
3621 return Error(Loc, "immediate must be an integer in range [1, 32].");
3622 case Match_InvalidImm1_64:
3623 return Error(Loc, "immediate must be an integer in range [1, 64].");
// Vector lane-index diagnostics, per element size.
3624 case Match_InvalidIndex1:
3625 return Error(Loc, "expected lane specifier '[1]'");
3626 case Match_InvalidIndexB:
3627 return Error(Loc, "vector lane must be an integer in range [0, 15].");
3628 case Match_InvalidIndexH:
3629 return Error(Loc, "vector lane must be an integer in range [0, 7].");
3630 case Match_InvalidIndexS:
3631 return Error(Loc, "vector lane must be an integer in range [0, 3].");
3632 case Match_InvalidIndexD:
3633 return Error(Loc, "vector lane must be an integer in range [0, 1].");
3634 case Match_InvalidLabel:
3635 return Error(Loc, "expected label or encodable integer pc offset");
3637 return Error(Loc, "expected readable system register");
3639 return Error(Loc, "expected writable system register or pstate");
3640 case Match_InvalidComplexRotationEven:
3641 return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
3642 case Match_InvalidComplexRotationOdd:
3643 return Error(Loc, "complex rotation must be 90 or 270.");
// Unknown mnemonic: run the spell checker over the mnemonic token and, if it
// finds a close match, the suggestion string is appended to the diagnostic.
3644 case Match_MnemonicFail: {
3645 std::string Suggestion = AArch64MnemonicSpellCheck(
3646 ((AArch64Operand &)*Operands[0]).getToken(),
3647 ComputeAvailableFeatures(STI->getFeatureBits()));
3648 return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
// SVE predicate-register diagnostics (full P0-P15 vs restricted P0-P7).
3650 case Match_InvalidSVEPredicateAnyReg:
3651 case Match_InvalidSVEPredicateBReg:
3652 case Match_InvalidSVEPredicateHReg:
3653 case Match_InvalidSVEPredicateSReg:
3654 case Match_InvalidSVEPredicateDReg:
3655 return Error(Loc, "invalid predicate register.");
3656 case Match_InvalidSVEPredicate3bAnyReg:
3657 case Match_InvalidSVEPredicate3bBReg:
3658 case Match_InvalidSVEPredicate3bHReg:
3659 case Match_InvalidSVEPredicate3bSReg:
3660 case Match_InvalidSVEPredicate3bDReg:
3661 return Error(Loc, "restricted predicate has range [0, 7].");
3663 llvm_unreachable("unexpected error code!");
// Forward declaration: maps a single subtarget-feature bit to its printable
// name; used to build the "instruction requires:" message for
// Match_MissingFeature in MatchAndEmitInstruction below.
3667 static const char *getSubtargetFeatureName(uint64_t Val);
// Top-level match-and-emit entry point, called by the generic AsmParser once
// the mnemonic and operands have been parsed. First applies a series of
// alias rewrites that InstAlias/TableGen cannot express (lsl->ubfm, bfc->bfm,
// bfi/sbfiz/ubfiz and bfxil/sbfx/ubfx -> *bfm, the movi.2d #0 zero-cycle
// zeroing workaround, and W/X register twiddling for sxt*/uxt*), then runs
// the generated matcher — short-form NEON table (variant 1) first, long-form
// (variant 0) second — validates the result, and either emits the MCInst or
// produces a diagnostic. Returns true on error.
3669 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3670 OperandVector &Operands,
3672 uint64_t &ErrorInfo,
3673 bool MatchingInlineAsm) {
3674 assert(!Operands.empty() && "Unexpect empty operand list!");
3675 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3676 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3678 StringRef Tok = Op.getToken();
3679 unsigned NumOperands = Operands.size();
// 'lsl Rd, Rn, #imm' is an alias of UBFM: fold the shift amount into the
// UBFM immr/imms pair (mod the register width) and retarget the mnemonic.
3681 if (NumOperands == 4 && Tok == "lsl") {
3682 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3683 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3684 if (Op2.isScalarReg() && Op3.isImm()) {
3685 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3687 uint64_t Op3Val = Op3CE->getValue();
3688 uint64_t NewOp3Val = 0;
3689 uint64_t NewOp4Val = 0;
3690 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3692 NewOp3Val = (32 - Op3Val) & 0x1f;
3693 NewOp4Val = 31 - Op3Val;
3695 NewOp3Val = (64 - Op3Val) & 0x3f;
3696 NewOp4Val = 63 - Op3Val;
3699 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
3700 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
3702 Operands[0] = AArch64Operand::CreateToken(
3703 "ubfm", false, Op.getStartLoc(), getContext());
3704 Operands.push_back(AArch64Operand::CreateImm(
3705 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3706 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3707 Op3.getEndLoc(), getContext());
3710 } else if (NumOperands == 4 && Tok == "bfc") {
3711 // FIXME: Horrible hack to handle BFC->BFM alias.
3712 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3713 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
3714 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
3716 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
3717 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
3718 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
3720 if (LSBCE && WidthCE) {
3721 uint64_t LSB = LSBCE->getValue();
3722 uint64_t Width = WidthCE->getValue();
3724 uint64_t RegWidth = 0;
3725 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
// Validate lsb/width against the destination register width before
// encoding them as the BFM immr/imms pair.
3731 if (LSB >= RegWidth)
3732 return Error(LSBOp.getStartLoc(),
3733 "expected integer in range [0, 31]");
3734 if (Width < 1 || Width > RegWidth)
3735 return Error(WidthOp.getStartLoc(),
3736 "expected integer in range [1, 32]");
3740 ImmR = (32 - LSB) & 0x1f;
3742 ImmR = (64 - LSB) & 0x3f;
3744 uint64_t ImmS = Width - 1;
3746 if (ImmR != 0 && ImmS >= ImmR)
3747 return Error(WidthOp.getStartLoc(),
3748 "requested insert overflows register");
// BFC has no source register, so insert WZR/XZR as the BFM source.
3750 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
3751 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
3752 Operands[0] = AArch64Operand::CreateToken(
3753 "bfm", false, Op.getStartLoc(), getContext());
3754 Operands[2] = AArch64Operand::CreateReg(
3755 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
3756 SMLoc(), SMLoc(), getContext());
3757 Operands[3] = AArch64Operand::CreateImm(
3758 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
3759 Operands.emplace_back(
3760 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
3761 WidthOp.getEndLoc(), getContext()));
3764 } else if (NumOperands == 5) {
3765 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3766 // UBFIZ -> UBFM aliases.
3767 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3768 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3769 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3770 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3772 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
3773 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3774 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3776 if (Op3CE && Op4CE) {
3777 uint64_t Op3Val = Op3CE->getValue();
3778 uint64_t Op4Val = Op4CE->getValue();
3780 uint64_t RegWidth = 0;
3781 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3787 if (Op3Val >= RegWidth)
3788 return Error(Op3.getStartLoc(),
3789 "expected integer in range [0, 31]");
3790 if (Op4Val < 1 || Op4Val > RegWidth)
3791 return Error(Op4.getStartLoc(),
3792 "expected integer in range [1, 32]");
// insert-form encoding: immr = (regwidth - lsb) mod regwidth,
// imms = width - 1.
3794 uint64_t NewOp3Val = 0;
3796 NewOp3Val = (32 - Op3Val) & 0x1f;
3798 NewOp3Val = (64 - Op3Val) & 0x3f;
3800 uint64_t NewOp4Val = Op4Val - 1;
3802 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3803 return Error(Op4.getStartLoc(),
3804 "requested insert overflows register");
3806 const MCExpr *NewOp3 =
3807 MCConstantExpr::create(NewOp3Val, getContext());
3808 const MCExpr *NewOp4 =
3809 MCConstantExpr::create(NewOp4Val, getContext());
3810 Operands[3] = AArch64Operand::CreateImm(
3811 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
3812 Operands[4] = AArch64Operand::CreateImm(
3813 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3815 Operands[0] = AArch64Operand::CreateToken(
3816 "bfm", false, Op.getStartLoc(), getContext());
3817 else if (Tok == "sbfiz")
3818 Operands[0] = AArch64Operand::CreateToken(
3819 "sbfm", false, Op.getStartLoc(), getContext());
3820 else if (Tok == "ubfiz")
3821 Operands[0] = AArch64Operand::CreateToken(
3822 "ubfm", false, Op.getStartLoc(), getContext());
3824 llvm_unreachable("No valid mnemonic for alias?");
3828 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3829 // UBFX -> UBFM aliases.
3830 } else if (NumOperands == 5 &&
3831 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
3832 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3833 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3834 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3836 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
3837 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3838 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3840 if (Op3CE && Op4CE) {
3841 uint64_t Op3Val = Op3CE->getValue();
3842 uint64_t Op4Val = Op4CE->getValue();
3844 uint64_t RegWidth = 0;
3845 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3851 if (Op3Val >= RegWidth)
3852 return Error(Op3.getStartLoc(),
3853 "expected integer in range [0, 31]");
3854 if (Op4Val < 1 || Op4Val > RegWidth)
3855 return Error(Op4.getStartLoc(),
3856 "expected integer in range [1, 32]");
// extract-form encoding: immr stays lsb, imms = lsb + width - 1.
3858 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3860 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
3861 return Error(Op4.getStartLoc(),
3862 "requested extract overflows register");
3864 const MCExpr *NewOp4 =
3865 MCConstantExpr::create(NewOp4Val, getContext());
3866 Operands[4] = AArch64Operand::CreateImm(
3867 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3869 Operands[0] = AArch64Operand::CreateToken(
3870 "bfm", false, Op.getStartLoc(), getContext());
3871 else if (Tok == "sbfx")
3872 Operands[0] = AArch64Operand::CreateToken(
3873 "sbfm", false, Op.getStartLoc(), getContext());
3874 else if (Tok == "ubfx")
3875 Operands[0] = AArch64Operand::CreateToken(
3876 "ubfm", false, Op.getStartLoc(), getContext());
3878 llvm_unreachable("No valid mnemonic for alias?");
3884 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
3885 // instruction for FP registers correctly in some rare circumstances. Convert
3886 // it to a safe instruction and warn (because silently changing someone's
3887 // assembly is rude).
3888 if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
3889 NumOperands == 4 && Tok == "movi") {
3890 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3891 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3892 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
// Accept both spellings: 'movi.2d vN, #0' (suffix token first) and
// 'movi vN.2d, #0' (register first).
3893 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
3894 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
3895 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
3896 if (Suffix.lower() == ".2d" &&
3897 cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
3898 Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
3899 " correctly on this CPU, converting to equivalent movi.16b");
3900 // Switch the suffix to .16b.
3901 unsigned Idx = Op1.isToken() ? 1 : 2;
3902 Operands[Idx] = AArch64Operand::CreateToken(".16b", false, IDLoc,
3908 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3909 // InstAlias can't quite handle this since the reg classes aren't
3911 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3912 // The source register can be Wn here, but the matcher expects a
3913 // GPR64. Twiddle it here if necessary.
3914 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3915 if (Op.isScalarReg()) {
3916 unsigned Reg = getXRegFromWReg(Op.getReg());
3917 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
3918 Op.getStartLoc(), Op.getEndLoc(),
3922 // FIXME: Likewise for sxt[bh] with a Xd dst operand
3923 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
3924 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3925 if (Op.isScalarReg() &&
3926 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3928 // The source register can be Wn here, but the matcher expects a
3929 // GPR64. Twiddle it here if necessary.
3930 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3931 if (Op.isScalarReg()) {
3932 unsigned Reg = getXRegFromWReg(Op.getReg());
3933 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
3935 Op.getEndLoc(), getContext());
3939 // FIXME: Likewise for uxt[bh] with a Xd dst operand
3940 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
3941 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3942 if (Op.isScalarReg() &&
3943 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3945 // The source register can be Wn here, but the matcher expects a
3946 // GPR32. Twiddle it here if necessary.
3947 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3948 if (Op.isScalarReg()) {
3949 unsigned Reg = getWRegFromXReg(Op.getReg());
3950 Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
3952 Op.getEndLoc(), getContext());
3958 // First try to match against the secondary set of tables containing the
3959 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
3960 unsigned MatchResult =
3961 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
3963 // If that fails, try against the alternate table containing long-form NEON:
3964 // "fadd v0.2s, v1.2s, v2.2s"
3965 if (MatchResult != Match_Success) {
3966 // But first, save the short-form match result: we can use it in case the
3967 // long-form match also fails.
3968 auto ShortFormNEONErrorInfo = ErrorInfo;
3969 auto ShortFormNEONMatchResult = MatchResult;
3972 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
3974 // Now, both matches failed, and the long-form match failed on the mnemonic
3975 // suffix token operand. The short-form match failure is probably more
3976 // relevant: use it instead.
3977 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
3978 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
3979 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
3980 MatchResult = ShortFormNEONMatchResult;
3981 ErrorInfo = ShortFormNEONErrorInfo;
3985 switch (MatchResult) {
3986 case Match_Success: {
3987 // Perform range checking and other semantic validations
// Collect start locations of operands 1..N-1 so validateInstruction can
// point its diagnostics at the offending operand (Loc[0] == operand 1).
3988 SmallVector<SMLoc, 8> OperandLocs;
3989 NumOperands = Operands.size();
3990 for (unsigned i = 1; i < NumOperands; ++i)
3991 OperandLocs.push_back(Operands[i]->getStartLoc());
3992 if (validateInstruction(Inst, OperandLocs))
3996 Out.EmitInstruction(Inst, getSTI());
3999 case Match_MissingFeature: {
4000 assert(ErrorInfo && "Unknown missing feature!");
4001 // Special case the error message for the very common case where only
4002 // a single subtarget feature is missing (neon, e.g.).
4003 std::string Msg = "instruction requires:";
// Walk the missing-feature bitmask bit by bit, appending each feature name.
4005 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
4006 if (ErrorInfo & Mask) {
4008 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
4012 return Error(IDLoc, Msg);
4014 case Match_MnemonicFail:
4015 return showMatchError(IDLoc, MatchResult, Operands);
4016 case Match_InvalidOperand: {
4017 SMLoc ErrorLoc = IDLoc;
// ErrorInfo, when valid, indexes the operand that failed to match.
4019 if (ErrorInfo != ~0ULL) {
4020 if (ErrorInfo >= Operands.size())
4021 return Error(IDLoc, "too few operands for instruction",
4022 SMRange(IDLoc, getTok().getLoc()));
4024 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4025 if (ErrorLoc == SMLoc())
4028 // If the match failed on a suffix token operand, tweak the diagnostic
4030 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
4031 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
4032 MatchResult = Match_InvalidSuffix;
4034 return showMatchError(ErrorLoc, MatchResult, Operands);
// All remaining operand-specific failures share one handler: resolve the
// offending operand's location and let showMatchError pick the message.
4036 case Match_InvalidMemoryIndexed1:
4037 case Match_InvalidMemoryIndexed2:
4038 case Match_InvalidMemoryIndexed4:
4039 case Match_InvalidMemoryIndexed8:
4040 case Match_InvalidMemoryIndexed16:
4041 case Match_InvalidCondCode:
4042 case Match_AddSubRegExtendSmall:
4043 case Match_AddSubRegExtendLarge:
4044 case Match_AddSubSecondSource:
4045 case Match_LogicalSecondSource:
4046 case Match_AddSubRegShift32:
4047 case Match_AddSubRegShift64:
4048 case Match_InvalidMovImm32Shift:
4049 case Match_InvalidMovImm64Shift:
4050 case Match_InvalidFPImm:
4051 case Match_InvalidMemoryWExtend8:
4052 case Match_InvalidMemoryWExtend16:
4053 case Match_InvalidMemoryWExtend32:
4054 case Match_InvalidMemoryWExtend64:
4055 case Match_InvalidMemoryWExtend128:
4056 case Match_InvalidMemoryXExtend8:
4057 case Match_InvalidMemoryXExtend16:
4058 case Match_InvalidMemoryXExtend32:
4059 case Match_InvalidMemoryXExtend64:
4060 case Match_InvalidMemoryXExtend128:
4061 case Match_InvalidMemoryIndexed4SImm7:
4062 case Match_InvalidMemoryIndexed8SImm7:
4063 case Match_InvalidMemoryIndexed16SImm7:
4064 case Match_InvalidMemoryIndexedSImm9:
4065 case Match_InvalidMemoryIndexedSImm10:
4066 case Match_InvalidImm0_1:
4067 case Match_InvalidImm0_7:
4068 case Match_InvalidImm0_15:
4069 case Match_InvalidImm0_31:
4070 case Match_InvalidImm0_63:
4071 case Match_InvalidImm0_127:
4072 case Match_InvalidImm0_255:
4073 case Match_InvalidImm0_65535:
4074 case Match_InvalidImm1_8:
4075 case Match_InvalidImm1_16:
4076 case Match_InvalidImm1_32:
4077 case Match_InvalidImm1_64:
4078 case Match_InvalidIndex1:
4079 case Match_InvalidIndexB:
4080 case Match_InvalidIndexH:
4081 case Match_InvalidIndexS:
4082 case Match_InvalidIndexD:
4083 case Match_InvalidLabel:
4084 case Match_InvalidComplexRotationEven:
4085 case Match_InvalidComplexRotationOdd:
4086 case Match_InvalidSVEPredicateAnyReg:
4087 case Match_InvalidSVEPredicateBReg:
4088 case Match_InvalidSVEPredicateHReg:
4089 case Match_InvalidSVEPredicateSReg:
4090 case Match_InvalidSVEPredicateDReg:
4091 case Match_InvalidSVEPredicate3bAnyReg:
4092 case Match_InvalidSVEPredicate3bBReg:
4093 case Match_InvalidSVEPredicate3bHReg:
4094 case Match_InvalidSVEPredicate3bSReg:
4095 case Match_InvalidSVEPredicate3bDReg:
4098 if (ErrorInfo >= Operands.size())
4099 return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
4100 // Any time we get here, there's nothing fancy to do. Just get the
4101 // operand SMLoc and display the diagnostic.
4102 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4103 if (ErrorLoc == SMLoc())
4105 return showMatchError(ErrorLoc, MatchResult, Operands);
4109 llvm_unreachable("Implement any new match types added!");
4112 /// ParseDirective parses the arm specific directives
// Dispatches AArch64-specific assembler directives to their handlers.
// Object-file-format gating: .inst is only accepted for non-MachO, non-COFF
// (i.e. ELF-like) output; the MCLOHDirectiveName() branch is only reachable
// otherwise (NOTE(review): presumably the MachO linker-optimization-hint
// directive — confirm against MCLinkerOptimizationHint.h).
4113 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
4114 const MCObjectFileInfo::Environment Format =
4115 getContext().getObjectFileInfo()->getObjectFileType();
4116 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
4117 bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
4119 StringRef IDVal = DirectiveID.getIdentifier();
4120 SMLoc Loc = DirectiveID.getLoc();
4121 if (IDVal == ".arch")
4122 parseDirectiveArch(Loc);
4123 else if (IDVal == ".cpu")
4124 parseDirectiveCPU(Loc);
// .hword/.word/.xword emit 2-, 4- and 8-byte data values respectively.
4125 else if (IDVal == ".hword")
4126 parseDirectiveWord(2, Loc);
4127 else if (IDVal == ".word")
4128 parseDirectiveWord(4, Loc);
4129 else if (IDVal == ".xword")
4130 parseDirectiveWord(8, Loc);
4131 else if (IDVal == ".tlsdesccall")
4132 parseDirectiveTLSDescCall(Loc);
4133 else if (IDVal == ".ltorg" || IDVal == ".pool")
4134 parseDirectiveLtorg(Loc);
4135 else if (IDVal == ".unreq")
4136 parseDirectiveUnreq(Loc);
4137 else if (!IsMachO && !IsCOFF) {
4138 if (IDVal == ".inst")
4139 parseDirectiveInst(Loc);
4142 } else if (IDVal == MCLOHDirectiveName())
4143 parseDirectiveLOH(IDVal, Loc);
// Maps ".arch"/".cpu" extension suffix names (e.g. "+crc" / "+nocrc") to the
// subtarget feature bits they toggle; consumed by parseDirectiveArch below.
// An entry with an empty Features set marks a name that is recognized but not
// supported (parseDirectiveArch reports a fatal error for those).
4149 static const struct {
4151 const FeatureBitset Features;
4152 } ExtensionMap[] = {
4153 { "crc", {AArch64::FeatureCRC} },
4154 { "crypto", {AArch64::FeatureCrypto} },
4155 { "fp", {AArch64::FeatureFPARMv8} },
4156 { "simd", {AArch64::FeatureNEON} },
4157 { "ras", {AArch64::FeatureRAS} },
4158 { "lse", {AArch64::FeatureLSE} },
4160 // FIXME: Unsupported extensions
4167 /// parseDirectiveArch
///   ::= .arch <arch-name>[+<extension>[+<extension>...]]
/// Re-bases the subtarget on the named architecture (using the "generic"
/// CPU), then enables/disables each "+ext" / "+noext" suffix through
/// ExtensionMap. Returns true on a parse error.
4169 bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
4170 SMLoc ArchLoc = getLoc();
// Split "armv8.1-a+crc+nosimd" into the arch name and the extension tail.
4172 StringRef Arch, ExtensionString;
4173 std::tie(Arch, ExtensionString) =
4174 getParser().parseStringToEndOfStatement().trim().split('+');
4176 AArch64::ArchKind ID = AArch64::parseArch(Arch);
4177 if (ID == AArch64::ArchKind::INVALID)
4178 return Error(ArchLoc, "unknown arch name");
4180 if (parseToken(AsmToken::EndOfStatement))
4183 // Get the architecture and extension features.
4184 std::vector<StringRef> AArch64Features;
4185 AArch64::getArchFeatures(ID, AArch64Features);
4186 AArch64::getExtensionFeatures(AArch64::getDefaultExtensions("generic", ID),
// Rebuild the subtarget from scratch on a mutable copy so the directive
// does not leak into other streams sharing the original STI.
4189 MCSubtargetInfo &STI = copySTI();
4190 std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
4191 STI.setDefaultFeatures("generic", join(ArchFeatures.begin(), ArchFeatures.end(), ","));
4193 SmallVector<StringRef, 4> RequestedExtensions;
4194 if (!ExtensionString.empty())
4195 ExtensionString.split(RequestedExtensions, '+');
4197 FeatureBitset Features = STI.getFeatureBits();
4198 for (auto Name : RequestedExtensions) {
4199 bool EnableFeature = true;
// A leading "no" (case-insensitive) requests disabling the extension.
4201 if (Name.startswith_lower("no")) {
4202 EnableFeature = false;
4203 Name = Name.substr(2);
4206 for (const auto &Extension : ExtensionMap) {
4207 if (Extension.Name != Name)
4210 if (Extension.Features.none())
4211 report_fatal_error("unsupported architectural extension: " + Name);
// Compute only the bits that actually change state, so ToggleFeature
// flips exactly the requested delta and nothing else.
4213 FeatureBitset ToggleFeatures = EnableFeature
4214 ? (~Features & Extension.Features)
4215 : ( Features & Extension.Features);
4217 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
4218 setAvailableFeatures(Features);
4225 static SMLoc incrementLoc(SMLoc L, int Offset) {
4226 return SMLoc::getFromPointer(L.getPointer() + Offset);
4229 /// parseDirectiveCPU
4231 bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
4232 SMLoc CurLoc = getLoc();
4234 StringRef CPU, ExtensionString;
4235 std::tie(CPU, ExtensionString) =
4236 getParser().parseStringToEndOfStatement().trim().split('+');
4238 if (parseToken(AsmToken::EndOfStatement))
4241 SmallVector<StringRef, 4> RequestedExtensions;
4242 if (!ExtensionString.empty())
4243 ExtensionString.split(RequestedExtensions, '+');
4245 // FIXME This is using tablegen data, but should be moved to ARMTargetParser
4246 // once that is tablegen'ed
4247 if (!getSTI().isCPUStringValid(CPU)) {
4248 Error(CurLoc, "unknown CPU name");
4252 MCSubtargetInfo &STI = copySTI();
4253 STI.setDefaultFeatures(CPU, "");
4254 CurLoc = incrementLoc(CurLoc, CPU.size());
4256 FeatureBitset Features = STI.getFeatureBits();
4257 for (auto Name : RequestedExtensions) {
4258 // Advance source location past '+'.
4259 CurLoc = incrementLoc(CurLoc, 1);
4261 bool EnableFeature = true;
4263 if (Name.startswith_lower("no")) {
4264 EnableFeature = false;
4265 Name = Name.substr(2);
4268 bool FoundExtension = false;
4269 for (const auto &Extension : ExtensionMap) {
4270 if (Extension.Name != Name)
4273 if (Extension.Features.none())
4274 report_fatal_error("unsupported architectural extension: " + Name);
4276 FeatureBitset ToggleFeatures = EnableFeature
4277 ? (~Features & Extension.Features)
4278 : ( Features & Extension.Features);
4280 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
4281 setAvailableFeatures(Features);
4282 FoundExtension = true;
4287 if (!FoundExtension)
4288 Error(CurLoc, "unsupported architectural extension");
4290 CurLoc = incrementLoc(CurLoc, Name.size());
4295 /// parseDirectiveWord
4296 /// ::= .word [ expression (, expression)* ]
4297 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
4298 auto parseOp = [&]() -> bool {
4299 const MCExpr *Value;
4300 if (getParser().parseExpression(Value))
4302 getParser().getStreamer().EmitValue(Value, Size, L);
4306 if (parseMany(parseOp))
4311 /// parseDirectiveInst
4312 /// ::= .inst opcode [, ...]
4313 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
4314 if (getLexer().is(AsmToken::EndOfStatement))
4315 return Error(Loc, "expected expression following '.inst' directive");
4317 auto parseOp = [&]() -> bool {
4320 if (check(getParser().parseExpression(Expr), L, "expected expression"))
4322 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4323 if (check(!Value, L, "expected constant expression"))
4325 getTargetStreamer().emitInst(Value->getValue());
4329 if (parseMany(parseOp))
4330 return addErrorSuffix(" in '.inst' directive");
4334 // parseDirectiveTLSDescCall:
4335 // ::= .tlsdesccall symbol
4336 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4338 if (check(getParser().parseIdentifier(Name), L,
4339 "expected symbol after directive") ||
4340 parseToken(AsmToken::EndOfStatement))
4343 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
4344 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
4345 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
4348 Inst.setOpcode(AArch64::TLSDESCCALL);
4349 Inst.addOperand(MCOperand::createExpr(Expr));
4351 getParser().getStreamer().EmitInstruction(Inst, getSTI());
/// ::= .loh <lohName | lohId> label1, ..., labelN
/// The number of arguments depends on the loh identifier.
bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
  // The LOH kind may be written either as a number or as a mnemonic name.
  if (getParser().getTok().isNot(AsmToken::Identifier)) {
    if (getParser().getTok().isNot(AsmToken::Integer))
      return TokError("expected an identifier or a number in directive");
    // We successfully get a numeric value for the identifier.
    // Check if it is valid.
    int64_t Id = getParser().getTok().getIntVal();
    // NOTE(review): 'Id <= -1U' mixes int64_t with unsigned; -1U converts to
    // 4294967295, so the bound admits any uint32_t-sized value before the
    // validity check — confirm the intended range test.
    if (Id <= -1U && !isValidMCLOHType(Id))
      return TokError("invalid numeric identifier in directive");
    Kind = (MCLOHType)Id;
    StringRef Name = getTok().getIdentifier();
    // We successfully parse an identifier.
    // Check if it is a recognized one.
    int Id = MCLOHNameToId(Name);
      return TokError("invalid identifier in directive");
    Kind = (MCLOHType)Id;
  // Consume the identifier.
  // Get the number of arguments of this LOH.
  int NbArgs = MCLOHIdToNbArgs(Kind);
  assert(NbArgs != -1 && "Invalid number of arguments");

  // Collect exactly NbArgs comma-separated label arguments.
  SmallVector<MCSymbol *, 3> Args;
  for (int Idx = 0; Idx < NbArgs; ++Idx) {
    if (getParser().parseIdentifier(Name))
      return TokError("expected identifier in directive");
    Args.push_back(getContext().getOrCreateSymbol(Name));
    // No trailing comma after the final argument.
    if (Idx + 1 == NbArgs)
    if (parseToken(AsmToken::Comma,
                   "unexpected token in '" + Twine(IDVal) + "' directive"))

  if (parseToken(AsmToken::EndOfStatement,
                 "unexpected token in '" + Twine(IDVal) + "' directive"))

  // Hand the fully-parsed hint to the streamer.
  getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4406 /// parseDirectiveLtorg
4407 /// ::= .ltorg | .pool
4408 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
4409 if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
4411 getTargetStreamer().emitCurrentConstantPool();
/// parseDirectiveReq
///  ::= name .req registername
///
/// Records an alias from \p Name to a register, trying scalar, NEON vector,
/// SVE data vector, and SVE predicate syntax in turn. A redefinition to a
/// different register only warns (the first definition wins).
bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
  MCAsmParser &Parser = getParser();
  Parser.Lex(); // Eat the '.req' token.
  SMLoc SRegLoc = getLoc();
  int RegNum = tryParseRegister();
  RegKind RegisterKind = RegKind::Scalar;
    // Not a scalar register: retry as a NEON vector register.
    RegisterKind = RegKind::NeonVector;
    RegNum = tryMatchVectorRegister(Kind, false);
      // A type suffix (e.g. ".4s") is not allowed on a .req alias.
      return Error(SRegLoc, "vector register without type specifier expected");
    // Retry as an SVE data vector register.
    RegisterKind = RegKind::SVEDataVector;
    OperandMatchResultTy Res =
        tryParseSVERegister(RegNum, Kind, RegKind::SVEDataVector);
    if (Res == MatchOperand_ParseFail)
    if (Res == MatchOperand_Success && !Kind.empty())
      return Error(SRegLoc,
                   "sve vector register without type specifier expected");
    // Retry as an SVE predicate register.
    RegisterKind = RegKind::SVEPredicateVector;
    OperandMatchResultTy Res =
        tryParseSVERegister(RegNum, Kind, RegKind::SVEPredicateVector);
    if (Res == MatchOperand_ParseFail)
    if (Res == MatchOperand_Success && !Kind.empty())
      return Error(SRegLoc,
                   "sve predicate register without type specifier expected");
    // Nothing matched in any register namespace.
    return Error(SRegLoc, "register name or alias expected");
  // Shouldn't be anything else.
  if (parseToken(AsmToken::EndOfStatement,
                 "unexpected input in .req directive"))
  // insert() keeps the existing entry on collision; warn if it differs.
  auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
  if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
    Warning(L, "ignoring redefinition of register alias '" + Name + "'");
4475 /// parseDirectiveUneq
4476 /// ::= .unreq registername
4477 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
4478 MCAsmParser &Parser = getParser();
4479 if (getTok().isNot(AsmToken::Identifier))
4480 return TokError("unexpected input in .unreq directive.");
4481 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
4482 Parser.Lex(); // Eat the identifier.
4483 if (parseToken(AsmToken::EndOfStatement))
4484 return addErrorSuffix("in '.unreq' directive");
AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
                                    AArch64MCExpr::VariantKind &ELFRefKind,
                                    MCSymbolRefExpr::VariantKind &DarwinRefKind,
  // Start from "no relocation specifier seen" on either syntax.
  ELFRefKind = AArch64MCExpr::VK_INVALID;
  DarwinRefKind = MCSymbolRefExpr::VK_None;

  // Peel off an AArch64-specific wrapper (ELF specifier, e.g. :lo12:).
  if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
    ELFRefKind = AE->getKind();
    Expr = AE->getSubExpr();

  const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
    // It's a simple symbol reference with no addend.
    DarwinRefKind = SE->getKind();

  // Otherwise only "symbol +/- constant" is classifiable.
  const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
  SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
  DarwinRefKind = SE->getKind();
  if (BE->getOpcode() != MCBinaryExpr::Add &&
      BE->getOpcode() != MCBinaryExpr::Sub)

  // See if the addend is a constant, otherwise there's more going
  // on here than we can deal with.
  auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
  Addend = AddendExpr->getValue();
  // Subtraction negates the addend.
  if (BE->getOpcode() == MCBinaryExpr::Sub)

  // It's some symbol reference + a constant addend, but really
  // shouldn't use both Darwin and ELF syntax.
  return ELFRefKind == AArch64MCExpr::VK_INVALID ||
         DarwinRefKind == MCSymbolRefExpr::VK_None;
4538 /// Force static initialization.
4539 extern "C" void LLVMInitializeAArch64AsmParser() {
4540 RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
4541 RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
4542 RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
4545 #define GET_REGISTER_MATCHER
4546 #define GET_SUBTARGET_FEATURE_NAME
4547 #define GET_MATCHER_IMPLEMENTATION
4548 #define GET_MNEMONIC_SPELL_CHECKER
4549 #include "AArch64GenAsmMatcher.inc"
// Define this matcher function after the auto-generated include so we
// have the match class enum definitions.
unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
  AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
  // If the kind is a token for a literal immediate, check if our asm
  // operand matches. This is for InstAliases which have a fixed-value
  // immediate in the syntax.
  // ExpectedVal is the literal immediate this match class requires.
  int64_t ExpectedVal;
    return Match_InvalidOperand;
    return Match_InvalidOperand;
  // The operand must be a constant expression equal to the expected literal.
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
    return Match_InvalidOperand;
  if (CE->getValue() == ExpectedVal)
    return Match_Success;
  return Match_InvalidOperand;
OperandMatchResultTy
AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
  // A sequential GPR pair is written "Xn, Xn+1" (or Wn, Wn+1): the first
  // register must have an even encoding and the second must be the
  // consecutive odd register of the same width.
  if (getParser().getTok().isNot(AsmToken::Identifier)) {
    Error(S, "expected register");
    return MatchOperand_ParseFail;

  int FirstReg = tryParseRegister();
  if (FirstReg == -1) {
    return MatchOperand_ParseFail;
  const MCRegisterClass &WRegClass =
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
  const MCRegisterClass &XRegClass =
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID];

  // Classify the first register's width; anything else cannot form a pair.
  bool isXReg = XRegClass.contains(FirstReg),
       isWReg = WRegClass.contains(FirstReg);
  if (!isXReg && !isWReg) {
    Error(S, "expected first even register of a "
             "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;

  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  unsigned FirstEncoding = RI->getEncodingValue(FirstReg);

  // The pair must start on an even encoding.
  if (FirstEncoding & 0x1) {
    Error(S, "expected first even register of a "
             "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;

  if (getParser().getTok().isNot(AsmToken::Comma)) {
    Error(M, "expected comma");
    return MatchOperand_ParseFail;

  int SecondReg = tryParseRegister();
  if (SecondReg ==-1) {
    return MatchOperand_ParseFail;

  // Second register: encoding must be exactly FirstEncoding + 1 and the
  // width must match the first register's.
  if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
      (isXReg && !XRegClass.contains(SecondReg)) ||
      (isWReg && !WRegClass.contains(SecondReg))) {
    Error(E,"expected second odd register of a "
             "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;

  // Fold the even/odd pair into the matching SeqPairs super-register and
  // push it as a single scalar register operand.
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
           &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
           &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);

  Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
      getLoc(), getContext()));

  return MatchOperand_Success;
/// Try to parse an SVE data vector register (e.g. "z0" or "z0.s").
/// When \p ParseSuffix is true the element-type suffix is mandatory.
template <bool ParseSuffix>
OperandMatchResultTy
AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
  const SMLoc S = getLoc();
  // Check for a SVE vector register specifier first.
  OperandMatchResultTy Res =
      tryParseSVERegister(RegNum, Kind, RegKind::SVEDataVector);

  if (Res != MatchOperand_Success)

  // Suffix required but none was written: nothing to match here.
  if (ParseSuffix && Kind.empty())
    return MatchOperand_NoMatch;

  // Translate the suffix string into an element width in bits.
  unsigned ElementWidth = StringSwitch<unsigned>(Kind.lower())
    return MatchOperand_NoMatch;

      AArch64Operand::CreateReg(RegNum, RegKind::SVEDataVector, ElementWidth,
                                S, S, getContext()));

  return MatchOperand_Success;