1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "MCTargetDesc/AArch64MCTargetDesc.h"
13 #include "MCTargetDesc/AArch64TargetStreamer.h"
14 #include "Utils/AArch64BaseInfo.h"
15 #include "llvm/ADT/APFloat.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/SmallVector.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/StringExtras.h"
21 #include "llvm/ADT/StringMap.h"
22 #include "llvm/ADT/StringRef.h"
23 #include "llvm/ADT/StringSwitch.h"
24 #include "llvm/ADT/Twine.h"
25 #include "llvm/MC/MCContext.h"
26 #include "llvm/MC/MCExpr.h"
27 #include "llvm/MC/MCInst.h"
28 #include "llvm/MC/MCLinkerOptimizationHint.h"
29 #include "llvm/MC/MCObjectFileInfo.h"
30 #include "llvm/MC/MCParser/MCAsmLexer.h"
31 #include "llvm/MC/MCParser/MCAsmParser.h"
32 #include "llvm/MC/MCParser/MCAsmParserExtension.h"
33 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
34 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
35 #include "llvm/MC/MCRegisterInfo.h"
36 #include "llvm/MC/MCStreamer.h"
37 #include "llvm/MC/MCSubtargetInfo.h"
38 #include "llvm/MC/MCSymbol.h"
39 #include "llvm/MC/MCTargetOptions.h"
40 #include "llvm/MC/SubtargetFeature.h"
41 #include "llvm/Support/Casting.h"
42 #include "llvm/Support/Compiler.h"
43 #include "llvm/Support/ErrorHandling.h"
44 #include "llvm/Support/MathExtras.h"
45 #include "llvm/Support/SMLoc.h"
46 #include "llvm/Support/TargetParser.h"
47 #include "llvm/Support/TargetRegistry.h"
48 #include "llvm/Support/raw_ostream.h"
62 class AArch64AsmParser : public MCTargetAsmParser {
64 StringRef Mnemonic; ///< Instruction mnemonic.
66 // Map of register aliases registers via the .req directive.
67 StringMap<std::pair<bool, unsigned>> RegisterReqs;
69 AArch64TargetStreamer &getTargetStreamer() {
70 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
71 return static_cast<AArch64TargetStreamer &>(TS);
74 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
76 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
77 void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
78 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
79 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
80 unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
81 int tryParseRegister();
82 int tryMatchVectorRegister(StringRef &Kind, bool expected);
83 bool parseRegister(OperandVector &Operands);
84 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
85 bool parseVectorList(OperandVector &Operands);
86 bool parseOperand(OperandVector &Operands, bool isCondCode,
89 bool showMatchError(SMLoc Loc, unsigned ErrCode);
91 bool parseDirectiveArch(SMLoc L);
92 bool parseDirectiveCPU(SMLoc L);
93 bool parseDirectiveWord(unsigned Size, SMLoc L);
94 bool parseDirectiveInst(SMLoc L);
96 bool parseDirectiveTLSDescCall(SMLoc L);
98 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
99 bool parseDirectiveLtorg(SMLoc L);
101 bool parseDirectiveReq(StringRef Name, SMLoc L);
102 bool parseDirectiveUnreq(SMLoc L);
104 bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
105 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
106 OperandVector &Operands, MCStreamer &Out,
108 bool MatchingInlineAsm) override;
109 /// @name Auto-generated Match Functions
112 #define GET_ASSEMBLER_HEADER
113 #include "AArch64GenAsmMatcher.inc"
117 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
118 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
119 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
120 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
121 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
122 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
123 OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
124 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
125 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
126 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
127 OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
128 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
129 bool tryParseVectorRegister(OperandVector &Operands);
130 OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
133 enum AArch64MatchResultTy {
134 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
135 #define GET_OPERAND_DIAGNOSTIC_TYPES
136 #include "AArch64GenAsmMatcher.inc"
140 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
141 const MCInstrInfo &MII, const MCTargetOptions &Options)
142 : MCTargetAsmParser(Options, STI) {
143 IsILP32 = Options.getABIName() == "ilp32";
144 MCAsmParserExtension::Initialize(Parser);
145 MCStreamer &S = getParser().getStreamer();
146 if (S.getTargetStreamer() == nullptr)
147 new AArch64TargetStreamer(S);
149 // Initialize the set of available features.
150 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
153 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
154 SMLoc NameLoc, OperandVector &Operands) override;
155 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
156 bool ParseDirective(AsmToken DirectiveID) override;
157 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
158 unsigned Kind) override;
160 static bool classifySymbolRef(const MCExpr *Expr,
161 AArch64MCExpr::VariantKind &ELFRefKind,
162 MCSymbolRefExpr::VariantKind &DarwinRefKind,
166 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
168 class AArch64Operand : public MCParsedAsmOperand {
187 SMLoc StartLoc, EndLoc;
192 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
200 struct VectorListOp {
203 unsigned NumElements;
204 unsigned ElementKind;
207 struct VectorIndexOp {
215 struct ShiftedImmOp {
217 unsigned ShiftAmount;
221 AArch64CC::CondCode Code;
225 unsigned Val; // Encoded 8-bit representation.
231 unsigned Val; // Not the enum since not all values have names.
239 uint32_t PStateField;
258 struct ShiftExtendOp {
259 AArch64_AM::ShiftExtendType Type;
261 bool HasExplicitAmount;
271 struct VectorListOp VectorList;
272 struct VectorIndexOp VectorIndex;
274 struct ShiftedImmOp ShiftedImm;
275 struct CondCodeOp CondCode;
276 struct FPImmOp FPImm;
277 struct BarrierOp Barrier;
278 struct SysRegOp SysReg;
279 struct SysCRImmOp SysCRImm;
280 struct PrefetchOp Prefetch;
281 struct PSBHintOp PSBHint;
282 struct ShiftExtendOp ShiftExtend;
285 // Keep the MCContext around as the MCExprs may need manipulated during
286 // the add<>Operands() calls.
290 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
292 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
294 StartLoc = o.StartLoc;
304 ShiftedImm = o.ShiftedImm;
307 CondCode = o.CondCode;
319 VectorList = o.VectorList;
322 VectorIndex = o.VectorIndex;
328 SysCRImm = o.SysCRImm;
331 Prefetch = o.Prefetch;
337 ShiftExtend = o.ShiftExtend;
342 /// getStartLoc - Get the location of the first token of this operand.
343 SMLoc getStartLoc() const override { return StartLoc; }
344 /// getEndLoc - Get the location of the last token of this operand.
345 SMLoc getEndLoc() const override { return EndLoc; }
347 StringRef getToken() const {
348 assert(Kind == k_Token && "Invalid access!");
349 return StringRef(Tok.Data, Tok.Length);
352 bool isTokenSuffix() const {
353 assert(Kind == k_Token && "Invalid access!");
357 const MCExpr *getImm() const {
358 assert(Kind == k_Immediate && "Invalid access!");
362 const MCExpr *getShiftedImmVal() const {
363 assert(Kind == k_ShiftedImm && "Invalid access!");
364 return ShiftedImm.Val;
367 unsigned getShiftedImmShift() const {
368 assert(Kind == k_ShiftedImm && "Invalid access!");
369 return ShiftedImm.ShiftAmount;
372 AArch64CC::CondCode getCondCode() const {
373 assert(Kind == k_CondCode && "Invalid access!");
374 return CondCode.Code;
377 unsigned getFPImm() const {
378 assert(Kind == k_FPImm && "Invalid access!");
382 unsigned getBarrier() const {
383 assert(Kind == k_Barrier && "Invalid access!");
387 StringRef getBarrierName() const {
388 assert(Kind == k_Barrier && "Invalid access!");
389 return StringRef(Barrier.Data, Barrier.Length);
392 unsigned getReg() const override {
393 assert(Kind == k_Register && "Invalid access!");
397 unsigned getVectorListStart() const {
398 assert(Kind == k_VectorList && "Invalid access!");
399 return VectorList.RegNum;
402 unsigned getVectorListCount() const {
403 assert(Kind == k_VectorList && "Invalid access!");
404 return VectorList.Count;
407 unsigned getVectorIndex() const {
408 assert(Kind == k_VectorIndex && "Invalid access!");
409 return VectorIndex.Val;
412 StringRef getSysReg() const {
413 assert(Kind == k_SysReg && "Invalid access!");
414 return StringRef(SysReg.Data, SysReg.Length);
417 unsigned getSysCR() const {
418 assert(Kind == k_SysCR && "Invalid access!");
422 unsigned getPrefetch() const {
423 assert(Kind == k_Prefetch && "Invalid access!");
427 unsigned getPSBHint() const {
428 assert(Kind == k_PSBHint && "Invalid access!");
432 StringRef getPSBHintName() const {
433 assert(Kind == k_PSBHint && "Invalid access!");
434 return StringRef(PSBHint.Data, PSBHint.Length);
437 StringRef getPrefetchName() const {
438 assert(Kind == k_Prefetch && "Invalid access!");
439 return StringRef(Prefetch.Data, Prefetch.Length);
442 AArch64_AM::ShiftExtendType getShiftExtendType() const {
443 assert(Kind == k_ShiftExtend && "Invalid access!");
444 return ShiftExtend.Type;
447 unsigned getShiftExtendAmount() const {
448 assert(Kind == k_ShiftExtend && "Invalid access!");
449 return ShiftExtend.Amount;
452 bool hasShiftExtendAmount() const {
453 assert(Kind == k_ShiftExtend && "Invalid access!");
454 return ShiftExtend.HasExplicitAmount;
457 bool isImm() const override { return Kind == k_Immediate; }
458 bool isMem() const override { return false; }
459 bool isSImm9() const {
462 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
465 int64_t Val = MCE->getValue();
466 return (Val >= -256 && Val < 256);
468 bool isSImm7s4() const {
471 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
474 int64_t Val = MCE->getValue();
475 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
477 bool isSImm7s8() const {
480 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
483 int64_t Val = MCE->getValue();
484 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
486 bool isSImm7s16() const {
489 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
492 int64_t Val = MCE->getValue();
493 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
496 bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
497 AArch64MCExpr::VariantKind ELFRefKind;
498 MCSymbolRefExpr::VariantKind DarwinRefKind;
500 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
502 // If we don't understand the expression, assume the best and
503 // let the fixup and relocation code deal with it.
507 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
508 ELFRefKind == AArch64MCExpr::VK_LO12 ||
509 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
510 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
511 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
512 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
513 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
514 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
515 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
516 // Note that we don't range-check the addend. It's adjusted modulo page
517 // size when converted, so there is no "out of range" condition when using
519 return Addend >= 0 && (Addend % Scale) == 0;
520 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
521 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
522 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
529 template <int Scale> bool isUImm12Offset() const {
533 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
535 return isSymbolicUImm12Offset(getImm(), Scale);
537 int64_t Val = MCE->getValue();
538 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
541 template <int N, int M>
542 bool isImmInRange() const {
545 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
548 int64_t Val = MCE->getValue();
549 return (Val >= N && Val <= M);
552 bool isLogicalImm32() const {
555 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
558 int64_t Val = MCE->getValue();
559 if (Val >> 32 != 0 && Val >> 32 != ~0LL)
562 return AArch64_AM::isLogicalImmediate(Val, 32);
565 bool isLogicalImm64() const {
568 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
571 return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
574 bool isLogicalImm32Not() const {
577 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
580 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
581 return AArch64_AM::isLogicalImmediate(Val, 32);
584 bool isLogicalImm64Not() const {
587 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
590 return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
593 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
595 bool isAddSubImm() const {
596 if (!isShiftedImm() && !isImm())
601 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
602 if (isShiftedImm()) {
603 unsigned Shift = ShiftedImm.ShiftAmount;
604 Expr = ShiftedImm.Val;
605 if (Shift != 0 && Shift != 12)
611 AArch64MCExpr::VariantKind ELFRefKind;
612 MCSymbolRefExpr::VariantKind DarwinRefKind;
614 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
615 DarwinRefKind, Addend)) {
616 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
617 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
618 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
619 || ELFRefKind == AArch64MCExpr::VK_LO12
620 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
621 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
622 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
623 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
624 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
625 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
626 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
629 // If it's a constant, it should be a real immediate in range:
630 if (auto *CE = dyn_cast<MCConstantExpr>(Expr))
631 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
633 // If it's an expression, we hope for the best and let the fixup/relocation
634 // code deal with it.
638 bool isAddSubImmNeg() const {
639 if (!isShiftedImm() && !isImm())
644 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
645 if (isShiftedImm()) {
646 unsigned Shift = ShiftedImm.ShiftAmount;
647 Expr = ShiftedImm.Val;
648 if (Shift != 0 && Shift != 12)
653 // Otherwise it should be a real negative immediate in range:
654 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
655 return CE != nullptr && CE->getValue() < 0 && -CE->getValue() <= 0xfff;
658 bool isCondCode() const { return Kind == k_CondCode; }
660 bool isSIMDImmType10() const {
663 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
666 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
670 bool isBranchTarget() const {
673 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
676 int64_t Val = MCE->getValue();
679 assert(N > 0 && "Branch target immediate cannot be 0 bits!");
680 return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
684 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
688 AArch64MCExpr::VariantKind ELFRefKind;
689 MCSymbolRefExpr::VariantKind DarwinRefKind;
691 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
692 DarwinRefKind, Addend)) {
695 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
698 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
699 if (ELFRefKind == AllowedModifiers[i])
706 bool isMovZSymbolG3() const {
707 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
710 bool isMovZSymbolG2() const {
711 return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
712 AArch64MCExpr::VK_TPREL_G2,
713 AArch64MCExpr::VK_DTPREL_G2});
716 bool isMovZSymbolG1() const {
717 return isMovWSymbol({
718 AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
719 AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
720 AArch64MCExpr::VK_DTPREL_G1,
724 bool isMovZSymbolG0() const {
725 return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
726 AArch64MCExpr::VK_TPREL_G0,
727 AArch64MCExpr::VK_DTPREL_G0});
730 bool isMovKSymbolG3() const {
731 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
734 bool isMovKSymbolG2() const {
735 return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
738 bool isMovKSymbolG1() const {
739 return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
740 AArch64MCExpr::VK_TPREL_G1_NC,
741 AArch64MCExpr::VK_DTPREL_G1_NC});
744 bool isMovKSymbolG0() const {
746 {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
747 AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
750 template<int RegWidth, int Shift>
751 bool isMOVZMovAlias() const {
752 if (!isImm()) return false;
754 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
755 if (!CE) return false;
756 uint64_t Value = CE->getValue();
758 return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
761 template<int RegWidth, int Shift>
762 bool isMOVNMovAlias() const {
763 if (!isImm()) return false;
765 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
766 if (!CE) return false;
767 uint64_t Value = CE->getValue();
769 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
772 bool isFPImm() const { return Kind == k_FPImm; }
773 bool isBarrier() const { return Kind == k_Barrier; }
774 bool isSysReg() const { return Kind == k_SysReg; }
776 bool isMRSSystemRegister() const {
777 if (!isSysReg()) return false;
779 return SysReg.MRSReg != -1U;
782 bool isMSRSystemRegister() const {
783 if (!isSysReg()) return false;
784 return SysReg.MSRReg != -1U;
787 bool isSystemPStateFieldWithImm0_1() const {
788 if (!isSysReg()) return false;
789 return (SysReg.PStateField == AArch64PState::PAN ||
790 SysReg.PStateField == AArch64PState::UAO);
793 bool isSystemPStateFieldWithImm0_15() const {
794 if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
795 return SysReg.PStateField != -1U;
798 bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
799 bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
801 bool isVectorRegLo() const {
802 return Kind == k_Register && Reg.isVector &&
803 AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
807 bool isGPR32as64() const {
808 return Kind == k_Register && !Reg.isVector &&
809 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
812 bool isWSeqPair() const {
813 return Kind == k_Register && !Reg.isVector &&
814 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
818 bool isXSeqPair() const {
819 return Kind == k_Register && !Reg.isVector &&
820 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
824 bool isGPR64sp0() const {
825 return Kind == k_Register && !Reg.isVector &&
826 AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
829 /// Is this a vector list with the type implicit (presumably attached to the
830 /// instruction itself)?
831 template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
832 return Kind == k_VectorList && VectorList.Count == NumRegs &&
833 !VectorList.ElementKind;
836 template <unsigned NumRegs, unsigned NumElements, char ElementKind>
837 bool isTypedVectorList() const {
838 if (Kind != k_VectorList)
840 if (VectorList.Count != NumRegs)
842 if (VectorList.ElementKind != ElementKind)
844 return VectorList.NumElements == NumElements;
847 bool isVectorIndex1() const {
848 return Kind == k_VectorIndex && VectorIndex.Val == 1;
851 bool isVectorIndexB() const {
852 return Kind == k_VectorIndex && VectorIndex.Val < 16;
855 bool isVectorIndexH() const {
856 return Kind == k_VectorIndex && VectorIndex.Val < 8;
859 bool isVectorIndexS() const {
860 return Kind == k_VectorIndex && VectorIndex.Val < 4;
863 bool isVectorIndexD() const {
864 return Kind == k_VectorIndex && VectorIndex.Val < 2;
867 bool isToken() const override { return Kind == k_Token; }
869 bool isTokenEqual(StringRef Str) const {
870 return Kind == k_Token && getToken() == Str;
872 bool isSysCR() const { return Kind == k_SysCR; }
873 bool isPrefetch() const { return Kind == k_Prefetch; }
874 bool isPSBHint() const { return Kind == k_PSBHint; }
875 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
876 bool isShifter() const {
877 if (!isShiftExtend())
880 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
881 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
882 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
883 ST == AArch64_AM::MSL);
885 bool isExtend() const {
886 if (!isShiftExtend())
889 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
890 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
891 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
892 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
893 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
894 ET == AArch64_AM::LSL) &&
895 getShiftExtendAmount() <= 4;
898 bool isExtend64() const {
901 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
902 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
903 return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
906 bool isExtendLSL64() const {
909 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
910 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
911 ET == AArch64_AM::LSL) &&
912 getShiftExtendAmount() <= 4;
915 template<int Width> bool isMemXExtend() const {
918 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
919 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
920 (getShiftExtendAmount() == Log2_32(Width / 8) ||
921 getShiftExtendAmount() == 0);
924 template<int Width> bool isMemWExtend() const {
927 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
928 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
929 (getShiftExtendAmount() == Log2_32(Width / 8) ||
930 getShiftExtendAmount() == 0);
933 template <unsigned width>
934 bool isArithmeticShifter() const {
938 // An arithmetic shifter is LSL, LSR, or ASR.
939 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
940 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
941 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
944 template <unsigned width>
945 bool isLogicalShifter() const {
949 // A logical shifter is LSL, LSR, ASR or ROR.
950 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
951 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
952 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
953 getShiftExtendAmount() < width;
956 bool isMovImm32Shifter() const {
960 // A MOVi shifter is LSL of 0, 16, 32, or 48.
961 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
962 if (ST != AArch64_AM::LSL)
964 uint64_t Val = getShiftExtendAmount();
965 return (Val == 0 || Val == 16);
968 bool isMovImm64Shifter() const {
972 // A MOVi shifter is LSL of 0 or 16.
973 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
974 if (ST != AArch64_AM::LSL)
976 uint64_t Val = getShiftExtendAmount();
977 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
980 bool isLogicalVecShifter() const {
984 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
985 unsigned Shift = getShiftExtendAmount();
986 return getShiftExtendType() == AArch64_AM::LSL &&
987 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
990 bool isLogicalVecHalfWordShifter() const {
991 if (!isLogicalVecShifter())
994 // A logical vector shifter is a left shift by 0 or 8.
995 unsigned Shift = getShiftExtendAmount();
996 return getShiftExtendType() == AArch64_AM::LSL &&
997 (Shift == 0 || Shift == 8);
1000 bool isMoveVecShifter() const {
1001 if (!isShiftExtend())
1004 // A logical vector shifter is a left shift by 8 or 16.
1005 unsigned Shift = getShiftExtendAmount();
1006 return getShiftExtendType() == AArch64_AM::MSL &&
1007 (Shift == 8 || Shift == 16);
1010 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1011 // to LDUR/STUR when the offset is not legal for the former but is for
1012 // the latter. As such, in addition to checking for being a legal unscaled
1013 // address, also check that it is not a legal scaled address. This avoids
1014 // ambiguity in the matcher.
1016 bool isSImm9OffsetFB() const {
1017 return isSImm9() && !isUImm12Offset<Width / 8>();
1020 bool isAdrpLabel() const {
1021 // Validation was handled during parsing, so we just sanity check that
1022 // something didn't go haywire.
1026 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1027 int64_t Val = CE->getValue();
1028 int64_t Min = - (4096 * (1LL << (21 - 1)));
1029 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1030 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1036 bool isAdrLabel() const {
1037 // Validation was handled during parsing, so we just sanity check that
1038 // something didn't go haywire.
1042 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1043 int64_t Val = CE->getValue();
1044 int64_t Min = - (1LL << (21 - 1));
1045 int64_t Max = ((1LL << (21 - 1)) - 1);
1046 return Val >= Min && Val <= Max;
1052 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1053 // Add as immediates when possible. Null MCExpr = 0.
1055 Inst.addOperand(MCOperand::createImm(0));
1056 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1057 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1059 Inst.addOperand(MCOperand::createExpr(Expr));
1062 void addRegOperands(MCInst &Inst, unsigned N) const {
1063 assert(N == 1 && "Invalid number of operands!");
1064 Inst.addOperand(MCOperand::createReg(getReg()));
1067 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1068 assert(N == 1 && "Invalid number of operands!");
1070 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1072 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1073 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1074 RI->getEncodingValue(getReg()));
1076 Inst.addOperand(MCOperand::createReg(Reg));
1079 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1080 assert(N == 1 && "Invalid number of operands!");
1082 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1083 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1086 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1087 assert(N == 1 && "Invalid number of operands!");
1089 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1090 Inst.addOperand(MCOperand::createReg(getReg()));
1093 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1094 assert(N == 1 && "Invalid number of operands!");
1095 Inst.addOperand(MCOperand::createReg(getReg()));
1098 template <unsigned NumRegs>
1099 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1100 assert(N == 1 && "Invalid number of operands!");
1101 static const unsigned FirstRegs[] = { AArch64::D0,
1104 AArch64::D0_D1_D2_D3 };
1105 unsigned FirstReg = FirstRegs[NumRegs - 1];
1108 MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1111 template <unsigned NumRegs>
1112 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1113 assert(N == 1 && "Invalid number of operands!");
1114 static const unsigned FirstRegs[] = { AArch64::Q0,
1117 AArch64::Q0_Q1_Q2_Q3 };
1118 unsigned FirstReg = FirstRegs[NumRegs - 1];
1121 MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1124 void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
1125 assert(N == 1 && "Invalid number of operands!");
1126 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1129 void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1130 assert(N == 1 && "Invalid number of operands!");
1131 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1134 void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1135 assert(N == 1 && "Invalid number of operands!");
1136 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1139 void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1140 assert(N == 1 && "Invalid number of operands!");
1141 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1144 void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1145 assert(N == 1 && "Invalid number of operands!");
1146 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1149 void addImmOperands(MCInst &Inst, unsigned N) const {
1150 assert(N == 1 && "Invalid number of operands!");
1151 // If this is a pageoff symrefexpr with an addend, adjust the addend
1152 // to be only the page-offset portion. Otherwise, just add the expr
1154 addExpr(Inst, getImm());
1157 void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
1158 assert(N == 2 && "Invalid number of operands!");
1159 if (isShiftedImm()) {
1160 addExpr(Inst, getShiftedImmVal());
1161 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1163 addExpr(Inst, getImm());
1164 Inst.addOperand(MCOperand::createImm(0));
1168 void addAddSubImmNegOperands(MCInst &Inst, unsigned N) const {
1169 assert(N == 2 && "Invalid number of operands!");
1171 const MCExpr *MCE = isShiftedImm() ? getShiftedImmVal() : getImm();
1172 const MCConstantExpr *CE = cast<MCConstantExpr>(MCE);
1173 int64_t Val = -CE->getValue();
1174 unsigned ShiftAmt = isShiftedImm() ? ShiftedImm.ShiftAmount : 0;
1176 Inst.addOperand(MCOperand::createImm(Val));
1177 Inst.addOperand(MCOperand::createImm(ShiftAmt));
1180 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1181 assert(N == 1 && "Invalid number of operands!");
1182 Inst.addOperand(MCOperand::createImm(getCondCode()));
1185 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1186 assert(N == 1 && "Invalid number of operands!");
1187 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1189 addExpr(Inst, getImm());
1191 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1194 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1195 addImmOperands(Inst, N);
1199 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1200 assert(N == 1 && "Invalid number of operands!");
1201 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1204 Inst.addOperand(MCOperand::createExpr(getImm()));
1207 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1210 void addSImm9Operands(MCInst &Inst, unsigned N) const {
1211 assert(N == 1 && "Invalid number of operands!");
1212 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1213 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1216 void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1217 assert(N == 1 && "Invalid number of operands!");
1218 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1219 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 4));
1222 void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1223 assert(N == 1 && "Invalid number of operands!");
1224 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1225 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 8));
1228 void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1229 assert(N == 1 && "Invalid number of operands!");
1230 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1231 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 16));
1234 void addImm0_1Operands(MCInst &Inst, unsigned N) const {
1235 assert(N == 1 && "Invalid number of operands!");
1236 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1237 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1240 void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1241 assert(N == 1 && "Invalid number of operands!");
1242 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1243 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1246 void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1247 assert(N == 1 && "Invalid number of operands!");
1248 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1249 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1252 void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1253 assert(N == 1 && "Invalid number of operands!");
1254 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1255 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1258 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1259 assert(N == 1 && "Invalid number of operands!");
1260 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1261 assert(MCE && "Invalid constant immediate operand!");
1262 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1265 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1266 assert(N == 1 && "Invalid number of operands!");
1267 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1268 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1271 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1272 assert(N == 1 && "Invalid number of operands!");
1273 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1274 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1277 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1278 assert(N == 1 && "Invalid number of operands!");
1279 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1280 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1283 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1284 assert(N == 1 && "Invalid number of operands!");
1285 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1286 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1289 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1290 assert(N == 1 && "Invalid number of operands!");
1291 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1292 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1295 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1296 assert(N == 1 && "Invalid number of operands!");
1297 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1298 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1301 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1302 assert(N == 1 && "Invalid number of operands!");
1303 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1304 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1307 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1308 assert(N == 1 && "Invalid number of operands!");
1309 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1310 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1313 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1314 assert(N == 1 && "Invalid number of operands!");
1315 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1316 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1319 void addImm32_63Operands(MCInst &Inst, unsigned N) const {
1320 assert(N == 1 && "Invalid number of operands!");
1321 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1322 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1325 void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1326 assert(N == 1 && "Invalid number of operands!");
1327 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1329 AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
1330 Inst.addOperand(MCOperand::createImm(encoding));
1333 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1334 assert(N == 1 && "Invalid number of operands!");
1335 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1336 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1337 Inst.addOperand(MCOperand::createImm(encoding));
1340 void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
1341 assert(N == 1 && "Invalid number of operands!");
1342 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1343 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
1344 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
1345 Inst.addOperand(MCOperand::createImm(encoding));
1348 void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
1349 assert(N == 1 && "Invalid number of operands!");
1350 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1352 AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
1353 Inst.addOperand(MCOperand::createImm(encoding));
1356 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1357 assert(N == 1 && "Invalid number of operands!");
1358 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1359 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1360 Inst.addOperand(MCOperand::createImm(encoding));
1363 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1364 // Branch operands don't encode the low bits, so shift them off
1365 // here. If it's a label, however, just put it on directly as there's
1366 // not enough information now to do anything.
1367 assert(N == 1 && "Invalid number of operands!");
1368 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1370 addExpr(Inst, getImm());
1373 assert(MCE && "Invalid constant immediate operand!");
1374 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1377 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1378 // Branch operands don't encode the low bits, so shift them off
1379 // here. If it's a label, however, just put it on directly as there's
1380 // not enough information now to do anything.
1381 assert(N == 1 && "Invalid number of operands!");
1382 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1384 addExpr(Inst, getImm());
1387 assert(MCE && "Invalid constant immediate operand!");
1388 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1391 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1392 // Branch operands don't encode the low bits, so shift them off
1393 // here. If it's a label, however, just put it on directly as there's
1394 // not enough information now to do anything.
1395 assert(N == 1 && "Invalid number of operands!");
1396 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1398 addExpr(Inst, getImm());
1401 assert(MCE && "Invalid constant immediate operand!");
1402 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1405 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1406 assert(N == 1 && "Invalid number of operands!");
1407 Inst.addOperand(MCOperand::createImm(getFPImm()));
1410 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1411 assert(N == 1 && "Invalid number of operands!");
1412 Inst.addOperand(MCOperand::createImm(getBarrier()));
1415 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1416 assert(N == 1 && "Invalid number of operands!");
1418 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1421 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1422 assert(N == 1 && "Invalid number of operands!");
1424 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1427 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1428 assert(N == 1 && "Invalid number of operands!");
1430 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1433 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1434 assert(N == 1 && "Invalid number of operands!");
1436 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1439 void addSysCROperands(MCInst &Inst, unsigned N) const {
1440 assert(N == 1 && "Invalid number of operands!");
1441 Inst.addOperand(MCOperand::createImm(getSysCR()));
1444 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1445 assert(N == 1 && "Invalid number of operands!");
1446 Inst.addOperand(MCOperand::createImm(getPrefetch()));
1449 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
1450 assert(N == 1 && "Invalid number of operands!");
1451 Inst.addOperand(MCOperand::createImm(getPSBHint()));
1454 void addShifterOperands(MCInst &Inst, unsigned N) const {
1455 assert(N == 1 && "Invalid number of operands!");
1457 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1458 Inst.addOperand(MCOperand::createImm(Imm));
1461 void addExtendOperands(MCInst &Inst, unsigned N) const {
1462 assert(N == 1 && "Invalid number of operands!");
1463 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1464 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1465 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1466 Inst.addOperand(MCOperand::createImm(Imm));
1469 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1470 assert(N == 1 && "Invalid number of operands!");
1471 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1472 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1473 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1474 Inst.addOperand(MCOperand::createImm(Imm));
1477 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1478 assert(N == 2 && "Invalid number of operands!");
1479 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1480 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1481 Inst.addOperand(MCOperand::createImm(IsSigned));
1482 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1485 // For 8-bit load/store instructions with a register offset, both the
1486 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1487 // they're disambiguated by whether the shift was explicit or implicit rather
1489 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1490 assert(N == 2 && "Invalid number of operands!");
1491 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1492 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1493 Inst.addOperand(MCOperand::createImm(IsSigned));
1494 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1498 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1499 assert(N == 1 && "Invalid number of operands!");
1501 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1502 uint64_t Value = CE->getValue();
1503 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1507 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1508 assert(N == 1 && "Invalid number of operands!");
1510 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1511 uint64_t Value = CE->getValue();
1512 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1515 void print(raw_ostream &OS) const override;
1517 static std::unique_ptr<AArch64Operand>
1518 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1519 auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1520 Op->Tok.Data = Str.data();
1521 Op->Tok.Length = Str.size();
1522 Op->Tok.IsSuffix = IsSuffix;
1528 static std::unique_ptr<AArch64Operand>
1529 CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
1530 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1531 Op->Reg.RegNum = RegNum;
1532 Op->Reg.isVector = isVector;
1538 static std::unique_ptr<AArch64Operand>
1539 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1540 char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
1541 auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1542 Op->VectorList.RegNum = RegNum;
1543 Op->VectorList.Count = Count;
1544 Op->VectorList.NumElements = NumElements;
1545 Op->VectorList.ElementKind = ElementKind;
1551 static std::unique_ptr<AArch64Operand>
1552 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1553 auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1554 Op->VectorIndex.Val = Idx;
1560 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1561 SMLoc E, MCContext &Ctx) {
1562 auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1569 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1570 unsigned ShiftAmount,
1573 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1574 Op->ShiftedImm .Val = Val;
1575 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1581 static std::unique_ptr<AArch64Operand>
1582 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1583 auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1584 Op->CondCode.Code = Code;
1590 static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
1592 auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1593 Op->FPImm.Val = Val;
1599 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1603 auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1604 Op->Barrier.Val = Val;
1605 Op->Barrier.Data = Str.data();
1606 Op->Barrier.Length = Str.size();
1612 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1615 uint32_t PStateField,
1617 auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1618 Op->SysReg.Data = Str.data();
1619 Op->SysReg.Length = Str.size();
1620 Op->SysReg.MRSReg = MRSReg;
1621 Op->SysReg.MSRReg = MSRReg;
1622 Op->SysReg.PStateField = PStateField;
1628 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1629 SMLoc E, MCContext &Ctx) {
1630 auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1631 Op->SysCRImm.Val = Val;
1637 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1641 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1642 Op->Prefetch.Val = Val;
1643 Op->Barrier.Data = Str.data();
1644 Op->Barrier.Length = Str.size();
1650 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
1654 auto Op = make_unique<AArch64Operand>(k_PSBHint, Ctx);
1655 Op->PSBHint.Val = Val;
1656 Op->PSBHint.Data = Str.data();
1657 Op->PSBHint.Length = Str.size();
1663 static std::unique_ptr<AArch64Operand>
1664 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1665 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1666 auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1667 Op->ShiftExtend.Type = ShOp;
1668 Op->ShiftExtend.Amount = Val;
1669 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1676 } // end anonymous namespace.
1678 void AArch64Operand::print(raw_ostream &OS) const {
1681 OS << "<fpimm " << getFPImm() << "("
1682 << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
1685 StringRef Name = getBarrierName();
1687 OS << "<barrier " << Name << ">";
1689 OS << "<barrier invalid #" << getBarrier() << ">";
1695 case k_ShiftedImm: {
1696 unsigned Shift = getShiftedImmShift();
1697 OS << "<shiftedimm ";
1698 OS << *getShiftedImmVal();
1699 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1703 OS << "<condcode " << getCondCode() << ">";
1706 OS << "<register " << getReg() << ">";
1708 case k_VectorList: {
1709 OS << "<vectorlist ";
1710 unsigned Reg = getVectorListStart();
1711 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1712 OS << Reg + i << " ";
1717 OS << "<vectorindex " << getVectorIndex() << ">";
1720 OS << "<sysreg: " << getSysReg() << '>';
1723 OS << "'" << getToken() << "'";
1726 OS << "c" << getSysCR();
1729 StringRef Name = getPrefetchName();
1731 OS << "<prfop " << Name << ">";
1733 OS << "<prfop invalid #" << getPrefetch() << ">";
1737 OS << getPSBHintName();
1740 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1741 << getShiftExtendAmount();
1742 if (!hasShiftExtendAmount())
1749 /// @name Auto-generated Match Functions
1752 static unsigned MatchRegisterName(StringRef Name);
1756 static unsigned matchVectorRegName(StringRef Name) {
1757 return StringSwitch<unsigned>(Name.lower())
1758 .Case("v0", AArch64::Q0)
1759 .Case("v1", AArch64::Q1)
1760 .Case("v2", AArch64::Q2)
1761 .Case("v3", AArch64::Q3)
1762 .Case("v4", AArch64::Q4)
1763 .Case("v5", AArch64::Q5)
1764 .Case("v6", AArch64::Q6)
1765 .Case("v7", AArch64::Q7)
1766 .Case("v8", AArch64::Q8)
1767 .Case("v9", AArch64::Q9)
1768 .Case("v10", AArch64::Q10)
1769 .Case("v11", AArch64::Q11)
1770 .Case("v12", AArch64::Q12)
1771 .Case("v13", AArch64::Q13)
1772 .Case("v14", AArch64::Q14)
1773 .Case("v15", AArch64::Q15)
1774 .Case("v16", AArch64::Q16)
1775 .Case("v17", AArch64::Q17)
1776 .Case("v18", AArch64::Q18)
1777 .Case("v19", AArch64::Q19)
1778 .Case("v20", AArch64::Q20)
1779 .Case("v21", AArch64::Q21)
1780 .Case("v22", AArch64::Q22)
1781 .Case("v23", AArch64::Q23)
1782 .Case("v24", AArch64::Q24)
1783 .Case("v25", AArch64::Q25)
1784 .Case("v26", AArch64::Q26)
1785 .Case("v27", AArch64::Q27)
1786 .Case("v28", AArch64::Q28)
1787 .Case("v29", AArch64::Q29)
1788 .Case("v30", AArch64::Q30)
1789 .Case("v31", AArch64::Q31)
1793 static bool isValidVectorKind(StringRef Name) {
1794 return StringSwitch<bool>(Name.lower())
1804 // Accept the width neutral ones, too, for verbose syntax. If those
1805 // aren't used in the right places, the token operand won't match so
1806 // all will work out.
1811 // Needed for fp16 scalar pairwise reductions
1816 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1817 char &ElementKind) {
1818 assert(isValidVectorKind(Name));
1820 ElementKind = Name.lower()[Name.size() - 1];
1823 if (Name.size() == 2)
1826 // Parse the lane count
1827 Name = Name.drop_front();
1828 while (isdigit(Name.front())) {
1829 NumElements = 10 * NumElements + (Name.front() - '0');
1830 Name = Name.drop_front();
1834 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1836 StartLoc = getLoc();
1837 RegNo = tryParseRegister();
1838 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1839 return (RegNo == (unsigned)-1);
1842 // Matches a register name or register alias previously defined by '.req'
1843 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
1845 unsigned RegNum = isVector ? matchVectorRegName(Name)
1846 : MatchRegisterName(Name);
1849 // Check for aliases registered via .req. Canonicalize to lower case.
1850 // That's more consistent since register names are case insensitive, and
1851 // it's how the original entry was passed in from MC/MCParser/AsmParser.
1852 auto Entry = RegisterReqs.find(Name.lower());
1853 if (Entry == RegisterReqs.end())
1855 // set RegNum if the match is the right kind of register
1856 if (isVector == Entry->getValue().first)
1857 RegNum = Entry->getValue().second;
1862 /// tryParseRegister - Try to parse a register name. The token must be an
1863 /// Identifier when called, and if it is a register name the token is eaten and
1864 /// the register is added to the operand list.
1865 int AArch64AsmParser::tryParseRegister() {
1866 MCAsmParser &Parser = getParser();
1867 const AsmToken &Tok = Parser.getTok();
1868 if (Tok.isNot(AsmToken::Identifier))
1871 std::string lowerCase = Tok.getString().lower();
1872 unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
1873 // Also handle a few aliases of registers.
1875 RegNum = StringSwitch<unsigned>(lowerCase)
1876 .Case("fp", AArch64::FP)
1877 .Case("lr", AArch64::LR)
1878 .Case("x31", AArch64::XZR)
1879 .Case("w31", AArch64::WZR)
1885 Parser.Lex(); // Eat identifier token.
1889 /// tryMatchVectorRegister - Try to parse a vector register name with optional
1890 /// kind specifier. If it is a register specifier, eat the token and return it.
1891 int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
1892 MCAsmParser &Parser = getParser();
1893 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1894 TokError("vector register expected");
1898 StringRef Name = Parser.getTok().getString();
1899 // If there is a kind specifier, it's separated from the register name by
1901 size_t Start = 0, Next = Name.find('.');
1902 StringRef Head = Name.slice(Start, Next);
1903 unsigned RegNum = matchRegisterNameAlias(Head, true);
1906 if (Next != StringRef::npos) {
1907 Kind = Name.slice(Next, StringRef::npos);
1908 if (!isValidVectorKind(Kind)) {
1909 TokError("invalid vector kind qualifier");
1913 Parser.Lex(); // Eat the register token.
1918 TokError("vector register expected");
1922 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
1923 OperandMatchResultTy
1924 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
1925 MCAsmParser &Parser = getParser();
1928 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1929 Error(S, "Expected cN operand where 0 <= N <= 15");
1930 return MatchOperand_ParseFail;
1933 StringRef Tok = Parser.getTok().getIdentifier();
1934 if (Tok[0] != 'c' && Tok[0] != 'C') {
1935 Error(S, "Expected cN operand where 0 <= N <= 15");
1936 return MatchOperand_ParseFail;
1940 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
1941 if (BadNum || CRNum > 15) {
1942 Error(S, "Expected cN operand where 0 <= N <= 15");
1943 return MatchOperand_ParseFail;
1946 Parser.Lex(); // Eat identifier token.
1948 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
1949 return MatchOperand_Success;
1952 /// tryParsePrefetch - Try to parse a prefetch operand.
1953 OperandMatchResultTy
1954 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
1955 MCAsmParser &Parser = getParser();
1957 const AsmToken &Tok = Parser.getTok();
1958 // Either an identifier for named values or a 5-bit immediate.
1959 // Eat optional hash.
1960 if (parseOptionalToken(AsmToken::Hash) ||
1961 Tok.is(AsmToken::Integer)) {
1962 const MCExpr *ImmVal;
1963 if (getParser().parseExpression(ImmVal))
1964 return MatchOperand_ParseFail;
1966 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
1968 TokError("immediate value expected for prefetch operand");
1969 return MatchOperand_ParseFail;
1971 unsigned prfop = MCE->getValue();
1973 TokError("prefetch operand out of range, [0,31] expected");
1974 return MatchOperand_ParseFail;
1977 auto PRFM = AArch64PRFM::lookupPRFMByEncoding(MCE->getValue());
1978 Operands.push_back(AArch64Operand::CreatePrefetch(
1979 prfop, PRFM ? PRFM->Name : "", S, getContext()));
1980 return MatchOperand_Success;
1983 if (Tok.isNot(AsmToken::Identifier)) {
1984 TokError("pre-fetch hint expected");
1985 return MatchOperand_ParseFail;
1988 auto PRFM = AArch64PRFM::lookupPRFMByName(Tok.getString());
1990 TokError("pre-fetch hint expected");
1991 return MatchOperand_ParseFail;
1994 Parser.Lex(); // Eat identifier token.
1995 Operands.push_back(AArch64Operand::CreatePrefetch(
1996 PRFM->Encoding, Tok.getString(), S, getContext()));
1997 return MatchOperand_Success;
2000 /// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
2001 OperandMatchResultTy
2002 AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2003 MCAsmParser &Parser = getParser();
2005 const AsmToken &Tok = Parser.getTok();
2006 if (Tok.isNot(AsmToken::Identifier)) {
2007 TokError("invalid operand for instruction");
2008 return MatchOperand_ParseFail;
2011 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2013 TokError("invalid operand for instruction");
2014 return MatchOperand_ParseFail;
2017 Parser.Lex(); // Eat identifier token.
2018 Operands.push_back(AArch64Operand::CreatePSBHint(
2019 PSB->Encoding, Tok.getString(), S, getContext()));
2020 return MatchOperand_Success;
2023 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2025 OperandMatchResultTy
2026 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2027 MCAsmParser &Parser = getParser();
2031 if (Parser.getTok().is(AsmToken::Hash)) {
2032 Parser.Lex(); // Eat hash token.
2035 if (parseSymbolicImmVal(Expr))
2036 return MatchOperand_ParseFail;
2038 AArch64MCExpr::VariantKind ELFRefKind;
2039 MCSymbolRefExpr::VariantKind DarwinRefKind;
2041 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2042 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2043 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2044 // No modifier was specified at all; this is the syntax for an ELF basic
2045 // ADRP relocation (unfortunately).
2047 AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2048 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2049 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2051 Error(S, "gotpage label reference not allowed an addend");
2052 return MatchOperand_ParseFail;
2053 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2054 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2055 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2056 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2057 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2058 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2059 // The operand must be an @page or @gotpage qualified symbolref.
2060 Error(S, "page or gotpage label reference expected");
2061 return MatchOperand_ParseFail;
2065 // We have either a label reference possibly with addend or an immediate. The
2066 // addend is a raw value here. The linker will adjust it to only reference the
2068 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2069 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2071 return MatchOperand_Success;
2074 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2076 OperandMatchResultTy
2077 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2081 parseOptionalToken(AsmToken::Hash);
2082 if (getParser().parseExpression(Expr))
2083 return MatchOperand_ParseFail;
2085 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2086 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2088 return MatchOperand_Success;
2091 /// tryParseFPImm - A floating point immediate expression operand.
2092 OperandMatchResultTy
2093 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2094 MCAsmParser &Parser = getParser();
2097 bool Hash = parseOptionalToken(AsmToken::Hash);
2099 // Handle negation, as that still comes through as a separate token.
2100 bool isNegative = parseOptionalToken(AsmToken::Minus);
2102 const AsmToken &Tok = Parser.getTok();
2103 if (Tok.is(AsmToken::Real) || Tok.is(AsmToken::Integer)) {
2105 if (Tok.is(AsmToken::Integer) && !isNegative && Tok.getString().startswith("0x")) {
2106 Val = Tok.getIntVal();
2107 if (Val > 255 || Val < 0) {
2108 TokError("encoded floating point value out of range");
2109 return MatchOperand_ParseFail;
2112 APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
2114 RealVal.changeSign();
2116 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2117 Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2119 // Check for out of range values. As an exception we let Zero through,
2120 // but as tokens instead of an FPImm so that it can be matched by the
2121 // appropriate alias if one exists.
2122 if (RealVal.isPosZero()) {
2123 Parser.Lex(); // Eat the token.
2124 Operands.push_back(AArch64Operand::CreateToken("#0", false, S, getContext()));
2125 Operands.push_back(AArch64Operand::CreateToken(".0", false, S, getContext()));
2126 return MatchOperand_Success;
2127 } else if (Val == -1) {
2128 TokError("expected compatible register or floating-point constant");
2129 return MatchOperand_ParseFail;
2132 Parser.Lex(); // Eat the token.
2133 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2134 return MatchOperand_Success;
2138 return MatchOperand_NoMatch;
2140 TokError("invalid floating point immediate");
2141 return MatchOperand_ParseFail;
2144 /// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
2145 OperandMatchResultTy
2146 AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
2147 MCAsmParser &Parser = getParser();
2150 if (Parser.getTok().is(AsmToken::Hash))
2151 Parser.Lex(); // Eat '#'
2152 else if (Parser.getTok().isNot(AsmToken::Integer))
2153 // Operand should start from # or should be integer, emit error otherwise.
2154 return MatchOperand_NoMatch;
2157 if (parseSymbolicImmVal(Imm))
2158 return MatchOperand_ParseFail;
2159 else if (Parser.getTok().isNot(AsmToken::Comma)) {
2160 uint64_t ShiftAmount = 0;
2161 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
2163 int64_t Val = MCE->getValue();
2164 if (Val > 0xfff && (Val & 0xfff) == 0) {
2165 Imm = MCConstantExpr::create(Val >> 12, getContext());
2169 SMLoc E = Parser.getTok().getLoc();
2170 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
2172 return MatchOperand_Success;
2178 // The optional operand must be "lsl #N" where N is non-negative.
2179 if (!Parser.getTok().is(AsmToken::Identifier) ||
2180 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2181 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2182 return MatchOperand_ParseFail;
2188 parseOptionalToken(AsmToken::Hash);
2190 if (Parser.getTok().isNot(AsmToken::Integer)) {
2191 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2192 return MatchOperand_ParseFail;
2195 int64_t ShiftAmount = Parser.getTok().getIntVal();
2197 if (ShiftAmount < 0) {
2198 Error(Parser.getTok().getLoc(), "positive shift amount required");
2199 return MatchOperand_ParseFail;
2201 Parser.Lex(); // Eat the number
2203 SMLoc E = Parser.getTok().getLoc();
2204 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2205 S, E, getContext()));
2206 return MatchOperand_Success;
2209 /// parseCondCodeString - Parse a Condition Code string.
2210 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2211 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2212 .Case("eq", AArch64CC::EQ)
2213 .Case("ne", AArch64CC::NE)
2214 .Case("cs", AArch64CC::HS)
2215 .Case("hs", AArch64CC::HS)
2216 .Case("cc", AArch64CC::LO)
2217 .Case("lo", AArch64CC::LO)
2218 .Case("mi", AArch64CC::MI)
2219 .Case("pl", AArch64CC::PL)
2220 .Case("vs", AArch64CC::VS)
2221 .Case("vc", AArch64CC::VC)
2222 .Case("hi", AArch64CC::HI)
2223 .Case("ls", AArch64CC::LS)
2224 .Case("ge", AArch64CC::GE)
2225 .Case("lt", AArch64CC::LT)
2226 .Case("gt", AArch64CC::GT)
2227 .Case("le", AArch64CC::LE)
2228 .Case("al", AArch64CC::AL)
2229 .Case("nv", AArch64CC::NV)
2230 .Default(AArch64CC::Invalid);
2234 /// parseCondCode - Parse a Condition Code operand.
2235 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2236 bool invertCondCode) {
2237 MCAsmParser &Parser = getParser();
2239 const AsmToken &Tok = Parser.getTok();
2240 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2242 StringRef Cond = Tok.getString();
2243 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2244 if (CC == AArch64CC::Invalid)
2245 return TokError("invalid condition code");
2246 Parser.Lex(); // Eat identifier token.
2248 if (invertCondCode) {
2249 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2250 return TokError("condition codes AL and NV are invalid for this instruction");
2251 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2255 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2259 /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2260 /// them if present.
2261 OperandMatchResultTy
2262 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2263 MCAsmParser &Parser = getParser();
2264 const AsmToken &Tok = Parser.getTok();
2265 std::string LowerID = Tok.getString().lower();
2266 AArch64_AM::ShiftExtendType ShOp =
2267 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2268 .Case("lsl", AArch64_AM::LSL)
2269 .Case("lsr", AArch64_AM::LSR)
2270 .Case("asr", AArch64_AM::ASR)
2271 .Case("ror", AArch64_AM::ROR)
2272 .Case("msl", AArch64_AM::MSL)
2273 .Case("uxtb", AArch64_AM::UXTB)
2274 .Case("uxth", AArch64_AM::UXTH)
2275 .Case("uxtw", AArch64_AM::UXTW)
2276 .Case("uxtx", AArch64_AM::UXTX)
2277 .Case("sxtb", AArch64_AM::SXTB)
2278 .Case("sxth", AArch64_AM::SXTH)
2279 .Case("sxtw", AArch64_AM::SXTW)
2280 .Case("sxtx", AArch64_AM::SXTX)
2281 .Default(AArch64_AM::InvalidShiftExtend);
2283 if (ShOp == AArch64_AM::InvalidShiftExtend)
2284 return MatchOperand_NoMatch;
2286 SMLoc S = Tok.getLoc();
2289 bool Hash = parseOptionalToken(AsmToken::Hash);
2291 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2292 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2293 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2294 ShOp == AArch64_AM::MSL) {
2295 // We expect a number here.
2296 TokError("expected #imm after shift specifier");
2297 return MatchOperand_ParseFail;
2300 // "extend" type operations don't need an immediate, #0 is implicit.
2301 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2303 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2304 return MatchOperand_Success;
2307 // Make sure we do actually have a number, identifier or a parenthesized
2309 SMLoc E = Parser.getTok().getLoc();
2310 if (!Parser.getTok().is(AsmToken::Integer) &&
2311 !Parser.getTok().is(AsmToken::LParen) &&
2312 !Parser.getTok().is(AsmToken::Identifier)) {
2313 Error(E, "expected integer shift amount");
2314 return MatchOperand_ParseFail;
2317 const MCExpr *ImmVal;
2318 if (getParser().parseExpression(ImmVal))
2319 return MatchOperand_ParseFail;
2321 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2323 Error(E, "expected constant '#imm' after shift specifier");
2324 return MatchOperand_ParseFail;
2327 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2328 Operands.push_back(AArch64Operand::CreateShiftExtend(
2329 ShOp, MCE->getValue(), true, S, E, getContext()));
2330 return MatchOperand_Success;
2333 static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
2334 if (FBS[AArch64::HasV8_1aOps])
2336 else if (FBS[AArch64::HasV8_2aOps])
2342 void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
2344 const uint16_t Op2 = Encoding & 7;
2345 const uint16_t Cm = (Encoding & 0x78) >> 3;
2346 const uint16_t Cn = (Encoding & 0x780) >> 7;
2347 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
2349 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
2352 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2354 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
2356 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
2357 Expr = MCConstantExpr::create(Op2, getContext());
2359 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2362 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2363 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
2364 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2365 OperandVector &Operands) {
2366 if (Name.find('.') != StringRef::npos)
2367 return TokError("invalid operand");
2371 AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2373 MCAsmParser &Parser = getParser();
2374 const AsmToken &Tok = Parser.getTok();
2375 StringRef Op = Tok.getString();
2376 SMLoc S = Tok.getLoc();
2378 if (Mnemonic == "ic") {
2379 const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
2381 return TokError("invalid operand for IC instruction");
2382 else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
2383 std::string Str("IC " + std::string(IC->Name) + " requires ");
2384 setRequiredFeatureString(IC->getRequiredFeatures(), Str);
2385 return TokError(Str.c_str());
2387 createSysAlias(IC->Encoding, Operands, S);
2388 } else if (Mnemonic == "dc") {
2389 const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
2391 return TokError("invalid operand for DC instruction");
2392 else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
2393 std::string Str("DC " + std::string(DC->Name) + " requires ");
2394 setRequiredFeatureString(DC->getRequiredFeatures(), Str);
2395 return TokError(Str.c_str());
2397 createSysAlias(DC->Encoding, Operands, S);
2398 } else if (Mnemonic == "at") {
2399 const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
2401 return TokError("invalid operand for AT instruction");
2402 else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
2403 std::string Str("AT " + std::string(AT->Name) + " requires ");
2404 setRequiredFeatureString(AT->getRequiredFeatures(), Str);
2405 return TokError(Str.c_str());
2407 createSysAlias(AT->Encoding, Operands, S);
2408 } else if (Mnemonic == "tlbi") {
2409 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
2411 return TokError("invalid operand for TLBI instruction");
2412 else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
2413 std::string Str("TLBI " + std::string(TLBI->Name) + " requires ");
2414 setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
2415 return TokError(Str.c_str());
2417 createSysAlias(TLBI->Encoding, Operands, S);
2420 Parser.Lex(); // Eat operand.
2422 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2423 bool HasRegister = false;
2425 // Check for the optional register operand.
2426 if (parseOptionalToken(AsmToken::Comma)) {
2427 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2428 return TokError("expected register operand");
2432 if (ExpectRegister && !HasRegister)
2433 return TokError("specified " + Mnemonic + " op requires a register");
2434 else if (!ExpectRegister && HasRegister)
2435 return TokError("specified " + Mnemonic + " op does not use a register");
2437 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
2443 OperandMatchResultTy
2444 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2445 MCAsmParser &Parser = getParser();
2446 const AsmToken &Tok = Parser.getTok();
2448 // Can be either a #imm style literal or an option name
2449 if (parseOptionalToken(AsmToken::Hash) ||
2450 Tok.is(AsmToken::Integer)) {
2451 // Immediate operand.
2452 const MCExpr *ImmVal;
2453 SMLoc ExprLoc = getLoc();
2454 if (getParser().parseExpression(ImmVal))
2455 return MatchOperand_ParseFail;
2456 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2458 Error(ExprLoc, "immediate value expected for barrier operand");
2459 return MatchOperand_ParseFail;
2461 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2462 Error(ExprLoc, "barrier operand out of range");
2463 return MatchOperand_ParseFail;
2465 auto DB = AArch64DB::lookupDBByEncoding(MCE->getValue());
2466 Operands.push_back(AArch64Operand::CreateBarrier(
2467 MCE->getValue(), DB ? DB->Name : "", ExprLoc, getContext()));
2468 return MatchOperand_Success;
2471 if (Tok.isNot(AsmToken::Identifier)) {
2472 TokError("invalid operand for instruction");
2473 return MatchOperand_ParseFail;
2476 auto DB = AArch64DB::lookupDBByName(Tok.getString());
2478 TokError("invalid barrier option name");
2479 return MatchOperand_ParseFail;
2482 // The only valid named option for ISB is 'sy'
2483 if (Mnemonic == "isb" && DB->Encoding != AArch64DB::sy) {
2484 TokError("'sy' or #imm operand expected");
2485 return MatchOperand_ParseFail;
2488 Operands.push_back(AArch64Operand::CreateBarrier(
2489 DB->Encoding, Tok.getString(), getLoc(), getContext()));
2490 Parser.Lex(); // Consume the option
2492 return MatchOperand_Success;
2495 OperandMatchResultTy
2496 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2497 MCAsmParser &Parser = getParser();
2498 const AsmToken &Tok = Parser.getTok();
2500 if (Tok.isNot(AsmToken::Identifier))
2501 return MatchOperand_NoMatch;
2504 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
2505 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
2506 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
2507 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
2509 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
2511 auto PState = AArch64PState::lookupPStateByName(Tok.getString());
2512 unsigned PStateImm = -1;
2513 if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
2514 PStateImm = PState->Encoding;
2517 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
2518 PStateImm, getContext()));
2519 Parser.Lex(); // Eat identifier
2521 return MatchOperand_Success;
2524 /// tryParseVectorRegister - Parse a vector register operand.
2525 bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
2526 MCAsmParser &Parser = getParser();
2527 if (Parser.getTok().isNot(AsmToken::Identifier))
2531 // Check for a vector register specifier first.
2533 int64_t Reg = tryMatchVectorRegister(Kind, false);
2537 AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2538 // If there was an explicit qualifier, that goes on as a literal text
2542 AArch64Operand::CreateToken(Kind, false, S, getContext()));
2544 // If there is an index specifier following the register, parse that too.
2545 SMLoc SIdx = getLoc();
2546 if (parseOptionalToken(AsmToken::LBrac)) {
2547 const MCExpr *ImmVal;
2548 if (getParser().parseExpression(ImmVal))
2550 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2552 TokError("immediate value expected for vector index");
2558 if (parseToken(AsmToken::RBrac, "']' expected"))
2561 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2568 /// parseRegister - Parse a non-vector register operand.
2569 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
2571 // Try for a vector register.
2572 if (!tryParseVectorRegister(Operands))
2575 // Try for a scalar register.
2576 int64_t Reg = tryParseRegister();
2580 AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2585 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2586 MCAsmParser &Parser = getParser();
2587 bool HasELFModifier = false;
2588 AArch64MCExpr::VariantKind RefKind;
2590 if (parseOptionalToken(AsmToken::Colon)) {
2591 HasELFModifier = true;
2593 if (Parser.getTok().isNot(AsmToken::Identifier))
2594 return TokError("expect relocation specifier in operand after ':'");
2596 std::string LowerCase = Parser.getTok().getIdentifier().lower();
2597 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2598 .Case("lo12", AArch64MCExpr::VK_LO12)
2599 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2600 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2601 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2602 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2603 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2604 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2605 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2606 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2607 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2608 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2609 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2610 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2611 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2612 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2613 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2614 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2615 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
2616 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
2617 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
2618 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
2619 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
2620 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
2621 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
2622 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
2623 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
2624 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
2625 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
2626 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
2627 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
2628 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
2629 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
2630 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
2631 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
2632 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
2633 .Default(AArch64MCExpr::VK_INVALID);
2635 if (RefKind == AArch64MCExpr::VK_INVALID)
2636 return TokError("expect relocation specifier in operand after ':'");
2638 Parser.Lex(); // Eat identifier
2640 if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
2644 if (getParser().parseExpression(ImmVal))
2648 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
2653 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
2654 bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
2655 MCAsmParser &Parser = getParser();
2656 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
2658 Parser.Lex(); // Eat left bracket token.
2660 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
2663 int64_t PrevReg = FirstReg;
2666 if (parseOptionalToken(AsmToken::Minus)) {
2667 SMLoc Loc = getLoc();
2669 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2672 // Any Kind suffices must match on all regs in the list.
2673 if (Kind != NextKind)
2674 return Error(Loc, "mismatched register size suffix");
2676 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
2678 if (Space == 0 || Space > 3) {
2679 return Error(Loc, "invalid number of vectors");
2685 while (parseOptionalToken(AsmToken::Comma)) {
2686 SMLoc Loc = getLoc();
2688 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2691 // Any Kind suffices must match on all regs in the list.
2692 if (Kind != NextKind)
2693 return Error(Loc, "mismatched register size suffix");
2695 // Registers must be incremental (with wraparound at 31)
2696 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
2697 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
2698 return Error(Loc, "registers must be sequential");
2705 if (parseToken(AsmToken::RCurly, "'}' expected"))
2709 return Error(S, "invalid number of vectors");
2711 unsigned NumElements = 0;
2712 char ElementKind = 0;
2714 parseValidVectorKind(Kind, NumElements, ElementKind);
2716 Operands.push_back(AArch64Operand::CreateVectorList(
2717 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
2719 // If there is an index specifier following the list, parse that too.
2720 SMLoc SIdx = getLoc();
2721 if (parseOptionalToken(AsmToken::LBrac)) { // Eat left bracket token.
2722 const MCExpr *ImmVal;
2723 if (getParser().parseExpression(ImmVal))
2725 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2727 TokError("immediate value expected for vector index");
2732 if (parseToken(AsmToken::RBrac, "']' expected"))
2735 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2741 OperandMatchResultTy
2742 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
2743 MCAsmParser &Parser = getParser();
2744 const AsmToken &Tok = Parser.getTok();
2745 if (!Tok.is(AsmToken::Identifier))
2746 return MatchOperand_NoMatch;
2748 unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);
2750 MCContext &Ctx = getContext();
2751 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2752 if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
2753 return MatchOperand_NoMatch;
2756 Parser.Lex(); // Eat register
2758 if (!parseOptionalToken(AsmToken::Comma)) {
2760 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2761 return MatchOperand_Success;
2764 parseOptionalToken(AsmToken::Hash);
2766 if (Parser.getTok().isNot(AsmToken::Integer)) {
2767 Error(getLoc(), "index must be absent or #0");
2768 return MatchOperand_ParseFail;
2771 const MCExpr *ImmVal;
2772 if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
2773 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
2774 Error(getLoc(), "index must be absent or #0");
2775 return MatchOperand_ParseFail;
2779 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2780 return MatchOperand_Success;
2783 /// parseOperand - Parse a arm instruction operand. For now this parses the
2784 /// operand regardless of the mnemonic.
2785 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
2786 bool invertCondCode) {
2787 MCAsmParser &Parser = getParser();
2788 // Check if the current operand has a custom associated parser, if so, try to
2789 // custom parse the operand, or fallback to the general approach.
2790 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
2791 if (ResTy == MatchOperand_Success)
2793 // If there wasn't a custom match, try the generic matcher below. Otherwise,
2794 // there was a match, but an error occurred, in which case, just return that
2795 // the operand parsing failed.
2796 if (ResTy == MatchOperand_ParseFail)
2799 // Nothing custom, so do general case parsing.
2801 switch (getLexer().getKind()) {
2805 if (parseSymbolicImmVal(Expr))
2806 return Error(S, "invalid operand");
2808 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2809 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2812 case AsmToken::LBrac: {
2813 SMLoc Loc = Parser.getTok().getLoc();
2814 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
2816 Parser.Lex(); // Eat '['
2818 // There's no comma after a '[', so we can parse the next operand
2820 return parseOperand(Operands, false, false);
2822 case AsmToken::LCurly:
2823 return parseVectorList(Operands);
2824 case AsmToken::Identifier: {
2825 // If we're expecting a Condition Code operand, then just parse that.
2827 return parseCondCode(Operands, invertCondCode);
2829 // If it's a register name, parse it.
2830 if (!parseRegister(Operands))
2833 // This could be an optional "shift" or "extend" operand.
2834 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
2835 // We can only continue if no tokens were eaten.
2836 if (GotShift != MatchOperand_NoMatch)
2839 // This was not a register so parse other operands that start with an
2840 // identifier (like labels) as expressions and create them as immediates.
2841 const MCExpr *IdVal;
2843 if (getParser().parseExpression(IdVal))
2845 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2846 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
2849 case AsmToken::Integer:
2850 case AsmToken::Real:
2851 case AsmToken::Hash: {
2852 // #42 -> immediate.
2855 parseOptionalToken(AsmToken::Hash);
2857 // Parse a negative sign
2858 bool isNegative = false;
2859 if (Parser.getTok().is(AsmToken::Minus)) {
2861 // We need to consume this token only when we have a Real, otherwise
2862 // we let parseSymbolicImmVal take care of it
2863 if (Parser.getLexer().peekTok().is(AsmToken::Real))
2867 // The only Real that should come through here is a literal #0.0 for
2868 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
2869 // so convert the value.
2870 const AsmToken &Tok = Parser.getTok();
2871 if (Tok.is(AsmToken::Real)) {
2872 APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
2873 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2874 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
2875 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
2876 Mnemonic != "fcmlt")
2877 return TokError("unexpected floating point literal");
2878 else if (IntVal != 0 || isNegative)
2879 return TokError("expected floating-point constant #0.0");
2880 Parser.Lex(); // Eat the token.
2883 AArch64Operand::CreateToken("#0", false, S, getContext()));
2885 AArch64Operand::CreateToken(".0", false, S, getContext()));
2889 const MCExpr *ImmVal;
2890 if (parseSymbolicImmVal(ImmVal))
2893 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2894 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
2897 case AsmToken::Equal: {
2898 SMLoc Loc = getLoc();
2899 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
2900 return TokError("unexpected token in operand");
2901 Parser.Lex(); // Eat '='
2902 const MCExpr *SubExprVal;
2903 if (getParser().parseExpression(SubExprVal))
2906 if (Operands.size() < 2 ||
2907 !static_cast<AArch64Operand &>(*Operands[1]).isReg())
2908 return Error(Loc, "Only valid when first operand is register");
2911 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
2912 Operands[1]->getReg());
2914 MCContext& Ctx = getContext();
2915 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
2916 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
2917 if (isa<MCConstantExpr>(SubExprVal)) {
2918 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
2919 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
2920 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
2924 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
2925 Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
2926 Operands.push_back(AArch64Operand::CreateImm(
2927 MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
2929 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
2930 ShiftAmt, true, S, E, Ctx));
2933 APInt Simm = APInt(64, Imm << ShiftAmt);
2934 // check if the immediate is an unsigned or signed 32-bit int for W regs
2935 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
2936 return Error(Loc, "Immediate too large for register");
2938 // If it is a label or an imm that cannot fit in a movz, put it into CP.
2939 const MCExpr *CPLoc =
2940 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
2941 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
2947 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
2949 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
2950 StringRef Name, SMLoc NameLoc,
2951 OperandVector &Operands) {
2952 MCAsmParser &Parser = getParser();
2953 Name = StringSwitch<StringRef>(Name.lower())
2954 .Case("beq", "b.eq")
2955 .Case("bne", "b.ne")
2956 .Case("bhs", "b.hs")
2957 .Case("bcs", "b.cs")
2958 .Case("blo", "b.lo")
2959 .Case("bcc", "b.cc")
2960 .Case("bmi", "b.mi")
2961 .Case("bpl", "b.pl")
2962 .Case("bvs", "b.vs")
2963 .Case("bvc", "b.vc")
2964 .Case("bhi", "b.hi")
2965 .Case("bls", "b.ls")
2966 .Case("bge", "b.ge")
2967 .Case("blt", "b.lt")
2968 .Case("bgt", "b.gt")
2969 .Case("ble", "b.le")
2970 .Case("bal", "b.al")
2971 .Case("bnv", "b.nv")
2974 // First check for the AArch64-specific .req directive.
2975 if (Parser.getTok().is(AsmToken::Identifier) &&
2976 Parser.getTok().getIdentifier() == ".req") {
2977 parseDirectiveReq(Name, NameLoc);
2978 // We always return 'error' for this, as we're done with this
2979 // statement and don't need to match the 'instruction."
2983 // Create the leading tokens for the mnemonic, split by '.' characters.
2984 size_t Start = 0, Next = Name.find('.');
2985 StringRef Head = Name.slice(Start, Next);
2987 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
2988 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi")
2989 return parseSysAlias(Head, NameLoc, Operands);
2992 AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
2995 // Handle condition codes for a branch mnemonic
2996 if (Head == "b" && Next != StringRef::npos) {
2998 Next = Name.find('.', Start + 1);
2999 Head = Name.slice(Start + 1, Next);
3001 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3002 (Head.data() - Name.data()));
3003 AArch64CC::CondCode CC = parseCondCodeString(Head);
3004 if (CC == AArch64CC::Invalid)
3005 return Error(SuffixLoc, "invalid condition code");
3007 AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3009 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3012 // Add the remaining tokens in the mnemonic.
3013 while (Next != StringRef::npos) {
3015 Next = Name.find('.', Start + 1);
3016 Head = Name.slice(Start, Next);
3017 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3018 (Head.data() - Name.data()) + 1);
3020 AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3023 // Conditional compare instructions have a Condition Code operand, which needs
3024 // to be parsed and an immediate operand created.
3025 bool condCodeFourthOperand =
3026 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3027 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3028 Head == "csinc" || Head == "csinv" || Head == "csneg");
3030 // These instructions are aliases to some of the conditional select
3031 // instructions. However, the condition code is inverted in the aliased
3034 // FIXME: Is this the correct way to handle these? Or should the parser
3035 // generate the aliased instructions directly?
3036 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3037 bool condCodeThirdOperand =
3038 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3040 // Read the remaining operands.
3041 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3042 // Read the first operand.
3043 if (parseOperand(Operands, false, false)) {
3048 while (parseOptionalToken(AsmToken::Comma)) {
3049 // Parse and remember the operand.
3050 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3051 (N == 3 && condCodeThirdOperand) ||
3052 (N == 2 && condCodeSecondOperand),
3053 condCodeSecondOperand || condCodeThirdOperand)) {
3057 // After successfully parsing some operands there are two special cases to
3058 // consider (i.e. notional operands not separated by commas). Both are due
3059 // to memory specifiers:
3060 // + An RBrac will end an address for load/store/prefetch
3061 // + An '!' will indicate a pre-indexed operation.
3063 // It's someone else's responsibility to make sure these tokens are sane
3064 // in the given context!
3066 SMLoc RLoc = Parser.getTok().getLoc();
3067 if (parseOptionalToken(AsmToken::RBrac))
3069 AArch64Operand::CreateToken("]", false, RLoc, getContext()));
3070 SMLoc ELoc = Parser.getTok().getLoc();
3071 if (parseOptionalToken(AsmToken::Exclaim))
3073 AArch64Operand::CreateToken("!", false, ELoc, getContext()));
3079 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3085 // FIXME: This entire function is a giant hack to provide us with decent
3086 // operand range validation/diagnostics until TableGen/MC can be extended
3087 // to support autogeneration of this kind of validation.
3088 bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3089 SmallVectorImpl<SMLoc> &Loc) {
3090 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3091 // Check for indexed addressing modes w/ the base register being the
3092 // same as a destination/source register or pair load where
3093 // the Rt == Rt2. All of those are undefined behaviour.
3094 switch (Inst.getOpcode()) {
3095 case AArch64::LDPSWpre:
3096 case AArch64::LDPWpost:
3097 case AArch64::LDPWpre:
3098 case AArch64::LDPXpost:
3099 case AArch64::LDPXpre: {
3100 unsigned Rt = Inst.getOperand(1).getReg();
3101 unsigned Rt2 = Inst.getOperand(2).getReg();
3102 unsigned Rn = Inst.getOperand(3).getReg();
3103 if (RI->isSubRegisterEq(Rn, Rt))
3104 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3105 "is also a destination");
3106 if (RI->isSubRegisterEq(Rn, Rt2))
3107 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3108 "is also a destination");
3111 case AArch64::LDPDi:
3112 case AArch64::LDPQi:
3113 case AArch64::LDPSi:
3114 case AArch64::LDPSWi:
3115 case AArch64::LDPWi:
3116 case AArch64::LDPXi: {
3117 unsigned Rt = Inst.getOperand(0).getReg();
3118 unsigned Rt2 = Inst.getOperand(1).getReg();
3120 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3123 case AArch64::LDPDpost:
3124 case AArch64::LDPDpre:
3125 case AArch64::LDPQpost:
3126 case AArch64::LDPQpre:
3127 case AArch64::LDPSpost:
3128 case AArch64::LDPSpre:
3129 case AArch64::LDPSWpost: {
3130 unsigned Rt = Inst.getOperand(1).getReg();
3131 unsigned Rt2 = Inst.getOperand(2).getReg();
3133 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3136 case AArch64::STPDpost:
3137 case AArch64::STPDpre:
3138 case AArch64::STPQpost:
3139 case AArch64::STPQpre:
3140 case AArch64::STPSpost:
3141 case AArch64::STPSpre:
3142 case AArch64::STPWpost:
3143 case AArch64::STPWpre:
3144 case AArch64::STPXpost:
3145 case AArch64::STPXpre: {
3146 unsigned Rt = Inst.getOperand(1).getReg();
3147 unsigned Rt2 = Inst.getOperand(2).getReg();
3148 unsigned Rn = Inst.getOperand(3).getReg();
3149 if (RI->isSubRegisterEq(Rn, Rt))
3150 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3151 "is also a source");
3152 if (RI->isSubRegisterEq(Rn, Rt2))
3153 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3154 "is also a source");
3157 case AArch64::LDRBBpre:
3158 case AArch64::LDRBpre:
3159 case AArch64::LDRHHpre:
3160 case AArch64::LDRHpre:
3161 case AArch64::LDRSBWpre:
3162 case AArch64::LDRSBXpre:
3163 case AArch64::LDRSHWpre:
3164 case AArch64::LDRSHXpre:
3165 case AArch64::LDRSWpre:
3166 case AArch64::LDRWpre:
3167 case AArch64::LDRXpre:
3168 case AArch64::LDRBBpost:
3169 case AArch64::LDRBpost:
3170 case AArch64::LDRHHpost:
3171 case AArch64::LDRHpost:
3172 case AArch64::LDRSBWpost:
3173 case AArch64::LDRSBXpost:
3174 case AArch64::LDRSHWpost:
3175 case AArch64::LDRSHXpost:
3176 case AArch64::LDRSWpost:
3177 case AArch64::LDRWpost:
3178 case AArch64::LDRXpost: {
3179 unsigned Rt = Inst.getOperand(1).getReg();
3180 unsigned Rn = Inst.getOperand(2).getReg();
3181 if (RI->isSubRegisterEq(Rn, Rt))
3182 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3183 "is also a source");
3186 case AArch64::STRBBpost:
3187 case AArch64::STRBpost:
3188 case AArch64::STRHHpost:
3189 case AArch64::STRHpost:
3190 case AArch64::STRWpost:
3191 case AArch64::STRXpost:
3192 case AArch64::STRBBpre:
3193 case AArch64::STRBpre:
3194 case AArch64::STRHHpre:
3195 case AArch64::STRHpre:
3196 case AArch64::STRWpre:
3197 case AArch64::STRXpre: {
3198 unsigned Rt = Inst.getOperand(1).getReg();
3199 unsigned Rn = Inst.getOperand(2).getReg();
3200 if (RI->isSubRegisterEq(Rn, Rt))
3201 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3202 "is also a source");
3207 // Now check immediate ranges. Separate from the above as there is overlap
3208 // in the instructions being checked and this keeps the nested conditionals
3210 switch (Inst.getOpcode()) {
3211 case AArch64::ADDSWri:
3212 case AArch64::ADDSXri:
3213 case AArch64::ADDWri:
3214 case AArch64::ADDXri:
3215 case AArch64::SUBSWri:
3216 case AArch64::SUBSXri:
3217 case AArch64::SUBWri:
3218 case AArch64::SUBXri: {
3219 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3220 // some slight duplication here.
3221 if (Inst.getOperand(2).isExpr()) {
3222 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3223 AArch64MCExpr::VariantKind ELFRefKind;
3224 MCSymbolRefExpr::VariantKind DarwinRefKind;
3226 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3228 // Only allow these with ADDXri.
3229 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3230 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3231 Inst.getOpcode() == AArch64::ADDXri)
3234 // Only allow these with ADDXri/ADDWri
3235 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3236 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3237 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3238 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3239 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3240 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3241 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3242 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
3243 (Inst.getOpcode() == AArch64::ADDXri ||
3244 Inst.getOpcode() == AArch64::ADDWri))
3247 // Don't allow symbol refs in the immediate field otherwise
3248 // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
3249 // operands of the original instruction (i.e. 'add w0, w1, borked' vs
3250 // 'cmp w0, 'borked')
3251 return Error(Loc.back(), "invalid immediate expression");
3253 // We don't validate more complex expressions here
3262 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
3264 case Match_MissingFeature:
3266 "instruction requires a CPU feature not currently enabled");
3267 case Match_InvalidOperand:
3268 return Error(Loc, "invalid operand for instruction");
3269 case Match_InvalidSuffix:
3270 return Error(Loc, "invalid type suffix for instruction");
3271 case Match_InvalidCondCode:
3272 return Error(Loc, "expected AArch64 condition code");
3273 case Match_AddSubRegExtendSmall:
3275 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3276 case Match_AddSubRegExtendLarge:
3278 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3279 case Match_AddSubSecondSource:
3281 "expected compatible register, symbol or integer in range [0, 4095]");
3282 case Match_LogicalSecondSource:
3283 return Error(Loc, "expected compatible register or logical immediate");
3284 case Match_InvalidMovImm32Shift:
3285 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3286 case Match_InvalidMovImm64Shift:
3287 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3288 case Match_AddSubRegShift32:
3290 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3291 case Match_AddSubRegShift64:
3293 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3294 case Match_InvalidFPImm:
3296 "expected compatible register or floating-point constant");
3297 case Match_InvalidMemoryIndexedSImm9:
3298 return Error(Loc, "index must be an integer in range [-256, 255].");
3299 case Match_InvalidMemoryIndexed4SImm7:
3300 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3301 case Match_InvalidMemoryIndexed8SImm7:
3302 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3303 case Match_InvalidMemoryIndexed16SImm7:
3304 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
3305 case Match_InvalidMemoryWExtend8:
3307 "expected 'uxtw' or 'sxtw' with optional shift of #0");
3308 case Match_InvalidMemoryWExtend16:
3310 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3311 case Match_InvalidMemoryWExtend32:
3313 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3314 case Match_InvalidMemoryWExtend64:
3316 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3317 case Match_InvalidMemoryWExtend128:
3319 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3320 case Match_InvalidMemoryXExtend8:
3322 "expected 'lsl' or 'sxtx' with optional shift of #0");
3323 case Match_InvalidMemoryXExtend16:
3325 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3326 case Match_InvalidMemoryXExtend32:
3328 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3329 case Match_InvalidMemoryXExtend64:
3331 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3332 case Match_InvalidMemoryXExtend128:
3334 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
3335 case Match_InvalidMemoryIndexed1:
3336 return Error(Loc, "index must be an integer in range [0, 4095].");
3337 case Match_InvalidMemoryIndexed2:
3338 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3339 case Match_InvalidMemoryIndexed4:
3340 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3341 case Match_InvalidMemoryIndexed8:
3342 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3343 case Match_InvalidMemoryIndexed16:
3344 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
3345 case Match_InvalidImm0_1:
3346 return Error(Loc, "immediate must be an integer in range [0, 1].");
3347 case Match_InvalidImm0_7:
3348 return Error(Loc, "immediate must be an integer in range [0, 7].");
3349 case Match_InvalidImm0_15:
3350 return Error(Loc, "immediate must be an integer in range [0, 15].");
3351 case Match_InvalidImm0_31:
3352 return Error(Loc, "immediate must be an integer in range [0, 31].");
3353 case Match_InvalidImm0_63:
3354 return Error(Loc, "immediate must be an integer in range [0, 63].");
3355 case Match_InvalidImm0_127:
3356 return Error(Loc, "immediate must be an integer in range [0, 127].");
3357 case Match_InvalidImm0_255:
3358 return Error(Loc, "immediate must be an integer in range [0, 255].");
3359 case Match_InvalidImm0_65535:
3360 return Error(Loc, "immediate must be an integer in range [0, 65535].");
3361 case Match_InvalidImm1_8:
3362 return Error(Loc, "immediate must be an integer in range [1, 8].");
3363 case Match_InvalidImm1_16:
3364 return Error(Loc, "immediate must be an integer in range [1, 16].");
3365 case Match_InvalidImm1_32:
3366 return Error(Loc, "immediate must be an integer in range [1, 32].");
3367 case Match_InvalidImm1_64:
3368 return Error(Loc, "immediate must be an integer in range [1, 64].");
3369 case Match_InvalidIndex1:
3370 return Error(Loc, "expected lane specifier '[1]'");
3371 case Match_InvalidIndexB:
3372 return Error(Loc, "vector lane must be an integer in range [0, 15].");
3373 case Match_InvalidIndexH:
3374 return Error(Loc, "vector lane must be an integer in range [0, 7].");
3375 case Match_InvalidIndexS:
3376 return Error(Loc, "vector lane must be an integer in range [0, 3].");
3377 case Match_InvalidIndexD:
3378 return Error(Loc, "vector lane must be an integer in range [0, 1].");
3379 case Match_InvalidLabel:
3380 return Error(Loc, "expected label or encodable integer pc offset");
3382 return Error(Loc, "expected readable system register");
3384 return Error(Loc, "expected writable system register or pstate");
3385 case Match_MnemonicFail:
3386 return Error(Loc, "unrecognized instruction mnemonic");
3388 llvm_unreachable("unexpected error code!");
3392 static const char *getSubtargetFeatureName(uint64_t Val);
3394 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3395 OperandVector &Operands,
3397 uint64_t &ErrorInfo,
3398 bool MatchingInlineAsm) {
3399 assert(!Operands.empty() && "Unexpect empty operand list!");
3400 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3401 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3403 StringRef Tok = Op.getToken();
3404 unsigned NumOperands = Operands.size();
3406 if (NumOperands == 4 && Tok == "lsl") {
3407 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3408 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3409 if (Op2.isReg() && Op3.isImm()) {
3410 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3412 uint64_t Op3Val = Op3CE->getValue();
3413 uint64_t NewOp3Val = 0;
3414 uint64_t NewOp4Val = 0;
3415 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3417 NewOp3Val = (32 - Op3Val) & 0x1f;
3418 NewOp4Val = 31 - Op3Val;
3420 NewOp3Val = (64 - Op3Val) & 0x3f;
3421 NewOp4Val = 63 - Op3Val;
3424 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
3425 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
3427 Operands[0] = AArch64Operand::CreateToken(
3428 "ubfm", false, Op.getStartLoc(), getContext());
3429 Operands.push_back(AArch64Operand::CreateImm(
3430 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3431 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3432 Op3.getEndLoc(), getContext());
3435 } else if (NumOperands == 4 && Tok == "bfc") {
3436 // FIXME: Horrible hack to handle BFC->BFM alias.
3437 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3438 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
3439 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
3441 if (Op1.isReg() && LSBOp.isImm() && WidthOp.isImm()) {
3442 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
3443 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
3445 if (LSBCE && WidthCE) {
3446 uint64_t LSB = LSBCE->getValue();
3447 uint64_t Width = WidthCE->getValue();
3449 uint64_t RegWidth = 0;
3450 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3456 if (LSB >= RegWidth)
3457 return Error(LSBOp.getStartLoc(),
3458 "expected integer in range [0, 31]");
3459 if (Width < 1 || Width > RegWidth)
3460 return Error(WidthOp.getStartLoc(),
3461 "expected integer in range [1, 32]");
3465 ImmR = (32 - LSB) & 0x1f;
3467 ImmR = (64 - LSB) & 0x3f;
3469 uint64_t ImmS = Width - 1;
3471 if (ImmR != 0 && ImmS >= ImmR)
3472 return Error(WidthOp.getStartLoc(),
3473 "requested insert overflows register");
3475 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
3476 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
3477 Operands[0] = AArch64Operand::CreateToken(
3478 "bfm", false, Op.getStartLoc(), getContext());
3479 Operands[2] = AArch64Operand::CreateReg(
3480 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, false, SMLoc(),
3481 SMLoc(), getContext());
3482 Operands[3] = AArch64Operand::CreateImm(
3483 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
3484 Operands.emplace_back(
3485 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
3486 WidthOp.getEndLoc(), getContext()));
3489 } else if (NumOperands == 5) {
3490 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3491 // UBFIZ -> UBFM aliases.
3492 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3493 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3494 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3495 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3497 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3498 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3499 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3501 if (Op3CE && Op4CE) {
3502 uint64_t Op3Val = Op3CE->getValue();
3503 uint64_t Op4Val = Op4CE->getValue();
3505 uint64_t RegWidth = 0;
3506 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3512 if (Op3Val >= RegWidth)
3513 return Error(Op3.getStartLoc(),
3514 "expected integer in range [0, 31]");
3515 if (Op4Val < 1 || Op4Val > RegWidth)
3516 return Error(Op4.getStartLoc(),
3517 "expected integer in range [1, 32]");
3519 uint64_t NewOp3Val = 0;
3521 NewOp3Val = (32 - Op3Val) & 0x1f;
3523 NewOp3Val = (64 - Op3Val) & 0x3f;
3525 uint64_t NewOp4Val = Op4Val - 1;
3527 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3528 return Error(Op4.getStartLoc(),
3529 "requested insert overflows register");
3531 const MCExpr *NewOp3 =
3532 MCConstantExpr::create(NewOp3Val, getContext());
3533 const MCExpr *NewOp4 =
3534 MCConstantExpr::create(NewOp4Val, getContext());
3535 Operands[3] = AArch64Operand::CreateImm(
3536 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
3537 Operands[4] = AArch64Operand::CreateImm(
3538 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3540 Operands[0] = AArch64Operand::CreateToken(
3541 "bfm", false, Op.getStartLoc(), getContext());
3542 else if (Tok == "sbfiz")
3543 Operands[0] = AArch64Operand::CreateToken(
3544 "sbfm", false, Op.getStartLoc(), getContext());
3545 else if (Tok == "ubfiz")
3546 Operands[0] = AArch64Operand::CreateToken(
3547 "ubfm", false, Op.getStartLoc(), getContext());
3549 llvm_unreachable("No valid mnemonic for alias?");
3553 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3554 // UBFX -> UBFM aliases.
3555 } else if (NumOperands == 5 &&
3556 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
3557 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3558 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3559 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3561 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3562 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3563 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3565 if (Op3CE && Op4CE) {
3566 uint64_t Op3Val = Op3CE->getValue();
3567 uint64_t Op4Val = Op4CE->getValue();
3569 uint64_t RegWidth = 0;
3570 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3576 if (Op3Val >= RegWidth)
3577 return Error(Op3.getStartLoc(),
3578 "expected integer in range [0, 31]");
3579 if (Op4Val < 1 || Op4Val > RegWidth)
3580 return Error(Op4.getStartLoc(),
3581 "expected integer in range [1, 32]");
3583 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3585 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
3586 return Error(Op4.getStartLoc(),
3587 "requested extract overflows register");
3589 const MCExpr *NewOp4 =
3590 MCConstantExpr::create(NewOp4Val, getContext());
3591 Operands[4] = AArch64Operand::CreateImm(
3592 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3594 Operands[0] = AArch64Operand::CreateToken(
3595 "bfm", false, Op.getStartLoc(), getContext());
3596 else if (Tok == "sbfx")
3597 Operands[0] = AArch64Operand::CreateToken(
3598 "sbfm", false, Op.getStartLoc(), getContext());
3599 else if (Tok == "ubfx")
3600 Operands[0] = AArch64Operand::CreateToken(
3601 "ubfm", false, Op.getStartLoc(), getContext());
3603 llvm_unreachable("No valid mnemonic for alias?");
3608 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3609 // InstAlias can't quite handle this since the reg classes aren't
3611 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3612 // The source register can be Wn here, but the matcher expects a
3613 // GPR64. Twiddle it here if necessary.
3614 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3616 unsigned Reg = getXRegFromWReg(Op.getReg());
3617 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3618 Op.getEndLoc(), getContext());
3621 // FIXME: Likewise for sxt[bh] with a Xd dst operand
3622 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
3623 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3625 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3627 // The source register can be Wn here, but the matcher expects a
3628 // GPR64. Twiddle it here if necessary.
3629 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3631 unsigned Reg = getXRegFromWReg(Op.getReg());
3632 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3633 Op.getEndLoc(), getContext());
3637 // FIXME: Likewise for uxt[bh] with a Xd dst operand
3638 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
3639 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3641 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3643 // The source register can be Wn here, but the matcher expects a
3644 // GPR32. Twiddle it here if necessary.
3645 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3647 unsigned Reg = getWRegFromXReg(Op.getReg());
3648 Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3649 Op.getEndLoc(), getContext());
3655 // First try to match against the secondary set of tables containing the
3656 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
3657 unsigned MatchResult =
3658 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
3660 // If that fails, try against the alternate table containing long-form NEON:
3661 // "fadd v0.2s, v1.2s, v2.2s"
3662 if (MatchResult != Match_Success) {
3663 // But first, save the short-form match result: we can use it in case the
3664 // long-form match also fails.
3665 auto ShortFormNEONErrorInfo = ErrorInfo;
3666 auto ShortFormNEONMatchResult = MatchResult;
3669 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
3671 // Now, both matches failed, and the long-form match failed on the mnemonic
3672 // suffix token operand. The short-form match failure is probably more
3673 // relevant: use it instead.
3674 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
3675 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
3676 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
3677 MatchResult = ShortFormNEONMatchResult;
3678 ErrorInfo = ShortFormNEONErrorInfo;
3682 switch (MatchResult) {
3683 case Match_Success: {
3684 // Perform range checking and other semantic validations
3685 SmallVector<SMLoc, 8> OperandLocs;
3686 NumOperands = Operands.size();
3687 for (unsigned i = 1; i < NumOperands; ++i)
3688 OperandLocs.push_back(Operands[i]->getStartLoc());
3689 if (validateInstruction(Inst, OperandLocs))
3693 Out.EmitInstruction(Inst, getSTI());
3696 case Match_MissingFeature: {
3697 assert(ErrorInfo && "Unknown missing feature!");
3698 // Special case the error message for the very common case where only
3699 // a single subtarget feature is missing (neon, e.g.).
3700 std::string Msg = "instruction requires:";
3702 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
3703 if (ErrorInfo & Mask) {
3705 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
3709 return Error(IDLoc, Msg);
3711 case Match_MnemonicFail:
3712 return showMatchError(IDLoc, MatchResult);
3713 case Match_InvalidOperand: {
3714 SMLoc ErrorLoc = IDLoc;
3716 if (ErrorInfo != ~0ULL) {
3717 if (ErrorInfo >= Operands.size())
3718 return Error(IDLoc, "too few operands for instruction",
3719 SMRange(IDLoc, getTok().getLoc()));
3721 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3722 if (ErrorLoc == SMLoc())
3725 // If the match failed on a suffix token operand, tweak the diagnostic
3727 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
3728 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
3729 MatchResult = Match_InvalidSuffix;
3731 return showMatchError(ErrorLoc, MatchResult);
3733 case Match_InvalidMemoryIndexed1:
3734 case Match_InvalidMemoryIndexed2:
3735 case Match_InvalidMemoryIndexed4:
3736 case Match_InvalidMemoryIndexed8:
3737 case Match_InvalidMemoryIndexed16:
3738 case Match_InvalidCondCode:
3739 case Match_AddSubRegExtendSmall:
3740 case Match_AddSubRegExtendLarge:
3741 case Match_AddSubSecondSource:
3742 case Match_LogicalSecondSource:
3743 case Match_AddSubRegShift32:
3744 case Match_AddSubRegShift64:
3745 case Match_InvalidMovImm32Shift:
3746 case Match_InvalidMovImm64Shift:
3747 case Match_InvalidFPImm:
3748 case Match_InvalidMemoryWExtend8:
3749 case Match_InvalidMemoryWExtend16:
3750 case Match_InvalidMemoryWExtend32:
3751 case Match_InvalidMemoryWExtend64:
3752 case Match_InvalidMemoryWExtend128:
3753 case Match_InvalidMemoryXExtend8:
3754 case Match_InvalidMemoryXExtend16:
3755 case Match_InvalidMemoryXExtend32:
3756 case Match_InvalidMemoryXExtend64:
3757 case Match_InvalidMemoryXExtend128:
3758 case Match_InvalidMemoryIndexed4SImm7:
3759 case Match_InvalidMemoryIndexed8SImm7:
3760 case Match_InvalidMemoryIndexed16SImm7:
3761 case Match_InvalidMemoryIndexedSImm9:
3762 case Match_InvalidImm0_1:
3763 case Match_InvalidImm0_7:
3764 case Match_InvalidImm0_15:
3765 case Match_InvalidImm0_31:
3766 case Match_InvalidImm0_63:
3767 case Match_InvalidImm0_127:
3768 case Match_InvalidImm0_255:
3769 case Match_InvalidImm0_65535:
3770 case Match_InvalidImm1_8:
3771 case Match_InvalidImm1_16:
3772 case Match_InvalidImm1_32:
3773 case Match_InvalidImm1_64:
3774 case Match_InvalidIndex1:
3775 case Match_InvalidIndexB:
3776 case Match_InvalidIndexH:
3777 case Match_InvalidIndexS:
3778 case Match_InvalidIndexD:
3779 case Match_InvalidLabel:
3782 if (ErrorInfo >= Operands.size())
3783 return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
3784 // Any time we get here, there's nothing fancy to do. Just get the
3785 // operand SMLoc and display the diagnostic.
3786 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3787 if (ErrorLoc == SMLoc())
3789 return showMatchError(ErrorLoc, MatchResult);
3793 llvm_unreachable("Implement any new match types added!");
3796 /// ParseDirective parses the arm specific directives
3797 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
3798 const MCObjectFileInfo::Environment Format =
3799 getContext().getObjectFileInfo()->getObjectFileType();
3800 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
3801 bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
3803 StringRef IDVal = DirectiveID.getIdentifier();
3804 SMLoc Loc = DirectiveID.getLoc();
3805 if (IDVal == ".arch")
3806 parseDirectiveArch(Loc);
3807 else if (IDVal == ".cpu")
3808 parseDirectiveCPU(Loc);
3809 else if (IDVal == ".hword")
3810 parseDirectiveWord(2, Loc);
3811 else if (IDVal == ".word")
3812 parseDirectiveWord(4, Loc);
3813 else if (IDVal == ".xword")
3814 parseDirectiveWord(8, Loc);
3815 else if (IDVal == ".tlsdesccall")
3816 parseDirectiveTLSDescCall(Loc);
3817 else if (IDVal == ".ltorg" || IDVal == ".pool")
3818 parseDirectiveLtorg(Loc);
3819 else if (IDVal == ".unreq")
3820 parseDirectiveUnreq(Loc);
3821 else if (!IsMachO && !IsCOFF) {
3822 if (IDVal == ".inst")
3823 parseDirectiveInst(Loc);
3826 } else if (IDVal == MCLOHDirectiveName())
3827 parseDirectiveLOH(IDVal, Loc);
3833 static const struct {
3835 const FeatureBitset Features;
3836 } ExtensionMap[] = {
3837 { "crc", {AArch64::FeatureCRC} },
3838 { "crypto", {AArch64::FeatureCrypto} },
3839 { "fp", {AArch64::FeatureFPARMv8} },
3840 { "simd", {AArch64::FeatureNEON} },
3841 { "ras", {AArch64::FeatureRAS} },
3842 { "lse", {AArch64::FeatureLSE} },
3844 // FIXME: Unsupported extensions
3851 /// parseDirectiveArch
3853 bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
3854 SMLoc ArchLoc = getLoc();
3856 StringRef Arch, ExtensionString;
3857 std::tie(Arch, ExtensionString) =
3858 getParser().parseStringToEndOfStatement().trim().split('+');
3860 unsigned ID = AArch64::parseArch(Arch);
3861 if (ID == static_cast<unsigned>(AArch64::ArchKind::AK_INVALID))
3862 return Error(ArchLoc, "unknown arch name");
3864 if (parseToken(AsmToken::EndOfStatement))
3867 // Get the architecture and extension features.
3868 std::vector<StringRef> AArch64Features;
3869 AArch64::getArchFeatures(ID, AArch64Features);
3870 AArch64::getExtensionFeatures(AArch64::getDefaultExtensions("generic", ID),
3873 MCSubtargetInfo &STI = copySTI();
3874 std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
3875 STI.setDefaultFeatures("generic", join(ArchFeatures.begin(), ArchFeatures.end(), ","));
3877 SmallVector<StringRef, 4> RequestedExtensions;
3878 if (!ExtensionString.empty())
3879 ExtensionString.split(RequestedExtensions, '+');
3881 FeatureBitset Features = STI.getFeatureBits();
3882 for (auto Name : RequestedExtensions) {
3883 bool EnableFeature = true;
3885 if (Name.startswith_lower("no")) {
3886 EnableFeature = false;
3887 Name = Name.substr(2);
3890 for (const auto &Extension : ExtensionMap) {
3891 if (Extension.Name != Name)
3894 if (Extension.Features.none())
3895 report_fatal_error("unsupported architectural extension: " + Name);
3897 FeatureBitset ToggleFeatures = EnableFeature
3898 ? (~Features & Extension.Features)
3899 : ( Features & Extension.Features);
3901 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
3902 setAvailableFeatures(Features);
3909 /// parseDirectiveCPU
3911 bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
3912 SMLoc CPULoc = getLoc();
3914 StringRef CPU, ExtensionString;
3915 std::tie(CPU, ExtensionString) =
3916 getParser().parseStringToEndOfStatement().trim().split('+');
3918 if (parseToken(AsmToken::EndOfStatement))
3921 SmallVector<StringRef, 4> RequestedExtensions;
3922 if (!ExtensionString.empty())
3923 ExtensionString.split(RequestedExtensions, '+');
3925 // FIXME This is using tablegen data, but should be moved to ARMTargetParser
3926 // once that is tablegen'ed
3927 if (!getSTI().isCPUStringValid(CPU)) {
3928 Error(CPULoc, "unknown CPU name");
3932 MCSubtargetInfo &STI = copySTI();
3933 STI.setDefaultFeatures(CPU, "");
3935 FeatureBitset Features = STI.getFeatureBits();
3936 for (auto Name : RequestedExtensions) {
3937 bool EnableFeature = true;
3939 if (Name.startswith_lower("no")) {
3940 EnableFeature = false;
3941 Name = Name.substr(2);
3944 for (const auto &Extension : ExtensionMap) {
3945 if (Extension.Name != Name)
3948 if (Extension.Features.none())
3949 report_fatal_error("unsupported architectural extension: " + Name);
3951 FeatureBitset ToggleFeatures = EnableFeature
3952 ? (~Features & Extension.Features)
3953 : ( Features & Extension.Features);
3955 ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
3956 setAvailableFeatures(Features);
3964 /// parseDirectiveWord
3965 /// ::= .word [ expression (, expression)* ]
3966 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
3967 auto parseOp = [&]() -> bool {
3968 const MCExpr *Value;
3969 if (getParser().parseExpression(Value))
3971 getParser().getStreamer().EmitValue(Value, Size, L);
3975 if (parseMany(parseOp))
3980 /// parseDirectiveInst
3981 /// ::= .inst opcode [, ...]
3982 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
3983 if (getLexer().is(AsmToken::EndOfStatement))
3984 return Error(Loc, "expected expression following '.inst' directive");
3986 auto parseOp = [&]() -> bool {
3989 if (check(getParser().parseExpression(Expr), L, "expected expression"))
3991 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
3992 if (check(!Value, L, "expected constant expression"))
3994 getTargetStreamer().emitInst(Value->getValue());
3998 if (parseMany(parseOp))
3999 return addErrorSuffix(" in '.inst' directive");
4003 // parseDirectiveTLSDescCall:
4004 // ::= .tlsdesccall symbol
4005 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4007 if (check(getParser().parseIdentifier(Name), L,
4008 "expected symbol after directive") ||
4009 parseToken(AsmToken::EndOfStatement))
4012 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
4013 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
4014 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
4017 Inst.setOpcode(AArch64::TLSDESCCALL);
4018 Inst.addOperand(MCOperand::createExpr(Expr));
4020 getParser().getStreamer().EmitInstruction(Inst, getSTI());
4024 /// ::= .loh <lohName | lohId> label1, ..., labelN
4025 /// The number of arguments depends on the loh identifier.
4026 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
4028 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4029 if (getParser().getTok().isNot(AsmToken::Integer))
4030 return TokError("expected an identifier or a number in directive");
4031 // We successfully get a numeric value for the identifier.
4032 // Check if it is valid.
4033 int64_t Id = getParser().getTok().getIntVal();
4034 if (Id <= -1U && !isValidMCLOHType(Id))
4035 return TokError("invalid numeric identifier in directive");
4036 Kind = (MCLOHType)Id;
4038 StringRef Name = getTok().getIdentifier();
4039 // We successfully parse an identifier.
4040 // Check if it is a recognized one.
4041 int Id = MCLOHNameToId(Name);
4044 return TokError("invalid identifier in directive");
4045 Kind = (MCLOHType)Id;
4047 // Consume the identifier.
4049 // Get the number of arguments of this LOH.
4050 int NbArgs = MCLOHIdToNbArgs(Kind);
4052 assert(NbArgs != -1 && "Invalid number of arguments");
4054 SmallVector<MCSymbol *, 3> Args;
4055 for (int Idx = 0; Idx < NbArgs; ++Idx) {
4057 if (getParser().parseIdentifier(Name))
4058 return TokError("expected identifier in directive");
4059 Args.push_back(getContext().getOrCreateSymbol(Name));
4061 if (Idx + 1 == NbArgs)
4063 if (parseToken(AsmToken::Comma,
4064 "unexpected token in '" + Twine(IDVal) + "' directive"))
4067 if (parseToken(AsmToken::EndOfStatement,
4068 "unexpected token in '" + Twine(IDVal) + "' directive"))
4071 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4075 /// parseDirectiveLtorg
4076 /// ::= .ltorg | .pool
4077 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
4078 if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
4080 getTargetStreamer().emitCurrentConstantPool();
4084 /// parseDirectiveReq
4085 /// ::= name .req registername
4086 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
4087 MCAsmParser &Parser = getParser();
4088 Parser.Lex(); // Eat the '.req' token.
4089 SMLoc SRegLoc = getLoc();
4090 unsigned RegNum = tryParseRegister();
4091 bool IsVector = false;
4093 if (RegNum == static_cast<unsigned>(-1)) {
4095 RegNum = tryMatchVectorRegister(Kind, false);
4097 return Error(SRegLoc, "vector register without type specifier expected");
4101 if (RegNum == static_cast<unsigned>(-1))
4102 return Error(SRegLoc, "register name or alias expected");
4104 // Shouldn't be anything else.
4105 if (parseToken(AsmToken::EndOfStatement,
4106 "unexpected input in .req directive"))
4109 auto pair = std::make_pair(IsVector, RegNum);
4110 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
4111 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
4116 /// parseDirectiveUneq
4117 /// ::= .unreq registername
4118 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
4119 MCAsmParser &Parser = getParser();
4120 if (getTok().isNot(AsmToken::Identifier))
4121 return TokError("unexpected input in .unreq directive.");
4122 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
4123 Parser.Lex(); // Eat the identifier.
4124 if (parseToken(AsmToken::EndOfStatement))
4125 return addErrorSuffix("in '.unreq' directive");
4130 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
4131 AArch64MCExpr::VariantKind &ELFRefKind,
4132 MCSymbolRefExpr::VariantKind &DarwinRefKind,
4134 ELFRefKind = AArch64MCExpr::VK_INVALID;
4135 DarwinRefKind = MCSymbolRefExpr::VK_None;
4138 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
4139 ELFRefKind = AE->getKind();
4140 Expr = AE->getSubExpr();
4143 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4145 // It's a simple symbol reference with no addend.
4146 DarwinRefKind = SE->getKind();
4150 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4154 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4157 DarwinRefKind = SE->getKind();
4159 if (BE->getOpcode() != MCBinaryExpr::Add &&
4160 BE->getOpcode() != MCBinaryExpr::Sub)
4163 // See if the addend is is a constant, otherwise there's more going
4164 // on here than we can deal with.
4165 auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
4169 Addend = AddendExpr->getValue();
4170 if (BE->getOpcode() == MCBinaryExpr::Sub)
4173 // It's some symbol reference + a constant addend, but really
4174 // shouldn't use both Darwin and ELF syntax.
4175 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
4176 DarwinRefKind == MCSymbolRefExpr::VK_None;
4179 /// Force static initialization.
4180 extern "C" void LLVMInitializeAArch64AsmParser() {
4181 RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
4182 RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
4183 RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
4186 #define GET_REGISTER_MATCHER
4187 #define GET_SUBTARGET_FEATURE_NAME
4188 #define GET_MATCHER_IMPLEMENTATION
4189 #include "AArch64GenAsmMatcher.inc"
4191 // Define this matcher function after the auto-generated include so we
4192 // have the match class enum definitions.
4193 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
4195 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
4196 // If the kind is a token for a literal immediate, check if our asm
4197 // operand matches. This is for InstAliases which have a fixed-value
4198 // immediate in the syntax.
4199 int64_t ExpectedVal;
4202 return Match_InvalidOperand;
4244 return Match_InvalidOperand;
4245 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
4247 return Match_InvalidOperand;
4248 if (CE->getValue() == ExpectedVal)
4249 return Match_Success;
4250 return Match_InvalidOperand;
4253 OperandMatchResultTy
4254 AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
4258 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4259 Error(S, "expected register");
4260 return MatchOperand_ParseFail;
4263 int FirstReg = tryParseRegister();
4264 if (FirstReg == -1) {
4265 return MatchOperand_ParseFail;
4267 const MCRegisterClass &WRegClass =
4268 AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
4269 const MCRegisterClass &XRegClass =
4270 AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
4272 bool isXReg = XRegClass.contains(FirstReg),
4273 isWReg = WRegClass.contains(FirstReg);
4274 if (!isXReg && !isWReg) {
4275 Error(S, "expected first even register of a "
4276 "consecutive same-size even/odd register pair");
4277 return MatchOperand_ParseFail;
4280 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4281 unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
4283 if (FirstEncoding & 0x1) {
4284 Error(S, "expected first even register of a "
4285 "consecutive same-size even/odd register pair");
4286 return MatchOperand_ParseFail;
4290 if (getParser().getTok().isNot(AsmToken::Comma)) {
4291 Error(M, "expected comma");
4292 return MatchOperand_ParseFail;
4298 int SecondReg = tryParseRegister();
4299 if (SecondReg ==-1) {
4300 return MatchOperand_ParseFail;
4303 if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
4304 (isXReg && !XRegClass.contains(SecondReg)) ||
4305 (isWReg && !WRegClass.contains(SecondReg))) {
4306 Error(E,"expected second odd register of a "
4307 "consecutive same-size even/odd register pair");
4308 return MatchOperand_ParseFail;
4313 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
4314 &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
4316 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
4317 &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
4320 Operands.push_back(AArch64Operand::CreateReg(Pair, false, S, getLoc(),
4323 return MatchOperand_Success;