1 //===- X86Operand.h - Parsed X86 machine instruction ------------*- C++ -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 #ifndef LLVM_LIB_TARGET_X86_ASMPARSER_X86OPERAND_H
10 #define LLVM_LIB_TARGET_X86_ASMPARSER_X86OPERAND_H
#include "MCTargetDesc/X86IntelInstPrinter.h"
#include "MCTargetDesc/X86MCTargetDesc.h"
#include "X86AsmParserCommon.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SMLoc.h"
#include <cassert>
#include <memory>
/// X86Operand - Instances of this class represent a parsed X86 machine
/// instruction.
31 struct X86Operand final : public MCParsedAsmOperand {
  // Discriminates which payload of the operand is valid; exactly one of the
  // Token/Reg/Imm/Mem/Pref payloads is meaningful for a given Kind.
  enum KindTy { Token, Register, Immediate, Memory, Prefix, DXRegister } Kind;

  // Source range of the operand in the asm string being parsed.
  SMLoc StartLoc, EndLoc;

  /// If the memory operand is unsized and there are multiple instruction
  /// matches, prefer the one with this size.
  // NOTE(review): this field is read below as Mem.FrontendSize, so it appears
  // to belong to the nested Mem payload struct — confirm against the full file.
  unsigned FrontendSize;
  // Construct an operand of kind K covering [Start, End) in the asm string.
  // NOTE(review): CallOperand is initialized here but its declaration is not
  // visible in this view — presumably a bool member; confirm in the full file.
  X86Operand(KindTy K, SMLoc Start, SMLoc End)
      : Kind(K), StartLoc(Start), EndLoc(End), CallOperand(false) {}

  // Symbol name / frontend declaration recorded by the (MS inline-asm style)
  // parser; empty/null when not applicable.
  StringRef getSymName() override { return SymName; }
  void *getOpDecl() override { return OpDecl; }

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }

  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  /// getLocRange - Get the range between the first and last token of this
  /// operand.
  SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }

  /// getOffsetOfLoc - Get the location of the offset operator.
  SMLoc getOffsetOfLoc() const override { return OffsetOfLoc; }
100 void print(raw_ostream &OS) const override {
102 auto PrintImmValue = [&](const MCExpr *Val, const char *VName) {
103 if (Val->getKind() == MCExpr::Constant) {
104 if (auto Imm = cast<MCConstantExpr>(Val)->getValue())
106 } else if (Val->getKind() == MCExpr::SymbolRef) {
107 if (auto *SRE = dyn_cast<MCSymbolRefExpr>(Val)) {
108 const MCSymbol &Sym = SRE->getSymbol();
109 if (const char *SymNameStr = Sym.getName().data())
110 OS << VName << SymNameStr;
120 OS << "Reg:" << X86IntelInstPrinter::getRegisterName(Reg.RegNo);
126 PrintImmValue(Imm.Val, "Imm:");
129 OS << "Prefix:" << Pref.Prefixes;
132 OS << "Memory: ModeSize=" << Mem.ModeSize;
134 OS << ",Size=" << Mem.Size;
136 OS << ",BaseReg=" << X86IntelInstPrinter::getRegisterName(Mem.BaseReg);
139 << X86IntelInstPrinter::getRegisterName(Mem.IndexReg);
141 OS << ",Scale=" << Mem.Scale;
143 PrintImmValue(Mem.Disp, ",Disp=");
145 OS << ",SegReg=" << X86IntelInstPrinter::getRegisterName(Mem.SegReg);
150 StringRef getToken() const {
151 assert(Kind == Token && "Invalid access!");
152 return StringRef(Tok.Data, Tok.Length);
154 void setTokenValue(StringRef Value) {
155 assert(Kind == Token && "Invalid access!");
156 Tok.Data = Value.data();
157 Tok.Length = Value.size();
160 unsigned getReg() const override {
161 assert(Kind == Register && "Invalid access!");
165 unsigned getPrefix() const {
166 assert(Kind == Prefix && "Invalid access!");
167 return Pref.Prefixes;
170 const MCExpr *getImm() const {
171 assert(Kind == Immediate && "Invalid access!");
175 const MCExpr *getMemDisp() const {
176 assert(Kind == Memory && "Invalid access!");
179 unsigned getMemSegReg() const {
180 assert(Kind == Memory && "Invalid access!");
183 unsigned getMemBaseReg() const {
184 assert(Kind == Memory && "Invalid access!");
187 unsigned getMemIndexReg() const {
188 assert(Kind == Memory && "Invalid access!");
191 unsigned getMemScale() const {
192 assert(Kind == Memory && "Invalid access!");
195 unsigned getMemModeSize() const {
196 assert(Kind == Memory && "Invalid access!");
199 unsigned getMemFrontendSize() const {
200 assert(Kind == Memory && "Invalid access!");
201 return Mem.FrontendSize;
  // Kind predicates consumed by the generated asm-matcher tables.
  bool isToken() const override {return Kind == Token; }

  bool isImm() const override { return Kind == Immediate; }
208 bool isImmSExti16i8() const {
212 // If this isn't a constant expr, just assume it fits and let relaxation
214 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
218 // Otherwise, check the value is in a range that makes sense for this
220 return isImmSExti16i8Value(CE->getValue());
222 bool isImmSExti32i8() const {
226 // If this isn't a constant expr, just assume it fits and let relaxation
228 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
232 // Otherwise, check the value is in a range that makes sense for this
234 return isImmSExti32i8Value(CE->getValue());
236 bool isImmSExti64i8() const {
240 // If this isn't a constant expr, just assume it fits and let relaxation
242 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
246 // Otherwise, check the value is in a range that makes sense for this
248 return isImmSExti64i8Value(CE->getValue());
250 bool isImmSExti64i32() const {
254 // If this isn't a constant expr, just assume it fits and let relaxation
256 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
260 // Otherwise, check the value is in a range that makes sense for this
262 return isImmSExti64i32Value(CE->getValue());
265 bool isImmUnsignedi4() const {
266 if (!isImm()) return false;
267 // If this isn't a constant expr, reject it. The immediate byte is shared
268 // with a register encoding. We can't have it affected by a relocation.
269 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
270 if (!CE) return false;
271 return isImmUnsignedi4Value(CE->getValue());
274 bool isImmUnsignedi8() const {
275 if (!isImm()) return false;
276 // If this isn't a constant expr, just assume it fits and let relaxation
278 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
279 if (!CE) return true;
280 return isImmUnsignedi8Value(CE->getValue());
  // True for an immediate that refers to a local symbol (Imm.LocalRef set by
  // CreateImm when GlobalRef is false).
  bool isOffsetOfLocal() const override { return isImm() && Imm.LocalRef; }

  // True when the parser recorded that the address of this operand is needed.
  bool needAddressOf() const override { return AddressOf; }

  bool isMem() const override { return Kind == Memory; }
288 bool isMemUnsized() const {
289 return Kind == Memory && Mem.Size == 0;
291 bool isMem8() const {
292 return Kind == Memory && (!Mem.Size || Mem.Size == 8);
294 bool isMem16() const {
295 return Kind == Memory && (!Mem.Size || Mem.Size == 16);
297 bool isMem32() const {
298 return Kind == Memory && (!Mem.Size || Mem.Size == 32);
300 bool isMem64() const {
301 return Kind == Memory && (!Mem.Size || Mem.Size == 64);
303 bool isMem80() const {
304 return Kind == Memory && (!Mem.Size || Mem.Size == 80);
306 bool isMem128() const {
307 return Kind == Memory && (!Mem.Size || Mem.Size == 128);
309 bool isMem256() const {
310 return Kind == Memory && (!Mem.Size || Mem.Size == 256);
312 bool isMem512() const {
313 return Kind == Memory && (!Mem.Size || Mem.Size == 512);
315 bool isMemIndexReg(unsigned LowR, unsigned HighR) const {
316 assert(Kind == Memory && "Invalid access!");
317 return Mem.IndexReg >= LowR && Mem.IndexReg <= HighR;
320 bool isMem64_RC128() const {
321 return isMem64() && isMemIndexReg(X86::XMM0, X86::XMM15);
323 bool isMem128_RC128() const {
324 return isMem128() && isMemIndexReg(X86::XMM0, X86::XMM15);
326 bool isMem128_RC256() const {
327 return isMem128() && isMemIndexReg(X86::YMM0, X86::YMM15);
329 bool isMem256_RC128() const {
330 return isMem256() && isMemIndexReg(X86::XMM0, X86::XMM15);
332 bool isMem256_RC256() const {
333 return isMem256() && isMemIndexReg(X86::YMM0, X86::YMM15);
336 bool isMem64_RC128X() const {
337 return isMem64() && isMemIndexReg(X86::XMM0, X86::XMM31);
339 bool isMem128_RC128X() const {
340 return isMem128() && isMemIndexReg(X86::XMM0, X86::XMM31);
342 bool isMem128_RC256X() const {
343 return isMem128() && isMemIndexReg(X86::YMM0, X86::YMM31);
345 bool isMem256_RC128X() const {
346 return isMem256() && isMemIndexReg(X86::XMM0, X86::XMM31);
348 bool isMem256_RC256X() const {
349 return isMem256() && isMemIndexReg(X86::YMM0, X86::YMM31);
351 bool isMem256_RC512() const {
352 return isMem256() && isMemIndexReg(X86::ZMM0, X86::ZMM31);
354 bool isMem512_RC256X() const {
355 return isMem512() && isMemIndexReg(X86::YMM0, X86::YMM31);
357 bool isMem512_RC512() const {
358 return isMem512() && isMemIndexReg(X86::ZMM0, X86::ZMM31);
361 bool isAbsMem() const {
362 return Kind == Memory && !getMemSegReg() && !getMemBaseReg() &&
363 !getMemIndexReg() && getMemScale() == 1;
365 bool isAVX512RC() const{
369 bool isAbsMem16() const {
370 return isAbsMem() && Mem.ModeSize == 16;
373 bool isSrcIdx() const {
374 return !getMemIndexReg() && getMemScale() == 1 &&
375 (getMemBaseReg() == X86::RSI || getMemBaseReg() == X86::ESI ||
376 getMemBaseReg() == X86::SI) && isa<MCConstantExpr>(getMemDisp()) &&
377 cast<MCConstantExpr>(getMemDisp())->getValue() == 0;
379 bool isSrcIdx8() const {
380 return isMem8() && isSrcIdx();
382 bool isSrcIdx16() const {
383 return isMem16() && isSrcIdx();
385 bool isSrcIdx32() const {
386 return isMem32() && isSrcIdx();
388 bool isSrcIdx64() const {
389 return isMem64() && isSrcIdx();
392 bool isDstIdx() const {
393 return !getMemIndexReg() && getMemScale() == 1 &&
394 (getMemSegReg() == 0 || getMemSegReg() == X86::ES) &&
395 (getMemBaseReg() == X86::RDI || getMemBaseReg() == X86::EDI ||
396 getMemBaseReg() == X86::DI) && isa<MCConstantExpr>(getMemDisp()) &&
397 cast<MCConstantExpr>(getMemDisp())->getValue() == 0;
399 bool isDstIdx8() const {
400 return isMem8() && isDstIdx();
402 bool isDstIdx16() const {
403 return isMem16() && isDstIdx();
405 bool isDstIdx32() const {
406 return isMem32() && isDstIdx();
408 bool isDstIdx64() const {
409 return isMem64() && isDstIdx();
412 bool isMemOffs() const {
413 return Kind == Memory && !getMemBaseReg() && !getMemIndexReg() &&
417 bool isMemOffs16_8() const {
418 return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 8);
420 bool isMemOffs16_16() const {
421 return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 16);
423 bool isMemOffs16_32() const {
424 return isMemOffs() && Mem.ModeSize == 16 && (!Mem.Size || Mem.Size == 32);
426 bool isMemOffs32_8() const {
427 return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 8);
429 bool isMemOffs32_16() const {
430 return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 16);
432 bool isMemOffs32_32() const {
433 return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 32);
435 bool isMemOffs32_64() const {
436 return isMemOffs() && Mem.ModeSize == 32 && (!Mem.Size || Mem.Size == 64);
438 bool isMemOffs64_8() const {
439 return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 8);
441 bool isMemOffs64_16() const {
442 return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 16);
444 bool isMemOffs64_32() const {
445 return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 32);
447 bool isMemOffs64_64() const {
448 return isMemOffs() && Mem.ModeSize == 64 && (!Mem.Size || Mem.Size == 64);
  // Remaining Kind predicates for the matcher.
  bool isPrefix() const { return Kind == Prefix; }
  bool isReg() const override { return Kind == Register; }
  bool isDXReg() const { return Kind == DXRegister; }
455 bool isGR32orGR64() const {
456 return Kind == Register &&
457 (X86MCRegisterClasses[X86::GR32RegClassID].contains(getReg()) ||
458 X86MCRegisterClasses[X86::GR64RegClassID].contains(getReg()));
461 bool isVK1Pair() const {
462 return Kind == Register &&
463 X86MCRegisterClasses[X86::VK1RegClassID].contains(getReg());
466 bool isVK2Pair() const {
467 return Kind == Register &&
468 X86MCRegisterClasses[X86::VK2RegClassID].contains(getReg());
471 bool isVK4Pair() const {
472 return Kind == Register &&
473 X86MCRegisterClasses[X86::VK4RegClassID].contains(getReg());
476 bool isVK8Pair() const {
477 return Kind == Register &&
478 X86MCRegisterClasses[X86::VK8RegClassID].contains(getReg());
481 bool isVK16Pair() const {
482 return Kind == Register &&
483 X86MCRegisterClasses[X86::VK16RegClassID].contains(getReg());
486 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
487 // Add as immediates when possible.
488 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
489 Inst.addOperand(MCOperand::createImm(CE->getValue()));
491 Inst.addOperand(MCOperand::createExpr(Expr));
494 void addRegOperands(MCInst &Inst, unsigned N) const {
495 assert(N == 1 && "Invalid number of operands!");
496 Inst.addOperand(MCOperand::createReg(getReg()));
499 void addGR32orGR64Operands(MCInst &Inst, unsigned N) const {
500 assert(N == 1 && "Invalid number of operands!");
501 MCRegister RegNo = getReg();
502 if (X86MCRegisterClasses[X86::GR64RegClassID].contains(RegNo))
503 RegNo = getX86SubSuperRegister(RegNo, 32);
504 Inst.addOperand(MCOperand::createReg(RegNo));
507 void addAVX512RCOperands(MCInst &Inst, unsigned N) const {
508 assert(N == 1 && "Invalid number of operands!");
509 addExpr(Inst, getImm());
512 void addImmOperands(MCInst &Inst, unsigned N) const {
513 assert(N == 1 && "Invalid number of operands!");
514 addExpr(Inst, getImm());
517 void addMaskPairOperands(MCInst &Inst, unsigned N) const {
518 assert(N == 1 && "Invalid number of operands!");
519 unsigned Reg = getReg();
538 Inst.addOperand(MCOperand::createReg(Reg));
541 void addMemOperands(MCInst &Inst, unsigned N) const {
542 assert((N == 5) && "Invalid number of operands!");
543 Inst.addOperand(MCOperand::createReg(getMemBaseReg()));
544 Inst.addOperand(MCOperand::createImm(getMemScale()));
545 Inst.addOperand(MCOperand::createReg(getMemIndexReg()));
546 addExpr(Inst, getMemDisp());
547 Inst.addOperand(MCOperand::createReg(getMemSegReg()));
550 void addAbsMemOperands(MCInst &Inst, unsigned N) const {
551 assert((N == 1) && "Invalid number of operands!");
552 // Add as immediates when possible.
553 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemDisp()))
554 Inst.addOperand(MCOperand::createImm(CE->getValue()));
556 Inst.addOperand(MCOperand::createExpr(getMemDisp()));
559 void addSrcIdxOperands(MCInst &Inst, unsigned N) const {
560 assert((N == 2) && "Invalid number of operands!");
561 Inst.addOperand(MCOperand::createReg(getMemBaseReg()));
562 Inst.addOperand(MCOperand::createReg(getMemSegReg()));
565 void addDstIdxOperands(MCInst &Inst, unsigned N) const {
566 assert((N == 1) && "Invalid number of operands!");
567 Inst.addOperand(MCOperand::createReg(getMemBaseReg()));
570 void addMemOffsOperands(MCInst &Inst, unsigned N) const {
571 assert((N == 2) && "Invalid number of operands!");
572 // Add as immediates when possible.
573 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getMemDisp()))
574 Inst.addOperand(MCOperand::createImm(CE->getValue()));
576 Inst.addOperand(MCOperand::createExpr(getMemDisp()));
577 Inst.addOperand(MCOperand::createReg(getMemSegReg()));
580 static std::unique_ptr<X86Operand> CreateToken(StringRef Str, SMLoc Loc) {
581 SMLoc EndLoc = SMLoc::getFromPointer(Loc.getPointer() + Str.size());
582 auto Res = std::make_unique<X86Operand>(Token, Loc, EndLoc);
583 Res->Tok.Data = Str.data();
584 Res->Tok.Length = Str.size();
588 static std::unique_ptr<X86Operand>
589 CreateReg(unsigned RegNo, SMLoc StartLoc, SMLoc EndLoc,
590 bool AddressOf = false, SMLoc OffsetOfLoc = SMLoc(),
591 StringRef SymName = StringRef(), void *OpDecl = nullptr) {
592 auto Res = std::make_unique<X86Operand>(Register, StartLoc, EndLoc);
593 Res->Reg.RegNo = RegNo;
594 Res->AddressOf = AddressOf;
595 Res->OffsetOfLoc = OffsetOfLoc;
596 Res->SymName = SymName;
597 Res->OpDecl = OpDecl;
601 static std::unique_ptr<X86Operand>
602 CreateDXReg(SMLoc StartLoc, SMLoc EndLoc) {
603 return std::make_unique<X86Operand>(DXRegister, StartLoc, EndLoc);
606 static std::unique_ptr<X86Operand>
607 CreatePrefix(unsigned Prefixes, SMLoc StartLoc, SMLoc EndLoc) {
608 auto Res = std::make_unique<X86Operand>(Prefix, StartLoc, EndLoc);
609 Res->Pref.Prefixes = Prefixes;
613 static std::unique_ptr<X86Operand> CreateImm(const MCExpr *Val,
614 SMLoc StartLoc, SMLoc EndLoc,
615 StringRef SymName = StringRef(),
616 void *OpDecl = nullptr,
617 bool GlobalRef = true) {
618 auto Res = std::make_unique<X86Operand>(Immediate, StartLoc, EndLoc);
620 Res->Imm.LocalRef = !GlobalRef;
621 Res->SymName = SymName;
622 Res->OpDecl = OpDecl;
623 Res->AddressOf = true;
627 /// Create an absolute memory operand.
628 static std::unique_ptr<X86Operand>
629 CreateMem(unsigned ModeSize, const MCExpr *Disp, SMLoc StartLoc, SMLoc EndLoc,
630 unsigned Size = 0, StringRef SymName = StringRef(),
631 void *OpDecl = nullptr, unsigned FrontendSize = 0) {
632 auto Res = std::make_unique<X86Operand>(Memory, StartLoc, EndLoc);
634 Res->Mem.Disp = Disp;
635 Res->Mem.BaseReg = 0;
636 Res->Mem.IndexReg = 0;
638 Res->Mem.Size = Size;
639 Res->Mem.ModeSize = ModeSize;
640 Res->Mem.FrontendSize = FrontendSize;
641 Res->SymName = SymName;
642 Res->OpDecl = OpDecl;
643 Res->AddressOf = false;
647 /// Create a generalized memory operand.
648 static std::unique_ptr<X86Operand>
649 CreateMem(unsigned ModeSize, unsigned SegReg, const MCExpr *Disp,
650 unsigned BaseReg, unsigned IndexReg, unsigned Scale, SMLoc StartLoc,
651 SMLoc EndLoc, unsigned Size = 0, StringRef SymName = StringRef(),
652 void *OpDecl = nullptr, unsigned FrontendSize = 0) {
653 // We should never just have a displacement, that should be parsed as an
654 // absolute memory operand.
655 assert((SegReg || BaseReg || IndexReg) && "Invalid memory operand!");
657 // The scale should always be one of {1,2,4,8}.
658 assert(((Scale == 1 || Scale == 2 || Scale == 4 || Scale == 8)) &&
660 auto Res = std::make_unique<X86Operand>(Memory, StartLoc, EndLoc);
661 Res->Mem.SegReg = SegReg;
662 Res->Mem.Disp = Disp;
663 Res->Mem.BaseReg = BaseReg;
664 Res->Mem.IndexReg = IndexReg;
665 Res->Mem.Scale = Scale;
666 Res->Mem.Size = Size;
667 Res->Mem.ModeSize = ModeSize;
668 Res->Mem.FrontendSize = FrontendSize;
669 Res->SymName = SymName;
670 Res->OpDecl = OpDecl;
671 Res->AddressOf = false;
676 } // end namespace llvm
678 #endif // LLVM_LIB_TARGET_X86_ASMPARSER_X86OPERAND_H