1 //===-- X86InstrInfo.td - Main X86 Instruction Definition --*- tablegen -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file describes the X86 instruction set, defining the instructions, and
10 // properties of the instructions which are needed for code generation, machine
11 // code emission, and analysis.
13 //===----------------------------------------------------------------------===//
15 //===----------------------------------------------------------------------===//
16 // X86 specific DAG Nodes.
// EFLAGS-producing integer compare/test: (i32 flags) = op int, int.
def SDTX86CmpTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisInt<1>,
                                         SDTCisSameAs<1, 2>]>;

// EFLAGS-producing FP compare: (i32 flags) = op fp, fp.
def SDTX86FCmp : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisFP<1>,
                                      SDTCisSameAs<1, 2>]>;

// Conditional move: res = cmov lhs, rhs, cond(i8), eflags(i32).
def SDTX86Cmov : SDTypeProfile<1, 4,
                               [SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>,
                                SDTCisVT<3, i8>, SDTCisVT<4, i32>]>;

// Unary and binary operator instructions that set EFLAGS as a side-effect.
def SDTUnaryArithWithFlags : SDTypeProfile<2, 1,
                                           [SDTCisSameAs<0, 2>,
                                            SDTCisInt<0>, SDTCisVT<1, i32>]>;

def SDTBinaryArithWithFlags : SDTypeProfile<2, 2,
                                            [SDTCisSameAs<0, 2>,
                                             SDTCisSameAs<0, 3>,
                                             SDTCisInt<0>, SDTCisVT<1, i32>]>;

// SDTBinaryArithWithFlagsInOut - RES1, EFLAGS = op LHS, RHS, EFLAGS
def SDTBinaryArithWithFlagsInOut : SDTypeProfile<2, 3,
                                                 [SDTCisSameAs<0, 2>,
                                                  SDTCisSameAs<0, 3>,
                                                  SDTCisInt<0>,
                                                  SDTCisVT<1, i32>,
                                                  SDTCisVT<4, i32>]>;

// RES1, RES2, FLAGS = op LHS, RHS
// NOTE(review): the interior SDTCisSameAs constraint rows were lost in
// extraction; the rows below are the only type-consistent completion given the
// surviving SDTCisVT<1, i32> line — verify against upstream LLVM.
def SDT2ResultBinaryArithWithFlags : SDTypeProfile<3, 2,
                                                   [SDTCisSameAs<0, 2>,
                                                    SDTCisSameAs<0, 3>,
                                                    SDTCisSameAs<0, 4>,
                                                    SDTCisInt<0>, SDTCisVT<1, i32>]>;

// Conditional branch: brcond dest(OtherVT), cond(i8), eflags(i32).
def SDTX86BrCond : SDTypeProfile<0, 3,
                                 [SDTCisVT<0, OtherVT>,
                                  SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;

// SETcc: (i8 res) = setcc cond(i8), eflags(i32).
def SDTX86SetCC : SDTypeProfile<1, 2,
                                [SDTCisVT<0, i8>,
                                 SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;
def SDTX86SetCC_C : SDTypeProfile<1, 2,
                                  [SDTCisInt<0>,
                                   SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;

// SAHF: (i32 eflags) = op ah(i8).
def SDTX86sahf : SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisVT<1, i8>]>;

// RDRAND/RDSEED: (int value, i32 eflags) = op.
def SDTX86rdrand : SDTypeProfile<2, 0, [SDTCisInt<0>, SDTCisVT<1, i32>]>;

def SDTX86rdpkru : SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;
def SDTX86wrpkru : SDTypeProfile<0, 3, [SDTCisVT<0, i32>, SDTCisVT<1, i32>,
                                        SDTCisVT<2, i32>]>;

// Compare-and-exchange: ptr, value, width.
// NOTE(review): trailing constraint line restored from upstream — verify.
def SDTX86cas : SDTypeProfile<0, 3, [SDTCisPtrTy<0>, SDTCisInt<1>,
                                     SDTCisVT<2, i8>]>;
def SDTX86cas8pair : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
def SDTX86cas16pair : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisVT<1, i64>]>;

// Locked RMW arithmetic on memory; only the EFLAGS result is produced.
// NOTE(review): continuation lines restored from upstream — verify.
def SDTLockBinaryArithWithFlags : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
                                                       SDTCisPtrTy<1>,
                                                       SDTCisInt<2>]>;

def SDTLockUnaryArithWithFlags : SDTypeProfile<1, 1, [SDTCisVT<0, i32>,
                                                      SDTCisPtrTy<1>]>;
// Return: variadic; first operand is the stack adjustment amount (i32).
def SDTX86Ret : SDTypeProfile<0, -1, [SDTCisVT<0, i32>]>;

def SDT_X86CallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>,
                                          SDTCisVT<1, i32>]>;
def SDT_X86CallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>,
                                      SDTCisVT<1, i32>]>;

// Calls and NOTRACK indirect branches take a pointer target.
def SDT_X86Call : SDTypeProfile<0, -1, [SDTCisVT<0, iPTR>]>;

def SDT_X86NtBrind : SDTypeProfile<0, -1, [SDTCisVT<0, iPTR>]>;

// NOTE(review): trailing constraint lines of the next two profiles were lost
// in extraction; restored from upstream LLVM — verify.
def SDT_X86VASTART_SAVE_XMM_REGS : SDTypeProfile<0, -1, [SDTCisVT<0, i8>,
                                                         SDTCisPtrTy<1>]>;

def SDT_X86VAARG : SDTypeProfile<1, -1, [SDTCisPtrTy<0>,
                                         SDTCisPtrTy<1>,
                                         SDTCisVT<2, i32>,
                                         SDTCisVT<3, i8>,
                                         SDTCisVT<4, i32>]>;

// REP string ops carry the value type being stored/moved.
def SDTX86RepStr : SDTypeProfile<0, 1, [SDTCisVT<0, OtherVT>]>;

def SDTX86Void : SDTypeProfile<0, 0, []>;

// Wrapper around a global/constant-pool/jump-table address.
def SDTX86Wrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>;

def SDT_X86TLSADDR : SDTypeProfile<0, 1, [SDTCisInt<0>]>;

def SDT_X86TLSBASEADDR : SDTypeProfile<0, 1, [SDTCisInt<0>]>;

def SDT_X86TLSCALL : SDTypeProfile<0, 1, [SDTCisInt<0>]>;

def SDT_X86DYN_ALLOCA : SDTypeProfile<0, 1, [SDTCisVT<0, iPTR>]>;

def SDT_X86SEG_ALLOCA : SDTypeProfile<1, 1, [SDTCisVT<0, iPTR>, SDTCisVT<1, iPTR>]>;

def SDT_X86PROBED_ALLOCA : SDTypeProfile<1, 1, [SDTCisVT<0, iPTR>, SDTCisVT<1, iPTR>]>;

def SDT_X86EHRET : SDTypeProfile<0, 1, [SDTCisInt<0>]>;

def SDT_X86TCRET : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisVT<1, i32>]>;

def SDT_X86MEMBARRIER : SDTypeProfile<0, 0, []>;

def SDT_X86ENQCMD : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
                                         SDTCisPtrTy<1>, SDTCisSameAs<1, 2>]>;

// Key Locker AES: (v2i64 out, i32 eflags) = op v2i64 data, ptr handle.
// NOTE(review): continuation lines restored from upstream — verify.
def SDT_X86AESENCDECKL : SDTypeProfile<2, 2, [SDTCisVT<0, v2i64>,
                                              SDTCisVT<1, i32>,
                                              SDTCisVT<2, v2i64>,
                                              SDTCisPtrTy<3>]>;
// Memory-ordering fences.
def X86MemBarrier : SDNode<"X86ISD::MEMBARRIER", SDT_X86MEMBARRIER,
                           [SDNPHasChain,SDNPSideEffect]>;
def X86MFence : SDNode<"X86ISD::MFENCE", SDT_X86MEMBARRIER,
                       [SDNPHasChain,SDNPSideEffect]>;

// Bit scans, modeled with a flags result.
def X86bsf : SDNode<"X86ISD::BSF", SDTUnaryArithWithFlags>;
def X86bsr : SDNode<"X86ISD::BSR", SDTUnaryArithWithFlags>;
def X86fshl : SDNode<"X86ISD::FSHL", SDTIntShiftDOp>;
def X86fshr : SDNode<"X86ISD::FSHR", SDTIntShiftDOp>;

def X86cmp : SDNode<"X86ISD::CMP" , SDTX86CmpTest>;
def X86fcmp : SDNode<"X86ISD::FCMP", SDTX86FCmp>;
// Strict FP compares are chained so they cannot be reordered/speculated.
def X86strict_fcmp : SDNode<"X86ISD::STRICT_FCMP", SDTX86FCmp, [SDNPHasChain]>;
def X86strict_fcmps : SDNode<"X86ISD::STRICT_FCMPS", SDTX86FCmp, [SDNPHasChain]>;
def X86bt : SDNode<"X86ISD::BT", SDTX86CmpTest>;

def X86cmov : SDNode<"X86ISD::CMOV", SDTX86Cmov>;
def X86brcond : SDNode<"X86ISD::BRCOND", SDTX86BrCond,
                       [SDNPHasChain]>;
def X86setcc : SDNode<"X86ISD::SETCC", SDTX86SetCC>;
def X86setcc_c : SDNode<"X86ISD::SETCC_CARRY", SDTX86SetCC_C>;

def X86rdrand : SDNode<"X86ISD::RDRAND", SDTX86rdrand,
                       [SDNPHasChain, SDNPSideEffect]>;

def X86rdseed : SDNode<"X86ISD::RDSEED", SDTX86rdrand,
                       [SDNPHasChain, SDNPSideEffect]>;

def X86rdpkru : SDNode<"X86ISD::RDPKRU", SDTX86rdpkru,
                       [SDNPHasChain, SDNPSideEffect]>;
def X86wrpkru : SDNode<"X86ISD::WRPKRU", SDTX86wrpkru,
                       [SDNPHasChain, SDNPSideEffect]>;

// Locked compare-and-exchange variants; read/write memory and glue the
// EFLAGS result to their consumers.
def X86cas : SDNode<"X86ISD::LCMPXCHG_DAG", SDTX86cas,
                    [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
                     SDNPMayLoad, SDNPMemOperand]>;
def X86cas8 : SDNode<"X86ISD::LCMPXCHG8_DAG", SDTX86cas8pair,
                     [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
                      SDNPMayLoad, SDNPMemOperand]>;
def X86cas16 : SDNode<"X86ISD::LCMPXCHG16_DAG", SDTX86cas16pair,
                      [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
                       SDNPMayLoad, SDNPMemOperand]>;

def X86retflag : SDNode<"X86ISD::RET_FLAG", SDTX86Ret,
                        [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def X86iret : SDNode<"X86ISD::IRET", SDTX86Ret,
                     [SDNPHasChain, SDNPOptInGlue]>;

def X86vastart_save_xmm_regs :
    SDNode<"X86ISD::VASTART_SAVE_XMM_REGS",
           SDT_X86VASTART_SAVE_XMM_REGS,
           [SDNPHasChain, SDNPMayStore, SDNPMemOperand, SDNPVariadic]>;
// NOTE(review): the "def X86vaarg64 :" / "def X86vaarg_x32 :" header lines,
// the "def X86callseq_end :" line, and several trailing flag lines were lost
// in extraction; restored from upstream LLVM — verify.
def X86vaarg64 :
    SDNode<"X86ISD::VAARG_64", SDT_X86VAARG,
           [SDNPHasChain, SDNPMayLoad, SDNPMayStore,
            SDNPMemOperand]>;
def X86vaarg_x32 :
    SDNode<"X86ISD::VAARG_X32", SDT_X86VAARG,
           [SDNPHasChain, SDNPMayLoad, SDNPMayStore,
            SDNPMemOperand]>;

def X86callseq_start :
    SDNode<"ISD::CALLSEQ_START", SDT_X86CallSeqStart,
           [SDNPHasChain, SDNPOutGlue]>;
def X86callseq_end :
    SDNode<"ISD::CALLSEQ_END", SDT_X86CallSeqEnd,
           [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;

def X86call : SDNode<"X86ISD::CALL", SDT_X86Call,
                     [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
                      SDNPVariadic]>;

def X86call_rvmarker : SDNode<"X86ISD::CALL_RVMARKER", SDT_X86Call,
                              [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
                               SDNPVariadic]>;

// CET NOTRACK call / indirect branch.
def X86NoTrackCall : SDNode<"X86ISD::NT_CALL", SDT_X86Call,
                            [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
                             SDNPVariadic]>;
def X86NoTrackBrind : SDNode<"X86ISD::NT_BRIND", SDT_X86NtBrind,
                             [SDNPHasChain]>;

def X86rep_stos: SDNode<"X86ISD::REP_STOS", SDTX86RepStr,
                        [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore]>;
def X86rep_movs: SDNode<"X86ISD::REP_MOVS", SDTX86RepStr,
                        [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
                         SDNPMayLoad]>;

def X86Wrapper : SDNode<"X86ISD::Wrapper", SDTX86Wrapper>;
def X86WrapperRIP : SDNode<"X86ISD::WrapperRIP", SDTX86Wrapper>;

def X86RecoverFrameAlloc : SDNode<"ISD::LOCAL_RECOVER",
                                  SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
                                                       SDTCisInt<1>]>>;

def X86tlsaddr : SDNode<"X86ISD::TLSADDR", SDT_X86TLSADDR,
                        [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;

def X86tlsbaseaddr : SDNode<"X86ISD::TLSBASEADDR", SDT_X86TLSBASEADDR,
                            [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;

def X86ehret : SDNode<"X86ISD::EH_RETURN", SDT_X86EHRET,
                      [SDNPHasChain]>;

def X86eh_sjlj_setjmp : SDNode<"X86ISD::EH_SJLJ_SETJMP",
                               SDTypeProfile<1, 1, [SDTCisInt<0>,
                                                    SDTCisPtrTy<1>]>,
                               [SDNPHasChain, SDNPSideEffect]>;
def X86eh_sjlj_longjmp : SDNode<"X86ISD::EH_SJLJ_LONGJMP",
                                SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>,
                                [SDNPHasChain, SDNPSideEffect]>;
def X86eh_sjlj_setup_dispatch : SDNode<"X86ISD::EH_SJLJ_SETUP_DISPATCH",
                                       SDTypeProfile<0, 0, []>,
                                       [SDNPHasChain, SDNPSideEffect]>;

def X86tcret : SDNode<"X86ISD::TC_RETURN", SDT_X86TCRET,
                      [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
// EFLAGS-setting integer arithmetic.
// NOTE(review): the dedup-dropped "[SDNPCommutative]>;" continuations were
// restored from upstream — verify.
def X86add_flag : SDNode<"X86ISD::ADD", SDTBinaryArithWithFlags,
                         [SDNPCommutative]>;
def X86sub_flag : SDNode<"X86ISD::SUB", SDTBinaryArithWithFlags>;
def X86smul_flag : SDNode<"X86ISD::SMUL", SDTBinaryArithWithFlags,
                          [SDNPCommutative]>;
def X86umul_flag : SDNode<"X86ISD::UMUL", SDT2ResultBinaryArithWithFlags,
                          [SDNPCommutative]>;
def X86adc_flag : SDNode<"X86ISD::ADC", SDTBinaryArithWithFlagsInOut>;
def X86sbb_flag : SDNode<"X86ISD::SBB", SDTBinaryArithWithFlagsInOut>;

def X86or_flag : SDNode<"X86ISD::OR", SDTBinaryArithWithFlags,
                        [SDNPCommutative]>;
def X86xor_flag : SDNode<"X86ISD::XOR", SDTBinaryArithWithFlags,
                         [SDNPCommutative]>;
def X86and_flag : SDNode<"X86ISD::AND", SDTBinaryArithWithFlags,
                         [SDNPCommutative]>;

// LOCK-prefixed read-modify-write arithmetic on memory.
def X86lock_add : SDNode<"X86ISD::LADD", SDTLockBinaryArithWithFlags,
                         [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
                          SDNPMemOperand]>;
def X86lock_sub : SDNode<"X86ISD::LSUB", SDTLockBinaryArithWithFlags,
                         [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
                          SDNPMemOperand]>;
def X86lock_or : SDNode<"X86ISD::LOR", SDTLockBinaryArithWithFlags,
                        [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
                         SDNPMemOperand]>;
def X86lock_xor : SDNode<"X86ISD::LXOR", SDTLockBinaryArithWithFlags,
                         [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
                          SDNPMemOperand]>;
def X86lock_and : SDNode<"X86ISD::LAND", SDTLockBinaryArithWithFlags,
                         [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
                          SDNPMemOperand]>;

// BMI/TBM bit manipulation.
def X86bextr : SDNode<"X86ISD::BEXTR", SDTIntBinOp>;
def X86bextri : SDNode<"X86ISD::BEXTRI", SDTIntBinOp>;

def X86bzhi : SDNode<"X86ISD::BZHI", SDTIntBinOp>;

def X86pdep : SDNode<"X86ISD::PDEP", SDTIntBinOp>;
def X86pext : SDNode<"X86ISD::PEXT", SDTIntBinOp>;

def X86mul_imm : SDNode<"X86ISD::MUL_IMM", SDTIntBinOp>;

// Dynamic / segmented / probed stack allocation helpers.
def X86DynAlloca : SDNode<"X86ISD::DYN_ALLOCA", SDT_X86DYN_ALLOCA,
                          [SDNPHasChain, SDNPOutGlue]>;

def X86SegAlloca : SDNode<"X86ISD::SEG_ALLOCA", SDT_X86SEG_ALLOCA,
                          [SDNPHasChain]>;

def X86ProbedAlloca : SDNode<"X86ISD::PROBED_ALLOCA", SDT_X86PROBED_ALLOCA,
                             [SDNPHasChain]>;

def X86TLSCall : SDNode<"X86ISD::TLSCALL", SDT_X86TLSCALL,
                        [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;

def X86lwpins : SDNode<"X86ISD::LWPINS",
                       SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisInt<1>,
                                            SDTCisVT<2, i32>, SDTCisVT<3, i32>]>,
                       [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPSideEffect]>;

def X86umwait : SDNode<"X86ISD::UMWAIT",
                       SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisInt<1>,
                                            SDTCisVT<2, i32>, SDTCisVT<3, i32>]>,
                       [SDNPHasChain, SDNPSideEffect]>;

def X86tpause : SDNode<"X86ISD::TPAUSE",
                       SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisInt<1>,
                                            SDTCisVT<2, i32>, SDTCisVT<3, i32>]>,
                       [SDNPHasChain, SDNPSideEffect]>;

def X86enqcmd : SDNode<"X86ISD::ENQCMD", SDT_X86ENQCMD,
                       [SDNPHasChain, SDNPSideEffect]>;
def X86enqcmds : SDNode<"X86ISD::ENQCMDS", SDT_X86ENQCMD,
                        [SDNPHasChain, SDNPSideEffect]>;
def X86testui : SDNode<"X86ISD::TESTUI",
                       SDTypeProfile<1, 0, [SDTCisVT<0, i32>]>,
                       [SDNPHasChain, SDNPSideEffect]>;

// Key Locker AES encode/decode; loads the key handle from memory.
def X86aesenc128kl : SDNode<"X86ISD::AESENC128KL", SDT_X86AESENCDECKL,
                            [SDNPHasChain, SDNPMayLoad, SDNPSideEffect,
                             SDNPMemOperand]>;
def X86aesdec128kl : SDNode<"X86ISD::AESDEC128KL", SDT_X86AESENCDECKL,
                            [SDNPHasChain, SDNPMayLoad, SDNPSideEffect,
                             SDNPMemOperand]>;
def X86aesenc256kl : SDNode<"X86ISD::AESENC256KL", SDT_X86AESENCDECKL,
                            [SDNPHasChain, SDNPMayLoad, SDNPSideEffect,
                             SDNPMemOperand]>;
def X86aesdec256kl : SDNode<"X86ISD::AESDEC256KL", SDT_X86AESENCDECKL,
                            [SDNPHasChain, SDNPMayLoad, SDNPSideEffect,
                             SDNPMemOperand]>;
//===----------------------------------------------------------------------===//
// X86 Operand Definitions.
//===----------------------------------------------------------------------===//

// A version of ptr_rc which excludes SP, ESP, and RSP. This is used for
// the index operand of an address, to conform to x86 encoding restrictions.
def ptr_rc_nosp : PointerLikeRegClass<1>;

// *mem - Operand definitions for the funky X86 addressing mode operands.
def X86MemAsmOperand : AsmOperandClass {
  let Name = "Mem";
}
let RenderMethod = "addMemOperands", SuperClasses = [X86MemAsmOperand] in {
  def X86Mem8AsmOperand : AsmOperandClass { let Name = "Mem8"; }
  def X86Mem16AsmOperand : AsmOperandClass { let Name = "Mem16"; }
  def X86Mem32AsmOperand : AsmOperandClass { let Name = "Mem32"; }
  def X86Mem64AsmOperand : AsmOperandClass { let Name = "Mem64"; }
  def X86Mem80AsmOperand : AsmOperandClass { let Name = "Mem80"; }
  def X86Mem128AsmOperand : AsmOperandClass { let Name = "Mem128"; }
  def X86Mem256AsmOperand : AsmOperandClass { let Name = "Mem256"; }
  def X86Mem512AsmOperand : AsmOperandClass { let Name = "Mem512"; }
  // Gather mem operands
  def X86Mem64_RC128Operand : AsmOperandClass { let Name = "Mem64_RC128"; }
  def X86Mem128_RC128Operand : AsmOperandClass { let Name = "Mem128_RC128"; }
  def X86Mem256_RC128Operand : AsmOperandClass { let Name = "Mem256_RC128"; }
  def X86Mem128_RC256Operand : AsmOperandClass { let Name = "Mem128_RC256"; }
  def X86Mem256_RC256Operand : AsmOperandClass { let Name = "Mem256_RC256"; }

  def X86Mem64_RC128XOperand : AsmOperandClass { let Name = "Mem64_RC128X"; }
  def X86Mem128_RC128XOperand : AsmOperandClass { let Name = "Mem128_RC128X"; }
  def X86Mem256_RC128XOperand : AsmOperandClass { let Name = "Mem256_RC128X"; }
  def X86Mem128_RC256XOperand : AsmOperandClass { let Name = "Mem128_RC256X"; }
  def X86Mem256_RC256XOperand : AsmOperandClass { let Name = "Mem256_RC256X"; }
  def X86Mem512_RC256XOperand : AsmOperandClass { let Name = "Mem512_RC256X"; }
  def X86Mem256_RC512Operand : AsmOperandClass { let Name = "Mem256_RC512"; }
  def X86Mem512_RC512Operand : AsmOperandClass { let Name = "Mem512_RC512"; }

  def X86SibMemOperand : AsmOperandClass { let Name = "SibMem"; }
}

def X86AbsMemAsmOperand : AsmOperandClass {
  let Name = "AbsMem";
  let SuperClasses = [X86MemAsmOperand];
}

// Base class for x86 memory operands: base, scale, index, displacement,
// segment — matching the standard 5-operand MI memory encoding.
class X86MemOperand<string printMethod,
                    AsmOperandClass parserMatchClass = X86MemAsmOperand> : Operand<iPTR> {
  let PrintMethod = printMethod;
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, SEGMENT_REG);
  let ParserMatchClass = parserMatchClass;
  let OperandType = "OPERAND_MEMORY";
}

// Gather mem operands
class X86VMemOperand<RegisterClass RC, string printMethod,
                     AsmOperandClass parserMatchClass>
    : X86MemOperand<printMethod, parserMatchClass> {
  // Vector index register instead of the scalar (no-SP) index.
  let MIOperandInfo = (ops ptr_rc, i8imm, RC, i32imm, SEGMENT_REG);
}
def anymem : X86MemOperand<"printMemReference">;
def X86any_fcmp : PatFrags<(ops node:$lhs, node:$rhs),
                           [(X86strict_fcmp node:$lhs, node:$rhs),
                            (X86fcmp node:$lhs, node:$rhs)]>;

// FIXME: Right now we allow any size during parsing, but we might want to
// restrict to only unsized memory.
def opaquemem : X86MemOperand<"printMemReference">;

def sibmem: X86MemOperand<"printMemReference", X86SibMemOperand>;

def i8mem : X86MemOperand<"printbytemem", X86Mem8AsmOperand>;
def i16mem : X86MemOperand<"printwordmem", X86Mem16AsmOperand>;
def i32mem : X86MemOperand<"printdwordmem", X86Mem32AsmOperand>;
def i64mem : X86MemOperand<"printqwordmem", X86Mem64AsmOperand>;
def i128mem : X86MemOperand<"printxmmwordmem", X86Mem128AsmOperand>;
def i256mem : X86MemOperand<"printymmwordmem", X86Mem256AsmOperand>;
def i512mem : X86MemOperand<"printzmmwordmem", X86Mem512AsmOperand>;
def f16mem : X86MemOperand<"printwordmem", X86Mem16AsmOperand>;
def f32mem : X86MemOperand<"printdwordmem", X86Mem32AsmOperand>;
def f64mem : X86MemOperand<"printqwordmem", X86Mem64AsmOperand>;
def f80mem : X86MemOperand<"printtbytemem", X86Mem80AsmOperand>;
def f128mem : X86MemOperand<"printxmmwordmem", X86Mem128AsmOperand>;
def f256mem : X86MemOperand<"printymmwordmem", X86Mem256AsmOperand>;
def f512mem : X86MemOperand<"printzmmwordmem", X86Mem512AsmOperand>;

// Gather mem operands
def vx64mem : X86VMemOperand<VR128, "printqwordmem", X86Mem64_RC128Operand>;
def vx128mem : X86VMemOperand<VR128, "printxmmwordmem", X86Mem128_RC128Operand>;
def vx256mem : X86VMemOperand<VR128, "printymmwordmem", X86Mem256_RC128Operand>;
def vy128mem : X86VMemOperand<VR256, "printxmmwordmem", X86Mem128_RC256Operand>;
def vy256mem : X86VMemOperand<VR256, "printymmwordmem", X86Mem256_RC256Operand>;

def vx64xmem : X86VMemOperand<VR128X, "printqwordmem", X86Mem64_RC128XOperand>;
def vx128xmem : X86VMemOperand<VR128X, "printxmmwordmem", X86Mem128_RC128XOperand>;
def vx256xmem : X86VMemOperand<VR128X, "printymmwordmem", X86Mem256_RC128XOperand>;
def vy128xmem : X86VMemOperand<VR256X, "printxmmwordmem", X86Mem128_RC256XOperand>;
def vy256xmem : X86VMemOperand<VR256X, "printymmwordmem", X86Mem256_RC256XOperand>;
def vy512xmem : X86VMemOperand<VR256X, "printzmmwordmem", X86Mem512_RC256XOperand>;
def vz256mem : X86VMemOperand<VR512, "printymmwordmem", X86Mem256_RC512Operand>;
def vz512mem : X86VMemOperand<VR512, "printzmmwordmem", X86Mem512_RC512Operand>;

// A version of i8mem for use on x86-64 and x32 that uses a NOREX GPR instead
// of a plain GPR, so that it doesn't potentially require a REX prefix.
def ptr_rc_norex : PointerLikeRegClass<2>;
def ptr_rc_norex_nosp : PointerLikeRegClass<3>;

def i8mem_NOREX : Operand<iPTR> {
  let PrintMethod = "printbytemem";
  let MIOperandInfo = (ops ptr_rc_norex, i8imm, ptr_rc_norex_nosp, i32imm,
                       SEGMENT_REG);
  let ParserMatchClass = X86Mem8AsmOperand;
  let OperandType = "OPERAND_MEMORY";
}

// GPRs available for tailcall.
// It represents GR32_TC, GR64_TC or GR64_TCW64.
def ptr_rc_tailcall : PointerLikeRegClass<4>;

// Special i32mem for addresses of load folding tail calls. These are not
// allowed to use callee-saved registers since they must be scheduled
// after callee-saved register are popped.
def i32mem_TC : Operand<i32> {
  let PrintMethod = "printdwordmem";
  let MIOperandInfo = (ops ptr_rc_tailcall, i8imm, ptr_rc_tailcall,
                       i32imm, SEGMENT_REG);
  let ParserMatchClass = X86Mem32AsmOperand;
  let OperandType = "OPERAND_MEMORY";
}

// Special i64mem for addresses of load folding tail calls. These are not
// allowed to use callee-saved registers since they must be scheduled
// after callee-saved register are popped.
def i64mem_TC : Operand<i64> {
  let PrintMethod = "printqwordmem";
  let MIOperandInfo = (ops ptr_rc_tailcall, i8imm,
                       ptr_rc_tailcall, i32imm, SEGMENT_REG);
  let ParserMatchClass = X86Mem64AsmOperand;
  let OperandType = "OPERAND_MEMORY";
}

// Special parser to detect 16-bit mode to select 16-bit displacement.
def X86AbsMem16AsmOperand : AsmOperandClass {
  let Name = "AbsMem16";
  let RenderMethod = "addAbsMemOperands";
  let SuperClasses = [X86AbsMemAsmOperand];
}

// Branch targets print as pc-relative values.
class BranchTargetOperand<ValueType ty> : Operand<ty> {
  let OperandType = "OPERAND_PCREL";
  let PrintMethod = "printPCRelImm";
  let ParserMatchClass = X86AbsMemAsmOperand;
}

def i32imm_brtarget : BranchTargetOperand<i32>;
def i16imm_brtarget : BranchTargetOperand<i16>;

// 64-bits but only 32 bits are significant, and those bits are treated as being
// pc relative.
def i64i32imm_brtarget : BranchTargetOperand<i64>;

def brtarget : BranchTargetOperand<OtherVT>;
def brtarget8 : BranchTargetOperand<OtherVT>;
def brtarget16 : BranchTargetOperand<OtherVT> {
  let ParserMatchClass = X86AbsMem16AsmOperand;
}
def brtarget32 : BranchTargetOperand<OtherVT>;
// String-op source-index operand classes (DS:[rSI]).
let RenderMethod = "addSrcIdxOperands" in {
  def X86SrcIdx8Operand : AsmOperandClass {
    let Name = "SrcIdx8";
    let SuperClasses = [X86Mem8AsmOperand];
  }
  def X86SrcIdx16Operand : AsmOperandClass {
    let Name = "SrcIdx16";
    let SuperClasses = [X86Mem16AsmOperand];
  }
  def X86SrcIdx32Operand : AsmOperandClass {
    let Name = "SrcIdx32";
    let SuperClasses = [X86Mem32AsmOperand];
  }
  def X86SrcIdx64Operand : AsmOperandClass {
    let Name = "SrcIdx64";
    let SuperClasses = [X86Mem64AsmOperand];
  }
} // RenderMethod = "addSrcIdxOperands"

// String-op destination-index operand classes (ES:[rDI]).
let RenderMethod = "addDstIdxOperands" in {
  def X86DstIdx8Operand : AsmOperandClass {
    let Name = "DstIdx8";
    let SuperClasses = [X86Mem8AsmOperand];
  }
  def X86DstIdx16Operand : AsmOperandClass {
    let Name = "DstIdx16";
    let SuperClasses = [X86Mem16AsmOperand];
  }
  def X86DstIdx32Operand : AsmOperandClass {
    let Name = "DstIdx32";
    let SuperClasses = [X86Mem32AsmOperand];
  }
  def X86DstIdx64Operand : AsmOperandClass {
    let Name = "DstIdx64";
    let SuperClasses = [X86Mem64AsmOperand];
  }
} // RenderMethod = "addDstIdxOperands"

// Absolute memory-offset operand classes (MemOffs<addrsize>_<opsize>).
let RenderMethod = "addMemOffsOperands" in {
  def X86MemOffs16_8AsmOperand : AsmOperandClass {
    let Name = "MemOffs16_8";
    let SuperClasses = [X86Mem8AsmOperand];
  }
  def X86MemOffs16_16AsmOperand : AsmOperandClass {
    let Name = "MemOffs16_16";
    let SuperClasses = [X86Mem16AsmOperand];
  }
  def X86MemOffs16_32AsmOperand : AsmOperandClass {
    let Name = "MemOffs16_32";
    let SuperClasses = [X86Mem32AsmOperand];
  }
  def X86MemOffs32_8AsmOperand : AsmOperandClass {
    let Name = "MemOffs32_8";
    let SuperClasses = [X86Mem8AsmOperand];
  }
  def X86MemOffs32_16AsmOperand : AsmOperandClass {
    let Name = "MemOffs32_16";
    let SuperClasses = [X86Mem16AsmOperand];
  }
  def X86MemOffs32_32AsmOperand : AsmOperandClass {
    let Name = "MemOffs32_32";
    let SuperClasses = [X86Mem32AsmOperand];
  }
  def X86MemOffs32_64AsmOperand : AsmOperandClass {
    let Name = "MemOffs32_64";
    let SuperClasses = [X86Mem64AsmOperand];
  }
  def X86MemOffs64_8AsmOperand : AsmOperandClass {
    let Name = "MemOffs64_8";
    let SuperClasses = [X86Mem8AsmOperand];
  }
  def X86MemOffs64_16AsmOperand : AsmOperandClass {
    let Name = "MemOffs64_16";
    let SuperClasses = [X86Mem16AsmOperand];
  }
  def X86MemOffs64_32AsmOperand : AsmOperandClass {
    let Name = "MemOffs64_32";
    let SuperClasses = [X86Mem32AsmOperand];
  }
  def X86MemOffs64_64AsmOperand : AsmOperandClass {
    let Name = "MemOffs64_64";
    let SuperClasses = [X86Mem64AsmOperand];
  }
} // RenderMethod = "addMemOffsOperands"

// Source index: just the pointer register plus a segment override.
class X86SrcIdxOperand<string printMethod, AsmOperandClass parserMatchClass>
    : X86MemOperand<printMethod, parserMatchClass> {
  let MIOperandInfo = (ops ptr_rc, SEGMENT_REG);
}

// Destination index: pointer register only (ES segment is implicit).
class X86DstIdxOperand<string printMethod, AsmOperandClass parserMatchClass>
    : X86MemOperand<printMethod, parserMatchClass> {
  let MIOperandInfo = (ops ptr_rc);
}

def srcidx8 : X86SrcIdxOperand<"printSrcIdx8", X86SrcIdx8Operand>;
def srcidx16 : X86SrcIdxOperand<"printSrcIdx16", X86SrcIdx16Operand>;
def srcidx32 : X86SrcIdxOperand<"printSrcIdx32", X86SrcIdx32Operand>;
def srcidx64 : X86SrcIdxOperand<"printSrcIdx64", X86SrcIdx64Operand>;
def dstidx8 : X86DstIdxOperand<"printDstIdx8", X86DstIdx8Operand>;
def dstidx16 : X86DstIdxOperand<"printDstIdx16", X86DstIdx16Operand>;
def dstidx32 : X86DstIdxOperand<"printDstIdx32", X86DstIdx32Operand>;
def dstidx64 : X86DstIdxOperand<"printDstIdx64", X86DstIdx64Operand>;

// Absolute offset: immediate address plus a segment override.
class X86MemOffsOperand<Operand immOperand, string printMethod,
                        AsmOperandClass parserMatchClass>
    : X86MemOperand<printMethod, parserMatchClass> {
  let MIOperandInfo = (ops immOperand, SEGMENT_REG);
}

def offset16_8 : X86MemOffsOperand<i16imm, "printMemOffs8",
                                   X86MemOffs16_8AsmOperand>;
def offset16_16 : X86MemOffsOperand<i16imm, "printMemOffs16",
                                    X86MemOffs16_16AsmOperand>;
def offset16_32 : X86MemOffsOperand<i16imm, "printMemOffs32",
                                    X86MemOffs16_32AsmOperand>;
def offset32_8 : X86MemOffsOperand<i32imm, "printMemOffs8",
                                   X86MemOffs32_8AsmOperand>;
def offset32_16 : X86MemOffsOperand<i32imm, "printMemOffs16",
                                    X86MemOffs32_16AsmOperand>;
def offset32_32 : X86MemOffsOperand<i32imm, "printMemOffs32",
                                    X86MemOffs32_32AsmOperand>;
def offset32_64 : X86MemOffsOperand<i32imm, "printMemOffs64",
                                    X86MemOffs32_64AsmOperand>;
def offset64_8 : X86MemOffsOperand<i64imm, "printMemOffs8",
                                   X86MemOffs64_8AsmOperand>;
def offset64_16 : X86MemOffsOperand<i64imm, "printMemOffs16",
                                    X86MemOffs64_16AsmOperand>;
def offset64_32 : X86MemOffsOperand<i64imm, "printMemOffs32",
                                    X86MemOffs64_32AsmOperand>;
def offset64_64 : X86MemOffsOperand<i64imm, "printMemOffs64",
                                    X86MemOffs64_64AsmOperand>;
// Condition-code operand (encoded as i8).
def ccode : Operand<i8> {
  let PrintMethod = "printCondCode";
  let OperandNamespace = "X86";
  let OperandType = "OPERAND_COND_CODE";
}

class ImmSExtAsmOperandClass : AsmOperandClass {
  let SuperClasses = [ImmAsmOperand];
  let RenderMethod = "addImmOperands";
}

def X86GR32orGR64AsmOperand : AsmOperandClass {
  let Name = "GR32orGR64";
}
def GR32orGR64 : RegisterOperand<GR32> {
  let ParserMatchClass = X86GR32orGR64AsmOperand;
}

def X86GR16orGR32orGR64AsmOperand : AsmOperandClass {
  let Name = "GR16orGR32orGR64";
}
def GR16orGR32orGR64 : RegisterOperand<GR16> {
  let ParserMatchClass = X86GR16orGR32orGR64AsmOperand;
}

def AVX512RCOperand : AsmOperandClass {
  let Name = "AVX512RC";
}
def AVX512RC : Operand<i32> {
  let PrintMethod = "printRoundingControl";
  let OperandNamespace = "X86";
  let OperandType = "OPERAND_ROUNDING_CONTROL";
  let ParserMatchClass = AVX512RCOperand;
}

// Sign-extended immediate classes. We don't need to define the full lattice
// here because there is no instruction with an ambiguity between ImmSExti64i32
// and ImmSExti32i8.
//
// The strange ranges come from the fact that the assembler always works with
// 64-bit immediates, but for a 16-bit target value we want to accept both "-1"
// (which will be a -1ULL), and "0xFF" (-1 in 16-bits).

// [0, 0x000000007FFFFFFF] |
// [0xFFFFFFFF80000000, 0xFFFFFFFFFFFFFFFF]
def ImmSExti64i32AsmOperand : ImmSExtAsmOperandClass {
  let Name = "ImmSExti64i32";
}

// [0, 0x0000007F] | [0x000000000000FF80, 0x000000000000FFFF] |
// [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
def ImmSExti16i8AsmOperand : ImmSExtAsmOperandClass {
  let Name = "ImmSExti16i8";
  let SuperClasses = [ImmSExti64i32AsmOperand];
}

// [0, 0x0000007F] | [0x00000000FFFFFF80, 0x00000000FFFFFFFF] |
// [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
def ImmSExti32i8AsmOperand : ImmSExtAsmOperandClass {
  let Name = "ImmSExti32i8";
}

// [0, 0x0000007F] |
// [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
def ImmSExti64i8AsmOperand : ImmSExtAsmOperandClass {
  let Name = "ImmSExti64i8";
  let SuperClasses = [ImmSExti16i8AsmOperand, ImmSExti32i8AsmOperand,
                      ImmSExti64i32AsmOperand];
}

// 4-bit immediate used by some XOP instructions
// [0, 0xF]
def ImmUnsignedi4AsmOperand : AsmOperandClass {
  let Name = "ImmUnsignedi4";
  let RenderMethod = "addImmOperands";
  let DiagnosticType = "InvalidImmUnsignedi4";
}

// Unsigned immediate used by SSE/AVX instructions
// [0, 0x000000FF] |
// [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
def ImmUnsignedi8AsmOperand : AsmOperandClass {
  let Name = "ImmUnsignedi8";
  let RenderMethod = "addImmOperands";
}

// A couple of more descriptive operand definitions.
// 16-bits but only 8 bits are significant.
def i16i8imm : Operand<i16> {
  let ParserMatchClass = ImmSExti16i8AsmOperand;
  let OperandType = "OPERAND_IMMEDIATE";
}
// 32-bits but only 8 bits are significant.
def i32i8imm : Operand<i32> {
  let ParserMatchClass = ImmSExti32i8AsmOperand;
  let OperandType = "OPERAND_IMMEDIATE";
}

// 64-bits but only 32 bits are significant.
def i64i32imm : Operand<i64> {
  let ParserMatchClass = ImmSExti64i32AsmOperand;
  let OperandType = "OPERAND_IMMEDIATE";
}

// 64-bits but only 8 bits are significant.
def i64i8imm : Operand<i64> {
  let ParserMatchClass = ImmSExti64i8AsmOperand;
  let OperandType = "OPERAND_IMMEDIATE";
}

// Unsigned 4-bit immediate used by some XOP instructions.
def u4imm : Operand<i8> {
  let PrintMethod = "printU8Imm";
  let ParserMatchClass = ImmUnsignedi4AsmOperand;
  let OperandType = "OPERAND_IMMEDIATE";
}

// Unsigned 8-bit immediate used by SSE/AVX instructions.
def u8imm : Operand<i8> {
  let PrintMethod = "printU8Imm";
  let ParserMatchClass = ImmUnsignedi8AsmOperand;
  let OperandType = "OPERAND_IMMEDIATE";
}

// 16-bit immediate but only 8-bits are significant and they are unsigned.
// Used by BT instructions.
def i16u8imm : Operand<i16> {
  let PrintMethod = "printU8Imm";
  let ParserMatchClass = ImmUnsignedi8AsmOperand;
  let OperandType = "OPERAND_IMMEDIATE";
}

// 32-bit immediate but only 8-bits are significant and they are unsigned.
// Used by some SSE/AVX instructions that use intrinsics.
def i32u8imm : Operand<i32> {
  let PrintMethod = "printU8Imm";
  let ParserMatchClass = ImmUnsignedi8AsmOperand;
  let OperandType = "OPERAND_IMMEDIATE";
}

// 64-bit immediate but only 8-bits are significant and they are unsigned.
// Used by BT instructions.
def i64u8imm : Operand<i64> {
  let PrintMethod = "printU8Imm";
  let ParserMatchClass = ImmUnsignedi8AsmOperand;
  let OperandType = "OPERAND_IMMEDIATE";
}
// LEA with a 32-bit result computed from a 64-bit address.
def lea64_32mem : Operand<i32> {
  let PrintMethod = "printMemReference";
  let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm, SEGMENT_REG);
  let ParserMatchClass = X86MemAsmOperand;
}

// Memory operands that use 64-bit pointers in both ILP32 and LP64.
def lea64mem : Operand<i64> {
  let PrintMethod = "printMemReference";
  let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm, SEGMENT_REG);
  let ParserMatchClass = X86MemAsmOperand;
}

// AVX512 mask-register pair operands.
let RenderMethod = "addMaskPairOperands" in {
  def VK1PairAsmOperand : AsmOperandClass { let Name = "VK1Pair"; }
  def VK2PairAsmOperand : AsmOperandClass { let Name = "VK2Pair"; }
  def VK4PairAsmOperand : AsmOperandClass { let Name = "VK4Pair"; }
  def VK8PairAsmOperand : AsmOperandClass { let Name = "VK8Pair"; }
  def VK16PairAsmOperand : AsmOperandClass { let Name = "VK16Pair"; }
}

def VK1Pair : RegisterOperand<VK1PAIR, "printVKPair"> {
  let ParserMatchClass = VK1PairAsmOperand;
}

def VK2Pair : RegisterOperand<VK2PAIR, "printVKPair"> {
  let ParserMatchClass = VK2PairAsmOperand;
}

def VK4Pair : RegisterOperand<VK4PAIR, "printVKPair"> {
  let ParserMatchClass = VK4PairAsmOperand;
}

def VK8Pair : RegisterOperand<VK8PAIR, "printVKPair"> {
  let ParserMatchClass = VK8PairAsmOperand;
}

def VK16Pair : RegisterOperand<VK16PAIR, "printVKPair"> {
  let ParserMatchClass = VK16PairAsmOperand;
}
//===----------------------------------------------------------------------===//
// X86 Complex Pattern Definitions.
//===----------------------------------------------------------------------===//

// Define X86-specific addressing mode.
def addr : ComplexPattern<iPTR, 5, "selectAddr", [], [SDNPWantParent]>;
// NOTE(review): the trailing "[]>;" root-list lines of the LEA patterns were
// lost in extraction; restored from upstream — verify.
def lea32addr : ComplexPattern<i32, 5, "selectLEAAddr",
                               [add, sub, mul, X86mul_imm, shl, or, frameindex],
                               []>;
// In 64-bit mode 32-bit LEAs can use RIP-relative addressing.
def lea64_32addr : ComplexPattern<i32, 5, "selectLEA64_32Addr",
                                  [add, sub, mul, X86mul_imm, shl, or,
                                   frameindex, X86WrapperRIP],
                                  []>;

def tls32addr : ComplexPattern<i32, 5, "selectTLSADDRAddr",
                               [tglobaltlsaddr], []>;

def tls32baseaddr : ComplexPattern<i32, 5, "selectTLSADDRAddr",
                                   [tglobaltlsaddr], []>;

def lea64addr : ComplexPattern<i64, 5, "selectLEAAddr",
                               [add, sub, mul, X86mul_imm, shl, or, frameindex,
                                X86WrapperRIP], []>;

def tls64addr : ComplexPattern<i64, 5, "selectTLSADDRAddr",
                               [tglobaltlsaddr], []>;

def tls64baseaddr : ComplexPattern<i64, 5, "selectTLSADDRAddr",
                                   [tglobaltlsaddr], []>;

def vectoraddr : ComplexPattern<iPTR, 5, "selectVectorAddr", [],[SDNPWantParent]>;

// A relocatable immediate is an operand that can be relocated by the linker to
// an immediate, such as a regular symbol in non-PIC code.
def relocImm : ComplexPattern<iAny, 1, "selectRelocImm",
                              [X86Wrapper], [], 0>;
874 //===----------------------------------------------------------------------===//
875 // X86 Instruction Predicate Definitions.
876 def TruePredicate : Predicate<"true">;
// ISA-feature predicates, one per subtarget feature bit. By convention:
//   HasX  - feature X is available.
//   NoX   - feature X is NOT available.
//   UseX  - feature X is available AND no preferred superset (e.g. AVX over
//           SSE) is available, so the legacy encoding should be selected.
878 def HasCMov : Predicate<"Subtarget->hasCMov()">;
879 def NoCMov : Predicate<"!Subtarget->hasCMov()">;
881 def HasMMX : Predicate<"Subtarget->hasMMX()">;
882 def Has3DNow : Predicate<"Subtarget->has3DNow()">;
883 def Has3DNowA : Predicate<"Subtarget->has3DNowA()">;
884 def HasSSE1 : Predicate<"Subtarget->hasSSE1()">;
885 def UseSSE1 : Predicate<"Subtarget->hasSSE1() && !Subtarget->hasAVX()">;
886 def HasSSE2 : Predicate<"Subtarget->hasSSE2()">;
887 def UseSSE2 : Predicate<"Subtarget->hasSSE2() && !Subtarget->hasAVX()">;
888 def HasSSE3 : Predicate<"Subtarget->hasSSE3()">;
889 def UseSSE3 : Predicate<"Subtarget->hasSSE3() && !Subtarget->hasAVX()">;
890 def HasSSSE3 : Predicate<"Subtarget->hasSSSE3()">;
891 def UseSSSE3 : Predicate<"Subtarget->hasSSSE3() && !Subtarget->hasAVX()">;
892 def HasSSE41 : Predicate<"Subtarget->hasSSE41()">;
893 def NoSSE41 : Predicate<"!Subtarget->hasSSE41()">;
894 def UseSSE41 : Predicate<"Subtarget->hasSSE41() && !Subtarget->hasAVX()">;
895 def HasSSE42 : Predicate<"Subtarget->hasSSE42()">;
896 def UseSSE42 : Predicate<"Subtarget->hasSSE42() && !Subtarget->hasAVX()">;
897 def HasSSE4A : Predicate<"Subtarget->hasSSE4A()">;
898 def NoAVX : Predicate<"!Subtarget->hasAVX()">;
899 def HasAVX : Predicate<"Subtarget->hasAVX()">;
900 def HasAVX2 : Predicate<"Subtarget->hasAVX2()">;
901 def HasAVX1Only : Predicate<"Subtarget->hasAVX() && !Subtarget->hasAVX2()">;
902 def HasAVX512 : Predicate<"Subtarget->hasAVX512()">;
903 def UseAVX : Predicate<"Subtarget->hasAVX() && !Subtarget->hasAVX512()">;
904 def UseAVX2 : Predicate<"Subtarget->hasAVX2() && !Subtarget->hasAVX512()">;
905 def NoAVX512 : Predicate<"!Subtarget->hasAVX512()">;
906 def HasCDI : Predicate<"Subtarget->hasCDI()">;
907 def HasVPOPCNTDQ : Predicate<"Subtarget->hasVPOPCNTDQ()">;
908 def HasPFI : Predicate<"Subtarget->hasPFI()">;
909 def HasERI : Predicate<"Subtarget->hasERI()">;
910 def HasDQI : Predicate<"Subtarget->hasDQI()">;
911 def NoDQI : Predicate<"!Subtarget->hasDQI()">;
912 def HasBWI : Predicate<"Subtarget->hasBWI()">;
913 def NoBWI : Predicate<"!Subtarget->hasBWI()">;
914 def HasVLX : Predicate<"Subtarget->hasVLX()">;
915 def NoVLX : Predicate<"!Subtarget->hasVLX()">;
// NoVLX_Or_No* predicates guard patterns that must fall back to 512-bit
// forms when either VLX or the paired feature is missing.
916 def NoVLX_Or_NoBWI : Predicate<"!Subtarget->hasVLX() || !Subtarget->hasBWI()">;
917 def NoVLX_Or_NoDQI : Predicate<"!Subtarget->hasVLX() || !Subtarget->hasDQI()">;
918 def PKU : Predicate<"Subtarget->hasPKU()">;
919 def HasVNNI : Predicate<"Subtarget->hasVNNI()">;
920 def HasVP2INTERSECT : Predicate<"Subtarget->hasVP2INTERSECT()">;
921 def HasBF16 : Predicate<"Subtarget->hasBF16()">;
922 def HasFP16 : Predicate<"Subtarget->hasFP16()">;
923 def HasAVXVNNI : Predicate <"Subtarget->hasAVXVNNI()">;
924 def NoVLX_Or_NoVNNI : Predicate<"!Subtarget->hasVLX() || !Subtarget->hasVNNI()">;
926 def HasBITALG : Predicate<"Subtarget->hasBITALG()">;
927 def HasPOPCNT : Predicate<"Subtarget->hasPOPCNT()">;
928 def HasAES : Predicate<"Subtarget->hasAES()">;
929 def HasVAES : Predicate<"Subtarget->hasVAES()">;
930 def NoVLX_Or_NoVAES : Predicate<"!Subtarget->hasVLX() || !Subtarget->hasVAES()">;
931 def HasFXSR : Predicate<"Subtarget->hasFXSR()">;
932 def HasXSAVE : Predicate<"Subtarget->hasXSAVE()">;
933 def HasXSAVEOPT : Predicate<"Subtarget->hasXSAVEOPT()">;
934 def HasXSAVEC : Predicate<"Subtarget->hasXSAVEC()">;
935 def HasXSAVES : Predicate<"Subtarget->hasXSAVES()">;
936 def HasPCLMUL : Predicate<"Subtarget->hasPCLMUL()">;
937 def NoVLX_Or_NoVPCLMULQDQ :
938 Predicate<"!Subtarget->hasVLX() || !Subtarget->hasVPCLMULQDQ()">;
939 def HasVPCLMULQDQ : Predicate<"Subtarget->hasVPCLMULQDQ()">;
940 def HasGFNI : Predicate<"Subtarget->hasGFNI()">;
941 def HasFMA : Predicate<"Subtarget->hasFMA()">;
942 def HasFMA4 : Predicate<"Subtarget->hasFMA4()">;
943 def NoFMA4 : Predicate<"!Subtarget->hasFMA4()">;
944 def HasXOP : Predicate<"Subtarget->hasXOP()">;
945 def HasTBM : Predicate<"Subtarget->hasTBM()">;
946 def NoTBM : Predicate<"!Subtarget->hasTBM()">;
947 def HasLWP : Predicate<"Subtarget->hasLWP()">;
948 def HasMOVBE : Predicate<"Subtarget->hasMOVBE()">;
949 def HasRDRAND : Predicate<"Subtarget->hasRDRAND()">;
950 def HasF16C : Predicate<"Subtarget->hasF16C()">;
951 def HasFSGSBase : Predicate<"Subtarget->hasFSGSBase()">;
952 def HasLZCNT : Predicate<"Subtarget->hasLZCNT()">;
953 def HasBMI : Predicate<"Subtarget->hasBMI()">;
954 def HasBMI2 : Predicate<"Subtarget->hasBMI2()">;
955 def NoBMI2 : Predicate<"!Subtarget->hasBMI2()">;
956 def HasVBMI : Predicate<"Subtarget->hasVBMI()">;
957 def HasVBMI2 : Predicate<"Subtarget->hasVBMI2()">;
958 def HasIFMA : Predicate<"Subtarget->hasIFMA()">;
959 def HasRTM : Predicate<"Subtarget->hasRTM()">;
960 def HasADX : Predicate<"Subtarget->hasADX()">;
961 def HasSHA : Predicate<"Subtarget->hasSHA()">;
962 def HasSGX : Predicate<"Subtarget->hasSGX()">;
963 def HasRDSEED : Predicate<"Subtarget->hasRDSEED()">;
964 def HasSSEPrefetch : Predicate<"Subtarget->hasSSEPrefetch()">;
965 def NoSSEPrefetch : Predicate<"!Subtarget->hasSSEPrefetch()">;
966 def HasPrefetchW : Predicate<"Subtarget->hasPrefetchW()">;
967 def HasPREFETCHWT1 : Predicate<"Subtarget->hasPREFETCHWT1()">;
968 def HasLAHFSAHF : Predicate<"Subtarget->hasLAHFSAHF()">;
969 def HasMWAITX : Predicate<"Subtarget->hasMWAITX()">;
970 def HasCLZERO : Predicate<"Subtarget->hasCLZERO()">;
971 def HasCLDEMOTE : Predicate<"Subtarget->hasCLDEMOTE()">;
972 def HasMOVDIRI : Predicate<"Subtarget->hasMOVDIRI()">;
973 def HasMOVDIR64B : Predicate<"Subtarget->hasMOVDIR64B()">;
974 def HasPTWRITE : Predicate<"Subtarget->hasPTWRITE()">;
// x87 stack is only used for a type when no SSE register class covers it.
975 def FPStackf32 : Predicate<"!Subtarget->hasSSE1()">;
976 def FPStackf64 : Predicate<"!Subtarget->hasSSE2()">;
977 def HasSHSTK : Predicate<"Subtarget->hasSHSTK()">;
978 def HasCLFLUSHOPT : Predicate<"Subtarget->hasCLFLUSHOPT()">;
979 def HasCLWB : Predicate<"Subtarget->hasCLWB()">;
980 def HasWBNOINVD : Predicate<"Subtarget->hasWBNOINVD()">;
981 def HasRDPID : Predicate<"Subtarget->hasRDPID()">;
982 def HasWAITPKG : Predicate<"Subtarget->hasWAITPKG()">;
983 def HasINVPCID : Predicate<"Subtarget->hasINVPCID()">;
984 def HasCmpxchg8b : Predicate<"Subtarget->hasCmpxchg8b()">;
985 def HasCmpxchg16b: Predicate<"Subtarget->hasCmpxchg16b()">;
986 def HasPCONFIG : Predicate<"Subtarget->hasPCONFIG()">;
987 def HasENQCMD : Predicate<"Subtarget->hasENQCMD()">;
988 def HasKL : Predicate<"Subtarget->hasKL()">;
989 def HasWIDEKL : Predicate<"Subtarget->hasWIDEKL()">;
990 def HasHRESET : Predicate<"Subtarget->hasHRESET()">;
991 def HasSERIALIZE : Predicate<"Subtarget->hasSERIALIZE()">;
992 def HasTSXLDTRK : Predicate<"Subtarget->hasTSXLDTRK()">;
993 def HasAMXTILE : Predicate<"Subtarget->hasAMXTILE()">;
994 def HasAMXBF16 : Predicate<"Subtarget->hasAMXBF16()">;
995 def HasAMXINT8 : Predicate<"Subtarget->hasAMXINT8()">;
996 def HasUINTR : Predicate<"Subtarget->hasUINTR()">;
997 def HasCRC32 : Predicate<"Subtarget->hasCRC32()">;
// Operating-mode predicates. These also carry AssemblerPredicates so the
// assembler/disassembler can honor them without a Subtarget.
998 def Not64BitMode : Predicate<"!Subtarget->is64Bit()">,
999 AssemblerPredicate<(all_of (not Mode64Bit)), "Not 64-bit mode">;
1000 def In64BitMode : Predicate<"Subtarget->is64Bit()">,
1001 AssemblerPredicate<(all_of Mode64Bit), "64-bit mode">;
1002 def IsLP64 : Predicate<"Subtarget->isTarget64BitLP64()">;
1003 def NotLP64 : Predicate<"!Subtarget->isTarget64BitLP64()">;
1004 def In16BitMode : Predicate<"Subtarget->is16Bit()">,
1005 AssemblerPredicate<(all_of Mode16Bit), "16-bit mode">;
1006 def Not16BitMode : Predicate<"!Subtarget->is16Bit()">,
1007 AssemblerPredicate<(all_of (not Mode16Bit)), "Not 16-bit mode">;
1008 def In32BitMode : Predicate<"Subtarget->is32Bit()">,
1009 AssemblerPredicate<(all_of Mode32Bit), "32-bit mode">;
1010 def IsWin64 : Predicate<"Subtarget->isTargetWin64()">;
1011 def NotWin64 : Predicate<"!Subtarget->isTargetWin64()">;
1012 def NotWin64WithoutFP : Predicate<"!Subtarget->isTargetWin64() ||"
1013 "Subtarget->getFrameLowering()->hasFP(*MF)"> {
1014 let RecomputePerFunction = 1;
1016 def IsPS4 : Predicate<"Subtarget->isTargetPS4()">;
1017 def NotPS4 : Predicate<"!Subtarget->isTargetPS4()">;
1018 def IsNaCl : Predicate<"Subtarget->isTargetNaCl()">;
1019 def NotNaCl : Predicate<"!Subtarget->isTargetNaCl()">;
1020 def SmallCode : Predicate<"TM.getCodeModel() == CodeModel::Small">;
1021 def KernelCode : Predicate<"TM.getCodeModel() == CodeModel::Kernel">;
1022 def NearData : Predicate<"TM.getCodeModel() == CodeModel::Small ||"
1023 "TM.getCodeModel() == CodeModel::Kernel">;
1024 def IsNotPIC : Predicate<"!TM.isPositionIndependent()">;
1026 // We could compute these on a per-module basis but doing so requires accessing
1027 // the Function object through the <Target>Subtarget and objections were raised
1028 // to that (see post-commit review comments for r301750).
1029 let RecomputePerFunction = 1 in {
1030 def OptForSize : Predicate<"shouldOptForSize(MF)">;
1031 def OptForMinSize : Predicate<"MF->getFunction().hasMinSize()">;
1032 def OptForSpeed : Predicate<"!shouldOptForSize(MF)">;
1033 def UseIncDec : Predicate<"!Subtarget->slowIncDec() || "
1034 "shouldOptForSize(MF)">;
1035 def NoSSE41_Or_OptForSize : Predicate<"shouldOptForSize(MF) || "
1036 "!Subtarget->hasSSE41()">;
// Tuning predicates (instruction-selection heuristics, not ISA features).
1039 def CallImmAddr : Predicate<"Subtarget->isLegalToCallImmediateAddr()">;
1040 def FavorMemIndirectCall : Predicate<"!Subtarget->slowTwoMemOps()">;
1041 def HasFastMem32 : Predicate<"!Subtarget->isUnalignedMem32Slow()">;
1042 def HasFastLZCNT : Predicate<"Subtarget->hasFastLZCNT()">;
1043 def HasFastSHLDRotate : Predicate<"Subtarget->hasFastSHLDRotate()">;
1044 def HasERMSB : Predicate<"Subtarget->hasERMSB()">;
1045 def HasFSRM : Predicate<"Subtarget->hasFSRM()">;
1046 def HasMFence : Predicate<"Subtarget->hasMFence()">;
1047 def UseIndirectThunkCalls : Predicate<"Subtarget->useIndirectThunkCalls()">;
1048 def NotUseIndirectThunkCalls : Predicate<"!Subtarget->useIndirectThunkCalls()">;
1050 //===----------------------------------------------------------------------===//
1051 // X86 Instruction Format Definitions.
1054 include "X86InstrFormats.td"
1056 //===----------------------------------------------------------------------===//
1057 // Pattern fragments.
1060 // X86 specific condition code. These correspond to CondCode in
1061 // X86InstrInfo.h. They must be kept in synch.
1062 def X86_COND_O : PatLeaf<(i8 0)>;
1063 def X86_COND_NO : PatLeaf<(i8 1)>;
1064 def X86_COND_B : PatLeaf<(i8 2)>; // alt. COND_C
1065 def X86_COND_AE : PatLeaf<(i8 3)>; // alt. COND_NC
1066 def X86_COND_E : PatLeaf<(i8 4)>; // alt. COND_Z
1067 def X86_COND_NE : PatLeaf<(i8 5)>; // alt. COND_NZ
1068 def X86_COND_BE : PatLeaf<(i8 6)>; // alt. COND_NA
1069 def X86_COND_A : PatLeaf<(i8 7)>; // alt. COND_NBE
1070 def X86_COND_S : PatLeaf<(i8 8)>;
1071 def X86_COND_NS : PatLeaf<(i8 9)>;
1072 def X86_COND_P : PatLeaf<(i8 10)>; // alt. COND_PE
1073 def X86_COND_NP : PatLeaf<(i8 11)>; // alt. COND_PO
1074 def X86_COND_L : PatLeaf<(i8 12)>; // alt. COND_NGE
1075 def X86_COND_GE : PatLeaf<(i8 13)>; // alt. COND_NL
1076 def X86_COND_LE : PatLeaf<(i8 14)>; // alt. COND_NG
1077 def X86_COND_G : PatLeaf<(i8 15)>; // alt. COND_NLE
// Immediates that fit in a sign-extended 8-bit (or 32-bit) field, enabling
// the shorter imm8/imm32 instruction encodings.
1079 def i16immSExt8 : ImmLeaf<i16, [{ return isInt<8>(Imm); }]>;
1080 def i32immSExt8 : ImmLeaf<i32, [{ return isInt<8>(Imm); }]>;
1081 def i64immSExt8 : ImmLeaf<i64, [{ return isInt<8>(Imm); }]>;
1082 def i64immSExt32 : ImmLeaf<i64, [{ return isInt<32>(Imm); }]>;
1083 def i64timmSExt32 : TImmLeaf<i64, [{ return isInt<32>(Imm); }]>;
// relocImm variants restricted to absolute symbol references known to fit
// the given sign-extended width (checked by isSExtAbsoluteSymbolRef).
1085 def i16relocImmSExt8 : PatLeaf<(i16 relocImm), [{
1086 return isSExtAbsoluteSymbolRef(8, N);
1088 def i32relocImmSExt8 : PatLeaf<(i32 relocImm), [{
1089 return isSExtAbsoluteSymbolRef(8, N);
1091 def i64relocImmSExt8 : PatLeaf<(i64 relocImm), [{
1092 return isSExtAbsoluteSymbolRef(8, N);
1094 def i64relocImmSExt32 : PatLeaf<(i64 relocImm), [{
1095 return isSExtAbsoluteSymbolRef(32, N);
1098 // If we have multiple users of an immediate, it's much smaller to reuse
1099 // the register, rather than encode the immediate in every instruction.
1100 // This has the risk of increasing register pressure from stretched live
1101 // ranges, however, the immediates should be trivial to rematerialize by
1102 // the RA in the event of high register pressure.
1103 // TODO : This is currently enabled for stores and binary ops. There are more
1104 // cases for which this can be enabled, though this catches the bulk of the
1106 // TODO2 : This should really also be enabled under O2, but there's currently
1107 // an issue with RA where we don't pull the constants into their users
1108 // when we rematerialize them. I'll follow-up on enabling O2 after we fix that
1110 // TODO3 : This is currently limited to single basic blocks (DAG creation
1111 // pulls block immediates to the top and merges them if necessary).
1112 // Eventually, it would be nice to allow ConstantHoisting to merge constants
1113 // globally for potentially added savings.
// "_su" (size-usage) variants: reject the immediate form when reusing a
// register would be smaller under optsize (see
// shouldAvoidImmediateInstFormsForSize).
1115 def imm_su : PatLeaf<(imm), [{
1116 return !shouldAvoidImmediateInstFormsForSize(N);
1118 def i64immSExt32_su : PatLeaf<(i64immSExt32), [{
1119 return !shouldAvoidImmediateInstFormsForSize(N);
1122 def relocImm8_su : PatLeaf<(i8 relocImm), [{
1123 return !shouldAvoidImmediateInstFormsForSize(N);
1125 def relocImm16_su : PatLeaf<(i16 relocImm), [{
1126 return !shouldAvoidImmediateInstFormsForSize(N);
1128 def relocImm32_su : PatLeaf<(i32 relocImm), [{
1129 return !shouldAvoidImmediateInstFormsForSize(N);
1132 def i16relocImmSExt8_su : PatLeaf<(i16relocImmSExt8), [{
1133 return !shouldAvoidImmediateInstFormsForSize(N);
1135 def i32relocImmSExt8_su : PatLeaf<(i32relocImmSExt8), [{
1136 return !shouldAvoidImmediateInstFormsForSize(N);
1138 def i64relocImmSExt8_su : PatLeaf<(i64relocImmSExt8), [{
1139 return !shouldAvoidImmediateInstFormsForSize(N);
1141 def i64relocImmSExt32_su : PatLeaf<(i64relocImmSExt32), [{
1142 return !shouldAvoidImmediateInstFormsForSize(N);
1145 def i16immSExt8_su : PatLeaf<(i16immSExt8), [{
1146 return !shouldAvoidImmediateInstFormsForSize(N);
1148 def i32immSExt8_su : PatLeaf<(i32immSExt8), [{
1149 return !shouldAvoidImmediateInstFormsForSize(N);
1151 def i64immSExt8_su : PatLeaf<(i64immSExt8), [{
1152 return !shouldAvoidImmediateInstFormsForSize(N);
1163 // Helper fragments for loads.
1165 // It's safe to fold a zextload/extload from i1 as a regular i8 load. The
1166 // upper bits are guaranteed to be zero and we were going to emit a MOV8rm
1167 // which might get folded during peephole anyway.
1168 def loadi8 : PatFrag<(ops node:$ptr), (i8 (unindexedload node:$ptr)), [{
1169 LoadSDNode *LD = cast<LoadSDNode>(N);
1170 ISD::LoadExtType ExtType = LD->getExtensionType();
1171 return ExtType == ISD::NON_EXTLOAD || ExtType == ISD::EXTLOAD ||
1172 ExtType == ISD::ZEXTLOAD;
1175 // It's always safe to treat a anyext i16 load as a i32 load if the i16 is
1176 // known to be 32-bit aligned or better. Ditto for i8 to i16.
1177 def loadi16 : PatFrag<(ops node:$ptr), (i16 (unindexedload node:$ptr)), [{
1178 LoadSDNode *LD = cast<LoadSDNode>(N);
1179 ISD::LoadExtType ExtType = LD->getExtensionType();
1180 if (ExtType == ISD::NON_EXTLOAD)
1182 if (ExtType == ISD::EXTLOAD && EnablePromoteAnyextLoad)
1183 return LD->getAlignment() >= 2 && LD->isSimple();
1187 def loadi32 : PatFrag<(ops node:$ptr), (i32 (unindexedload node:$ptr)), [{
1188 LoadSDNode *LD = cast<LoadSDNode>(N);
1189 ISD::LoadExtType ExtType = LD->getExtensionType();
1190 if (ExtType == ISD::NON_EXTLOAD)
1192 if (ExtType == ISD::EXTLOAD && EnablePromoteAnyextLoad)
1193 return LD->getAlignment() >= 4 && LD->isSimple();
1197 def loadi64 : PatFrag<(ops node:$ptr), (i64 (load node:$ptr))>;
1198 def loadf16 : PatFrag<(ops node:$ptr), (f16 (load node:$ptr))>;
1199 def loadf32 : PatFrag<(ops node:$ptr), (f32 (load node:$ptr))>;
1200 def loadf64 : PatFrag<(ops node:$ptr), (f64 (load node:$ptr))>;
1201 def loadf80 : PatFrag<(ops node:$ptr), (f80 (load node:$ptr))>;
1202 def loadf128 : PatFrag<(ops node:$ptr), (f128 (load node:$ptr))>;
1203 def alignedloadf128 : PatFrag<(ops node:$ptr), (f128 (load node:$ptr)), [{
1204 LoadSDNode *Ld = cast<LoadSDNode>(N);
1205 return Ld->getAlignment() >= Ld->getMemoryVT().getStoreSize();
1207 def memopf128 : PatFrag<(ops node:$ptr), (f128 (load node:$ptr)), [{
1208 LoadSDNode *Ld = cast<LoadSDNode>(N);
1209 return Subtarget->hasSSEUnalignedMem() ||
1210 Ld->getAlignment() >= Ld->getMemoryVT().getStoreSize();
1213 def sextloadi16i8 : PatFrag<(ops node:$ptr), (i16 (sextloadi8 node:$ptr))>;
1214 def sextloadi32i8 : PatFrag<(ops node:$ptr), (i32 (sextloadi8 node:$ptr))>;
1215 def sextloadi32i16 : PatFrag<(ops node:$ptr), (i32 (sextloadi16 node:$ptr))>;
1216 def sextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (sextloadi8 node:$ptr))>;
1217 def sextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (sextloadi16 node:$ptr))>;
1218 def sextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (sextloadi32 node:$ptr))>;
1220 def zextloadi8i1 : PatFrag<(ops node:$ptr), (i8 (zextloadi1 node:$ptr))>;
1221 def zextloadi16i1 : PatFrag<(ops node:$ptr), (i16 (zextloadi1 node:$ptr))>;
1222 def zextloadi32i1 : PatFrag<(ops node:$ptr), (i32 (zextloadi1 node:$ptr))>;
1223 def zextloadi16i8 : PatFrag<(ops node:$ptr), (i16 (zextloadi8 node:$ptr))>;
1224 def zextloadi32i8 : PatFrag<(ops node:$ptr), (i32 (zextloadi8 node:$ptr))>;
1225 def zextloadi32i16 : PatFrag<(ops node:$ptr), (i32 (zextloadi16 node:$ptr))>;
1226 def zextloadi64i1 : PatFrag<(ops node:$ptr), (i64 (zextloadi1 node:$ptr))>;
1227 def zextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (zextloadi8 node:$ptr))>;
1228 def zextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (zextloadi16 node:$ptr))>;
1229 def zextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (zextloadi32 node:$ptr))>;
1231 def extloadi8i1 : PatFrag<(ops node:$ptr), (i8 (extloadi1 node:$ptr))>;
1232 def extloadi16i1 : PatFrag<(ops node:$ptr), (i16 (extloadi1 node:$ptr))>;
1233 def extloadi32i1 : PatFrag<(ops node:$ptr), (i32 (extloadi1 node:$ptr))>;
1234 def extloadi16i8 : PatFrag<(ops node:$ptr), (i16 (extloadi8 node:$ptr))>;
1235 def extloadi32i8 : PatFrag<(ops node:$ptr), (i32 (extloadi8 node:$ptr))>;
1236 def extloadi32i16 : PatFrag<(ops node:$ptr), (i32 (extloadi16 node:$ptr))>;
1237 def extloadi64i1 : PatFrag<(ops node:$ptr), (i64 (extloadi1 node:$ptr))>;
1238 def extloadi64i8 : PatFrag<(ops node:$ptr), (i64 (extloadi8 node:$ptr))>;
1239 def extloadi64i16 : PatFrag<(ops node:$ptr), (i64 (extloadi16 node:$ptr))>;
1241 // We can treat an i8/i16 extending load to i64 as a 32 bit load if its known
1242 // to be 4 byte aligned or better.
1243 def extloadi64i32 : PatFrag<(ops node:$ptr), (i64 (unindexedload node:$ptr)), [{
1244 LoadSDNode *LD = cast<LoadSDNode>(N);
1245 ISD::LoadExtType ExtType = LD->getExtensionType();
1246 if (ExtType != ISD::EXTLOAD)
1248 if (LD->getMemoryVT() == MVT::i32)
1251 return LD->getAlignment() >= 4 && LD->isSimple();
1255 // An 'and' node with a single use.
1256 def and_su : PatFrag<(ops node:$lhs, node:$rhs), (and node:$lhs, node:$rhs), [{
1257 return N->hasOneUse();
1259 // An 'srl' node with a single use.
1260 def srl_su : PatFrag<(ops node:$lhs, node:$rhs), (srl node:$lhs, node:$rhs), [{
1261 return N->hasOneUse();
1263 // An 'trunc' node with a single use.
1264 def trunc_su : PatFrag<(ops node:$src), (trunc node:$src), [{
1265 return N->hasOneUse();
1268 //===----------------------------------------------------------------------===//
1269 // Instruction list.
// Canonical one-byte NOP plus the multi-byte 0F 1F NOP family (memory and
// register forms exist only so the assembler/disassembler can round-trip).
1273 let hasSideEffects = 0, SchedRW = [WriteNop] in {
1274 def NOOP : I<0x90, RawFrm, (outs), (ins), "nop", []>;
1275 def NOOPW : I<0x1f, MRMXm, (outs), (ins i16mem:$zero),
1276 "nop{w}\t$zero", []>, TB, OpSize16, NotMemoryFoldable;
1277 def NOOPL : I<0x1f, MRMXm, (outs), (ins i32mem:$zero),
1278 "nop{l}\t$zero", []>, TB, OpSize32, NotMemoryFoldable;
1279 def NOOPQ : RI<0x1f, MRMXm, (outs), (ins i64mem:$zero),
1280 "nop{q}\t$zero", []>, TB, NotMemoryFoldable,
1281 Requires<[In64BitMode]>;
1282 // Also allow register so we can assemble/disassemble
1283 def NOOPWr : I<0x1f, MRMXr, (outs), (ins GR16:$zero),
1284 "nop{w}\t$zero", []>, TB, OpSize16, NotMemoryFoldable;
1285 def NOOPLr : I<0x1f, MRMXr, (outs), (ins GR32:$zero),
1286 "nop{l}\t$zero", []>, TB, OpSize32, NotMemoryFoldable;
1287 def NOOPQr : RI<0x1f, MRMXr, (outs), (ins GR64:$zero),
1288 "nop{q}\t$zero", []>, TB, NotMemoryFoldable,
1289 Requires<[In64BitMode]>;
1293 // Constructing a stack frame.
1294 def ENTER : Ii16<0xC8, RawFrmImm8, (outs), (ins i16imm:$len, i8imm:$lvl),
1295 "enter\t$len, $lvl", []>, Sched<[WriteMicrocoded]>;
1297 let SchedRW = [WriteALU] in {
1298 let Defs = [EBP, ESP], Uses = [EBP, ESP], mayLoad = 1, hasSideEffects=0 in
1299 def LEAVE : I<0xC9, RawFrm, (outs), (ins), "leave", []>,
1300 Requires<[Not64BitMode]>;
1302 let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, hasSideEffects = 0 in
1303 def LEAVE64 : I<0xC9, RawFrm, (outs), (ins), "leave", []>,
1304 Requires<[In64BitMode]>;
1307 //===----------------------------------------------------------------------===//
1308 // Miscellaneous Instructions.
1311 let isBarrier = 1, hasSideEffects = 1, usesCustomInserter = 1,
1312 SchedRW = [WriteSystem] in
1313 def Int_eh_sjlj_setup_dispatch
1314 : PseudoI<(outs), (ins), [(X86eh_sjlj_setup_dispatch)]>;
// 16/32-bit PUSH/POP (not valid in 64-bit mode where noted). The 8F /0 and
// FF /6 "long forms" exist only for the disassembler.
1316 let Defs = [ESP], Uses = [ESP], hasSideEffects=0 in {
1317 let mayLoad = 1, SchedRW = [WriteLoad] in {
1318 def POP16r : I<0x58, AddRegFrm, (outs GR16:$reg), (ins), "pop{w}\t$reg", []>,
1320 def POP32r : I<0x58, AddRegFrm, (outs GR32:$reg), (ins), "pop{l}\t$reg", []>,
1321 OpSize32, Requires<[Not64BitMode]>;
1322 // Long form for the disassembler.
1323 let isCodeGenOnly = 1, ForceDisassemble = 1 in {
1324 def POP16rmr: I<0x8F, MRM0r, (outs GR16:$reg), (ins), "pop{w}\t$reg", []>,
1325 OpSize16, NotMemoryFoldable;
1326 def POP32rmr: I<0x8F, MRM0r, (outs GR32:$reg), (ins), "pop{l}\t$reg", []>,
1327 OpSize32, Requires<[Not64BitMode]>, NotMemoryFoldable;
1328 } // isCodeGenOnly = 1, ForceDisassemble = 1
1329 } // mayLoad, SchedRW
1330 let mayStore = 1, mayLoad = 1, SchedRW = [WriteCopy] in {
1331 def POP16rmm: I<0x8F, MRM0m, (outs), (ins i16mem:$dst), "pop{w}\t$dst", []>,
1333 def POP32rmm: I<0x8F, MRM0m, (outs), (ins i32mem:$dst), "pop{l}\t$dst", []>,
1334 OpSize32, Requires<[Not64BitMode]>;
1335 } // mayStore, mayLoad, SchedRW
1337 let mayStore = 1, SchedRW = [WriteStore] in {
1338 def PUSH16r : I<0x50, AddRegFrm, (outs), (ins GR16:$reg), "push{w}\t$reg",[]>,
1340 def PUSH32r : I<0x50, AddRegFrm, (outs), (ins GR32:$reg), "push{l}\t$reg",[]>,
1341 OpSize32, Requires<[Not64BitMode]>;
1342 // Long form for the disassembler.
1343 let isCodeGenOnly = 1, ForceDisassemble = 1 in {
1344 def PUSH16rmr: I<0xFF, MRM6r, (outs), (ins GR16:$reg), "push{w}\t$reg",[]>,
1345 OpSize16, NotMemoryFoldable;
1346 def PUSH32rmr: I<0xFF, MRM6r, (outs), (ins GR32:$reg), "push{l}\t$reg",[]>,
1347 OpSize32, Requires<[Not64BitMode]>, NotMemoryFoldable;
1348 } // isCodeGenOnly = 1, ForceDisassemble = 1
1350 def PUSH16i8 : Ii8<0x6a, RawFrm, (outs), (ins i16i8imm:$imm),
1351 "push{w}\t$imm", []>, OpSize16;
1352 def PUSHi16 : Ii16<0x68, RawFrm, (outs), (ins i16imm:$imm),
1353 "push{w}\t$imm", []>, OpSize16;
1355 def PUSH32i8 : Ii8<0x6a, RawFrm, (outs), (ins i32i8imm:$imm),
1356 "push{l}\t$imm", []>, OpSize32,
1357 Requires<[Not64BitMode]>;
1358 def PUSHi32 : Ii32<0x68, RawFrm, (outs), (ins i32imm:$imm),
1359 "push{l}\t$imm", []>, OpSize32,
1360 Requires<[Not64BitMode]>;
1361 } // mayStore, SchedRW
1363 let mayLoad = 1, mayStore = 1, SchedRW = [WriteCopy] in {
1364 def PUSH16rmm: I<0xFF, MRM6m, (outs), (ins i16mem:$src), "push{w}\t$src", []>,
1366 def PUSH32rmm: I<0xFF, MRM6m, (outs), (ins i32mem:$src), "push{l}\t$src", []>,
1367 OpSize32, Requires<[Not64BitMode]>;
1368 } // mayLoad, mayStore, SchedRW
// Pseudos for reading/writing EFLAGS via the stack (expanded by a custom
// inserter); they implement the x86 flags read/write intrinsics.
1372 let mayLoad = 1, mayStore = 1, usesCustomInserter = 1,
1373 SchedRW = [WriteRMW], Defs = [ESP] in {
1375 def RDFLAGS32 : PseudoI<(outs GR32:$dst), (ins),
1376 [(set GR32:$dst, (int_x86_flags_read_u32))]>,
1377 Requires<[Not64BitMode]>;
1380 def RDFLAGS64 : PseudoI<(outs GR64:$dst), (ins),
1381 [(set GR64:$dst, (int_x86_flags_read_u64))]>,
1382 Requires<[In64BitMode]>;
1385 let mayLoad = 1, mayStore = 1, usesCustomInserter = 1,
1386 SchedRW = [WriteRMW] in {
1387 let Defs = [ESP, EFLAGS, DF], Uses = [ESP] in
1388 def WRFLAGS32 : PseudoI<(outs), (ins GR32:$src),
1389 [(int_x86_flags_write_u32 GR32:$src)]>,
1390 Requires<[Not64BitMode]>;
1392 let Defs = [RSP, EFLAGS, DF], Uses = [RSP] in
1393 def WRFLAGS64 : PseudoI<(outs), (ins GR64:$src),
1394 [(int_x86_flags_write_u64 GR64:$src)]>,
1395 Requires<[In64BitMode]>;
1398 let Defs = [ESP, EFLAGS, DF], Uses = [ESP], mayLoad = 1, hasSideEffects=0,
1399 SchedRW = [WriteLoad] in {
1400 def POPF16 : I<0x9D, RawFrm, (outs), (ins), "popf{w}", []>, OpSize16;
1401 def POPF32 : I<0x9D, RawFrm, (outs), (ins), "popf{l|d}", []>, OpSize32,
1402 Requires<[Not64BitMode]>;
1405 let Defs = [ESP], Uses = [ESP, EFLAGS, DF], mayStore = 1, hasSideEffects=0,
1406 SchedRW = [WriteStore] in {
1407 def PUSHF16 : I<0x9C, RawFrm, (outs), (ins), "pushf{w}", []>, OpSize16;
1408 def PUSHF32 : I<0x9C, RawFrm, (outs), (ins), "pushf{l|d}", []>, OpSize32,
1409 Requires<[Not64BitMode]>;
// 64-bit PUSH/POP. Note the OpSize32 on these records selects the default
// 64-bit operand size in long mode (no REX.W needed for push/pop).
1412 let Defs = [RSP], Uses = [RSP], hasSideEffects=0 in {
1413 let mayLoad = 1, SchedRW = [WriteLoad] in {
1414 def POP64r : I<0x58, AddRegFrm, (outs GR64:$reg), (ins), "pop{q}\t$reg", []>,
1415 OpSize32, Requires<[In64BitMode]>;
1416 // Long form for the disassembler.
1417 let isCodeGenOnly = 1, ForceDisassemble = 1 in {
1418 def POP64rmr: I<0x8F, MRM0r, (outs GR64:$reg), (ins), "pop{q}\t$reg", []>,
1419 OpSize32, Requires<[In64BitMode]>, NotMemoryFoldable;
1420 } // isCodeGenOnly = 1, ForceDisassemble = 1
1421 } // mayLoad, SchedRW
1422 let mayLoad = 1, mayStore = 1, SchedRW = [WriteCopy] in
1423 def POP64rmm: I<0x8F, MRM0m, (outs), (ins i64mem:$dst), "pop{q}\t$dst", []>,
1424 OpSize32, Requires<[In64BitMode]>;
1425 let mayStore = 1, SchedRW = [WriteStore] in {
1426 def PUSH64r : I<0x50, AddRegFrm, (outs), (ins GR64:$reg), "push{q}\t$reg", []>,
1427 OpSize32, Requires<[In64BitMode]>;
1428 // Long form for the disassembler.
1429 let isCodeGenOnly = 1, ForceDisassemble = 1 in {
1430 def PUSH64rmr: I<0xFF, MRM6r, (outs), (ins GR64:$reg), "push{q}\t$reg", []>,
1431 OpSize32, Requires<[In64BitMode]>, NotMemoryFoldable;
1432 } // isCodeGenOnly = 1, ForceDisassemble = 1
1433 } // mayStore, SchedRW
1434 let mayLoad = 1, mayStore = 1, SchedRW = [WriteCopy] in {
1435 def PUSH64rmm: I<0xFF, MRM6m, (outs), (ins i64mem:$src), "push{q}\t$src", []>,
1436 OpSize32, Requires<[In64BitMode]>;
1437 } // mayLoad, mayStore, SchedRW
1440 let Defs = [RSP], Uses = [RSP], hasSideEffects = 0, mayStore = 1,
1441 SchedRW = [WriteStore] in {
1442 def PUSH64i8 : Ii8<0x6a, RawFrm, (outs), (ins i64i8imm:$imm),
1443 "push{q}\t$imm", []>, OpSize32,
1444 Requires<[In64BitMode]>;
1445 def PUSH64i32 : Ii32S<0x68, RawFrm, (outs), (ins i64i32imm:$imm),
1446 "push{q}\t$imm", []>, OpSize32,
1447 Requires<[In64BitMode]>;
1450 let Defs = [RSP, EFLAGS, DF], Uses = [RSP], mayLoad = 1, hasSideEffects=0 in
1451 def POPF64 : I<0x9D, RawFrm, (outs), (ins), "popfq", []>,
1452 OpSize32, Requires<[In64BitMode]>, Sched<[WriteLoad]>;
1453 let Defs = [RSP], Uses = [RSP, EFLAGS, DF], mayStore = 1, hasSideEffects=0 in
1454 def PUSHF64 : I<0x9C, RawFrm, (outs), (ins), "pushfq", []>,
1455 OpSize32, Requires<[In64BitMode]>, Sched<[WriteStore]>;
// PUSHA/POPA save/restore all eight GPRs; invalid in 64-bit mode.
1457 let Defs = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP], Uses = [ESP],
1458 mayLoad = 1, hasSideEffects = 0, SchedRW = [WriteLoad] in {
1459 def POPA32 : I<0x61, RawFrm, (outs), (ins), "popal", []>,
1460 OpSize32, Requires<[Not64BitMode]>;
1461 def POPA16 : I<0x61, RawFrm, (outs), (ins), "popaw", []>,
1462 OpSize16, Requires<[Not64BitMode]>;
1464 let Defs = [ESP], Uses = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP],
1465 mayStore = 1, hasSideEffects = 0, SchedRW = [WriteStore] in {
1466 def PUSHA32 : I<0x60, RawFrm, (outs), (ins), "pushal", []>,
1467 OpSize32, Requires<[Not64BitMode]>;
1468 def PUSHA16 : I<0x60, RawFrm, (outs), (ins), "pushaw", []>,
1469 OpSize16, Requires<[Not64BitMode]>;
1472 let Constraints = "$src = $dst", SchedRW = [WriteBSWAP32] in {
1473 // This instruction is a consequence of BSWAP32r observing operand size. The
1474 // encoding is valid, but the behavior is undefined.
1475 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
1476 def BSWAP16r_BAD : I<0xC8, AddRegFrm, (outs GR16:$dst), (ins GR16:$src),
1477 "bswap{w}\t$dst", []>, OpSize16, TB;
1478 // GR32 = bswap GR32
1479 def BSWAP32r : I<0xC8, AddRegFrm, (outs GR32:$dst), (ins GR32:$src),
1481 [(set GR32:$dst, (bswap GR32:$src))]>, OpSize32, TB;
1483 let SchedRW = [WriteBSWAP64] in
1484 def BSWAP64r : RI<0xC8, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
1486 [(set GR64:$dst, (bswap GR64:$src))]>, TB;
1487 } // Constraints = "$src = $dst", SchedRW
1489 // Bit scan instructions.
// BSF/BSR in r/r and r/m forms for 16/32/64 bits; each pattern produces the
// scan result and EFLAGS (X86bsf/X86bsr are the two-result DAG nodes).
1490 let Defs = [EFLAGS] in {
1491 def BSF16rr : I<0xBC, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
1492 "bsf{w}\t{$src, $dst|$dst, $src}",
1493 [(set GR16:$dst, EFLAGS, (X86bsf GR16:$src))]>,
1494 PS, OpSize16, Sched<[WriteBSF]>;
1495 def BSF16rm : I<0xBC, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
1496 "bsf{w}\t{$src, $dst|$dst, $src}",
1497 [(set GR16:$dst, EFLAGS, (X86bsf (loadi16 addr:$src)))]>,
1498 PS, OpSize16, Sched<[WriteBSFLd]>;
1499 def BSF32rr : I<0xBC, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
1500 "bsf{l}\t{$src, $dst|$dst, $src}",
1501 [(set GR32:$dst, EFLAGS, (X86bsf GR32:$src))]>,
1502 PS, OpSize32, Sched<[WriteBSF]>;
1503 def BSF32rm : I<0xBC, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
1504 "bsf{l}\t{$src, $dst|$dst, $src}",
1505 [(set GR32:$dst, EFLAGS, (X86bsf (loadi32 addr:$src)))]>,
1506 PS, OpSize32, Sched<[WriteBSFLd]>;
1507 def BSF64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
1508 "bsf{q}\t{$src, $dst|$dst, $src}",
1509 [(set GR64:$dst, EFLAGS, (X86bsf GR64:$src))]>,
1510 PS, Sched<[WriteBSF]>;
1511 def BSF64rm : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
1512 "bsf{q}\t{$src, $dst|$dst, $src}",
1513 [(set GR64:$dst, EFLAGS, (X86bsf (loadi64 addr:$src)))]>,
1514 PS, Sched<[WriteBSFLd]>;
1516 def BSR16rr : I<0xBD, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
1517 "bsr{w}\t{$src, $dst|$dst, $src}",
1518 [(set GR16:$dst, EFLAGS, (X86bsr GR16:$src))]>,
1519 PS, OpSize16, Sched<[WriteBSR]>;
1520 def BSR16rm : I<0xBD, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
1521 "bsr{w}\t{$src, $dst|$dst, $src}",
1522 [(set GR16:$dst, EFLAGS, (X86bsr (loadi16 addr:$src)))]>,
1523 PS, OpSize16, Sched<[WriteBSRLd]>;
1524 def BSR32rr : I<0xBD, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
1525 "bsr{l}\t{$src, $dst|$dst, $src}",
1526 [(set GR32:$dst, EFLAGS, (X86bsr GR32:$src))]>,
1527 PS, OpSize32, Sched<[WriteBSR]>;
1528 def BSR32rm : I<0xBD, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
1529 "bsr{l}\t{$src, $dst|$dst, $src}",
1530 [(set GR32:$dst, EFLAGS, (X86bsr (loadi32 addr:$src)))]>,
1531 PS, OpSize32, Sched<[WriteBSRLd]>;
1532 def BSR64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
1533 "bsr{q}\t{$src, $dst|$dst, $src}",
1534 [(set GR64:$dst, EFLAGS, (X86bsr GR64:$src))]>,
1535 PS, Sched<[WriteBSR]>;
1536 def BSR64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
1537 "bsr{q}\t{$src, $dst|$dst, $src}",
1538 [(set GR64:$dst, EFLAGS, (X86bsr (loadi64 addr:$src)))]>,
1539 PS, Sched<[WriteBSRLd]>;
1540 } // Defs = [EFLAGS]
// String instructions (MOVS/STOS/SCAS). All implicitly use/advance the
// index registers and respect DF; modeled as microcoded.
1542 let SchedRW = [WriteMicrocoded] in {
1543 let Defs = [EDI,ESI], Uses = [EDI,ESI,DF] in {
1544 def MOVSB : I<0xA4, RawFrmDstSrc, (outs), (ins dstidx8:$dst, srcidx8:$src),
1545 "movsb\t{$src, $dst|$dst, $src}", []>;
1546 def MOVSW : I<0xA5, RawFrmDstSrc, (outs), (ins dstidx16:$dst, srcidx16:$src),
1547 "movsw\t{$src, $dst|$dst, $src}", []>, OpSize16;
1548 def MOVSL : I<0xA5, RawFrmDstSrc, (outs), (ins dstidx32:$dst, srcidx32:$src),
1549 "movs{l|d}\t{$src, $dst|$dst, $src}", []>, OpSize32;
1550 def MOVSQ : RI<0xA5, RawFrmDstSrc, (outs), (ins dstidx64:$dst, srcidx64:$src),
1551 "movsq\t{$src, $dst|$dst, $src}", []>,
1552 Requires<[In64BitMode]>;
// STOS stores AL/AX/EAX/RAX to [(E/R)DI] and advances EDI/RDI.
1555 let Defs = [EDI], Uses = [AL,EDI,DF] in
1556 def STOSB : I<0xAA, RawFrmDst, (outs), (ins dstidx8:$dst),
1557 "stosb\t{%al, $dst|$dst, al}", []>;
1558 let Defs = [EDI], Uses = [AX,EDI,DF] in
1559 def STOSW : I<0xAB, RawFrmDst, (outs), (ins dstidx16:$dst),
1560 "stosw\t{%ax, $dst|$dst, ax}", []>, OpSize16;
1561 let Defs = [EDI], Uses = [EAX,EDI,DF] in
1562 def STOSL : I<0xAB, RawFrmDst, (outs), (ins dstidx32:$dst),
1563 "stos{l|d}\t{%eax, $dst|$dst, eax}", []>, OpSize32;
1564 let Defs = [RDI], Uses = [RAX,RDI,DF] in
1565 def STOSQ : RI<0xAB, RawFrmDst, (outs), (ins dstidx64:$dst),
1566 "stosq\t{%rax, $dst|$dst, rax}", []>,
1567 Requires<[In64BitMode]>;
// SCAS compares the accumulator against [(E/R)DI], setting EFLAGS.
1569 let Defs = [EDI,EFLAGS], Uses = [AL,EDI,DF] in
1570 def SCASB : I<0xAE, RawFrmDst, (outs), (ins dstidx8:$dst),
1571 "scasb\t{$dst, %al|al, $dst}", []>;
1572 let Defs = [EDI,EFLAGS], Uses = [AX,EDI,DF] in
1573 def SCASW : I<0xAF, RawFrmDst, (outs), (ins dstidx16:$dst),
1574 "scasw\t{$dst, %ax|ax, $dst}", []>, OpSize16;
1575 let Defs = [EDI,EFLAGS], Uses = [EAX,EDI,DF] in
1576 def SCASL : I<0xAF, RawFrmDst, (outs), (ins dstidx32:$dst),
1577 "scas{l|d}\t{$dst, %eax|eax, $dst}", []>, OpSize32;
1578 let Defs = [EDI,EFLAGS], Uses = [RAX,EDI,DF] in
1579 def SCASQ : RI<0xAF, RawFrmDst, (outs), (ins dstidx64:$dst),
1580 "scasq\t{$dst, %rax|rax, $dst}", []>,
1581 Requires<[In64BitMode]>;
// CMPS compares [(E)SI] against [(E)DI], setting EFLAGS.
1583 let Defs = [EDI,ESI,EFLAGS], Uses = [EDI,ESI,DF] in {
1584 def CMPSB : I<0xA6, RawFrmDstSrc, (outs), (ins dstidx8:$dst, srcidx8:$src),
1585 "cmpsb\t{$dst, $src|$src, $dst}", []>;
1586 def CMPSW : I<0xA7, RawFrmDstSrc, (outs), (ins dstidx16:$dst, srcidx16:$src),
1587 "cmpsw\t{$dst, $src|$src, $dst}", []>, OpSize16;
1588 def CMPSL : I<0xA7, RawFrmDstSrc, (outs), (ins dstidx32:$dst, srcidx32:$src),
1589 "cmps{l|d}\t{$dst, $src|$src, $dst}", []>, OpSize32;
1590 def CMPSQ : RI<0xA7, RawFrmDstSrc, (outs), (ins dstidx64:$dst, srcidx64:$src),
1591 "cmpsq\t{$dst, $src|$src, $dst}", []>,
1592 Requires<[In64BitMode]>;
1596 //===----------------------------------------------------------------------===//
1597 // Move Instructions.
1599 let SchedRW = [WriteMove] in {
// Plain register-to-register moves.  No patterns (copies are generated
// by the target's copy lowering); marked isMoveReg for the peepholer.
1600 let hasSideEffects = 0, isMoveReg = 1 in {
1601 def MOV8rr : I<0x88, MRMDestReg, (outs GR8 :$dst), (ins GR8 :$src),
1602 "mov{b}\t{$src, $dst|$dst, $src}", []>;
1603 def MOV16rr : I<0x89, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
1604 "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize16;
1605 def MOV32rr : I<0x89, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
1606 "mov{l}\t{$src, $dst|$dst, $src}", []>, OpSize32;
1607 def MOV64rr : RI<0x89, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
1608 "mov{q}\t{$src, $dst|$dst, $src}", []>;
// Immediate moves.  Rematerializable and as-cheap-as-a-move so the
// register allocator can re-emit them instead of spilling.
1611 let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in {
1612 def MOV8ri : Ii8 <0xB0, AddRegFrm, (outs GR8 :$dst), (ins i8imm :$src),
1613 "mov{b}\t{$src, $dst|$dst, $src}",
1614 [(set GR8:$dst, imm:$src)]>;
1615 def MOV16ri : Ii16<0xB8, AddRegFrm, (outs GR16:$dst), (ins i16imm:$src),
1616 "mov{w}\t{$src, $dst|$dst, $src}",
1617 [(set GR16:$dst, imm:$src)]>, OpSize16;
1618 def MOV32ri : Ii32<0xB8, AddRegFrm, (outs GR32:$dst), (ins i32imm:$src),
1619 "mov{l}\t{$src, $dst|$dst, $src}",
1620 [(set GR32:$dst, imm:$src)]>, OpSize32;
// 64-bit move of a sign-extended 32-bit immediate (C7 /0) — shorter
// encoding than the full movabs below.
1621 def MOV64ri32 : RIi32S<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src),
1622 "mov{q}\t{$src, $dst|$dst, $src}",
1623 [(set GR64:$dst, i64immSExt32:$src)]>;
// Full 64-bit immediate (movabs): rematerializable but not "cheap".
1625 let isReMaterializable = 1, isMoveImm = 1 in {
1626 def MOV64ri : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
1627 "movabs{q}\t{$src, $dst|$dst, $src}",
1628 [(set GR64:$dst, imm:$src)]>;
1631 // Longer forms that use a ModR/M byte. Needed for disassembler
1632 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
1633 def MOV8ri_alt : Ii8 <0xC6, MRM0r, (outs GR8 :$dst), (ins i8imm :$src),
1634 "mov{b}\t{$src, $dst|$dst, $src}", []>,
1635 FoldGenData<"MOV8ri">;
1636 def MOV16ri_alt : Ii16<0xC7, MRM0r, (outs GR16:$dst), (ins i16imm:$src),
1637 "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize16,
1638 FoldGenData<"MOV16ri">;
1639 def MOV32ri_alt : Ii32<0xC7, MRM0r, (outs GR32:$dst), (ins i32imm:$src),
1640 "mov{l}\t{$src, $dst|$dst, $src}", []>, OpSize32,
1641 FoldGenData<"MOV32ri">;
// Immediate-to-memory stores.
1645 let SchedRW = [WriteStore] in {
1646 def MOV8mi : Ii8 <0xC6, MRM0m, (outs), (ins i8mem :$dst, i8imm :$src),
1647 "mov{b}\t{$src, $dst|$dst, $src}",
1648 [(store (i8 imm_su:$src), addr:$dst)]>;
1649 def MOV16mi : Ii16<0xC7, MRM0m, (outs), (ins i16mem:$dst, i16imm:$src),
1650 "mov{w}\t{$src, $dst|$dst, $src}",
1651 [(store (i16 imm_su:$src), addr:$dst)]>, OpSize16;
1652 def MOV32mi : Ii32<0xC7, MRM0m, (outs), (ins i32mem:$dst, i32imm:$src),
1653 "mov{l}\t{$src, $dst|$dst, $src}",
1654 [(store (i32 imm_su:$src), addr:$dst)]>, OpSize32;
1655 def MOV64mi32 : RIi32S<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src),
1656 "mov{q}\t{$src, $dst|$dst, $src}",
1657 [(store i64immSExt32_su:$src, addr:$dst)]>,
1658 Requires<[In64BitMode]>;
// Select plain MOVs for relocatable immediates (symbol addresses etc.).
1661 def : Pat<(i32 relocImm:$src), (MOV32ri relocImm:$src)>;
1662 def : Pat<(i64 relocImm:$src), (MOV64ri relocImm:$src)>;
1664 def : Pat<(store (i8 relocImm8_su:$src), addr:$dst),
1665 (MOV8mi addr:$dst, relocImm8_su:$src)>;
1666 def : Pat<(store (i16 relocImm16_su:$src), addr:$dst),
1667 (MOV16mi addr:$dst, relocImm16_su:$src)>;
1668 def : Pat<(store (i32 relocImm32_su:$src), addr:$dst),
1669 (MOV32mi addr:$dst, relocImm32_su:$src)>;
1670 def : Pat<(store (i64 i64relocImmSExt32_su:$src), addr:$dst),
1671 (MOV64mi32 addr:$dst, i64immSExt32_su:$src)>;
1673 let hasSideEffects = 0 in {
1675 /// Memory offset versions of moves. The immediate is an address mode sized
1676 /// offset from the segment base.
// Three families by address size: 32-bit offset (AdSize32), 16-bit
// offset (AdSize16), and full 64-bit absolute ("movabs", AdSize64).
// NOTE(review): several records below appear truncated in this excerpt
// (trailing AdSize/Requires lines missing) — verify against upstream.
1677 let SchedRW = [WriteALU] in {
1678 let mayLoad = 1 in {
1680 def MOV8ao32 : Ii32<0xA0, RawFrmMemOffs, (outs), (ins offset32_8:$src),
1681 "mov{b}\t{$src, %al|al, $src}", []>,
1684 def MOV16ao32 : Ii32<0xA1, RawFrmMemOffs, (outs), (ins offset32_16:$src),
1685 "mov{w}\t{$src, %ax|ax, $src}", []>,
1688 def MOV32ao32 : Ii32<0xA1, RawFrmMemOffs, (outs), (ins offset32_32:$src),
1689 "mov{l}\t{$src, %eax|eax, $src}", []>,
1692 def MOV64ao32 : RIi32<0xA1, RawFrmMemOffs, (outs), (ins offset32_64:$src),
1693 "mov{q}\t{$src, %rax|rax, $src}", []>,
1697 def MOV8ao16 : Ii16<0xA0, RawFrmMemOffs, (outs), (ins offset16_8:$src),
1698 "mov{b}\t{$src, %al|al, $src}", []>, AdSize16;
1700 def MOV16ao16 : Ii16<0xA1, RawFrmMemOffs, (outs), (ins offset16_16:$src),
1701 "mov{w}\t{$src, %ax|ax, $src}", []>,
1704 def MOV32ao16 : Ii16<0xA1, RawFrmMemOffs, (outs), (ins offset16_32:$src),
1705 "mov{l}\t{$src, %eax|eax, $src}", []>,
// Store forms: accumulator to memory offset.
1708 let mayStore = 1 in {
1710 def MOV8o32a : Ii32<0xA2, RawFrmMemOffs, (outs), (ins offset32_8:$dst),
1711 "mov{b}\t{%al, $dst|$dst, al}", []>, AdSize32;
1713 def MOV16o32a : Ii32<0xA3, RawFrmMemOffs, (outs), (ins offset32_16:$dst),
1714 "mov{w}\t{%ax, $dst|$dst, ax}", []>,
1717 def MOV32o32a : Ii32<0xA3, RawFrmMemOffs, (outs), (ins offset32_32:$dst),
1718 "mov{l}\t{%eax, $dst|$dst, eax}", []>,
1721 def MOV64o32a : RIi32<0xA3, RawFrmMemOffs, (outs), (ins offset32_64:$dst),
1722 "mov{q}\t{%rax, $dst|$dst, rax}", []>,
1726 def MOV8o16a : Ii16<0xA2, RawFrmMemOffs, (outs), (ins offset16_8:$dst),
1727 "mov{b}\t{%al, $dst|$dst, al}", []>, AdSize16;
1729 def MOV16o16a : Ii16<0xA3, RawFrmMemOffs, (outs), (ins offset16_16:$dst),
1730 "mov{w}\t{%ax, $dst|$dst, ax}", []>,
1733 def MOV32o16a : Ii16<0xA3, RawFrmMemOffs, (outs), (ins offset16_32:$dst),
1734 "mov{l}\t{%eax, $dst|$dst, eax}", []>,
1738 // These forms all have full 64-bit absolute addresses in their instructions
1739 // and use the movabs mnemonic to indicate this specific form.
1740 let mayLoad = 1 in {
1742 def MOV8ao64 : Ii64<0xA0, RawFrmMemOffs, (outs), (ins offset64_8:$src),
1743 "movabs{b}\t{$src, %al|al, $src}", []>,
1746 def MOV16ao64 : Ii64<0xA1, RawFrmMemOffs, (outs), (ins offset64_16:$src),
1747 "movabs{w}\t{$src, %ax|ax, $src}", []>,
1750 def MOV32ao64 : Ii64<0xA1, RawFrmMemOffs, (outs), (ins offset64_32:$src),
1751 "movabs{l}\t{$src, %eax|eax, $src}", []>,
1754 def MOV64ao64 : RIi64<0xA1, RawFrmMemOffs, (outs), (ins offset64_64:$src),
1755 "movabs{q}\t{$src, %rax|rax, $src}", []>,
1759 let mayStore = 1 in {
1761 def MOV8o64a : Ii64<0xA2, RawFrmMemOffs, (outs), (ins offset64_8:$dst),
1762 "movabs{b}\t{%al, $dst|$dst, al}", []>,
1765 def MOV16o64a : Ii64<0xA3, RawFrmMemOffs, (outs), (ins offset64_16:$dst),
1766 "movabs{w}\t{%ax, $dst|$dst, ax}", []>,
1769 def MOV32o64a : Ii64<0xA3, RawFrmMemOffs, (outs), (ins offset64_32:$dst),
1770 "movabs{l}\t{%eax, $dst|$dst, eax}", []>,
1773 def MOV64o64a : RIi64<0xA3, RawFrmMemOffs, (outs), (ins offset64_64:$dst),
1774 "movabs{q}\t{%rax, $dst|$dst, rax}", []>,
1778 } // hasSideEffects = 0
// Reversed-encoding register moves (8A/8B, MRMSrcReg).  Disassembler-only
// duplicates of MOV*rr; FoldGenData maps them back to the canonical form.
1780 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
1781 SchedRW = [WriteMove], isMoveReg = 1 in {
1782 def MOV8rr_REV : I<0x8A, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src),
1783 "mov{b}\t{$src, $dst|$dst, $src}", []>,
1784 FoldGenData<"MOV8rr">;
1785 def MOV16rr_REV : I<0x8B, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
1786 "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize16,
1787 FoldGenData<"MOV16rr">;
1788 def MOV32rr_REV : I<0x8B, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
1789 "mov{l}\t{$src, $dst|$dst, $src}", []>, OpSize32,
1790 FoldGenData<"MOV32rr">;
1791 def MOV64rr_REV : RI<0x8B, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
1792 "mov{q}\t{$src, $dst|$dst, $src}", []>,
1793 FoldGenData<"MOV64rr">;
1796 // Reversed version with ".s" suffix for GAS compatibility.
1797 def : InstAlias<"mov{b}.s\t{$src, $dst|$dst, $src}",
1798 (MOV8rr_REV GR8:$dst, GR8:$src), 0>;
1799 def : InstAlias<"mov{w}.s\t{$src, $dst|$dst, $src}",
1800 (MOV16rr_REV GR16:$dst, GR16:$src), 0>;
1801 def : InstAlias<"mov{l}.s\t{$src, $dst|$dst, $src}",
1802 (MOV32rr_REV GR32:$dst, GR32:$src), 0>;
1803 def : InstAlias<"mov{q}.s\t{$src, $dst|$dst, $src}",
1804 (MOV64rr_REV GR64:$dst, GR64:$src), 0>;
// Suffix-less AT&T aliases; width is inferred from the register operands.
1805 def : InstAlias<"mov.s\t{$src, $dst|$dst, $src}",
1806 (MOV8rr_REV GR8:$dst, GR8:$src), 0, "att">;
1807 def : InstAlias<"mov.s\t{$src, $dst|$dst, $src}",
1808 (MOV16rr_REV GR16:$dst, GR16:$src), 0, "att">;
1809 def : InstAlias<"mov.s\t{$src, $dst|$dst, $src}",
1810 (MOV32rr_REV GR32:$dst, GR32:$src), 0, "att">;
1811 def : InstAlias<"mov.s\t{$src, $dst|$dst, $src}",
1812 (MOV64rr_REV GR64:$dst, GR64:$src), 0, "att">;
// Load forms.  canFoldAsLoad lets the load fold into consuming
// instructions; rematerializable because reloading is side-effect free.
1814 let canFoldAsLoad = 1, isReMaterializable = 1, SchedRW = [WriteLoad] in {
1815 def MOV8rm : I<0x8A, MRMSrcMem, (outs GR8 :$dst), (ins i8mem :$src),
1816 "mov{b}\t{$src, $dst|$dst, $src}",
1817 [(set GR8:$dst, (loadi8 addr:$src))]>;
1818 def MOV16rm : I<0x8B, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
1819 "mov{w}\t{$src, $dst|$dst, $src}",
1820 [(set GR16:$dst, (loadi16 addr:$src))]>, OpSize16;
1821 def MOV32rm : I<0x8B, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
1822 "mov{l}\t{$src, $dst|$dst, $src}",
1823 [(set GR32:$dst, (loadi32 addr:$src))]>, OpSize32;
1824 def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
1825 "mov{q}\t{$src, $dst|$dst, $src}",
1826 [(set GR64:$dst, (load addr:$src))]>;
// Store forms: register to memory.
1829 let SchedRW = [WriteStore] in {
1830 def MOV8mr : I<0x88, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src),
1831 "mov{b}\t{$src, $dst|$dst, $src}",
1832 [(store GR8:$src, addr:$dst)]>;
1833 def MOV16mr : I<0x89, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
1834 "mov{w}\t{$src, $dst|$dst, $src}",
1835 [(store GR16:$src, addr:$dst)]>, OpSize16;
1836 def MOV32mr : I<0x89, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
1837 "mov{l}\t{$src, $dst|$dst, $src}",
1838 [(store GR32:$src, addr:$dst)]>, OpSize32;
1839 def MOV64mr : RI<0x89, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
1840 "mov{q}\t{$src, $dst|$dst, $src}",
1841 [(store GR64:$src, addr:$dst)]>;
1844 // Versions of MOV8rr, MOV8mr, and MOV8rm that use i8mem_NOREX and GR8_NOREX so
1845 // that they can be used for copying and storing h registers, which can't be
1846 // encoded when a REX prefix is present.
1847 let isCodeGenOnly = 1 in {
1848 let hasSideEffects = 0, isMoveReg = 1 in
1849 def MOV8rr_NOREX : I<0x88, MRMDestReg,
1850 (outs GR8_NOREX:$dst), (ins GR8_NOREX:$src),
1851 "mov{b}\t{$src, $dst|$dst, $src}", []>,
1853 let mayStore = 1, hasSideEffects = 0 in
1854 def MOV8mr_NOREX : I<0x88, MRMDestMem,
1855 (outs), (ins i8mem_NOREX:$dst, GR8_NOREX:$src),
1856 "mov{b}\t{$src, $dst|$dst, $src}", []>,
1857 Sched<[WriteStore]>;
1858 let mayLoad = 1, hasSideEffects = 0,
1859 canFoldAsLoad = 1, isReMaterializable = 1 in
1860 def MOV8rm_NOREX : I<0x8A, MRMSrcMem,
1861 (outs GR8_NOREX:$dst), (ins i8mem_NOREX:$src),
1862 "mov{b}\t{$src, $dst|$dst, $src}", []>,
1867 // Condition code ops, incl. set if equal/not equal/...
// SAHF/LAHF shuttle the low flag byte between AH and EFLAGS; gated on
// HasLAHFSAHF because they are not universally available in 64-bit mode.
1868 let SchedRW = [WriteLAHFSAHF] in {
1869 let Defs = [EFLAGS], Uses = [AH], hasSideEffects = 0 in
1870 def SAHF : I<0x9E, RawFrm, (outs), (ins), "sahf", []>, // flags = AH
1871 Requires<[HasLAHFSAHF]>;
1872 let Defs = [AH], Uses = [EFLAGS], hasSideEffects = 0 in
1873 def LAHF : I<0x9F, RawFrm, (outs), (ins), "lahf", []>, // AH = flags
1874 Requires<[HasLAHFSAHF]>;
1877 //===----------------------------------------------------------------------===//
1878 // Bit tests instructions: BT, BTS, BTR, BTC.
1880 let Defs = [EFLAGS] in {
// BT reg,reg: tests the selected bit into CF (modeled as EFLAGS output).
1881 let SchedRW = [WriteBitTest] in {
1882 def BT16rr : I<0xA3, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2),
1883 "bt{w}\t{$src2, $src1|$src1, $src2}",
1884 [(set EFLAGS, (X86bt GR16:$src1, GR16:$src2))]>,
1885 OpSize16, TB, NotMemoryFoldable;
1886 def BT32rr : I<0xA3, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2),
1887 "bt{l}\t{$src2, $src1|$src1, $src2}",
1888 [(set EFLAGS, (X86bt GR32:$src1, GR32:$src2))]>,
1889 OpSize32, TB, NotMemoryFoldable;
1890 def BT64rr : RI<0xA3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
1891 "bt{q}\t{$src2, $src1|$src1, $src2}",
1892 [(set EFLAGS, (X86bt GR64:$src1, GR64:$src2))]>, TB,
1896 // Unlike with the register+register form, the memory+register form of the
1897 // bt instruction does not ignore the high bits of the index. From ISel's
1898 // perspective, this is pretty bizarre. Make these instructions disassembly
1899 // only for now. These instructions are also slow on modern CPUs so that's
1900 // another reason to avoid generating them.
1902 let mayLoad = 1, hasSideEffects = 0, SchedRW = [WriteBitTestRegLd] in {
1903 def BT16mr : I<0xA3, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
1904 "bt{w}\t{$src2, $src1|$src1, $src2}",
1905 []>, OpSize16, TB, NotMemoryFoldable;
1906 def BT32mr : I<0xA3, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
1907 "bt{l}\t{$src2, $src1|$src1, $src2}",
1908 []>, OpSize32, TB, NotMemoryFoldable;
1909 def BT64mr : RI<0xA3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
1910 "bt{q}\t{$src2, $src1|$src1, $src2}",
1911 []>, TB, NotMemoryFoldable;
// BT reg,imm8 (0F BA /4).
1914 let SchedRW = [WriteBitTest] in {
1915 def BT16ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR16:$src1, i16u8imm:$src2),
1916 "bt{w}\t{$src2, $src1|$src1, $src2}",
1917 [(set EFLAGS, (X86bt GR16:$src1, imm:$src2))]>,
1919 def BT32ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR32:$src1, i32u8imm:$src2),
1920 "bt{l}\t{$src2, $src1|$src1, $src2}",
1921 [(set EFLAGS, (X86bt GR32:$src1, imm:$src2))]>,
1923 def BT64ri8 : RIi8<0xBA, MRM4r, (outs), (ins GR64:$src1, i64u8imm:$src2),
1924 "bt{q}\t{$src2, $src1|$src1, $src2}",
1925 [(set EFLAGS, (X86bt GR64:$src1, imm:$src2))]>, TB;
1928 // Note that these instructions aren't slow because that only applies when the
1929 // other operand is in a register. When it's an immediate, bt is still fast.
1930 let SchedRW = [WriteBitTestImmLd] in {
1931 def BT16mi8 : Ii8<0xBA, MRM4m, (outs), (ins i16mem:$src1, i16u8imm:$src2),
1932 "bt{w}\t{$src2, $src1|$src1, $src2}",
1933 [(set EFLAGS, (X86bt (loadi16 addr:$src1),
1936 def BT32mi8 : Ii8<0xBA, MRM4m, (outs), (ins i32mem:$src1, i32u8imm:$src2),
1937 "bt{l}\t{$src2, $src1|$src1, $src2}",
1938 [(set EFLAGS, (X86bt (loadi32 addr:$src1),
1941 def BT64mi8 : RIi8<0xBA, MRM4m, (outs), (ins i64mem:$src1, i64u8imm:$src2),
1942 "bt{q}\t{$src2, $src1|$src1, $src2}",
1943 [(set EFLAGS, (X86bt (loadi64 addr:$src1),
1945 Requires<[In64BitMode]>;
// Bit test-and-modify: BTC (complement), BTR (reset), BTS (set).
// All are pattern-less here; each comes in reg/reg, mem/reg, reg/imm8,
// and mem/imm8 forms.  Register forms tie $src1 to $dst.
1948 let hasSideEffects = 0 in {
1949 let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
1950 def BTC16rr : I<0xBB, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
1951 "btc{w}\t{$src2, $src1|$src1, $src2}", []>,
1952 OpSize16, TB, NotMemoryFoldable;
1953 def BTC32rr : I<0xBB, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
1954 "btc{l}\t{$src2, $src1|$src1, $src2}", []>,
1955 OpSize32, TB, NotMemoryFoldable;
1956 def BTC64rr : RI<0xBB, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1957 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
1961 let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetRegRMW] in {
1962 def BTC16mr : I<0xBB, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
1963 "btc{w}\t{$src2, $src1|$src1, $src2}", []>,
1964 OpSize16, TB, NotMemoryFoldable;
1965 def BTC32mr : I<0xBB, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
1966 "btc{l}\t{$src2, $src1|$src1, $src2}", []>,
1967 OpSize32, TB, NotMemoryFoldable;
1968 def BTC64mr : RI<0xBB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
1969 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
1973 let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
1974 def BTC16ri8 : Ii8<0xBA, MRM7r, (outs GR16:$dst), (ins GR16:$src1, i16u8imm:$src2),
1975 "btc{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize16, TB;
1976 def BTC32ri8 : Ii8<0xBA, MRM7r, (outs GR32:$dst), (ins GR32:$src1, i32u8imm:$src2),
1977 "btc{l}\t{$src2, $src1|$src1, $src2}", []>, OpSize32, TB;
1978 def BTC64ri8 : RIi8<0xBA, MRM7r, (outs GR64:$dst), (ins GR64:$src1, i64u8imm:$src2),
1979 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
1982 let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetImmRMW] in {
1983 def BTC16mi8 : Ii8<0xBA, MRM7m, (outs), (ins i16mem:$src1, i16u8imm:$src2),
1984 "btc{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize16, TB;
1985 def BTC32mi8 : Ii8<0xBA, MRM7m, (outs), (ins i32mem:$src1, i32u8imm:$src2),
1986 "btc{l}\t{$src2, $src1|$src1, $src2}", []>, OpSize32, TB;
1987 def BTC64mi8 : RIi8<0xBA, MRM7m, (outs), (ins i64mem:$src1, i64u8imm:$src2),
1988 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
1989 Requires<[In64BitMode]>;
// BTR: bit test and reset (0F B3, imm form 0F BA /6).
1992 let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
1993 def BTR16rr : I<0xB3, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
1994 "btr{w}\t{$src2, $src1|$src1, $src2}", []>,
1995 OpSize16, TB, NotMemoryFoldable;
1996 def BTR32rr : I<0xB3, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
1997 "btr{l}\t{$src2, $src1|$src1, $src2}", []>,
1998 OpSize32, TB, NotMemoryFoldable;
1999 def BTR64rr : RI<0xB3, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
2000 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
2004 let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetRegRMW] in {
2005 def BTR16mr : I<0xB3, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
2006 "btr{w}\t{$src2, $src1|$src1, $src2}", []>,
2007 OpSize16, TB, NotMemoryFoldable;
2008 def BTR32mr : I<0xB3, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
2009 "btr{l}\t{$src2, $src1|$src1, $src2}", []>,
2010 OpSize32, TB, NotMemoryFoldable;
2011 def BTR64mr : RI<0xB3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
2012 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
2016 let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
2017 def BTR16ri8 : Ii8<0xBA, MRM6r, (outs GR16:$dst), (ins GR16:$src1, i16u8imm:$src2),
2018 "btr{w}\t{$src2, $src1|$src1, $src2}", []>,
2020 def BTR32ri8 : Ii8<0xBA, MRM6r, (outs GR32:$dst), (ins GR32:$src1, i32u8imm:$src2),
2021 "btr{l}\t{$src2, $src1|$src1, $src2}", []>,
2023 def BTR64ri8 : RIi8<0xBA, MRM6r, (outs GR64:$dst), (ins GR64:$src1, i64u8imm:$src2),
2024 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
2027 let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetImmRMW] in {
2028 def BTR16mi8 : Ii8<0xBA, MRM6m, (outs), (ins i16mem:$src1, i16u8imm:$src2),
2029 "btr{w}\t{$src2, $src1|$src1, $src2}", []>,
2031 def BTR32mi8 : Ii8<0xBA, MRM6m, (outs), (ins i32mem:$src1, i32u8imm:$src2),
2032 "btr{l}\t{$src2, $src1|$src1, $src2}", []>,
2034 def BTR64mi8 : RIi8<0xBA, MRM6m, (outs), (ins i64mem:$src1, i64u8imm:$src2),
2035 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
2036 Requires<[In64BitMode]>;
// BTS: bit test and set (0F AB, imm form 0F BA /5).
2039 let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
2040 def BTS16rr : I<0xAB, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
2041 "bts{w}\t{$src2, $src1|$src1, $src2}", []>,
2042 OpSize16, TB, NotMemoryFoldable;
2043 def BTS32rr : I<0xAB, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
2044 "bts{l}\t{$src2, $src1|$src1, $src2}", []>,
2045 OpSize32, TB, NotMemoryFoldable;
2046 def BTS64rr : RI<0xAB, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
2047 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
2051 let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetRegRMW] in {
2052 def BTS16mr : I<0xAB, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
2053 "bts{w}\t{$src2, $src1|$src1, $src2}", []>,
2054 OpSize16, TB, NotMemoryFoldable;
2055 def BTS32mr : I<0xAB, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
2056 "bts{l}\t{$src2, $src1|$src1, $src2}", []>,
2057 OpSize32, TB, NotMemoryFoldable;
2058 def BTS64mr : RI<0xAB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
2059 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
2063 let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
2064 def BTS16ri8 : Ii8<0xBA, MRM5r, (outs GR16:$dst), (ins GR16:$src1, i16u8imm:$src2),
2065 "bts{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize16, TB;
2066 def BTS32ri8 : Ii8<0xBA, MRM5r, (outs GR32:$dst), (ins GR32:$src1, i32u8imm:$src2),
2067 "bts{l}\t{$src2, $src1|$src1, $src2}", []>, OpSize32, TB;
2068 def BTS64ri8 : RIi8<0xBA, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i64u8imm:$src2),
2069 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
2072 let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetImmRMW] in {
2073 def BTS16mi8 : Ii8<0xBA, MRM5m, (outs), (ins i16mem:$src1, i16u8imm:$src2),
2074 "bts{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize16, TB;
2075 def BTS32mi8 : Ii8<0xBA, MRM5m, (outs), (ins i32mem:$src1, i32u8imm:$src2),
2076 "bts{l}\t{$src2, $src1|$src1, $src2}", []>, OpSize32, TB;
2077 def BTS64mi8 : RIi8<0xBA, MRM5m, (outs), (ins i64mem:$src1, i64u8imm:$src2),
2078 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
2079 Requires<[In64BitMode]>;
2081 } // hasSideEffects = 0
2082 } // Defs = [EFLAGS]
2085 //===----------------------------------------------------------------------===//
2089 // Atomic swap. These are just normal xchg instructions. But since a memory
2090 // operand is referenced, the atomicity is ensured.
// Multiclass parameters: opc8 = 8-bit opcode, opc = 16/32/64-bit opcode,
// mnemonic = assembly name, frag = PatFrag name prefix; the width suffix
// (_8/_16/_32/_64) is appended per instantiation via !cast.
2091 multiclass ATOMIC_SWAP<bits<8> opc8, bits<8> opc, string mnemonic, string frag> {
2092 let Constraints = "$val = $dst", SchedRW = [WriteALULd, WriteRMW] in {
2093 def NAME#8rm : I<opc8, MRMSrcMem, (outs GR8:$dst),
2094 (ins GR8:$val, i8mem:$ptr),
2095 !strconcat(mnemonic, "{b}\t{$val, $ptr|$ptr, $val}"),
2098 (!cast<PatFrag>(frag # "_8") addr:$ptr, GR8:$val))]>;
2099 def NAME#16rm : I<opc, MRMSrcMem, (outs GR16:$dst),
2100 (ins GR16:$val, i16mem:$ptr),
2101 !strconcat(mnemonic, "{w}\t{$val, $ptr|$ptr, $val}"),
2104 (!cast<PatFrag>(frag # "_16") addr:$ptr, GR16:$val))]>,
2106 def NAME#32rm : I<opc, MRMSrcMem, (outs GR32:$dst),
2107 (ins GR32:$val, i32mem:$ptr),
2108 !strconcat(mnemonic, "{l}\t{$val, $ptr|$ptr, $val}"),
2111 (!cast<PatFrag>(frag # "_32") addr:$ptr, GR32:$val))]>,
2113 def NAME#64rm : RI<opc, MRMSrcMem, (outs GR64:$dst),
2114 (ins GR64:$val, i64mem:$ptr),
2115 !strconcat(mnemonic, "{q}\t{$val, $ptr|$ptr, $val}"),
2118 (!cast<PatFrag>(frag # "_64") addr:$ptr, GR64:$val))]>;
// Instantiate as XCHG (86/87) matching the atomic_swap_* fragments.
2122 defm XCHG : ATOMIC_SWAP<0x86, 0x87, "xchg", "atomic_swap">, NotMemoryFoldable;
2124 // Swap between registers.
// Register-register xchg: both operands are read and written, so both
// are tied ($src1=$dst1, $src2=$dst2).  No patterns.
2125 let SchedRW = [WriteXCHG] in {
2126 let Constraints = "$src1 = $dst1, $src2 = $dst2", hasSideEffects = 0 in {
2127 def XCHG8rr : I<0x86, MRMSrcReg, (outs GR8:$dst1, GR8:$dst2),
2128 (ins GR8:$src1, GR8:$src2),
2129 "xchg{b}\t{$src2, $src1|$src1, $src2}", []>, NotMemoryFoldable;
2130 def XCHG16rr : I<0x87, MRMSrcReg, (outs GR16:$dst1, GR16:$dst2),
2131 (ins GR16:$src1, GR16:$src2),
2132 "xchg{w}\t{$src2, $src1|$src1, $src2}", []>,
2133 OpSize16, NotMemoryFoldable;
2134 def XCHG32rr : I<0x87, MRMSrcReg, (outs GR32:$dst1, GR32:$dst2),
2135 (ins GR32:$src1, GR32:$src2),
2136 "xchg{l}\t{$src2, $src1|$src1, $src2}", []>,
2137 OpSize32, NotMemoryFoldable;
2138 def XCHG64rr : RI<0x87, MRMSrcReg, (outs GR64:$dst1, GR64:$dst2),
2139 (ins GR64:$src1 ,GR64:$src2),
2140 "xchg{q}\t{$src2, $src1|$src1, $src2}", []>, NotMemoryFoldable;
2143 // Swap between EAX and other registers.
// Short-form encodings (90+r) exchanging the accumulator with $src.
2144 let Constraints = "$src = $dst", hasSideEffects = 0 in {
2145 let Uses = [AX], Defs = [AX] in
2146 def XCHG16ar : I<0x90, AddRegFrm, (outs GR16:$dst), (ins GR16:$src),
2147 "xchg{w}\t{$src, %ax|ax, $src}", []>, OpSize16;
2148 let Uses = [EAX], Defs = [EAX] in
2149 def XCHG32ar : I<0x90, AddRegFrm, (outs GR32:$dst), (ins GR32:$src),
2150 "xchg{l}\t{$src, %eax|eax, $src}", []>, OpSize32;
2151 let Uses = [RAX], Defs = [RAX] in
2152 def XCHG64ar : RI<0x90, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
2153 "xchg{q}\t{$src, %rax|rax, $src}", []>;
// XADD: exchange and add (0F C0/C1).  Register forms tie both operands;
// memory forms are RMW.  All set EFLAGS; no ISel patterns here.
2157 let hasSideEffects = 0, Constraints = "$src1 = $dst1, $src2 = $dst2",
2158 Defs = [EFLAGS], SchedRW = [WriteXCHG] in {
2159 def XADD8rr : I<0xC0, MRMDestReg, (outs GR8:$dst1, GR8:$dst2),
2160 (ins GR8:$src1, GR8:$src2),
2161 "xadd{b}\t{$src2, $src1|$src1, $src2}", []>, TB;
2162 def XADD16rr : I<0xC1, MRMDestReg, (outs GR16:$dst1, GR16:$dst2),
2163 (ins GR16:$src1, GR16:$src2),
2164 "xadd{w}\t{$src2, $src1|$src1, $src2}", []>, TB, OpSize16;
2165 def XADD32rr : I<0xC1, MRMDestReg, (outs GR32:$dst1, GR32:$dst2),
2166 (ins GR32:$src1, GR32:$src2),
2167 "xadd{l}\t{$src2, $src1|$src1, $src2}", []>, TB, OpSize32;
2168 def XADD64rr : RI<0xC1, MRMDestReg, (outs GR64:$dst1, GR64:$dst2),
2169 (ins GR64:$src1, GR64:$src2),
2170 "xadd{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
// Memory forms: $val is both the addend in and the old value out.
2173 let mayLoad = 1, mayStore = 1, hasSideEffects = 0, Constraints = "$val = $dst",
2174 Defs = [EFLAGS], SchedRW = [WriteALULd, WriteRMW] in {
2175 def XADD8rm : I<0xC0, MRMSrcMem, (outs GR8:$dst),
2176 (ins GR8:$val, i8mem:$ptr),
2177 "xadd{b}\t{$val, $ptr|$ptr, $val}", []>, TB;
2178 def XADD16rm : I<0xC1, MRMSrcMem, (outs GR16:$dst),
2179 (ins GR16:$val, i16mem:$ptr),
2180 "xadd{w}\t{$val, $ptr|$ptr, $val}", []>, TB,
2182 def XADD32rm : I<0xC1, MRMSrcMem, (outs GR32:$dst),
2183 (ins GR32:$val, i32mem:$ptr),
2184 "xadd{l}\t{$val, $ptr|$ptr, $val}", []>, TB,
2186 def XADD64rm : RI<0xC1, MRMSrcMem, (outs GR64:$dst),
2187 (ins GR64:$val, i64mem:$ptr),
2188 "xadd{q}\t{$val, $ptr|$ptr, $val}", []>, TB;
// CMPXCHG: compare-and-exchange against the accumulator.  The implicit
// accumulator of the matching width is both read and written, and
// EFLAGS is set; no ISel patterns here.
2192 let SchedRW = [WriteCMPXCHG], hasSideEffects = 0 in {
2193 let Defs = [AL, EFLAGS], Uses = [AL] in
2194 def CMPXCHG8rr : I<0xB0, MRMDestReg, (outs GR8:$dst), (ins GR8:$src),
2195 "cmpxchg{b}\t{$src, $dst|$dst, $src}", []>, TB,
2197 let Defs = [AX, EFLAGS], Uses = [AX] in
2198 def CMPXCHG16rr : I<0xB1, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
2199 "cmpxchg{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize16,
2201 let Defs = [EAX, EFLAGS], Uses = [EAX] in
2202 def CMPXCHG32rr : I<0xB1, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
2203 "cmpxchg{l}\t{$src, $dst|$dst, $src}", []>, TB, OpSize32,
2205 let Defs = [RAX, EFLAGS], Uses = [RAX] in
2206 def CMPXCHG64rr : RI<0xB1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
2207 "cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB,
2209 } // SchedRW, hasSideEffects
// Memory (RMW) forms.
2211 let SchedRW = [WriteCMPXCHGRMW], mayLoad = 1, mayStore = 1,
2212 hasSideEffects = 0 in {
2213 let Defs = [AL, EFLAGS], Uses = [AL] in
2214 def CMPXCHG8rm : I<0xB0, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src),
2215 "cmpxchg{b}\t{$src, $dst|$dst, $src}", []>, TB,
2217 let Defs = [AX, EFLAGS], Uses = [AX] in
2218 def CMPXCHG16rm : I<0xB1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
2219 "cmpxchg{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize16,
2221 let Defs = [EAX, EFLAGS], Uses = [EAX] in
2222 def CMPXCHG32rm : I<0xB1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2223 "cmpxchg{l}\t{$src, $dst|$dst, $src}", []>, TB, OpSize32,
2225 let Defs = [RAX, EFLAGS], Uses = [RAX] in
2226 def CMPXCHG64rm : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
2227 "cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB,
// 8-byte / 16-byte wide forms using register pairs (EDX:EAX / RDX:RAX
// compared, ECX:EBX / RCX:RBX as the replacement value).
2230 let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX] in
2231 def CMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i64mem:$dst),
2232 "cmpxchg8b\t$dst", []>, TB, Requires<[HasCmpxchg8b]>;
2234 let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX] in
2235 // NOTE: In64BitMode check needed for the AssemblerPredicate.
2236 def CMPXCHG16B : RI<0xC7, MRM1m, (outs), (ins i128mem:$dst),
2237 "cmpxchg16b\t$dst", []>,
2238 TB, Requires<[HasCmpxchg16b,In64BitMode]>;
2239 } // SchedRW, mayLoad, mayStore, hasSideEffects
2242 // Lock instruction prefix
// Standalone prefix bytes modeled as instructions so the assembler and
// disassembler can handle them independently of what follows.
2243 let SchedRW = [WriteMicrocoded] in
2244 def LOCK_PREFIX : I<0xF0, PrefixByte, (outs), (ins), "lock", []>;
2246 let SchedRW = [WriteNop] in {
2248 // Rex64 instruction prefix
2249 def REX64_PREFIX : I<0x48, PrefixByte, (outs), (ins), "rex64", []>,
2250 Requires<[In64BitMode]>;
2252 // Data16 instruction prefix
2253 def DATA16_PREFIX : I<0x66, PrefixByte, (outs), (ins), "data16", []>;
2256 // Repeat string operation instruction prefixes
// REP/REPNE decrement ECX and honor DF, hence the Defs/Uses.
2257 let Defs = [ECX], Uses = [ECX,DF], SchedRW = [WriteMicrocoded] in {
2258 // Repeat (used with INS, OUTS, MOVS, LODS and STOS)
2259 def REP_PREFIX : I<0xF3, PrefixByte, (outs), (ins), "rep", []>;
2260 // Repeat while not equal (used with CMPS and SCAS)
2261 def REPNE_PREFIX : I<0xF2, PrefixByte, (outs), (ins), "repne", []>;
2264 // String manipulation instructions
// LODS: load from [ESI] into the accumulator, advancing ESI per DF.
2265 let SchedRW = [WriteMicrocoded] in {
2266 let Defs = [AL,ESI], Uses = [ESI,DF] in
2267 def LODSB : I<0xAC, RawFrmSrc, (outs), (ins srcidx8:$src),
2268 "lodsb\t{$src, %al|al, $src}", []>;
2269 let Defs = [AX,ESI], Uses = [ESI,DF] in
2270 def LODSW : I<0xAD, RawFrmSrc, (outs), (ins srcidx16:$src),
2271 "lodsw\t{$src, %ax|ax, $src}", []>, OpSize16;
2272 let Defs = [EAX,ESI], Uses = [ESI,DF] in
2273 def LODSL : I<0xAD, RawFrmSrc, (outs), (ins srcidx32:$src),
2274 "lods{l|d}\t{$src, %eax|eax, $src}", []>, OpSize32;
2275 let Defs = [RAX,ESI], Uses = [ESI,DF] in
2276 def LODSQ : RI<0xAD, RawFrmSrc, (outs), (ins srcidx64:$src),
2277 "lodsq\t{$src, %rax|rax, $src}", []>,
2278 Requires<[In64BitMode]>;
// OUTS: write [ESI] to the port in DX.
2281 let SchedRW = [WriteSystem] in {
2282 let Defs = [ESI], Uses = [DX,ESI,DF] in {
2283 def OUTSB : I<0x6E, RawFrmSrc, (outs), (ins srcidx8:$src),
2284 "outsb\t{$src, %dx|dx, $src}", []>;
2285 def OUTSW : I<0x6F, RawFrmSrc, (outs), (ins srcidx16:$src),
2286 "outsw\t{$src, %dx|dx, $src}", []>, OpSize16;
2287 def OUTSL : I<0x6F, RawFrmSrc, (outs), (ins srcidx32:$src),
2288 "outs{l|d}\t{$src, %dx|dx, $src}", []>, OpSize32;
// INS: read from the port in DX to [EDI].
2291 let Defs = [EDI], Uses = [DX,EDI,DF] in {
2292 def INSB : I<0x6C, RawFrmDst, (outs), (ins dstidx8:$dst),
2293 "insb\t{%dx, $dst|$dst, dx}", []>;
2294 def INSW : I<0x6D, RawFrmDst, (outs), (ins dstidx16:$dst),
2295 "insw\t{%dx, $dst|$dst, dx}", []>, OpSize16;
2296 def INSL : I<0x6D, RawFrmDst, (outs), (ins dstidx32:$dst),
2297 "ins{l|d}\t{%dx, $dst|$dst, dx}", []>, OpSize32;
2301 // EFLAGS management instructions.
// CLC/STC/CMC clear, set, and complement the carry flag. EFLAGS is both
// defed and used so partial-flag dependencies are modeled correctly.
2302 let SchedRW = [WriteALU], Defs = [EFLAGS], Uses = [EFLAGS] in {
2303 def CLC : I<0xF8, RawFrm, (outs), (ins), "clc", []>;
2304 def STC : I<0xF9, RawFrm, (outs), (ins), "stc", []>;
2305 def CMC : I<0xF5, RawFrm, (outs), (ins), "cmc", []>;
2308 // DF management instructions.
// DF is modeled as a separate register from EFLAGS, so CLD/STD only def DF.
2309 let SchedRW = [WriteALU], Defs = [DF] in {
2310 def CLD : I<0xFC, RawFrm, (outs), (ins), "cld", []>;
2311 def STD : I<0xFD, RawFrm, (outs), (ins), "std", []>;
2314 // Table lookup instructions
// XLAT replaces AL with a byte loaded via AL and EBX (mayLoad, no pattern).
2315 let Uses = [AL,EBX], Defs = [AL], hasSideEffects = 0, mayLoad = 1 in
2316 def XLAT : I<0xD7, RawFrm, (outs), (ins), "xlatb", []>, Sched<[WriteLoad]>;
// BCD/ASCII adjust instructions. All are invalid in 64-bit mode
// (Requires<[Not64BitMode]>), microcoded, and have no ISel patterns.
2318 let SchedRW = [WriteMicrocoded] in {
2319 // ASCII Adjust After Addition
2320 let Uses = [AL,EFLAGS], Defs = [AX,EFLAGS], hasSideEffects = 0 in
2321 def AAA : I<0x37, RawFrm, (outs), (ins), "aaa", []>,
2322 Requires<[Not64BitMode]>;
2324 // ASCII Adjust AX Before Division
// AAD/AAM take the (normally 10) base as an explicit immediate byte.
2325 let Uses = [AX], Defs = [AX,EFLAGS], hasSideEffects = 0 in
2326 def AAD8i8 : Ii8<0xD5, RawFrm, (outs), (ins i8imm:$src),
2327 "aad\t$src", []>, Requires<[Not64BitMode]>;
2329 // ASCII Adjust AX After Multiply
2330 let Uses = [AL], Defs = [AX,EFLAGS], hasSideEffects = 0 in
2331 def AAM8i8 : Ii8<0xD4, RawFrm, (outs), (ins i8imm:$src),
2332 "aam\t$src", []>, Requires<[Not64BitMode]>;
2334 // ASCII Adjust AL After Subtraction - sets
2335 let Uses = [AL,EFLAGS], Defs = [AX,EFLAGS], hasSideEffects = 0 in
2336 def AAS : I<0x3F, RawFrm, (outs), (ins), "aas", []>,
2337 Requires<[Not64BitMode]>;
2339 // Decimal Adjust AL after Addition
2340 let Uses = [AL,EFLAGS], Defs = [AL,EFLAGS], hasSideEffects = 0 in
2341 def DAA : I<0x27, RawFrm, (outs), (ins), "daa", []>,
2342 Requires<[Not64BitMode]>;
2344 // Decimal Adjust AL after Subtraction
2345 let Uses = [AL,EFLAGS], Defs = [AL,EFLAGS], hasSideEffects = 0 in
2346 def DAS : I<0x2F, RawFrm, (outs), (ins), "das", []>,
2347 Requires<[Not64BitMode]>;
// Legacy protected-mode helpers, only encodable outside 64-bit mode.
2350 let SchedRW = [WriteSystem] in {
2351 // Check Array Index Against Bounds
2352 // Note: "bound" does not have reversed operands in at&t syntax.
2353 def BOUNDS16rm : I<0x62, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
2354 "bound\t$dst, $src", []>, OpSize16,
2355 Requires<[Not64BitMode]>;
2356 def BOUNDS32rm : I<0x62, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
2357 "bound\t$dst, $src", []>, OpSize32,
2358 Requires<[Not64BitMode]>;
2360 // Adjust RPL Field of Segment Selector
// In 64-bit mode opcode 0x63 is MOVSXD instead, hence Not64BitMode.
2361 def ARPL16rr : I<0x63, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
2362 "arpl\t{$src, $dst|$dst, $src}", []>,
2363 Requires<[Not64BitMode]>, NotMemoryFoldable;
2365 def ARPL16mr : I<0x63, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
2366 "arpl\t{$src, $dst|$dst, $src}", []>,
2367 Requires<[Not64BitMode]>, NotMemoryFoldable;
2370 //===----------------------------------------------------------------------===//
2371 // MOVBE Instructions
// Byte-swapping load/store: the rm forms select (bswap (load)), the mr forms
// select (store (bswap ...)), so ISel uses MOVBE instead of load+BSWAP.
2373 let Predicates = [HasMOVBE] in {
2374 let SchedRW = [WriteALULd] in {
2375 def MOVBE16rm : I<0xF0, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
2376 "movbe{w}\t{$src, $dst|$dst, $src}",
2377 [(set GR16:$dst, (bswap (loadi16 addr:$src)))]>,
2379 def MOVBE32rm : I<0xF0, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
2380 "movbe{l}\t{$src, $dst|$dst, $src}",
2381 [(set GR32:$dst, (bswap (loadi32 addr:$src)))]>,
2383 def MOVBE64rm : RI<0xF0, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
2384 "movbe{q}\t{$src, $dst|$dst, $src}",
2385 [(set GR64:$dst, (bswap (loadi64 addr:$src)))]>,
2388 let SchedRW = [WriteStore] in {
2389 def MOVBE16mr : I<0xF1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
2390 "movbe{w}\t{$src, $dst|$dst, $src}",
2391 [(store (bswap GR16:$src), addr:$dst)]>,
2393 def MOVBE32mr : I<0xF1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2394 "movbe{l}\t{$src, $dst|$dst, $src}",
2395 [(store (bswap GR32:$src), addr:$dst)]>,
2397 def MOVBE64mr : RI<0xF1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
2398 "movbe{q}\t{$src, $dst|$dst, $src}",
2399 [(store (bswap GR64:$src), addr:$dst)]>,
2404 //===----------------------------------------------------------------------===//
2405 // RDRAND Instruction
// Hardware random number generators. Each pattern produces two results: the
// destination register and EFLAGS (carried through the X86rdrand/X86rdseed
// SDNodes), so the flag result is visible to ISel.
2407 let Predicates = [HasRDRAND], Defs = [EFLAGS], SchedRW = [WriteSystem] in {
2408 def RDRAND16r : I<0xC7, MRM6r, (outs GR16:$dst), (ins),
2409 "rdrand{w}\t$dst", [(set GR16:$dst, EFLAGS, (X86rdrand))]>,
2411 def RDRAND32r : I<0xC7, MRM6r, (outs GR32:$dst), (ins),
2412 "rdrand{l}\t$dst", [(set GR32:$dst, EFLAGS, (X86rdrand))]>,
2414 def RDRAND64r : RI<0xC7, MRM6r, (outs GR64:$dst), (ins),
2415 "rdrand{q}\t$dst", [(set GR64:$dst, EFLAGS, (X86rdrand))]>,
2419 //===----------------------------------------------------------------------===//
2420 // RDSEED Instruction
// Same opcode byte as RDRAND but /7 instead of /6 in the ModRM reg field.
2422 let Predicates = [HasRDSEED], Defs = [EFLAGS], SchedRW = [WriteSystem] in {
2423 def RDSEED16r : I<0xC7, MRM7r, (outs GR16:$dst), (ins), "rdseed{w}\t$dst",
2424 [(set GR16:$dst, EFLAGS, (X86rdseed))]>, OpSize16, PS;
2425 def RDSEED32r : I<0xC7, MRM7r, (outs GR32:$dst), (ins), "rdseed{l}\t$dst",
2426 [(set GR32:$dst, EFLAGS, (X86rdseed))]>, OpSize32, PS;
2427 def RDSEED64r : RI<0xC7, MRM7r, (outs GR64:$dst), (ins), "rdseed{q}\t$dst",
2428 [(set GR64:$dst, EFLAGS, (X86rdseed))]>, PS;
2431 //===----------------------------------------------------------------------===//
2432 // LZCNT Instruction
// LZCNT selects the generic ctlz node; each pattern also records the
// implicit EFLAGS def. rr forms use WriteLZCNT, rm forms the folded variant.
2434 let Predicates = [HasLZCNT], Defs = [EFLAGS] in {
2435 def LZCNT16rr : I<0xBD, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
2436 "lzcnt{w}\t{$src, $dst|$dst, $src}",
2437 [(set GR16:$dst, (ctlz GR16:$src)), (implicit EFLAGS)]>,
2438 XS, OpSize16, Sched<[WriteLZCNT]>;
2439 def LZCNT16rm : I<0xBD, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
2440 "lzcnt{w}\t{$src, $dst|$dst, $src}",
2441 [(set GR16:$dst, (ctlz (loadi16 addr:$src))),
2442 (implicit EFLAGS)]>, XS, OpSize16, Sched<[WriteLZCNTLd]>;
2444 def LZCNT32rr : I<0xBD, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
2445 "lzcnt{l}\t{$src, $dst|$dst, $src}",
2446 [(set GR32:$dst, (ctlz GR32:$src)), (implicit EFLAGS)]>,
2447 XS, OpSize32, Sched<[WriteLZCNT]>;
2448 def LZCNT32rm : I<0xBD, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
2449 "lzcnt{l}\t{$src, $dst|$dst, $src}",
2450 [(set GR32:$dst, (ctlz (loadi32 addr:$src))),
2451 (implicit EFLAGS)]>, XS, OpSize32, Sched<[WriteLZCNTLd]>;
2453 def LZCNT64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
2454 "lzcnt{q}\t{$src, $dst|$dst, $src}",
2455 [(set GR64:$dst, (ctlz GR64:$src)), (implicit EFLAGS)]>,
2456 XS, Sched<[WriteLZCNT]>;
2457 def LZCNT64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
2458 "lzcnt{q}\t{$src, $dst|$dst, $src}",
2459 [(set GR64:$dst, (ctlz (loadi64 addr:$src))),
2460 (implicit EFLAGS)]>, XS, Sched<[WriteLZCNTLd]>;
2463 //===----------------------------------------------------------------------===//
// TZCNT (BMI1) mirrors LZCNT but selects cttz; gated on HasBMI.
2466 let Predicates = [HasBMI], Defs = [EFLAGS] in {
2467 def TZCNT16rr : I<0xBC, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
2468 "tzcnt{w}\t{$src, $dst|$dst, $src}",
2469 [(set GR16:$dst, (cttz GR16:$src)), (implicit EFLAGS)]>,
2470 XS, OpSize16, Sched<[WriteTZCNT]>;
2471 def TZCNT16rm : I<0xBC, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
2472 "tzcnt{w}\t{$src, $dst|$dst, $src}",
2473 [(set GR16:$dst, (cttz (loadi16 addr:$src))),
2474 (implicit EFLAGS)]>, XS, OpSize16, Sched<[WriteTZCNTLd]>;
2476 def TZCNT32rr : I<0xBC, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
2477 "tzcnt{l}\t{$src, $dst|$dst, $src}",
2478 [(set GR32:$dst, (cttz GR32:$src)), (implicit EFLAGS)]>,
2479 XS, OpSize32, Sched<[WriteTZCNT]>;
2480 def TZCNT32rm : I<0xBC, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
2481 "tzcnt{l}\t{$src, $dst|$dst, $src}",
2482 [(set GR32:$dst, (cttz (loadi32 addr:$src))),
2483 (implicit EFLAGS)]>, XS, OpSize32, Sched<[WriteTZCNTLd]>;
2485 def TZCNT64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
2486 "tzcnt{q}\t{$src, $dst|$dst, $src}",
2487 [(set GR64:$dst, (cttz GR64:$src)), (implicit EFLAGS)]>,
2488 XS, Sched<[WriteTZCNT]>;
2489 def TZCNT64rm : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
2490 "tzcnt{q}\t{$src, $dst|$dst, $src}",
2491 [(set GR64:$dst, (cttz (loadi64 addr:$src))),
2492 (implicit EFLAGS)]>, XS, Sched<[WriteTZCNTLd]>;
// Shared shape for the BMI1 BLSR/BLSMSK/BLSI instruction pairs (rr + rm).
// Defined with empty patterns; selection is done by the Pat<> records below
// in this file. hasSideEffects = 0 keeps the patternless defs schedulable.
2495 multiclass bmi_bls<string mnemonic, Format RegMRM, Format MemMRM,
2496 RegisterClass RC, X86MemOperand x86memop,
2497 X86FoldableSchedWrite sched> {
2498 let hasSideEffects = 0 in {
2499 def rr : I<0xF3, RegMRM, (outs RC:$dst), (ins RC:$src),
2500 !strconcat(mnemonic, "\t{$src, $dst|$dst, $src}"), []>,
2501 T8PS, VEX_4V, Sched<[sched]>;
2503 def rm : I<0xF3, MemMRM, (outs RC:$dst), (ins x86memop:$src),
2504 !strconcat(mnemonic, "\t{$src, $dst|$dst, $src}"), []>,
2505 T8PS, VEX_4V, Sched<[sched.Folded]>;
// The three BLS* operations share opcode 0xF3 and differ only in the ModRM
// reg field (/1, /2, /3); 64-bit variants add VEX_W.
2509 let Predicates = [HasBMI], Defs = [EFLAGS] in {
2510 defm BLSR32 : bmi_bls<"blsr{l}", MRM1r, MRM1m, GR32, i32mem, WriteBLS>;
2511 defm BLSR64 : bmi_bls<"blsr{q}", MRM1r, MRM1m, GR64, i64mem, WriteBLS>, VEX_W;
2512 defm BLSMSK32 : bmi_bls<"blsmsk{l}", MRM2r, MRM2m, GR32, i32mem, WriteBLS>;
2513 defm BLSMSK64 : bmi_bls<"blsmsk{q}", MRM2r, MRM2m, GR64, i64mem, WriteBLS>, VEX_W;
2514 defm BLSI32 : bmi_bls<"blsi{l}", MRM3r, MRM3m, GR32, i32mem, WriteBLS>;
2515 defm BLSI64 : bmi_bls<"blsi{q}", MRM3r, MRM3m, GR64, i64mem, WriteBLS>, VEX_W;
2518 //===----------------------------------------------------------------------===//
2519 // Pattern fragments to auto generate BMI instructions.
2520 //===----------------------------------------------------------------------===//
// These fragments match the flag-producing or/xor/and nodes only when no
// user of the flag result reads the carry flag (hasNoCarryFlagUses), since
// the BLS*/TBM instructions update CF differently from plain OR/XOR/AND.
2522 def or_flag_nocf : PatFrag<(ops node:$lhs, node:$rhs),
2523 (X86or_flag node:$lhs, node:$rhs), [{
2524 return hasNoCarryFlagUses(SDValue(N, 1));
2527 def xor_flag_nocf : PatFrag<(ops node:$lhs, node:$rhs),
2528 (X86xor_flag node:$lhs, node:$rhs), [{
2529 return hasNoCarryFlagUses(SDValue(N, 1));
2532 def and_flag_nocf : PatFrag<(ops node:$lhs, node:$rhs),
2533 (X86and_flag node:$lhs, node:$rhs), [{
2534 return hasNoCarryFlagUses(SDValue(N, 1));
// Selection patterns: x & (x-1) -> BLSR, x ^ (x-1) -> BLSMSK, x & -x -> BLSI.
2537 let Predicates = [HasBMI] in {
2538 // FIXME: patterns for the load versions are not implemented
2539 def : Pat<(and GR32:$src, (add GR32:$src, -1)),
2540 (BLSR32rr GR32:$src)>;
2541 def : Pat<(and GR64:$src, (add GR64:$src, -1)),
2542 (BLSR64rr GR64:$src)>;
2544 def : Pat<(xor GR32:$src, (add GR32:$src, -1)),
2545 (BLSMSK32rr GR32:$src)>;
2546 def : Pat<(xor GR64:$src, (add GR64:$src, -1)),
2547 (BLSMSK64rr GR64:$src)>;
2549 def : Pat<(and GR32:$src, (ineg GR32:$src)),
2550 (BLSI32rr GR32:$src)>;
2551 def : Pat<(and GR64:$src, (ineg GR64:$src)),
2552 (BLSI64rr GR64:$src)>;
2554 // Versions to match flag producing ops.
2555 def : Pat<(and_flag_nocf GR32:$src, (add GR32:$src, -1)),
2556 (BLSR32rr GR32:$src)>;
2557 def : Pat<(and_flag_nocf GR64:$src, (add GR64:$src, -1)),
2558 (BLSR64rr GR64:$src)>;
2560 def : Pat<(xor_flag_nocf GR32:$src, (add GR32:$src, -1)),
2561 (BLSMSK32rr GR32:$src)>;
2562 def : Pat<(xor_flag_nocf GR64:$src, (add GR64:$src, -1)),
2563 (BLSMSK64rr GR64:$src)>;
2565 def : Pat<(and_flag_nocf GR32:$src, (ineg GR32:$src)),
2566 (BLSI32rr GR32:$src)>;
2567 def : Pat<(and_flag_nocf GR64:$src, (ineg GR64:$src)),
2568 (BLSI64rr GR64:$src)>;
// BEXTR (BMI1) and BZHI (BMI2): three-operand VEX forms where the control
// register is encoded in VEX.vvvv (MRMSrcReg4VOp3/MRMSrcMem4VOp3). The rm
// forms spell out ReadDefault entries for the five address operands so the
// control-register read (Sched.ReadAfterFold) lines up with the right operand.
2571 multiclass bmi_bextr<bits<8> opc, string mnemonic, RegisterClass RC,
2572 X86MemOperand x86memop, SDNode OpNode,
2573 PatFrag ld_frag, X86FoldableSchedWrite Sched> {
2574 def rr : I<opc, MRMSrcReg4VOp3, (outs RC:$dst), (ins RC:$src1, RC:$src2),
2575 !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2576 [(set RC:$dst, (OpNode RC:$src1, RC:$src2)), (implicit EFLAGS)]>,
2577 T8PS, VEX, Sched<[Sched]>;
2578 def rm : I<opc, MRMSrcMem4VOp3, (outs RC:$dst), (ins x86memop:$src1, RC:$src2),
2579 !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2580 [(set RC:$dst, (OpNode (ld_frag addr:$src1), RC:$src2)),
2581 (implicit EFLAGS)]>, T8PS, VEX,
2582 Sched<[Sched.Folded,
2584 ReadDefault, ReadDefault, ReadDefault, ReadDefault,
2587 Sched.ReadAfterFold]>;
2590 let Predicates = [HasBMI], Defs = [EFLAGS] in {
2591 defm BEXTR32 : bmi_bextr<0xF7, "bextr{l}", GR32, i32mem,
2592 X86bextr, loadi32, WriteBEXTR>;
2593 defm BEXTR64 : bmi_bextr<0xF7, "bextr{q}", GR64, i64mem,
2594 X86bextr, loadi64, WriteBEXTR>, VEX_W;
// BZHI has the same operand shape as BEXTR; only opcode, node, and the
// HasBMI2 predicate differ.
2597 multiclass bmi_bzhi<bits<8> opc, string mnemonic, RegisterClass RC,
2598 X86MemOperand x86memop, SDNode Int,
2599 PatFrag ld_frag, X86FoldableSchedWrite Sched> {
2600 def rr : I<opc, MRMSrcReg4VOp3, (outs RC:$dst), (ins RC:$src1, RC:$src2),
2601 !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2602 [(set RC:$dst, (Int RC:$src1, RC:$src2)), (implicit EFLAGS)]>,
2603 T8PS, VEX, Sched<[Sched]>;
2604 def rm : I<opc, MRMSrcMem4VOp3, (outs RC:$dst), (ins x86memop:$src1, RC:$src2),
2605 !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2606 [(set RC:$dst, (Int (ld_frag addr:$src1), RC:$src2)),
2607 (implicit EFLAGS)]>, T8PS, VEX,
2608 Sched<[Sched.Folded,
2610 ReadDefault, ReadDefault, ReadDefault, ReadDefault,
2613 Sched.ReadAfterFold]>;
2616 let Predicates = [HasBMI2], Defs = [EFLAGS] in {
2617 defm BZHI32 : bmi_bzhi<0xF5, "bzhi{l}", GR32, i32mem,
2618 X86bzhi, loadi32, WriteBZHI>;
2619 defm BZHI64 : bmi_bzhi<0xF5, "bzhi{q}", GR64, i64mem,
2620 X86bzhi, loadi64, WriteBZHI>, VEX_W;
// Transform an all-ones ("mask") immediate into the operand the replacement
// instruction wants: CountTrailingOnes yields the mask width as an i8;
// BEXTRMaskXForm builds a BEXTR control word (length in bits [15:8],
// start bit 0 in bits [7:0]).
2623 def CountTrailingOnes : SDNodeXForm<imm, [{
2624 // Count the trailing ones in the immediate.
2625 return getI8Imm(countTrailingOnes(N->getZExtValue()), SDLoc(N));
2628 def BEXTRMaskXForm : SDNodeXForm<imm, [{
2629 unsigned Length = countTrailingOnes(N->getZExtValue());
2630 return getI32Imm(Length << 8, SDLoc(N));
// Matches 64-bit mask immediates too wide for a 32-bit AND immediate
// (isMask_64 && !isUInt<32>), where AND would need a separate MOV of the mask.
2633 def AndMask64 : ImmLeaf<i64, [{
2634 return isMask_64(Imm) && !isUInt<32>(Imm);
2637 // Use BEXTR for 64-bit 'and' with large immediate 'mask'.
// Only when BZHI (BMI2) and TBM are unavailable; those give shorter sequences.
2638 let Predicates = [HasBMI, NoBMI2, NoTBM] in {
2639 def : Pat<(and GR64:$src, AndMask64:$mask),
2640 (BEXTR64rr GR64:$src,
2641 (SUBREG_TO_REG (i64 0),
2642 (MOV32ri (BEXTRMaskXForm imm:$mask)), sub_32bit))>;
2643 def : Pat<(and (loadi64 addr:$src), AndMask64:$mask),
2644 (BEXTR64rm addr:$src,
2645 (SUBREG_TO_REG (i64 0),
2646 (MOV32ri (BEXTRMaskXForm imm:$mask)), sub_32bit))>;
2649 // Use BZHI for 64-bit 'and' with large immediate 'mask'.
// BZHI only needs the mask width in the low byte of the control register.
2650 let Predicates = [HasBMI2, NoTBM] in {
2651 def : Pat<(and GR64:$src, AndMask64:$mask),
2652 (BZHI64rr GR64:$src,
2653 (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
2654 (MOV8ri (CountTrailingOnes imm:$mask)), sub_8bit))>;
2655 def : Pat<(and (loadi64 addr:$src), AndMask64:$mask),
2656 (BZHI64rm addr:$src,
2657 (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
2658 (MOV8ri (CountTrailingOnes imm:$mask)), sub_8bit))>;
// PDEP/PEXT (BMI2): parallel bit deposit/extract, selected directly from the
// X86pdep/X86pext nodes. The two share opcode 0xF5; the prefix class applied
// at instantiation (T8XD vs T8XS) distinguishes PDEP from PEXT.
2661 multiclass bmi_pdep_pext<string mnemonic, RegisterClass RC,
2662 X86MemOperand x86memop, SDNode OpNode,
2664 def rr : I<0xF5, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
2665 !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2666 [(set RC:$dst, (OpNode RC:$src1, RC:$src2))]>,
2667 VEX_4V, Sched<[WriteALU]>;
2668 def rm : I<0xF5, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
2669 !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2670 [(set RC:$dst, (OpNode RC:$src1, (ld_frag addr:$src2)))]>,
2671 VEX_4V, Sched<[WriteALU.Folded, WriteALU.ReadAfterFold]>;
2674 let Predicates = [HasBMI2] in {
2675 defm PDEP32 : bmi_pdep_pext<"pdep{l}", GR32, i32mem,
2676 X86pdep, loadi32>, T8XD;
2677 defm PDEP64 : bmi_pdep_pext<"pdep{q}", GR64, i64mem,
2678 X86pdep, loadi64>, T8XD, VEX_W;
2679 defm PEXT32 : bmi_pdep_pext<"pext{l}", GR32, i32mem,
2680 X86pext, loadi32>, T8XS;
2681 defm PEXT64 : bmi_pdep_pext<"pext{q}", GR64, i64mem,
2682 X86pext, loadi64>, T8XS, VEX_W;
2685 //===----------------------------------------------------------------------===//
// TBM (AMD Trailing Bit Manipulation) instructions, XOP-encoded.
2688 let Predicates = [HasTBM], Defs = [EFLAGS] in {
// BEXTRI: BEXTR with the control field as an immediate instead of a register.
2690 multiclass tbm_bextri<bits<8> opc, RegisterClass RC, string OpcodeStr,
2691 X86MemOperand x86memop, PatFrag ld_frag,
2692 SDNode OpNode, Operand immtype,
2693 SDPatternOperator immoperator,
2694 X86FoldableSchedWrite Sched> {
2695 def ri : Ii32<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, immtype:$cntl),
2696 !strconcat(OpcodeStr,
2697 "\t{$cntl, $src1, $dst|$dst, $src1, $cntl}"),
2698 [(set RC:$dst, (OpNode RC:$src1, immoperator:$cntl))]>,
2699 XOP, XOPA, Sched<[Sched]>;
2700 def mi : Ii32<opc, MRMSrcMem, (outs RC:$dst),
2701 (ins x86memop:$src1, immtype:$cntl),
2702 !strconcat(OpcodeStr,
2703 "\t{$cntl, $src1, $dst|$dst, $src1, $cntl}"),
2704 [(set RC:$dst, (OpNode (ld_frag addr:$src1), immoperator:$cntl))]>,
2705 XOP, XOPA, Sched<[Sched.Folded]>;
2708 defm BEXTRI32 : tbm_bextri<0x10, GR32, "bextr{l}", i32mem, loadi32,
2709 X86bextri, i32imm, timm, WriteBEXTR>;
// 64-bit BEXTRI still encodes a 32-bit immediate, sign-extended (Imm32S).
2710 let ImmT = Imm32S in
2711 defm BEXTRI64 : tbm_bextri<0x10, GR64, "bextr{q}", i64mem, loadi64,
2712 X86bextri, i64i32imm,
2713 i64timmSExt32, WriteBEXTR>, VEX_W;
// Unary TBM ops: one register/memory source, operation selected by the
// ModRM reg field passed in as FormReg/FormMem. No patterns here; selection
// happens via the TBM Pat<> records later in this file.
2715 multiclass tbm_binary_rm<bits<8> opc, Format FormReg, Format FormMem,
2716 RegisterClass RC, string OpcodeStr,
2717 X86MemOperand x86memop, X86FoldableSchedWrite Sched> {
2718 let hasSideEffects = 0 in {
2719 def rr : I<opc, FormReg, (outs RC:$dst), (ins RC:$src),
2720 !strconcat(OpcodeStr,"\t{$src, $dst|$dst, $src}"), []>,
2721 XOP_4V, XOP9, Sched<[Sched]>;
2723 def rm : I<opc, FormMem, (outs RC:$dst), (ins x86memop:$src),
2724 !strconcat(OpcodeStr,"\t{$src, $dst|$dst, $src}"), []>,
2725 XOP_4V, XOP9, Sched<[Sched.Folded]>;
2729 multiclass tbm_binary_intr<bits<8> opc, string OpcodeStr,
2730 X86FoldableSchedWrite Sched,
2731 Format FormReg, Format FormMem> {
2732 defm NAME#32 : tbm_binary_rm<opc, FormReg, FormMem, GR32, OpcodeStr#"{l}",
2734 defm NAME#64 : tbm_binary_rm<opc, FormReg, FormMem, GR64, OpcodeStr#"{q}",
2735 i64mem, Sched>, VEX_W;
2738 defm BLCFILL : tbm_binary_intr<0x01, "blcfill", WriteALU, MRM1r, MRM1m>;
2739 defm BLCI : tbm_binary_intr<0x02, "blci", WriteALU, MRM6r, MRM6m>;
2740 defm BLCIC : tbm_binary_intr<0x01, "blcic", WriteALU, MRM5r, MRM5m>;
2741 defm BLCMSK : tbm_binary_intr<0x02, "blcmsk", WriteALU, MRM1r, MRM1m>;
2742 defm BLCS : tbm_binary_intr<0x01, "blcs", WriteALU, MRM3r, MRM3m>;
2743 defm BLSFILL : tbm_binary_intr<0x01, "blsfill", WriteALU, MRM2r, MRM2m>;
2744 defm BLSIC : tbm_binary_intr<0x01, "blsic", WriteALU, MRM6r, MRM6m>;
2745 defm T1MSKC : tbm_binary_intr<0x01, "t1mskc", WriteALU, MRM7r, MRM7m>;
2746 defm TZMSK : tbm_binary_intr<0x01, "tzmsk", WriteALU, MRM4r, MRM4m>;
2749 // Use BEXTRI for 64-bit 'and' with large immediate 'mask'.
2750 let Predicates = [HasTBM] in {
2751 def : Pat<(and GR64:$src, AndMask64:$mask),
2752 (BEXTRI64ri GR64:$src, (BEXTRMaskXForm imm:$mask))>;
2754 def : Pat<(and (loadi64 addr:$src), AndMask64:$mask),
2755 (BEXTRI64mi addr:$src, (BEXTRMaskXForm imm:$mask))>;
2758 //===----------------------------------------------------------------------===//
2759 // Lightweight Profiling Instructions
// LWP (AMD): all forms map 1:1 onto the int_x86_llwpcb/slwpcb/lwpins/lwpval
// intrinsics; LWPINS additionally produces EFLAGS.
2761 let Predicates = [HasLWP], SchedRW = [WriteSystem] in {
2763 def LLWPCB : I<0x12, MRM0r, (outs), (ins GR32:$src), "llwpcb\t$src",
2764 [(int_x86_llwpcb GR32:$src)]>, XOP, XOP9;
2765 def SLWPCB : I<0x12, MRM1r, (outs GR32:$dst), (ins), "slwpcb\t$dst",
2766 [(set GR32:$dst, (int_x86_slwpcb))]>, XOP, XOP9;
2768 def LLWPCB64 : I<0x12, MRM0r, (outs), (ins GR64:$src), "llwpcb\t$src",
2769 [(int_x86_llwpcb GR64:$src)]>, XOP, XOP9, VEX_W;
2770 def SLWPCB64 : I<0x12, MRM1r, (outs GR64:$dst), (ins), "slwpcb\t$dst",
2771 [(set GR64:$dst, (int_x86_slwpcb))]>, XOP, XOP9, VEX_W;
2773 multiclass lwpins_intr<RegisterClass RC> {
2774 def rri : Ii32<0x12, MRM0r, (outs), (ins RC:$src0, GR32:$src1, i32imm:$cntl),
2775 "lwpins\t{$cntl, $src1, $src0|$src0, $src1, $cntl}",
2776 [(set EFLAGS, (X86lwpins RC:$src0, GR32:$src1, timm:$cntl))]>,
2779 def rmi : Ii32<0x12, MRM0m, (outs), (ins RC:$src0, i32mem:$src1, i32imm:$cntl),
2780 "lwpins\t{$cntl, $src1, $src0|$src0, $src1, $cntl}",
2781 [(set EFLAGS, (X86lwpins RC:$src0, (loadi32 addr:$src1), timm:$cntl))]>,
2785 let Defs = [EFLAGS] in {
2786 defm LWPINS32 : lwpins_intr<GR32>;
2787 defm LWPINS64 : lwpins_intr<GR64>, VEX_W;
2790 multiclass lwpval_intr<RegisterClass RC, Intrinsic Int> {
2791 def rri : Ii32<0x12, MRM1r, (outs), (ins RC:$src0, GR32:$src1, i32imm:$cntl),
2792 "lwpval\t{$cntl, $src1, $src0|$src0, $src1, $cntl}",
2793 [(Int RC:$src0, GR32:$src1, timm:$cntl)]>, XOP_4V, XOPA;
2795 def rmi : Ii32<0x12, MRM1m, (outs), (ins RC:$src0, i32mem:$src1, i32imm:$cntl),
2796 "lwpval\t{$cntl, $src1, $src0|$src0, $src1, $cntl}",
2797 [(Int RC:$src0, (loadi32 addr:$src1), timm:$cntl)]>,
2801 defm LWPVAL32 : lwpval_intr<GR32, int_x86_lwpval32>;
2802 defm LWPVAL64 : lwpval_intr<GR64, int_x86_lwpval64>, VEX_W;
2804 } // HasLWP, SchedRW
2806 //===----------------------------------------------------------------------===//
2807 // MONITORX/MWAITX Instructions
// MONITORX takes its address implicitly in EAX (32-bit mode) or RAX (64-bit
// mode), hence the two mode-gated defs; MWAITX implicitly reads ECX/EAX/EBX.
// The InstAliases accept the explicit-register spellings.
2809 let SchedRW = [ WriteSystem ] in {
2810 let Uses = [ EAX, ECX, EDX ] in
2811 def MONITORX32rrr : I<0x01, MRM_FA, (outs), (ins), "monitorx", []>,
2812 TB, Requires<[ HasMWAITX, Not64BitMode ]>;
2813 let Uses = [ RAX, ECX, EDX ] in
2814 def MONITORX64rrr : I<0x01, MRM_FA, (outs), (ins), "monitorx", []>,
2815 TB, Requires<[ HasMWAITX, In64BitMode ]>;
2817 let Uses = [ ECX, EAX, EBX ] in {
2818 def MWAITXrrr : I<0x01, MRM_FB, (outs), (ins), "mwaitx",
2819 []>, TB, Requires<[ HasMWAITX ]>;
2823 def : InstAlias<"mwaitx\t{%eax, %ecx, %ebx|ebx, ecx, eax}", (MWAITXrrr)>,
2824 Requires<[ Not64BitMode ]>;
2825 def : InstAlias<"mwaitx\t{%rax, %rcx, %rbx|rbx, rcx, rax}", (MWAITXrrr)>,
2826 Requires<[ In64BitMode ]>;
2828 def : InstAlias<"monitorx\t{%eax, %ecx, %edx|edx, ecx, eax}", (MONITORX32rrr)>,
2829 Requires<[ Not64BitMode ]>;
2830 def : InstAlias<"monitorx\t{%rax, %rcx, %rdx|rdx, rcx, rax}", (MONITORX64rrr)>,
2831 Requires<[ In64BitMode ]>;
2833 //===----------------------------------------------------------------------===//
2834 // WAITPKG Instructions
// UMONITOR's operand size follows the address size, so there is one def per
// AdSize; UMWAIT/TPAUSE read the timeout from EDX:EAX and produce EFLAGS.
2836 let SchedRW = [WriteSystem] in {
2837 def UMONITOR16 : I<0xAE, MRM6r, (outs), (ins GR16:$src),
2838 "umonitor\t$src", [(int_x86_umonitor GR16:$src)]>,
2839 XS, AdSize16, Requires<[HasWAITPKG, Not64BitMode]>;
2840 def UMONITOR32 : I<0xAE, MRM6r, (outs), (ins GR32:$src),
2841 "umonitor\t$src", [(int_x86_umonitor GR32:$src)]>,
2842 XS, AdSize32, Requires<[HasWAITPKG]>;
2843 def UMONITOR64 : I<0xAE, MRM6r, (outs), (ins GR64:$src),
2844 "umonitor\t$src", [(int_x86_umonitor GR64:$src)]>,
2845 XS, AdSize64, Requires<[HasWAITPKG, In64BitMode]>;
2846 let Uses = [EAX, EDX], Defs = [EFLAGS] in {
2847 def UMWAIT : I<0xAE, MRM6r,
2848 (outs), (ins GR32orGR64:$src), "umwait\t$src",
2849 [(set EFLAGS, (X86umwait GR32orGR64:$src, EDX, EAX))]>,
2850 XD, Requires<[HasWAITPKG]>;
// TPAUSE shares the encoding of UMWAIT except for the 0x66 (PD) prefix.
2851 def TPAUSE : I<0xAE, MRM6r,
2852 (outs), (ins GR32orGR64:$src), "tpause\t$src",
2853 [(set EFLAGS, (X86tpause GR32orGR64:$src, EDX, EAX))]>,
2854 PD, Requires<[HasWAITPKG]>, NotMemoryFoldable;
2858 //===----------------------------------------------------------------------===//
2859 // MOVDIRI - Move doubleword/quadword as direct store
// Selected from the directstore intrinsics rather than plain store nodes.
2861 let SchedRW = [WriteStore] in {
2862 def MOVDIRI32 : I<0xF9, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2863 "movdiri\t{$src, $dst|$dst, $src}",
2864 [(int_x86_directstore32 addr:$dst, GR32:$src)]>,
2865 T8PS, Requires<[HasMOVDIRI]>;
2866 def MOVDIRI64 : RI<0xF9, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
2867 "movdiri\t{$src, $dst|$dst, $src}",
2868 [(int_x86_directstore64 addr:$dst, GR64:$src)]>,
2869 T8PS, Requires<[In64BitMode, HasMOVDIRI]>;
2872 //===----------------------------------------------------------------------===//
2873 // MOVDIR64B - Move 64 bytes as direct store
// Destination register width follows address size (AdSize16/32/64). The
// 16-bit form has no pattern; only the 32/64-bit forms are selectable from
// the int_x86_movdir64b intrinsic.
2875 let SchedRW = [WriteStore] in {
2876 def MOVDIR64B16 : I<0xF8, MRMSrcMem, (outs), (ins GR16:$dst, i512mem:$src),
2877 "movdir64b\t{$src, $dst|$dst, $src}", []>,
2878 T8PD, AdSize16, Requires<[HasMOVDIR64B, Not64BitMode]>;
2879 def MOVDIR64B32 : I<0xF8, MRMSrcMem, (outs), (ins GR32:$dst, i512mem:$src),
2880 "movdir64b\t{$src, $dst|$dst, $src}",
2881 [(int_x86_movdir64b GR32:$dst, addr:$src)]>,
2882 T8PD, AdSize32, Requires<[HasMOVDIR64B]>;
2883 def MOVDIR64B64 : I<0xF8, MRMSrcMem, (outs), (ins GR64:$dst, i512mem:$src),
2884 "movdir64b\t{$src, $dst|$dst, $src}",
2885 [(int_x86_movdir64b GR64:$dst, addr:$src)]>,
2886 T8PD, AdSize64, Requires<[HasMOVDIR64B, In64BitMode]>;
2889 //===----------------------------------------------------------------------===//
2890 // ENQCMD/S - Enqueue 64-byte command as user with 64-byte write atomicity
// ENQCMD (XD prefix) and ENQCMDS (XS prefix) share opcode 0xF8; destination
// register width follows address size. Both set EFLAGS via the
// X86enqcmd/X86enqcmds nodes.
2892 let SchedRW = [WriteStore], Defs = [EFLAGS] in {
2893 def ENQCMD16 : I<0xF8, MRMSrcMem, (outs), (ins GR16:$dst, i512mem:$src),
2894 "enqcmd\t{$src, $dst|$dst, $src}",
2895 [(set EFLAGS, (X86enqcmd GR16:$dst, addr:$src))]>,
2896 T8XD, AdSize16, Requires<[HasENQCMD, Not64BitMode]>;
2897 def ENQCMD32 : I<0xF8, MRMSrcMem, (outs), (ins GR32:$dst, i512mem:$src),
2898 "enqcmd\t{$src, $dst|$dst, $src}",
2899 [(set EFLAGS, (X86enqcmd GR32:$dst, addr:$src))]>,
2900 T8XD, AdSize32, Requires<[HasENQCMD]>;
2901 def ENQCMD64 : I<0xF8, MRMSrcMem, (outs), (ins GR64:$dst, i512mem:$src),
2902 "enqcmd\t{$src, $dst|$dst, $src}",
2903 [(set EFLAGS, (X86enqcmd GR64:$dst, addr:$src))]>,
2904 T8XD, AdSize64, Requires<[HasENQCMD, In64BitMode]>;
2906 def ENQCMDS16 : I<0xF8, MRMSrcMem, (outs), (ins GR16:$dst, i512mem:$src),
2907 "enqcmds\t{$src, $dst|$dst, $src}",
2908 [(set EFLAGS, (X86enqcmds GR16:$dst, addr:$src))]>,
2909 T8XS, AdSize16, Requires<[HasENQCMD, Not64BitMode]>;
2910 def ENQCMDS32 : I<0xF8, MRMSrcMem, (outs), (ins GR32:$dst, i512mem:$src),
2911 "enqcmds\t{$src, $dst|$dst, $src}",
2912 [(set EFLAGS, (X86enqcmds GR32:$dst, addr:$src))]>,
2913 T8XS, AdSize32, Requires<[HasENQCMD]>;
2914 def ENQCMDS64 : I<0xF8, MRMSrcMem, (outs), (ins GR64:$dst, i512mem:$src),
2915 "enqcmds\t{$src, $dst|$dst, $src}",
2916 [(set EFLAGS, (X86enqcmds GR64:$dst, addr:$src))]>,
2917 T8XS, AdSize64, Requires<[HasENQCMD, In64BitMode]>;
2920 //===----------------------------------------------------------------------===//
2921 // CLZERO Instruction
// The address is implicit (EAX/RAX depending on mode, per the aliases below;
// the implicit-Uses lines are on lines not shown in this view — verify).
2923 let SchedRW = [WriteLoad] in {
2925 def CLZERO32r : I<0x01, MRM_FC, (outs), (ins), "clzero", []>,
2926 TB, Requires<[HasCLZERO, Not64BitMode]>;
2928 def CLZERO64r : I<0x01, MRM_FC, (outs), (ins), "clzero", []>,
2929 TB, Requires<[HasCLZERO, In64BitMode]>;
2932 def : InstAlias<"clzero\t{%eax|eax}", (CLZERO32r)>, Requires<[Not64BitMode]>;
2933 def : InstAlias<"clzero\t{%rax|rax}", (CLZERO64r)>, Requires<[In64BitMode]>;
2935 //===----------------------------------------------------------------------===//
2936 // INVLPGB Instruction
// Implicit operands: address in EAX (32-bit) or RAX (64-bit), plus EDX.
2939 let SchedRW = [WriteSystem] in {
2940 let Uses = [EAX, EDX] in
2941 def INVLPGB32 : I<0x01, MRM_FE, (outs), (ins),
2943 PS, Requires<[Not64BitMode]>;
2944 let Uses = [RAX, EDX] in
2945 def INVLPGB64 : I<0x01, MRM_FE, (outs), (ins),
2947 PS, Requires<[In64BitMode]>;
2950 def : InstAlias<"invlpgb\t{%eax, %edx|eax, edx}", (INVLPGB32)>, Requires<[Not64BitMode]>;
2951 def : InstAlias<"invlpgb\t{%rax, %edx|rax, edx}", (INVLPGB64)>, Requires<[In64BitMode]>;
2953 //===----------------------------------------------------------------------===//
2954 // TLBSYNC Instruction
2957 let SchedRW = [WriteSystem] in {
2958 def TLBSYNC : I<0x01, MRM_FF, (outs), (ins),
2963 //===----------------------------------------------------------------------===//
2964 // HRESET Instruction
// History reset; operates on the state selected by the immediate and EAX.
2966 let Uses = [EAX], SchedRW = [WriteSystem] in
2967 def HRESET : Ii8<0xF0, MRM_C0, (outs), (ins i32u8imm:$imm), "hreset\t$imm", []>,
2968 Requires<[HasHRESET]>, TAXS;
2970 //===----------------------------------------------------------------------===//
2971 // SERIALIZE Instruction
2973 let SchedRW = [WriteSystem] in
2974 def SERIALIZE : I<0x01, MRM_E8, (outs), (ins), "serialize",
2975 [(int_x86_serialize)]>, PS,
2976 Requires<[HasSERIALIZE]>;
2978 //===----------------------------------------------------------------------===//
2979 // TSXLDTRK - TSX Suspend Load Address Tracking
// XSUSLDTRK/XRESLDTRK share ModRM bytes with SERIALIZE's group but carry the
// XD prefix.
2981 let Predicates = [HasTSXLDTRK], SchedRW = [WriteSystem] in {
2982 def XSUSLDTRK : I<0x01, MRM_E8, (outs), (ins), "xsusldtrk",
2983 [(int_x86_xsusldtrk)]>, XD;
2984 def XRESLDTRK : I<0x01, MRM_E9, (outs), (ins), "xresldtrk",
2985 [(int_x86_xresldtrk)]>, XD;
2988 //===----------------------------------------------------------------------===//
2989 // UINTR Instructions
// User-interrupt instructions, 64-bit mode only; all map onto intrinsics,
// and TESTUI additionally produces EFLAGS.
2991 let Predicates = [HasUINTR, In64BitMode], SchedRW = [WriteSystem] in {
2992 def UIRET : I<0x01, MRM_EC, (outs), (ins), "uiret",
2994 def CLUI : I<0x01, MRM_EE, (outs), (ins), "clui",
2995 [(int_x86_clui)]>, XS;
2996 def STUI : I<0x01, MRM_EF, (outs), (ins), "stui",
2997 [(int_x86_stui)]>, XS;
2999 def SENDUIPI : I<0xC7, MRM6r, (outs), (ins GR64:$arg), "senduipi\t$arg",
3000 [(int_x86_senduipi GR64:$arg)]>, XS;
3002 let Defs = [EFLAGS] in
3003 def TESTUI : I<0x01, MRM_ED, (outs), (ins), "testui",
3004 [(set EFLAGS, (X86testui))]>, XS;
3007 //===----------------------------------------------------------------------===//
3008 // Pattern fragments to auto generate TBM instructions.
3009 //===----------------------------------------------------------------------===//
// Identity map used below (x is the source register):
//   BLCFILL = x & (x+1)        BLCI  = x | ~(x+1)      BLCIC = ~x & (x+1)
//   BLCMSK  = x ^ (x+1)        BLCS  = x | (x+1)       BLSFILL = x | (x-1)
//   BLSIC   = ~x | (x-1)       T1MSKC = ~x | (x+1)     TZMSK = ~x & (x-1)
// Each identity is matched twice: once for plain and/or/xor, and once for
// the flag-producing *_flag_nocf fragments defined earlier in this file.
3011 let Predicates = [HasTBM] in {
3012 // FIXME: patterns for the load versions are not implemented
3013 def : Pat<(and GR32:$src, (add GR32:$src, 1)),
3014 (BLCFILL32rr GR32:$src)>;
3015 def : Pat<(and GR64:$src, (add GR64:$src, 1)),
3016 (BLCFILL64rr GR64:$src)>;
3018 def : Pat<(or GR32:$src, (not (add GR32:$src, 1))),
3019 (BLCI32rr GR32:$src)>;
3020 def : Pat<(or GR64:$src, (not (add GR64:$src, 1))),
3021 (BLCI64rr GR64:$src)>;
3023 // Extra patterns because opt can optimize the above patterns to this.
// ~(x+1) == -2 - x, so InstCombine may canonicalize BLCI's form to a sub.
3024 def : Pat<(or GR32:$src, (sub -2, GR32:$src)),
3025 (BLCI32rr GR32:$src)>;
3026 def : Pat<(or GR64:$src, (sub -2, GR64:$src)),
3027 (BLCI64rr GR64:$src)>;
3029 def : Pat<(and (not GR32:$src), (add GR32:$src, 1)),
3030 (BLCIC32rr GR32:$src)>;
3031 def : Pat<(and (not GR64:$src), (add GR64:$src, 1)),
3032 (BLCIC64rr GR64:$src)>;
3034 def : Pat<(xor GR32:$src, (add GR32:$src, 1)),
3035 (BLCMSK32rr GR32:$src)>;
3036 def : Pat<(xor GR64:$src, (add GR64:$src, 1)),
3037 (BLCMSK64rr GR64:$src)>;
3039 def : Pat<(or GR32:$src, (add GR32:$src, 1)),
3040 (BLCS32rr GR32:$src)>;
3041 def : Pat<(or GR64:$src, (add GR64:$src, 1)),
3042 (BLCS64rr GR64:$src)>;
3044 def : Pat<(or GR32:$src, (add GR32:$src, -1)),
3045 (BLSFILL32rr GR32:$src)>;
3046 def : Pat<(or GR64:$src, (add GR64:$src, -1)),
3047 (BLSFILL64rr GR64:$src)>;
3049 def : Pat<(or (not GR32:$src), (add GR32:$src, -1)),
3050 (BLSIC32rr GR32:$src)>;
3051 def : Pat<(or (not GR64:$src), (add GR64:$src, -1)),
3052 (BLSIC64rr GR64:$src)>;
3054 def : Pat<(or (not GR32:$src), (add GR32:$src, 1)),
3055 (T1MSKC32rr GR32:$src)>;
3056 def : Pat<(or (not GR64:$src), (add GR64:$src, 1)),
3057 (T1MSKC64rr GR64:$src)>;
3059 def : Pat<(and (not GR32:$src), (add GR32:$src, -1)),
3060 (TZMSK32rr GR32:$src)>;
3061 def : Pat<(and (not GR64:$src), (add GR64:$src, -1)),
3062 (TZMSK64rr GR64:$src)>;
3064 // Patterns to match flag producing ops.
3065 def : Pat<(and_flag_nocf GR32:$src, (add GR32:$src, 1)),
3066 (BLCFILL32rr GR32:$src)>;
3067 def : Pat<(and_flag_nocf GR64:$src, (add GR64:$src, 1)),
3068 (BLCFILL64rr GR64:$src)>;
3070 def : Pat<(or_flag_nocf GR32:$src, (not (add GR32:$src, 1))),
3071 (BLCI32rr GR32:$src)>;
3072 def : Pat<(or_flag_nocf GR64:$src, (not (add GR64:$src, 1))),
3073 (BLCI64rr GR64:$src)>;
3075 // Extra patterns because opt can optimize the above patterns to this.
3076 def : Pat<(or_flag_nocf GR32:$src, (sub -2, GR32:$src)),
3077 (BLCI32rr GR32:$src)>;
3078 def : Pat<(or_flag_nocf GR64:$src, (sub -2, GR64:$src)),
3079 (BLCI64rr GR64:$src)>;
3081 def : Pat<(and_flag_nocf (not GR32:$src), (add GR32:$src, 1)),
3082 (BLCIC32rr GR32:$src)>;
3083 def : Pat<(and_flag_nocf (not GR64:$src), (add GR64:$src, 1)),
3084 (BLCIC64rr GR64:$src)>;
3086 def : Pat<(xor_flag_nocf GR32:$src, (add GR32:$src, 1)),
3087 (BLCMSK32rr GR32:$src)>;
3088 def : Pat<(xor_flag_nocf GR64:$src, (add GR64:$src, 1)),
3089 (BLCMSK64rr GR64:$src)>;
3091 def : Pat<(or_flag_nocf GR32:$src, (add GR32:$src, 1)),
3092 (BLCS32rr GR32:$src)>;
3093 def : Pat<(or_flag_nocf GR64:$src, (add GR64:$src, 1)),
3094 (BLCS64rr GR64:$src)>;
3096 def : Pat<(or_flag_nocf GR32:$src, (add GR32:$src, -1)),
3097 (BLSFILL32rr GR32:$src)>;
3098 def : Pat<(or_flag_nocf GR64:$src, (add GR64:$src, -1)),
3099 (BLSFILL64rr GR64:$src)>;
3101 def : Pat<(or_flag_nocf (not GR32:$src), (add GR32:$src, -1)),
3102 (BLSIC32rr GR32:$src)>;
3103 def : Pat<(or_flag_nocf (not GR64:$src), (add GR64:$src, -1)),
3104 (BLSIC64rr GR64:$src)>;
3106 def : Pat<(or_flag_nocf (not GR32:$src), (add GR32:$src, 1)),
3107 (T1MSKC32rr GR32:$src)>;
3108 def : Pat<(or_flag_nocf (not GR64:$src), (add GR64:$src, 1)),
3109 (T1MSKC64rr GR64:$src)>;
3111 def : Pat<(and_flag_nocf (not GR32:$src), (add GR32:$src, -1)),
3112 (TZMSK32rr GR32:$src)>;
3113 def : Pat<(and_flag_nocf (not GR64:$src), (add GR64:$src, -1)),
3114 (TZMSK64rr GR64:$src)>;
3117 //===----------------------------------------------------------------------===//
3118 // Memory Instructions
// Cache-line management instructions, each gated on its CPUID feature
// predicate and taking a byte memory operand.
// NOTE(review): all three are scheduled as WriteLoad — presumably an
// approximation since no dedicated cache-op scheduling class is used here;
// confirm against the scheduling model.
3121 let Predicates = [HasCLFLUSHOPT], SchedRW = [WriteLoad] in
3122 def CLFLUSHOPT : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
3123 "clflushopt\t$src", [(int_x86_clflushopt addr:$src)]>, PD;
3125 let Predicates = [HasCLWB], SchedRW = [WriteLoad] in
3126 def CLWB : I<0xAE, MRM6m, (outs), (ins i8mem:$src), "clwb\t$src",
3127 [(int_x86_clwb addr:$src)]>, PD, NotMemoryFoldable;
3129 let Predicates = [HasCLDEMOTE], SchedRW = [WriteLoad] in
3130 def CLDEMOTE : I<0x1C, MRM0m, (outs), (ins i8mem:$src), "cldemote\t$src",
3131 [(int_x86_cldemote addr:$src)]>, PS;
3133 //===----------------------------------------------------------------------===//
3135 //===----------------------------------------------------------------------===//
3137 include "X86InstrArithmetic.td"
3138 include "X86InstrCMovSetCC.td"
3139 include "X86InstrExtension.td"
3140 include "X86InstrControl.td"
3141 include "X86InstrShiftRotate.td"
3143 // X87 Floating Point Stack.
3144 include "X86InstrFPStack.td"
3146 // SIMD support (SSE, MMX and AVX)
3147 include "X86InstrFragmentsSIMD.td"
3149 // FMA - Fused Multiply-Add support (requires FMA)
3150 include "X86InstrFMA.td"
3153 include "X86InstrXOP.td"
3155 // SSE, MMX and 3DNow! vector support.
3156 include "X86InstrSSE.td"
3157 include "X86InstrAVX512.td"
3158 include "X86InstrMMX.td"
3159 include "X86Instr3DNow.td"
3161 include "X86InstrVMX.td"
3162 include "X86InstrSVM.td"
3163 include "X86InstrSNP.td"
3165 include "X86InstrTSX.td"
3166 include "X86InstrSGX.td"
3168 include "X86InstrTDX.td"
3170 // Key Locker instructions
3171 include "X86InstrKL.td"
3174 include "X86InstrAMX.td"
3176 // System instructions.
3177 include "X86InstrSystem.td"
3179 // Compiler Pseudo Instructions and Pat Patterns
3180 include "X86InstrCompiler.td"
3181 include "X86InstrVecCompiler.td"
3183 //===----------------------------------------------------------------------===//
3184 // Assembler Mnemonic Aliases
3185 //===----------------------------------------------------------------------===//
// Canonicalize suffix-less AT&T mnemonics to the sized form implied by the
// current operand-size mode.
3187 def : MnemonicAlias<"call", "callw", "att">, Requires<[In16BitMode]>;
3188 def : MnemonicAlias<"call", "calll", "att">, Requires<[In32BitMode]>;
3189 def : MnemonicAlias<"call", "callq", "att">, Requires<[In64BitMode]>;
// Intel spellings of the sign-extension instructions map to the AT&T names.
3191 def : MnemonicAlias<"cbw", "cbtw", "att">;
3192 def : MnemonicAlias<"cwde", "cwtl", "att">;
3193 def : MnemonicAlias<"cwd", "cwtd", "att">;
3194 def : MnemonicAlias<"cdq", "cltd", "att">;
3195 def : MnemonicAlias<"cdqe", "cltq", "att">;
3196 def : MnemonicAlias<"cqo", "cqto", "att">;
3198 // In 64-bit mode lret maps to lretl; it is not ambiguous with lretq.
3199 def : MnemonicAlias<"lret", "lretw", "att">, Requires<[In16BitMode]>;
3200 def : MnemonicAlias<"lret", "lretl", "att">, Requires<[Not16BitMode]>;
3202 def : MnemonicAlias<"leavel", "leave", "att">, Requires<[Not64BitMode]>;
3203 def : MnemonicAlias<"leaveq", "leave", "att">, Requires<[In64BitMode]>;
3205 def : MnemonicAlias<"loopz", "loope">;
3206 def : MnemonicAlias<"loopnz", "loopne">;
3208 def : MnemonicAlias<"pop", "popw", "att">, Requires<[In16BitMode]>;
3209 def : MnemonicAlias<"pop", "popl", "att">, Requires<[In32BitMode]>;
3210 def : MnemonicAlias<"pop", "popq", "att">, Requires<[In64BitMode]>;
3211 def : MnemonicAlias<"popf", "popfw", "att">, Requires<[In16BitMode]>;
3212 def : MnemonicAlias<"popf", "popfl", "att">, Requires<[In32BitMode]>;
3213 def : MnemonicAlias<"popf", "popfq", "att">, Requires<[In64BitMode]>;
3214 def : MnemonicAlias<"popf", "popfq", "intel">, Requires<[In64BitMode]>;
3215 def : MnemonicAlias<"popfd", "popfl", "att">;
3216 def : MnemonicAlias<"popfw", "popf", "intel">, Requires<[In32BitMode]>;
3217 def : MnemonicAlias<"popfw", "popf", "intel">, Requires<[In64BitMode]>;
3219 // FIXME: This is wrong for "push reg". "push %bx" should turn into pushw in
3220 // all modes. However: "push (addr)" and "push $42" should default to
3221 // pushl/pushq depending on the current mode. Similar for "pop %bx"
3222 def : MnemonicAlias<"push", "pushw", "att">, Requires<[In16BitMode]>;
3223 def : MnemonicAlias<"push", "pushl", "att">, Requires<[In32BitMode]>;
3224 def : MnemonicAlias<"push", "pushq", "att">, Requires<[In64BitMode]>;
3225 def : MnemonicAlias<"pushf", "pushfw", "att">, Requires<[In16BitMode]>;
3226 def : MnemonicAlias<"pushf", "pushfl", "att">, Requires<[In32BitMode]>;
3227 def : MnemonicAlias<"pushf", "pushfq", "att">, Requires<[In64BitMode]>;
3228 def : MnemonicAlias<"pushf", "pushfq", "intel">, Requires<[In64BitMode]>;
3229 def : MnemonicAlias<"pushfd", "pushfl", "att">;
3230 def : MnemonicAlias<"pushfw", "pushf", "intel">, Requires<[In32BitMode]>;
3231 def : MnemonicAlias<"pushfw", "pushf", "intel">, Requires<[In64BitMode]>;
// popa/pusha: mode-dependent suffix in both asm variants (not valid in 64-bit
// mode, hence no 64-bit entries).
3233 def : MnemonicAlias<"popad", "popal", "intel">, Requires<[Not64BitMode]>;
3234 def : MnemonicAlias<"pushad", "pushal", "intel">, Requires<[Not64BitMode]>;
3235 def : MnemonicAlias<"popa", "popaw", "intel">, Requires<[In16BitMode]>;
3236 def : MnemonicAlias<"pusha", "pushaw", "intel">, Requires<[In16BitMode]>;
3237 def : MnemonicAlias<"popa", "popal", "intel">, Requires<[In32BitMode]>;
3238 def : MnemonicAlias<"pusha", "pushal", "intel">, Requires<[In32BitMode]>;
3240 def : MnemonicAlias<"popa", "popaw", "att">, Requires<[In16BitMode]>;
3241 def : MnemonicAlias<"pusha", "pushaw", "att">, Requires<[In16BitMode]>;
3242 def : MnemonicAlias<"popa", "popal", "att">, Requires<[In32BitMode]>;
3243 def : MnemonicAlias<"pusha", "pushal", "att">, Requires<[In32BitMode]>;
// String-op prefix spellings canonicalize to rep/repne.
3245 def : MnemonicAlias<"repe", "rep">;
3246 def : MnemonicAlias<"repz", "rep">;
3247 def : MnemonicAlias<"repnz", "repne">;
3249 def : MnemonicAlias<"ret", "retw", "att">, Requires<[In16BitMode]>;
3250 def : MnemonicAlias<"ret", "retl", "att">, Requires<[In32BitMode]>;
3251 def : MnemonicAlias<"ret", "retq", "att">, Requires<[In64BitMode]>;
3253 // Apply 'ret' behavior to 'retn'
3254 def : MnemonicAlias<"retn", "retw", "att">, Requires<[In16BitMode]>;
3255 def : MnemonicAlias<"retn", "retl", "att">, Requires<[In32BitMode]>;
3256 def : MnemonicAlias<"retn", "retq", "att">, Requires<[In64BitMode]>;
3257 def : MnemonicAlias<"retn", "ret", "intel">;
// sal is an alternate spelling of shl (identical encoding).
3259 def : MnemonicAlias<"sal", "shl", "intel">;
3260 def : MnemonicAlias<"salb", "shlb", "att">;
3261 def : MnemonicAlias<"salw", "shlw", "att">;
3262 def : MnemonicAlias<"sall", "shll", "att">;
3263 def : MnemonicAlias<"salq", "shlq", "att">;
3265 def : MnemonicAlias<"smovb", "movsb", "att">;
3266 def : MnemonicAlias<"smovw", "movsw", "att">;
3267 def : MnemonicAlias<"smovl", "movsl", "att">;
3268 def : MnemonicAlias<"smovq", "movsq", "att">;
3270 def : MnemonicAlias<"ud2a", "ud2", "att">;
3271 def : MnemonicAlias<"ud2bw", "ud1w", "att">;
3272 def : MnemonicAlias<"ud2bl", "ud1l", "att">;
3273 def : MnemonicAlias<"ud2bq", "ud1q", "att">;
3274 def : MnemonicAlias<"verrw", "verr", "att">;
3276 // MS recognizes 'xacquire'/'xrelease' as 'acquire'/'release'
3277 def : MnemonicAlias<"acquire", "xacquire", "intel">;
3278 def : MnemonicAlias<"release", "xrelease", "intel">;
3280 // System instruction aliases.
3281 def : MnemonicAlias<"iret", "iretw", "att">, Requires<[In16BitMode]>;
3282 def : MnemonicAlias<"iret", "iretl", "att">, Requires<[Not16BitMode]>;
3283 def : MnemonicAlias<"sysret", "sysretl", "att">;
3284 def : MnemonicAlias<"sysexit", "sysexitl", "att">;
// Descriptor-table instructions: sized AT&T suffixes, and Intel uses "d"
// where AT&T uses "l" for the 32-bit forms.
3286 def : MnemonicAlias<"lgdt", "lgdtw", "att">, Requires<[In16BitMode]>;
3287 def : MnemonicAlias<"lgdt", "lgdtl", "att">, Requires<[In32BitMode]>;
3288 def : MnemonicAlias<"lgdt", "lgdtq", "att">, Requires<[In64BitMode]>;
3289 def : MnemonicAlias<"lidt", "lidtw", "att">, Requires<[In16BitMode]>;
3290 def : MnemonicAlias<"lidt", "lidtl", "att">, Requires<[In32BitMode]>;
3291 def : MnemonicAlias<"lidt", "lidtq", "att">, Requires<[In64BitMode]>;
3292 def : MnemonicAlias<"sgdt", "sgdtw", "att">, Requires<[In16BitMode]>;
3293 def : MnemonicAlias<"sgdt", "sgdtl", "att">, Requires<[In32BitMode]>;
3294 def : MnemonicAlias<"sgdt", "sgdtq", "att">, Requires<[In64BitMode]>;
3295 def : MnemonicAlias<"sidt", "sidtw", "att">, Requires<[In16BitMode]>;
3296 def : MnemonicAlias<"sidt", "sidtl", "att">, Requires<[In32BitMode]>;
3297 def : MnemonicAlias<"sidt", "sidtq", "att">, Requires<[In64BitMode]>;
3298 def : MnemonicAlias<"lgdt", "lgdtw", "intel">, Requires<[In16BitMode]>;
3299 def : MnemonicAlias<"lgdt", "lgdtd", "intel">, Requires<[In32BitMode]>;
3300 def : MnemonicAlias<"lidt", "lidtw", "intel">, Requires<[In16BitMode]>;
3301 def : MnemonicAlias<"lidt", "lidtd", "intel">, Requires<[In32BitMode]>;
3302 def : MnemonicAlias<"sgdt", "sgdtw", "intel">, Requires<[In16BitMode]>;
3303 def : MnemonicAlias<"sgdt", "sgdtd", "intel">, Requires<[In32BitMode]>;
3304 def : MnemonicAlias<"sidt", "sidtw", "intel">, Requires<[In16BitMode]>;
3305 def : MnemonicAlias<"sidt", "sidtd", "intel">, Requires<[In32BitMode]>;
3308 // Floating point stack aliases.
// fcmov condition-code spellings map to the canonical x87 condition names.
3309 def : MnemonicAlias<"fcmovz", "fcmove", "att">;
3310 def : MnemonicAlias<"fcmova", "fcmovnbe", "att">;
3311 def : MnemonicAlias<"fcmovnae", "fcmovb", "att">;
3312 def : MnemonicAlias<"fcmovna", "fcmovbe", "att">;
3313 def : MnemonicAlias<"fcmovae", "fcmovnb", "att">;
3314 def : MnemonicAlias<"fcomip", "fcompi">;
3315 def : MnemonicAlias<"fildq", "fildll", "att">;
3316 def : MnemonicAlias<"fistpq", "fistpll", "att">;
3317 def : MnemonicAlias<"fisttpq", "fisttpll", "att">;
3318 def : MnemonicAlias<"fldcww", "fldcw", "att">;
3319 def : MnemonicAlias<"fnstcww", "fnstcw", "att">;
3320 def : MnemonicAlias<"fnstsww", "fnstsw", "att">;
3321 def : MnemonicAlias<"fucomip", "fucompi">;
3322 def : MnemonicAlias<"fwait", "wait">;
// "q"-suffixed spellings of the 64-bit save/restore instructions map to the
// canonical "64" forms.
3324 def : MnemonicAlias<"fxsaveq", "fxsave64", "att">;
3325 def : MnemonicAlias<"fxrstorq", "fxrstor64", "att">;
3326 def : MnemonicAlias<"xsaveq", "xsave64", "att">;
3327 def : MnemonicAlias<"xrstorq", "xrstor64", "att">;
3328 def : MnemonicAlias<"xsaveoptq", "xsaveopt64", "att">;
3329 def : MnemonicAlias<"xrstorsq", "xrstors64", "att">;
3330 def : MnemonicAlias<"xsavecq", "xsavec64", "att">;
3331 def : MnemonicAlias<"xsavesq", "xsaves64", "att">;
// Helper class: aliases Prefix+OldCond+Suffix to Prefix+NewCond+Suffix in the
// given asm variant.
3333 class CondCodeAlias<string Prefix,string Suffix, string OldCond, string NewCond,
3335 : MnemonicAlias<!strconcat(Prefix, OldCond, Suffix),
3336 !strconcat(Prefix, NewCond, Suffix), VariantName>;
3338 /// IntegerCondCodeMnemonicAlias - This multiclass defines a bunch of
3339 /// MnemonicAlias's that canonicalize the condition code in a mnemonic, for
3340 /// example "setz" -> "sete".
3341 multiclass IntegerCondCodeMnemonicAlias<string Prefix, string Suffix,
3343 def C : CondCodeAlias<Prefix, Suffix, "c", "b", V>; // setc -> setb
3344 def Z : CondCodeAlias<Prefix, Suffix, "z" , "e", V>; // setz -> sete
3345 def NA : CondCodeAlias<Prefix, Suffix, "na", "be", V>; // setna -> setbe
3346 def NB : CondCodeAlias<Prefix, Suffix, "nb", "ae", V>; // setnb -> setae
3347 def NC : CondCodeAlias<Prefix, Suffix, "nc", "ae", V>; // setnc -> setae
3348 def NG : CondCodeAlias<Prefix, Suffix, "ng", "le", V>; // setng -> setle
3349 def NL : CondCodeAlias<Prefix, Suffix, "nl", "ge", V>; // setnl -> setge
3350 def NZ : CondCodeAlias<Prefix, Suffix, "nz", "ne", V>; // setnz -> setne
3351 def PE : CondCodeAlias<Prefix, Suffix, "pe", "p", V>; // setpe -> setp
3352 def PO : CondCodeAlias<Prefix, Suffix, "po", "np", V>; // setpo -> setnp
3354 def NAE : CondCodeAlias<Prefix, Suffix, "nae", "b", V>; // setnae -> setb
3355 def NBE : CondCodeAlias<Prefix, Suffix, "nbe", "a", V>; // setnbe -> seta
3356 def NGE : CondCodeAlias<Prefix, Suffix, "nge", "l", V>; // setnge -> setl
3357 def NLE : CondCodeAlias<Prefix, Suffix, "nle", "g", V>; // setnle -> setg
3360 // Aliases for set<CC>
3361 defm : IntegerCondCodeMnemonicAlias<"set", "">;
3362 // Aliases for j<CC>
3363 defm : IntegerCondCodeMnemonicAlias<"j", "">;
3364 // Aliases for cmov<CC>{w,l,q}
3365 defm : IntegerCondCodeMnemonicAlias<"cmov", "w", "att">;
3366 defm : IntegerCondCodeMnemonicAlias<"cmov", "l", "att">;
3367 defm : IntegerCondCodeMnemonicAlias<"cmov", "q", "att">;
3368 // No size suffix for intel-style asm.
3369 defm : IntegerCondCodeMnemonicAlias<"cmov", "", "intel">;
3372 //===----------------------------------------------------------------------===//
3373 // Assembler Instruction Aliases
3374 //===----------------------------------------------------------------------===//
3376 // aad/aam default to base 10 if no operand is specified.
3377 def : InstAlias<"aad", (AAD8i8 10)>, Requires<[Not64BitMode]>;
3378 def : InstAlias<"aam", (AAM8i8 10)>, Requires<[Not64BitMode]>;
3380 // Disambiguate the mem/imm form of bt-without-a-suffix as btl.
3381 // Likewise for btc/btr/bts.
3382 def : InstAlias<"bt\t{$imm, $mem|$mem, $imm}",
3383 (BT32mi8 i32mem:$mem, i32u8imm:$imm), 0, "att">;
3384 def : InstAlias<"btc\t{$imm, $mem|$mem, $imm}",
3385 (BTC32mi8 i32mem:$mem, i32u8imm:$imm), 0, "att">;
3386 def : InstAlias<"btr\t{$imm, $mem|$mem, $imm}",
3387 (BTR32mi8 i32mem:$mem, i32u8imm:$imm), 0, "att">;
3388 def : InstAlias<"bts\t{$imm, $mem|$mem, $imm}",
3389 (BTS32mi8 i32mem:$mem, i32u8imm:$imm), 0, "att">;
// clr reg is a register self-xor.
3392 def : InstAlias<"clr{b}\t$reg", (XOR8rr GR8 :$reg, GR8 :$reg), 0>;
3393 def : InstAlias<"clr{w}\t$reg", (XOR16rr GR16:$reg, GR16:$reg), 0>;
3394 def : InstAlias<"clr{l}\t$reg", (XOR32rr GR32:$reg, GR32:$reg), 0>;
3395 def : InstAlias<"clr{q}\t$reg", (XOR64rr GR64:$reg, GR64:$reg), 0>;
3397 // lods aliases. Accept the destination being omitted because it's implicit
3398 // in the mnemonic, or the mnemonic suffix being omitted because it's implicit
3399 // in the destination.
3400 def : InstAlias<"lodsb\t$src", (LODSB srcidx8:$src), 0>;
3401 def : InstAlias<"lodsw\t$src", (LODSW srcidx16:$src), 0>;
3402 def : InstAlias<"lods{l|d}\t$src", (LODSL srcidx32:$src), 0>;
3403 def : InstAlias<"lodsq\t$src", (LODSQ srcidx64:$src), 0>, Requires<[In64BitMode]>;
3404 def : InstAlias<"lods\t{$src, %al|al, $src}", (LODSB srcidx8:$src), 0>;
3405 def : InstAlias<"lods\t{$src, %ax|ax, $src}", (LODSW srcidx16:$src), 0>;
3406 def : InstAlias<"lods\t{$src, %eax|eax, $src}", (LODSL srcidx32:$src), 0>;
3407 def : InstAlias<"lods\t{$src, %rax|rax, $src}", (LODSQ srcidx64:$src), 0>, Requires<[In64BitMode]>;
3408 def : InstAlias<"lods\t$src", (LODSB srcidx8:$src), 0, "intel">;
3409 def : InstAlias<"lods\t$src", (LODSW srcidx16:$src), 0, "intel">;
3410 def : InstAlias<"lods\t$src", (LODSL srcidx32:$src), 0, "intel">;
3411 def : InstAlias<"lods\t$src", (LODSQ srcidx64:$src), 0, "intel">, Requires<[In64BitMode]>;
3414 // stos aliases. Accept the source being omitted because it's implicit in
3415 // the mnemonic, or the mnemonic suffix being omitted because it's implicit
3417 def : InstAlias<"stosb\t$dst", (STOSB dstidx8:$dst), 0>;
3418 def : InstAlias<"stosw\t$dst", (STOSW dstidx16:$dst), 0>;
3419 def : InstAlias<"stos{l|d}\t$dst", (STOSL dstidx32:$dst), 0>;
3420 def : InstAlias<"stosq\t$dst", (STOSQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
3421 def : InstAlias<"stos\t{%al, $dst|$dst, al}", (STOSB dstidx8:$dst), 0>;
3422 def : InstAlias<"stos\t{%ax, $dst|$dst, ax}", (STOSW dstidx16:$dst), 0>;
3423 def : InstAlias<"stos\t{%eax, $dst|$dst, eax}", (STOSL dstidx32:$dst), 0>;
3424 def : InstAlias<"stos\t{%rax, $dst|$dst, rax}", (STOSQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
3425 def : InstAlias<"stos\t$dst", (STOSB dstidx8:$dst), 0, "intel">;
3426 def : InstAlias<"stos\t$dst", (STOSW dstidx16:$dst), 0, "intel">;
3427 def : InstAlias<"stos\t$dst", (STOSL dstidx32:$dst), 0, "intel">;
3428 def : InstAlias<"stos\t$dst", (STOSQ dstidx64:$dst), 0, "intel">, Requires<[In64BitMode]>;
3431 // scas aliases. Accept the destination being omitted because it's implicit
3432 // in the mnemonic, or the mnemonic suffix being omitted because it's implicit
3433 // in the destination.
3434 def : InstAlias<"scasb\t$dst", (SCASB dstidx8:$dst), 0>;
3435 def : InstAlias<"scasw\t$dst", (SCASW dstidx16:$dst), 0>;
3436 def : InstAlias<"scas{l|d}\t$dst", (SCASL dstidx32:$dst), 0>;
3437 def : InstAlias<"scasq\t$dst", (SCASQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
3438 def : InstAlias<"scas\t{$dst, %al|al, $dst}", (SCASB dstidx8:$dst), 0>;
3439 def : InstAlias<"scas\t{$dst, %ax|ax, $dst}", (SCASW dstidx16:$dst), 0>;
3440 def : InstAlias<"scas\t{$dst, %eax|eax, $dst}", (SCASL dstidx32:$dst), 0>;
3441 def : InstAlias<"scas\t{$dst, %rax|rax, $dst}", (SCASQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
3442 def : InstAlias<"scas\t$dst", (SCASB dstidx8:$dst), 0, "intel">;
3443 def : InstAlias<"scas\t$dst", (SCASW dstidx16:$dst), 0, "intel">;
3444 def : InstAlias<"scas\t$dst", (SCASL dstidx32:$dst), 0, "intel">;
3445 def : InstAlias<"scas\t$dst", (SCASQ dstidx64:$dst), 0, "intel">, Requires<[In64BitMode]>;
3447 // cmps aliases. Mnemonic suffix being omitted because it's implicit
3448 // in the destination.
3449 def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSB dstidx8:$dst, srcidx8:$src), 0, "intel">;
3450 def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSW dstidx16:$dst, srcidx16:$src), 0, "intel">;
3451 def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSL dstidx32:$dst, srcidx32:$src), 0, "intel">;
3452 def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSQ dstidx64:$dst, srcidx64:$src), 0, "intel">, Requires<[In64BitMode]>;
3454 // movs aliases. Mnemonic suffix being omitted because it's implicit
3455 // in the destination.
3456 def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSB dstidx8:$dst, srcidx8:$src), 0, "intel">;
3457 def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSW dstidx16:$dst, srcidx16:$src), 0, "intel">;
3458 def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSL dstidx32:$dst, srcidx32:$src), 0, "intel">;
3459 def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSQ dstidx64:$dst, srcidx64:$src), 0, "intel">, Requires<[In64BitMode]>;
3461 // div and idiv aliases for explicit A register.
// Both register and memory forms accept an explicit %al/%ax/%eax/%rax operand
// even though the A register is implicit in the instruction.
3462 def : InstAlias<"div{b}\t{$src, %al|al, $src}", (DIV8r GR8 :$src)>;
3463 def : InstAlias<"div{w}\t{$src, %ax|ax, $src}", (DIV16r GR16:$src)>;
3464 def : InstAlias<"div{l}\t{$src, %eax|eax, $src}", (DIV32r GR32:$src)>;
3465 def : InstAlias<"div{q}\t{$src, %rax|rax, $src}", (DIV64r GR64:$src)>;
3466 def : InstAlias<"div{b}\t{$src, %al|al, $src}", (DIV8m i8mem :$src)>;
3467 def : InstAlias<"div{w}\t{$src, %ax|ax, $src}", (DIV16m i16mem:$src)>;
3468 def : InstAlias<"div{l}\t{$src, %eax|eax, $src}", (DIV32m i32mem:$src)>;
3469 def : InstAlias<"div{q}\t{$src, %rax|rax, $src}", (DIV64m i64mem:$src)>;
3470 def : InstAlias<"idiv{b}\t{$src, %al|al, $src}", (IDIV8r GR8 :$src)>;
3471 def : InstAlias<"idiv{w}\t{$src, %ax|ax, $src}", (IDIV16r GR16:$src)>;
3472 def : InstAlias<"idiv{l}\t{$src, %eax|eax, $src}", (IDIV32r GR32:$src)>;
3473 def : InstAlias<"idiv{q}\t{$src, %rax|rax, $src}", (IDIV64r GR64:$src)>;
3474 def : InstAlias<"idiv{b}\t{$src, %al|al, $src}", (IDIV8m i8mem :$src)>;
3475 def : InstAlias<"idiv{w}\t{$src, %ax|ax, $src}", (IDIV16m i16mem:$src)>;
3476 def : InstAlias<"idiv{l}\t{$src, %eax|eax, $src}", (IDIV32m i32mem:$src)>;
3477 def : InstAlias<"idiv{q}\t{$src, %rax|rax, $src}", (IDIV64m i64mem:$src)>;
3481 // Various unary fpstack operations default to operating on ST1.
3482 // For example, "fxch" -> "fxch %st(1)"
// Operand-less x87 arithmetic/compare mnemonics expand to the %st(1) form.
3483 def : InstAlias<"faddp", (ADD_FPrST0 ST1), 0>;
3484 def : InstAlias<"fadd", (ADD_FPrST0 ST1), 0>;
3485 def : InstAlias<"fsub{|r}p", (SUBR_FPrST0 ST1), 0>;
3486 def : InstAlias<"fsub{r|}p", (SUB_FPrST0 ST1), 0>;
3487 def : InstAlias<"fmul", (MUL_FPrST0 ST1), 0>;
3488 def : InstAlias<"fmulp", (MUL_FPrST0 ST1), 0>;
3489 def : InstAlias<"fdiv{|r}p", (DIVR_FPrST0 ST1), 0>;
3490 def : InstAlias<"fdiv{r|}p", (DIV_FPrST0 ST1), 0>;
3491 def : InstAlias<"fxch", (XCH_F ST1), 0>;
3492 def : InstAlias<"fcom", (COM_FST0r ST1), 0>;
3493 def : InstAlias<"fcomp", (COMP_FST0r ST1), 0>;
3494 def : InstAlias<"fcomi", (COM_FIr ST1), 0>;
3495 def : InstAlias<"fcompi", (COM_FIPr ST1), 0>;
3496 def : InstAlias<"fucom", (UCOM_Fr ST1), 0>;
3497 def : InstAlias<"fucomp", (UCOM_FPr ST1), 0>;
3498 def : InstAlias<"fucomi", (UCOM_FIr ST1), 0>;
3499 def : InstAlias<"fucompi", (UCOM_FIPr ST1), 0>;
3501 // Handle fmul/fadd/fsub/fdiv instructions with explicitly written st(0) op.
3502 // For example, "fadd %st(4), %st(0)" -> "fadd %st(4)". We also disambiguate
3503 // instructions like "fadd %st(0), %st(0)" as "fadd %st(0)" for consistency with
// FpUnaryAlias: for a given mnemonic, accept both a single-operand form and an
// explicit "%st, %st" form, mapping both onto Inst.
3505 multiclass FpUnaryAlias<string Mnemonic, Instruction Inst, bit EmitAlias = 1> {
3506 def : InstAlias<!strconcat(Mnemonic, "\t$op"),
3507 (Inst RSTi:$op), EmitAlias>;
3508 def : InstAlias<!strconcat(Mnemonic, "\t{%st, %st|st, st}"),
3509 (Inst ST0), EmitAlias>;
3512 defm : FpUnaryAlias<"fadd", ADD_FST0r, 0>;
3513 defm : FpUnaryAlias<"faddp", ADD_FPrST0, 0>;
3514 defm : FpUnaryAlias<"fsub", SUB_FST0r, 0>;
3515 defm : FpUnaryAlias<"fsub{|r}p", SUBR_FPrST0, 0>;
3516 defm : FpUnaryAlias<"fsubr", SUBR_FST0r, 0>;
3517 defm : FpUnaryAlias<"fsub{r|}p", SUB_FPrST0, 0>;
3518 defm : FpUnaryAlias<"fmul", MUL_FST0r, 0>;
3519 defm : FpUnaryAlias<"fmulp", MUL_FPrST0, 0>;
3520 defm : FpUnaryAlias<"fdiv", DIV_FST0r, 0>;
3521 defm : FpUnaryAlias<"fdiv{|r}p", DIVR_FPrST0, 0>;
3522 defm : FpUnaryAlias<"fdivr", DIVR_FST0r, 0>;
3523 defm : FpUnaryAlias<"fdiv{r|}p", DIV_FPrST0, 0>;
3524 defm : FpUnaryAlias<"fcomi", COM_FIr, 0>;
3525 defm : FpUnaryAlias<"fucomi", UCOM_FIr, 0>;
3526 defm : FpUnaryAlias<"fcompi", COM_FIPr, 0>;
3527 defm : FpUnaryAlias<"fucompi", UCOM_FIPr, 0>;
3530 // Handle "f{mulp,addp} $op, %st(0)" the same as "f{mulp,addp} $op", since they
3531 // commute. We also allow fdiv[r]p/fsubrp even though they don't commute,
3532 // solely because gas supports it.
3533 def : InstAlias<"faddp\t{$op, %st|st, $op}", (ADD_FPrST0 RSTi:$op), 0>;
3534 def : InstAlias<"fmulp\t{$op, %st|st, $op}", (MUL_FPrST0 RSTi:$op), 0>;
3535 def : InstAlias<"fsub{|r}p\t{$op, %st|st, $op}", (SUBR_FPrST0 RSTi:$op), 0>;
3536 def : InstAlias<"fsub{r|}p\t{$op, %st|st, $op}", (SUB_FPrST0 RSTi:$op), 0>;
3537 def : InstAlias<"fdiv{|r}p\t{$op, %st|st, $op}", (DIVR_FPrST0 RSTi:$op), 0>;
3538 def : InstAlias<"fdiv{r|}p\t{$op, %st|st, $op}", (DIV_FPrST0 RSTi:$op), 0>;
// fnstsw with no operand stores the status word to AX.
3540 def : InstAlias<"fnstsw" , (FNSTSW16r), 0>;
3542 // lcall and ljmp aliases. This seems to be an odd mapping in 64-bit mode, but
3543 // this is compatible with what GAS does.
3544 def : InstAlias<"lcall\t$seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg), 0>, Requires<[In32BitMode]>;
3545 def : InstAlias<"ljmp\t$seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg), 0>, Requires<[In32BitMode]>;
3546 def : InstAlias<"lcall\t{*}$dst", (FARCALL32m opaquemem:$dst), 0>, Requires<[Not16BitMode]>;
3547 def : InstAlias<"ljmp\t{*}$dst", (FARJMP32m opaquemem:$dst), 0>, Requires<[Not16BitMode]>;
3548 def : InstAlias<"lcall\t$seg, $off", (FARCALL16i i16imm:$off, i16imm:$seg), 0>, Requires<[In16BitMode]>;
3549 def : InstAlias<"ljmp\t$seg, $off", (FARJMP16i i16imm:$off, i16imm:$seg), 0>, Requires<[In16BitMode]>;
3550 def : InstAlias<"lcall\t{*}$dst", (FARCALL16m opaquemem:$dst), 0>, Requires<[In16BitMode]>;
3551 def : InstAlias<"ljmp\t{*}$dst", (FARJMP16m opaquemem:$dst), 0>, Requires<[In16BitMode]>;
// Indirect jmp through memory, sized by the current mode.
3553 def : InstAlias<"jmp\t{*}$dst", (JMP64m i64mem:$dst), 0, "att">, Requires<[In64BitMode]>;
3554 def : InstAlias<"jmp\t{*}$dst", (JMP32m i32mem:$dst), 0, "att">, Requires<[In32BitMode]>;
3555 def : InstAlias<"jmp\t{*}$dst", (JMP16m i16mem:$dst), 0, "att">, Requires<[In16BitMode]>;
3558 // "imul <imm>, B" is an alias for "imul <imm>, B, B".
3559 def : InstAlias<"imul{w}\t{$imm, $r|$r, $imm}", (IMUL16rri GR16:$r, GR16:$r, i16imm:$imm), 0>;
3560 def : InstAlias<"imul{w}\t{$imm, $r|$r, $imm}", (IMUL16rri8 GR16:$r, GR16:$r, i16i8imm:$imm), 0>;
3561 def : InstAlias<"imul{l}\t{$imm, $r|$r, $imm}", (IMUL32rri GR32:$r, GR32:$r, i32imm:$imm), 0>;
3562 def : InstAlias<"imul{l}\t{$imm, $r|$r, $imm}", (IMUL32rri8 GR32:$r, GR32:$r, i32i8imm:$imm), 0>;
3563 def : InstAlias<"imul{q}\t{$imm, $r|$r, $imm}", (IMUL64rri32 GR64:$r, GR64:$r, i64i32imm:$imm), 0>;
3564 def : InstAlias<"imul{q}\t{$imm, $r|$r, $imm}", (IMUL64rri8 GR64:$r, GR64:$r, i64i8imm:$imm), 0>;
3566 // ins aliases. Accept the mnemonic suffix being omitted because it's implicit
3567 // in the destination.
3568 def : InstAlias<"ins\t{%dx, $dst|$dst, dx}", (INSB dstidx8:$dst), 0, "intel">;
3569 def : InstAlias<"ins\t{%dx, $dst|$dst, dx}", (INSW dstidx16:$dst), 0, "intel">;
3570 def : InstAlias<"ins\t{%dx, $dst|$dst, dx}", (INSL dstidx32:$dst), 0, "intel">;
3572 // outs aliases. Accept the mnemonic suffix being omitted because it's implicit
3574 def : InstAlias<"outs\t{$src, %dx|dx, $src}", (OUTSB srcidx8:$src), 0, "intel">;
3575 def : InstAlias<"outs\t{$src, %dx|dx, $src}", (OUTSW srcidx16:$src), 0, "intel">;
3576 def : InstAlias<"outs\t{$src, %dx|dx, $src}", (OUTSL srcidx32:$src), 0, "intel">;
3578 // inb %dx -> inb %al, %dx
3579 def : InstAlias<"inb\t{%dx|dx}", (IN8rr), 0>;
3580 def : InstAlias<"inw\t{%dx|dx}", (IN16rr), 0>;
3581 def : InstAlias<"inl\t{%dx|dx}", (IN32rr), 0>;
3582 def : InstAlias<"inb\t$port", (IN8ri u8imm:$port), 0>;
3583 def : InstAlias<"inw\t$port", (IN16ri u8imm:$port), 0>;
3584 def : InstAlias<"inl\t$port", (IN32ri u8imm:$port), 0>;
3587 // jmp and call aliases for lcall and ljmp. jmp $42,$5 -> ljmp
3588 def : InstAlias<"call\t$seg, $off", (FARCALL16i i16imm:$off, i16imm:$seg)>, Requires<[In16BitMode]>;
3589 def : InstAlias<"jmp\t$seg, $off", (FARJMP16i i16imm:$off, i16imm:$seg)>, Requires<[In16BitMode]>;
3590 def : InstAlias<"call\t$seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg)>, Requires<[In32BitMode]>;
3591 def : InstAlias<"jmp\t$seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg)>, Requires<[In32BitMode]>;
3592 def : InstAlias<"callw\t$seg, $off", (FARCALL16i i16imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
3593 def : InstAlias<"jmpw\t$seg, $off", (FARJMP16i i16imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
3594 def : InstAlias<"calll\t$seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
3595 def : InstAlias<"jmpl\t$seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
3597 // Match 'movq <largeimm>, <reg>' as an alias for movabsq.
3598 def : InstAlias<"mov{q}\t{$imm, $reg|$reg, $imm}", (MOV64ri GR64:$reg, i64imm:$imm), 0>;
3600 // Match 'movd GR64, MMX' as an alias for movq to be compatible with gas,
3601 // which supports this due to an old AMD documentation bug when 64-bit mode was
3603 def : InstAlias<"movd\t{$src, $dst|$dst, $src}",
3604 (MMX_MOVD64to64rr VR64:$dst, GR64:$src), 0>;
3605 def : InstAlias<"movd\t{$src, $dst|$dst, $src}",
3606 (MMX_MOVD64from64rr GR64:$dst, VR64:$src), 0>;
// movsx without a size suffix: disambiguate by the operand register classes.
3609 def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX16rr8 GR16:$dst, GR8:$src), 0, "att">;
3610 def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX16rm8 GR16:$dst, i8mem:$src), 0, "att">;
3611 def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX32rr8 GR32:$dst, GR8:$src), 0, "att">;
3612 def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX32rr16 GR32:$dst, GR16:$src), 0, "att">;
3613 def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX64rr8 GR64:$dst, GR8:$src), 0, "att">;
3614 def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX64rr16 GR64:$dst, GR16:$src), 0, "att">;
3615 def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX64rr32 GR64:$dst, GR32:$src), 0, "att">;
// movzx without a size suffix, likewise.
3618 def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX16rr8 GR16:$dst, GR8:$src), 0, "att">;
3619 def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX16rm8 GR16:$dst, i8mem:$src), 0, "att">;
3620 def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX32rr8 GR32:$dst, GR8:$src), 0, "att">;
3621 def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX32rr16 GR32:$dst, GR16:$src), 0, "att">;
3622 def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX64rr8 GR64:$dst, GR8:$src), 0, "att">;
3623 def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX64rr16 GR64:$dst, GR16:$src), 0, "att">;
3624 // Note: No GR32->GR64 movzx form.
3626 // outb %dx -> outb %al, %dx
3627 def : InstAlias<"outb\t{%dx|dx}", (OUT8rr), 0>;
3628 def : InstAlias<"outw\t{%dx|dx}", (OUT16rr), 0>;
3629 def : InstAlias<"outl\t{%dx|dx}", (OUT32rr), 0>;
3630 def : InstAlias<"outb\t$port", (OUT8ir u8imm:$port), 0>;
3631 def : InstAlias<"outw\t$port", (OUT16ir u8imm:$port), 0>;
3632 def : InstAlias<"outl\t$port", (OUT32ir u8imm:$port), 0>;
3634 // 'sldt <mem>' can be encoded with either sldtw or sldtq with the same
3635 // effect (both store to a 16-bit mem). Force to sldtw to avoid ambiguity
3636 // errors, since its encoding is the most compact.
3637 def : InstAlias<"sldt $mem", (SLDT16m i16mem:$mem), 0>;
3639 // shld/shrd op,op -> shld op, op, CL
// Two-operand shld/shrd forms take the shift count implicitly from CL,
// for both register and memory destinations.
3640 def : InstAlias<"shld{w}\t{$r2, $r1|$r1, $r2}", (SHLD16rrCL GR16:$r1, GR16:$r2), 0>;
3641 def : InstAlias<"shld{l}\t{$r2, $r1|$r1, $r2}", (SHLD32rrCL GR32:$r1, GR32:$r2), 0>;
3642 def : InstAlias<"shld{q}\t{$r2, $r1|$r1, $r2}", (SHLD64rrCL GR64:$r1, GR64:$r2), 0>;
3643 def : InstAlias<"shrd{w}\t{$r2, $r1|$r1, $r2}", (SHRD16rrCL GR16:$r1, GR16:$r2), 0>;
3644 def : InstAlias<"shrd{l}\t{$r2, $r1|$r1, $r2}", (SHRD32rrCL GR32:$r1, GR32:$r2), 0>;
3645 def : InstAlias<"shrd{q}\t{$r2, $r1|$r1, $r2}", (SHRD64rrCL GR64:$r1, GR64:$r2), 0>;
3647 def : InstAlias<"shld{w}\t{$reg, $mem|$mem, $reg}", (SHLD16mrCL i16mem:$mem, GR16:$reg), 0>;
3648 def : InstAlias<"shld{l}\t{$reg, $mem|$mem, $reg}", (SHLD32mrCL i32mem:$mem, GR32:$reg), 0>;
3649 def : InstAlias<"shld{q}\t{$reg, $mem|$mem, $reg}", (SHLD64mrCL i64mem:$mem, GR64:$reg), 0>;
3650 def : InstAlias<"shrd{w}\t{$reg, $mem|$mem, $reg}", (SHRD16mrCL i16mem:$mem, GR16:$reg), 0>;
3651 def : InstAlias<"shrd{l}\t{$reg, $mem|$mem, $reg}", (SHRD32mrCL i32mem:$mem, GR32:$reg), 0>;
3652 def : InstAlias<"shrd{q}\t{$reg, $mem|$mem, $reg}", (SHRD64mrCL i64mem:$mem, GR64:$reg), 0>;
3654 /* FIXME: This is disabled because the asm matcher is currently incapable of
3655 * matching a fixed immediate like $1.
// NOTE: everything below through the ShiftRotateByOneAlias defm uses is inside
// this block comment and is compiled out until the FIXME is resolved.
3656 // "shl X, $1" is an alias for "shl X".
3657 multiclass ShiftRotateByOneAlias<string Mnemonic, string Opc> {
3658 def : InstAlias<!strconcat(Mnemonic, "b $op, $$1"),
3659 (!cast<Instruction>(!strconcat(Opc, "8r1")) GR8:$op)>;
3660 def : InstAlias<!strconcat(Mnemonic, "w $op, $$1"),
3661 (!cast<Instruction>(!strconcat(Opc, "16r1")) GR16:$op)>;
3662 def : InstAlias<!strconcat(Mnemonic, "l $op, $$1"),
3663 (!cast<Instruction>(!strconcat(Opc, "32r1")) GR32:$op)>;
3664 def : InstAlias<!strconcat(Mnemonic, "q $op, $$1"),
3665 (!cast<Instruction>(!strconcat(Opc, "64r1")) GR64:$op)>;
3666 def : InstAlias<!strconcat(Mnemonic, "b $op, $$1"),
3667 (!cast<Instruction>(!strconcat(Opc, "8m1")) i8mem:$op)>;
3668 def : InstAlias<!strconcat(Mnemonic, "w $op, $$1"),
3669 (!cast<Instruction>(!strconcat(Opc, "16m1")) i16mem:$op)>;
3670 def : InstAlias<!strconcat(Mnemonic, "l $op, $$1"),
3671 (!cast<Instruction>(!strconcat(Opc, "32m1")) i32mem:$op)>;
3672 def : InstAlias<!strconcat(Mnemonic, "q $op, $$1"),
3673 (!cast<Instruction>(!strconcat(Opc, "64m1")) i64mem:$op)>;
3676 defm : ShiftRotateByOneAlias<"rcl", "RCL">;
3677 defm : ShiftRotateByOneAlias<"rcr", "RCR">;
3678 defm : ShiftRotateByOneAlias<"rol", "ROL">;
3679 defm : ShiftRotateByOneAlias<"ror", "ROR">;
3682 // test: We accept "testX <reg>, <mem>" and "testX <mem>, <reg>" as synonyms.
// TEST is commutative, so the reversed spelling is mapped onto the canonical
// TEST<n>mr (memory, register) instruction for each operand width. Parse-only
// (trailing 0): the printer always uses the canonical form.
3683 def : InstAlias<"test{b}\t{$mem, $val|$val, $mem}",
3684                 (TEST8mr  i8mem :$mem, GR8 :$val), 0>;
3685 def : InstAlias<"test{w}\t{$mem, $val|$val, $mem}",
3686                 (TEST16mr i16mem:$mem, GR16:$val), 0>;
3687 def : InstAlias<"test{l}\t{$mem, $val|$val, $mem}",
3688                 (TEST32mr i32mem:$mem, GR32:$val), 0>;
3689 def : InstAlias<"test{q}\t{$mem, $val|$val, $mem}",
3690                 (TEST64mr i64mem:$mem, GR64:$val), 0>;
3692 // xchg: We accept "xchgX <reg>, <mem>" and "xchgX <mem>, <reg>" as synonyms.
// XCHG with memory is symmetric; the mem-first spelling is folded into the
// reg, mem (XCHG<n>rm) instruction for each width. Parse-only (trailing 0).
3693 def : InstAlias<"xchg{b}\t{$mem, $val|$val, $mem}",
3694                 (XCHG8rm  GR8 :$val, i8mem :$mem), 0>;
3695 def : InstAlias<"xchg{w}\t{$mem, $val|$val, $mem}",
3696                 (XCHG16rm GR16:$val, i16mem:$mem), 0>;
3697 def : InstAlias<"xchg{l}\t{$mem, $val|$val, $mem}",
3698                 (XCHG32rm GR32:$val, i32mem:$mem), 0>;
3699 def : InstAlias<"xchg{q}\t{$mem, $val|$val, $mem}",
3700                 (XCHG64rm GR64:$val, i64mem:$mem), 0>;
3702 // xchg: We accept "xchgX <reg>, %eax" and "xchgX %eax, <reg>" as synonyms.
// The accumulator-first spelling maps onto the short accumulator encoding
// (XCHG<n>ar, the 0x90+r form). Parse-only aliases (trailing 0).
3703 def : InstAlias<"xchg{w}\t{%ax, $src|$src, ax}", (XCHG16ar GR16:$src), 0>;
3704 def : InstAlias<"xchg{l}\t{%eax, $src|$src, eax}", (XCHG32ar GR32:$src), 0>;
3705 def : InstAlias<"xchg{q}\t{%rax, $src|$src, rax}", (XCHG64ar GR64:$src), 0>;
3707 // In 64-bit mode, xchg %eax, %eax can't be encoded with the 0x90 opcode we
3708 // would get by default because it's defined as NOP. But xchg %eax, %eax implies
3709 // implicit zeroing of the upper 32 bits. So alias to the longer encoding.
// Uses the two-operand XCHG32rr form so the zeroing side effect is preserved;
// only needed (and only valid as written) in 64-bit mode.
3710 def : InstAlias<"xchg{l}\t{%eax, %eax|eax, eax}",
3711                 (XCHG32rr EAX, EAX), 0>, Requires<[In64BitMode]>;
3713 // xchg %rax, %rax is a nop in x86-64 and can be encoded as such. Without this
3714 // we emit an unneeded REX.w prefix.
// Mapping directly to NOOP yields the one-byte 0x90 encoding. Parse-only.
3715 def : InstAlias<"xchg{q}\t{%rax, %rax|rax, rax}", (NOOP), 0>;
3717 // These aliases exist to get the parser to prioritize matching 8-bit
3718 // immediate encodings over matching the implicit ax/eax/rax encodings. By
3719 // explicitly mentioning the A register here, these entries will be ordered
3720 // first due to the more explicit immediate type.
// Each alias forwards to the sign-extended-imm8 (*ri8) instruction so that,
// e.g., "addw $4, %ax" picks the 3-byte imm8 form instead of the
// accumulator-with-imm16 form. Parse-only aliases (trailing 0).
//
// 16-bit (ax) forms, imm sign-extended from 8 bits:
3721 def : InstAlias<"adc{w}\t{$imm, %ax|ax, $imm}", (ADC16ri8 AX, i16i8imm:$imm), 0>;
3722 def : InstAlias<"add{w}\t{$imm, %ax|ax, $imm}", (ADD16ri8 AX, i16i8imm:$imm), 0>;
3723 def : InstAlias<"and{w}\t{$imm, %ax|ax, $imm}", (AND16ri8 AX, i16i8imm:$imm), 0>;
3724 def : InstAlias<"cmp{w}\t{$imm, %ax|ax, $imm}", (CMP16ri8 AX, i16i8imm:$imm), 0>;
3725 def : InstAlias<"or{w}\t{$imm, %ax|ax, $imm}",  (OR16ri8 AX,  i16i8imm:$imm), 0>;
3726 def : InstAlias<"sbb{w}\t{$imm, %ax|ax, $imm}", (SBB16ri8 AX, i16i8imm:$imm), 0>;
3727 def : InstAlias<"sub{w}\t{$imm, %ax|ax, $imm}", (SUB16ri8 AX, i16i8imm:$imm), 0>;
3728 def : InstAlias<"xor{w}\t{$imm, %ax|ax, $imm}", (XOR16ri8 AX, i16i8imm:$imm), 0>;
// 32-bit (eax) forms:
3730 def : InstAlias<"adc{l}\t{$imm, %eax|eax, $imm}", (ADC32ri8 EAX, i32i8imm:$imm), 0>;
3731 def : InstAlias<"add{l}\t{$imm, %eax|eax, $imm}", (ADD32ri8 EAX, i32i8imm:$imm), 0>;
3732 def : InstAlias<"and{l}\t{$imm, %eax|eax, $imm}", (AND32ri8 EAX, i32i8imm:$imm), 0>;
3733 def : InstAlias<"cmp{l}\t{$imm, %eax|eax, $imm}", (CMP32ri8 EAX, i32i8imm:$imm), 0>;
3734 def : InstAlias<"or{l}\t{$imm, %eax|eax, $imm}",  (OR32ri8 EAX,  i32i8imm:$imm), 0>;
3735 def : InstAlias<"sbb{l}\t{$imm, %eax|eax, $imm}", (SBB32ri8 EAX, i32i8imm:$imm), 0>;
3736 def : InstAlias<"sub{l}\t{$imm, %eax|eax, $imm}", (SUB32ri8 EAX, i32i8imm:$imm), 0>;
3737 def : InstAlias<"xor{l}\t{$imm, %eax|eax, $imm}", (XOR32ri8 EAX, i32i8imm:$imm), 0>;
// 64-bit (rax) forms:
3739 def : InstAlias<"adc{q}\t{$imm, %rax|rax, $imm}", (ADC64ri8 RAX, i64i8imm:$imm), 0>;
3740 def : InstAlias<"add{q}\t{$imm, %rax|rax, $imm}", (ADD64ri8 RAX, i64i8imm:$imm), 0>;
3741 def : InstAlias<"and{q}\t{$imm, %rax|rax, $imm}", (AND64ri8 RAX, i64i8imm:$imm), 0>;
3742 def : InstAlias<"cmp{q}\t{$imm, %rax|rax, $imm}", (CMP64ri8 RAX, i64i8imm:$imm), 0>;
3743 def : InstAlias<"or{q}\t{$imm, %rax|rax, $imm}",  (OR64ri8 RAX,  i64i8imm:$imm), 0>;
3744 def : InstAlias<"sbb{q}\t{$imm, %rax|rax, $imm}", (SBB64ri8 RAX, i64i8imm:$imm), 0>;
3745 def : InstAlias<"sub{q}\t{$imm, %rax|rax, $imm}", (SUB64ri8 RAX, i64i8imm:$imm), 0>;
3746 def : InstAlias<"xor{q}\t{$imm, %rax|rax, $imm}", (XOR64ri8 RAX, i64i8imm:$imm), 0>;