//===-- X86InstrInfo.td - Main X86 Instruction Definition --*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 instruction set, defining the instructions, and
// properties of the instructions which are needed for code generation, machine
// code emission, and analysis.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// X86 specific DAG Nodes.
19 def SDTX86CmpTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisInt<1>,
21 def SDTX86FCmp : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisFP<1>,
// Conditional move: one result matching the two value operands, plus an
// i8 condition-code operand and an i32 EFLAGS operand.
def SDTX86Cmov : SDTypeProfile<1, 4,
                               [SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>,
                                SDTCisVT<3, i8>, SDTCisVT<4, i32>]>;
28 // Unary and binary operator instructions that set EFLAGS as a side-effect.
29 def SDTUnaryArithWithFlags : SDTypeProfile<2, 1,
31 SDTCisInt<0>, SDTCisVT<1, i32>]>;
33 def SDTBinaryArithWithFlags : SDTypeProfile<2, 2,
36 SDTCisInt<0>, SDTCisVT<1, i32>]>;
38 // SDTBinaryArithWithFlagsInOut - RES1, EFLAGS = op LHS, RHS, EFLAGS
39 def SDTBinaryArithWithFlagsInOut : SDTypeProfile<2, 3,
45 // RES1, RES2, FLAGS = op LHS, RHS
46 def SDT2ResultBinaryArithWithFlags : SDTypeProfile<3, 2,
50 SDTCisInt<0>, SDTCisVT<1, i32>]>;
// Conditional branch: branch target (OtherVT), i8 condition code, i32 EFLAGS.
def SDTX86BrCond : SDTypeProfile<0, 3,
                                 [SDTCisVT<0, OtherVT>,
                                  SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;
55 def SDTX86SetCC : SDTypeProfile<1, 2,
57 SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;
58 def SDTX86SetCC_C : SDTypeProfile<1, 2,
60 SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;
// SAHF-style node: i32 flags result from an i8 operand.
def SDTX86sahf : SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisVT<1, i8>]>;

// RDRAND/RDSEED profile: an integer random value plus an i32 flags result.
def SDTX86rdrand : SDTypeProfile<2, 0, [SDTCisInt<0>, SDTCisVT<1, i32>]>;

// RDPKRU: i32 result from an i32 operand.
def SDTX86rdpkru : SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;
67 def SDTX86wrpkru : SDTypeProfile<0, 3, [SDTCisVT<0, i32>, SDTCisVT<1, i32>,
70 def SDTX86cas : SDTypeProfile<0, 3, [SDTCisPtrTy<0>, SDTCisInt<1>,
// Profiles for the 8- and 16-byte compare-and-exchange DAG nodes; the
// value operands are implicit, only the address (and, for 16 bytes, an
// extra i64) appear here.
def SDTX86cas8pair : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
def SDTX86cas16pair : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisVT<1, i64>]>;
75 def SDTLockBinaryArithWithFlags : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
79 def SDTLockUnaryArithWithFlags : SDTypeProfile<1, 1, [SDTCisVT<0, i32>,
82 def SDTX86Ret : SDTypeProfile<0, -1, [SDTCisVT<0, i32>]>;
84 def SDT_X86CallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>,
86 def SDT_X86CallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>,
// Calls and no-track indirect branches: variadic with a leading
// pointer-sized target operand.
def SDT_X86Call : SDTypeProfile<0, -1, [SDTCisVT<0, iPTR>]>;

def SDT_X86NtBrind : SDTypeProfile<0, -1, [SDTCisVT<0, iPTR>]>;
93 def SDT_X86VASTART_SAVE_XMM_REGS : SDTypeProfile<0, -1, [SDTCisVT<0, i8>,
96 def SDT_X86VAARG : SDTypeProfile<1, -1, [SDTCisPtrTy<0>,
// REP string operation: single operand naming the element value type.
def SDTX86RepStr : SDTypeProfile<0, 1, [SDTCisVT<0, OtherVT>]>;

def SDTX86Void : SDTypeProfile<0, 0, []>;

// Address wrapper: result has the same (pointer) type as the wrapped operand.
def SDTX86Wrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>;

// TLS address-computation nodes take a single integer operand.
def SDT_X86TLSADDR : SDTypeProfile<0, 1, [SDTCisInt<0>]>;

def SDT_X86TLSBASEADDR : SDTypeProfile<0, 1, [SDTCisInt<0>]>;

def SDT_X86TLSCALL : SDTypeProfile<0, 1, [SDTCisInt<0>]>;

// Stack-allocation nodes: pointer-sized size operand; SEG/PROBED variants
// also produce a pointer-sized result.
def SDT_X86DYN_ALLOCA : SDTypeProfile<0, 1, [SDTCisVT<0, iPTR>]>;

def SDT_X86SEG_ALLOCA : SDTypeProfile<1, 1, [SDTCisVT<0, iPTR>, SDTCisVT<1, iPTR>]>;

def SDT_X86PROBED_ALLOCA : SDTypeProfile<1, 1, [SDTCisVT<0, iPTR>, SDTCisVT<1, iPTR>]>;

def SDT_X86EHRET : SDTypeProfile<0, 1, [SDTCisInt<0>]>;

// Tail-call return: pointer target plus an i32 operand.
def SDT_X86TCRET : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisVT<1, i32>]>;

def SDT_X86MEMBARRIER : SDTypeProfile<0, 0, []>;

// ENQCMD/ENQCMDS: i32 status result, pointer destination, source matching
// the destination type.
def SDT_X86ENQCMD : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
                                         SDTCisPtrTy<1>, SDTCisSameAs<1, 2>]>;
129 def SDT_X86AESENCDECKL : SDTypeProfile<2, 2, [SDTCisVT<0, v2i64>,
// Memory barrier: chained and marked side-effecting so it is neither
// reordered nor deleted.
def X86MemBarrier : SDNode<"X86ISD::MEMBARRIER", SDT_X86MEMBARRIER,
                           [SDNPHasChain,SDNPSideEffect]>;
136 def X86MFence : SDNode<"X86ISD::MFENCE", SDT_X86MEMBARRIER,
// Bit-scan forward/reverse; the WithFlags profile gives them an EFLAGS result.
def X86bsf : SDNode<"X86ISD::BSF", SDTUnaryArithWithFlags>;
def X86bsr : SDNode<"X86ISD::BSR", SDTUnaryArithWithFlags>;
// Funnel shift left/right.
def X86fshl : SDNode<"X86ISD::FSHL", SDTIntShiftDOp>;
def X86fshr : SDNode<"X86ISD::FSHR", SDTIntShiftDOp>;

// Compare/test nodes producing an i32 flags result.
def X86cmp : SDNode<"X86ISD::CMP" , SDTX86CmpTest>;
def X86fcmp : SDNode<"X86ISD::FCMP", SDTX86FCmp>;
// Strict FP compares carry a chain so they are not speculated or reordered.
def X86strict_fcmp : SDNode<"X86ISD::STRICT_FCMP", SDTX86FCmp, [SDNPHasChain]>;
def X86strict_fcmps : SDNode<"X86ISD::STRICT_FCMPS", SDTX86FCmp, [SDNPHasChain]>;
def X86bt : SDNode<"X86ISD::BT", SDTX86CmpTest>;

def X86cmov : SDNode<"X86ISD::CMOV", SDTX86Cmov>;
152 def X86brcond : SDNode<"X86ISD::BRCOND", SDTX86BrCond,
def X86setcc : SDNode<"X86ISD::SETCC", SDTX86SetCC>;
def X86setcc_c : SDNode<"X86ISD::SETCC_CARRY", SDTX86SetCC_C>;

// Hardware random sources: chained and side-effecting so repeated reads
// are not combined.
def X86rdrand : SDNode<"X86ISD::RDRAND", SDTX86rdrand,
                       [SDNPHasChain, SDNPSideEffect]>;

def X86rdseed : SDNode<"X86ISD::RDSEED", SDTX86rdrand,
                       [SDNPHasChain, SDNPSideEffect]>;

// Protection-key register read/write.
def X86rdpkru : SDNode<"X86ISD::RDPKRU", SDTX86rdpkru,
                       [SDNPHasChain, SDNPSideEffect]>;
def X86wrpkru : SDNode<"X86ISD::WRPKRU", SDTX86wrpkru,
                       [SDNPHasChain, SDNPSideEffect]>;
// Locked compare-and-exchange nodes: modeled as both load and store, with
// a memory operand and glue in/out.
def X86cas : SDNode<"X86ISD::LCMPXCHG_DAG", SDTX86cas,
                    [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
                     SDNPMayLoad, SDNPMemOperand]>;
def X86cas8 : SDNode<"X86ISD::LCMPXCHG8_DAG", SDTX86cas8pair,
                     [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
                      SDNPMayLoad, SDNPMemOperand]>;
def X86cas16 : SDNode<"X86ISD::LCMPXCHG16_DAG", SDTX86cas16pair,
                      [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
                       SDNPMayLoad, SDNPMemOperand]>;

// Function return and interrupt return.
def X86retflag : SDNode<"X86ISD::RET_FLAG", SDTX86Ret,
                        [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def X86iret : SDNode<"X86ISD::IRET", SDTX86Ret,
                     [SDNPHasChain, SDNPOptInGlue]>;

// va_start helper that stores the XMM argument registers.
def X86vastart_save_xmm_regs :
                 SDNode<"X86ISD::VASTART_SAVE_XMM_REGS",
                        SDT_X86VASTART_SAVE_XMM_REGS,
                        [SDNPHasChain, SDNPMayStore, SDNPMemOperand, SDNPVariadic]>;
188 SDNode<"X86ISD::VAARG_64", SDT_X86VAARG,
189 [SDNPHasChain, SDNPMayLoad, SDNPMayStore,
192 SDNode<"X86ISD::VAARG_X32", SDT_X86VAARG,
193 [SDNPHasChain, SDNPMayLoad, SDNPMayStore,
// Marks the start of a call sequence (stack adjustment region).
def X86callseq_start :
                 SDNode<"ISD::CALLSEQ_START", SDT_X86CallSeqStart,
                        [SDNPHasChain, SDNPOutGlue]>;
199 SDNode<"ISD::CALLSEQ_END", SDT_X86CallSeqEnd,
200 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
202 def X86call : SDNode<"X86ISD::CALL", SDT_X86Call,
203 [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
206 def X86call_rvmarker : SDNode<"X86ISD::CALL_RVMARKER", SDT_X86Call,
207 [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
211 def X86NoTrackCall : SDNode<"X86ISD::NT_CALL", SDT_X86Call,
212 [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
214 def X86NoTrackBrind : SDNode<"X86ISD::NT_BRIND", SDT_X86NtBrind,
// REP STOS: repeated store, glued to the implicit register setup.
def X86rep_stos: SDNode<"X86ISD::REP_STOS", SDTX86RepStr,
                        [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore]>;
219 def X86rep_movs: SDNode<"X86ISD::REP_MOVS", SDTX86RepStr,
220 [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
// Address wrappers for symbolic operands; the RIP variant is the
// RIP-relative form.
def X86Wrapper : SDNode<"X86ISD::Wrapper", SDTX86Wrapper>;
def X86WrapperRIP : SDNode<"X86ISD::WrapperRIP", SDTX86Wrapper>;
226 def X86RecoverFrameAlloc : SDNode<"ISD::LOCAL_RECOVER",
227 SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
// TLS address computation nodes; chained with optional glue in and glue out.
def X86tlsaddr : SDNode<"X86ISD::TLSADDR", SDT_X86TLSADDR,
                        [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;

def X86tlsbaseaddr : SDNode<"X86ISD::TLSBASEADDR", SDT_X86TLSBASEADDR,
                            [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
236 def X86ehret : SDNode<"X86ISD::EH_RETURN", SDT_X86EHRET,
239 def X86eh_sjlj_setjmp : SDNode<"X86ISD::EH_SJLJ_SETJMP",
240 SDTypeProfile<1, 1, [SDTCisInt<0>,
242 [SDNPHasChain, SDNPSideEffect]>;
// SjLj exception handling: longjmp and dispatch-table setup.
def X86eh_sjlj_longjmp : SDNode<"X86ISD::EH_SJLJ_LONGJMP",
                                SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>,
                                [SDNPHasChain, SDNPSideEffect]>;
def X86eh_sjlj_setup_dispatch : SDNode<"X86ISD::EH_SJLJ_SETUP_DISPATCH",
                                       SDTypeProfile<0, 0, []>,
                                       [SDNPHasChain, SDNPSideEffect]>;

// Tail-call return.
def X86tcret : SDNode<"X86ISD::TC_RETURN", SDT_X86TCRET,
                      [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
253 def X86add_flag : SDNode<"X86ISD::ADD", SDTBinaryArithWithFlags,
255 def X86sub_flag : SDNode<"X86ISD::SUB", SDTBinaryArithWithFlags>;
256 def X86smul_flag : SDNode<"X86ISD::SMUL", SDTBinaryArithWithFlags,
258 def X86umul_flag : SDNode<"X86ISD::UMUL", SDT2ResultBinaryArithWithFlags,
// Add/subtract with carry: the InOut profile both consumes and produces EFLAGS.
def X86adc_flag : SDNode<"X86ISD::ADC", SDTBinaryArithWithFlagsInOut>;
def X86sbb_flag : SDNode<"X86ISD::SBB", SDTBinaryArithWithFlagsInOut>;
263 def X86or_flag : SDNode<"X86ISD::OR", SDTBinaryArithWithFlags,
265 def X86xor_flag : SDNode<"X86ISD::XOR", SDTBinaryArithWithFlags,
267 def X86and_flag : SDNode<"X86ISD::AND", SDTBinaryArithWithFlags,
270 def X86lock_add : SDNode<"X86ISD::LADD", SDTLockBinaryArithWithFlags,
271 [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
273 def X86lock_sub : SDNode<"X86ISD::LSUB", SDTLockBinaryArithWithFlags,
274 [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
276 def X86lock_or : SDNode<"X86ISD::LOR", SDTLockBinaryArithWithFlags,
277 [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
279 def X86lock_xor : SDNode<"X86ISD::LXOR", SDTLockBinaryArithWithFlags,
280 [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
282 def X86lock_and : SDNode<"X86ISD::LAND", SDTLockBinaryArithWithFlags,
283 [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
// BMI/TBM bit-manipulation binary operations.
def X86bextr : SDNode<"X86ISD::BEXTR", SDTIntBinOp>;
def X86bextri : SDNode<"X86ISD::BEXTRI", SDTIntBinOp>;

def X86bzhi : SDNode<"X86ISD::BZHI", SDTIntBinOp>;

def X86pdep : SDNode<"X86ISD::PDEP", SDTIntBinOp>;
def X86pext : SDNode<"X86ISD::PEXT", SDTIntBinOp>;

// Multiply by an immediate.
def X86mul_imm : SDNode<"X86ISD::MUL_IMM", SDTIntBinOp>;

// Dynamic stack allocation.
def X86DynAlloca : SDNode<"X86ISD::DYN_ALLOCA", SDT_X86DYN_ALLOCA,
                          [SDNPHasChain, SDNPOutGlue]>;
299 def X86SegAlloca : SDNode<"X86ISD::SEG_ALLOCA", SDT_X86SEG_ALLOCA,
302 def X86ProbedAlloca : SDNode<"X86ISD::PROBED_ALLOCA", SDT_X86PROBED_ALLOCA,
def X86TLSCall : SDNode<"X86ISD::TLSCALL", SDT_X86TLSCALL,
                        [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;

// LWPINS: i32 status result; integer operand plus two i32 immediates.
def X86lwpins : SDNode<"X86ISD::LWPINS",
                       SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisInt<1>,
                                            SDTCisVT<2, i32>, SDTCisVT<3, i32>]>,
                       [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPSideEffect]>;

// WAITPKG timed-wait nodes: i32 status result; same operand shape as LWPINS.
def X86umwait : SDNode<"X86ISD::UMWAIT",
                       SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisInt<1>,
                                            SDTCisVT<2, i32>, SDTCisVT<3, i32>]>,
                       [SDNPHasChain, SDNPSideEffect]>;

def X86tpause : SDNode<"X86ISD::TPAUSE",
                       SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisInt<1>,
                                            SDTCisVT<2, i32>, SDTCisVT<3, i32>]>,
                       [SDNPHasChain, SDNPSideEffect]>;

// Enqueue-command stores.
def X86enqcmd : SDNode<"X86ISD::ENQCMD", SDT_X86ENQCMD,
                       [SDNPHasChain, SDNPSideEffect]>;
def X86enqcmds : SDNode<"X86ISD::ENQCMDS", SDT_X86ENQCMD,
                        [SDNPHasChain, SDNPSideEffect]>;
// TESTUI: i32 result, no operands.
def X86testui : SDNode<"X86ISD::TESTUI",
                       SDTypeProfile<1, 0, [SDTCisVT<0, i32>]>,
                       [SDNPHasChain, SDNPSideEffect]>;
331 def X86aesenc128kl : SDNode<"X86ISD::AESENC128KL", SDT_X86AESENCDECKL,
332 [SDNPHasChain, SDNPMayLoad, SDNPSideEffect,
334 def X86aesdec128kl : SDNode<"X86ISD::AESDEC128KL", SDT_X86AESENCDECKL,
335 [SDNPHasChain, SDNPMayLoad, SDNPSideEffect,
337 def X86aesenc256kl : SDNode<"X86ISD::AESENC256KL", SDT_X86AESENCDECKL,
338 [SDNPHasChain, SDNPMayLoad, SDNPSideEffect,
340 def X86aesdec256kl : SDNode<"X86ISD::AESDEC256KL", SDT_X86AESENCDECKL,
341 [SDNPHasChain, SDNPMayLoad, SDNPSideEffect,
344 //===----------------------------------------------------------------------===//
345 // X86 Operand Definitions.
// A version of ptr_rc which excludes SP, ESP, and RSP. This is used for
// the index operand of an address, to conform to x86 encoding restrictions.
def ptr_rc_nosp : PointerLikeRegClass<1>;
352 // *mem - Operand definitions for the funky X86 addressing mode operands.
354 def X86MemAsmOperand : AsmOperandClass {
357 let RenderMethod = "addMemOperands", SuperClasses = [X86MemAsmOperand] in {
// Sized memory operand classes.
def X86Mem8AsmOperand : AsmOperandClass { let Name = "Mem8"; }
def X86Mem16AsmOperand : AsmOperandClass { let Name = "Mem16"; }
def X86Mem32AsmOperand : AsmOperandClass { let Name = "Mem32"; }
def X86Mem64AsmOperand : AsmOperandClass { let Name = "Mem64"; }
def X86Mem80AsmOperand : AsmOperandClass { let Name = "Mem80"; }
def X86Mem128AsmOperand : AsmOperandClass { let Name = "Mem128"; }
def X86Mem256AsmOperand : AsmOperandClass { let Name = "Mem256"; }
def X86Mem512AsmOperand : AsmOperandClass { let Name = "Mem512"; }
// Gather mem operands
def X86Mem64_RC128Operand : AsmOperandClass { let Name = "Mem64_RC128"; }
def X86Mem128_RC128Operand : AsmOperandClass { let Name = "Mem128_RC128"; }
def X86Mem256_RC128Operand : AsmOperandClass { let Name = "Mem256_RC128"; }
def X86Mem128_RC256Operand : AsmOperandClass { let Name = "Mem128_RC256"; }
def X86Mem256_RC256Operand : AsmOperandClass { let Name = "Mem256_RC256"; }

def X86Mem64_RC128XOperand : AsmOperandClass { let Name = "Mem64_RC128X"; }
def X86Mem128_RC128XOperand : AsmOperandClass { let Name = "Mem128_RC128X"; }
def X86Mem256_RC128XOperand : AsmOperandClass { let Name = "Mem256_RC128X"; }
def X86Mem128_RC256XOperand : AsmOperandClass { let Name = "Mem128_RC256X"; }
def X86Mem256_RC256XOperand : AsmOperandClass { let Name = "Mem256_RC256X"; }
def X86Mem512_RC256XOperand : AsmOperandClass { let Name = "Mem512_RC256X"; }
def X86Mem256_RC512Operand : AsmOperandClass { let Name = "Mem256_RC512"; }
def X86Mem512_RC512Operand : AsmOperandClass { let Name = "Mem512_RC512"; }
382 def X86SibMemOperand : AsmOperandClass { let Name = "SibMem"; }
385 def X86AbsMemAsmOperand : AsmOperandClass {
387 let SuperClasses = [X86MemAsmOperand];
390 class X86MemOperand<string printMethod,
391 AsmOperandClass parserMatchClass = X86MemAsmOperand,
392 int size = 0> : Operand<iPTR> {
393 let PrintMethod = printMethod;
394 let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, SEGMENT_REG);
395 let ParserMatchClass = parserMatchClass;
396 let OperandType = "OPERAND_MEMORY";
400 // Gather mem operands
401 class X86VMemOperand<RegisterClass RC, string printMethod,
402 AsmOperandClass parserMatchClass, int size = 0>
403 : X86MemOperand<printMethod, parserMatchClass, size> {
404 let MIOperandInfo = (ops ptr_rc, i8imm, RC, i32imm, SEGMENT_REG);
def anymem : X86MemOperand<"printMemReference">;

// Matches either a strict or a non-strict FP compare node.
def X86any_fcmp : PatFrags<(ops node:$lhs, node:$rhs),
                           [(X86strict_fcmp node:$lhs, node:$rhs),
                            (X86fcmp node:$lhs, node:$rhs)]>;

// FIXME: Right now we allow any size during parsing, but we might want to
// restrict to only unsized memory.
def opaquemem : X86MemOperand<"printMemReference">;

def sibmem: X86MemOperand<"printMemReference", X86SibMemOperand>;
// Sized integer and floating-point memory operands.
def i8mem : X86MemOperand<"printbytemem", X86Mem8AsmOperand, 8>;
def i16mem : X86MemOperand<"printwordmem", X86Mem16AsmOperand, 16>;
def i32mem : X86MemOperand<"printdwordmem", X86Mem32AsmOperand, 32>;
def i64mem : X86MemOperand<"printqwordmem", X86Mem64AsmOperand, 64>;
def i128mem : X86MemOperand<"printxmmwordmem", X86Mem128AsmOperand, 128>;
def i256mem : X86MemOperand<"printymmwordmem", X86Mem256AsmOperand, 256>;
def i512mem : X86MemOperand<"printzmmwordmem", X86Mem512AsmOperand, 512>;
def f16mem : X86MemOperand<"printwordmem", X86Mem16AsmOperand, 16>;
def f32mem : X86MemOperand<"printdwordmem", X86Mem32AsmOperand, 32>;
def f64mem : X86MemOperand<"printqwordmem", X86Mem64AsmOperand, 64>;
def f80mem : X86MemOperand<"printtbytemem", X86Mem80AsmOperand, 80>;
def f128mem : X86MemOperand<"printxmmwordmem", X86Mem128AsmOperand, 128>;
def f256mem : X86MemOperand<"printymmwordmem", X86Mem256AsmOperand, 256>;
def f512mem : X86MemOperand<"printzmmwordmem", X86Mem512AsmOperand, 512>;

// Gather mem operands
def vx64mem : X86VMemOperand<VR128, "printqwordmem", X86Mem64_RC128Operand, 64>;
def vx128mem : X86VMemOperand<VR128, "printxmmwordmem", X86Mem128_RC128Operand, 128>;
def vx256mem : X86VMemOperand<VR128, "printymmwordmem", X86Mem256_RC128Operand, 256>;
def vy128mem : X86VMemOperand<VR256, "printxmmwordmem", X86Mem128_RC256Operand, 128>;
def vy256mem : X86VMemOperand<VR256, "printymmwordmem", X86Mem256_RC256Operand, 256>;

def vx64xmem : X86VMemOperand<VR128X, "printqwordmem", X86Mem64_RC128XOperand, 64>;
def vx128xmem : X86VMemOperand<VR128X, "printxmmwordmem", X86Mem128_RC128XOperand, 128>;
def vx256xmem : X86VMemOperand<VR128X, "printymmwordmem", X86Mem256_RC128XOperand, 256>;
def vy128xmem : X86VMemOperand<VR256X, "printxmmwordmem", X86Mem128_RC256XOperand, 128>;
def vy256xmem : X86VMemOperand<VR256X, "printymmwordmem", X86Mem256_RC256XOperand, 256>;
def vy512xmem : X86VMemOperand<VR256X, "printzmmwordmem", X86Mem512_RC256XOperand, 512>;
def vz256mem : X86VMemOperand<VR512, "printymmwordmem", X86Mem256_RC512Operand, 256>;
def vz512mem : X86VMemOperand<VR512, "printzmmwordmem", X86Mem512_RC512Operand, 512>;
// A version of i8mem for use on x86-64 and x32 that uses a NOREX GPR instead
// of a plain GPR, so that it doesn't potentially require a REX prefix.
def ptr_rc_norex : PointerLikeRegClass<2>;
def ptr_rc_norex_nosp : PointerLikeRegClass<3>;
454 def i8mem_NOREX : X86MemOperand<"printbytemem", X86Mem8AsmOperand, 8> {
455 let MIOperandInfo = (ops ptr_rc_norex, i8imm, ptr_rc_norex_nosp, i32imm,
// GPRs available for tailcall.
// It represents GR32_TC, GR64_TC or GR64_TCW64.
def ptr_rc_tailcall : PointerLikeRegClass<4>;
463 // Special i32mem for addresses of load folding tail calls. These are not
464 // allowed to use callee-saved registers since they must be scheduled
465 // after callee-saved register are popped.
466 def i32mem_TC : Operand<i32> {
467 let PrintMethod = "printdwordmem";
468 let MIOperandInfo = (ops ptr_rc_tailcall, i8imm, ptr_rc_tailcall,
469 i32imm, SEGMENT_REG);
470 let ParserMatchClass = X86Mem32AsmOperand;
471 let OperandType = "OPERAND_MEMORY";
474 // Special i64mem for addresses of load folding tail calls. These are not
475 // allowed to use callee-saved registers since they must be scheduled
476 // after callee-saved register are popped.
477 def i64mem_TC : Operand<i64> {
478 let PrintMethod = "printqwordmem";
479 let MIOperandInfo = (ops ptr_rc_tailcall, i8imm,
480 ptr_rc_tailcall, i32imm, SEGMENT_REG);
481 let ParserMatchClass = X86Mem64AsmOperand;
482 let OperandType = "OPERAND_MEMORY";
485 // Special parser to detect 16-bit mode to select 16-bit displacement.
486 def X86AbsMem16AsmOperand : AsmOperandClass {
487 let Name = "AbsMem16";
488 let RenderMethod = "addAbsMemOperands";
489 let SuperClasses = [X86AbsMemAsmOperand];
492 // Branch targets print as pc-relative values.
493 class BranchTargetOperand<ValueType ty> : Operand<ty> {
494 let OperandType = "OPERAND_PCREL";
495 let PrintMethod = "printPCRelImm";
496 let ParserMatchClass = X86AbsMemAsmOperand;
def i32imm_brtarget : BranchTargetOperand<i32>;
def i16imm_brtarget : BranchTargetOperand<i16>;

// 64-bits but only 32 bits are significant, and those bits are treated as
// being pc relative.
def i64i32imm_brtarget : BranchTargetOperand<i64>;

def brtarget : BranchTargetOperand<OtherVT>;
def brtarget8 : BranchTargetOperand<OtherVT>;
508 def brtarget16 : BranchTargetOperand<OtherVT> {
509 let ParserMatchClass = X86AbsMem16AsmOperand;
511 def brtarget32 : BranchTargetOperand<OtherVT>;
513 let RenderMethod = "addSrcIdxOperands" in {
514 def X86SrcIdx8Operand : AsmOperandClass {
515 let Name = "SrcIdx8";
516 let SuperClasses = [X86Mem8AsmOperand];
518 def X86SrcIdx16Operand : AsmOperandClass {
519 let Name = "SrcIdx16";
520 let SuperClasses = [X86Mem16AsmOperand];
522 def X86SrcIdx32Operand : AsmOperandClass {
523 let Name = "SrcIdx32";
524 let SuperClasses = [X86Mem32AsmOperand];
526 def X86SrcIdx64Operand : AsmOperandClass {
527 let Name = "SrcIdx64";
528 let SuperClasses = [X86Mem64AsmOperand];
530 } // RenderMethod = "addSrcIdxOperands"
532 let RenderMethod = "addDstIdxOperands" in {
533 def X86DstIdx8Operand : AsmOperandClass {
534 let Name = "DstIdx8";
535 let SuperClasses = [X86Mem8AsmOperand];
537 def X86DstIdx16Operand : AsmOperandClass {
538 let Name = "DstIdx16";
539 let SuperClasses = [X86Mem16AsmOperand];
541 def X86DstIdx32Operand : AsmOperandClass {
542 let Name = "DstIdx32";
543 let SuperClasses = [X86Mem32AsmOperand];
545 def X86DstIdx64Operand : AsmOperandClass {
546 let Name = "DstIdx64";
547 let SuperClasses = [X86Mem64AsmOperand];
549 } // RenderMethod = "addDstIdxOperands"
551 let RenderMethod = "addMemOffsOperands" in {
552 def X86MemOffs16_8AsmOperand : AsmOperandClass {
553 let Name = "MemOffs16_8";
554 let SuperClasses = [X86Mem8AsmOperand];
556 def X86MemOffs16_16AsmOperand : AsmOperandClass {
557 let Name = "MemOffs16_16";
558 let SuperClasses = [X86Mem16AsmOperand];
560 def X86MemOffs16_32AsmOperand : AsmOperandClass {
561 let Name = "MemOffs16_32";
562 let SuperClasses = [X86Mem32AsmOperand];
564 def X86MemOffs32_8AsmOperand : AsmOperandClass {
565 let Name = "MemOffs32_8";
566 let SuperClasses = [X86Mem8AsmOperand];
568 def X86MemOffs32_16AsmOperand : AsmOperandClass {
569 let Name = "MemOffs32_16";
570 let SuperClasses = [X86Mem16AsmOperand];
572 def X86MemOffs32_32AsmOperand : AsmOperandClass {
573 let Name = "MemOffs32_32";
574 let SuperClasses = [X86Mem32AsmOperand];
576 def X86MemOffs32_64AsmOperand : AsmOperandClass {
577 let Name = "MemOffs32_64";
578 let SuperClasses = [X86Mem64AsmOperand];
580 def X86MemOffs64_8AsmOperand : AsmOperandClass {
581 let Name = "MemOffs64_8";
582 let SuperClasses = [X86Mem8AsmOperand];
584 def X86MemOffs64_16AsmOperand : AsmOperandClass {
585 let Name = "MemOffs64_16";
586 let SuperClasses = [X86Mem16AsmOperand];
588 def X86MemOffs64_32AsmOperand : AsmOperandClass {
589 let Name = "MemOffs64_32";
590 let SuperClasses = [X86Mem32AsmOperand];
592 def X86MemOffs64_64AsmOperand : AsmOperandClass {
593 let Name = "MemOffs64_64";
594 let SuperClasses = [X86Mem64AsmOperand];
596 } // RenderMethod = "addMemOffsOperands"
598 class X86SrcIdxOperand<string printMethod, AsmOperandClass parserMatchClass>
599 : X86MemOperand<printMethod, parserMatchClass> {
600 let MIOperandInfo = (ops ptr_rc, SEGMENT_REG);
603 class X86DstIdxOperand<string printMethod, AsmOperandClass parserMatchClass>
604 : X86MemOperand<printMethod, parserMatchClass> {
605 let MIOperandInfo = (ops ptr_rc);
// Source-index and destination-index memory operands of each access size.
def srcidx8 : X86SrcIdxOperand<"printSrcIdx8", X86SrcIdx8Operand>;
def srcidx16 : X86SrcIdxOperand<"printSrcIdx16", X86SrcIdx16Operand>;
def srcidx32 : X86SrcIdxOperand<"printSrcIdx32", X86SrcIdx32Operand>;
def srcidx64 : X86SrcIdxOperand<"printSrcIdx64", X86SrcIdx64Operand>;
def dstidx8 : X86DstIdxOperand<"printDstIdx8", X86DstIdx8Operand>;
def dstidx16 : X86DstIdxOperand<"printDstIdx16", X86DstIdx16Operand>;
def dstidx32 : X86DstIdxOperand<"printDstIdx32", X86DstIdx32Operand>;
def dstidx64 : X86DstIdxOperand<"printDstIdx64", X86DstIdx64Operand>;
617 class X86MemOffsOperand<Operand immOperand, string printMethod,
618 AsmOperandClass parserMatchClass>
619 : X86MemOperand<printMethod, parserMatchClass> {
620 let MIOperandInfo = (ops immOperand, SEGMENT_REG);
// Memory-offset operands: offsetA_B uses an A-bit address immediate for a
// B-bit memory access.
def offset16_8 : X86MemOffsOperand<i16imm, "printMemOffs8",
                                   X86MemOffs16_8AsmOperand>;
def offset16_16 : X86MemOffsOperand<i16imm, "printMemOffs16",
                                    X86MemOffs16_16AsmOperand>;
def offset16_32 : X86MemOffsOperand<i16imm, "printMemOffs32",
                                    X86MemOffs16_32AsmOperand>;
def offset32_8 : X86MemOffsOperand<i32imm, "printMemOffs8",
                                   X86MemOffs32_8AsmOperand>;
def offset32_16 : X86MemOffsOperand<i32imm, "printMemOffs16",
                                    X86MemOffs32_16AsmOperand>;
def offset32_32 : X86MemOffsOperand<i32imm, "printMemOffs32",
                                    X86MemOffs32_32AsmOperand>;
def offset32_64 : X86MemOffsOperand<i32imm, "printMemOffs64",
                                    X86MemOffs32_64AsmOperand>;
def offset64_8 : X86MemOffsOperand<i64imm, "printMemOffs8",
                                   X86MemOffs64_8AsmOperand>;
def offset64_16 : X86MemOffsOperand<i64imm, "printMemOffs16",
                                    X86MemOffs64_16AsmOperand>;
def offset64_32 : X86MemOffsOperand<i64imm, "printMemOffs32",
                                    X86MemOffs64_32AsmOperand>;
def offset64_64 : X86MemOffsOperand<i64imm, "printMemOffs64",
                                    X86MemOffs64_64AsmOperand>;
646 def ccode : Operand<i8> {
647 let PrintMethod = "printCondCode";
648 let OperandNamespace = "X86";
649 let OperandType = "OPERAND_COND_CODE";
652 class ImmSExtAsmOperandClass : AsmOperandClass {
653 let SuperClasses = [ImmAsmOperand];
654 let RenderMethod = "addImmOperands";
657 def X86GR32orGR64AsmOperand : AsmOperandClass {
658 let Name = "GR32orGR64";
660 def GR32orGR64 : RegisterOperand<GR32> {
661 let ParserMatchClass = X86GR32orGR64AsmOperand;
664 def X86GR16orGR32orGR64AsmOperand : AsmOperandClass {
665 let Name = "GR16orGR32orGR64";
667 def GR16orGR32orGR64 : RegisterOperand<GR16> {
668 let ParserMatchClass = X86GR16orGR32orGR64AsmOperand;
671 def AVX512RCOperand : AsmOperandClass {
672 let Name = "AVX512RC";
674 def AVX512RC : Operand<i32> {
675 let PrintMethod = "printRoundingControl";
676 let OperandNamespace = "X86";
677 let OperandType = "OPERAND_ROUNDING_CONTROL";
678 let ParserMatchClass = AVX512RCOperand;
681 // Sign-extended immediate classes. We don't need to define the full lattice
682 // here because there is no instruction with an ambiguity between ImmSExti64i32
685 // The strange ranges come from the fact that the assembler always works with
686 // 64-bit immediates, but for a 16-bit target value we want to accept both "-1"
687 // (which will be a -1ULL), and "0xFF" (-1 in 16-bits).
690 // [0xFFFFFFFF80000000, 0xFFFFFFFFFFFFFFFF]
691 def ImmSExti64i32AsmOperand : ImmSExtAsmOperandClass {
692 let Name = "ImmSExti64i32";
695 // [0, 0x0000007F] | [0x000000000000FF80, 0x000000000000FFFF] |
696 // [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
697 def ImmSExti16i8AsmOperand : ImmSExtAsmOperandClass {
698 let Name = "ImmSExti16i8";
699 let SuperClasses = [ImmSExti64i32AsmOperand];
702 // [0, 0x0000007F] | [0x00000000FFFFFF80, 0x00000000FFFFFFFF] |
703 // [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
704 def ImmSExti32i8AsmOperand : ImmSExtAsmOperandClass {
705 let Name = "ImmSExti32i8";
709 // [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
710 def ImmSExti64i8AsmOperand : ImmSExtAsmOperandClass {
711 let Name = "ImmSExti64i8";
712 let SuperClasses = [ImmSExti16i8AsmOperand, ImmSExti32i8AsmOperand,
713 ImmSExti64i32AsmOperand];
716 // 4-bit immediate used by some XOP instructions
718 def ImmUnsignedi4AsmOperand : AsmOperandClass {
719 let Name = "ImmUnsignedi4";
720 let RenderMethod = "addImmOperands";
721 let DiagnosticType = "InvalidImmUnsignedi4";
724 // Unsigned immediate used by SSE/AVX instructions
726 // [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
727 def ImmUnsignedi8AsmOperand : AsmOperandClass {
728 let Name = "ImmUnsignedi8";
729 let RenderMethod = "addImmOperands";
732 // A couple of more descriptive operand definitions.
733 // 16-bits but only 8 bits are significant.
734 def i16i8imm : Operand<i16> {
735 let ParserMatchClass = ImmSExti16i8AsmOperand;
736 let OperandType = "OPERAND_IMMEDIATE";
738 // 32-bits but only 8 bits are significant.
739 def i32i8imm : Operand<i32> {
740 let ParserMatchClass = ImmSExti32i8AsmOperand;
741 let OperandType = "OPERAND_IMMEDIATE";
744 // 64-bits but only 32 bits are significant.
745 def i64i32imm : Operand<i64> {
746 let ParserMatchClass = ImmSExti64i32AsmOperand;
747 let OperandType = "OPERAND_IMMEDIATE";
750 // 64-bits but only 8 bits are significant.
751 def i64i8imm : Operand<i64> {
752 let ParserMatchClass = ImmSExti64i8AsmOperand;
753 let OperandType = "OPERAND_IMMEDIATE";
756 // Unsigned 4-bit immediate used by some XOP instructions.
757 def u4imm : Operand<i8> {
758 let PrintMethod = "printU8Imm";
759 let ParserMatchClass = ImmUnsignedi4AsmOperand;
760 let OperandType = "OPERAND_IMMEDIATE";
763 // Unsigned 8-bit immediate used by SSE/AVX instructions.
764 def u8imm : Operand<i8> {
765 let PrintMethod = "printU8Imm";
766 let ParserMatchClass = ImmUnsignedi8AsmOperand;
767 let OperandType = "OPERAND_IMMEDIATE";
770 // 16-bit immediate but only 8-bits are significant and they are unsigned.
771 // Used by BT instructions.
772 def i16u8imm : Operand<i16> {
773 let PrintMethod = "printU8Imm";
774 let ParserMatchClass = ImmUnsignedi8AsmOperand;
775 let OperandType = "OPERAND_IMMEDIATE";
778 // 32-bit immediate but only 8-bits are significant and they are unsigned.
779 // Used by some SSE/AVX instructions that use intrinsics.
780 def i32u8imm : Operand<i32> {
781 let PrintMethod = "printU8Imm";
782 let ParserMatchClass = ImmUnsignedi8AsmOperand;
783 let OperandType = "OPERAND_IMMEDIATE";
786 // 64-bit immediate but only 8-bits are significant and they are unsigned.
787 // Used by BT instructions.
788 def i64u8imm : Operand<i64> {
789 let PrintMethod = "printU8Imm";
790 let ParserMatchClass = ImmUnsignedi8AsmOperand;
791 let OperandType = "OPERAND_IMMEDIATE";
794 def lea64_32mem : Operand<i32> {
795 let PrintMethod = "printMemReference";
796 let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm, SEGMENT_REG);
797 let ParserMatchClass = X86MemAsmOperand;
800 // Memory operands that use 64-bit pointers in both ILP32 and LP64.
801 def lea64mem : Operand<i64> {
802 let PrintMethod = "printMemReference";
803 let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm, SEGMENT_REG);
804 let ParserMatchClass = X86MemAsmOperand;
807 let RenderMethod = "addMaskPairOperands" in {
// Mask-register pair operand classes.
def VK1PairAsmOperand : AsmOperandClass { let Name = "VK1Pair"; }
def VK2PairAsmOperand : AsmOperandClass { let Name = "VK2Pair"; }
def VK4PairAsmOperand : AsmOperandClass { let Name = "VK4Pair"; }
def VK8PairAsmOperand : AsmOperandClass { let Name = "VK8Pair"; }
def VK16PairAsmOperand : AsmOperandClass { let Name = "VK16Pair"; }
815 def VK1Pair : RegisterOperand<VK1PAIR, "printVKPair"> {
816 let ParserMatchClass = VK1PairAsmOperand;
819 def VK2Pair : RegisterOperand<VK2PAIR, "printVKPair"> {
820 let ParserMatchClass = VK2PairAsmOperand;
823 def VK4Pair : RegisterOperand<VK4PAIR, "printVKPair"> {
824 let ParserMatchClass = VK4PairAsmOperand;
827 def VK8Pair : RegisterOperand<VK8PAIR, "printVKPair"> {
828 let ParserMatchClass = VK8PairAsmOperand;
831 def VK16Pair : RegisterOperand<VK16PAIR, "printVKPair"> {
832 let ParserMatchClass = VK16PairAsmOperand;
//===----------------------------------------------------------------------===//
// X86 Complex Pattern Definitions.

// Define X86-specific addressing mode.
def addr : ComplexPattern<iPTR, 5, "selectAddr", [], [SDNPWantParent]>;
841 def lea32addr : ComplexPattern<i32, 5, "selectLEAAddr",
842 [add, sub, mul, X86mul_imm, shl, or, xor, frameindex],
844 // In 64-bit mode 32-bit LEAs can use RIP-relative addressing.
845 def lea64_32addr : ComplexPattern<i32, 5, "selectLEA64_32Addr",
846 [add, sub, mul, X86mul_imm, shl, or, xor,
847 frameindex, X86WrapperRIP],
// TLS address selection for 32-bit pointers.
def tls32addr : ComplexPattern<i32, 5, "selectTLSADDRAddr",
                               [tglobaltlsaddr], []>;

def tls32baseaddr : ComplexPattern<i32, 5, "selectTLSADDRAddr",
                                   [tglobaltlsaddr], []>;
856 def lea64addr : ComplexPattern<i64, 5, "selectLEAAddr",
857 [add, sub, mul, X86mul_imm, shl, or, xor, frameindex,
// TLS address selection for 64-bit pointers.
def tls64addr : ComplexPattern<i64, 5, "selectTLSADDRAddr",
                               [tglobaltlsaddr], []>;

def tls64baseaddr : ComplexPattern<i64, 5, "selectTLSADDRAddr",
                                   [tglobaltlsaddr], []>;

// Vector addressing mode (used by gather-style memory operands).
def vectoraddr : ComplexPattern<iPTR, 5, "selectVectorAddr", [],[SDNPWantParent]>;
// A relocatable immediate is an operand that can be relocated by the linker to
// an immediate, such as a regular symbol in non-PIC code.
def relocImm : ComplexPattern<iAny, 1, "selectRelocImm",
                              [X86Wrapper], [], 0>;
873 //===----------------------------------------------------------------------===//
874 // X86 Instruction Predicate Definitions.
// Subtarget feature predicates, used in instruction Requires<[...]> lists.
// Naming convention:
//   HasX  - feature X is available.
//   NoX   - feature X is NOT available.
//   UseX  - feature X is available AND no superseding extension is (e.g.
//           UseSSE2 means SSE2 without AVX, so the legacy encoding is used).
875 def TruePredicate : Predicate<"true">;
877 def HasCMOV : Predicate<"Subtarget->canUseCMOV()">;
878 def NoCMOV : Predicate<"!Subtarget->canUseCMOV()">;
880 def HasMMX : Predicate<"Subtarget->hasMMX()">;
881 def Has3DNow : Predicate<"Subtarget->hasThreeDNow()">;
882 def Has3DNowA : Predicate<"Subtarget->hasThreeDNowA()">;
883 def HasSSE1 : Predicate<"Subtarget->hasSSE1()">;
884 def UseSSE1 : Predicate<"Subtarget->hasSSE1() && !Subtarget->hasAVX()">;
885 def HasSSE2 : Predicate<"Subtarget->hasSSE2()">;
886 def UseSSE2 : Predicate<"Subtarget->hasSSE2() && !Subtarget->hasAVX()">;
887 def HasSSE3 : Predicate<"Subtarget->hasSSE3()">;
888 def UseSSE3 : Predicate<"Subtarget->hasSSE3() && !Subtarget->hasAVX()">;
889 def HasSSSE3 : Predicate<"Subtarget->hasSSSE3()">;
890 def UseSSSE3 : Predicate<"Subtarget->hasSSSE3() && !Subtarget->hasAVX()">;
891 def HasSSE41 : Predicate<"Subtarget->hasSSE41()">;
892 def NoSSE41 : Predicate<"!Subtarget->hasSSE41()">;
893 def UseSSE41 : Predicate<"Subtarget->hasSSE41() && !Subtarget->hasAVX()">;
894 def HasSSE42 : Predicate<"Subtarget->hasSSE42()">;
895 def UseSSE42 : Predicate<"Subtarget->hasSSE42() && !Subtarget->hasAVX()">;
896 def HasSSE4A : Predicate<"Subtarget->hasSSE4A()">;
897 def NoAVX : Predicate<"!Subtarget->hasAVX()">;
898 def HasAVX : Predicate<"Subtarget->hasAVX()">;
899 def HasAVX2 : Predicate<"Subtarget->hasAVX2()">;
900 def HasAVX1Only : Predicate<"Subtarget->hasAVX() && !Subtarget->hasAVX2()">;
901 def HasAVX512 : Predicate<"Subtarget->hasAVX512()">;
902 def UseAVX : Predicate<"Subtarget->hasAVX() && !Subtarget->hasAVX512()">;
903 def UseAVX2 : Predicate<"Subtarget->hasAVX2() && !Subtarget->hasAVX512()">;
904 def NoAVX512 : Predicate<"!Subtarget->hasAVX512()">;
// AVX-512 sub-feature predicates (CDI, DQI, BWI, VLX, ...). The
// NoVLX_Or_NoXXX forms select EVEX-512 or legacy patterns when the
// 128/256-bit EVEX forms cannot be used.
905 def HasCDI : Predicate<"Subtarget->hasCDI()">;
906 def HasVPOPCNTDQ : Predicate<"Subtarget->hasVPOPCNTDQ()">;
907 def HasPFI : Predicate<"Subtarget->hasPFI()">;
908 def HasERI : Predicate<"Subtarget->hasERI()">;
909 def HasDQI : Predicate<"Subtarget->hasDQI()">;
910 def NoDQI : Predicate<"!Subtarget->hasDQI()">;
911 def HasBWI : Predicate<"Subtarget->hasBWI()">;
912 def NoBWI : Predicate<"!Subtarget->hasBWI()">;
913 def HasVLX : Predicate<"Subtarget->hasVLX()">;
914 def NoVLX : Predicate<"!Subtarget->hasVLX()">;
915 def NoVLX_Or_NoBWI : Predicate<"!Subtarget->hasVLX() || !Subtarget->hasBWI()">;
916 def NoVLX_Or_NoDQI : Predicate<"!Subtarget->hasVLX() || !Subtarget->hasDQI()">;
917 def PKU : Predicate<"Subtarget->hasPKU()">;
918 def HasVNNI : Predicate<"Subtarget->hasVNNI()">;
919 def HasVP2INTERSECT : Predicate<"Subtarget->hasVP2INTERSECT()">;
920 def HasBF16 : Predicate<"Subtarget->hasBF16()">;
921 def HasFP16 : Predicate<"Subtarget->hasFP16()">;
922 def HasAVXVNNI : Predicate <"Subtarget->hasAVXVNNI()">;
923 def NoVLX_Or_NoVNNI : Predicate<"!Subtarget->hasVLX() || !Subtarget->hasVNNI()">;
925 def HasBITALG : Predicate<"Subtarget->hasBITALG()">;
926 def HasPOPCNT : Predicate<"Subtarget->hasPOPCNT()">;
927 def HasAES : Predicate<"Subtarget->hasAES()">;
928 def HasVAES : Predicate<"Subtarget->hasVAES()">;
929 def NoVLX_Or_NoVAES : Predicate<"!Subtarget->hasVLX() || !Subtarget->hasVAES()">;
930 def HasFXSR : Predicate<"Subtarget->hasFXSR()">;
931 def HasXSAVE : Predicate<"Subtarget->hasXSAVE()">;
932 def HasXSAVEOPT : Predicate<"Subtarget->hasXSAVEOPT()">;
933 def HasXSAVEC : Predicate<"Subtarget->hasXSAVEC()">;
934 def HasXSAVES : Predicate<"Subtarget->hasXSAVES()">;
935 def HasPCLMUL : Predicate<"Subtarget->hasPCLMUL()">;
936 def NoVLX_Or_NoVPCLMULQDQ :
937 Predicate<"!Subtarget->hasVLX() || !Subtarget->hasVPCLMULQDQ()">;
938 def HasVPCLMULQDQ : Predicate<"Subtarget->hasVPCLMULQDQ()">;
939 def HasGFNI : Predicate<"Subtarget->hasGFNI()">;
940 def HasFMA : Predicate<"Subtarget->hasFMA()">;
941 def HasFMA4 : Predicate<"Subtarget->hasFMA4()">;
942 def NoFMA4 : Predicate<"!Subtarget->hasFMA4()">;
943 def HasXOP : Predicate<"Subtarget->hasXOP()">;
944 def HasTBM : Predicate<"Subtarget->hasTBM()">;
945 def NoTBM : Predicate<"!Subtarget->hasTBM()">;
946 def HasLWP : Predicate<"Subtarget->hasLWP()">;
947 def HasMOVBE : Predicate<"Subtarget->hasMOVBE()">;
948 def HasRDRAND : Predicate<"Subtarget->hasRDRAND()">;
949 def HasF16C : Predicate<"Subtarget->hasF16C()">;
950 def HasFSGSBase : Predicate<"Subtarget->hasFSGSBase()">;
951 def HasLZCNT : Predicate<"Subtarget->hasLZCNT()">;
952 def HasBMI : Predicate<"Subtarget->hasBMI()">;
953 def HasBMI2 : Predicate<"Subtarget->hasBMI2()">;
954 def NoBMI2 : Predicate<"!Subtarget->hasBMI2()">;
955 def HasVBMI : Predicate<"Subtarget->hasVBMI()">;
956 def HasVBMI2 : Predicate<"Subtarget->hasVBMI2()">;
957 def HasIFMA : Predicate<"Subtarget->hasIFMA()">;
958 def HasRTM : Predicate<"Subtarget->hasRTM()">;
959 def HasADX : Predicate<"Subtarget->hasADX()">;
960 def HasSHA : Predicate<"Subtarget->hasSHA()">;
961 def HasSGX : Predicate<"Subtarget->hasSGX()">;
962 def HasRDSEED : Predicate<"Subtarget->hasRDSEED()">;
963 def HasSSEPrefetch : Predicate<"Subtarget->hasSSEPrefetch()">;
964 def NoSSEPrefetch : Predicate<"!Subtarget->hasSSEPrefetch()">;
965 def HasPrefetchW : Predicate<"Subtarget->hasPrefetchW()">;
966 def HasPREFETCHWT1 : Predicate<"Subtarget->hasPREFETCHWT1()">;
967 def HasLAHFSAHF : Predicate<"Subtarget->hasLAHFSAHF()">;
968 def HasMWAITX : Predicate<"Subtarget->hasMWAITX()">;
969 def HasCLZERO : Predicate<"Subtarget->hasCLZERO()">;
970 def HasCLDEMOTE : Predicate<"Subtarget->hasCLDEMOTE()">;
971 def HasMOVDIRI : Predicate<"Subtarget->hasMOVDIRI()">;
972 def HasMOVDIR64B : Predicate<"Subtarget->hasMOVDIR64B()">;
973 def HasPTWRITE : Predicate<"Subtarget->hasPTWRITE()">;
// x87 stack FP is used only when the corresponding SSE level is missing.
974 def FPStackf32 : Predicate<"!Subtarget->hasSSE1()">;
975 def FPStackf64 : Predicate<"!Subtarget->hasSSE2()">;
976 def HasSHSTK : Predicate<"Subtarget->hasSHSTK()">;
977 def HasCLFLUSHOPT : Predicate<"Subtarget->hasCLFLUSHOPT()">;
978 def HasCLWB : Predicate<"Subtarget->hasCLWB()">;
979 def HasWBNOINVD : Predicate<"Subtarget->hasWBNOINVD()">;
980 def HasRDPID : Predicate<"Subtarget->hasRDPID()">;
981 def HasWAITPKG : Predicate<"Subtarget->hasWAITPKG()">;
982 def HasINVPCID : Predicate<"Subtarget->hasINVPCID()">;
983 def HasCX8 : Predicate<"Subtarget->hasCX8()">;
984 def HasCX16 : Predicate<"Subtarget->hasCX16()">;
985 def HasPCONFIG : Predicate<"Subtarget->hasPCONFIG()">;
986 def HasENQCMD : Predicate<"Subtarget->hasENQCMD()">;
987 def HasKL : Predicate<"Subtarget->hasKL()">;
988 def HasWIDEKL : Predicate<"Subtarget->hasWIDEKL()">;
989 def HasHRESET : Predicate<"Subtarget->hasHRESET()">;
990 def HasSERIALIZE : Predicate<"Subtarget->hasSERIALIZE()">;
991 def HasTSXLDTRK : Predicate<"Subtarget->hasTSXLDTRK()">;
992 def HasAMXTILE : Predicate<"Subtarget->hasAMXTILE()">;
993 def HasAMXBF16 : Predicate<"Subtarget->hasAMXBF16()">;
994 def HasAMXINT8 : Predicate<"Subtarget->hasAMXINT8()">;
995 def HasUINTR : Predicate<"Subtarget->hasUINTR()">;
996 def HasCRC32 : Predicate<"Subtarget->hasCRC32()">;
// Operating-mode, target-OS and code-model predicates. The mode predicates
// also carry an AssemblerPredicate so the assembler/disassembler can reject
// encodings that are invalid for the current mode.
997 def Not64BitMode : Predicate<"!Subtarget->is64Bit()">,
998 AssemblerPredicate<(all_of (not Is64Bit)), "Not 64-bit mode">;
999 def In64BitMode : Predicate<"Subtarget->is64Bit()">,
1000 AssemblerPredicate<(all_of Is64Bit), "64-bit mode">;
1001 def IsLP64 : Predicate<"Subtarget->isTarget64BitLP64()">;
1002 def NotLP64 : Predicate<"!Subtarget->isTarget64BitLP64()">;
1003 def In16BitMode : Predicate<"Subtarget->is16Bit()">,
1004 AssemblerPredicate<(all_of Is16Bit), "16-bit mode">;
1005 def Not16BitMode : Predicate<"!Subtarget->is16Bit()">,
1006 AssemblerPredicate<(all_of (not Is16Bit)), "Not 16-bit mode">;
1007 def In32BitMode : Predicate<"Subtarget->is32Bit()">,
1008 AssemblerPredicate<(all_of Is32Bit), "32-bit mode">;
1009 def IsWin64 : Predicate<"Subtarget->isTargetWin64()">;
1010 def NotWin64 : Predicate<"!Subtarget->isTargetWin64()">;
// Depends on per-function frame lowering state, hence RecomputePerFunction.
1011 def NotWin64WithoutFP : Predicate<"!Subtarget->isTargetWin64() ||"
1012 "Subtarget->getFrameLowering()->hasFP(*MF)"> {
1013 let RecomputePerFunction = 1;
1015 def IsPS : Predicate<"Subtarget->isTargetPS()">;
1016 def NotPS : Predicate<"!Subtarget->isTargetPS()">;
1017 def IsNaCl : Predicate<"Subtarget->isTargetNaCl()">;
1018 def NotNaCl : Predicate<"!Subtarget->isTargetNaCl()">;
1019 def SmallCode : Predicate<"TM.getCodeModel() == CodeModel::Small">;
1020 def KernelCode : Predicate<"TM.getCodeModel() == CodeModel::Kernel">;
1021 def NearData : Predicate<"TM.getCodeModel() == CodeModel::Small ||"
1022 "TM.getCodeModel() == CodeModel::Kernel">;
1023 def IsNotPIC : Predicate<"!TM.isPositionIndependent()">;
// Predicates that depend on per-function attributes (optsize/minsize), so
// they must be re-evaluated for every function rather than cached per-module.
// NOTE(review): the closing brace of this RecomputePerFunction let-block
// falls outside this extracted view.
1025 // We could compute these on a per-module basis but doing so requires accessing
1026 // the Function object through the <Target>Subtarget and objections were raised
1027 // to that (see post-commit review comments for r301750).
1028 let RecomputePerFunction = 1 in {
1029 def OptForSize : Predicate<"shouldOptForSize(MF)">;
1030 def OptForMinSize : Predicate<"MF->getFunction().hasMinSize()">;
1031 def OptForSpeed : Predicate<"!shouldOptForSize(MF)">;
1032 def UseIncDec : Predicate<"!Subtarget->slowIncDec() || "
1033 "shouldOptForSize(MF)">;
1034 def NoSSE41_Or_OptForSize : Predicate<"shouldOptForSize(MF) || "
1035 "!Subtarget->hasSSE41()">;
// Tuning / micro-architectural predicates.
1038 def CallImmAddr : Predicate<"Subtarget->isLegalToCallImmediateAddr()">;
1039 def FavorMemIndirectCall : Predicate<"!Subtarget->slowTwoMemOps()">;
1040 def HasFastMem32 : Predicate<"!Subtarget->isUnalignedMem32Slow()">;
1041 def HasFastLZCNT : Predicate<"Subtarget->hasFastLZCNT()">;
1042 def HasFastSHLDRotate : Predicate<"Subtarget->hasFastSHLDRotate()">;
1043 def HasERMSB : Predicate<"Subtarget->hasERMSB()">;
1044 def HasFSRM : Predicate<"Subtarget->hasFSRM()">;
1045 def HasMFence : Predicate<"Subtarget->hasMFence()">;
1046 def UseIndirectThunkCalls : Predicate<"Subtarget->useIndirectThunkCalls()">;
1047 def NotUseIndirectThunkCalls : Predicate<"!Subtarget->useIndirectThunkCalls()">;
1049 //===----------------------------------------------------------------------===//
1050 // X86 Instruction Format Definitions.
1053 include "X86InstrFormats.td"
1055 //===----------------------------------------------------------------------===//
1056 // Pattern fragments.
// Immediate leaves encoding the X86 condition codes as i8 constants 0-15.
// The numeric values mirror X86::CondCode in X86InstrInfo.h and the hardware
// condition-code encoding; the two tables must stay in sync.
1059 // X86 specific condition code. These correspond to CondCode in
1060 // X86InstrInfo.h. They must be kept in synch.
1061 def X86_COND_O : PatLeaf<(i8 0)>;
1062 def X86_COND_NO : PatLeaf<(i8 1)>;
1063 def X86_COND_B : PatLeaf<(i8 2)>; // alt. COND_C
1064 def X86_COND_AE : PatLeaf<(i8 3)>; // alt. COND_NC
1065 def X86_COND_E : PatLeaf<(i8 4)>; // alt. COND_Z
1066 def X86_COND_NE : PatLeaf<(i8 5)>; // alt. COND_NZ
1067 def X86_COND_BE : PatLeaf<(i8 6)>; // alt. COND_NA
1068 def X86_COND_A : PatLeaf<(i8 7)>; // alt. COND_NBE
1069 def X86_COND_S : PatLeaf<(i8 8)>;
1070 def X86_COND_NS : PatLeaf<(i8 9)>;
1071 def X86_COND_P : PatLeaf<(i8 10)>; // alt. COND_PE
1072 def X86_COND_NP : PatLeaf<(i8 11)>; // alt. COND_PO
1073 def X86_COND_L : PatLeaf<(i8 12)>; // alt. COND_NGE
1074 def X86_COND_GE : PatLeaf<(i8 13)>; // alt. COND_NL
1075 def X86_COND_LE : PatLeaf<(i8 14)>; // alt. COND_NG
1076 def X86_COND_G : PatLeaf<(i8 15)>; // alt. COND_NLE
// Immediate matching fragments.
//   iNNimmSExtM  - immediate of type iNN that fits when sign-extended from M
//                  bits, enabling the shorter imm8/imm32 encodings.
//   *relocImm*   - linker-relocatable immediates (see relocImm above);
//                  isSExtAbsoluteSymbolRef checks an absolute symbol's range.
//   *_su         - variants gated on !shouldAvoidImmediateInstFormsForSize,
//                  i.e. they refuse to match when, for size, a multiply-used
//                  immediate should be kept in a register instead.
// NOTE(review): the closing "}]>;" lines of the multi-line PatLeafs fall
// outside this extracted view.
1078 def i16immSExt8 : ImmLeaf<i16, [{ return isInt<8>(Imm); }]>;
1079 def i32immSExt8 : ImmLeaf<i32, [{ return isInt<8>(Imm); }]>;
1080 def i64immSExt8 : ImmLeaf<i64, [{ return isInt<8>(Imm); }]>;
1081 def i64immSExt32 : ImmLeaf<i64, [{ return isInt<32>(Imm); }]>;
1082 def i64timmSExt32 : TImmLeaf<i64, [{ return isInt<32>(Imm); }]>;
1084 def i16relocImmSExt8 : PatLeaf<(i16 relocImm), [{
1085 return isSExtAbsoluteSymbolRef(8, N);
1087 def i32relocImmSExt8 : PatLeaf<(i32 relocImm), [{
1088 return isSExtAbsoluteSymbolRef(8, N);
1090 def i64relocImmSExt8 : PatLeaf<(i64 relocImm), [{
1091 return isSExtAbsoluteSymbolRef(8, N);
1093 def i64relocImmSExt32 : PatLeaf<(i64 relocImm), [{
1094 return isSExtAbsoluteSymbolRef(32, N);
1097 // If we have multiple users of an immediate, it's much smaller to reuse
1098 // the register, rather than encode the immediate in every instruction.
1099 // This has the risk of increasing register pressure from stretched live
1100 // ranges, however, the immediates should be trivial to rematerialize by
1101 // the RA in the event of high register pressure.
1102 // TODO : This is currently enabled for stores and binary ops. There are more
1103 // cases for which this can be enabled, though this catches the bulk of the
1105 // TODO2 : This should really also be enabled under O2, but there's currently
1106 // an issue with RA where we don't pull the constants into their users
1107 // when we rematerialize them. I'll follow-up on enabling O2 after we fix that
1109 // TODO3 : This is currently limited to single basic blocks (DAG creation
1110 // pulls block immediates to the top and merges them if necessary).
1111 // Eventually, it would be nice to allow ConstantHoisting to merge constants
1112 // globally for potentially added savings.
1114 def imm_su : PatLeaf<(imm), [{
1115 return !shouldAvoidImmediateInstFormsForSize(N);
1117 def i64immSExt32_su : PatLeaf<(i64immSExt32), [{
1118 return !shouldAvoidImmediateInstFormsForSize(N);
1121 def relocImm8_su : PatLeaf<(i8 relocImm), [{
1122 return !shouldAvoidImmediateInstFormsForSize(N);
1124 def relocImm16_su : PatLeaf<(i16 relocImm), [{
1125 return !shouldAvoidImmediateInstFormsForSize(N);
1127 def relocImm32_su : PatLeaf<(i32 relocImm), [{
1128 return !shouldAvoidImmediateInstFormsForSize(N);
1131 def i16relocImmSExt8_su : PatLeaf<(i16relocImmSExt8), [{
1132 return !shouldAvoidImmediateInstFormsForSize(N);
1134 def i32relocImmSExt8_su : PatLeaf<(i32relocImmSExt8), [{
1135 return !shouldAvoidImmediateInstFormsForSize(N);
1137 def i64relocImmSExt8_su : PatLeaf<(i64relocImmSExt8), [{
1138 return !shouldAvoidImmediateInstFormsForSize(N);
1140 def i64relocImmSExt32_su : PatLeaf<(i64relocImmSExt32), [{
1141 return !shouldAvoidImmediateInstFormsForSize(N);
1144 def i16immSExt8_su : PatLeaf<(i16immSExt8), [{
1145 return !shouldAvoidImmediateInstFormsForSize(N);
1147 def i32immSExt8_su : PatLeaf<(i32immSExt8), [{
1148 return !shouldAvoidImmediateInstFormsForSize(N);
1150 def i64immSExt8_su : PatLeaf<(i64immSExt8), [{
1151 return !shouldAvoidImmediateInstFormsForSize(N);
1154 // i64immZExt32 predicate - True if the 64-bit immediate fits in a 32-bit
1156 def i64immZExt32 : ImmLeaf<i64, [{ return isUInt<32>(Imm); }]>;
1158 def i64immZExt32SExt8 : ImmLeaf<i64, [{
1159 return isUInt<32>(Imm) && isInt<8>(static_cast<int32_t>(Imm));
// Load pattern fragments. Plain loadiNN/loadfNN match ordinary loads of the
// given type; the iNN fragments with C++ predicates additionally accept
// extending loads when it is provably safe to widen them (sufficient
// alignment + simple, non-volatile access). The sextload/zextload/extload
// families name the (result type, memory type) pairs used by MOVSX/MOVZX
// style patterns.
// NOTE(review): some "return true;"/"}]>;"" lines of the multi-line
// PatFrags fall outside this extracted view.
1162 // Helper fragments for loads.
1164 // It's safe to fold a zextload/extload from i1 as a regular i8 load. The
1165 // upper bits are guaranteed to be zero and we were going to emit a MOV8rm
1166 // which might get folded during peephole anyway.
1167 def loadi8 : PatFrag<(ops node:$ptr), (i8 (unindexedload node:$ptr)), [{
1168 LoadSDNode *LD = cast<LoadSDNode>(N);
1169 ISD::LoadExtType ExtType = LD->getExtensionType();
1170 return ExtType == ISD::NON_EXTLOAD || ExtType == ISD::EXTLOAD ||
1171 ExtType == ISD::ZEXTLOAD;
1174 // It's always safe to treat a anyext i16 load as a i32 load if the i16 is
1175 // known to be 32-bit aligned or better. Ditto for i8 to i16.
1176 def loadi16 : PatFrag<(ops node:$ptr), (i16 (unindexedload node:$ptr)), [{
1177 LoadSDNode *LD = cast<LoadSDNode>(N);
1178 ISD::LoadExtType ExtType = LD->getExtensionType();
1179 if (ExtType == ISD::NON_EXTLOAD)
1181 if (ExtType == ISD::EXTLOAD && EnablePromoteAnyextLoad)
1182 return LD->getAlignment() >= 2 && LD->isSimple();
1186 def loadi32 : PatFrag<(ops node:$ptr), (i32 (unindexedload node:$ptr)), [{
1187 LoadSDNode *LD = cast<LoadSDNode>(N);
1188 ISD::LoadExtType ExtType = LD->getExtensionType();
1189 if (ExtType == ISD::NON_EXTLOAD)
1191 if (ExtType == ISD::EXTLOAD && EnablePromoteAnyextLoad)
1192 return LD->getAlignment() >= 4 && LD->isSimple();
1196 def loadi64 : PatFrag<(ops node:$ptr), (i64 (load node:$ptr))>;
1197 def loadf16 : PatFrag<(ops node:$ptr), (f16 (load node:$ptr))>;
1198 def loadf32 : PatFrag<(ops node:$ptr), (f32 (load node:$ptr))>;
1199 def loadf64 : PatFrag<(ops node:$ptr), (f64 (load node:$ptr))>;
1200 def loadf80 : PatFrag<(ops node:$ptr), (f80 (load node:$ptr))>;
1201 def loadf128 : PatFrag<(ops node:$ptr), (f128 (load node:$ptr))>;
// f128 load that is at least naturally aligned.
1202 def alignedloadf128 : PatFrag<(ops node:$ptr), (f128 (load node:$ptr)), [{
1203 LoadSDNode *Ld = cast<LoadSDNode>(N);
1204 return Ld->getAlignment() >= Ld->getMemoryVT().getStoreSize();
// f128 "memop": aligned, or unaligned when the subtarget tolerates
// unaligned SSE memory operands.
1206 def memopf128 : PatFrag<(ops node:$ptr), (f128 (load node:$ptr)), [{
1207 LoadSDNode *Ld = cast<LoadSDNode>(N);
1208 return Subtarget->hasSSEUnalignedMem() ||
1209 Ld->getAlignment() >= Ld->getMemoryVT().getStoreSize();
1212 def sextloadi16i8 : PatFrag<(ops node:$ptr), (i16 (sextloadi8 node:$ptr))>;
1213 def sextloadi32i8 : PatFrag<(ops node:$ptr), (i32 (sextloadi8 node:$ptr))>;
1214 def sextloadi32i16 : PatFrag<(ops node:$ptr), (i32 (sextloadi16 node:$ptr))>;
1215 def sextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (sextloadi8 node:$ptr))>;
1216 def sextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (sextloadi16 node:$ptr))>;
1217 def sextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (sextloadi32 node:$ptr))>;
1219 def zextloadi8i1 : PatFrag<(ops node:$ptr), (i8 (zextloadi1 node:$ptr))>;
1220 def zextloadi16i1 : PatFrag<(ops node:$ptr), (i16 (zextloadi1 node:$ptr))>;
1221 def zextloadi32i1 : PatFrag<(ops node:$ptr), (i32 (zextloadi1 node:$ptr))>;
1222 def zextloadi16i8 : PatFrag<(ops node:$ptr), (i16 (zextloadi8 node:$ptr))>;
1223 def zextloadi32i8 : PatFrag<(ops node:$ptr), (i32 (zextloadi8 node:$ptr))>;
1224 def zextloadi32i16 : PatFrag<(ops node:$ptr), (i32 (zextloadi16 node:$ptr))>;
1225 def zextloadi64i1 : PatFrag<(ops node:$ptr), (i64 (zextloadi1 node:$ptr))>;
1226 def zextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (zextloadi8 node:$ptr))>;
1227 def zextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (zextloadi16 node:$ptr))>;
1228 def zextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (zextloadi32 node:$ptr))>;
1230 def extloadi8i1 : PatFrag<(ops node:$ptr), (i8 (extloadi1 node:$ptr))>;
1231 def extloadi16i1 : PatFrag<(ops node:$ptr), (i16 (extloadi1 node:$ptr))>;
1232 def extloadi32i1 : PatFrag<(ops node:$ptr), (i32 (extloadi1 node:$ptr))>;
1233 def extloadi16i8 : PatFrag<(ops node:$ptr), (i16 (extloadi8 node:$ptr))>;
1234 def extloadi32i8 : PatFrag<(ops node:$ptr), (i32 (extloadi8 node:$ptr))>;
1235 def extloadi32i16 : PatFrag<(ops node:$ptr), (i32 (extloadi16 node:$ptr))>;
1236 def extloadi64i1 : PatFrag<(ops node:$ptr), (i64 (extloadi1 node:$ptr))>;
1237 def extloadi64i8 : PatFrag<(ops node:$ptr), (i64 (extloadi8 node:$ptr))>;
1238 def extloadi64i16 : PatFrag<(ops node:$ptr), (i64 (extloadi16 node:$ptr))>;
1240 // We can treat an i8/i16 extending load to i64 as a 32 bit load if its known
1241 // to be 4 byte aligned or better.
1242 def extloadi64i32 : PatFrag<(ops node:$ptr), (i64 (unindexedload node:$ptr)), [{
1243 LoadSDNode *LD = cast<LoadSDNode>(N);
1244 ISD::LoadExtType ExtType = LD->getExtensionType();
1245 if (ExtType != ISD::EXTLOAD)
1247 if (LD->getMemoryVT() == MVT::i32)
1250 return LD->getAlignment() >= 4 && LD->isSimple();
// Single-use (_su) node fragments: match only when the node has exactly one
// use, so folding it into a larger pattern cannot duplicate work.
1254 // An 'and' node with a single use.
1255 def and_su : PatFrag<(ops node:$lhs, node:$rhs), (and node:$lhs, node:$rhs), [{
1256 return N->hasOneUse();
1258 // An 'srl' node with a single use.
1259 def srl_su : PatFrag<(ops node:$lhs, node:$rhs), (srl node:$lhs, node:$rhs), [{
1260 return N->hasOneUse();
1262 // An 'trunc' node with a single use.
1263 def trunc_su : PatFrag<(ops node:$src), (trunc node:$src), [{
1264 return N->hasOneUse();
1267 //===----------------------------------------------------------------------===//
1268 // Instruction list.
// NOP family. NOOP is the classic one-byte 0x90; the NOOPW/L/Q forms are the
// multi-byte 0F 1F "recommended" NOPs taking a (ignored) memory or register
// operand, used for padding. None have side effects.
1272 let hasSideEffects = 0, SchedRW = [WriteNop] in {
1273 def NOOP : I<0x90, RawFrm, (outs), (ins), "nop", []>;
1274 def NOOPW : I<0x1f, MRMXm, (outs), (ins i16mem:$zero),
1275 "nop{w}\t$zero", []>, TB, OpSize16, NotMemoryFoldable;
1276 def NOOPL : I<0x1f, MRMXm, (outs), (ins i32mem:$zero),
1277 "nop{l}\t$zero", []>, TB, OpSize32, NotMemoryFoldable;
1278 def NOOPQ : RI<0x1f, MRMXm, (outs), (ins i64mem:$zero),
1279 "nop{q}\t$zero", []>, TB, NotMemoryFoldable,
1280 Requires<[In64BitMode]>;
1281 // Also allow register so we can assemble/disassemble
1282 def NOOPWr : I<0x1f, MRMXr, (outs), (ins GR16:$zero),
1283 "nop{w}\t$zero", []>, TB, OpSize16, NotMemoryFoldable;
1284 def NOOPLr : I<0x1f, MRMXr, (outs), (ins GR32:$zero),
1285 "nop{l}\t$zero", []>, TB, OpSize32, NotMemoryFoldable;
1286 def NOOPQr : RI<0x1f, MRMXr, (outs), (ins GR64:$zero),
1287 "nop{q}\t$zero", []>, TB, NotMemoryFoldable,
1288 Requires<[In64BitMode]>;
// Stack-frame construction/destruction. ENTER takes a 16-bit frame size and
// an 8-bit nesting level. LEAVE has separate 32-/64-bit defs because it
// implicitly reads/writes EBP/ESP vs. RBP/RSP depending on mode.
1292 // Constructing a stack frame.
1293 def ENTER : Ii16<0xC8, RawFrmImm8, (outs), (ins i16imm:$len, i8imm:$lvl),
1294 "enter\t$len, $lvl", []>, Sched<[WriteMicrocoded]>;
1296 let SchedRW = [WriteALU] in {
1297 let Defs = [EBP, ESP], Uses = [EBP, ESP], mayLoad = 1, hasSideEffects=0 in
1298 def LEAVE : I<0xC9, RawFrm, (outs), (ins), "leave", []>,
1299 Requires<[Not64BitMode]>;
1301 let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, hasSideEffects = 0 in
1302 def LEAVE64 : I<0xC9, RawFrm, (outs), (ins), "leave", []>,
1303 Requires<[In64BitMode]>;
1306 //===----------------------------------------------------------------------===//
1307 // Miscellaneous Instructions.
// Pseudo lowered by a custom inserter for the setjmp/longjmp exception
// handling dispatch setup (X86eh_sjlj_setup_dispatch SDNode).
1310 let isBarrier = 1, hasSideEffects = 1, usesCustomInserter = 1,
1311 SchedRW = [WriteSystem] in
1312 def Int_eh_sjlj_setup_dispatch
1313 : PseudoI<(outs), (ins), [(X86eh_sjlj_setup_dispatch)]>;
// 16/32-bit PUSH/POP. All variants implicitly read and write ESP. The
// "rmr" defs are the longer 0x8F/0xFF MRM encodings of the same register
// operations, kept isCodeGenOnly/ForceDisassemble so the disassembler can
// round-trip them; the "rmm" defs are the memory-operand forms.
1315 let Defs = [ESP], Uses = [ESP], hasSideEffects=0 in {
1316 let mayLoad = 1, SchedRW = [WriteLoad] in {
1317 def POP16r : I<0x58, AddRegFrm, (outs GR16:$reg), (ins), "pop{w}\t$reg", []>,
1319 def POP32r : I<0x58, AddRegFrm, (outs GR32:$reg), (ins), "pop{l}\t$reg", []>,
1320 OpSize32, Requires<[Not64BitMode]>;
1321 // Long form for the disassembler.
1322 let isCodeGenOnly = 1, ForceDisassemble = 1 in {
1323 def POP16rmr: I<0x8F, MRM0r, (outs GR16:$reg), (ins), "pop{w}\t$reg", []>,
1324 OpSize16, NotMemoryFoldable;
1325 def POP32rmr: I<0x8F, MRM0r, (outs GR32:$reg), (ins), "pop{l}\t$reg", []>,
1326 OpSize32, Requires<[Not64BitMode]>, NotMemoryFoldable;
1327 } // isCodeGenOnly = 1, ForceDisassemble = 1
1328 } // mayLoad, SchedRW
// Pop directly to memory: reads the stack and writes the destination.
1329 let mayStore = 1, mayLoad = 1, SchedRW = [WriteCopy] in {
1330 def POP16rmm: I<0x8F, MRM0m, (outs), (ins i16mem:$dst), "pop{w}\t$dst", []>,
1332 def POP32rmm: I<0x8F, MRM0m, (outs), (ins i32mem:$dst), "pop{l}\t$dst", []>,
1333 OpSize32, Requires<[Not64BitMode]>;
1334 } // mayStore, mayLoad, SchedRW
1336 let mayStore = 1, SchedRW = [WriteStore] in {
1337 def PUSH16r : I<0x50, AddRegFrm, (outs), (ins GR16:$reg), "push{w}\t$reg",[]>,
1339 def PUSH32r : I<0x50, AddRegFrm, (outs), (ins GR32:$reg), "push{l}\t$reg",[]>,
1340 OpSize32, Requires<[Not64BitMode]>;
1341 // Long form for the disassembler.
1342 let isCodeGenOnly = 1, ForceDisassemble = 1 in {
1343 def PUSH16rmr: I<0xFF, MRM6r, (outs), (ins GR16:$reg), "push{w}\t$reg",[]>,
1344 OpSize16, NotMemoryFoldable;
1345 def PUSH32rmr: I<0xFF, MRM6r, (outs), (ins GR32:$reg), "push{l}\t$reg",[]>,
1346 OpSize32, Requires<[Not64BitMode]>, NotMemoryFoldable;
1347 } // isCodeGenOnly = 1, ForceDisassemble = 1
// Push-immediate: 0x6A takes a sign-extended imm8, 0x68 a full-width imm.
1349 def PUSH16i8 : Ii8<0x6a, RawFrm, (outs), (ins i16i8imm:$imm),
1350 "push{w}\t$imm", []>, OpSize16;
1351 def PUSHi16 : Ii16<0x68, RawFrm, (outs), (ins i16imm:$imm),
1352 "push{w}\t$imm", []>, OpSize16;
1354 def PUSH32i8 : Ii8<0x6a, RawFrm, (outs), (ins i32i8imm:$imm),
1355 "push{l}\t$imm", []>, OpSize32,
1356 Requires<[Not64BitMode]>;
1357 def PUSHi32 : Ii32<0x68, RawFrm, (outs), (ins i32imm:$imm),
1358 "push{l}\t$imm", []>, OpSize32,
1359 Requires<[Not64BitMode]>;
1360 } // mayStore, SchedRW
// Push from memory: reads the source and writes the stack.
1362 let mayLoad = 1, mayStore = 1, SchedRW = [WriteCopy] in {
1363 def PUSH16rmm: I<0xFF, MRM6m, (outs), (ins i16mem:$src), "push{w}\t$src", []>,
1365 def PUSH32rmm: I<0xFF, MRM6m, (outs), (ins i32mem:$src), "push{l}\t$src", []>,
1366 OpSize32, Requires<[Not64BitMode]>;
1367 } // mayLoad, mayStore, SchedRW
// Pseudos implementing the flags read/write intrinsics. Expanded by a
// custom inserter into PUSHF+POP / PUSH+POPF sequences, hence the RMW
// scheduling class and the stack-pointer Defs/Uses.
1371 let mayLoad = 1, mayStore = 1, usesCustomInserter = 1,
1372 SchedRW = [WriteRMW], Defs = [ESP] in {
1374 def RDFLAGS32 : PseudoI<(outs GR32:$dst), (ins),
1375 [(set GR32:$dst, (int_x86_flags_read_u32))]>,
1376 Requires<[Not64BitMode]>;
1379 def RDFLAGS64 : PseudoI<(outs GR64:$dst), (ins),
1380 [(set GR64:$dst, (int_x86_flags_read_u64))]>,
1381 Requires<[In64BitMode]>;
1384 let mayLoad = 1, mayStore = 1, usesCustomInserter = 1,
1385 SchedRW = [WriteRMW] in {
// Writing flags clobbers EFLAGS and DF in addition to moving the stack.
1386 let Defs = [ESP, EFLAGS, DF], Uses = [ESP] in
1387 def WRFLAGS32 : PseudoI<(outs), (ins GR32:$src),
1388 [(int_x86_flags_write_u32 GR32:$src)]>,
1389 Requires<[Not64BitMode]>;
1391 let Defs = [RSP, EFLAGS, DF], Uses = [RSP] in
1392 def WRFLAGS64 : PseudoI<(outs), (ins GR64:$src),
1393 [(int_x86_flags_write_u64 GR64:$src)]>,
1394 Requires<[In64BitMode]>;
// POPF pops into EFLAGS/DF (so they are Defs); PUSHF reads them and pushes
// onto the stack. 16/32-bit forms; the 64-bit forms are defined separately.
1397 let Defs = [ESP, EFLAGS, DF], Uses = [ESP], mayLoad = 1, hasSideEffects=0,
1398 SchedRW = [WriteLoad] in {
1399 def POPF16 : I<0x9D, RawFrm, (outs), (ins), "popf{w}", []>, OpSize16;
1400 def POPF32 : I<0x9D, RawFrm, (outs), (ins), "popf{l|d}", []>, OpSize32,
1401 Requires<[Not64BitMode]>;
1404 let Defs = [ESP], Uses = [ESP, EFLAGS, DF], mayStore = 1, hasSideEffects=0,
1405 SchedRW = [WriteStore] in {
1406 def PUSHF16 : I<0x9C, RawFrm, (outs), (ins), "pushf{w}", []>, OpSize16;
1407 def PUSHF32 : I<0x9C, RawFrm, (outs), (ins), "pushf{l|d}", []>, OpSize32,
1408 Requires<[Not64BitMode]>;
// 64-bit PUSH/POP, mirroring the 16/32-bit defs above but on RSP. All carry
// OpSize32 because in 64-bit mode push/pop default to 64-bit operation
// without a REX.W prefix.
1411 let Defs = [RSP], Uses = [RSP], hasSideEffects=0 in {
1412 let mayLoad = 1, SchedRW = [WriteLoad] in {
1413 def POP64r : I<0x58, AddRegFrm, (outs GR64:$reg), (ins), "pop{q}\t$reg", []>,
1414 OpSize32, Requires<[In64BitMode]>;
1415 // Long form for the disassembler.
1416 let isCodeGenOnly = 1, ForceDisassemble = 1 in {
1417 def POP64rmr: I<0x8F, MRM0r, (outs GR64:$reg), (ins), "pop{q}\t$reg", []>,
1418 OpSize32, Requires<[In64BitMode]>, NotMemoryFoldable;
1419 } // isCodeGenOnly = 1, ForceDisassemble = 1
1420 } // mayLoad, SchedRW
1421 let mayLoad = 1, mayStore = 1, SchedRW = [WriteCopy] in
1422 def POP64rmm: I<0x8F, MRM0m, (outs), (ins i64mem:$dst), "pop{q}\t$dst", []>,
1423 OpSize32, Requires<[In64BitMode]>;
1424 let mayStore = 1, SchedRW = [WriteStore] in {
1425 def PUSH64r : I<0x50, AddRegFrm, (outs), (ins GR64:$reg), "push{q}\t$reg", []>,
1426 OpSize32, Requires<[In64BitMode]>;
1427 // Long form for the disassembler.
1428 let isCodeGenOnly = 1, ForceDisassemble = 1 in {
1429 def PUSH64rmr: I<0xFF, MRM6r, (outs), (ins GR64:$reg), "push{q}\t$reg", []>,
1430 OpSize32, Requires<[In64BitMode]>, NotMemoryFoldable;
1431 } // isCodeGenOnly = 1, ForceDisassemble = 1
1432 } // mayStore, SchedRW
1433 let mayLoad = 1, mayStore = 1, SchedRW = [WriteCopy] in {
1434 def PUSH64rmm: I<0xFF, MRM6m, (outs), (ins i64mem:$src), "push{q}\t$src", []>,
1435 OpSize32, Requires<[In64BitMode]>;
1436 } // mayLoad, mayStore, SchedRW
// 64-bit push-immediate: imm8 and imm32 forms (the imm32 is sign-extended
// to 64 bits by the hardware).
1439 let Defs = [RSP], Uses = [RSP], hasSideEffects = 0, mayStore = 1,
1440 SchedRW = [WriteStore] in {
1441 def PUSH64i8 : Ii8<0x6a, RawFrm, (outs), (ins i64i8imm:$imm),
1442 "push{q}\t$imm", []>, OpSize32,
1443 Requires<[In64BitMode]>;
1444 def PUSH64i32 : Ii32S<0x68, RawFrm, (outs), (ins i64i32imm:$imm),
1445 "push{q}\t$imm", []>, OpSize32,
1446 Requires<[In64BitMode]>;
// 64-bit flag push/pop: POPFQ writes EFLAGS/DF from the stack, PUSHFQ
// reads them onto the stack.
1449 let Defs = [RSP, EFLAGS, DF], Uses = [RSP], mayLoad = 1, hasSideEffects=0 in
1450 def POPF64 : I<0x9D, RawFrm, (outs), (ins), "popfq", []>,
1451 OpSize32, Requires<[In64BitMode]>, Sched<[WriteLoad]>;
1452 let Defs = [RSP], Uses = [RSP, EFLAGS, DF], mayStore = 1, hasSideEffects=0 in
1453 def PUSHF64 : I<0x9C, RawFrm, (outs), (ins), "pushfq", []>,
1454 OpSize32, Requires<[In64BitMode]>, Sched<[WriteStore]>;
// POPA/PUSHA: pop/push all eight general-purpose registers. Only valid
// outside 64-bit mode; the Defs/Uses lists spell out the full register set.
1456 let Defs = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP], Uses = [ESP],
1457 mayLoad = 1, hasSideEffects = 0, SchedRW = [WriteLoad] in {
1458 def POPA32 : I<0x61, RawFrm, (outs), (ins), "popal", []>,
1459 OpSize32, Requires<[Not64BitMode]>;
1460 def POPA16 : I<0x61, RawFrm, (outs), (ins), "popaw", []>,
1461 OpSize16, Requires<[Not64BitMode]>;
1463 let Defs = [ESP], Uses = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP],
1464 mayStore = 1, hasSideEffects = 0, SchedRW = [WriteStore] in {
1465 def PUSHA32 : I<0x60, RawFrm, (outs), (ins), "pushal", []>,
1466 OpSize32, Requires<[Not64BitMode]>;
1467 def PUSHA16 : I<0x60, RawFrm, (outs), (ins), "pushaw", []>,
1468 OpSize16, Requires<[Not64BitMode]>;
// BSWAP: byte-swap a register in place ($src tied to $dst). Only the 32/64
// bit forms select the bswap SDNode; the 16-bit encoding exists but is
// architecturally undefined, so it is disassemble-only.
1471 let Constraints = "$src = $dst", SchedRW = [WriteBSWAP32] in {
1472 // This instruction is a consequence of BSWAP32r observing operand size. The
1473 // encoding is valid, but the behavior is undefined.
1474 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
1475 def BSWAP16r_BAD : I<0xC8, AddRegFrm, (outs GR16:$dst), (ins GR16:$src),
1476 "bswap{w}\t$dst", []>, OpSize16, TB;
1477 // GR32 = bswap GR32
1478 def BSWAP32r : I<0xC8, AddRegFrm, (outs GR32:$dst), (ins GR32:$src),
1480 [(set GR32:$dst, (bswap GR32:$src))]>, OpSize32, TB;
1482 let SchedRW = [WriteBSWAP64] in
1483 def BSWAP64r : RI<0xC8, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
1485 [(set GR64:$dst, (bswap GR64:$src))]>, TB;
1486 } // Constraints = "$src = $dst", SchedRW
// BSF/BSR: bit scan forward/reverse. Each width has a register-register
// ("rr") and register-memory ("rm") form; all set EFLAGS (ZF when the
// source is zero) and select the X86bsf/X86bsr SDNodes, which also produce
// the flags result.
1488 // Bit scan instructions.
1489 let Defs = [EFLAGS] in {
1490 def BSF16rr : I<0xBC, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
1491 "bsf{w}\t{$src, $dst|$dst, $src}",
1492 [(set GR16:$dst, EFLAGS, (X86bsf GR16:$src))]>,
1493 PS, OpSize16, Sched<[WriteBSF]>;
1494 def BSF16rm : I<0xBC, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
1495 "bsf{w}\t{$src, $dst|$dst, $src}",
1496 [(set GR16:$dst, EFLAGS, (X86bsf (loadi16 addr:$src)))]>,
1497 PS, OpSize16, Sched<[WriteBSFLd]>;
1498 def BSF32rr : I<0xBC, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
1499 "bsf{l}\t{$src, $dst|$dst, $src}",
1500 [(set GR32:$dst, EFLAGS, (X86bsf GR32:$src))]>,
1501 PS, OpSize32, Sched<[WriteBSF]>;
1502 def BSF32rm : I<0xBC, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
1503 "bsf{l}\t{$src, $dst|$dst, $src}",
1504 [(set GR32:$dst, EFLAGS, (X86bsf (loadi32 addr:$src)))]>,
1505 PS, OpSize32, Sched<[WriteBSFLd]>;
1506 def BSF64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
1507 "bsf{q}\t{$src, $dst|$dst, $src}",
1508 [(set GR64:$dst, EFLAGS, (X86bsf GR64:$src))]>,
1509 PS, Sched<[WriteBSF]>;
1510 def BSF64rm : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
1511 "bsf{q}\t{$src, $dst|$dst, $src}",
1512 [(set GR64:$dst, EFLAGS, (X86bsf (loadi64 addr:$src)))]>,
1513 PS, Sched<[WriteBSFLd]>;
1515 def BSR16rr : I<0xBD, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
1516 "bsr{w}\t{$src, $dst|$dst, $src}",
1517 [(set GR16:$dst, EFLAGS, (X86bsr GR16:$src))]>,
1518 PS, OpSize16, Sched<[WriteBSR]>;
1519 def BSR16rm : I<0xBD, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
1520 "bsr{w}\t{$src, $dst|$dst, $src}",
1521 [(set GR16:$dst, EFLAGS, (X86bsr (loadi16 addr:$src)))]>,
1522 PS, OpSize16, Sched<[WriteBSRLd]>;
1523 def BSR32rr : I<0xBD, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
1524 "bsr{l}\t{$src, $dst|$dst, $src}",
1525 [(set GR32:$dst, EFLAGS, (X86bsr GR32:$src))]>,
1526 PS, OpSize32, Sched<[WriteBSR]>;
1527 def BSR32rm : I<0xBD, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
1528 "bsr{l}\t{$src, $dst|$dst, $src}",
1529 [(set GR32:$dst, EFLAGS, (X86bsr (loadi32 addr:$src)))]>,
1530 PS, OpSize32, Sched<[WriteBSRLd]>;
1531 def BSR64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
1532 "bsr{q}\t{$src, $dst|$dst, $src}",
1533 [(set GR64:$dst, EFLAGS, (X86bsr GR64:$src))]>,
1534 PS, Sched<[WriteBSR]>;
1535 def BSR64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
1536 "bsr{q}\t{$src, $dst|$dst, $src}",
1537 [(set GR64:$dst, EFLAGS, (X86bsr (loadi64 addr:$src)))]>,
1538 PS, Sched<[WriteBSRLd]>;
1539 } // Defs = [EFLAGS]
// String instructions (MOVS/STOS/SCAS/CMPS). All are microcoded and
// implicitly use the index registers (ESI/EDI or RSI/RDI via the idx
// operand types) plus the direction flag DF; the Defs/Uses lists on each
// group record exactly which registers are read/written. SCAS/CMPS also
// set EFLAGS from the comparison.
1541 let SchedRW = [WriteMicrocoded] in {
1542 let Defs = [EDI,ESI], Uses = [EDI,ESI,DF] in {
1543 def MOVSB : I<0xA4, RawFrmDstSrc, (outs), (ins dstidx8:$dst, srcidx8:$src),
1544 "movsb\t{$src, $dst|$dst, $src}", []>;
1545 def MOVSW : I<0xA5, RawFrmDstSrc, (outs), (ins dstidx16:$dst, srcidx16:$src),
1546 "movsw\t{$src, $dst|$dst, $src}", []>, OpSize16;
1547 def MOVSL : I<0xA5, RawFrmDstSrc, (outs), (ins dstidx32:$dst, srcidx32:$src),
1548 "movs{l|d}\t{$src, $dst|$dst, $src}", []>, OpSize32;
1549 def MOVSQ : RI<0xA5, RawFrmDstSrc, (outs), (ins dstidx64:$dst, srcidx64:$src),
1550 "movsq\t{$src, $dst|$dst, $src}", []>,
1551 Requires<[In64BitMode]>;
// STOS stores AL/AX/EAX/RAX to [EDI] and advances EDI.
1554 let Defs = [EDI], Uses = [AL,EDI,DF] in
1555 def STOSB : I<0xAA, RawFrmDst, (outs), (ins dstidx8:$dst),
1556 "stosb\t{%al, $dst|$dst, al}", []>;
1557 let Defs = [EDI], Uses = [AX,EDI,DF] in
1558 def STOSW : I<0xAB, RawFrmDst, (outs), (ins dstidx16:$dst),
1559 "stosw\t{%ax, $dst|$dst, ax}", []>, OpSize16;
1560 let Defs = [EDI], Uses = [EAX,EDI,DF] in
1561 def STOSL : I<0xAB, RawFrmDst, (outs), (ins dstidx32:$dst),
1562 "stos{l|d}\t{%eax, $dst|$dst, eax}", []>, OpSize32;
1563 let Defs = [RDI], Uses = [RAX,RDI,DF] in
1564 def STOSQ : RI<0xAB, RawFrmDst, (outs), (ins dstidx64:$dst),
1565 "stosq\t{%rax, $dst|$dst, rax}", []>,
1566 Requires<[In64BitMode]>;
// SCAS compares the accumulator against [EDI], setting EFLAGS.
1568 let Defs = [EDI,EFLAGS], Uses = [AL,EDI,DF] in
1569 def SCASB : I<0xAE, RawFrmDst, (outs), (ins dstidx8:$dst),
1570 "scasb\t{$dst, %al|al, $dst}", []>;
1571 let Defs = [EDI,EFLAGS], Uses = [AX,EDI,DF] in
1572 def SCASW : I<0xAF, RawFrmDst, (outs), (ins dstidx16:$dst),
1573 "scasw\t{$dst, %ax|ax, $dst}", []>, OpSize16;
1574 let Defs = [EDI,EFLAGS], Uses = [EAX,EDI,DF] in
1575 def SCASL : I<0xAF, RawFrmDst, (outs), (ins dstidx32:$dst),
1576 "scas{l|d}\t{$dst, %eax|eax, $dst}", []>, OpSize32;
1577 let Defs = [EDI,EFLAGS], Uses = [RAX,EDI,DF] in
1578 def SCASQ : RI<0xAF, RawFrmDst, (outs), (ins dstidx64:$dst),
1579 "scasq\t{$dst, %rax|rax, $dst}", []>,
1580 Requires<[In64BitMode]>;
// CMPS compares [ESI] with [EDI], setting EFLAGS and advancing both.
1582 let Defs = [EDI,ESI,EFLAGS], Uses = [EDI,ESI,DF] in {
1583 def CMPSB : I<0xA6, RawFrmDstSrc, (outs), (ins dstidx8:$dst, srcidx8:$src),
1584 "cmpsb\t{$dst, $src|$src, $dst}", []>;
1585 def CMPSW : I<0xA7, RawFrmDstSrc, (outs), (ins dstidx16:$dst, srcidx16:$src),
1586 "cmpsw\t{$dst, $src|$src, $dst}", []>, OpSize16;
1587 def CMPSL : I<0xA7, RawFrmDstSrc, (outs), (ins dstidx32:$dst, srcidx32:$src),
1588 "cmps{l|d}\t{$dst, $src|$src, $dst}", []>, OpSize32;
1589 def CMPSQ : RI<0xA7, RawFrmDstSrc, (outs), (ins dstidx64:$dst, srcidx64:$src),
1590 "cmpsq\t{$dst, $src|$src, $dst}", []>,
1591 Requires<[In64BitMode]>;
1595 //===----------------------------------------------------------------------===//
1596 // Move Instructions.
let SchedRW = [WriteMove] in {
// Register-to-register moves. No ISel patterns; marked isMoveReg so the
// target recognizes them as plain copies, and side-effect free.
let hasSideEffects = 0, isMoveReg = 1 in {
def MOV8rr  : I<0x88, MRMDestReg, (outs GR8 :$dst), (ins GR8 :$src),
                "mov{b}\t{$src, $dst|$dst, $src}", []>;
def MOV16rr : I<0x89, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
                "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize16;
def MOV32rr : I<0x89, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
                "mov{l}\t{$src, $dst|$dst, $src}", []>, OpSize32;
def MOV64rr : RI<0x89, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}", []>;

// Immediate moves: rematerializable and as cheap as a register move.
let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in {
def MOV8ri  : Ii8 <0xB0, AddRegFrm, (outs GR8 :$dst), (ins i8imm :$src),
                   "mov{b}\t{$src, $dst|$dst, $src}",
                   [(set GR8:$dst, imm:$src)]>;
def MOV16ri : Ii16<0xB8, AddRegFrm, (outs GR16:$dst), (ins i16imm:$src),
                   "mov{w}\t{$src, $dst|$dst, $src}",
                   [(set GR16:$dst, imm:$src)]>, OpSize16;
def MOV32ri : Ii32<0xB8, AddRegFrm, (outs GR32:$dst), (ins i32imm:$src),
                   "mov{l}\t{$src, $dst|$dst, $src}",
                   [(set GR32:$dst, imm:$src)]>, OpSize32;
// 64-bit move of a sign-extended 32-bit immediate (C7 /0): shorter than the
// full movabs encoding when the constant fits in i64immSExt32.
def MOV64ri32 : RIi32S<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src),
                       "mov{q}\t{$src, $dst|$dst, $src}",
                       [(set GR64:$dst, i64immSExt32:$src)]>;

// Full 64-bit immediate (movabs). Rematerializable but, unlike the forms
// above, not flagged isAsCheapAsAMove (10-byte encoding).
let isReMaterializable = 1, isMoveImm = 1 in {
def MOV64ri : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
                    "movabs{q}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, imm:$src)]>;

// Longer forms that use a ModR/M byte. Needed for disassembler
// round-tripping only; codegen always prefers the shorter B0/B8 forms,
// which FoldGenData records as the canonical equivalents.
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
def MOV8ri_alt  : Ii8 <0xC6, MRM0r, (outs GR8 :$dst), (ins i8imm :$src),
                       "mov{b}\t{$src, $dst|$dst, $src}", []>,
                       FoldGenData<"MOV8ri">;
def MOV16ri_alt : Ii16<0xC7, MRM0r, (outs GR16:$dst), (ins i16imm:$src),
                       "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize16,
                       FoldGenData<"MOV16ri">;
def MOV32ri_alt : Ii32<0xC7, MRM0r, (outs GR32:$dst), (ins i32imm:$src),
                       "mov{l}\t{$src, $dst|$dst, $src}", []>, OpSize32,
                       FoldGenData<"MOV32ri">;
// Store-immediate forms (C6/C7 /0). The _su immediate fragments mark
// single-use immediates suitable for folding into the store.
let SchedRW = [WriteStore] in {
def MOV8mi  : Ii8 <0xC6, MRM0m, (outs), (ins i8mem :$dst, i8imm :$src),
                   "mov{b}\t{$src, $dst|$dst, $src}",
                   [(store (i8 imm_su:$src), addr:$dst)]>;
def MOV16mi : Ii16<0xC7, MRM0m, (outs), (ins i16mem:$dst, i16imm:$src),
                   "mov{w}\t{$src, $dst|$dst, $src}",
                   [(store (i16 imm_su:$src), addr:$dst)]>, OpSize16;
def MOV32mi : Ii32<0xC7, MRM0m, (outs), (ins i32mem:$dst, i32imm:$src),
                   "mov{l}\t{$src, $dst|$dst, $src}",
                   [(store (i32 imm_su:$src), addr:$dst)]>, OpSize32;
// 64-bit store of a sign-extended 32-bit immediate; no full-64-bit
// store-immediate encoding exists.
def MOV64mi32 : RIi32S<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src),
                       "mov{q}\t{$src, $dst|$dst, $src}",
                       [(store i64immSExt32_su:$src, addr:$dst)]>,
                       Requires<[In64BitMode]>;
// Select plain immediate moves/stores for relocatable immediates (symbolic
// constants fixed up at link time).
def : Pat<(i32 relocImm:$src), (MOV32ri relocImm:$src)>;
def : Pat<(i64 relocImm:$src), (MOV64ri relocImm:$src)>;

def : Pat<(store (i8 relocImm8_su:$src), addr:$dst),
          (MOV8mi addr:$dst, relocImm8_su:$src)>;
def : Pat<(store (i16 relocImm16_su:$src), addr:$dst),
          (MOV16mi addr:$dst, relocImm16_su:$src)>;
def : Pat<(store (i32 relocImm32_su:$src), addr:$dst),
          (MOV32mi addr:$dst, relocImm32_su:$src)>;
// The result must reference the same fragment that bound $src in the source
// pattern (i64relocImmSExt32_su), matching the sibling patterns above;
// previously it named the unrelated i64immSExt32_su fragment.
def : Pat<(store (i64 i64relocImmSExt32_su:$src), addr:$dst),
          (MOV64mi32 addr:$dst, i64relocImmSExt32_su:$src)>;
let hasSideEffects = 0 in {

/// Memory offset versions of moves (opcodes A0-A3). The immediate is an
/// address mode sized offset from the segment base; the accumulator is the
/// implicit register operand. AdSize16/32/64 attributes select the offset
/// width (some attribute lines were truncated in this excerpt).
let SchedRW = [WriteALU] in {
let mayLoad = 1 in {
// 32-bit-offset loads into AL/AX/EAX/RAX.
def MOV8ao32  : Ii32<0xA0, RawFrmMemOffs, (outs), (ins offset32_8:$src),
                     "mov{b}\t{$src, %al|al, $src}", []>,
def MOV16ao32 : Ii32<0xA1, RawFrmMemOffs, (outs), (ins offset32_16:$src),
                     "mov{w}\t{$src, %ax|ax, $src}", []>,
def MOV32ao32 : Ii32<0xA1, RawFrmMemOffs, (outs), (ins offset32_32:$src),
                     "mov{l}\t{$src, %eax|eax, $src}", []>,
def MOV64ao32 : RIi32<0xA1, RawFrmMemOffs, (outs), (ins offset32_64:$src),
                      "mov{q}\t{$src, %rax|rax, $src}", []>,
// 16-bit-offset loads.
def MOV8ao16  : Ii16<0xA0, RawFrmMemOffs, (outs), (ins offset16_8:$src),
                     "mov{b}\t{$src, %al|al, $src}", []>, AdSize16;
def MOV16ao16 : Ii16<0xA1, RawFrmMemOffs, (outs), (ins offset16_16:$src),
                     "mov{w}\t{$src, %ax|ax, $src}", []>,
def MOV32ao16 : Ii16<0xA1, RawFrmMemOffs, (outs), (ins offset16_32:$src),
                     "mov{l}\t{$src, %eax|eax, $src}", []>,
// Stores of the accumulator to a 32-bit offset.
let mayStore = 1 in {
def MOV8o32a  : Ii32<0xA2, RawFrmMemOffs, (outs), (ins offset32_8:$dst),
                     "mov{b}\t{%al, $dst|$dst, al}", []>, AdSize32;
def MOV16o32a : Ii32<0xA3, RawFrmMemOffs, (outs), (ins offset32_16:$dst),
                     "mov{w}\t{%ax, $dst|$dst, ax}", []>,
def MOV32o32a : Ii32<0xA3, RawFrmMemOffs, (outs), (ins offset32_32:$dst),
                     "mov{l}\t{%eax, $dst|$dst, eax}", []>,
def MOV64o32a : RIi32<0xA3, RawFrmMemOffs, (outs), (ins offset32_64:$dst),
                      "mov{q}\t{%rax, $dst|$dst, rax}", []>,
// Stores of the accumulator to a 16-bit offset.
def MOV8o16a  : Ii16<0xA2, RawFrmMemOffs, (outs), (ins offset16_8:$dst),
                     "mov{b}\t{%al, $dst|$dst, al}", []>, AdSize16;
def MOV16o16a : Ii16<0xA3, RawFrmMemOffs, (outs), (ins offset16_16:$dst),
                     "mov{w}\t{%ax, $dst|$dst, ax}", []>,
def MOV32o16a : Ii16<0xA3, RawFrmMemOffs, (outs), (ins offset16_32:$dst),
                     "mov{l}\t{%eax, $dst|$dst, eax}", []>,

// These forms all have full 64-bit absolute addresses in their instructions
// and use the movabs mnemonic to indicate this specific form.
let mayLoad = 1 in {
def MOV8ao64  : Ii64<0xA0, RawFrmMemOffs, (outs), (ins offset64_8:$src),
                     "movabs{b}\t{$src, %al|al, $src}", []>,
def MOV16ao64 : Ii64<0xA1, RawFrmMemOffs, (outs), (ins offset64_16:$src),
                     "movabs{w}\t{$src, %ax|ax, $src}", []>,
def MOV32ao64 : Ii64<0xA1, RawFrmMemOffs, (outs), (ins offset64_32:$src),
                     "movabs{l}\t{$src, %eax|eax, $src}", []>,
def MOV64ao64 : RIi64<0xA1, RawFrmMemOffs, (outs), (ins offset64_64:$src),
                      "movabs{q}\t{$src, %rax|rax, $src}", []>,
let mayStore = 1 in {
def MOV8o64a  : Ii64<0xA2, RawFrmMemOffs, (outs), (ins offset64_8:$dst),
                     "movabs{b}\t{%al, $dst|$dst, al}", []>,
def MOV16o64a : Ii64<0xA3, RawFrmMemOffs, (outs), (ins offset64_16:$dst),
                     "movabs{w}\t{%ax, $dst|$dst, ax}", []>,
def MOV32o64a : Ii64<0xA3, RawFrmMemOffs, (outs), (ins offset64_32:$dst),
                     "movabs{l}\t{%eax, $dst|$dst, eax}", []>,
def MOV64o64a : RIi64<0xA3, RawFrmMemOffs, (outs), (ins offset64_64:$dst),
                      "movabs{q}\t{%rax, $dst|$dst, rax}", []>,
} // hasSideEffects = 0
// Reversed-encoding register moves (8A/8B instead of 88/89): same operation,
// alternate ModR/M direction. Disassembler-only; FoldGenData names the
// canonical form each one is equivalent to.
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
    SchedRW = [WriteMove], isMoveReg = 1 in {
def MOV8rr_REV  : I<0x8A, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src),
                    "mov{b}\t{$src, $dst|$dst, $src}", []>,
                    FoldGenData<"MOV8rr">;
def MOV16rr_REV : I<0x8B, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
                    "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize16,
                    FoldGenData<"MOV16rr">;
def MOV32rr_REV : I<0x8B, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
                    "mov{l}\t{$src, $dst|$dst, $src}", []>, OpSize32,
                    FoldGenData<"MOV32rr">;
def MOV64rr_REV : RI<0x8B, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                     "mov{q}\t{$src, $dst|$dst, $src}", []>,
                     FoldGenData<"MOV64rr">;

// Reversed version with ".s" suffix for GAS compatibility.
def : InstAlias<"mov{b}.s\t{$src, $dst|$dst, $src}",
                (MOV8rr_REV GR8:$dst, GR8:$src), 0>;
def : InstAlias<"mov{w}.s\t{$src, $dst|$dst, $src}",
                (MOV16rr_REV GR16:$dst, GR16:$src), 0>;
def : InstAlias<"mov{l}.s\t{$src, $dst|$dst, $src}",
                (MOV32rr_REV GR32:$dst, GR32:$src), 0>;
def : InstAlias<"mov{q}.s\t{$src, $dst|$dst, $src}",
                (MOV64rr_REV GR64:$dst, GR64:$src), 0>;
// Suffix-less AT&T variants; operand register classes disambiguate.
def : InstAlias<"mov.s\t{$src, $dst|$dst, $src}",
                (MOV8rr_REV GR8:$dst, GR8:$src), 0, "att">;
def : InstAlias<"mov.s\t{$src, $dst|$dst, $src}",
                (MOV16rr_REV GR16:$dst, GR16:$src), 0, "att">;
def : InstAlias<"mov.s\t{$src, $dst|$dst, $src}",
                (MOV32rr_REV GR32:$dst, GR32:$src), 0, "att">;
def : InstAlias<"mov.s\t{$src, $dst|$dst, $src}",
                (MOV64rr_REV GR64:$dst, GR64:$src), 0, "att">;
// Register loads. canFoldAsLoad lets the peephole fold these into users;
// rematerializable because the load has no side effects here.
let canFoldAsLoad = 1, isReMaterializable = 1, SchedRW = [WriteLoad] in {
def MOV8rm  : I<0x8A, MRMSrcMem, (outs GR8 :$dst), (ins i8mem :$src),
                "mov{b}\t{$src, $dst|$dst, $src}",
                [(set GR8:$dst, (loadi8 addr:$src))]>;
def MOV16rm : I<0x8B, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
                "mov{w}\t{$src, $dst|$dst, $src}",
                [(set GR16:$dst, (loadi16 addr:$src))]>, OpSize16;
def MOV32rm : I<0x8B, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
                "mov{l}\t{$src, $dst|$dst, $src}",
                [(set GR32:$dst, (loadi32 addr:$src))]>, OpSize32;
def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}",
                 [(set GR64:$dst, (load addr:$src))]>;

// Register stores.
let SchedRW = [WriteStore] in {
def MOV8mr  : I<0x88, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src),
                "mov{b}\t{$src, $dst|$dst, $src}",
                [(store GR8:$src, addr:$dst)]>;
def MOV16mr : I<0x89, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
                "mov{w}\t{$src, $dst|$dst, $src}",
                [(store GR16:$src, addr:$dst)]>, OpSize16;
def MOV32mr : I<0x89, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
                "mov{l}\t{$src, $dst|$dst, $src}",
                [(store GR32:$src, addr:$dst)]>, OpSize32;
def MOV64mr : RI<0x89, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                 "mov{q}\t{$src, $dst|$dst, $src}",
                 [(store GR64:$src, addr:$dst)]>;
// Versions of MOV8rr, MOV8mr, and MOV8rm that use i8mem_NOREX and GR8_NOREX so
// that they can be used for copying and storing h registers, which can't be
// encoded when a REX prefix is present.
let isCodeGenOnly = 1 in {
let hasSideEffects = 0, isMoveReg = 1 in
def MOV8rr_NOREX : I<0x88, MRMDestReg,
                     (outs GR8_NOREX:$dst), (ins GR8_NOREX:$src),
                     "mov{b}\t{$src, $dst|$dst, $src}", []>,
// Store form.
let mayStore = 1, hasSideEffects = 0 in
def MOV8mr_NOREX : I<0x88, MRMDestMem,
                     (outs), (ins i8mem_NOREX:$dst, GR8_NOREX:$src),
                     "mov{b}\t{$src, $dst|$dst, $src}", []>,
                     Sched<[WriteStore]>;
// Load form; foldable/rematerializable like MOV8rm.
let mayLoad = 1, hasSideEffects = 0,
    canFoldAsLoad = 1, isReMaterializable = 1 in
def MOV8rm_NOREX : I<0x8A, MRMSrcMem,
                     (outs GR8_NOREX:$dst), (ins i8mem_NOREX:$src),
                     "mov{b}\t{$src, $dst|$dst, $src}", []>,
// Condition code ops, incl. set if equal/not equal/...
// SAHF/LAHF shuttle the low flag byte between AH and EFLAGS; gated on the
// HasLAHFSAHF predicate (not universally available in 64-bit mode).
let SchedRW = [WriteLAHFSAHF] in {
let Defs = [EFLAGS], Uses = [AH], hasSideEffects = 0 in
def SAHF : I<0x9E, RawFrm, (outs), (ins), "sahf", []>, // flags = AH
             Requires<[HasLAHFSAHF]>;
let Defs = [AH], Uses = [EFLAGS], hasSideEffects = 0 in
def LAHF : I<0x9F, RawFrm, (outs), (ins), "lahf", []>, // AH = flags
             Requires<[HasLAHFSAHF]>;
//===----------------------------------------------------------------------===//
// Bit tests instructions: BT, BTS, BTR, BTC.

let Defs = [EFLAGS] in {
// BT reg,reg: tests the selected bit, result in CF (via X86bt -> EFLAGS).
let SchedRW = [WriteBitTest] in {
def BT16rr : I<0xA3, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2),
               "bt{w}\t{$src2, $src1|$src1, $src2}",
               [(set EFLAGS, (X86bt GR16:$src1, GR16:$src2))]>,
               OpSize16, TB, NotMemoryFoldable;
def BT32rr : I<0xA3, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2),
               "bt{l}\t{$src2, $src1|$src1, $src2}",
               [(set EFLAGS, (X86bt GR32:$src1, GR32:$src2))]>,
               OpSize32, TB, NotMemoryFoldable;
def BT64rr : RI<0xA3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
                "bt{q}\t{$src2, $src1|$src1, $src2}",
                [(set EFLAGS, (X86bt GR64:$src1, GR64:$src2))]>, TB,

// Unlike with the register+register form, the memory+register form of the
// bt instruction does not ignore the high bits of the index. From ISel's
// perspective, this is pretty bizarre. Make these instructions disassembly
// only for now. These instructions are also slow on modern CPUs so that's
// another reason to avoid generating them.

let mayLoad = 1, hasSideEffects = 0, SchedRW = [WriteBitTestRegLd] in {
def BT16mr : I<0xA3, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
               "bt{w}\t{$src2, $src1|$src1, $src2}",
               []>, OpSize16, TB, NotMemoryFoldable;
def BT32mr : I<0xA3, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
               "bt{l}\t{$src2, $src1|$src1, $src2}",
               []>, OpSize32, TB, NotMemoryFoldable;
def BT64mr : RI<0xA3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
                "bt{q}\t{$src2, $src1|$src1, $src2}",
                []>, TB, NotMemoryFoldable;

// BT reg,imm8 (0F BA /4).
let SchedRW = [WriteBitTest] in {
def BT16ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR16:$src1, i16u8imm:$src2),
                  "bt{w}\t{$src2, $src1|$src1, $src2}",
                  [(set EFLAGS, (X86bt GR16:$src1, imm:$src2))]>,
def BT32ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR32:$src1, i32u8imm:$src2),
                  "bt{l}\t{$src2, $src1|$src1, $src2}",
                  [(set EFLAGS, (X86bt GR32:$src1, imm:$src2))]>,
def BT64ri8 : RIi8<0xBA, MRM4r, (outs), (ins GR64:$src1, i64u8imm:$src2),
                   "bt{q}\t{$src2, $src1|$src1, $src2}",
                   [(set EFLAGS, (X86bt GR64:$src1, imm:$src2))]>, TB;

// Note that these instructions aren't slow because that only applies when the
// other operand is in a register. When it's an immediate, bt is still fast.
let SchedRW = [WriteBitTestImmLd] in {
def BT16mi8 : Ii8<0xBA, MRM4m, (outs), (ins i16mem:$src1, i16u8imm:$src2),
                  "bt{w}\t{$src2, $src1|$src1, $src2}",
                  [(set EFLAGS, (X86bt (loadi16 addr:$src1),
def BT32mi8 : Ii8<0xBA, MRM4m, (outs), (ins i32mem:$src1, i32u8imm:$src2),
                  "bt{l}\t{$src2, $src1|$src1, $src2}",
                  [(set EFLAGS, (X86bt (loadi32 addr:$src1),
def BT64mi8 : RIi8<0xBA, MRM4m, (outs), (ins i64mem:$src1, i64u8imm:$src2),
                   "bt{q}\t{$src2, $src1|$src1, $src2}",
                   [(set EFLAGS, (X86bt (loadi64 addr:$src1),
                   Requires<[In64BitMode]>;
// BTC (bit test and complement). Read-modify-write forms tie $src1 to $dst;
// no ISel patterns (hasSideEffects = 0, assembler/disassembler use).
let hasSideEffects = 0 in {
let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
def BTC16rr : I<0xBB, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                "btc{w}\t{$src2, $src1|$src1, $src2}", []>,
                OpSize16, TB, NotMemoryFoldable;
def BTC32rr : I<0xBB, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                "btc{l}\t{$src2, $src1|$src1, $src2}", []>,
                OpSize32, TB, NotMemoryFoldable;
def BTC64rr : RI<0xBB, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB,

// Memory RMW forms with a register bit index.
let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetRegRMW] in {
def BTC16mr : I<0xBB, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
                "btc{w}\t{$src2, $src1|$src1, $src2}", []>,
                OpSize16, TB, NotMemoryFoldable;
def BTC32mr : I<0xBB, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
                "btc{l}\t{$src2, $src1|$src1, $src2}", []>,
                OpSize32, TB, NotMemoryFoldable;
def BTC64mr : RI<0xBB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
                 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB,

// Immediate bit-index forms (0F BA /7).
let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
def BTC16ri8 : Ii8<0xBA, MRM7r, (outs GR16:$dst), (ins GR16:$src1, i16u8imm:$src2),
                   "btc{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize16, TB;
def BTC32ri8 : Ii8<0xBA, MRM7r, (outs GR32:$dst), (ins GR32:$src1, i32u8imm:$src2),
                   "btc{l}\t{$src2, $src1|$src1, $src2}", []>, OpSize32, TB;
def BTC64ri8 : RIi8<0xBA, MRM7r, (outs GR64:$dst), (ins GR64:$src1, i64u8imm:$src2),
                    "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;

// Memory RMW forms with an immediate bit index.
let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetImmRMW] in {
def BTC16mi8 : Ii8<0xBA, MRM7m, (outs), (ins i16mem:$src1, i16u8imm:$src2),
                   "btc{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize16, TB;
def BTC32mi8 : Ii8<0xBA, MRM7m, (outs), (ins i32mem:$src1, i32u8imm:$src2),
                   "btc{l}\t{$src2, $src1|$src1, $src2}", []>, OpSize32, TB;
def BTC64mi8 : RIi8<0xBA, MRM7m, (outs), (ins i64mem:$src1, i64u8imm:$src2),
                    "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
                    Requires<[In64BitMode]>;
// BTR (bit test and reset). Same form structure as BTC above:
// reg RMW, mem+reg RMW, reg+imm8, mem+imm8.
let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
def BTR16rr : I<0xB3, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                "btr{w}\t{$src2, $src1|$src1, $src2}", []>,
                OpSize16, TB, NotMemoryFoldable;
def BTR32rr : I<0xB3, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                "btr{l}\t{$src2, $src1|$src1, $src2}", []>,
                OpSize32, TB, NotMemoryFoldable;
def BTR64rr : RI<0xB3, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB,

let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetRegRMW] in {
def BTR16mr : I<0xB3, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
                "btr{w}\t{$src2, $src1|$src1, $src2}", []>,
                OpSize16, TB, NotMemoryFoldable;
def BTR32mr : I<0xB3, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
                "btr{l}\t{$src2, $src1|$src1, $src2}", []>,
                OpSize32, TB, NotMemoryFoldable;
def BTR64mr : RI<0xB3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
                 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB,

// Immediate bit-index forms (0F BA /6).
let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
def BTR16ri8 : Ii8<0xBA, MRM6r, (outs GR16:$dst), (ins GR16:$src1, i16u8imm:$src2),
                   "btr{w}\t{$src2, $src1|$src1, $src2}", []>,
def BTR32ri8 : Ii8<0xBA, MRM6r, (outs GR32:$dst), (ins GR32:$src1, i32u8imm:$src2),
                   "btr{l}\t{$src2, $src1|$src1, $src2}", []>,
def BTR64ri8 : RIi8<0xBA, MRM6r, (outs GR64:$dst), (ins GR64:$src1, i64u8imm:$src2),
                    "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;

let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetImmRMW] in {
def BTR16mi8 : Ii8<0xBA, MRM6m, (outs), (ins i16mem:$src1, i16u8imm:$src2),
                   "btr{w}\t{$src2, $src1|$src1, $src2}", []>,
def BTR32mi8 : Ii8<0xBA, MRM6m, (outs), (ins i32mem:$src1, i32u8imm:$src2),
                   "btr{l}\t{$src2, $src1|$src1, $src2}", []>,
def BTR64mi8 : RIi8<0xBA, MRM6m, (outs), (ins i64mem:$src1, i64u8imm:$src2),
                    "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
                    Requires<[In64BitMode]>;
// BTS (bit test and set). Same form structure as BTC/BTR above.
let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
def BTS16rr : I<0xAB, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                "bts{w}\t{$src2, $src1|$src1, $src2}", []>,
                OpSize16, TB, NotMemoryFoldable;
def BTS32rr : I<0xAB, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                "bts{l}\t{$src2, $src1|$src1, $src2}", []>,
                OpSize32, TB, NotMemoryFoldable;
def BTS64rr : RI<0xAB, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB,

let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetRegRMW] in {
def BTS16mr : I<0xAB, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
                "bts{w}\t{$src2, $src1|$src1, $src2}", []>,
                OpSize16, TB, NotMemoryFoldable;
def BTS32mr : I<0xAB, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
                "bts{l}\t{$src2, $src1|$src1, $src2}", []>,
                OpSize32, TB, NotMemoryFoldable;
def BTS64mr : RI<0xAB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
                 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB,

// Immediate bit-index forms (0F BA /5).
let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
def BTS16ri8 : Ii8<0xBA, MRM5r, (outs GR16:$dst), (ins GR16:$src1, i16u8imm:$src2),
                   "bts{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize16, TB;
def BTS32ri8 : Ii8<0xBA, MRM5r, (outs GR32:$dst), (ins GR32:$src1, i32u8imm:$src2),
                   "bts{l}\t{$src2, $src1|$src1, $src2}", []>, OpSize32, TB;
def BTS64ri8 : RIi8<0xBA, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i64u8imm:$src2),
                    "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;

let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetImmRMW] in {
def BTS16mi8 : Ii8<0xBA, MRM5m, (outs), (ins i16mem:$src1, i16u8imm:$src2),
                   "bts{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize16, TB;
def BTS32mi8 : Ii8<0xBA, MRM5m, (outs), (ins i32mem:$src1, i32u8imm:$src2),
                   "bts{l}\t{$src2, $src1|$src1, $src2}", []>, OpSize32, TB;
def BTS64mi8 : RIi8<0xBA, MRM5m, (outs), (ins i64mem:$src1, i64u8imm:$src2),
                    "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
                    Requires<[In64BitMode]>;
} // hasSideEffects = 0
} // Defs = [EFLAGS]
2084 //===----------------------------------------------------------------------===//
// Atomic swap. These are just normal xchg instructions. But since a memory
// operand is referenced, the atomicity is ensured.
//
// Instantiates 8/16/32/64-bit reg<->mem forms; `frag` names the PatFrag
// family ("atomic_swap" -> atomic_swap_8 .. atomic_swap_64) used for
// selection. $val is tied to $dst (the old memory value comes back in $dst).
multiclass ATOMIC_SWAP<bits<8> opc8, bits<8> opc, string mnemonic, string frag> {
  let Constraints = "$val = $dst", SchedRW = [WriteALULd, WriteRMW] in {
    def NAME#8rm : I<opc8, MRMSrcMem, (outs GR8:$dst),
                     (ins GR8:$val, i8mem:$ptr),
                     !strconcat(mnemonic, "{b}\t{$val, $ptr|$ptr, $val}"),
                       (!cast<PatFrag>(frag # "_8") addr:$ptr, GR8:$val))]>;
    def NAME#16rm : I<opc, MRMSrcMem, (outs GR16:$dst),
                      (ins GR16:$val, i16mem:$ptr),
                      !strconcat(mnemonic, "{w}\t{$val, $ptr|$ptr, $val}"),
                        (!cast<PatFrag>(frag # "_16") addr:$ptr, GR16:$val))]>,
    def NAME#32rm : I<opc, MRMSrcMem, (outs GR32:$dst),
                      (ins GR32:$val, i32mem:$ptr),
                      !strconcat(mnemonic, "{l}\t{$val, $ptr|$ptr, $val}"),
                        (!cast<PatFrag>(frag # "_32") addr:$ptr, GR32:$val))]>,
    def NAME#64rm : RI<opc, MRMSrcMem, (outs GR64:$dst),
                       (ins GR64:$val, i64mem:$ptr),
                       !strconcat(mnemonic, "{q}\t{$val, $ptr|$ptr, $val}"),
                         (!cast<PatFrag>(frag # "_64") addr:$ptr, GR64:$val))]>;

defm XCHG : ATOMIC_SWAP<0x86, 0x87, "xchg", "atomic_swap">, NotMemoryFoldable;
// Swap between registers.
// Both operands are tied (src1->dst1, src2->dst2): xchg modifies both.
let SchedRW = [WriteXCHG] in {
let Constraints = "$src1 = $dst1, $src2 = $dst2", hasSideEffects = 0 in {
def XCHG8rr : I<0x86, MRMSrcReg, (outs GR8:$dst1, GR8:$dst2),
                (ins GR8:$src1, GR8:$src2),
                "xchg{b}\t{$src2, $src1|$src1, $src2}", []>, NotMemoryFoldable;
def XCHG16rr : I<0x87, MRMSrcReg, (outs GR16:$dst1, GR16:$dst2),
                 (ins GR16:$src1, GR16:$src2),
                 "xchg{w}\t{$src2, $src1|$src1, $src2}", []>,
                 OpSize16, NotMemoryFoldable;
def XCHG32rr : I<0x87, MRMSrcReg, (outs GR32:$dst1, GR32:$dst2),
                 (ins GR32:$src1, GR32:$src2),
                 "xchg{l}\t{$src2, $src1|$src1, $src2}", []>,
                 OpSize32, NotMemoryFoldable;
def XCHG64rr : RI<0x87, MRMSrcReg, (outs GR64:$dst1, GR64:$dst2),
                  (ins GR64:$src1 ,GR64:$src2),
                  "xchg{q}\t{$src2, $src1|$src1, $src2}", []>, NotMemoryFoldable;

// Swap between EAX and other registers.
// Short one-byte 90+r encodings; the accumulator is implicit (Uses/Defs).
let Constraints = "$src = $dst", hasSideEffects = 0 in {
let Uses = [AX], Defs = [AX] in
def XCHG16ar : I<0x90, AddRegFrm, (outs GR16:$dst), (ins GR16:$src),
                 "xchg{w}\t{$src, %ax|ax, $src}", []>, OpSize16;
let Uses = [EAX], Defs = [EAX] in
def XCHG32ar : I<0x90, AddRegFrm, (outs GR32:$dst), (ins GR32:$src),
                 "xchg{l}\t{$src, %eax|eax, $src}", []>, OpSize32;
let Uses = [RAX], Defs = [RAX] in
def XCHG64ar : RI<0x90, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
                  "xchg{q}\t{$src, %rax|rax, $src}", []>;
// XADD (exchange and add, 0F C0/C1): register forms tie both operand pairs
// and clobber EFLAGS. No ISel patterns here.
let hasSideEffects = 0, Constraints = "$src1 = $dst1, $src2 = $dst2",
    Defs = [EFLAGS], SchedRW = [WriteXCHG] in {
def XADD8rr : I<0xC0, MRMDestReg, (outs GR8:$dst1, GR8:$dst2),
                (ins GR8:$src1, GR8:$src2),
                "xadd{b}\t{$src2, $src1|$src1, $src2}", []>, TB;
def XADD16rr : I<0xC1, MRMDestReg, (outs GR16:$dst1, GR16:$dst2),
                 (ins GR16:$src1, GR16:$src2),
                 "xadd{w}\t{$src2, $src1|$src1, $src2}", []>, TB, OpSize16;
def XADD32rr : I<0xC1, MRMDestReg, (outs GR32:$dst1, GR32:$dst2),
                 (ins GR32:$src1, GR32:$src2),
                 "xadd{l}\t{$src2, $src1|$src1, $src2}", []>, TB, OpSize32;
def XADD64rr : RI<0xC1, MRMDestReg, (outs GR64:$dst1, GR64:$dst2),
                  (ins GR64:$src1, GR64:$src2),
                  "xadd{q}\t{$src2, $src1|$src1, $src2}", []>, TB;

// Memory RMW forms: $val is tied to $dst (previous memory value returned).
let mayLoad = 1, mayStore = 1, hasSideEffects = 0, Constraints = "$val = $dst",
    Defs = [EFLAGS], SchedRW = [WriteALULd, WriteRMW] in {
def XADD8rm : I<0xC0, MRMSrcMem, (outs GR8:$dst),
                (ins GR8:$val, i8mem:$ptr),
                "xadd{b}\t{$val, $ptr|$ptr, $val}", []>, TB;
def XADD16rm : I<0xC1, MRMSrcMem, (outs GR16:$dst),
                 (ins GR16:$val, i16mem:$ptr),
                 "xadd{w}\t{$val, $ptr|$ptr, $val}", []>, TB,
def XADD32rm : I<0xC1, MRMSrcMem, (outs GR32:$dst),
                 (ins GR32:$val, i32mem:$ptr),
                 "xadd{l}\t{$val, $ptr|$ptr, $val}", []>, TB,
def XADD64rm : RI<0xC1, MRMSrcMem, (outs GR64:$dst),
                  (ins GR64:$val, i64mem:$ptr),
                  "xadd{q}\t{$val, $ptr|$ptr, $val}", []>, TB;
// CMPXCHG: the accumulator of matching width is implicitly compared and
// updated (Defs/Uses below); EFLAGS is clobbered. No ISel patterns here.
let SchedRW = [WriteCMPXCHG], hasSideEffects = 0 in {
let Defs = [AL, EFLAGS], Uses = [AL] in
def CMPXCHG8rr : I<0xB0, MRMDestReg, (outs GR8:$dst), (ins GR8:$src),
                   "cmpxchg{b}\t{$src, $dst|$dst, $src}", []>, TB,
let Defs = [AX, EFLAGS], Uses = [AX] in
def CMPXCHG16rr : I<0xB1, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
                    "cmpxchg{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize16,
let Defs = [EAX, EFLAGS], Uses = [EAX] in
def CMPXCHG32rr : I<0xB1, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
                    "cmpxchg{l}\t{$src, $dst|$dst, $src}", []>, TB, OpSize32,
let Defs = [RAX, EFLAGS], Uses = [RAX] in
def CMPXCHG64rr : RI<0xB1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
                     "cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB,
} // SchedRW, hasSideEffects

// Memory destination forms (RMW).
let SchedRW = [WriteCMPXCHGRMW], mayLoad = 1, mayStore = 1,
    hasSideEffects = 0 in {
let Defs = [AL, EFLAGS], Uses = [AL] in
def CMPXCHG8rm : I<0xB0, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src),
                   "cmpxchg{b}\t{$src, $dst|$dst, $src}", []>, TB,
let Defs = [AX, EFLAGS], Uses = [AX] in
def CMPXCHG16rm : I<0xB1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
                    "cmpxchg{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize16,
let Defs = [EAX, EFLAGS], Uses = [EAX] in
def CMPXCHG32rm : I<0xB1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
                    "cmpxchg{l}\t{$src, $dst|$dst, $src}", []>, TB, OpSize32,
let Defs = [RAX, EFLAGS], Uses = [RAX] in
def CMPXCHG64rm : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                     "cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB,

// 8-byte compare-exchange: EDX:EAX compared, ECX:EBX stored (see Uses).
let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX] in
def CMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i64mem:$dst),
                  "cmpxchg8b\t$dst", []>, TB, Requires<[HasCX8]>;

// 16-byte compare-exchange: RDX:RAX compared, RCX:RBX stored.
let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX] in
// NOTE: In64BitMode check needed for the AssemblerPredicate.
def CMPXCHG16B : RI<0xC7, MRM1m, (outs), (ins i128mem:$dst),
                    "cmpxchg16b\t$dst", []>,
                    TB, Requires<[HasCX16,In64BitMode]>;
} // SchedRW, mayLoad, mayStore, hasSideEffects
// Lock instruction prefix
// Prefix bytes modeled as standalone instructions (PrefixByte form) so the
// assembler/disassembler can emit and decode them independently.
let SchedRW = [WriteMicrocoded] in
def LOCK_PREFIX : I<0xF0, PrefixByte, (outs), (ins), "lock", []>;

let SchedRW = [WriteNop] in {

// Rex64 instruction prefix
def REX64_PREFIX : I<0x48, PrefixByte, (outs), (ins), "rex64", []>,
                     Requires<[In64BitMode]>;

// Data16 instruction prefix
def DATA16_PREFIX : I<0x66, PrefixByte, (outs), (ins), "data16", []>;

// Repeat string operation instruction prefixes
// They implicitly read/write ECX and honor DF, hence the microcoded model.
let Defs = [ECX], Uses = [ECX,DF], SchedRW = [WriteMicrocoded] in {
// Repeat (used with INS, OUTS, MOVS, LODS and STOS)
def REP_PREFIX : I<0xF3, PrefixByte, (outs), (ins), "rep", []>;
// Repeat while not equal (used with CMPS and SCAS)
def REPNE_PREFIX : I<0xF2, PrefixByte, (outs), (ins), "repne", []>;
// String manipulation instructions
// LODS: load the accumulator from [ESI], advancing ESI per DF.
let SchedRW = [WriteMicrocoded] in {
let Defs = [AL,ESI], Uses = [ESI,DF] in
def LODSB : I<0xAC, RawFrmSrc, (outs), (ins srcidx8:$src),
              "lodsb\t{$src, %al|al, $src}", []>;
let Defs = [AX,ESI], Uses = [ESI,DF] in
def LODSW : I<0xAD, RawFrmSrc, (outs), (ins srcidx16:$src),
              "lodsw\t{$src, %ax|ax, $src}", []>, OpSize16;
let Defs = [EAX,ESI], Uses = [ESI,DF] in
def LODSL : I<0xAD, RawFrmSrc, (outs), (ins srcidx32:$src),
              "lods{l|d}\t{$src, %eax|eax, $src}", []>, OpSize32;
let Defs = [RAX,ESI], Uses = [ESI,DF] in
def LODSQ : RI<0xAD, RawFrmSrc, (outs), (ins srcidx64:$src),
               "lodsq\t{$src, %rax|rax, $src}", []>,
               Requires<[In64BitMode]>;

let SchedRW = [WriteSystem] in {
// OUTS: output string from [ESI] to the port in DX.
let Defs = [ESI], Uses = [DX,ESI,DF] in {
def OUTSB : I<0x6E, RawFrmSrc, (outs), (ins srcidx8:$src),
              "outsb\t{$src, %dx|dx, $src}", []>;
def OUTSW : I<0x6F, RawFrmSrc, (outs), (ins srcidx16:$src),
              "outsw\t{$src, %dx|dx, $src}", []>, OpSize16;
def OUTSL : I<0x6F, RawFrmSrc, (outs), (ins srcidx32:$src),
              "outs{l|d}\t{$src, %dx|dx, $src}", []>, OpSize32;

// INS: input string from the port in DX to [EDI].
let Defs = [EDI], Uses = [DX,EDI,DF] in {
def INSB : I<0x6C, RawFrmDst, (outs), (ins dstidx8:$dst),
             "insb\t{%dx, $dst|$dst, dx}", []>;
def INSW : I<0x6D, RawFrmDst, (outs), (ins dstidx16:$dst),
             "insw\t{%dx, $dst|$dst, dx}", []>, OpSize16;
def INSL : I<0x6D, RawFrmDst, (outs), (ins dstidx32:$dst),
             "ins{l|d}\t{%dx, $dst|$dst, dx}", []>, OpSize32;
// EFLAGS management instructions.
// CLC/STC/CMC manipulate the carry flag; modeled as reading and writing
// EFLAGS since the other flags pass through.
let SchedRW = [WriteALU], Defs = [EFLAGS], Uses = [EFLAGS] in {
def CLC : I<0xF8, RawFrm, (outs), (ins), "clc", []>;
def STC : I<0xF9, RawFrm, (outs), (ins), "stc", []>;
def CMC : I<0xF5, RawFrm, (outs), (ins), "cmc", []>;

// DF management instructions.
// DF is modeled as its own register, separate from EFLAGS.
let SchedRW = [WriteALU], Defs = [DF] in {
def CLD : I<0xFC, RawFrm, (outs), (ins), "cld", []>;
def STD : I<0xFD, RawFrm, (outs), (ins), "std", []>;

// Table lookup instructions
// XLAT: AL = [EBX + AL] (implicit operands per Uses/Defs).
let Uses = [AL,EBX], Defs = [AL], hasSideEffects = 0, mayLoad = 1 in
def XLAT : I<0xD7, RawFrm, (outs), (ins), "xlatb", []>, Sched<[WriteLoad]>;
// Legacy BCD/ASCII adjust instructions. All of these encodings were removed
// in 64-bit mode, hence Requires<[Not64BitMode]>. hasSideEffects = 0 because
// their effects are fully described by the Defs/Uses lists.
2317 let SchedRW = [WriteMicrocoded] in {
2318 // ASCII Adjust After Addition
2319 let Uses = [AL,EFLAGS], Defs = [AX,EFLAGS], hasSideEffects = 0 in
2320 def AAA : I<0x37, RawFrm, (outs), (ins), "aaa", []>,
2321             Requires<[Not64BitMode]>;
2323 // ASCII Adjust AX Before Division
2324 let Uses = [AX], Defs = [AX,EFLAGS], hasSideEffects = 0 in
2325 def AAD8i8 : Ii8<0xD5, RawFrm, (outs), (ins i8imm:$src),
2326                  "aad\t$src", []>, Requires<[Not64BitMode]>;
2328 // ASCII Adjust AX After Multiply
2329 let Uses = [AL], Defs = [AX,EFLAGS], hasSideEffects = 0 in
2330 def AAM8i8 : Ii8<0xD4, RawFrm, (outs), (ins i8imm:$src),
2331                  "aam\t$src", []>, Requires<[Not64BitMode]>;
2333 // ASCII Adjust AL After Subtraction - sets AX and the arithmetic flags.
2334 let Uses = [AL,EFLAGS], Defs = [AX,EFLAGS], hasSideEffects = 0 in
2335 def AAS : I<0x3F, RawFrm, (outs), (ins), "aas", []>,
2336             Requires<[Not64BitMode]>;
2338 // Decimal Adjust AL after Addition
2339 let Uses = [AL,EFLAGS], Defs = [AL,EFLAGS], hasSideEffects = 0 in
2340 def DAA : I<0x27, RawFrm, (outs), (ins), "daa", []>,
2341             Requires<[Not64BitMode]>;
2343 // Decimal Adjust AL after Subtraction
2344 let Uses = [AL,EFLAGS], Defs = [AL,EFLAGS], hasSideEffects = 0 in
2345 def DAS : I<0x2F, RawFrm, (outs), (ins), "das", []>,
2346             Requires<[Not64BitMode]>;
// BOUND and ARPL: legacy 32-bit-only system-ish instructions (the 0x62 and
// 0x63 opcodes were reused in 64-bit mode for EVEX and MOVSXD respectively).
2349 let SchedRW = [WriteSystem] in {
2350 // Check Array Index Against Bounds
2351 // Note: "bound" does not have reversed operands in at&t syntax.
2352 def BOUNDS16rm : I<0x62, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
2353                    "bound\t$dst, $src", []>, OpSize16,
2354                    Requires<[Not64BitMode]>;
2355 def BOUNDS32rm : I<0x62, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
2356                    "bound\t$dst, $src", []>, OpSize32,
2357                    Requires<[Not64BitMode]>;
2359 // Adjust RPL Field of Segment Selector
// NotMemoryFoldable: keep the reg form from being folded into the mem form,
// since the two encodings are listed separately below.
2360 def ARPL16rr : I<0x63, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
2361                  "arpl\t{$src, $dst|$dst, $src}", []>,
2362                  Requires<[Not64BitMode]>, NotMemoryFoldable;
2364 def ARPL16mr : I<0x63, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
2365                  "arpl\t{$src, $dst|$dst, $src}", []>,
2366                  Requires<[Not64BitMode]>, NotMemoryFoldable;
2369 //===----------------------------------------------------------------------===//
2370 // MOVBE Instructions
// Load/store with byte swap; selected from (bswap (load)) / (store (bswap)).
2372 let Predicates = [HasMOVBE] in {
2373   let SchedRW = [WriteALULd] in {
2374   def MOVBE16rm : I<0xF0, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
2375                     "movbe{w}\t{$src, $dst|$dst, $src}",
2376                     [(set GR16:$dst, (bswap (loadi16 addr:$src)))]>,
2378   def MOVBE32rm : I<0xF0, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
2379                     "movbe{l}\t{$src, $dst|$dst, $src}",
2380                     [(set GR32:$dst, (bswap (loadi32 addr:$src)))]>,
2382   def MOVBE64rm : RI<0xF0, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
2383                      "movbe{q}\t{$src, $dst|$dst, $src}",
2384                      [(set GR64:$dst, (bswap (loadi64 addr:$src)))]>,
// Store forms: swap the register value on the way to memory.
2387   let SchedRW = [WriteStore] in {
2388   def MOVBE16mr : I<0xF1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
2389                     "movbe{w}\t{$src, $dst|$dst, $src}",
2390                     [(store (bswap GR16:$src), addr:$dst)]>,
2392   def MOVBE32mr : I<0xF1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2393                     "movbe{l}\t{$src, $dst|$dst, $src}",
2394                     [(store (bswap GR32:$src), addr:$dst)]>,
2396   def MOVBE64mr : RI<0xF1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
2397                      "movbe{q}\t{$src, $dst|$dst, $src}",
2398                      [(store (bswap GR64:$src), addr:$dst)]>,
2403 //===----------------------------------------------------------------------===//
2404 // RDRAND Instruction
// Hardware RNG reads; both RDRAND and RDSEED produce a value plus a success
// indication in CF, modeled here as a second EFLAGS result of the SDNode.
2406 let Predicates = [HasRDRAND], Defs = [EFLAGS], SchedRW = [WriteSystem] in {
2407   def RDRAND16r : I<0xC7, MRM6r, (outs GR16:$dst), (ins),
2408                     "rdrand{w}\t$dst", [(set GR16:$dst, EFLAGS, (X86rdrand))]>,
2410   def RDRAND32r : I<0xC7, MRM6r, (outs GR32:$dst), (ins),
2411                     "rdrand{l}\t$dst", [(set GR32:$dst, EFLAGS, (X86rdrand))]>,
2413   def RDRAND64r : RI<0xC7, MRM6r, (outs GR64:$dst), (ins),
2414                      "rdrand{q}\t$dst", [(set GR64:$dst, EFLAGS, (X86rdrand))]>,
2418 //===----------------------------------------------------------------------===//
2419 // RDSEED Instruction
// Same shape as RDRAND but /7 ModRM extension and the X86rdseed node.
2421 let Predicates = [HasRDSEED], Defs = [EFLAGS], SchedRW = [WriteSystem] in {
2422   def RDSEED16r : I<0xC7, MRM7r, (outs GR16:$dst), (ins), "rdseed{w}\t$dst",
2423                     [(set GR16:$dst, EFLAGS, (X86rdseed))]>, OpSize16, PS;
2424   def RDSEED32r : I<0xC7, MRM7r, (outs GR32:$dst), (ins), "rdseed{l}\t$dst",
2425                     [(set GR32:$dst, EFLAGS, (X86rdseed))]>, OpSize32, PS;
2426   def RDSEED64r : RI<0xC7, MRM7r, (outs GR64:$dst), (ins), "rdseed{q}\t$dst",
2427                      [(set GR64:$dst, EFLAGS, (X86rdseed))]>, PS;
2430 //===----------------------------------------------------------------------===//
2431 // LZCNT Instruction
// Count leading zeros; selected from ctlz. EFLAGS is clobbered (implicit
// EFLAGS in each pattern) even though the flag results are rarely used.
2433 let Predicates = [HasLZCNT], Defs = [EFLAGS] in {
2434   def LZCNT16rr : I<0xBD, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
2435                     "lzcnt{w}\t{$src, $dst|$dst, $src}",
2436                     [(set GR16:$dst, (ctlz GR16:$src)), (implicit EFLAGS)]>,
2437                     XS, OpSize16, Sched<[WriteLZCNT]>;
2438   def LZCNT16rm : I<0xBD, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
2439                     "lzcnt{w}\t{$src, $dst|$dst, $src}",
2440                     [(set GR16:$dst, (ctlz (loadi16 addr:$src))),
2441                      (implicit EFLAGS)]>, XS, OpSize16, Sched<[WriteLZCNTLd]>;
2443   def LZCNT32rr : I<0xBD, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
2444                     "lzcnt{l}\t{$src, $dst|$dst, $src}",
2445                     [(set GR32:$dst, (ctlz GR32:$src)), (implicit EFLAGS)]>,
2446                     XS, OpSize32, Sched<[WriteLZCNT]>;
2447   def LZCNT32rm : I<0xBD, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
2448                     "lzcnt{l}\t{$src, $dst|$dst, $src}",
2449                     [(set GR32:$dst, (ctlz (loadi32 addr:$src))),
2450                      (implicit EFLAGS)]>, XS, OpSize32, Sched<[WriteLZCNTLd]>;
2452   def LZCNT64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
2453                      "lzcnt{q}\t{$src, $dst|$dst, $src}",
2454                      [(set GR64:$dst, (ctlz GR64:$src)), (implicit EFLAGS)]>,
2455                      XS, Sched<[WriteLZCNT]>;
2456   def LZCNT64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
2457                      "lzcnt{q}\t{$src, $dst|$dst, $src}",
2458                      [(set GR64:$dst, (ctlz (loadi64 addr:$src))),
2459                       (implicit EFLAGS)]>, XS, Sched<[WriteLZCNTLd]>;
2462 //===----------------------------------------------------------------------===//
// BMI TZCNT: count trailing zeros; mirrors the LZCNT defs above with
// opcode 0xBC and the cttz node.
2465 let Predicates = [HasBMI], Defs = [EFLAGS] in {
2466   def TZCNT16rr : I<0xBC, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
2467                     "tzcnt{w}\t{$src, $dst|$dst, $src}",
2468                     [(set GR16:$dst, (cttz GR16:$src)), (implicit EFLAGS)]>,
2469                     XS, OpSize16, Sched<[WriteTZCNT]>;
2470   def TZCNT16rm : I<0xBC, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
2471                     "tzcnt{w}\t{$src, $dst|$dst, $src}",
2472                     [(set GR16:$dst, (cttz (loadi16 addr:$src))),
2473                      (implicit EFLAGS)]>, XS, OpSize16, Sched<[WriteTZCNTLd]>;
2475   def TZCNT32rr : I<0xBC, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
2476                     "tzcnt{l}\t{$src, $dst|$dst, $src}",
2477                     [(set GR32:$dst, (cttz GR32:$src)), (implicit EFLAGS)]>,
2478                     XS, OpSize32, Sched<[WriteTZCNT]>;
2479   def TZCNT32rm : I<0xBC, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
2480                     "tzcnt{l}\t{$src, $dst|$dst, $src}",
2481                     [(set GR32:$dst, (cttz (loadi32 addr:$src))),
2482                      (implicit EFLAGS)]>, XS, OpSize32, Sched<[WriteTZCNTLd]>;
2484   def TZCNT64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
2485                      "tzcnt{q}\t{$src, $dst|$dst, $src}",
2486                      [(set GR64:$dst, (cttz GR64:$src)), (implicit EFLAGS)]>,
2487                      XS, Sched<[WriteTZCNT]>;
2488   def TZCNT64rm : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
2489                      "tzcnt{q}\t{$src, $dst|$dst, $src}",
2490                      [(set GR64:$dst, (cttz (loadi64 addr:$src))),
2491                       (implicit EFLAGS)]>, XS, Sched<[WriteTZCNTLd]>;
// bmi_bls - helper for the BMI1 "BLS*" unary bit-manipulation group
// (BLSR, BLSMSK, BLSI). Emits a register and a memory form; the defs have
// no patterns here (selection happens via the Pat<>s below), so
// hasSideEffects = 0 keeps them from being treated as having unmodeled
// side effects.
2494 multiclass bmi_bls<string mnemonic, Format RegMRM, Format MemMRM,
2495                    RegisterClass RC, X86MemOperand x86memop,
2496                    X86FoldableSchedWrite sched> {
2497 let hasSideEffects = 0 in {
2498   def rr : I<0xF3, RegMRM, (outs RC:$dst), (ins RC:$src),
2499              !strconcat(mnemonic, "\t{$src, $dst|$dst, $src}"), []>,
2500              T8PS, VEX_4V, Sched<[sched]>;
2502   def rm : I<0xF3, MemMRM, (outs RC:$dst), (ins x86memop:$src),
2503              !strconcat(mnemonic, "\t{$src, $dst|$dst, $src}"), []>,
2504              T8PS, VEX_4V, Sched<[sched.Folded]>;
// The three BLS* instructions share opcode 0xF3 and are distinguished by
// the ModRM reg field (/1, /2, /3). VEX_W selects the 64-bit forms.
2508 let Predicates = [HasBMI], Defs = [EFLAGS] in {
2509   defm BLSR32 : bmi_bls<"blsr{l}", MRM1r, MRM1m, GR32, i32mem, WriteBLS>;
2510   defm BLSR64 : bmi_bls<"blsr{q}", MRM1r, MRM1m, GR64, i64mem, WriteBLS>, VEX_W;
2511   defm BLSMSK32 : bmi_bls<"blsmsk{l}", MRM2r, MRM2m, GR32, i32mem, WriteBLS>;
2512   defm BLSMSK64 : bmi_bls<"blsmsk{q}", MRM2r, MRM2m, GR64, i64mem, WriteBLS>, VEX_W;
2513   defm BLSI32 : bmi_bls<"blsi{l}", MRM3r, MRM3m, GR32, i32mem, WriteBLS>;
2514   defm BLSI64 : bmi_bls<"blsi{q}", MRM3r, MRM3m, GR64, i64mem, WriteBLS>, VEX_W;
2517 //===----------------------------------------------------------------------===//
2518 // Pattern fragments to auto generate BMI instructions.
2519 //===----------------------------------------------------------------------===//
// These fragments match the flag-producing forms of or/xor/and, but only
// when no user of the flags reads CF (the BLS* instructions set CF
// differently from plain OR/XOR/AND, so CF readers can't use them).
2521 def or_flag_nocf : PatFrag<(ops node:$lhs, node:$rhs),
2522                            (X86or_flag node:$lhs, node:$rhs), [{
2523   return hasNoCarryFlagUses(SDValue(N, 1));
2526 def xor_flag_nocf : PatFrag<(ops node:$lhs, node:$rhs),
2527                             (X86xor_flag node:$lhs, node:$rhs), [{
2528   return hasNoCarryFlagUses(SDValue(N, 1));
2531 def and_flag_nocf : PatFrag<(ops node:$lhs, node:$rhs),
2532                             (X86and_flag node:$lhs, node:$rhs), [{
2533   return hasNoCarryFlagUses(SDValue(N, 1));
2536 let Predicates = [HasBMI] in {
2537   // FIXME: patterns for the load versions are not implemented
  // BLSR: x & (x - 1)  -- reset lowest set bit.
2538   def : Pat<(and GR32:$src, (add GR32:$src, -1)),
2539             (BLSR32rr GR32:$src)>;
2540   def : Pat<(and GR64:$src, (add GR64:$src, -1)),
2541             (BLSR64rr GR64:$src)>;
  // BLSMSK: x ^ (x - 1) -- mask up to lowest set bit.
2543   def : Pat<(xor GR32:$src, (add GR32:$src, -1)),
2544             (BLSMSK32rr GR32:$src)>;
2545   def : Pat<(xor GR64:$src, (add GR64:$src, -1)),
2546             (BLSMSK64rr GR64:$src)>;
  // BLSI: x & -x -- isolate lowest set bit.
2548   def : Pat<(and GR32:$src, (ineg GR32:$src)),
2549             (BLSI32rr GR32:$src)>;
2550   def : Pat<(and GR64:$src, (ineg GR64:$src)),
2551             (BLSI64rr GR64:$src)>;
2553   // Versions to match flag producing ops.
2554   def : Pat<(and_flag_nocf GR32:$src, (add GR32:$src, -1)),
2555             (BLSR32rr GR32:$src)>;
2556   def : Pat<(and_flag_nocf GR64:$src, (add GR64:$src, -1)),
2557             (BLSR64rr GR64:$src)>;
2559   def : Pat<(xor_flag_nocf GR32:$src, (add GR32:$src, -1)),
2560             (BLSMSK32rr GR32:$src)>;
2561   def : Pat<(xor_flag_nocf GR64:$src, (add GR64:$src, -1)),
2562             (BLSMSK64rr GR64:$src)>;
2564   def : Pat<(and_flag_nocf GR32:$src, (ineg GR32:$src)),
2565             (BLSI32rr GR32:$src)>;
2566   def : Pat<(and_flag_nocf GR64:$src, (ineg GR64:$src)),
2567             (BLSI64rr GR64:$src)>;
// bmi_bextr - BEXTR reg/reg and reg/mem forms. Note the 4VOp3 ModRM forms:
// the VEX.vvvv operand is the *third* (control) operand, unlike most
// VEX_4V instructions.
2570 multiclass bmi_bextr<bits<8> opc, string mnemonic, RegisterClass RC,
2571                      X86MemOperand x86memop, SDNode OpNode,
2572                      PatFrag ld_frag, X86FoldableSchedWrite Sched> {
2573   def rr : I<opc, MRMSrcReg4VOp3, (outs RC:$dst), (ins RC:$src1, RC:$src2),
2574              !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2575              [(set RC:$dst, (OpNode RC:$src1, RC:$src2)), (implicit EFLAGS)]>,
2576              T8PS, VEX, Sched<[Sched]>;
2577   def rm : I<opc, MRMSrcMem4VOp3, (outs RC:$dst), (ins x86memop:$src1, RC:$src2),
2578              !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2579              [(set RC:$dst, (OpNode (ld_frag addr:$src1), RC:$src2)),
2580               (implicit EFLAGS)]>, T8PS, VEX,
// ReadDefault entries cover the five memory-operand inputs so that only the
// register control operand gets the ReadAfterFold latency.
2581              Sched<[Sched.Folded,
2583                     ReadDefault, ReadDefault, ReadDefault, ReadDefault,
2586                     Sched.ReadAfterFold]>;
2589 let Predicates = [HasBMI], Defs = [EFLAGS] in {
2590   defm BEXTR32 : bmi_bextr<0xF7, "bextr{l}", GR32, i32mem,
2591                            X86bextr, loadi32, WriteBEXTR>;
2592   defm BEXTR64 : bmi_bextr<0xF7, "bextr{q}", GR64, i64mem,
2593                            X86bextr, loadi64, WriteBEXTR>, VEX_W;
// bmi_bzhi - BZHI (zero high bits starting at index); identical operand
// layout to BEXTR above.
2596 multiclass bmi_bzhi<bits<8> opc, string mnemonic, RegisterClass RC,
2597                     X86MemOperand x86memop, SDNode Int,
2598                     PatFrag ld_frag, X86FoldableSchedWrite Sched> {
2599   def rr : I<opc, MRMSrcReg4VOp3, (outs RC:$dst), (ins RC:$src1, RC:$src2),
2600              !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2601              [(set RC:$dst, (Int RC:$src1, RC:$src2)), (implicit EFLAGS)]>,
2602              T8PS, VEX, Sched<[Sched]>;
2603   def rm : I<opc, MRMSrcMem4VOp3, (outs RC:$dst), (ins x86memop:$src1, RC:$src2),
2604              !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2605              [(set RC:$dst, (Int (ld_frag addr:$src1), RC:$src2)),
2606               (implicit EFLAGS)]>, T8PS, VEX,
2607              Sched<[Sched.Folded,
2609                     ReadDefault, ReadDefault, ReadDefault, ReadDefault,
2612                     Sched.ReadAfterFold]>;
2615 let Predicates = [HasBMI2], Defs = [EFLAGS] in {
2616   defm BZHI32 : bmi_bzhi<0xF5, "bzhi{l}", GR32, i32mem,
2617                          X86bzhi, loadi32, WriteBZHI>;
2618   defm BZHI64 : bmi_bzhi<0xF5, "bzhi{q}", GR64, i64mem,
2619                          X86bzhi, loadi64, WriteBZHI>, VEX_W;
// Transform an all-ones-mask immediate into the BZHI index (number of
// trailing one bits), emitted as an i8 immediate.
2622 def CountTrailingOnes : SDNodeXForm<imm, [{
2623   // Count the trailing ones in the immediate.
2624   return getI8Imm(countTrailingOnes(N->getZExtValue()), SDLoc(N));
// Build the BEXTR control word: length in bits [15:8], start bit 0 in [7:0].
2627 def BEXTRMaskXForm : SDNodeXForm<imm, [{
2628   unsigned Length = countTrailingOnes(N->getZExtValue());
2629   return getI32Imm(Length << 8, SDLoc(N));
// A 64-bit contiguous low-bit mask too wide to encode as a sign-extended
// 32-bit AND immediate (otherwise a plain AND64ri32 would be cheaper).
2632 def AndMask64 : ImmLeaf<i64, [{
2633   return isMask_64(Imm) && !isUInt<32>(Imm);
2636 // Use BEXTR for 64-bit 'and' with large immediate 'mask'.
// Only when BMI2 and TBM are absent; those provide cheaper alternatives
// (BZHI / BEXTRI) matched by the patterns below and elsewhere.
2637 let Predicates = [HasBMI, NoBMI2, NoTBM] in {
2638   def : Pat<(and GR64:$src, AndMask64:$mask),
2639             (BEXTR64rr GR64:$src,
2640               (SUBREG_TO_REG (i64 0),
2641                              (MOV32ri (BEXTRMaskXForm imm:$mask)), sub_32bit))>;
2642   def : Pat<(and (loadi64 addr:$src), AndMask64:$mask),
2643             (BEXTR64rm addr:$src,
2644               (SUBREG_TO_REG (i64 0),
2645                              (MOV32ri (BEXTRMaskXForm imm:$mask)), sub_32bit))>;
2648 // Use BZHI for 64-bit 'and' with large immediate 'mask'.
2649 let Predicates = [HasBMI2, NoTBM] in {
2650   def : Pat<(and GR64:$src, AndMask64:$mask),
2651             (BZHI64rr GR64:$src,
2652               (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
2653                              (MOV8ri (CountTrailingOnes imm:$mask)), sub_8bit))>;
2654   def : Pat<(and (loadi64 addr:$src), AndMask64:$mask),
2655             (BZHI64rm addr:$src,
2656               (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
2657                              (MOV8ri (CountTrailingOnes imm:$mask)), sub_8bit))>;
// bmi_pdep_pext - shared shape for BMI2 PDEP (parallel deposit) and PEXT
// (parallel extract). Both use opcode 0xF5; the mandatory prefix added at
// the defm site (T8XD vs T8XS) distinguishes them.
2660 multiclass bmi_pdep_pext<string mnemonic, RegisterClass RC,
2661                          X86MemOperand x86memop, SDNode OpNode,
2663   def rr : I<0xF5, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
2664              !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2665              [(set RC:$dst, (OpNode RC:$src1, RC:$src2))]>,
2666              VEX_4V, Sched<[WriteALU]>;
2667   def rm : I<0xF5, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
2668              !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2669              [(set RC:$dst, (OpNode RC:$src1, (ld_frag addr:$src2)))]>,
2670              VEX_4V, Sched<[WriteALU.Folded, WriteALU.ReadAfterFold]>;
2673 let Predicates = [HasBMI2] in {
2674   defm PDEP32 : bmi_pdep_pext<"pdep{l}", GR32, i32mem,
2675                                X86pdep, loadi32>, T8XD;
2676   defm PDEP64 : bmi_pdep_pext<"pdep{q}", GR64, i64mem,
2677                                X86pdep, loadi64>, T8XD, VEX_W;
2678   defm PEXT32 : bmi_pdep_pext<"pext{l}", GR32, i32mem,
2679                                X86pext, loadi32>, T8XS;
2680   defm PEXT64 : bmi_pdep_pext<"pext{q}", GR64, i64mem,
2681                                X86pext, loadi64>, T8XS, VEX_W;
2684 //===----------------------------------------------------------------------===//
// TBM (AMD Trailing Bit Manipulation) instructions. All use the XOP
// encoding space and clobber EFLAGS.
2687 let Predicates = [HasTBM], Defs = [EFLAGS] in {
// tbm_bextri - BEXTR with an immediate control word instead of a register.
2689 multiclass tbm_bextri<bits<8> opc, RegisterClass RC, string OpcodeStr,
2690                       X86MemOperand x86memop, PatFrag ld_frag,
2691                       SDNode OpNode, Operand immtype,
2692                       SDPatternOperator immoperator,
2693                       X86FoldableSchedWrite Sched> {
2694   def ri : Ii32<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, immtype:$cntl),
2695                 !strconcat(OpcodeStr,
2696                            "\t{$cntl, $src1, $dst|$dst, $src1, $cntl}"),
2697                 [(set RC:$dst, (OpNode RC:$src1, immoperator:$cntl))]>,
2698                 XOP, XOPA, Sched<[Sched]>;
2699   def mi : Ii32<opc, MRMSrcMem, (outs RC:$dst),
2700                 (ins x86memop:$src1, immtype:$cntl),
2701                 !strconcat(OpcodeStr,
2702                            "\t{$cntl, $src1, $dst|$dst, $src1, $cntl}"),
2703                 [(set RC:$dst, (OpNode (ld_frag addr:$src1), immoperator:$cntl))]>,
2704                 XOP, XOPA, Sched<[Sched.Folded]>;
2707 defm BEXTRI32 : tbm_bextri<0x10, GR32, "bextr{l}", i32mem, loadi32,
2708                            X86bextri, i32imm, timm, WriteBEXTR>;
// The 64-bit form still takes a 32-bit immediate (sign-extended), hence
// ImmT = Imm32S and the i64timmSExt32 operator.
2709 let ImmT = Imm32S in
2710 defm BEXTRI64 : tbm_bextri<0x10, GR64, "bextr{q}", i64mem, loadi64,
2711                            X86bextri, i64i32imm,
2712                            i64timmSExt32, WriteBEXTR>, VEX_W;
// tbm_binary_rm - unary TBM ops encoded with a ModRM reg-field extension;
// no patterns here (selection is via the TBM Pat<>s later in the file).
2714 multiclass tbm_binary_rm<bits<8> opc, Format FormReg, Format FormMem,
2715                          RegisterClass RC, string OpcodeStr,
2716                          X86MemOperand x86memop, X86FoldableSchedWrite Sched> {
2717 let hasSideEffects = 0 in {
2718   def rr : I<opc, FormReg, (outs RC:$dst), (ins RC:$src),
2719              !strconcat(OpcodeStr,"\t{$src, $dst|$dst, $src}"), []>,
2720              XOP_4V, XOP9, Sched<[Sched]>;
2722   def rm : I<opc, FormMem, (outs RC:$dst), (ins x86memop:$src),
2723              !strconcat(OpcodeStr,"\t{$src, $dst|$dst, $src}"), []>,
2724              XOP_4V, XOP9, Sched<[Sched.Folded]>;
// Instantiates 32- and 64-bit variants with the {l}/{q} suffixes.
2728 multiclass tbm_binary_intr<bits<8> opc, string OpcodeStr,
2729                            X86FoldableSchedWrite Sched,
2730                            Format FormReg, Format FormMem> {
2731   defm NAME#32 : tbm_binary_rm<opc, FormReg, FormMem, GR32, OpcodeStr#"{l}",
2733   defm NAME#64 : tbm_binary_rm<opc, FormReg, FormMem, GR64, OpcodeStr#"{q}",
2734                                i64mem, Sched>, VEX_W;
2737 defm BLCFILL : tbm_binary_intr<0x01, "blcfill", WriteALU, MRM1r, MRM1m>;
2738 defm BLCI    : tbm_binary_intr<0x02, "blci", WriteALU, MRM6r, MRM6m>;
2739 defm BLCIC   : tbm_binary_intr<0x01, "blcic", WriteALU, MRM5r, MRM5m>;
2740 defm BLCMSK  : tbm_binary_intr<0x02, "blcmsk", WriteALU, MRM1r, MRM1m>;
2741 defm BLCS    : tbm_binary_intr<0x01, "blcs", WriteALU, MRM3r, MRM3m>;
2742 defm BLSFILL : tbm_binary_intr<0x01, "blsfill", WriteALU, MRM2r, MRM2m>;
2743 defm BLSIC   : tbm_binary_intr<0x01, "blsic", WriteALU, MRM6r, MRM6m>;
2744 defm T1MSKC  : tbm_binary_intr<0x01, "t1mskc", WriteALU, MRM7r, MRM7m>;
2745 defm TZMSK   : tbm_binary_intr<0x01, "tzmsk", WriteALU, MRM4r, MRM4m>;
2748 // Use BEXTRI for 64-bit 'and' with large immediate 'mask'.
2749 let Predicates = [HasTBM] in {
2750   def : Pat<(and GR64:$src, AndMask64:$mask),
2751             (BEXTRI64ri GR64:$src, (BEXTRMaskXForm imm:$mask))>;
2753   def : Pat<(and (loadi64 addr:$src), AndMask64:$mask),
2754             (BEXTRI64mi addr:$src, (BEXTRMaskXForm imm:$mask))>;
2757 //===----------------------------------------------------------------------===//
2758 // Lightweight Profiling Instructions
// AMD LWP: LLWPCB/SLWPCB set/read the LWP control block pointer; LWPINS and
// LWPVAL insert profiling events. All are XOP-encoded and lowered from the
// corresponding x86 intrinsics.
2760 let Predicates = [HasLWP], SchedRW = [WriteSystem] in {
2762 def LLWPCB : I<0x12, MRM0r, (outs), (ins GR32:$src), "llwpcb\t$src",
2763                [(int_x86_llwpcb GR32:$src)]>, XOP, XOP9;
2764 def SLWPCB : I<0x12, MRM1r, (outs GR32:$dst), (ins), "slwpcb\t$dst",
2765                [(set GR32:$dst, (int_x86_slwpcb))]>, XOP, XOP9;
2767 def LLWPCB64 : I<0x12, MRM0r, (outs), (ins GR64:$src), "llwpcb\t$src",
2768                  [(int_x86_llwpcb GR64:$src)]>, XOP, XOP9, VEX_W;
2769 def SLWPCB64 : I<0x12, MRM1r, (outs GR64:$dst), (ins), "slwpcb\t$dst",
2770                  [(set GR64:$dst, (int_x86_slwpcb))]>, XOP, XOP9, VEX_W;
// LWPINS reports success/failure in EFLAGS (Defs at the defm site below).
2772 multiclass lwpins_intr<RegisterClass RC> {
2773   def rri : Ii32<0x12, MRM0r, (outs), (ins RC:$src0, GR32:$src1, i32imm:$cntl),
2774                  "lwpins\t{$cntl, $src1, $src0|$src0, $src1, $cntl}",
2775                  [(set EFLAGS, (X86lwpins RC:$src0, GR32:$src1, timm:$cntl))]>,
2778   def rmi : Ii32<0x12, MRM0m, (outs), (ins RC:$src0, i32mem:$src1, i32imm:$cntl),
2779                  "lwpins\t{$cntl, $src1, $src0|$src0, $src1, $cntl}",
2780                  [(set EFLAGS, (X86lwpins RC:$src0, (loadi32 addr:$src1), timm:$cntl))]>,
2784 let Defs = [EFLAGS] in {
2785   defm LWPINS32 : lwpins_intr<GR32>;
2786   defm LWPINS64 : lwpins_intr<GR64>, VEX_W;
// LWPVAL has no visible result; it is pure side effect via the intrinsic.
2789 multiclass lwpval_intr<RegisterClass RC, Intrinsic Int> {
2790   def rri : Ii32<0x12, MRM1r, (outs), (ins RC:$src0, GR32:$src1, i32imm:$cntl),
2791                  "lwpval\t{$cntl, $src1, $src0|$src0, $src1, $cntl}",
2792                  [(Int RC:$src0, GR32:$src1, timm:$cntl)]>, XOP_4V, XOPA;
2794   def rmi : Ii32<0x12, MRM1m, (outs), (ins RC:$src0, i32mem:$src1, i32imm:$cntl),
2795                  "lwpval\t{$cntl, $src1, $src0|$src0, $src1, $cntl}",
2796                  [(Int RC:$src0, (loadi32 addr:$src1), timm:$cntl)]>,
2800 defm LWPVAL32 : lwpval_intr<GR32, int_x86_lwpval32>;
2801 defm LWPVAL64 : lwpval_intr<GR64, int_x86_lwpval64>, VEX_W;
2803 } // HasLWP, SchedRW
2805 //===----------------------------------------------------------------------===//
2806 // MONITORX/MWAITX Instructions
// Two MONITORX defs are needed because the implicit address register differs
// by mode (EAX vs RAX); the aliases below pick the right one per mode.
2808 let SchedRW = [ WriteSystem ] in {
2809   let Uses = [ EAX, ECX, EDX ] in
2810   def MONITORX32rrr : I<0x01, MRM_FA, (outs), (ins), "monitorx", []>,
2811                       TB, Requires<[ HasMWAITX, Not64BitMode ]>;
2812   let Uses = [ RAX, ECX, EDX ] in
2813   def MONITORX64rrr : I<0x01, MRM_FA, (outs), (ins), "monitorx", []>,
2814                       TB, Requires<[ HasMWAITX, In64BitMode ]>;
2816   let Uses = [ ECX, EAX, EBX ] in {
2817   def MWAITXrrr : I<0x01, MRM_FB, (outs), (ins), "mwaitx",
2818                     []>, TB, Requires<[ HasMWAITX ]>;
// Aliases accepting the explicit-register spellings of these mnemonics.
2822 def : InstAlias<"mwaitx\t{%eax, %ecx, %ebx|ebx, ecx, eax}", (MWAITXrrr)>,
2823       Requires<[ Not64BitMode ]>;
2824 def : InstAlias<"mwaitx\t{%rax, %rcx, %rbx|rbx, rcx, rax}", (MWAITXrrr)>,
2825       Requires<[ In64BitMode ]>;
2827 def : InstAlias<"monitorx\t{%eax, %ecx, %edx|edx, ecx, eax}", (MONITORX32rrr)>,
2828       Requires<[ Not64BitMode ]>;
2829 def : InstAlias<"monitorx\t{%rax, %rcx, %rdx|rdx, rcx, rax}", (MONITORX64rrr)>,
2830       Requires<[ In64BitMode ]>;
2832 //===----------------------------------------------------------------------===//
2833 // WAITPKG Instructions
// UMONITOR variants differ only in address size (AdSize16/32/64) of the
// monitored-address register.
2835 let SchedRW = [WriteSystem] in {
2836   def UMONITOR16 : I<0xAE, MRM6r, (outs), (ins GR16:$src),
2837                      "umonitor\t$src", [(int_x86_umonitor GR16:$src)]>,
2838                      XS, AdSize16, Requires<[HasWAITPKG, Not64BitMode]>;
2839   def UMONITOR32 : I<0xAE, MRM6r, (outs), (ins GR32:$src),
2840                      "umonitor\t$src", [(int_x86_umonitor GR32:$src)]>,
2841                      XS, AdSize32, Requires<[HasWAITPKG]>;
2842   def UMONITOR64 : I<0xAE, MRM6r, (outs), (ins GR64:$src),
2843                      "umonitor\t$src", [(int_x86_umonitor GR64:$src)]>,
2844                      XS, AdSize64, Requires<[HasWAITPKG, In64BitMode]>;
// UMWAIT/TPAUSE take the TSC deadline implicitly in EDX:EAX and report
// timeout status in EFLAGS (CF).
2845   let Uses = [EAX, EDX], Defs = [EFLAGS] in {
2846     def UMWAIT : I<0xAE, MRM6r,
2847                    (outs), (ins GR32orGR64:$src), "umwait\t$src",
2848                    [(set EFLAGS, (X86umwait GR32orGR64:$src, EDX, EAX))]>,
2849                  XD, Requires<[HasWAITPKG]>;
2850     def TPAUSE : I<0xAE, MRM6r,
2851                    (outs), (ins GR32orGR64:$src), "tpause\t$src",
2852                    [(set EFLAGS, (X86tpause GR32orGR64:$src, EDX, EAX))]>,
2853                  PD, Requires<[HasWAITPKG]>;
2857 //===----------------------------------------------------------------------===//
2858 // MOVDIRI - Move doubleword/quadword as direct store
// Lowered from the directstore intrinsics; plain stores are never selected
// to these.
2860 let SchedRW = [WriteStore] in {
2861 def MOVDIRI32 : I<0xF9, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2862                   "movdiri\t{$src, $dst|$dst, $src}",
2863                   [(int_x86_directstore32 addr:$dst, GR32:$src)]>,
2864                  T8PS, Requires<[HasMOVDIRI]>;
2865 def MOVDIRI64 : RI<0xF9, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
2866                    "movdiri\t{$src, $dst|$dst, $src}",
2867                    [(int_x86_directstore64 addr:$dst, GR64:$src)]>,
2868                   T8PS, Requires<[In64BitMode, HasMOVDIRI]>;
2871 //===----------------------------------------------------------------------===//
2872 // MOVDIR64B - Move 64 bytes as direct store
// The destination register is an address, so the variants are selected by
// address size. The 16-bit form has no pattern (assembler only).
2874 let SchedRW = [WriteStore] in {
2875 def MOVDIR64B16 : I<0xF8, MRMSrcMem, (outs), (ins GR16:$dst, i512mem:$src),
2876                     "movdir64b\t{$src, $dst|$dst, $src}", []>,
2877                    T8PD, AdSize16, Requires<[HasMOVDIR64B, Not64BitMode]>;
2878 def MOVDIR64B32 : I<0xF8, MRMSrcMem, (outs), (ins GR32:$dst, i512mem:$src),
2879                     "movdir64b\t{$src, $dst|$dst, $src}",
2880                     [(int_x86_movdir64b GR32:$dst, addr:$src)]>,
2881                    T8PD, AdSize32, Requires<[HasMOVDIR64B]>;
2882 def MOVDIR64B64 : I<0xF8, MRMSrcMem, (outs), (ins GR64:$dst, i512mem:$src),
2883                     "movdir64b\t{$src, $dst|$dst, $src}",
2884                     [(int_x86_movdir64b GR64:$dst, addr:$src)]>,
2885                    T8PD, AdSize64, Requires<[HasMOVDIR64B, In64BitMode]>;
2888 //===----------------------------------------------------------------------===//
2889 // ENQCMD/S - Enqueue 64-byte command as user with 64-byte write atomicity
// ENQCMD (F2 prefix) vs ENQCMDS (F3 prefix, supervisor); success is
// reported in EFLAGS (ZF), hence the set EFLAGS patterns.
2891 let SchedRW = [WriteStore], Defs = [EFLAGS] in {
2892   def ENQCMD16 : I<0xF8, MRMSrcMem, (outs), (ins GR16:$dst, i512mem:$src),
2893                  "enqcmd\t{$src, $dst|$dst, $src}",
2894                  [(set EFLAGS, (X86enqcmd GR16:$dst, addr:$src))]>,
2895                  T8XD, AdSize16, Requires<[HasENQCMD, Not64BitMode]>;
2896   def ENQCMD32 : I<0xF8, MRMSrcMem, (outs), (ins GR32:$dst, i512mem:$src),
2897                  "enqcmd\t{$src, $dst|$dst, $src}",
2898                  [(set EFLAGS, (X86enqcmd GR32:$dst, addr:$src))]>,
2899                  T8XD, AdSize32, Requires<[HasENQCMD]>;
2900   def ENQCMD64 : I<0xF8, MRMSrcMem, (outs), (ins GR64:$dst, i512mem:$src),
2901                  "enqcmd\t{$src, $dst|$dst, $src}",
2902                  [(set EFLAGS, (X86enqcmd GR64:$dst, addr:$src))]>,
2903                  T8XD, AdSize64, Requires<[HasENQCMD, In64BitMode]>;
2905   def ENQCMDS16 : I<0xF8, MRMSrcMem, (outs), (ins GR16:$dst, i512mem:$src),
2906                  "enqcmds\t{$src, $dst|$dst, $src}",
2907                  [(set EFLAGS, (X86enqcmds GR16:$dst, addr:$src))]>,
2908                  T8XS, AdSize16, Requires<[HasENQCMD, Not64BitMode]>;
2909   def ENQCMDS32 : I<0xF8, MRMSrcMem, (outs), (ins GR32:$dst, i512mem:$src),
2910                  "enqcmds\t{$src, $dst|$dst, $src}",
2911                  [(set EFLAGS, (X86enqcmds GR32:$dst, addr:$src))]>,
2912                  T8XS, AdSize32, Requires<[HasENQCMD]>;
2913   def ENQCMDS64 : I<0xF8, MRMSrcMem, (outs), (ins GR64:$dst, i512mem:$src),
2914                  "enqcmds\t{$src, $dst|$dst, $src}",
2915                  [(set EFLAGS, (X86enqcmds GR64:$dst, addr:$src))]>,
2916                  T8XS, AdSize64, Requires<[HasENQCMD, In64BitMode]>;
2919 //===----------------------------------------------------------------------===//
2920 // CLZERO Instruction
// AMD CLZERO: cache-line zero at the address implicitly in (E/R)AX; split by
// mode like MONITORX above, with explicit-operand aliases.
2922 let SchedRW = [WriteLoad] in {
2924   def CLZERO32r : I<0x01, MRM_FC, (outs), (ins), "clzero", []>,
2925                   TB, Requires<[HasCLZERO, Not64BitMode]>;
2927   def CLZERO64r : I<0x01, MRM_FC, (outs), (ins), "clzero", []>,
2928                   TB, Requires<[HasCLZERO, In64BitMode]>;
2931 def : InstAlias<"clzero\t{%eax|eax}", (CLZERO32r)>, Requires<[Not64BitMode]>;
2932 def : InstAlias<"clzero\t{%rax|rax}", (CLZERO64r)>, Requires<[In64BitMode]>;
2934 //===----------------------------------------------------------------------===//
2935 // INVLPGB Instruction
// Broadcast TLB invalidate; implicit operands in (E/R)AX and EDX.
2938 let SchedRW = [WriteSystem] in {
2939   let Uses = [EAX, EDX] in
2940   def INVLPGB32 : I<0x01, MRM_FE, (outs), (ins),
2942                   PS, Requires<[Not64BitMode]>;
2943   let Uses = [RAX, EDX] in
2944   def INVLPGB64 : I<0x01, MRM_FE, (outs), (ins),
2946                   PS, Requires<[In64BitMode]>;
2949 def : InstAlias<"invlpgb\t{%eax, %edx|eax, edx}", (INVLPGB32)>, Requires<[Not64BitMode]>;
2950 def : InstAlias<"invlpgb\t{%rax, %edx|rax, edx}", (INVLPGB64)>, Requires<[In64BitMode]>;
2952 //===----------------------------------------------------------------------===//
2953 // TLBSYNC Instruction
// Synchronize pending INVLPGB broadcasts.
2956 let SchedRW = [WriteSystem] in {
2957   def TLBSYNC : I<0x01, MRM_FF, (outs), (ins),
2962 //===----------------------------------------------------------------------===//
2963 // HRESET Instruction
// History reset; selects components via the immediate and implicit EAX.
2965 let Uses = [EAX], SchedRW = [WriteSystem] in
2966   def HRESET : Ii8<0xF0, MRM_C0, (outs), (ins i32u8imm:$imm), "hreset\t$imm", []>,
2967                Requires<[HasHRESET]>, TAXS;
2969 //===----------------------------------------------------------------------===//
2970 // SERIALIZE Instruction
// Architectural serialization point, lowered from int_x86_serialize.
2972 let SchedRW = [WriteSystem] in
2973   def SERIALIZE : I<0x01, MRM_E8, (outs), (ins), "serialize",
2974                     [(int_x86_serialize)]>, PS,
2975                     Requires<[HasSERIALIZE]>;
2977 //===----------------------------------------------------------------------===//
2978 // TSXLDTRK - TSX Suspend Load Address Tracking
// XSUSLDTRK/XRESLDTRK share opcode space with SERIALIZE (0x01 /E8) and are
// distinguished by the XD prefix.
2980 let Predicates = [HasTSXLDTRK], SchedRW = [WriteSystem] in {
2981   def XSUSLDTRK : I<0x01, MRM_E8, (outs), (ins), "xsusldtrk",
2982                     [(int_x86_xsusldtrk)]>, XD;
2983   def XRESLDTRK : I<0x01, MRM_E9, (outs), (ins), "xresldtrk",
2984                     [(int_x86_xresldtrk)]>, XD;
2987 //===----------------------------------------------------------------------===//
2988 // UINTR Instructions
// User-interrupt support; 64-bit mode only. TESTUI reads the UIF into
// EFLAGS, hence the Defs below.
2990 let Predicates = [HasUINTR, In64BitMode], SchedRW = [WriteSystem] in {
2991   def UIRET : I<0x01, MRM_EC, (outs), (ins), "uiret",
2993   def CLUI : I<0x01, MRM_EE, (outs), (ins), "clui",
2994                []>, XS;
2995   def STUI : I<0x01, MRM_EF, (outs), (ins), "stui",
2996                []>, XS;
2998   def SENDUIPI : I<0xC7, MRM6r, (outs), (ins GR64:$arg), "senduipi\t$arg",
2999                    [(int_x86_senduipi GR64:$arg)]>, XS;
3001   let Defs = [EFLAGS] in
3002   def TESTUI : I<0x01, MRM_ED, (outs), (ins), "testui",
3003                  [(set EFLAGS, (X86testui))]>, XS;
3006 //===----------------------------------------------------------------------===//
3007 // Pattern fragments to auto generate TBM instructions.
3008 //===----------------------------------------------------------------------===//
// Each TBM unary op is selected from its defining boolean identity, e.g.
// BLCFILL = x & (x+1), BLCI = x | ~(x+1), TZMSK = ~x & (x-1). The second
// half of this block repeats every pattern with the *_flag_nocf fragments
// so the flag-producing DAG nodes select the same instructions.
3010 let Predicates = [HasTBM] in {
3011   // FIXME: patterns for the load versions are not implemented
3012   def : Pat<(and GR32:$src, (add GR32:$src, 1)),
3013             (BLCFILL32rr GR32:$src)>;
3014   def : Pat<(and GR64:$src, (add GR64:$src, 1)),
3015             (BLCFILL64rr GR64:$src)>;
3017   def : Pat<(or GR32:$src, (not (add GR32:$src, 1))),
3018             (BLCI32rr GR32:$src)>;
3019   def : Pat<(or GR64:$src, (not (add GR64:$src, 1))),
3020             (BLCI64rr GR64:$src)>;
3022   // Extra patterns because opt can optimize the above patterns to this.
  // ~(x+1) == -2 - x, so match the canonicalized subtract form too.
3023   def : Pat<(or GR32:$src, (sub -2, GR32:$src)),
3024             (BLCI32rr GR32:$src)>;
3025   def : Pat<(or GR64:$src, (sub -2, GR64:$src)),
3026             (BLCI64rr GR64:$src)>;
3028   def : Pat<(and (not GR32:$src), (add GR32:$src, 1)),
3029             (BLCIC32rr GR32:$src)>;
3030   def : Pat<(and (not GR64:$src), (add GR64:$src, 1)),
3031             (BLCIC64rr GR64:$src)>;
3033   def : Pat<(xor GR32:$src, (add GR32:$src, 1)),
3034             (BLCMSK32rr GR32:$src)>;
3035   def : Pat<(xor GR64:$src, (add GR64:$src, 1)),
3036             (BLCMSK64rr GR64:$src)>;
3038   def : Pat<(or GR32:$src, (add GR32:$src, 1)),
3039             (BLCS32rr GR32:$src)>;
3040   def : Pat<(or GR64:$src, (add GR64:$src, 1)),
3041             (BLCS64rr GR64:$src)>;
3043   def : Pat<(or GR32:$src, (add GR32:$src, -1)),
3044             (BLSFILL32rr GR32:$src)>;
3045   def : Pat<(or GR64:$src, (add GR64:$src, -1)),
3046             (BLSFILL64rr GR64:$src)>;
3048   def : Pat<(or (not GR32:$src), (add GR32:$src, -1)),
3049             (BLSIC32rr GR32:$src)>;
3050   def : Pat<(or (not GR64:$src), (add GR64:$src, -1)),
3051             (BLSIC64rr GR64:$src)>;
3053   def : Pat<(or (not GR32:$src), (add GR32:$src, 1)),
3054             (T1MSKC32rr GR32:$src)>;
3055   def : Pat<(or (not GR64:$src), (add GR64:$src, 1)),
3056             (T1MSKC64rr GR64:$src)>;
3058   def : Pat<(and (not GR32:$src), (add GR32:$src, -1)),
3059             (TZMSK32rr GR32:$src)>;
3060   def : Pat<(and (not GR64:$src), (add GR64:$src, -1)),
3061             (TZMSK64rr GR64:$src)>;
3063   // Patterns to match flag producing ops.
3064   def : Pat<(and_flag_nocf GR32:$src, (add GR32:$src, 1)),
3065             (BLCFILL32rr GR32:$src)>;
3066   def : Pat<(and_flag_nocf GR64:$src, (add GR64:$src, 1)),
3067             (BLCFILL64rr GR64:$src)>;
3069   def : Pat<(or_flag_nocf GR32:$src, (not (add GR32:$src, 1))),
3070             (BLCI32rr GR32:$src)>;
3071   def : Pat<(or_flag_nocf GR64:$src, (not (add GR64:$src, 1))),
3072             (BLCI64rr GR64:$src)>;
3074   // Extra patterns because opt can optimize the above patterns to this.
3075   def : Pat<(or_flag_nocf GR32:$src, (sub -2, GR32:$src)),
3076             (BLCI32rr GR32:$src)>;
3077   def : Pat<(or_flag_nocf GR64:$src, (sub -2, GR64:$src)),
3078             (BLCI64rr GR64:$src)>;
3080   def : Pat<(and_flag_nocf (not GR32:$src), (add GR32:$src, 1)),
3081             (BLCIC32rr GR32:$src)>;
3082   def : Pat<(and_flag_nocf (not GR64:$src), (add GR64:$src, 1)),
3083             (BLCIC64rr GR64:$src)>;
3085   def : Pat<(xor_flag_nocf GR32:$src, (add GR32:$src, 1)),
3086             (BLCMSK32rr GR32:$src)>;
3087   def : Pat<(xor_flag_nocf GR64:$src, (add GR64:$src, 1)),
3088             (BLCMSK64rr GR64:$src)>;
3090   def : Pat<(or_flag_nocf GR32:$src, (add GR32:$src, 1)),
3091             (BLCS32rr GR32:$src)>;
3092   def : Pat<(or_flag_nocf GR64:$src, (add GR64:$src, 1)),
3093             (BLCS64rr GR64:$src)>;
3095   def : Pat<(or_flag_nocf GR32:$src, (add GR32:$src, -1)),
3096             (BLSFILL32rr GR32:$src)>;
3097   def : Pat<(or_flag_nocf GR64:$src, (add GR64:$src, -1)),
3098             (BLSFILL64rr GR64:$src)>;
3100   def : Pat<(or_flag_nocf (not GR32:$src), (add GR32:$src, -1)),
3101             (BLSIC32rr GR32:$src)>;
3102   def : Pat<(or_flag_nocf (not GR64:$src), (add GR64:$src, -1)),
3103             (BLSIC64rr GR64:$src)>;
3105   def : Pat<(or_flag_nocf (not GR32:$src), (add GR32:$src, 1)),
3106             (T1MSKC32rr GR32:$src)>;
3107   def : Pat<(or_flag_nocf (not GR64:$src), (add GR64:$src, 1)),
3108             (T1MSKC64rr GR64:$src)>;
3110   def : Pat<(and_flag_nocf (not GR32:$src), (add GR32:$src, -1)),
3111             (TZMSK32rr GR32:$src)>;
3112   def : Pat<(and_flag_nocf (not GR64:$src), (add GR64:$src, -1)),
3113             (TZMSK64rr GR64:$src)>;
//===----------------------------------------------------------------------===//
// Memory Instructions
//===----------------------------------------------------------------------===//

// clflushopt: optimized cache-line flush; operand is the byte address.
let Predicates = [HasCLFLUSHOPT], SchedRW = [WriteLoad] in
def CLFLUSHOPT : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
                   "clflushopt\t$src", [(int_x86_clflushopt addr:$src)]>, PD;

// clwb: cache-line write-back.
let Predicates = [HasCLWB], SchedRW = [WriteLoad] in
def CLWB : I<0xAE, MRM6m, (outs), (ins i8mem:$src), "clwb\t$src",
             [(int_x86_clwb addr:$src)]>, PD;

// cldemote: cache-line demote hint.
let Predicates = [HasCLDEMOTE], SchedRW = [WriteLoad] in
def CLDEMOTE : I<0x1C, MRM0m, (outs), (ins i8mem:$src), "cldemote\t$src",
                 [(int_x86_cldemote addr:$src)]>, PS;
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//

// Integer arithmetic, cmov/setcc, extensions, control flow, shifts/rotates.
include "X86InstrArithmetic.td"
include "X86InstrCMovSetCC.td"
include "X86InstrExtension.td"
include "X86InstrControl.td"
include "X86InstrShiftRotate.td"

// X87 Floating Point Stack.
include "X86InstrFPStack.td"

// SIMD support (SSE, MMX and AVX)
include "X86InstrFragmentsSIMD.td"

// FMA - Fused Multiply-Add support (requires FMA)
include "X86InstrFMA.td"

// XOP
include "X86InstrXOP.td"

// SSE, MMX and 3DNow! vector support.
include "X86InstrSSE.td"
include "X86InstrAVX512.td"
include "X86InstrMMX.td"
include "X86Instr3DNow.td"

// Virtualization / security extensions.
include "X86InstrVMX.td"
include "X86InstrSVM.td"
include "X86InstrSNP.td"

include "X86InstrTSX.td"
include "X86InstrSGX.td"

include "X86InstrTDX.td"

// Key Locker instructions
include "X86InstrKL.td"

// AMX tile instructions.
include "X86InstrAMX.td"

// System instructions.
include "X86InstrSystem.td"

// Compiler Pseudo Instructions and Pat Patterns
include "X86InstrCompiler.td"
include "X86InstrVecCompiler.td"
//===----------------------------------------------------------------------===//
// Assembler Mnemonic Aliases
//===----------------------------------------------------------------------===//

// Suffix-less "call" picks up the operand size of the current mode (AT&T).
def : MnemonicAlias<"call", "callw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"call", "calll", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"call", "callq", "att">, Requires<[In64BitMode]>;

// Intel spellings of sign-extension instructions -> AT&T spellings.
def : MnemonicAlias<"cbw",  "cbtw", "att">;
def : MnemonicAlias<"cwde", "cwtl", "att">;
def : MnemonicAlias<"cwd",  "cwtd", "att">;
def : MnemonicAlias<"cdq",  "cltd", "att">;
def : MnemonicAlias<"cdqe", "cltq", "att">;
def : MnemonicAlias<"cqo",  "cqto", "att">;

// In 64-bit mode lret maps to lretl; it is not ambiguous with lretq.
def : MnemonicAlias<"lret", "lretw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"lret", "lretl", "att">, Requires<[Not16BitMode]>;

def : MnemonicAlias<"leavel", "leave", "att">, Requires<[Not64BitMode]>;
def : MnemonicAlias<"leaveq", "leave", "att">, Requires<[In64BitMode]>;

def : MnemonicAlias<"loopz",  "loope">;
def : MnemonicAlias<"loopnz", "loopne">;

// Suffix-less pop/popf pick up the operand size of the current mode.
def : MnemonicAlias<"pop",   "popw",  "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"pop",   "popl",  "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"pop",   "popq",  "att">, Requires<[In64BitMode]>;
def : MnemonicAlias<"popf",  "popfw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"popf",  "popfl", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"popf",  "popfq", "att">, Requires<[In64BitMode]>;
def : MnemonicAlias<"popf",  "popfq", "intel">, Requires<[In64BitMode]>;
def : MnemonicAlias<"popfd", "popfl", "att">;
def : MnemonicAlias<"popfw", "popf",  "intel">, Requires<[In32BitMode]>;
def : MnemonicAlias<"popfw", "popf",  "intel">, Requires<[In64BitMode]>;

// FIXME: This is wrong for "push reg". "push %bx" should turn into pushw in
// all modes. However: "push (addr)" and "push $42" should default to
// pushl/pushq depending on the current mode. Similar for "pop %bx"
def : MnemonicAlias<"push",   "pushw",  "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"push",   "pushl",  "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"push",   "pushq",  "att">, Requires<[In64BitMode]>;
def : MnemonicAlias<"pushf",  "pushfw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"pushf",  "pushfl", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"pushf",  "pushfq", "att">, Requires<[In64BitMode]>;
def : MnemonicAlias<"pushf",  "pushfq", "intel">, Requires<[In64BitMode]>;
def : MnemonicAlias<"pushfd", "pushfl", "att">;
def : MnemonicAlias<"pushfw", "pushf",  "intel">, Requires<[In32BitMode]>;
def : MnemonicAlias<"pushfw", "pushf",  "intel">, Requires<[In64BitMode]>;

def : MnemonicAlias<"popad",  "popal",  "intel">, Requires<[Not64BitMode]>;
def : MnemonicAlias<"pushad", "pushal", "intel">, Requires<[Not64BitMode]>;
def : MnemonicAlias<"popa",   "popaw",  "intel">, Requires<[In16BitMode]>;
def : MnemonicAlias<"pusha",  "pushaw", "intel">, Requires<[In16BitMode]>;
def : MnemonicAlias<"popa",   "popal",  "intel">, Requires<[In32BitMode]>;
def : MnemonicAlias<"pusha",  "pushal", "intel">, Requires<[In32BitMode]>;

def : MnemonicAlias<"popa",   "popaw",  "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"pusha",  "pushaw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"popa",   "popal",  "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"pusha",  "pushal", "att">, Requires<[In32BitMode]>;

def : MnemonicAlias<"repe",  "rep">;
def : MnemonicAlias<"repz",  "rep">;
def : MnemonicAlias<"repnz", "repne">;
// Suffix-less "ret" picks up the operand size of the current mode.
def : MnemonicAlias<"ret", "retw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"ret", "retl", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"ret", "retq", "att">, Requires<[In64BitMode]>;

// Apply 'ret' behavior to 'retn'
def : MnemonicAlias<"retn", "retw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"retn", "retl", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"retn", "retq", "att">, Requires<[In64BitMode]>;
def : MnemonicAlias<"retn", "ret", "intel">;

// "sal" is an alternate spelling of "shl".
def : MnemonicAlias<"sal",  "shl",  "intel">;
def : MnemonicAlias<"salb", "shlb", "att">;
def : MnemonicAlias<"salw", "shlw", "att">;
def : MnemonicAlias<"sall", "shll", "att">;
def : MnemonicAlias<"salq", "shlq", "att">;

// "smov" is an alternate spelling of the string-move "movs".
def : MnemonicAlias<"smovb", "movsb", "att">;
def : MnemonicAlias<"smovw", "movsw", "att">;
def : MnemonicAlias<"smovl", "movsl", "att">;
def : MnemonicAlias<"smovq", "movsq", "att">;

def : MnemonicAlias<"ud2a",  "ud2",  "att">;
def : MnemonicAlias<"ud2bw", "ud1w", "att">;
def : MnemonicAlias<"ud2bl", "ud1l", "att">;
def : MnemonicAlias<"ud2bq", "ud1q", "att">;
def : MnemonicAlias<"verrw", "verr", "att">;

// MS recognizes 'xacquire'/'xrelease' as 'acquire'/'release'
def : MnemonicAlias<"acquire", "xacquire", "intel">;
def : MnemonicAlias<"release", "xrelease", "intel">;
// System instruction aliases.
def : MnemonicAlias<"iret",    "iretw",    "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"iret",    "iretl",    "att">, Requires<[Not16BitMode]>;
def : MnemonicAlias<"sysret",  "sysretl",  "att">;
def : MnemonicAlias<"sysexit", "sysexitl", "att">;

// Descriptor-table instructions pick up an operand-size suffix per mode.
def : MnemonicAlias<"lgdt", "lgdtw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"lgdt", "lgdtl", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"lgdt", "lgdtq", "att">, Requires<[In64BitMode]>;
def : MnemonicAlias<"lidt", "lidtw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"lidt", "lidtl", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"lidt", "lidtq", "att">, Requires<[In64BitMode]>;
def : MnemonicAlias<"sgdt", "sgdtw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"sgdt", "sgdtl", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"sgdt", "sgdtq", "att">, Requires<[In64BitMode]>;
def : MnemonicAlias<"sidt", "sidtw", "att">, Requires<[In16BitMode]>;
def : MnemonicAlias<"sidt", "sidtl", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"sidt", "sidtq", "att">, Requires<[In64BitMode]>;
def : MnemonicAlias<"lgdt", "lgdtw", "intel">, Requires<[In16BitMode]>;
def : MnemonicAlias<"lgdt", "lgdtd", "intel">, Requires<[In32BitMode]>;
def : MnemonicAlias<"lidt", "lidtw", "intel">, Requires<[In16BitMode]>;
def : MnemonicAlias<"lidt", "lidtd", "intel">, Requires<[In32BitMode]>;
def : MnemonicAlias<"sgdt", "sgdtw", "intel">, Requires<[In16BitMode]>;
def : MnemonicAlias<"sgdt", "sgdtd", "intel">, Requires<[In32BitMode]>;
def : MnemonicAlias<"sidt", "sidtw", "intel">, Requires<[In16BitMode]>;
def : MnemonicAlias<"sidt", "sidtd", "intel">, Requires<[In32BitMode]>;
// Floating point stack aliases.
def : MnemonicAlias<"fcmovz",   "fcmove",   "att">;
def : MnemonicAlias<"fcmova",   "fcmovnbe", "att">;
def : MnemonicAlias<"fcmovnae", "fcmovb",   "att">;
def : MnemonicAlias<"fcmovna",  "fcmovbe",  "att">;
def : MnemonicAlias<"fcmovae",  "fcmovnb",  "att">;
def : MnemonicAlias<"fcomip",   "fcompi">;
def : MnemonicAlias<"fildq",    "fildll",   "att">;
def : MnemonicAlias<"fistpq",   "fistpll",  "att">;
def : MnemonicAlias<"fisttpq",  "fisttpll", "att">;
def : MnemonicAlias<"fldcww",   "fldcw",    "att">;
def : MnemonicAlias<"fnstcww",  "fnstcw",   "att">;
def : MnemonicAlias<"fnstsww",  "fnstsw",   "att">;
def : MnemonicAlias<"fucomip",  "fucompi">;
def : MnemonicAlias<"fwait",    "wait">;

// 64-bit forms of the save/restore instructions: "q" suffix -> "64" suffix.
def : MnemonicAlias<"fxsaveq",   "fxsave64",   "att">;
def : MnemonicAlias<"fxrstorq",  "fxrstor64",  "att">;
def : MnemonicAlias<"xsaveq",    "xsave64",    "att">;
def : MnemonicAlias<"xrstorq",   "xrstor64",   "att">;
def : MnemonicAlias<"xsaveoptq", "xsaveopt64", "att">;
def : MnemonicAlias<"xrstorsq",  "xrstors64",  "att">;
def : MnemonicAlias<"xsavecq",   "xsavec64",   "att">;
def : MnemonicAlias<"xsavesq",   "xsaves64",   "att">;
// CondCodeAlias - Map one condition-code spelling of a mnemonic to another,
// e.g. "setz" to "sete", optionally restricted to one asm variant.
// NOTE(review): the template parameter list was truncated; `VariantName` is
// used below, so it is restored here with an empty default (matches all
// variants when omitted).
class CondCodeAlias<string Prefix,string Suffix, string OldCond, string NewCond,
                    string VariantName = "">
  : MnemonicAlias<!strconcat(Prefix, OldCond, Suffix),
                  !strconcat(Prefix, NewCond, Suffix), VariantName>;
/// IntegerCondCodeMnemonicAlias - This multiclass defines a bunch of
/// MnemonicAlias's that canonicalize the condition code in a mnemonic, for
/// example "setz" -> "sete".
/// NOTE(review): the header's final parameter line and the closing brace were
/// truncated; `V` (asm variant name, empty = all variants) is restored with an
/// empty default since the defm users below pass either two or three args.
multiclass IntegerCondCodeMnemonicAlias<string Prefix, string Suffix,
                                        string V = ""> {
  def C   : CondCodeAlias<Prefix, Suffix, "c",   "b",  V>; // setc   -> setb
  def Z   : CondCodeAlias<Prefix, Suffix, "z" ,  "e",  V>; // setz   -> sete
  def NA  : CondCodeAlias<Prefix, Suffix, "na",  "be", V>; // setna  -> setbe
  def NB  : CondCodeAlias<Prefix, Suffix, "nb",  "ae", V>; // setnb  -> setae
  def NC  : CondCodeAlias<Prefix, Suffix, "nc",  "ae", V>; // setnc  -> setae
  def NG  : CondCodeAlias<Prefix, Suffix, "ng",  "le", V>; // setng  -> setle
  def NL  : CondCodeAlias<Prefix, Suffix, "nl",  "ge", V>; // setnl  -> setge
  def NZ  : CondCodeAlias<Prefix, Suffix, "nz",  "ne", V>; // setnz  -> setne
  def PE  : CondCodeAlias<Prefix, Suffix, "pe",  "p",  V>; // setpe  -> setp
  def PO  : CondCodeAlias<Prefix, Suffix, "po",  "np", V>; // setpo  -> setnp

  def NAE : CondCodeAlias<Prefix, Suffix, "nae", "b",  V>; // setnae -> setb
  def NBE : CondCodeAlias<Prefix, Suffix, "nbe", "a",  V>; // setnbe -> seta
  def NGE : CondCodeAlias<Prefix, Suffix, "nge", "l",  V>; // setnge -> setl
  def NLE : CondCodeAlias<Prefix, Suffix, "nle", "g",  V>; // setnle -> setg
}
// Instantiate the condition-code canonicalization for each CC-carrying
// mnemonic family.
// Aliases for set<CC>
defm : IntegerCondCodeMnemonicAlias<"set", "">;
// Aliases for j<CC>
defm : IntegerCondCodeMnemonicAlias<"j", "">;
// Aliases for cmov<CC>{w,l,q}
defm : IntegerCondCodeMnemonicAlias<"cmov", "w", "att">;
defm : IntegerCondCodeMnemonicAlias<"cmov", "l", "att">;
defm : IntegerCondCodeMnemonicAlias<"cmov", "q", "att">;
// No size suffix for intel-style asm.
defm : IntegerCondCodeMnemonicAlias<"cmov", "", "intel">;
//===----------------------------------------------------------------------===//
// Assembler Instruction Aliases
//===----------------------------------------------------------------------===//

// aad/aam default to base 10 if no operand is specified.
def : InstAlias<"aad", (AAD8i8 10)>, Requires<[Not64BitMode]>;
def : InstAlias<"aam", (AAM8i8 10)>, Requires<[Not64BitMode]>;

// Disambiguate the mem/imm form of bt-without-a-suffix as btl.
// Likewise for btc/btr/bts.
def : InstAlias<"bt\t{$imm, $mem|$mem, $imm}",
                (BT32mi8 i32mem:$mem, i32u8imm:$imm), 0, "att">;
def : InstAlias<"btc\t{$imm, $mem|$mem, $imm}",
                (BTC32mi8 i32mem:$mem, i32u8imm:$imm), 0, "att">;
def : InstAlias<"btr\t{$imm, $mem|$mem, $imm}",
                (BTR32mi8 i32mem:$mem, i32u8imm:$imm), 0, "att">;
def : InstAlias<"bts\t{$imm, $mem|$mem, $imm}",
                (BTS32mi8 i32mem:$mem, i32u8imm:$imm), 0, "att">;

// clr <reg> zeroes a register by xor-ing it with itself.
def : InstAlias<"clr{b}\t$reg", (XOR8rr  GR8 :$reg, GR8 :$reg), 0>;
def : InstAlias<"clr{w}\t$reg", (XOR16rr GR16:$reg, GR16:$reg), 0>;
def : InstAlias<"clr{l}\t$reg", (XOR32rr GR32:$reg, GR32:$reg), 0>;
def : InstAlias<"clr{q}\t$reg", (XOR64rr GR64:$reg, GR64:$reg), 0>;
// lods aliases. Accept the destination being omitted because it's implicit
// in the mnemonic, or the mnemonic suffix being omitted because it's implicit
// in the destination.
def : InstAlias<"lodsb\t$src", (LODSB srcidx8:$src),  0>;
def : InstAlias<"lodsw\t$src", (LODSW srcidx16:$src), 0>;
def : InstAlias<"lods{l|d}\t$src", (LODSL srcidx32:$src), 0>;
def : InstAlias<"lodsq\t$src", (LODSQ srcidx64:$src), 0>, Requires<[In64BitMode]>;
def : InstAlias<"lods\t{$src, %al|al, $src}",   (LODSB srcidx8:$src),  0>;
def : InstAlias<"lods\t{$src, %ax|ax, $src}",   (LODSW srcidx16:$src), 0>;
def : InstAlias<"lods\t{$src, %eax|eax, $src}", (LODSL srcidx32:$src), 0>;
def : InstAlias<"lods\t{$src, %rax|rax, $src}", (LODSQ srcidx64:$src), 0>, Requires<[In64BitMode]>;
def : InstAlias<"lods\t$src", (LODSB srcidx8:$src),  0, "intel">;
def : InstAlias<"lods\t$src", (LODSW srcidx16:$src), 0, "intel">;
def : InstAlias<"lods\t$src", (LODSL srcidx32:$src), 0, "intel">;
def : InstAlias<"lods\t$src", (LODSQ srcidx64:$src), 0, "intel">, Requires<[In64BitMode]>;
// stos aliases. Accept the source being omitted because it's implicit in
// the mnemonic, or the mnemonic suffix being omitted because it's implicit
// in the destination.
def : InstAlias<"stosb\t$dst", (STOSB dstidx8:$dst),  0>;
def : InstAlias<"stosw\t$dst", (STOSW dstidx16:$dst), 0>;
def : InstAlias<"stos{l|d}\t$dst", (STOSL dstidx32:$dst), 0>;
def : InstAlias<"stosq\t$dst", (STOSQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
def : InstAlias<"stos\t{%al, $dst|$dst, al}",   (STOSB dstidx8:$dst),  0>;
def : InstAlias<"stos\t{%ax, $dst|$dst, ax}",   (STOSW dstidx16:$dst), 0>;
def : InstAlias<"stos\t{%eax, $dst|$dst, eax}", (STOSL dstidx32:$dst), 0>;
def : InstAlias<"stos\t{%rax, $dst|$dst, rax}", (STOSQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
def : InstAlias<"stos\t$dst", (STOSB dstidx8:$dst),  0, "intel">;
def : InstAlias<"stos\t$dst", (STOSW dstidx16:$dst), 0, "intel">;
def : InstAlias<"stos\t$dst", (STOSL dstidx32:$dst), 0, "intel">;
def : InstAlias<"stos\t$dst", (STOSQ dstidx64:$dst), 0, "intel">, Requires<[In64BitMode]>;
// scas aliases. Accept the destination being omitted because it's implicit
// in the mnemonic, or the mnemonic suffix being omitted because it's implicit
// in the destination.
def : InstAlias<"scasb\t$dst", (SCASB dstidx8:$dst),  0>;
def : InstAlias<"scasw\t$dst", (SCASW dstidx16:$dst), 0>;
def : InstAlias<"scas{l|d}\t$dst", (SCASL dstidx32:$dst), 0>;
def : InstAlias<"scasq\t$dst", (SCASQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
def : InstAlias<"scas\t{$dst, %al|al, $dst}",   (SCASB dstidx8:$dst),  0>;
def : InstAlias<"scas\t{$dst, %ax|ax, $dst}",   (SCASW dstidx16:$dst), 0>;
def : InstAlias<"scas\t{$dst, %eax|eax, $dst}", (SCASL dstidx32:$dst), 0>;
def : InstAlias<"scas\t{$dst, %rax|rax, $dst}", (SCASQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
def : InstAlias<"scas\t$dst", (SCASB dstidx8:$dst),  0, "intel">;
def : InstAlias<"scas\t$dst", (SCASW dstidx16:$dst), 0, "intel">;
def : InstAlias<"scas\t$dst", (SCASL dstidx32:$dst), 0, "intel">;
def : InstAlias<"scas\t$dst", (SCASQ dstidx64:$dst), 0, "intel">, Requires<[In64BitMode]>;
// cmps aliases. Mnemonic suffix being omitted because it's implicit
// in the destination.
def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSB dstidx8:$dst,  srcidx8:$src),  0, "intel">;
def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSW dstidx16:$dst, srcidx16:$src), 0, "intel">;
def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSL dstidx32:$dst, srcidx32:$src), 0, "intel">;
def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSQ dstidx64:$dst, srcidx64:$src), 0, "intel">, Requires<[In64BitMode]>;

// movs aliases. Mnemonic suffix being omitted because it's implicit
// in the destination.
def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSB dstidx8:$dst,  srcidx8:$src),  0, "intel">;
def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSW dstidx16:$dst, srcidx16:$src), 0, "intel">;
def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSL dstidx32:$dst, srcidx32:$src), 0, "intel">;
def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSQ dstidx64:$dst, srcidx64:$src), 0, "intel">, Requires<[In64BitMode]>;
// div and idiv aliases for explicit A register.
def : InstAlias<"div{b}\t{$src, %al|al, $src}",   (DIV8r  GR8 :$src)>;
def : InstAlias<"div{w}\t{$src, %ax|ax, $src}",   (DIV16r GR16:$src)>;
def : InstAlias<"div{l}\t{$src, %eax|eax, $src}", (DIV32r GR32:$src)>;
def : InstAlias<"div{q}\t{$src, %rax|rax, $src}", (DIV64r GR64:$src)>;
def : InstAlias<"div{b}\t{$src, %al|al, $src}",   (DIV8m  i8mem :$src)>;
def : InstAlias<"div{w}\t{$src, %ax|ax, $src}",   (DIV16m i16mem:$src)>;
def : InstAlias<"div{l}\t{$src, %eax|eax, $src}", (DIV32m i32mem:$src)>;
def : InstAlias<"div{q}\t{$src, %rax|rax, $src}", (DIV64m i64mem:$src)>;
def : InstAlias<"idiv{b}\t{$src, %al|al, $src}",   (IDIV8r  GR8 :$src)>;
def : InstAlias<"idiv{w}\t{$src, %ax|ax, $src}",   (IDIV16r GR16:$src)>;
def : InstAlias<"idiv{l}\t{$src, %eax|eax, $src}", (IDIV32r GR32:$src)>;
def : InstAlias<"idiv{q}\t{$src, %rax|rax, $src}", (IDIV64r GR64:$src)>;
def : InstAlias<"idiv{b}\t{$src, %al|al, $src}",   (IDIV8m  i8mem :$src)>;
def : InstAlias<"idiv{w}\t{$src, %ax|ax, $src}",   (IDIV16m i16mem:$src)>;
def : InstAlias<"idiv{l}\t{$src, %eax|eax, $src}", (IDIV32m i32mem:$src)>;
def : InstAlias<"idiv{q}\t{$src, %rax|rax, $src}", (IDIV64m i64mem:$src)>;
// Various unary fpstack operations default to operating on ST1.
// For example, "fxch" -> "fxch %st(1)"
// (Fixed: the "fadd" entry used "def:" without a space, inconsistent with the
// uniform "def : " style of the rest of the file.)
def : InstAlias<"faddp",     (ADD_FPrST0  ST1), 0>;
def : InstAlias<"fadd",      (ADD_FPrST0  ST1), 0>;
def : InstAlias<"fsub{|r}p", (SUBR_FPrST0 ST1), 0>;
def : InstAlias<"fsub{r|}p", (SUB_FPrST0  ST1), 0>;
def : InstAlias<"fmul",      (MUL_FPrST0  ST1), 0>;
def : InstAlias<"fmulp",     (MUL_FPrST0  ST1), 0>;
def : InstAlias<"fdiv{|r}p", (DIVR_FPrST0 ST1), 0>;
def : InstAlias<"fdiv{r|}p", (DIV_FPrST0  ST1), 0>;
def : InstAlias<"fxch",      (XCH_F ST1), 0>;
def : InstAlias<"fcom",      (COM_FST0r ST1), 0>;
def : InstAlias<"fcomp",     (COMP_FST0r ST1), 0>;
def : InstAlias<"fcomi",     (COM_FIr ST1), 0>;
def : InstAlias<"fcompi",    (COM_FIPr ST1), 0>;
def : InstAlias<"fucom",     (UCOM_Fr ST1), 0>;
def : InstAlias<"fucomp",    (UCOM_FPr ST1), 0>;
def : InstAlias<"fucomi",    (UCOM_FIr ST1), 0>;
def : InstAlias<"fucompi",   (UCOM_FIPr ST1), 0>;
// Handle fmul/fadd/fsub/fdiv instructions with explicitly written st(0) op.
// For example, "fadd %st(4), %st(0)" -> "fadd %st(4)". We also disambiguate
// instructions like "fadd %st(0), %st(0)" as "fadd %st(0)" for consistency with
// the single-operand forms.
// (Fixed: the multiclass body's closing brace was truncated, leaving the
// definition unterminated.)
multiclass FpUnaryAlias<string Mnemonic, Instruction Inst, bit EmitAlias = 1> {
  def : InstAlias<!strconcat(Mnemonic, "\t$op"),
                  (Inst RSTi:$op), EmitAlias>;
  def : InstAlias<!strconcat(Mnemonic, "\t{%st, %st|st, st}"),
                  (Inst ST0), EmitAlias>;
}
// Instantiate FpUnaryAlias for each two-operand fpstack instruction family.
defm : FpUnaryAlias<"fadd",      ADD_FST0r, 0>;
defm : FpUnaryAlias<"faddp",     ADD_FPrST0, 0>;
defm : FpUnaryAlias<"fsub",      SUB_FST0r, 0>;
defm : FpUnaryAlias<"fsub{|r}p", SUBR_FPrST0, 0>;
defm : FpUnaryAlias<"fsubr",     SUBR_FST0r, 0>;
defm : FpUnaryAlias<"fsub{r|}p", SUB_FPrST0, 0>;
defm : FpUnaryAlias<"fmul",      MUL_FST0r, 0>;
defm : FpUnaryAlias<"fmulp",     MUL_FPrST0, 0>;
defm : FpUnaryAlias<"fdiv",      DIV_FST0r, 0>;
defm : FpUnaryAlias<"fdiv{|r}p", DIVR_FPrST0, 0>;
defm : FpUnaryAlias<"fdivr",     DIVR_FST0r, 0>;
defm : FpUnaryAlias<"fdiv{r|}p", DIV_FPrST0, 0>;
defm : FpUnaryAlias<"fcomi",     COM_FIr, 0>;
defm : FpUnaryAlias<"fucomi",    UCOM_FIr, 0>;
defm : FpUnaryAlias<"fcompi",    COM_FIPr, 0>;
defm : FpUnaryAlias<"fucompi",   UCOM_FIPr, 0>;
// Handle "f{mulp,addp} $op, %st(0)" the same as "f{mulp,addp} $op", since they
// commute. We also allow fdiv[r]p/fsubrp even though they don't commute,
// solely because gas supports it.
def : InstAlias<"faddp\t{$op, %st|st, $op}", (ADD_FPrST0 RSTi:$op), 0>;
def : InstAlias<"fmulp\t{$op, %st|st, $op}", (MUL_FPrST0 RSTi:$op), 0>;
def : InstAlias<"fsub{|r}p\t{$op, %st|st, $op}", (SUBR_FPrST0 RSTi:$op), 0>;
def : InstAlias<"fsub{r|}p\t{$op, %st|st, $op}", (SUB_FPrST0 RSTi:$op), 0>;
def : InstAlias<"fdiv{|r}p\t{$op, %st|st, $op}", (DIVR_FPrST0 RSTi:$op), 0>;
def : InstAlias<"fdiv{r|}p\t{$op, %st|st, $op}", (DIV_FPrST0 RSTi:$op), 0>;

// Operand-less "fnstsw" maps to the AX-register form (FNSTSW16r).
def : InstAlias<"fnstsw" , (FNSTSW16r), 0>;
// lcall and ljmp aliases. This seems to be an odd mapping in 64-bit mode, but
// this is compatible with what GAS does.
def : InstAlias<"lcall\t$seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg), 0>, Requires<[In32BitMode]>;
def : InstAlias<"ljmp\t$seg, $off",  (FARJMP32i  i32imm:$off, i16imm:$seg), 0>, Requires<[In32BitMode]>;
def : InstAlias<"lcall\t{*}$dst",    (FARCALL32m opaquemem:$dst), 0>, Requires<[Not16BitMode]>;
def : InstAlias<"ljmp\t{*}$dst",     (FARJMP32m  opaquemem:$dst), 0>, Requires<[Not16BitMode]>;
def : InstAlias<"lcall\t$seg, $off", (FARCALL16i i16imm:$off, i16imm:$seg), 0>, Requires<[In16BitMode]>;
def : InstAlias<"ljmp\t$seg, $off",  (FARJMP16i  i16imm:$off, i16imm:$seg), 0>, Requires<[In16BitMode]>;
def : InstAlias<"lcall\t{*}$dst",    (FARCALL16m opaquemem:$dst), 0>, Requires<[In16BitMode]>;
def : InstAlias<"ljmp\t{*}$dst",     (FARJMP16m  opaquemem:$dst), 0>, Requires<[In16BitMode]>;

// Indirect "jmp *<mem>" takes the pointer width of the current mode.
def : InstAlias<"jmp\t{*}$dst", (JMP64m i64mem:$dst), 0, "att">, Requires<[In64BitMode]>;
def : InstAlias<"jmp\t{*}$dst", (JMP32m i32mem:$dst), 0, "att">, Requires<[In32BitMode]>;
def : InstAlias<"jmp\t{*}$dst", (JMP16m i16mem:$dst), 0, "att">, Requires<[In16BitMode]>;
3557 // "imul <imm>, B" is an alias for "imul <imm>, B, B".
3558 def : InstAlias<"imul{w}\t{$imm, $r|$r, $imm}", (IMUL16rri GR16:$r, GR16:$r, i16imm:$imm), 0>;
3559 def : InstAlias<"imul{w}\t{$imm, $r|$r, $imm}", (IMUL16rri8 GR16:$r, GR16:$r, i16i8imm:$imm), 0>;
3560 def : InstAlias<"imul{l}\t{$imm, $r|$r, $imm}", (IMUL32rri GR32:$r, GR32:$r, i32imm:$imm), 0>;
3561 def : InstAlias<"imul{l}\t{$imm, $r|$r, $imm}", (IMUL32rri8 GR32:$r, GR32:$r, i32i8imm:$imm), 0>;
3562 def : InstAlias<"imul{q}\t{$imm, $r|$r, $imm}", (IMUL64rri32 GR64:$r, GR64:$r, i64i32imm:$imm), 0>;
3563 def : InstAlias<"imul{q}\t{$imm, $r|$r, $imm}", (IMUL64rri8 GR64:$r, GR64:$r, i64i8imm:$imm), 0>;
3565 // ins aliases. Accept the mnemonic suffix being omitted because it's implicit
3566 // in the destination.
3567 def : InstAlias<"ins\t{%dx, $dst|$dst, dx}", (INSB dstidx8:$dst), 0, "intel">;
3568 def : InstAlias<"ins\t{%dx, $dst|$dst, dx}", (INSW dstidx16:$dst), 0, "intel">;
3569 def : InstAlias<"ins\t{%dx, $dst|$dst, dx}", (INSL dstidx32:$dst), 0, "intel">;
3571 // outs aliases. Accept the mnemonic suffix being omitted because it's implicit
3573 def : InstAlias<"outs\t{$src, %dx|dx, $src}", (OUTSB srcidx8:$src), 0, "intel">;
3574 def : InstAlias<"outs\t{$src, %dx|dx, $src}", (OUTSW srcidx16:$src), 0, "intel">;
3575 def : InstAlias<"outs\t{$src, %dx|dx, $src}", (OUTSL srcidx32:$src), 0, "intel">;
3577 // inb %dx -> inb %al, %dx
3578 def : InstAlias<"inb\t{%dx|dx}", (IN8rr), 0>;
3579 def : InstAlias<"inw\t{%dx|dx}", (IN16rr), 0>;
3580 def : InstAlias<"inl\t{%dx|dx}", (IN32rr), 0>;
3581 def : InstAlias<"inb\t$port", (IN8ri u8imm:$port), 0>;
3582 def : InstAlias<"inw\t$port", (IN16ri u8imm:$port), 0>;
3583 def : InstAlias<"inl\t$port", (IN32ri u8imm:$port), 0>;
// jmp and call aliases for lcall and ljmp. jmp $42,$5 -> ljmp
def : InstAlias<"call\t$seg, $off",  (FARCALL16i i16imm:$off, i16imm:$seg)>, Requires<[In16BitMode]>;
def : InstAlias<"jmp\t$seg, $off",   (FARJMP16i  i16imm:$off, i16imm:$seg)>, Requires<[In16BitMode]>;
def : InstAlias<"call\t$seg, $off",  (FARCALL32i i32imm:$off, i16imm:$seg)>, Requires<[In32BitMode]>;
def : InstAlias<"jmp\t$seg, $off",   (FARJMP32i  i32imm:$off, i16imm:$seg)>, Requires<[In32BitMode]>;
def : InstAlias<"callw\t$seg, $off", (FARCALL16i i16imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
def : InstAlias<"jmpw\t$seg, $off",  (FARJMP16i  i16imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
def : InstAlias<"calll\t$seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
def : InstAlias<"jmpl\t$seg, $off",  (FARJMP32i  i32imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;

// Match 'movq <largeimm>, <reg>' as an alias for movabsq.
def : InstAlias<"mov{q}\t{$imm, $reg|$reg, $imm}", (MOV64ri GR64:$reg, i64imm:$imm), 0>;

// Match 'movd GR64, MMX' as an alias for movq to be compatible with gas,
// which supports this due to an old AMD documentation bug when 64-bit mode was
// added.
def : InstAlias<"movd\t{$src, $dst|$dst, $src}",
                (MMX_MOVD64to64rr VR64:$dst, GR64:$src), 0>;
def : InstAlias<"movd\t{$src, $dst|$dst, $src}",
                (MMX_MOVD64from64rr GR64:$dst, VR64:$src), 0>;
// movsx aliases: suffix-less AT&T "movsx" resolves by operand register sizes.
def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX16rr8  GR16:$dst, GR8:$src),   0, "att">;
def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX16rm8  GR16:$dst, i8mem:$src), 0, "att">;
def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX32rr8  GR32:$dst, GR8:$src),   0, "att">;
def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX32rr16 GR32:$dst, GR16:$src),  0, "att">;
def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX64rr8  GR64:$dst, GR8:$src),   0, "att">;
def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX64rr16 GR64:$dst, GR16:$src),  0, "att">;
def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX64rr32 GR64:$dst, GR32:$src),  0, "att">;

// movzx aliases: suffix-less AT&T "movzx" resolves by operand register sizes.
def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX16rr8  GR16:$dst, GR8:$src),   0, "att">;
def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX16rm8  GR16:$dst, i8mem:$src), 0, "att">;
def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX32rr8  GR32:$dst, GR8:$src),   0, "att">;
def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX32rr16 GR32:$dst, GR16:$src),  0, "att">;
def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX64rr8  GR64:$dst, GR8:$src),   0, "att">;
def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX64rr16 GR64:$dst, GR16:$src),  0, "att">;
// Note: No GR32->GR64 movzx form.
// outb %dx -> outb %al, %dx
def : InstAlias<"outb\t{%dx|dx}", (OUT8rr),  0>;
def : InstAlias<"outw\t{%dx|dx}", (OUT16rr), 0>;
def : InstAlias<"outl\t{%dx|dx}", (OUT32rr), 0>;
def : InstAlias<"outb\t$port", (OUT8ir  u8imm:$port), 0>;
def : InstAlias<"outw\t$port", (OUT16ir u8imm:$port), 0>;
def : InstAlias<"outl\t$port", (OUT32ir u8imm:$port), 0>;

// 'sldt <mem>' can be encoded with either sldtw or sldtq with the same
// effect (both store to a 16-bit mem). Force to sldtw to avoid ambiguity
// errors, since its encoding is the most compact.
def : InstAlias<"sldt $mem", (SLDT16m i16mem:$mem), 0>;
// shld/shrd op,op -> shld op, op, CL
def : InstAlias<"shld{w}\t{$r2, $r1|$r1, $r2}", (SHLD16rrCL GR16:$r1, GR16:$r2), 0>;
def : InstAlias<"shld{l}\t{$r2, $r1|$r1, $r2}", (SHLD32rrCL GR32:$r1, GR32:$r2), 0>;
def : InstAlias<"shld{q}\t{$r2, $r1|$r1, $r2}", (SHLD64rrCL GR64:$r1, GR64:$r2), 0>;
def : InstAlias<"shrd{w}\t{$r2, $r1|$r1, $r2}", (SHRD16rrCL GR16:$r1, GR16:$r2), 0>;
def : InstAlias<"shrd{l}\t{$r2, $r1|$r1, $r2}", (SHRD32rrCL GR32:$r1, GR32:$r2), 0>;
def : InstAlias<"shrd{q}\t{$r2, $r1|$r1, $r2}", (SHRD64rrCL GR64:$r1, GR64:$r2), 0>;

// Memory-destination forms of the same shorthand.
def : InstAlias<"shld{w}\t{$reg, $mem|$mem, $reg}", (SHLD16mrCL i16mem:$mem, GR16:$reg), 0>;
def : InstAlias<"shld{l}\t{$reg, $mem|$mem, $reg}", (SHLD32mrCL i32mem:$mem, GR32:$reg), 0>;
def : InstAlias<"shld{q}\t{$reg, $mem|$mem, $reg}", (SHLD64mrCL i64mem:$mem, GR64:$reg), 0>;
def : InstAlias<"shrd{w}\t{$reg, $mem|$mem, $reg}", (SHRD16mrCL i16mem:$mem, GR16:$reg), 0>;
def : InstAlias<"shrd{l}\t{$reg, $mem|$mem, $reg}", (SHRD32mrCL i32mem:$mem, GR32:$reg), 0>;
def : InstAlias<"shrd{q}\t{$reg, $mem|$mem, $reg}", (SHRD64mrCL i64mem:$mem, GR64:$reg), 0>;
/* FIXME: This is disabled because the asm matcher is currently incapable of
 * matching a fixed immediate like $1.
// "shl X, $1" is an alias for "shl X".
multiclass ShiftRotateByOneAlias<string Mnemonic, string Opc> {
  def : InstAlias<!strconcat(Mnemonic, "b $op, $$1"),
                  (!cast<Instruction>(!strconcat(Opc, "8r1")) GR8:$op)>;
  def : InstAlias<!strconcat(Mnemonic, "w $op, $$1"),
                  (!cast<Instruction>(!strconcat(Opc, "16r1")) GR16:$op)>;
  def : InstAlias<!strconcat(Mnemonic, "l $op, $$1"),
                  (!cast<Instruction>(!strconcat(Opc, "32r1")) GR32:$op)>;
  def : InstAlias<!strconcat(Mnemonic, "q $op, $$1"),
                  (!cast<Instruction>(!strconcat(Opc, "64r1")) GR64:$op)>;
  def : InstAlias<!strconcat(Mnemonic, "b $op, $$1"),
                  (!cast<Instruction>(!strconcat(Opc, "8m1")) i8mem:$op)>;
  def : InstAlias<!strconcat(Mnemonic, "w $op, $$1"),
                  (!cast<Instruction>(!strconcat(Opc, "16m1")) i16mem:$op)>;
  def : InstAlias<!strconcat(Mnemonic, "l $op, $$1"),
                  (!cast<Instruction>(!strconcat(Opc, "32m1")) i32mem:$op)>;
  def : InstAlias<!strconcat(Mnemonic, "q $op, $$1"),
                  (!cast<Instruction>(!strconcat(Opc, "64m1")) i64mem:$op)>;
}

defm : ShiftRotateByOneAlias<"rcl", "RCL">;
defm : ShiftRotateByOneAlias<"rcr", "RCR">;
defm : ShiftRotateByOneAlias<"rol", "ROL">;
defm : ShiftRotateByOneAlias<"ror", "ROR">;
FIXME */
// test: We accept "testX <reg>, <mem>" and "testX <mem>, <reg>" as synonyms.
// TEST is commutative in effect, so both operand orders assemble to the same
// TEST*mr encoding. Parse-only (EmitPriority 0): the printer never uses these.
def : InstAlias<"test{b}\t{$mem, $val|$val, $mem}",
                (TEST8mr i8mem :$mem, GR8 :$val), 0>;
def : InstAlias<"test{w}\t{$mem, $val|$val, $mem}",
                (TEST16mr i16mem:$mem, GR16:$val), 0>;
def : InstAlias<"test{l}\t{$mem, $val|$val, $mem}",
                (TEST32mr i32mem:$mem, GR32:$val), 0>;
def : InstAlias<"test{q}\t{$mem, $val|$val, $mem}",
                (TEST64mr i64mem:$mem, GR64:$val), 0>;
// xchg: We accept "xchgX <reg>, <mem>" and "xchgX <mem>, <reg>" as synonyms.
// Both orders map to the XCHG*rm form (register first, memory second).
// Parse-only (EmitPriority 0).
def : InstAlias<"xchg{b}\t{$mem, $val|$val, $mem}",
                (XCHG8rm GR8 :$val, i8mem :$mem), 0>;
def : InstAlias<"xchg{w}\t{$mem, $val|$val, $mem}",
                (XCHG16rm GR16:$val, i16mem:$mem), 0>;
def : InstAlias<"xchg{l}\t{$mem, $val|$val, $mem}",
                (XCHG32rm GR32:$val, i32mem:$mem), 0>;
def : InstAlias<"xchg{q}\t{$mem, $val|$val, $mem}",
                (XCHG64rm GR64:$val, i64mem:$mem), 0>;
// xchg: We accept "xchgX <reg>, %eax" and "xchgX %eax, <reg>" as synonyms.
// These select the short accumulator forms (XCHG*ar) when one operand is the
// A register of matching width. Parse-only (EmitPriority 0).
def : InstAlias<"xchg{w}\t{%ax, $src|$src, ax}", (XCHG16ar GR16:$src), 0>;
def : InstAlias<"xchg{l}\t{%eax, $src|$src, eax}", (XCHG32ar GR32:$src), 0>;
def : InstAlias<"xchg{q}\t{%rax, $src|$src, rax}", (XCHG64ar GR64:$src), 0>;
// In 64-bit mode, xchg %eax, %eax can't be encoded with the 0x90 opcode we
// would get by default because it's defined as NOP. But xchg %eax, %eax implies
// implicit zeroing of the upper 32 bits. So alias to the longer encoding.
// Guarded by In64BitMode: in 32-bit mode the default 0x90 encoding is fine.
def : InstAlias<"xchg{l}\t{%eax, %eax|eax, eax}",
                (XCHG32rr EAX, EAX), 0>, Requires<[In64BitMode]>;
// xchg %rax, %rax is a nop in x86-64 and can be encoded as such. Without this
// we emit an unneeded REX.w prefix. Mapping to NOOP gives the plain 0x90 byte.
def : InstAlias<"xchg{q}\t{%rax, %rax|rax, rax}", (NOOP), 0>;
// These aliases exist to get the parser to prioritize matching 8-bit
// immediate encodings over matching the implicit ax/eax/rax encodings. By
// explicitly mentioning the A register here, these entries will be ordered
// first due to the more explicit immediate type.
// 16-bit forms: an imm8-range immediate with %ax selects the *16ri8
// (sign-extended 8-bit immediate) encoding rather than the AX-implicit form.
def : InstAlias<"adc{w}\t{$imm, %ax|ax, $imm}", (ADC16ri8 AX, i16i8imm:$imm), 0>;
def : InstAlias<"add{w}\t{$imm, %ax|ax, $imm}", (ADD16ri8 AX, i16i8imm:$imm), 0>;
def : InstAlias<"and{w}\t{$imm, %ax|ax, $imm}", (AND16ri8 AX, i16i8imm:$imm), 0>;
def : InstAlias<"cmp{w}\t{$imm, %ax|ax, $imm}", (CMP16ri8 AX, i16i8imm:$imm), 0>;
def : InstAlias<"or{w}\t{$imm, %ax|ax, $imm}",  (OR16ri8 AX,  i16i8imm:$imm), 0>;
def : InstAlias<"sbb{w}\t{$imm, %ax|ax, $imm}", (SBB16ri8 AX, i16i8imm:$imm), 0>;
def : InstAlias<"sub{w}\t{$imm, %ax|ax, $imm}", (SUB16ri8 AX, i16i8imm:$imm), 0>;
def : InstAlias<"xor{w}\t{$imm, %ax|ax, $imm}", (XOR16ri8 AX, i16i8imm:$imm), 0>;
// 32-bit forms: same imm8-priority trick for %eax (selects *32ri8).
def : InstAlias<"adc{l}\t{$imm, %eax|eax, $imm}", (ADC32ri8 EAX, i32i8imm:$imm), 0>;
def : InstAlias<"add{l}\t{$imm, %eax|eax, $imm}", (ADD32ri8 EAX, i32i8imm:$imm), 0>;
def : InstAlias<"and{l}\t{$imm, %eax|eax, $imm}", (AND32ri8 EAX, i32i8imm:$imm), 0>;
def : InstAlias<"cmp{l}\t{$imm, %eax|eax, $imm}", (CMP32ri8 EAX, i32i8imm:$imm), 0>;
def : InstAlias<"or{l}\t{$imm, %eax|eax, $imm}",  (OR32ri8 EAX,  i32i8imm:$imm), 0>;
def : InstAlias<"sbb{l}\t{$imm, %eax|eax, $imm}", (SBB32ri8 EAX, i32i8imm:$imm), 0>;
def : InstAlias<"sub{l}\t{$imm, %eax|eax, $imm}", (SUB32ri8 EAX, i32i8imm:$imm), 0>;
def : InstAlias<"xor{l}\t{$imm, %eax|eax, $imm}", (XOR32ri8 EAX, i32i8imm:$imm), 0>;
// 64-bit forms: same imm8-priority trick for %rax (selects *64ri8).
def : InstAlias<"adc{q}\t{$imm, %rax|rax, $imm}", (ADC64ri8 RAX, i64i8imm:$imm), 0>;
def : InstAlias<"add{q}\t{$imm, %rax|rax, $imm}", (ADD64ri8 RAX, i64i8imm:$imm), 0>;
def : InstAlias<"and{q}\t{$imm, %rax|rax, $imm}", (AND64ri8 RAX, i64i8imm:$imm), 0>;
def : InstAlias<"cmp{q}\t{$imm, %rax|rax, $imm}", (CMP64ri8 RAX, i64i8imm:$imm), 0>;
def : InstAlias<"or{q}\t{$imm, %rax|rax, $imm}",  (OR64ri8 RAX,  i64i8imm:$imm), 0>;
def : InstAlias<"sbb{q}\t{$imm, %rax|rax, $imm}", (SBB64ri8 RAX, i64i8imm:$imm), 0>;
def : InstAlias<"sub{q}\t{$imm, %rax|rax, $imm}", (SUB64ri8 RAX, i64i8imm:$imm), 0>;
def : InstAlias<"xor{q}\t{$imm, %rax|rax, $imm}", (XOR64ri8 RAX, i64i8imm:$imm), 0>;