//===-- X86InstrInfo.td - Main X86 Instruction Definition --*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 instruction set, defining the instructions, and
// properties of the instructions which are needed for code generation, machine
// code emission, and analysis.
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// X86 specific DAG Nodes.
//

// Compare/test: one i32 result (EFLAGS), two same-typed operands.
def SDTX86CmpTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisSameAs<1, 2>]>;

// FP compare with an i8 condition-code immediate as operand 3.
def SDTX86Cmps : SDTypeProfile<1, 3, [SDTCisFP<0>, SDTCisSameAs<1, 2>, SDTCisVT<3, i8>]>;
//def SDTX86Cmpss : SDTypeProfile<1, 3, [SDTCisVT<0, f32>, SDTCisSameAs<1, 2>, SDTCisVT<3, i8>]>;

// Conditional move: result and both value operands share a type; operand 3 is
// the i8 condition code, operand 4 is EFLAGS (i32).
def SDTX86Cmov    : SDTypeProfile<1, 4,
                                  [SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>,
                                   SDTCisVT<3, i8>, SDTCisVT<4, i32>]>;
// Unary and binary operator instructions that set EFLAGS as a side-effect.
// NOTE(review): the opening constraint lines of these profiles were dropped in
// the damaged source; restored to match upstream LLVM — verify against upstream.
def SDTUnaryArithWithFlags : SDTypeProfile<2, 1,
                                           [SDTCisSameAs<0, 2>,
                                            SDTCisInt<0>, SDTCisVT<1, i32>]>;

def SDTBinaryArithWithFlags : SDTypeProfile<2, 2,
                                            [SDTCisSameAs<0, 2>,
                                             SDTCisSameAs<0, 3>,
                                             SDTCisInt<0>, SDTCisVT<1, i32>]>;

// SDTBinaryArithWithFlagsInOut - RES1, EFLAGS = op LHS, RHS, EFLAGS
def SDTBinaryArithWithFlagsInOut : SDTypeProfile<2, 3,
                                                 [SDTCisSameAs<0, 2>,
                                                  SDTCisSameAs<0, 3>,
                                                  SDTCisInt<0>,
                                                  SDTCisVT<1, i32>,
                                                  SDTCisVT<4, i32>]>;

// RES1, RES2, FLAGS = op LHS, RHS
def SDT2ResultBinaryArithWithFlags : SDTypeProfile<3, 2,
                                                   [SDTCisSameAs<0, 1>,
                                                    SDTCisSameAs<0, 2>,
                                                    SDTCisSameAs<0, 3>,
                                                    SDTCisInt<0>, SDTCisVT<1, i32>]>;
// Conditional branch: target (OtherVT), i8 condition code, i32 EFLAGS.
def SDTX86BrCond  : SDTypeProfile<0, 3,
                                  [SDTCisVT<0, OtherVT>,
                                   SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;

// NOTE(review): the first constraint line of each SetCC profile was dropped in
// the damaged source; restored to match upstream LLVM — verify against upstream.
def SDTX86SetCC   : SDTypeProfile<1, 2,
                                  [SDTCisVT<0, i8>,
                                   SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;
def SDTX86SetCC_C : SDTypeProfile<1, 2,
                                  [SDTCisInt<0>,
                                   SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;

def SDTX86sahf : SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisVT<1, i8>]>;

def SDTX86rdrand : SDTypeProfile<2, 0, [SDTCisInt<0>, SDTCisVT<1, i32>]>;

// Compare-and-swap: pointer, swap value, and an i8 operand-size immediate.
def SDTX86cas : SDTypeProfile<0, 3, [SDTCisPtrTy<0>, SDTCisInt<1>,
                                     SDTCisVT<2, i8>]>;
def SDTX86caspair : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
def SDTX86caspairSaveEbx8 : SDTypeProfile<1, 3,
                                          [SDTCisVT<0, i32>, SDTCisPtrTy<1>,
                                           SDTCisVT<2, i32>, SDTCisVT<3, i32>]>;
def SDTX86caspairSaveRbx16 : SDTypeProfile<1, 3,
                                           [SDTCisVT<0, i64>, SDTCisPtrTy<1>,
                                            SDTCisVT<2, i64>, SDTCisVT<3, i64>]>;
// NOTE(review): trailing constraint lines of several profiles below were dropped
// in the damaged source; restored to match upstream LLVM — verify against upstream.
def SDTLockBinaryArithWithFlags : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
                                                       SDTCisPtrTy<1>,
                                                       SDTCisInt<2>]>;

def SDTLockUnaryArithWithFlags : SDTypeProfile<1, 1, [SDTCisVT<0, i32>,
                                                      SDTCisPtrTy<1>]>;

def SDTX86Ret     : SDTypeProfile<0, -1, [SDTCisVT<0, i32>]>;

def SDT_X86CallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>,
                                          SDTCisVT<1, i32>]>;
def SDT_X86CallSeqEnd   : SDCallSeqEnd<[SDTCisVT<0, i32>,
                                        SDTCisVT<1, i32>]>;

def SDT_X86Call   : SDTypeProfile<0, -1, [SDTCisVT<0, iPTR>]>;

def SDT_X86NtBrind : SDTypeProfile<0, -1, [SDTCisVT<0, iPTR>]>;

def SDT_X86VASTART_SAVE_XMM_REGS : SDTypeProfile<0, -1, [SDTCisVT<0, i8>,
                                                         SDTCisVT<1, iPTR>,
                                                         SDTCisVT<2, iPTR>]>;

def SDT_X86VAARG_64 : SDTypeProfile<1, -1, [SDTCisPtrTy<0>,
                                            SDTCisPtrTy<1>,
                                            SDTCisVT<2, i32>,
                                            SDTCisVT<3, i8>,
                                            SDTCisVT<4, i32>]>;

def SDTX86RepStr  : SDTypeProfile<0, 1, [SDTCisVT<0, OtherVT>]>;

def SDTX86Void    : SDTypeProfile<0, 0, []>;

def SDTX86Wrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>;

def SDT_X86TLSADDR : SDTypeProfile<0, 1, [SDTCisInt<0>]>;

def SDT_X86TLSBASEADDR : SDTypeProfile<0, 1, [SDTCisInt<0>]>;

def SDT_X86TLSCALL : SDTypeProfile<0, 1, [SDTCisInt<0>]>;

def SDT_X86WIN_ALLOCA : SDTypeProfile<0, 1, [SDTCisVT<0, iPTR>]>;

def SDT_X86SEG_ALLOCA : SDTypeProfile<1, 1, [SDTCisVT<0, iPTR>, SDTCisVT<1, iPTR>]>;

def SDT_X86EHRET : SDTypeProfile<0, 1, [SDTCisInt<0>]>;

def SDT_X86TCRET : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisVT<1, i32>]>;

def SDT_X86MEMBARRIER : SDTypeProfile<0, 0, []>;
def X86MemBarrier : SDNode<"X86ISD::MEMBARRIER", SDT_X86MEMBARRIER,
                            [SDNPHasChain,SDNPSideEffect]>;
// NOTE(review): MFENCE's node-property list was dropped in the damaged source;
// restored to [SDNPHasChain] per upstream LLVM — verify.
def X86MFence : SDNode<"X86ISD::MFENCE", SDT_X86MEMBARRIER,
                        [SDNPHasChain]>;

def X86bsf     : SDNode<"X86ISD::BSF",      SDTUnaryArithWithFlags>;
def X86bsr     : SDNode<"X86ISD::BSR",      SDTUnaryArithWithFlags>;
def X86shld    : SDNode<"X86ISD::SHLD",     SDTIntShiftDOp>;
def X86shrd    : SDNode<"X86ISD::SHRD",     SDTIntShiftDOp>;

def X86cmp     : SDNode<"X86ISD::CMP" ,     SDTX86CmpTest>;
def X86bt      : SDNode<"X86ISD::BT",       SDTX86CmpTest>;

def X86cmov    : SDNode<"X86ISD::CMOV",     SDTX86Cmov>;
def X86brcond  : SDNode<"X86ISD::BRCOND",   SDTX86BrCond,
                        [SDNPHasChain]>;
def X86setcc   : SDNode<"X86ISD::SETCC",    SDTX86SetCC>;
def X86setcc_c : SDNode<"X86ISD::SETCC_CARRY", SDTX86SetCC_C>;

def X86sahf    : SDNode<"X86ISD::SAHF",     SDTX86sahf>;

def X86rdrand  : SDNode<"X86ISD::RDRAND",   SDTX86rdrand,
                        [SDNPHasChain, SDNPSideEffect]>;

def X86rdseed  : SDNode<"X86ISD::RDSEED",   SDTX86rdrand,
                        [SDNPHasChain, SDNPSideEffect]>;

// Locked compare-and-exchange pseudo DAG nodes; all touch memory and carry
// glue for the implicit EAX/EDX register dependencies.
def X86cas : SDNode<"X86ISD::LCMPXCHG_DAG", SDTX86cas,
                        [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
                         SDNPMayLoad, SDNPMemOperand]>;
def X86cas8 : SDNode<"X86ISD::LCMPXCHG8_DAG", SDTX86caspair,
                        [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
                         SDNPMayLoad, SDNPMemOperand]>;
def X86cas16 : SDNode<"X86ISD::LCMPXCHG16_DAG", SDTX86caspair,
                        [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
                         SDNPMayLoad, SDNPMemOperand]>;
def X86cas8save_ebx : SDNode<"X86ISD::LCMPXCHG8_SAVE_EBX_DAG",
                             SDTX86caspairSaveEbx8,
                             [SDNPHasChain, SDNPInGlue, SDNPOutGlue,
                              SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def X86cas16save_rbx : SDNode<"X86ISD::LCMPXCHG16_SAVE_RBX_DAG",
                              SDTX86caspairSaveRbx16,
                              [SDNPHasChain, SDNPInGlue, SDNPOutGlue,
                               SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def X86retflag : SDNode<"X86ISD::RET_FLAG", SDTX86Ret,
                        [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def X86iret : SDNode<"X86ISD::IRET", SDTX86Ret,
                        [SDNPHasChain, SDNPOptInGlue]>;

def X86vastart_save_xmm_regs :
                 SDNode<"X86ISD::VASTART_SAVE_XMM_REGS",
                        SDT_X86VASTART_SAVE_XMM_REGS,
                        [SDNPHasChain, SDNPVariadic]>;
// NOTE(review): the "def X86vaarg64 :" and "def X86callseq_end :" header lines
// were dropped in the damaged source; restored per upstream LLVM — verify.
def X86vaarg64 :
                 SDNode<"X86ISD::VAARG_64", SDT_X86VAARG_64,
                        [SDNPHasChain, SDNPMayLoad, SDNPMayStore,
                         SDNPMemOperand]>;
def X86callseq_start :
                 SDNode<"ISD::CALLSEQ_START", SDT_X86CallSeqStart,
                        [SDNPHasChain, SDNPOutGlue]>;
def X86callseq_end :
                 SDNode<"ISD::CALLSEQ_END",   SDT_X86CallSeqEnd,
                        [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;

def X86call    : SDNode<"X86ISD::CALL",     SDT_X86Call,
                        [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
                         SDNPVariadic]>;

def X86NoTrackCall : SDNode<"X86ISD::NT_CALL", SDT_X86Call,
                            [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
                             SDNPVariadic]>;
def X86NoTrackBrind : SDNode<"X86ISD::NT_BRIND", SDT_X86NtBrind,
                             [SDNPHasChain]>;

def X86rep_stos: SDNode<"X86ISD::REP_STOS", SDTX86RepStr,
                        [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore]>;
def X86rep_movs: SDNode<"X86ISD::REP_MOVS", SDTX86RepStr,
                        [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
                         SDNPMayLoad]>;

def X86rdtsc   : SDNode<"X86ISD::RDTSC_DAG", SDTX86Void,
                        [SDNPHasChain, SDNPOutGlue, SDNPSideEffect]>;
def X86rdtscp  : SDNode<"X86ISD::RDTSCP_DAG", SDTX86Void,
                        [SDNPHasChain, SDNPOutGlue, SDNPSideEffect]>;
def X86rdpmc   : SDNode<"X86ISD::RDPMC_DAG", SDTX86Void,
                        [SDNPHasChain, SDNPOutGlue, SDNPSideEffect]>;
def X86Wrapper    : SDNode<"X86ISD::Wrapper",     SDTX86Wrapper>;
def X86WrapperRIP : SDNode<"X86ISD::WrapperRIP",  SDTX86Wrapper>;

// NOTE(review): closing constraint line was dropped in the damaged source;
// restored to SDTCisInt<1> per upstream LLVM — verify.
def X86RecoverFrameAlloc : SDNode<"ISD::LOCAL_RECOVER",
                                  SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
                                                       SDTCisInt<1>]>>;

def X86tlsaddr : SDNode<"X86ISD::TLSADDR", SDT_X86TLSADDR,
                        [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;

def X86tlsbaseaddr : SDNode<"X86ISD::TLSBASEADDR", SDT_X86TLSBASEADDR,
                        [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;

def X86ehret : SDNode<"X86ISD::EH_RETURN", SDT_X86EHRET,
                        [SDNPHasChain]>;

def X86eh_sjlj_setjmp  : SDNode<"X86ISD::EH_SJLJ_SETJMP",
                                SDTypeProfile<1, 1, [SDTCisInt<0>,
                                                     SDTCisPtrTy<1>]>,
                                [SDNPHasChain, SDNPSideEffect]>;
def X86eh_sjlj_longjmp : SDNode<"X86ISD::EH_SJLJ_LONGJMP",
                                SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>,
                                [SDNPHasChain, SDNPSideEffect]>;
def X86eh_sjlj_setup_dispatch : SDNode<"X86ISD::EH_SJLJ_SETUP_DISPATCH",
                                       SDTypeProfile<0, 0, []>,
                                       [SDNPHasChain, SDNPSideEffect]>;

def X86tcret : SDNode<"X86ISD::TC_RETURN", SDT_X86TCRET,
                        [SDNPHasChain,  SDNPOptInGlue, SDNPVariadic]>;
// Arithmetic nodes that also produce EFLAGS. Commutative ops carry
// SDNPCommutative so the DAG combiner may swap their operands.
// NOTE(review): the dropped "[SDNPCommutative]>;" / "SDNPMemOperand]>;" trailer
// lines were restored per upstream LLVM — verify.
def X86add_flag  : SDNode<"X86ISD::ADD",  SDTBinaryArithWithFlags,
                          [SDNPCommutative]>;
def X86sub_flag  : SDNode<"X86ISD::SUB",  SDTBinaryArithWithFlags>;
def X86smul_flag : SDNode<"X86ISD::SMUL", SDTBinaryArithWithFlags,
                          [SDNPCommutative]>;
def X86umul_flag : SDNode<"X86ISD::UMUL", SDT2ResultBinaryArithWithFlags,
                          [SDNPCommutative]>;
def X86adc_flag  : SDNode<"X86ISD::ADC",  SDTBinaryArithWithFlagsInOut>;
def X86sbb_flag  : SDNode<"X86ISD::SBB",  SDTBinaryArithWithFlagsInOut>;

def X86or_flag   : SDNode<"X86ISD::OR",   SDTBinaryArithWithFlags,
                          [SDNPCommutative]>;
def X86xor_flag  : SDNode<"X86ISD::XOR",  SDTBinaryArithWithFlags,
                          [SDNPCommutative]>;
def X86and_flag  : SDNode<"X86ISD::AND",  SDTBinaryArithWithFlags,
                          [SDNPCommutative]>;

// Atomic (implicitly LOCK-prefixed) RMW arithmetic on memory.
def X86lock_add  : SDNode<"X86ISD::LADD", SDTLockBinaryArithWithFlags,
                          [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
                           SDNPMemOperand]>;
def X86lock_sub  : SDNode<"X86ISD::LSUB", SDTLockBinaryArithWithFlags,
                          [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
                           SDNPMemOperand]>;
def X86lock_or   : SDNode<"X86ISD::LOR",  SDTLockBinaryArithWithFlags,
                          [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
                           SDNPMemOperand]>;
def X86lock_xor  : SDNode<"X86ISD::LXOR", SDTLockBinaryArithWithFlags,
                          [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
                           SDNPMemOperand]>;
def X86lock_and  : SDNode<"X86ISD::LAND", SDTLockBinaryArithWithFlags,
                          [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
                           SDNPMemOperand]>;

def X86bextr  : SDNode<"X86ISD::BEXTR",  SDTIntBinOp>;

def X86bzhi   : SDNode<"X86ISD::BZHI",   SDTIntBinOp>;

def X86mul_imm : SDNode<"X86ISD::MUL_IMM", SDTIntBinOp>;

def X86WinAlloca : SDNode<"X86ISD::WIN_ALLOCA", SDT_X86WIN_ALLOCA,
                          [SDNPHasChain, SDNPOutGlue]>;

def X86SegAlloca : SDNode<"X86ISD::SEG_ALLOCA", SDT_X86SEG_ALLOCA,
                          [SDNPHasChain]>;

def X86TLSCall : SDNode<"X86ISD::TLSCALL", SDT_X86TLSCALL,
                        [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;

def X86lwpins : SDNode<"X86ISD::LWPINS",
                       SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisInt<1>,
                                            SDTCisVT<2, i32>, SDTCisVT<3, i32>]>,
                       [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPSideEffect]>;

def X86umwait : SDNode<"X86ISD::UMWAIT",
                       SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisInt<1>,
                                            SDTCisVT<2, i32>, SDTCisVT<3, i32>]>,
                       [SDNPHasChain, SDNPSideEffect]>;

def X86tpause : SDNode<"X86ISD::TPAUSE",
                       SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisInt<1>,
                                            SDTCisVT<2, i32>, SDTCisVT<3, i32>]>,
                       [SDNPHasChain, SDNPSideEffect]>;
//===----------------------------------------------------------------------===//
// X86 Operand Definitions.
//

// A version of ptr_rc which excludes SP, ESP, and RSP. This is used for
// the index operand of an address, to conform to x86 encoding restrictions.
def ptr_rc_nosp : PointerLikeRegClass<1>;

// *mem - Operand definitions for the funky X86 addressing mode operands.
//
def X86MemAsmOperand : AsmOperandClass {
 let Name = "Mem";
}
let RenderMethod = "addMemOperands", SuperClasses = [X86MemAsmOperand] in {
  def X86Mem8AsmOperand   : AsmOperandClass { let Name = "Mem8"; }
  def X86Mem16AsmOperand  : AsmOperandClass { let Name = "Mem16"; }
  def X86Mem32AsmOperand  : AsmOperandClass { let Name = "Mem32"; }
  def X86Mem64AsmOperand  : AsmOperandClass { let Name = "Mem64"; }
  def X86Mem80AsmOperand  : AsmOperandClass { let Name = "Mem80"; }
  def X86Mem128AsmOperand : AsmOperandClass { let Name = "Mem128"; }
  def X86Mem256AsmOperand : AsmOperandClass { let Name = "Mem256"; }
  def X86Mem512AsmOperand : AsmOperandClass { let Name = "Mem512"; }
  // Gather mem operands
  def X86Mem64_RC128Operand  : AsmOperandClass { let Name = "Mem64_RC128"; }
  def X86Mem128_RC128Operand : AsmOperandClass { let Name = "Mem128_RC128"; }
  def X86Mem256_RC128Operand : AsmOperandClass { let Name = "Mem256_RC128"; }
  def X86Mem128_RC256Operand : AsmOperandClass { let Name = "Mem128_RC256"; }
  def X86Mem256_RC256Operand : AsmOperandClass { let Name = "Mem256_RC256"; }

  def X86Mem64_RC128XOperand  : AsmOperandClass { let Name = "Mem64_RC128X"; }
  def X86Mem128_RC128XOperand : AsmOperandClass { let Name = "Mem128_RC128X"; }
  def X86Mem256_RC128XOperand : AsmOperandClass { let Name = "Mem256_RC128X"; }
  def X86Mem128_RC256XOperand : AsmOperandClass { let Name = "Mem128_RC256X"; }
  def X86Mem256_RC256XOperand : AsmOperandClass { let Name = "Mem256_RC256X"; }
  def X86Mem512_RC256XOperand : AsmOperandClass { let Name = "Mem512_RC256X"; }
  def X86Mem256_RC512Operand  : AsmOperandClass { let Name = "Mem256_RC512"; }
  def X86Mem512_RC512Operand  : AsmOperandClass { let Name = "Mem512_RC512"; }
}

def X86AbsMemAsmOperand : AsmOperandClass {
  let Name = "AbsMem";
  let SuperClasses = [X86MemAsmOperand];
}

// Base class for X86 memory operands: base reg, scale imm, index reg,
// displacement imm, and segment register.
class X86MemOperand<string printMethod,
          AsmOperandClass parserMatchClass = X86MemAsmOperand> : Operand<iPTR> {
  let PrintMethod = printMethod;
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, SEGMENT_REG);
  let ParserMatchClass = parserMatchClass;
  let OperandType = "OPERAND_MEMORY";
}

// Gather mem operands
class X86VMemOperand<RegisterClass RC, string printMethod,
                     AsmOperandClass parserMatchClass>
    : X86MemOperand<printMethod, parserMatchClass> {
  let MIOperandInfo = (ops ptr_rc, i8imm, RC, i32imm, SEGMENT_REG);
}

def anymem : X86MemOperand<"printanymem">;

// FIXME: Right now we allow any size during parsing, but we might want to
// restrict to only unsized memory.
def opaquemem : X86MemOperand<"printopaquemem">;
def i8mem   : X86MemOperand<"printi8mem",   X86Mem8AsmOperand>;
def i16mem  : X86MemOperand<"printi16mem",  X86Mem16AsmOperand>;
def i32mem  : X86MemOperand<"printi32mem",  X86Mem32AsmOperand>;
def i64mem  : X86MemOperand<"printi64mem",  X86Mem64AsmOperand>;
def i128mem : X86MemOperand<"printi128mem", X86Mem128AsmOperand>;
def i256mem : X86MemOperand<"printi256mem", X86Mem256AsmOperand>;
def i512mem : X86MemOperand<"printi512mem", X86Mem512AsmOperand>;
def f32mem  : X86MemOperand<"printf32mem",  X86Mem32AsmOperand>;
def f64mem  : X86MemOperand<"printf64mem",  X86Mem64AsmOperand>;
def f80mem  : X86MemOperand<"printf80mem",  X86Mem80AsmOperand>;
def f128mem : X86MemOperand<"printf128mem", X86Mem128AsmOperand>;
def f256mem : X86MemOperand<"printf256mem", X86Mem256AsmOperand>;
def f512mem : X86MemOperand<"printf512mem", X86Mem512AsmOperand>;

def v512mem : X86VMemOperand<VR512, "printf512mem", X86Mem512AsmOperand>;

// Gather mem operands
def vx64mem  : X86VMemOperand<VR128,  "printi64mem",  X86Mem64_RC128Operand>;
def vx128mem : X86VMemOperand<VR128,  "printi128mem", X86Mem128_RC128Operand>;
def vx256mem : X86VMemOperand<VR128,  "printi256mem", X86Mem256_RC128Operand>;
def vy128mem : X86VMemOperand<VR256,  "printi128mem", X86Mem128_RC256Operand>;
def vy256mem : X86VMemOperand<VR256,  "printi256mem", X86Mem256_RC256Operand>;

def vx64xmem  : X86VMemOperand<VR128X, "printi64mem",  X86Mem64_RC128XOperand>;
def vx128xmem : X86VMemOperand<VR128X, "printi128mem", X86Mem128_RC128XOperand>;
def vx256xmem : X86VMemOperand<VR128X, "printi256mem", X86Mem256_RC128XOperand>;
def vy128xmem : X86VMemOperand<VR256X, "printi128mem", X86Mem128_RC256XOperand>;
def vy256xmem : X86VMemOperand<VR256X, "printi256mem", X86Mem256_RC256XOperand>;
def vy512xmem : X86VMemOperand<VR256X, "printi512mem", X86Mem512_RC256XOperand>;
def vz256mem  : X86VMemOperand<VR512,  "printi256mem", X86Mem256_RC512Operand>;
def vz512mem  : X86VMemOperand<VR512,  "printi512mem", X86Mem512_RC512Operand>;
// A version of i8mem for use on x86-64 and x32 that uses a NOREX GPR instead
// of a plain GPR, so that it doesn't potentially require a REX prefix.
def ptr_rc_norex : PointerLikeRegClass<2>;
def ptr_rc_norex_nosp : PointerLikeRegClass<3>;

def i8mem_NOREX : Operand<iPTR> {
  let PrintMethod = "printi8mem";
  let MIOperandInfo = (ops ptr_rc_norex, i8imm, ptr_rc_norex_nosp, i32imm,
                       SEGMENT_REG);
  let ParserMatchClass = X86Mem8AsmOperand;
  let OperandType = "OPERAND_MEMORY";
}

// GPRs available for tailcall.
// It represents GR32_TC, GR64_TC or GR64_TCW64.
def ptr_rc_tailcall : PointerLikeRegClass<4>;

// Special i32mem for addresses of load folding tail calls. These are not
// allowed to use callee-saved registers since they must be scheduled
// after callee-saved register are popped.
def i32mem_TC : Operand<i32> {
  let PrintMethod = "printi32mem";
  let MIOperandInfo = (ops ptr_rc_tailcall, i8imm, ptr_rc_tailcall,
                       i32imm, SEGMENT_REG);
  let ParserMatchClass = X86Mem32AsmOperand;
  let OperandType = "OPERAND_MEMORY";
}

// Special i64mem for addresses of load folding tail calls. These are not
// allowed to use callee-saved registers since they must be scheduled
// after callee-saved register are popped.
def i64mem_TC : Operand<i64> {
  let PrintMethod = "printi64mem";
  let MIOperandInfo = (ops ptr_rc_tailcall, i8imm,
                       ptr_rc_tailcall, i32imm, SEGMENT_REG);
  let ParserMatchClass = X86Mem64AsmOperand;
  let OperandType = "OPERAND_MEMORY";
}

let OperandType = "OPERAND_PCREL",
    ParserMatchClass = X86AbsMemAsmOperand,
    PrintMethod = "printPCRelImm" in {
def i32imm_pcrel : Operand<i32>;
def i16imm_pcrel : Operand<i16>;

// Branch targets have OtherVT type and print as pc-relative values.
def brtarget : Operand<OtherVT>;
def brtarget8 : Operand<OtherVT>;

}

// Special parser to detect 16-bit mode to select 16-bit displacement.
def X86AbsMem16AsmOperand : AsmOperandClass {
  let Name = "AbsMem16";
  let RenderMethod = "addAbsMemOperands";
  let SuperClasses = [X86AbsMemAsmOperand];
}

// Branch targets have OtherVT type and print as pc-relative values.
let OperandType = "OPERAND_PCREL",
    PrintMethod = "printPCRelImm" in {
let ParserMatchClass = X86AbsMem16AsmOperand in
  def brtarget16 : Operand<OtherVT>;
let ParserMatchClass = X86AbsMemAsmOperand in
  def brtarget32 : Operand<OtherVT>;
}
// Parser classes for string-op source-index operands ((E/R)SI-relative).
let RenderMethod = "addSrcIdxOperands" in {
  def X86SrcIdx8Operand : AsmOperandClass {
    let Name = "SrcIdx8";
    let SuperClasses = [X86Mem8AsmOperand];
  }
  def X86SrcIdx16Operand : AsmOperandClass {
    let Name = "SrcIdx16";
    let SuperClasses = [X86Mem16AsmOperand];
  }
  def X86SrcIdx32Operand : AsmOperandClass {
    let Name = "SrcIdx32";
    let SuperClasses = [X86Mem32AsmOperand];
  }
  def X86SrcIdx64Operand : AsmOperandClass {
    let Name = "SrcIdx64";
    let SuperClasses = [X86Mem64AsmOperand];
  }
} // RenderMethod = "addSrcIdxOperands"

// Parser classes for string-op destination-index operands ((E/R)DI-relative).
let RenderMethod = "addDstIdxOperands" in {
  def X86DstIdx8Operand : AsmOperandClass {
    let Name = "DstIdx8";
    let SuperClasses = [X86Mem8AsmOperand];
  }
  def X86DstIdx16Operand : AsmOperandClass {
    let Name = "DstIdx16";
    let SuperClasses = [X86Mem16AsmOperand];
  }
  def X86DstIdx32Operand : AsmOperandClass {
    let Name = "DstIdx32";
    let SuperClasses = [X86Mem32AsmOperand];
  }
  def X86DstIdx64Operand : AsmOperandClass {
    let Name = "DstIdx64";
    let SuperClasses = [X86Mem64AsmOperand];
  }
} // RenderMethod = "addDstIdxOperands"
// Parser classes for moffs-style absolute memory operands; the name encodes
// <address size>_<operand size> in bits.
let RenderMethod = "addMemOffsOperands" in {
  def X86MemOffs16_8AsmOperand : AsmOperandClass {
    let Name = "MemOffs16_8";
    let SuperClasses = [X86Mem8AsmOperand];
  }
  def X86MemOffs16_16AsmOperand : AsmOperandClass {
    let Name = "MemOffs16_16";
    let SuperClasses = [X86Mem16AsmOperand];
  }
  def X86MemOffs16_32AsmOperand : AsmOperandClass {
    let Name = "MemOffs16_32";
    let SuperClasses = [X86Mem32AsmOperand];
  }
  def X86MemOffs32_8AsmOperand : AsmOperandClass {
    let Name = "MemOffs32_8";
    let SuperClasses = [X86Mem8AsmOperand];
  }
  def X86MemOffs32_16AsmOperand : AsmOperandClass {
    let Name = "MemOffs32_16";
    let SuperClasses = [X86Mem16AsmOperand];
  }
  def X86MemOffs32_32AsmOperand : AsmOperandClass {
    let Name = "MemOffs32_32";
    let SuperClasses = [X86Mem32AsmOperand];
  }
  def X86MemOffs32_64AsmOperand : AsmOperandClass {
    let Name = "MemOffs32_64";
    let SuperClasses = [X86Mem64AsmOperand];
  }
  def X86MemOffs64_8AsmOperand : AsmOperandClass {
    let Name = "MemOffs64_8";
    let SuperClasses = [X86Mem8AsmOperand];
  }
  def X86MemOffs64_16AsmOperand : AsmOperandClass {
    let Name = "MemOffs64_16";
    let SuperClasses = [X86Mem16AsmOperand];
  }
  def X86MemOffs64_32AsmOperand : AsmOperandClass {
    let Name = "MemOffs64_32";
    let SuperClasses = [X86Mem32AsmOperand];
  }
  def X86MemOffs64_64AsmOperand : AsmOperandClass {
    let Name = "MemOffs64_64";
    let SuperClasses = [X86Mem64AsmOperand];
  }
} // RenderMethod = "addMemOffsOperands"
// Source-index operand: only an implicit index register plus a segment.
class X86SrcIdxOperand<string printMethod, AsmOperandClass parserMatchClass>
    : X86MemOperand<printMethod, parserMatchClass> {
  let MIOperandInfo = (ops ptr_rc, SEGMENT_REG);
}

// Destination-index operand: only an implicit index register (ES is implied).
class X86DstIdxOperand<string printMethod, AsmOperandClass parserMatchClass>
    : X86MemOperand<printMethod, parserMatchClass> {
  let MIOperandInfo = (ops ptr_rc);
}

def srcidx8  : X86SrcIdxOperand<"printSrcIdx8",  X86SrcIdx8Operand>;
def srcidx16 : X86SrcIdxOperand<"printSrcIdx16", X86SrcIdx16Operand>;
def srcidx32 : X86SrcIdxOperand<"printSrcIdx32", X86SrcIdx32Operand>;
def srcidx64 : X86SrcIdxOperand<"printSrcIdx64", X86SrcIdx64Operand>;
def dstidx8  : X86DstIdxOperand<"printDstIdx8",  X86DstIdx8Operand>;
def dstidx16 : X86DstIdxOperand<"printDstIdx16", X86DstIdx16Operand>;
def dstidx32 : X86DstIdxOperand<"printDstIdx32", X86DstIdx32Operand>;
def dstidx64 : X86DstIdxOperand<"printDstIdx64", X86DstIdx64Operand>;

// moffs-style operand: an absolute-address immediate plus a segment.
class X86MemOffsOperand<Operand immOperand, string printMethod,
                        AsmOperandClass parserMatchClass>
    : X86MemOperand<printMethod, parserMatchClass> {
  let MIOperandInfo = (ops immOperand, SEGMENT_REG);
}

def offset16_8  : X86MemOffsOperand<i16imm, "printMemOffs8",
                                    X86MemOffs16_8AsmOperand>;
def offset16_16 : X86MemOffsOperand<i16imm, "printMemOffs16",
                                    X86MemOffs16_16AsmOperand>;
def offset16_32 : X86MemOffsOperand<i16imm, "printMemOffs32",
                                    X86MemOffs16_32AsmOperand>;
def offset32_8  : X86MemOffsOperand<i32imm, "printMemOffs8",
                                    X86MemOffs32_8AsmOperand>;
def offset32_16 : X86MemOffsOperand<i32imm, "printMemOffs16",
                                    X86MemOffs32_16AsmOperand>;
def offset32_32 : X86MemOffsOperand<i32imm, "printMemOffs32",
                                    X86MemOffs32_32AsmOperand>;
def offset32_64 : X86MemOffsOperand<i32imm, "printMemOffs64",
                                    X86MemOffs32_64AsmOperand>;
def offset64_8  : X86MemOffsOperand<i64imm, "printMemOffs8",
                                    X86MemOffs64_8AsmOperand>;
def offset64_16 : X86MemOffsOperand<i64imm, "printMemOffs16",
                                    X86MemOffs64_16AsmOperand>;
def offset64_32 : X86MemOffsOperand<i64imm, "printMemOffs32",
                                    X86MemOffs64_32AsmOperand>;
def offset64_64 : X86MemOffsOperand<i64imm, "printMemOffs64",
                                    X86MemOffs64_64AsmOperand>;
// Condition-code immediate operands for SSE/AVX/AVX-512/XOP compares.
def SSECC : Operand<i8> {
  let PrintMethod = "printSSEAVXCC";
  let OperandType = "OPERAND_IMMEDIATE";
}

def AVXCC : Operand<i8> {
  let PrintMethod = "printSSEAVXCC";
  let OperandType = "OPERAND_IMMEDIATE";
}

def AVX512ICC : Operand<i8> {
  let PrintMethod = "printSSEAVXCC";
  let OperandType = "OPERAND_IMMEDIATE";
}

def XOPCC : Operand<i8> {
  let PrintMethod = "printXOPCC";
  let OperandType = "OPERAND_IMMEDIATE";
}

class ImmSExtAsmOperandClass : AsmOperandClass {
  let SuperClasses = [ImmAsmOperand];
  let RenderMethod = "addImmOperands";
}

def X86GR32orGR64AsmOperand : AsmOperandClass {
  let Name = "GR32orGR64";
}

def GR32orGR64 : RegisterOperand<GR32> {
  let ParserMatchClass = X86GR32orGR64AsmOperand;
}

def AVX512RCOperand : AsmOperandClass {
  let Name = "AVX512RC";
}
def AVX512RC : Operand<i32> {
  let PrintMethod = "printRoundingControl";
  let OperandType = "OPERAND_IMMEDIATE";
  let ParserMatchClass = AVX512RCOperand;
}
// Sign-extended immediate classes. We don't need to define the full lattice
// here because there is no instruction with an ambiguity between ImmSExti64i32
// and ImmSExti16i8.
//
// The strange ranges come from the fact that the assembler always works with
// 64-bit immediates, but for a 16-bit target value we want to accept both "-1"
// (which will be a -1ULL), and "0xFF" (-1 in 16-bits).

// [0, 0x7FFFFFFF] |
//   [0xFFFFFFFF80000000, 0xFFFFFFFFFFFFFFFF]
def ImmSExti64i32AsmOperand : ImmSExtAsmOperandClass {
  let Name = "ImmSExti64i32";
}

// [0, 0x0000007F] | [0x000000000000FF80, 0x000000000000FFFF] |
//   [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
def ImmSExti16i8AsmOperand : ImmSExtAsmOperandClass {
  let Name = "ImmSExti16i8";
  let SuperClasses = [ImmSExti64i32AsmOperand];
}

// [0, 0x0000007F] | [0x00000000FFFFFF80, 0x00000000FFFFFFFF] |
//   [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
def ImmSExti32i8AsmOperand : ImmSExtAsmOperandClass {
  let Name = "ImmSExti32i8";
}

// [0, 0x000000000000007F] |
//   [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
def ImmSExti64i8AsmOperand : ImmSExtAsmOperandClass {
  let Name = "ImmSExti64i8";
  let SuperClasses = [ImmSExti16i8AsmOperand, ImmSExti32i8AsmOperand,
                      ImmSExti64i32AsmOperand];
}

// Unsigned immediate used by SSE/AVX instructions
// [0, 0xFF]
//   [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
def ImmUnsignedi8AsmOperand : AsmOperandClass {
  let Name = "ImmUnsignedi8";
  let RenderMethod = "addImmOperands";
}

// A couple of more descriptive operand definitions.
// 16-bits but only 8 bits are significant.
def i16i8imm  : Operand<i16> {
  let ParserMatchClass = ImmSExti16i8AsmOperand;
  let OperandType = "OPERAND_IMMEDIATE";
}
// 32-bits but only 8 bits are significant.
def i32i8imm  : Operand<i32> {
  let ParserMatchClass = ImmSExti32i8AsmOperand;
  let OperandType = "OPERAND_IMMEDIATE";
}

// 64-bits but only 32 bits are significant.
def i64i32imm  : Operand<i64> {
  let ParserMatchClass = ImmSExti64i32AsmOperand;
  let OperandType = "OPERAND_IMMEDIATE";
}

// 64-bits but only 8 bits are significant.
def i64i8imm   : Operand<i64> {
  let ParserMatchClass = ImmSExti64i8AsmOperand;
  let OperandType = "OPERAND_IMMEDIATE";
}

// Unsigned 8-bit immediate used by SSE/AVX instructions.
def u8imm : Operand<i8> {
  let PrintMethod = "printU8Imm";
  let ParserMatchClass = ImmUnsignedi8AsmOperand;
  let OperandType = "OPERAND_IMMEDIATE";
}

// 32-bit immediate but only 8-bits are significant and they are unsigned.
// Used by some SSE/AVX instructions that use intrinsics.
def i32u8imm : Operand<i32> {
  let PrintMethod = "printU8Imm";
  let ParserMatchClass = ImmUnsignedi8AsmOperand;
  let OperandType = "OPERAND_IMMEDIATE";
}

// 64-bits but only 32 bits are significant, and those bits are treated as being
// pc relative.
def i64i32imm_pcrel : Operand<i64> {
  let PrintMethod = "printPCRelImm";
  let ParserMatchClass = X86AbsMemAsmOperand;
  let OperandType = "OPERAND_PCREL";
}
// 32-bit LEA result computed from 64-bit pointer components.
def lea64_32mem : Operand<i32> {
  let PrintMethod = "printanymem";
  let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm, SEGMENT_REG);
  let ParserMatchClass = X86MemAsmOperand;
}

// Memory operands that use 64-bit pointers in both ILP32 and LP64.
def lea64mem : Operand<i64> {
  let PrintMethod = "printanymem";
  let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm, SEGMENT_REG);
  let ParserMatchClass = X86MemAsmOperand;
}
//===----------------------------------------------------------------------===//
// X86 Complex Pattern Definitions.
//

// Define X86-specific addressing mode.
def addr      : ComplexPattern<iPTR, 5, "selectAddr", [], [SDNPWantParent]>;
def lea32addr : ComplexPattern<i32, 5, "selectLEAAddr",
                               [add, sub, mul, X86mul_imm, shl, or, frameindex],
                               []>;
// In 64-bit mode 32-bit LEAs can use RIP-relative addressing.
def lea64_32addr : ComplexPattern<i32, 5, "selectLEA64_32Addr",
                                  [add, sub, mul, X86mul_imm, shl, or,
                                   frameindex, X86WrapperRIP],
                                  []>;

def tls32addr : ComplexPattern<i32, 5, "selectTLSADDRAddr",
                               [tglobaltlsaddr], []>;

def tls32baseaddr : ComplexPattern<i32, 5, "selectTLSADDRAddr",
                                   [tglobaltlsaddr], []>;

def lea64addr : ComplexPattern<i64, 5, "selectLEAAddr",
                               [add, sub, mul, X86mul_imm, shl, or, frameindex,
                                X86WrapperRIP], []>;

def tls64addr : ComplexPattern<i64, 5, "selectTLSADDRAddr",
                               [tglobaltlsaddr], []>;

def tls64baseaddr : ComplexPattern<i64, 5, "selectTLSADDRAddr",
                                   [tglobaltlsaddr], []>;

def vectoraddr : ComplexPattern<iPTR, 5, "selectVectorAddr", [],[SDNPWantParent]>;

// A relocatable immediate is either an immediate operand or an operand that can
// be relocated by the linker to an immediate, such as a regular symbol in
// non-PIC code.
def relocImm : ComplexPattern<iAny, 1, "selectRelocImm", [imm, X86Wrapper], [],
                              0>;
790 //===----------------------------------------------------------------------===//
791 // X86 Instruction Predicate Definitions.
792 def TruePredicate : Predicate<"true">;
794 def HasCMov : Predicate<"Subtarget->hasCMov()">;
795 def NoCMov : Predicate<"!Subtarget->hasCMov()">;
797 def HasMMX : Predicate<"Subtarget->hasMMX()">;
798 def Has3DNow : Predicate<"Subtarget->has3DNow()">;
799 def Has3DNowA : Predicate<"Subtarget->has3DNowA()">;
800 def HasSSE1 : Predicate<"Subtarget->hasSSE1()">;
801 def UseSSE1 : Predicate<"Subtarget->hasSSE1() && !Subtarget->hasAVX()">;
802 def HasSSE2 : Predicate<"Subtarget->hasSSE2()">;
803 def UseSSE2 : Predicate<"Subtarget->hasSSE2() && !Subtarget->hasAVX()">;
804 def HasSSE3 : Predicate<"Subtarget->hasSSE3()">;
805 def UseSSE3 : Predicate<"Subtarget->hasSSE3() && !Subtarget->hasAVX()">;
806 def HasSSSE3 : Predicate<"Subtarget->hasSSSE3()">;
807 def UseSSSE3 : Predicate<"Subtarget->hasSSSE3() && !Subtarget->hasAVX()">;
808 def HasSSE41 : Predicate<"Subtarget->hasSSE41()">;
809 def NoSSE41 : Predicate<"!Subtarget->hasSSE41()">;
810 def UseSSE41 : Predicate<"Subtarget->hasSSE41() && !Subtarget->hasAVX()">;
811 def HasSSE42 : Predicate<"Subtarget->hasSSE42()">;
812 def UseSSE42 : Predicate<"Subtarget->hasSSE42() && !Subtarget->hasAVX()">;
813 def HasSSE4A : Predicate<"Subtarget->hasSSE4A()">;
814 def NoAVX : Predicate<"!Subtarget->hasAVX()">;
815 def HasAVX : Predicate<"Subtarget->hasAVX()">;
816 def HasAVX2 : Predicate<"Subtarget->hasAVX2()">;
817 def HasAVX1Only : Predicate<"Subtarget->hasAVX() && !Subtarget->hasAVX2()">;
818 def HasAVX512 : Predicate<"Subtarget->hasAVX512()">;
819 def UseAVX : Predicate<"Subtarget->hasAVX() && !Subtarget->hasAVX512()">;
820 def UseAVX2 : Predicate<"Subtarget->hasAVX2() && !Subtarget->hasAVX512()">;
821 def NoAVX512 : Predicate<"!Subtarget->hasAVX512()">;
822 def HasCDI : Predicate<"Subtarget->hasCDI()">;
823 def HasVPOPCNTDQ : Predicate<"Subtarget->hasVPOPCNTDQ()">;
824 def HasPFI : Predicate<"Subtarget->hasPFI()">;
825 def HasERI : Predicate<"Subtarget->hasERI()">;
826 def HasDQI : Predicate<"Subtarget->hasDQI()">;
827 def NoDQI : Predicate<"!Subtarget->hasDQI()">;
828 def HasBWI : Predicate<"Subtarget->hasBWI()">;
829 def NoBWI : Predicate<"!Subtarget->hasBWI()">;
830 def HasVLX : Predicate<"Subtarget->hasVLX()">;
831 def NoVLX : Predicate<"!Subtarget->hasVLX()">;
832 def NoVLX_Or_NoBWI : Predicate<"!Subtarget->hasVLX() || !Subtarget->hasBWI()">;
833 def NoVLX_Or_NoDQI : Predicate<"!Subtarget->hasVLX() || !Subtarget->hasDQI()">;
834 def PKU : Predicate<"Subtarget->hasPKU()">;
// ISA-extension predicates. Each wraps a single X86Subtarget feature query;
// one predicate per CPUID feature bit the instruction tables care about.
835 def HasVNNI : Predicate<"Subtarget->hasVNNI()">;
837 def HasBITALG : Predicate<"Subtarget->hasBITALG()">;
838 def HasPOPCNT : Predicate<"Subtarget->hasPOPCNT()">;
839 def HasAES : Predicate<"Subtarget->hasAES()">;
840 def HasVAES : Predicate<"Subtarget->hasVAES()">;
841 def NoVLX_Or_NoVAES : Predicate<"!Subtarget->hasVLX() || !Subtarget->hasVAES()">;
842 def HasFXSR : Predicate<"Subtarget->hasFXSR()">;
843 def HasXSAVE : Predicate<"Subtarget->hasXSAVE()">;
844 def HasXSAVEOPT : Predicate<"Subtarget->hasXSAVEOPT()">;
845 def HasXSAVEC : Predicate<"Subtarget->hasXSAVEC()">;
846 def HasXSAVES : Predicate<"Subtarget->hasXSAVES()">;
847 def HasPCLMUL : Predicate<"Subtarget->hasPCLMUL()">;
848 def NoVLX_Or_NoVPCLMULQDQ :
849 Predicate<"!Subtarget->hasVLX() || !Subtarget->hasVPCLMULQDQ()">;
850 def HasVPCLMULQDQ : Predicate<"Subtarget->hasVPCLMULQDQ()">;
851 def HasGFNI : Predicate<"Subtarget->hasGFNI()">;
852 def HasFMA : Predicate<"Subtarget->hasFMA()">;
853 def HasFMA4 : Predicate<"Subtarget->hasFMA4()">;
854 def NoFMA4 : Predicate<"!Subtarget->hasFMA4()">;
855 def HasXOP : Predicate<"Subtarget->hasXOP()">;
856 def HasTBM : Predicate<"Subtarget->hasTBM()">;
857 def NoTBM : Predicate<"!Subtarget->hasTBM()">;
858 def HasLWP : Predicate<"Subtarget->hasLWP()">;
859 def HasMOVBE : Predicate<"Subtarget->hasMOVBE()">;
860 def HasRDRAND : Predicate<"Subtarget->hasRDRAND()">;
861 def HasF16C : Predicate<"Subtarget->hasF16C()">;
862 def HasFSGSBase : Predicate<"Subtarget->hasFSGSBase()">;
863 def HasLZCNT : Predicate<"Subtarget->hasLZCNT()">;
864 def HasBMI : Predicate<"Subtarget->hasBMI()">;
865 def HasBMI2 : Predicate<"Subtarget->hasBMI2()">;
866 def NoBMI2 : Predicate<"!Subtarget->hasBMI2()">;
867 def HasVBMI : Predicate<"Subtarget->hasVBMI()">;
868 def HasVBMI2 : Predicate<"Subtarget->hasVBMI2()">;
869 def HasIFMA : Predicate<"Subtarget->hasIFMA()">;
870 def HasRTM : Predicate<"Subtarget->hasRTM()">;
871 def HasADX : Predicate<"Subtarget->hasADX()">;
872 def HasSHA : Predicate<"Subtarget->hasSHA()">;
873 def HasSGX : Predicate<"Subtarget->hasSGX()">;
874 def HasPRFCHW : Predicate<"Subtarget->hasPRFCHW()">;
875 def HasRDSEED : Predicate<"Subtarget->hasRDSEED()">;
876 def HasSSEPrefetch : Predicate<"Subtarget->hasSSEPrefetch()">;
877 def NoSSEPrefetch : Predicate<"!Subtarget->hasSSEPrefetch()">;
// Note: HasPrefetchW intentionally aliases the same subtarget query as
// HasPRFCHW above; both names are kept for the instruction defs that use them.
878 def HasPrefetchW : Predicate<"Subtarget->hasPRFCHW()">;
879 def HasPREFETCHWT1 : Predicate<"Subtarget->hasPREFETCHWT1()">;
880 def HasLAHFSAHF : Predicate<"Subtarget->hasLAHFSAHF()">;
881 def HasMWAITX : Predicate<"Subtarget->hasMWAITX()">;
882 def HasCLZERO : Predicate<"Subtarget->hasCLZERO()">;
883 def HasCLDEMOTE : Predicate<"Subtarget->hasCLDEMOTE()">;
884 def HasMOVDIRI : Predicate<"Subtarget->hasMOVDIRI()">;
885 def HasMOVDIR64B : Predicate<"Subtarget->hasMOVDIR64B()">;
886 def HasPTWRITE : Predicate<"Subtarget->hasPTWRITE()">;
// FPStackf32/f64: x87 stack is used for f32/f64 only when SSE1/SSE2 are absent.
887 def FPStackf32 : Predicate<"!Subtarget->hasSSE1()">;
888 def FPStackf64 : Predicate<"!Subtarget->hasSSE2()">;
889 def HasMPX : Predicate<"Subtarget->hasMPX()">;
890 def HasSHSTK : Predicate<"Subtarget->hasSHSTK()">;
891 def HasCLFLUSHOPT : Predicate<"Subtarget->hasCLFLUSHOPT()">;
892 def HasCLWB : Predicate<"Subtarget->hasCLWB()">;
893 def HasWBNOINVD : Predicate<"Subtarget->hasWBNOINVD()">;
894 def HasRDPID : Predicate<"Subtarget->hasRDPID()">;
895 def HasWAITPKG : Predicate<"Subtarget->hasWAITPKG()">;
896 def HasINVPCID : Predicate<"Subtarget->hasINVPCID()">;
897 def HasCmpxchg16b: Predicate<"Subtarget->hasCmpxchg16b()">;
898 def HasPCONFIG : Predicate<"Subtarget->hasPCONFIG()">;
// Operating-mode predicates. These carry an AssemblerPredicate as well so the
// assembler/disassembler can gate encodings by the current 16/32/64-bit mode,
// not just instruction selection.
899 def Not64BitMode : Predicate<"!Subtarget->is64Bit()">,
900 AssemblerPredicate<"!Mode64Bit", "Not 64-bit mode">;
901 def In64BitMode : Predicate<"Subtarget->is64Bit()">,
902 AssemblerPredicate<"Mode64Bit", "64-bit mode">;
903 def IsLP64 : Predicate<"Subtarget->isTarget64BitLP64()">;
904 def NotLP64 : Predicate<"!Subtarget->isTarget64BitLP64()">;
905 def In16BitMode : Predicate<"Subtarget->is16Bit()">,
906 AssemblerPredicate<"Mode16Bit", "16-bit mode">;
907 def Not16BitMode : Predicate<"!Subtarget->is16Bit()">,
908 AssemblerPredicate<"!Mode16Bit", "Not 16-bit mode">;
909 def In32BitMode : Predicate<"Subtarget->is32Bit()">,
910 AssemblerPredicate<"Mode32Bit", "32-bit mode">;
911 def IsWin64 : Predicate<"Subtarget->isTargetWin64()">;
912 def NotWin64 : Predicate<"!Subtarget->isTargetWin64()">;
// Depends on the current function's frame lowering, hence
// RecomputePerFunction below.
913 def NotWin64WithoutFP : Predicate<"!Subtarget->isTargetWin64() ||"
914 "Subtarget->getFrameLowering()->hasFP(*MF)"> {
915 let RecomputePerFunction = 1;
917 def IsPS4 : Predicate<"Subtarget->isTargetPS4()">;
918 def NotPS4 : Predicate<"!Subtarget->isTargetPS4()">;
919 def IsNaCl : Predicate<"Subtarget->isTargetNaCl()">;
920 def NotNaCl : Predicate<"!Subtarget->isTargetNaCl()">;
// Code-model predicates query the TargetMachine (TM), not the subtarget.
921 def SmallCode : Predicate<"TM.getCodeModel() == CodeModel::Small">;
922 def KernelCode : Predicate<"TM.getCodeModel() == CodeModel::Kernel">;
923 def NearData : Predicate<"TM.getCodeModel() == CodeModel::Small ||"
924 "TM.getCodeModel() == CodeModel::Kernel">;
925 def IsNotPIC : Predicate<"!TM.isPositionIndependent()">;
927 // We could compute these on a per-module basis but doing so requires accessing
928 // the Function object through the <Target>Subtarget and objections were raised
929 // to that (see post-commit review comments for r301750).
930 let RecomputePerFunction = 1 in {
931 def OptForSize : Predicate<"MF->getFunction().optForSize()">;
932 def OptForMinSize : Predicate<"MF->getFunction().optForMinSize()">;
933 def OptForSpeed : Predicate<"!MF->getFunction().optForSize()">;
934 def UseIncDec : Predicate<"!Subtarget->slowIncDec() || "
935 "MF->getFunction().optForSize()">;
936 def NoSSE41_Or_OptForSize : Predicate<"MF->getFunction().optForSize() || "
937 "!Subtarget->hasSSE41()">;
// Tuning predicates: speed heuristics rather than ISA availability.
940 def CallImmAddr : Predicate<"Subtarget->isLegalToCallImmediateAddr()">;
941 def FavorMemIndirectCall : Predicate<"!Subtarget->slowTwoMemOps()">;
942 def HasFastMem32 : Predicate<"!Subtarget->isUnalignedMem32Slow()">;
943 def HasFastLZCNT : Predicate<"Subtarget->hasFastLZCNT()">;
944 def HasFastSHLDRotate : Predicate<"Subtarget->hasFastSHLDRotate()">;
945 def HasERMSB : Predicate<"Subtarget->hasERMSB()">;
946 def HasMFence : Predicate<"Subtarget->hasMFence()">;
947 def UseRetpolineIndirectCalls : Predicate<"Subtarget->useRetpolineIndirectCalls()">;
948 def NotUseRetpolineIndirectCalls : Predicate<"!Subtarget->useRetpolineIndirectCalls()">;
950 //===----------------------------------------------------------------------===//
951 // X86 Instruction Format Definitions.
954 include "X86InstrFormats.td"
956 //===----------------------------------------------------------------------===//
957 // Pattern fragments.
960 // X86 specific condition code. These correspond to CondCode in
961 // X86InstrInfo.h. They must be kept in sync: each PatLeaf's i8 constant is
// the numeric encoding of the condition used by CMOV/SETcc/Jcc nodes.
962 def X86_COND_A : PatLeaf<(i8 0)>; // alt. COND_NBE
963 def X86_COND_AE : PatLeaf<(i8 1)>; // alt. COND_NC
964 def X86_COND_B : PatLeaf<(i8 2)>; // alt. COND_C
965 def X86_COND_BE : PatLeaf<(i8 3)>; // alt. COND_NA
966 def X86_COND_E : PatLeaf<(i8 4)>; // alt. COND_Z
967 def X86_COND_G : PatLeaf<(i8 5)>; // alt. COND_NLE
968 def X86_COND_GE : PatLeaf<(i8 6)>; // alt. COND_NL
969 def X86_COND_L : PatLeaf<(i8 7)>; // alt. COND_NGE
970 def X86_COND_LE : PatLeaf<(i8 8)>; // alt. COND_NG
971 def X86_COND_NE : PatLeaf<(i8 9)>; // alt. COND_NZ
972 def X86_COND_NO : PatLeaf<(i8 10)>;
973 def X86_COND_NP : PatLeaf<(i8 11)>; // alt. COND_PO
974 def X86_COND_NS : PatLeaf<(i8 12)>;
975 def X86_COND_O : PatLeaf<(i8 13)>;
976 def X86_COND_P : PatLeaf<(i8 14)>; // alt. COND_PE
977 def X86_COND_S : PatLeaf<(i8 15)>;
// Immediates that fit in a sign-extended 8-bit (or 32-bit) field, enabling
// the shorter imm8/imm32 encodings of 16/32/64-bit instructions.
979 def i16immSExt8 : ImmLeaf<i16, [{ return isInt<8>(Imm); }]>;
980 def i32immSExt8 : ImmLeaf<i32, [{ return isInt<8>(Imm); }]>;
981 def i64immSExt8 : ImmLeaf<i64, [{ return isInt<8>(Imm); }]>;
982 def i64immSExt32 : ImmLeaf<i64, [{ return isInt<32>(Imm); }]>;
984 // FIXME: Ideally we would just replace the above i*immSExt* matchers with
985 // relocImm-based matchers, but then FastISel would be unable to use them.
// relocImm variants additionally accept relocatable (symbolic) immediates.
986 def i64relocImmSExt8 : PatLeaf<(i64 relocImm), [{
987 return isSExtRelocImm<8>(N);
989 def i64relocImmSExt32 : PatLeaf<(i64 relocImm), [{
990 return isSExtRelocImm<32>(N);
993 // If we have multiple users of an immediate, it's much smaller to reuse
994 // the register, rather than encode the immediate in every instruction.
995 // This has the risk of increasing register pressure from stretched live
996 // ranges, however, the immediates should be trivial to rematerialize by
997 // the RA in the event of high register pressure.
998 // TODO : This is currently enabled for stores and binary ops. There are more
999 // cases for which this can be enabled, though this catches the bulk of the
// cases.
1001 // TODO2 : This should really also be enabled under O2, but there's currently
1002 // an issue with RA where we don't pull the constants into their users
1003 // when we rematerialize them. I'll follow-up on enabling O2 after we fix that
// issue.
1005 // TODO3 : This is currently limited to single basic blocks (DAG creation
1006 // pulls block immediates to the top and merges them if necessary).
1007 // Eventually, it would be nice to allow ConstantHoisting to merge constants
1008 // globally for potentially added savings.
// "_su" (size-use) immediate leafs: match only when
// shouldAvoidImmediateInstFormsForSize says the immediate form is NOT worse
// for size, i.e. reject the immediate encoding when reusing a register that
// already holds the constant would be smaller (see the rationale above).
1010 def imm8_su : PatLeaf<(i8 relocImm), [{
1011 return !shouldAvoidImmediateInstFormsForSize(N);
1013 def imm16_su : PatLeaf<(i16 relocImm), [{
1014 return !shouldAvoidImmediateInstFormsForSize(N);
1016 def imm32_su : PatLeaf<(i32 relocImm), [{
1017 return !shouldAvoidImmediateInstFormsForSize(N);
1019 def i64immSExt32_su : PatLeaf<(i64immSExt32), [{
1020 return !shouldAvoidImmediateInstFormsForSize(N);
1023 def i16immSExt8_su : PatLeaf<(i16immSExt8), [{
1024 return !shouldAvoidImmediateInstFormsForSize(N);
1026 def i32immSExt8_su : PatLeaf<(i32immSExt8), [{
1027 return !shouldAvoidImmediateInstFormsForSize(N);
1029 def i64immSExt8_su : PatLeaf<(i64immSExt8), [{
1030 return !shouldAvoidImmediateInstFormsForSize(N);
1033 def i64relocImmSExt8_su : PatLeaf<(i64relocImmSExt8), [{
1034 return !shouldAvoidImmediateInstFormsForSize(N);
1036 def i64relocImmSExt32_su : PatLeaf<(i64relocImmSExt32), [{
1037 return !shouldAvoidImmediateInstFormsForSize(N);
1040 // i64immZExt32 predicate - True if the 64-bit immediate fits in a 32-bit
// zero-extended field.
1042 def i64immZExt32 : ImmLeaf<i64, [{ return isUInt<32>(Imm); }]>;
// True if the immediate is both zero-extendable from 32 bits and, viewed as
// a 32-bit value, sign-extendable from 8 bits.
1044 def i64immZExt32SExt8 : ImmLeaf<i64, [{
1045 return isUInt<32>(Imm) && isInt<8>(static_cast<int32_t>(Imm));
1048 // Helper fragments for loads.
1050 // It's safe to fold a zextload/extload from i1 as a regular i8 load. The
1051 // upper bits are guaranteed to be zero and we were going to emit a MOV8rm
1052 // which might get folded during peephole anyway.
1053 def loadi8 : PatFrag<(ops node:$ptr), (i8 (unindexedload node:$ptr)), [{
1054 LoadSDNode *LD = cast<LoadSDNode>(N);
1055 ISD::LoadExtType ExtType = LD->getExtensionType();
1056 return ExtType == ISD::NON_EXTLOAD || ExtType == ISD::EXTLOAD ||
1057 ExtType == ISD::ZEXTLOAD;
1060 // It's always safe to treat an anyext i16 load as an i32 load if the i16 is
1061 // known to be 32-bit aligned or better. Ditto for i8 to i16.
// For EXTLOAD the wider access must be sufficiently aligned and non-volatile.
1062 def loadi16 : PatFrag<(ops node:$ptr), (i16 (unindexedload node:$ptr)), [{
1063 LoadSDNode *LD = cast<LoadSDNode>(N);
1064 ISD::LoadExtType ExtType = LD->getExtensionType();
1065 if (ExtType == ISD::NON_EXTLOAD)
1067 if (ExtType == ISD::EXTLOAD)
1068 return LD->getAlignment() >= 2 && !LD->isVolatile();
1072 def loadi32 : PatFrag<(ops node:$ptr), (i32 (unindexedload node:$ptr)), [{
1073 LoadSDNode *LD = cast<LoadSDNode>(N);
1074 ISD::LoadExtType ExtType = LD->getExtensionType();
1075 if (ExtType == ISD::NON_EXTLOAD)
1077 if (ExtType == ISD::EXTLOAD)
1078 return LD->getAlignment() >= 4 && !LD->isVolatile();
// Plain full-width loads; no extension handling needed at these widths.
1082 def loadi64 : PatFrag<(ops node:$ptr), (i64 (load node:$ptr))>;
1083 def loadf32 : PatFrag<(ops node:$ptr), (f32 (load node:$ptr))>;
1084 def loadf64 : PatFrag<(ops node:$ptr), (f64 (load node:$ptr))>;
1085 def loadf80 : PatFrag<(ops node:$ptr), (f80 (load node:$ptr))>;
1086 def loadf128 : PatFrag<(ops node:$ptr), (f128 (load node:$ptr))>;
// f128 load that requires natural (store-size) alignment.
1087 def alignedloadf128 : PatFrag<(ops node:$ptr), (f128 (load node:$ptr)), [{
1088 LoadSDNode *Ld = cast<LoadSDNode>(N);
1089 return Ld->getAlignment() >= Ld->getMemoryVT().getStoreSize();
// f128 load usable as a memory operand: aligned, or the subtarget tolerates
// unaligned SSE memory accesses.
1091 def memopf128 : PatFrag<(ops node:$ptr), (f128 (load node:$ptr)), [{
1092 LoadSDNode *Ld = cast<LoadSDNode>(N);
1093 return Subtarget->hasSSEUnalignedMem() ||
1094 Ld->getAlignment() >= Ld->getMemoryVT().getStoreSize();
// Typed extending-load fragments: "sextloadi32i16" reads as "sign-extending
// load producing i32 from an i16 in memory", and likewise for zext/ext below.
1097 def sextloadi16i8 : PatFrag<(ops node:$ptr), (i16 (sextloadi8 node:$ptr))>;
1098 def sextloadi32i8 : PatFrag<(ops node:$ptr), (i32 (sextloadi8 node:$ptr))>;
1099 def sextloadi32i16 : PatFrag<(ops node:$ptr), (i32 (sextloadi16 node:$ptr))>;
1100 def sextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (sextloadi8 node:$ptr))>;
1101 def sextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (sextloadi16 node:$ptr))>;
1102 def sextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (sextloadi32 node:$ptr))>;
1104 def zextloadi8i1 : PatFrag<(ops node:$ptr), (i8 (zextloadi1 node:$ptr))>;
1105 def zextloadi16i1 : PatFrag<(ops node:$ptr), (i16 (zextloadi1 node:$ptr))>;
1106 def zextloadi32i1 : PatFrag<(ops node:$ptr), (i32 (zextloadi1 node:$ptr))>;
1107 def zextloadi16i8 : PatFrag<(ops node:$ptr), (i16 (zextloadi8 node:$ptr))>;
1108 def zextloadi32i8 : PatFrag<(ops node:$ptr), (i32 (zextloadi8 node:$ptr))>;
1109 def zextloadi32i16 : PatFrag<(ops node:$ptr), (i32 (zextloadi16 node:$ptr))>;
1110 def zextloadi64i1 : PatFrag<(ops node:$ptr), (i64 (zextloadi1 node:$ptr))>;
1111 def zextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (zextloadi8 node:$ptr))>;
1112 def zextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (zextloadi16 node:$ptr))>;
1113 def zextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (zextloadi32 node:$ptr))>;
// Anyext loads: upper bits unspecified.
1115 def extloadi8i1 : PatFrag<(ops node:$ptr), (i8 (extloadi1 node:$ptr))>;
1116 def extloadi16i1 : PatFrag<(ops node:$ptr), (i16 (extloadi1 node:$ptr))>;
1117 def extloadi32i1 : PatFrag<(ops node:$ptr), (i32 (extloadi1 node:$ptr))>;
1118 def extloadi16i8 : PatFrag<(ops node:$ptr), (i16 (extloadi8 node:$ptr))>;
1119 def extloadi32i8 : PatFrag<(ops node:$ptr), (i32 (extloadi8 node:$ptr))>;
1120 def extloadi32i16 : PatFrag<(ops node:$ptr), (i32 (extloadi16 node:$ptr))>;
1121 def extloadi64i1 : PatFrag<(ops node:$ptr), (i64 (extloadi1 node:$ptr))>;
1122 def extloadi64i8 : PatFrag<(ops node:$ptr), (i64 (extloadi8 node:$ptr))>;
1123 def extloadi64i16 : PatFrag<(ops node:$ptr), (i64 (extloadi16 node:$ptr))>;
1124 def extloadi64i32 : PatFrag<(ops node:$ptr), (i64 (extloadi32 node:$ptr))>;
// Single-use node fragments: match only when the node has exactly one use,
// so folding it into a larger instruction does not duplicate work.
1127 // An 'and' node with a single use.
1128 def and_su : PatFrag<(ops node:$lhs, node:$rhs), (and node:$lhs, node:$rhs), [{
1129 return N->hasOneUse();
1131 // An 'srl' node with a single use.
1132 def srl_su : PatFrag<(ops node:$lhs, node:$rhs), (srl node:$lhs, node:$rhs), [{
1133 return N->hasOneUse();
1135 // A 'trunc' node with a single use.
1136 def trunc_su : PatFrag<(ops node:$src), (trunc node:$src), [{
1137 return N->hasOneUse();
1140 //===----------------------------------------------------------------------===//
1141 // Instruction list.
// NOP family: the classic one-byte 0x90 plus the multi-byte 0F 1F forms.
// Memory/register operands exist only for encoding; none have side effects.
1145 let hasSideEffects = 0, SchedRW = [WriteNop] in {
1146 def NOOP : I<0x90, RawFrm, (outs), (ins), "nop", []>;
1147 def NOOPW : I<0x1f, MRMXm, (outs), (ins i16mem:$zero),
1148 "nop{w}\t$zero", []>, TB, OpSize16, NotMemoryFoldable;
1149 def NOOPL : I<0x1f, MRMXm, (outs), (ins i32mem:$zero),
1150 "nop{l}\t$zero", []>, TB, OpSize32, NotMemoryFoldable;
1151 def NOOPQ : RI<0x1f, MRMXm, (outs), (ins i64mem:$zero),
1152 "nop{q}\t$zero", []>, TB, NotMemoryFoldable,
1153 Requires<[In64BitMode]>;
1154 // Also allow register so we can assemble/disassemble
1155 def NOOPWr : I<0x1f, MRMXr, (outs), (ins GR16:$zero),
1156 "nop{w}\t$zero", []>, TB, OpSize16, NotMemoryFoldable;
1157 def NOOPLr : I<0x1f, MRMXr, (outs), (ins GR32:$zero),
1158 "nop{l}\t$zero", []>, TB, OpSize32, NotMemoryFoldable;
1159 def NOOPQr : RI<0x1f, MRMXr, (outs), (ins GR64:$zero),
1160 "nop{q}\t$zero", []>, TB, NotMemoryFoldable,
1161 Requires<[In64BitMode]>;
1165 // Constructing a stack frame.
1166 def ENTER : Ii16<0xC8, RawFrmImm8, (outs), (ins i16imm:$len, i8imm:$lvl),
1167 "enter\t$len, $lvl", []>, Sched<[WriteMicrocoded]>;
// LEAVE tears the frame down; separate 32- and 64-bit defs because the
// implicit frame/stack registers differ (EBP/ESP vs RBP/RSP).
1169 let SchedRW = [WriteALU] in {
1170 let Defs = [EBP, ESP], Uses = [EBP, ESP], mayLoad = 1, hasSideEffects=0 in
1171 def LEAVE : I<0xC9, RawFrm, (outs), (ins), "leave", []>,
1172 Requires<[Not64BitMode]>;
1174 let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, hasSideEffects = 0 in
1175 def LEAVE64 : I<0xC9, RawFrm, (outs), (ins), "leave", []>,
1176 Requires<[In64BitMode]>;
1179 //===----------------------------------------------------------------------===//
1180 // Miscellaneous Instructions.
// Pseudo expanded by a custom inserter; selected from the
// X86eh_sjlj_setup_dispatch SDNode for setjmp/longjmp exception handling.
1183 let isBarrier = 1, hasSideEffects = 1, usesCustomInserter = 1,
1184 SchedRW = [WriteSystem] in
1185 def Int_eh_sjlj_setup_dispatch
1186 : PseudoI<(outs), (ins), [(X86eh_sjlj_setup_dispatch)]>;
// 16/32-bit PUSH/POP. All forms implicitly use and update ESP. The "rmr"
// defs are the longer ModR/M encodings of the register forms, kept
// isCodeGenOnly + ForceDisassemble so only the disassembler sees them.
1188 let Defs = [ESP], Uses = [ESP], hasSideEffects=0 in {
1189 let mayLoad = 1, SchedRW = [WriteLoad] in {
1190 def POP16r : I<0x58, AddRegFrm, (outs GR16:$reg), (ins), "pop{w}\t$reg", []>,
1192 def POP32r : I<0x58, AddRegFrm, (outs GR32:$reg), (ins), "pop{l}\t$reg", []>,
1193 OpSize32, Requires<[Not64BitMode]>;
1194 // Long form for the disassembler.
1195 let isCodeGenOnly = 1, ForceDisassemble = 1 in {
1196 def POP16rmr: I<0x8F, MRM0r, (outs GR16:$reg), (ins), "pop{w}\t$reg", []>,
1197 OpSize16, NotMemoryFoldable;
1198 def POP32rmr: I<0x8F, MRM0r, (outs GR32:$reg), (ins), "pop{l}\t$reg", []>,
1199 OpSize32, Requires<[Not64BitMode]>, NotMemoryFoldable;
1200 } // isCodeGenOnly = 1, ForceDisassemble = 1
1201 } // mayLoad, SchedRW
1202 let mayStore = 1, mayLoad = 1, SchedRW = [WriteCopy] in {
1203 def POP16rmm: I<0x8F, MRM0m, (outs), (ins i16mem:$dst), "pop{w}\t$dst", []>,
1205 def POP32rmm: I<0x8F, MRM0m, (outs), (ins i32mem:$dst), "pop{l}\t$dst", []>,
1206 OpSize32, Requires<[Not64BitMode]>;
1207 } // mayStore, mayLoad, SchedRW
1209 let mayStore = 1, SchedRW = [WriteStore] in {
1210 def PUSH16r : I<0x50, AddRegFrm, (outs), (ins GR16:$reg), "push{w}\t$reg",[]>,
1212 def PUSH32r : I<0x50, AddRegFrm, (outs), (ins GR32:$reg), "push{l}\t$reg",[]>,
1213 OpSize32, Requires<[Not64BitMode]>;
1214 // Long form for the disassembler.
1215 let isCodeGenOnly = 1, ForceDisassemble = 1 in {
1216 def PUSH16rmr: I<0xFF, MRM6r, (outs), (ins GR16:$reg), "push{w}\t$reg",[]>,
1217 OpSize16, NotMemoryFoldable;
1218 def PUSH32rmr: I<0xFF, MRM6r, (outs), (ins GR32:$reg), "push{l}\t$reg",[]>,
1219 OpSize32, Requires<[Not64BitMode]>, NotMemoryFoldable;
1220 } // isCodeGenOnly = 1, ForceDisassemble = 1
// Immediate pushes: 0x6A takes a sign-extended imm8, 0x68 a full-width imm.
1222 def PUSH16i8 : Ii8<0x6a, RawFrm, (outs), (ins i16i8imm:$imm),
1223 "push{w}\t$imm", []>, OpSize16;
1224 def PUSHi16 : Ii16<0x68, RawFrm, (outs), (ins i16imm:$imm),
1225 "push{w}\t$imm", []>, OpSize16;
1227 def PUSH32i8 : Ii8<0x6a, RawFrm, (outs), (ins i32i8imm:$imm),
1228 "push{l}\t$imm", []>, OpSize32,
1229 Requires<[Not64BitMode]>;
1230 def PUSHi32 : Ii32<0x68, RawFrm, (outs), (ins i32imm:$imm),
1231 "push{l}\t$imm", []>, OpSize32,
1232 Requires<[Not64BitMode]>;
1233 } // mayStore, SchedRW
1235 let mayLoad = 1, mayStore = 1, SchedRW = [WriteCopy] in {
1236 def PUSH16rmm: I<0xFF, MRM6m, (outs), (ins i16mem:$src), "push{w}\t$src", []>,
1238 def PUSH32rmm: I<0xFF, MRM6m, (outs), (ins i32mem:$src), "push{l}\t$src", []>,
1239 OpSize32, Requires<[Not64BitMode]>;
1240 } // mayLoad, mayStore, SchedRW
// EFLAGS read/write pseudos selected from the int_x86_flags_* intrinsics and
// expanded by a custom inserter (presumably into pushf/pop and push/popf
// sequences — the expansion itself lives elsewhere).
1244 let mayLoad = 1, mayStore = 1, usesCustomInserter = 1,
1245 SchedRW = [WriteRMW], Defs = [ESP] in {
1247 def RDFLAGS32 : PseudoI<(outs GR32:$dst), (ins),
1248 [(set GR32:$dst, (int_x86_flags_read_u32))]>,
1249 Requires<[Not64BitMode]>;
1252 def RDFLAGS64 : PseudoI<(outs GR64:$dst), (ins),
1253 [(set GR64:$dst, (int_x86_flags_read_u64))]>,
1254 Requires<[In64BitMode]>;
1257 let mayLoad = 1, mayStore = 1, usesCustomInserter = 1,
1258 SchedRW = [WriteRMW] in {
// Writing flags clobbers EFLAGS and DF in addition to adjusting the stack.
1259 let Defs = [ESP, EFLAGS, DF], Uses = [ESP] in
1260 def WRFLAGS32 : PseudoI<(outs), (ins GR32:$src),
1261 [(int_x86_flags_write_u32 GR32:$src)]>,
1262 Requires<[Not64BitMode]>;
1264 let Defs = [RSP, EFLAGS, DF], Uses = [RSP] in
1265 def WRFLAGS64 : PseudoI<(outs), (ins GR64:$src),
1266 [(int_x86_flags_write_u64 GR64:$src)]>,
1267 Requires<[In64BitMode]>;
// POPF loads EFLAGS (and DF) from the stack; PUSHF stores them.
1270 let Defs = [ESP, EFLAGS, DF], Uses = [ESP], mayLoad = 1, hasSideEffects=0,
1271 SchedRW = [WriteLoad] in {
1272 def POPF16 : I<0x9D, RawFrm, (outs), (ins), "popf{w}", []>, OpSize16;
1273 def POPF32 : I<0x9D, RawFrm, (outs), (ins), "popf{l|d}", []>, OpSize32,
1274 Requires<[Not64BitMode]>;
1277 let Defs = [ESP], Uses = [ESP, EFLAGS, DF], mayStore = 1, hasSideEffects=0,
1278 SchedRW = [WriteStore] in {
1279 def PUSHF16 : I<0x9C, RawFrm, (outs), (ins), "pushf{w}", []>, OpSize16;
1280 def PUSHF32 : I<0x9C, RawFrm, (outs), (ins), "pushf{l|d}", []>, OpSize32,
1281 Requires<[Not64BitMode]>;
// 64-bit PUSH/POP. Note these carry OpSize32: in 64-bit mode the default
// operand size for push/pop is already 64 bits, so no REX.W is needed.
1284 let Defs = [RSP], Uses = [RSP], hasSideEffects=0 in {
1285 let mayLoad = 1, SchedRW = [WriteLoad] in {
1286 def POP64r : I<0x58, AddRegFrm, (outs GR64:$reg), (ins), "pop{q}\t$reg", []>,
1287 OpSize32, Requires<[In64BitMode]>;
1288 // Long form for the disassembler.
1289 let isCodeGenOnly = 1, ForceDisassemble = 1 in {
1290 def POP64rmr: I<0x8F, MRM0r, (outs GR64:$reg), (ins), "pop{q}\t$reg", []>,
1291 OpSize32, Requires<[In64BitMode]>, NotMemoryFoldable;
1292 } // isCodeGenOnly = 1, ForceDisassemble = 1
1293 } // mayLoad, SchedRW
1294 let mayLoad = 1, mayStore = 1, SchedRW = [WriteCopy] in
1295 def POP64rmm: I<0x8F, MRM0m, (outs), (ins i64mem:$dst), "pop{q}\t$dst", []>,
1296 OpSize32, Requires<[In64BitMode]>;
1297 let mayStore = 1, SchedRW = [WriteStore] in {
1298 def PUSH64r : I<0x50, AddRegFrm, (outs), (ins GR64:$reg), "push{q}\t$reg", []>,
1299 OpSize32, Requires<[In64BitMode]>;
1300 // Long form for the disassembler.
1301 let isCodeGenOnly = 1, ForceDisassemble = 1 in {
1302 def PUSH64rmr: I<0xFF, MRM6r, (outs), (ins GR64:$reg), "push{q}\t$reg", []>,
1303 OpSize32, Requires<[In64BitMode]>, NotMemoryFoldable;
1304 } // isCodeGenOnly = 1, ForceDisassemble = 1
1305 } // mayStore, SchedRW
1306 let mayLoad = 1, mayStore = 1, SchedRW = [WriteCopy] in {
1307 def PUSH64rmm: I<0xFF, MRM6m, (outs), (ins i64mem:$src), "push{q}\t$src", []>,
1308 OpSize32, Requires<[In64BitMode]>;
1309 } // mayLoad, mayStore, SchedRW
// Immediate pushes in 64-bit mode: imm8 or sign-extended imm32.
1312 let Defs = [RSP], Uses = [RSP], hasSideEffects = 0, mayStore = 1,
1313 SchedRW = [WriteStore] in {
1314 def PUSH64i8 : Ii8<0x6a, RawFrm, (outs), (ins i64i8imm:$imm),
1315 "push{q}\t$imm", []>, OpSize32,
1316 Requires<[In64BitMode]>;
1317 def PUSH64i32 : Ii32S<0x68, RawFrm, (outs), (ins i64i32imm:$imm),
1318 "push{q}\t$imm", []>, OpSize32,
1319 Requires<[In64BitMode]>;
1322 let Defs = [RSP, EFLAGS, DF], Uses = [RSP], mayLoad = 1, hasSideEffects=0 in
1323 def POPF64 : I<0x9D, RawFrm, (outs), (ins), "popfq", []>,
1324 OpSize32, Requires<[In64BitMode]>, Sched<[WriteLoad]>;
1325 let Defs = [RSP], Uses = [RSP, EFLAGS, DF], mayStore = 1, hasSideEffects=0 in
1326 def PUSHF64 : I<0x9C, RawFrm, (outs), (ins), "pushfq", []>,
1327 OpSize32, Requires<[In64BitMode]>, Sched<[WriteStore]>;
// POPA/PUSHA: pop/push all general-purpose registers; 16/32-bit modes only.
1329 let Defs = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP], Uses = [ESP],
1330 mayLoad = 1, hasSideEffects = 0, SchedRW = [WriteLoad] in {
1331 def POPA32 : I<0x61, RawFrm, (outs), (ins), "popal", []>,
1332 OpSize32, Requires<[Not64BitMode]>;
1333 def POPA16 : I<0x61, RawFrm, (outs), (ins), "popaw", []>,
1334 OpSize16, Requires<[Not64BitMode]>;
1336 let Defs = [ESP], Uses = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP],
1337 mayStore = 1, hasSideEffects = 0, SchedRW = [WriteStore] in {
1338 def PUSHA32 : I<0x60, RawFrm, (outs), (ins), "pushal", []>,
1339 OpSize32, Requires<[Not64BitMode]>;
1340 def PUSHA16 : I<0x60, RawFrm, (outs), (ins), "pushaw", []>,
1341 OpSize16, Requires<[Not64BitMode]>;
// BSWAP: in-place byte reversal ($src tied to $dst).
1344 let Constraints = "$src = $dst", SchedRW = [WriteBSWAP32] in {
1345 // This instruction is a consequence of BSWAP32r observing operand size. The
1346 // encoding is valid, but the behavior is undefined.
1347 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
1348 def BSWAP16r_BAD : I<0xC8, AddRegFrm, (outs GR16:$dst), (ins GR16:$src),
1349 "bswap{w}\t$dst", []>, OpSize16, TB;
1350 // GR32 = bswap GR32
1351 def BSWAP32r : I<0xC8, AddRegFrm, (outs GR32:$dst), (ins GR32:$src),
1353 [(set GR32:$dst, (bswap GR32:$src))]>, OpSize32, TB;
1355 let SchedRW = [WriteBSWAP64] in
1356 def BSWAP64r : RI<0xC8, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
1358 [(set GR64:$dst, (bswap GR64:$src))]>, TB;
1359 } // Constraints = "$src = $dst", SchedRW
1361 // Bit scan instructions.
// BSF scans forward (lowest set bit), BSR scans in reverse (highest set bit).
// Both set EFLAGS (ZF when the source is zero), which is why EFLAGS appears
// as a second result in the patterns.
1362 let Defs = [EFLAGS] in {
1363 def BSF16rr : I<0xBC, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
1364 "bsf{w}\t{$src, $dst|$dst, $src}",
1365 [(set GR16:$dst, EFLAGS, (X86bsf GR16:$src))]>,
1366 PS, OpSize16, Sched<[WriteBSF]>;
1367 def BSF16rm : I<0xBC, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
1368 "bsf{w}\t{$src, $dst|$dst, $src}",
1369 [(set GR16:$dst, EFLAGS, (X86bsf (loadi16 addr:$src)))]>,
1370 PS, OpSize16, Sched<[WriteBSFLd]>;
1371 def BSF32rr : I<0xBC, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
1372 "bsf{l}\t{$src, $dst|$dst, $src}",
1373 [(set GR32:$dst, EFLAGS, (X86bsf GR32:$src))]>,
1374 PS, OpSize32, Sched<[WriteBSF]>;
1375 def BSF32rm : I<0xBC, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
1376 "bsf{l}\t{$src, $dst|$dst, $src}",
1377 [(set GR32:$dst, EFLAGS, (X86bsf (loadi32 addr:$src)))]>,
1378 PS, OpSize32, Sched<[WriteBSFLd]>;
1379 def BSF64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
1380 "bsf{q}\t{$src, $dst|$dst, $src}",
1381 [(set GR64:$dst, EFLAGS, (X86bsf GR64:$src))]>,
1382 PS, Sched<[WriteBSF]>;
1383 def BSF64rm : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
1384 "bsf{q}\t{$src, $dst|$dst, $src}",
1385 [(set GR64:$dst, EFLAGS, (X86bsf (loadi64 addr:$src)))]>,
1386 PS, Sched<[WriteBSFLd]>;
1388 def BSR16rr : I<0xBD, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
1389 "bsr{w}\t{$src, $dst|$dst, $src}",
1390 [(set GR16:$dst, EFLAGS, (X86bsr GR16:$src))]>,
1391 PS, OpSize16, Sched<[WriteBSR]>;
1392 def BSR16rm : I<0xBD, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
1393 "bsr{w}\t{$src, $dst|$dst, $src}",
1394 [(set GR16:$dst, EFLAGS, (X86bsr (loadi16 addr:$src)))]>,
1395 PS, OpSize16, Sched<[WriteBSRLd]>;
1396 def BSR32rr : I<0xBD, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
1397 "bsr{l}\t{$src, $dst|$dst, $src}",
1398 [(set GR32:$dst, EFLAGS, (X86bsr GR32:$src))]>,
1399 PS, OpSize32, Sched<[WriteBSR]>;
1400 def BSR32rm : I<0xBD, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
1401 "bsr{l}\t{$src, $dst|$dst, $src}",
1402 [(set GR32:$dst, EFLAGS, (X86bsr (loadi32 addr:$src)))]>,
1403 PS, OpSize32, Sched<[WriteBSRLd]>;
1404 def BSR64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
1405 "bsr{q}\t{$src, $dst|$dst, $src}",
1406 [(set GR64:$dst, EFLAGS, (X86bsr GR64:$src))]>,
1407 PS, Sched<[WriteBSR]>;
1408 def BSR64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
1409 "bsr{q}\t{$src, $dst|$dst, $src}",
1410 [(set GR64:$dst, EFLAGS, (X86bsr (loadi64 addr:$src)))]>,
1411 PS, Sched<[WriteBSRLd]>;
1412 } // Defs = [EFLAGS]
// String instructions (MOVS/STOS/SCAS/CMPS). All advance ESI/EDI implicitly
// in the direction given by DF, hence the implicit Uses/Defs lists.
1414 let SchedRW = [WriteMicrocoded] in {
1415 let Defs = [EDI,ESI], Uses = [EDI,ESI,DF] in {
1416 def MOVSB : I<0xA4, RawFrmDstSrc, (outs), (ins dstidx8:$dst, srcidx8:$src),
1417 "movsb\t{$src, $dst|$dst, $src}", []>;
1418 def MOVSW : I<0xA5, RawFrmDstSrc, (outs), (ins dstidx16:$dst, srcidx16:$src),
1419 "movsw\t{$src, $dst|$dst, $src}", []>, OpSize16;
1420 def MOVSL : I<0xA5, RawFrmDstSrc, (outs), (ins dstidx32:$dst, srcidx32:$src),
1421 "movs{l|d}\t{$src, $dst|$dst, $src}", []>, OpSize32;
1422 def MOVSQ : RI<0xA5, RawFrmDstSrc, (outs), (ins dstidx64:$dst, srcidx64:$src),
1423 "movsq\t{$src, $dst|$dst, $src}", []>,
1424 Requires<[In64BitMode]>;
// STOS stores AL/AX/EAX/RAX to [EDI] and advances EDI.
1427 let Defs = [EDI], Uses = [AL,EDI,DF] in
1428 def STOSB : I<0xAA, RawFrmDst, (outs), (ins dstidx8:$dst),
1429 "stosb\t{%al, $dst|$dst, al}", []>;
1430 let Defs = [EDI], Uses = [AX,EDI,DF] in
1431 def STOSW : I<0xAB, RawFrmDst, (outs), (ins dstidx16:$dst),
1432 "stosw\t{%ax, $dst|$dst, ax}", []>, OpSize16;
1433 let Defs = [EDI], Uses = [EAX,EDI,DF] in
1434 def STOSL : I<0xAB, RawFrmDst, (outs), (ins dstidx32:$dst),
1435 "stos{l|d}\t{%eax, $dst|$dst, eax}", []>, OpSize32;
1436 let Defs = [RDI], Uses = [RAX,RDI,DF] in
1437 def STOSQ : RI<0xAB, RawFrmDst, (outs), (ins dstidx64:$dst),
1438 "stosq\t{%rax, $dst|$dst, rax}", []>,
1439 Requires<[In64BitMode]>;
// SCAS compares the accumulator against [EDI], setting EFLAGS.
1441 let Defs = [EDI,EFLAGS], Uses = [AL,EDI,DF] in
1442 def SCASB : I<0xAE, RawFrmDst, (outs), (ins dstidx8:$dst),
1443 "scasb\t{$dst, %al|al, $dst}", []>;
1444 let Defs = [EDI,EFLAGS], Uses = [AX,EDI,DF] in
1445 def SCASW : I<0xAF, RawFrmDst, (outs), (ins dstidx16:$dst),
1446 "scasw\t{$dst, %ax|ax, $dst}", []>, OpSize16;
1447 let Defs = [EDI,EFLAGS], Uses = [EAX,EDI,DF] in
1448 def SCASL : I<0xAF, RawFrmDst, (outs), (ins dstidx32:$dst),
1449 "scas{l|d}\t{$dst, %eax|eax, $dst}", []>, OpSize32;
1450 let Defs = [EDI,EFLAGS], Uses = [RAX,EDI,DF] in
1451 def SCASQ : RI<0xAF, RawFrmDst, (outs), (ins dstidx64:$dst),
1452 "scasq\t{$dst, %rax|rax, $dst}", []>,
1453 Requires<[In64BitMode]>;
// CMPS compares [ESI] against [EDI], setting EFLAGS.
1455 let Defs = [EDI,ESI,EFLAGS], Uses = [EDI,ESI,DF] in {
1456 def CMPSB : I<0xA6, RawFrmDstSrc, (outs), (ins dstidx8:$dst, srcidx8:$src),
1457 "cmpsb\t{$dst, $src|$src, $dst}", []>;
1458 def CMPSW : I<0xA7, RawFrmDstSrc, (outs), (ins dstidx16:$dst, srcidx16:$src),
1459 "cmpsw\t{$dst, $src|$src, $dst}", []>, OpSize16;
1460 def CMPSL : I<0xA7, RawFrmDstSrc, (outs), (ins dstidx32:$dst, srcidx32:$src),
1461 "cmps{l|d}\t{$dst, $src|$src, $dst}", []>, OpSize32;
1462 def CMPSQ : RI<0xA7, RawFrmDstSrc, (outs), (ins dstidx64:$dst, srcidx64:$src),
1463 "cmpsq\t{$dst, $src|$src, $dst}", []>,
1464 Requires<[In64BitMode]>;
1468 //===----------------------------------------------------------------------===//
1469 // Move Instructions.
// Register-register moves; marked isMoveReg for the copy-propagation passes.
1471 let SchedRW = [WriteMove] in {
1472 let hasSideEffects = 0, isMoveReg = 1 in {
1473 def MOV8rr : I<0x88, MRMDestReg, (outs GR8 :$dst), (ins GR8 :$src),
1474 "mov{b}\t{$src, $dst|$dst, $src}", []>;
1475 def MOV16rr : I<0x89, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
1476 "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize16;
1477 def MOV32rr : I<0x89, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
1478 "mov{l}\t{$src, $dst|$dst, $src}", []>, OpSize32;
1479 def MOV64rr : RI<0x89, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
1480 "mov{q}\t{$src, $dst|$dst, $src}", []>;
// Immediate moves are rematerializable and as cheap as a register move.
1483 let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in {
1484 def MOV8ri : Ii8 <0xB0, AddRegFrm, (outs GR8 :$dst), (ins i8imm :$src),
1485 "mov{b}\t{$src, $dst|$dst, $src}",
1486 [(set GR8:$dst, imm:$src)]>;
1487 def MOV16ri : Ii16<0xB8, AddRegFrm, (outs GR16:$dst), (ins i16imm:$src),
1488 "mov{w}\t{$src, $dst|$dst, $src}",
1489 [(set GR16:$dst, imm:$src)]>, OpSize16;
1490 def MOV32ri : Ii32<0xB8, AddRegFrm, (outs GR32:$dst), (ins i32imm:$src),
1491 "mov{l}\t{$src, $dst|$dst, $src}",
1492 [(set GR32:$dst, relocImm:$src)]>, OpSize32;
// MOV64ri32 stores a sign-extended 32-bit immediate in a 64-bit register.
1493 def MOV64ri32 : RIi32S<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src),
1494 "mov{q}\t{$src, $dst|$dst, $src}",
1495 [(set GR64:$dst, i64immSExt32:$src)]>;
1497 let isReMaterializable = 1, isMoveImm = 1 in {
1498 def MOV64ri : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
1499 "movabs{q}\t{$src, $dst|$dst, $src}",
1500 [(set GR64:$dst, relocImm:$src)]>;
1503 // Longer forms that use a ModR/M byte. Needed for disassembler
1504 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
1505 def MOV8ri_alt : Ii8 <0xC6, MRM0r, (outs GR8 :$dst), (ins i8imm :$src),
1506 "mov{b}\t{$src, $dst|$dst, $src}", []>,
1507 FoldGenData<"MOV8ri">;
1508 def MOV16ri_alt : Ii16<0xC7, MRM0r, (outs GR16:$dst), (ins i16imm:$src),
1509 "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize16,
1510 FoldGenData<"MOV16ri">;
1511 def MOV32ri_alt : Ii32<0xC7, MRM0r, (outs GR32:$dst), (ins i32imm:$src),
1512 "mov{l}\t{$src, $dst|$dst, $src}", []>, OpSize32,
1513 FoldGenData<"MOV32ri">;
// Immediate-to-memory moves. The "_su" immediate fragments reject the
// immediate form when rematerializing into a register would be smaller.
1517 let SchedRW = [WriteStore] in {
1518 def MOV8mi : Ii8 <0xC6, MRM0m, (outs), (ins i8mem :$dst, i8imm :$src),
1519 "mov{b}\t{$src, $dst|$dst, $src}",
1520 [(store (i8 imm8_su:$src), addr:$dst)]>;
1521 def MOV16mi : Ii16<0xC7, MRM0m, (outs), (ins i16mem:$dst, i16imm:$src),
1522 "mov{w}\t{$src, $dst|$dst, $src}",
1523 [(store (i16 imm16_su:$src), addr:$dst)]>, OpSize16;
1524 def MOV32mi : Ii32<0xC7, MRM0m, (outs), (ins i32mem:$dst, i32imm:$src),
1525 "mov{l}\t{$src, $dst|$dst, $src}",
1526 [(store (i32 imm32_su:$src), addr:$dst)]>, OpSize32;
1527 def MOV64mi32 : RIi32S<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src),
1528 "mov{q}\t{$src, $dst|$dst, $src}",
1529 [(store i64immSExt32_su:$src, addr:$dst)]>,
1530 Requires<[In64BitMode]>;
1533 let hasSideEffects = 0 in {
1535 /// Memory offset versions of moves. The immediate is an address mode sized
1536 /// offset from the segment base.
1537 let SchedRW = [WriteALU] in {
1538 let mayLoad = 1 in {
1540 def MOV8ao32 : Ii32<0xA0, RawFrmMemOffs, (outs), (ins offset32_8:$src),
1541 "mov{b}\t{$src, %al|al, $src}", []>,
1544 def MOV16ao32 : Ii32<0xA1, RawFrmMemOffs, (outs), (ins offset32_16:$src),
1545 "mov{w}\t{$src, %ax|ax, $src}", []>,
1548 def MOV32ao32 : Ii32<0xA1, RawFrmMemOffs, (outs), (ins offset32_32:$src),
1549 "mov{l}\t{$src, %eax|eax, $src}", []>,
1552 def MOV64ao32 : RIi32<0xA1, RawFrmMemOffs, (outs), (ins offset32_64:$src),
1553 "mov{q}\t{$src, %rax|rax, $src}", []>,
1557 def MOV8ao16 : Ii16<0xA0, RawFrmMemOffs, (outs), (ins offset16_8:$src),
1558 "mov{b}\t{$src, %al|al, $src}", []>, AdSize16;
1560 def MOV16ao16 : Ii16<0xA1, RawFrmMemOffs, (outs), (ins offset16_16:$src),
1561 "mov{w}\t{$src, %ax|ax, $src}", []>,
1564 def MOV32ao16 : Ii16<0xA1, RawFrmMemOffs, (outs), (ins offset16_32:$src),
1565 "mov{l}\t{$src, %eax|eax, $src}", []>,
1568 let mayStore = 1 in {
1570 def MOV8o32a : Ii32<0xA2, RawFrmMemOffs, (outs), (ins offset32_8:$dst),
1571 "mov{b}\t{%al, $dst|$dst, al}", []>, AdSize32;
1573 def MOV16o32a : Ii32<0xA3, RawFrmMemOffs, (outs), (ins offset32_16:$dst),
1574 "mov{w}\t{%ax, $dst|$dst, ax}", []>,
1577 def MOV32o32a : Ii32<0xA3, RawFrmMemOffs, (outs), (ins offset32_32:$dst),
1578 "mov{l}\t{%eax, $dst|$dst, eax}", []>,
1581 def MOV64o32a : RIi32<0xA3, RawFrmMemOffs, (outs), (ins offset32_64:$dst),
1582 "mov{q}\t{%rax, $dst|$dst, rax}", []>,
1586 def MOV8o16a : Ii16<0xA2, RawFrmMemOffs, (outs), (ins offset16_8:$dst),
1587 "mov{b}\t{%al, $dst|$dst, al}", []>, AdSize16;
1589 def MOV16o16a : Ii16<0xA3, RawFrmMemOffs, (outs), (ins offset16_16:$dst),
1590 "mov{w}\t{%ax, $dst|$dst, ax}", []>,
1593 def MOV32o16a : Ii16<0xA3, RawFrmMemOffs, (outs), (ins offset16_32:$dst),
1594 "mov{l}\t{%eax, $dst|$dst, eax}", []>,
1598 // These forms all have full 64-bit absolute addresses in their instructions
1599 // and use the movabs mnemonic to indicate this specific form.
1600 let mayLoad = 1 in {
1602 def MOV8ao64 : Ii64<0xA0, RawFrmMemOffs, (outs), (ins offset64_8:$src),
1603 "movabs{b}\t{$src, %al|al, $src}", []>,
1606 def MOV16ao64 : Ii64<0xA1, RawFrmMemOffs, (outs), (ins offset64_16:$src),
1607 "movabs{w}\t{$src, %ax|ax, $src}", []>,
1610 def MOV32ao64 : Ii64<0xA1, RawFrmMemOffs, (outs), (ins offset64_32:$src),
1611 "movabs{l}\t{$src, %eax|eax, $src}", []>,
1614 def MOV64ao64 : RIi64<0xA1, RawFrmMemOffs, (outs), (ins offset64_64:$src),
1615 "movabs{q}\t{$src, %rax|rax, $src}", []>,
1619 let mayStore = 1 in {
1621 def MOV8o64a : Ii64<0xA2, RawFrmMemOffs, (outs), (ins offset64_8:$dst),
1622 "movabs{b}\t{%al, $dst|$dst, al}", []>,
1625 def MOV16o64a : Ii64<0xA3, RawFrmMemOffs, (outs), (ins offset64_16:$dst),
1626 "movabs{w}\t{%ax, $dst|$dst, ax}", []>,
1629 def MOV32o64a : Ii64<0xA3, RawFrmMemOffs, (outs), (ins offset64_32:$dst),
1630 "movabs{l}\t{%eax, $dst|$dst, eax}", []>,
1633 def MOV64o64a : RIi64<0xA3, RawFrmMemOffs, (outs), (ins offset64_64:$dst),
1634 "movabs{q}\t{%rax, $dst|$dst, rax}", []>,
1638 } // hasSideEffects = 0
1640 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
1641 SchedRW = [WriteMove], isMoveReg = 1 in {
1642 def MOV8rr_REV : I<0x8A, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src),
1643 "mov{b}\t{$src, $dst|$dst, $src}", []>,
1644 FoldGenData<"MOV8rr">;
1645 def MOV16rr_REV : I<0x8B, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
1646 "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize16,
1647 FoldGenData<"MOV16rr">;
1648 def MOV32rr_REV : I<0x8B, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
1649 "mov{l}\t{$src, $dst|$dst, $src}", []>, OpSize32,
1650 FoldGenData<"MOV32rr">;
1651 def MOV64rr_REV : RI<0x8B, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
1652 "mov{q}\t{$src, $dst|$dst, $src}", []>,
1653 FoldGenData<"MOV64rr">;
1656 // Reversed version with ".s" suffix for GAS compatibility.
1657 def : InstAlias<"mov{b}.s\t{$src, $dst|$dst, $src}",
1658 (MOV8rr_REV GR8:$dst, GR8:$src), 0>;
1659 def : InstAlias<"mov{w}.s\t{$src, $dst|$dst, $src}",
1660 (MOV16rr_REV GR16:$dst, GR16:$src), 0>;
1661 def : InstAlias<"mov{l}.s\t{$src, $dst|$dst, $src}",
1662 (MOV32rr_REV GR32:$dst, GR32:$src), 0>;
1663 def : InstAlias<"mov{q}.s\t{$src, $dst|$dst, $src}",
1664 (MOV64rr_REV GR64:$dst, GR64:$src), 0>;
1665 def : InstAlias<"mov.s\t{$src, $dst|$dst, $src}",
1666 (MOV8rr_REV GR8:$dst, GR8:$src), 0, "att">;
1667 def : InstAlias<"mov.s\t{$src, $dst|$dst, $src}",
1668 (MOV16rr_REV GR16:$dst, GR16:$src), 0, "att">;
1669 def : InstAlias<"mov.s\t{$src, $dst|$dst, $src}",
1670 (MOV32rr_REV GR32:$dst, GR32:$src), 0, "att">;
1671 def : InstAlias<"mov.s\t{$src, $dst|$dst, $src}",
1672 (MOV64rr_REV GR64:$dst, GR64:$src), 0, "att">;
1674 let canFoldAsLoad = 1, isReMaterializable = 1, SchedRW = [WriteLoad] in {
1675 def MOV8rm : I<0x8A, MRMSrcMem, (outs GR8 :$dst), (ins i8mem :$src),
1676 "mov{b}\t{$src, $dst|$dst, $src}",
1677 [(set GR8:$dst, (loadi8 addr:$src))]>;
1678 def MOV16rm : I<0x8B, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
1679 "mov{w}\t{$src, $dst|$dst, $src}",
1680 [(set GR16:$dst, (loadi16 addr:$src))]>, OpSize16;
1681 def MOV32rm : I<0x8B, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
1682 "mov{l}\t{$src, $dst|$dst, $src}",
1683 [(set GR32:$dst, (loadi32 addr:$src))]>, OpSize32;
1684 def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
1685 "mov{q}\t{$src, $dst|$dst, $src}",
1686 [(set GR64:$dst, (load addr:$src))]>;
1689 let SchedRW = [WriteStore] in {
1690 def MOV8mr : I<0x88, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src),
1691 "mov{b}\t{$src, $dst|$dst, $src}",
1692 [(store GR8:$src, addr:$dst)]>;
1693 def MOV16mr : I<0x89, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
1694 "mov{w}\t{$src, $dst|$dst, $src}",
1695 [(store GR16:$src, addr:$dst)]>, OpSize16;
1696 def MOV32mr : I<0x89, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
1697 "mov{l}\t{$src, $dst|$dst, $src}",
1698 [(store GR32:$src, addr:$dst)]>, OpSize32;
1699 def MOV64mr : RI<0x89, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
1700 "mov{q}\t{$src, $dst|$dst, $src}",
1701 [(store GR64:$src, addr:$dst)]>;
1704 // Versions of MOV8rr, MOV8mr, and MOV8rm that use i8mem_NOREX and GR8_NOREX so
1705 // that they can be used for copying and storing h registers, which can't be
1706 // encoded when a REX prefix is present.
1707 let isCodeGenOnly = 1 in {
1708 let hasSideEffects = 0, isMoveReg = 1 in
1709 def MOV8rr_NOREX : I<0x88, MRMDestReg,
1710 (outs GR8_NOREX:$dst), (ins GR8_NOREX:$src),
1711 "mov{b}\t{$src, $dst|$dst, $src}", []>,
1713 let mayStore = 1, hasSideEffects = 0 in
1714 def MOV8mr_NOREX : I<0x88, MRMDestMem,
1715 (outs), (ins i8mem_NOREX:$dst, GR8_NOREX:$src),
1716 "mov{b}\t{$src, $dst|$dst, $src}", []>,
1717 Sched<[WriteStore]>;
1718 let mayLoad = 1, hasSideEffects = 0,
1719 canFoldAsLoad = 1, isReMaterializable = 1 in
1720 def MOV8rm_NOREX : I<0x8A, MRMSrcMem,
1721 (outs GR8_NOREX:$dst), (ins i8mem_NOREX:$src),
1722 "mov{b}\t{$src, $dst|$dst, $src}", []>,
1727 // Condition code ops, incl. set if equal/not equal/...
1728 let SchedRW = [WriteLAHFSAHF] in {
1729 let Defs = [EFLAGS], Uses = [AH] in
1730 def SAHF : I<0x9E, RawFrm, (outs), (ins), "sahf",
1731 [(set EFLAGS, (X86sahf AH))]>,
1732 Requires<[HasLAHFSAHF]>;
1733 let Defs = [AH], Uses = [EFLAGS], hasSideEffects = 0 in
1734 def LAHF : I<0x9F, RawFrm, (outs), (ins), "lahf", []>, // AH = flags
1735 Requires<[HasLAHFSAHF]>;
1738 //===----------------------------------------------------------------------===//
1739 // Bit tests instructions: BT, BTS, BTR, BTC.
1741 let Defs = [EFLAGS] in {
1742 let SchedRW = [WriteBitTest] in {
1743 def BT16rr : I<0xA3, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2),
1744 "bt{w}\t{$src2, $src1|$src1, $src2}",
1745 [(set EFLAGS, (X86bt GR16:$src1, GR16:$src2))]>,
1746 OpSize16, TB, NotMemoryFoldable;
1747 def BT32rr : I<0xA3, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2),
1748 "bt{l}\t{$src2, $src1|$src1, $src2}",
1749 [(set EFLAGS, (X86bt GR32:$src1, GR32:$src2))]>,
1750 OpSize32, TB, NotMemoryFoldable;
1751 def BT64rr : RI<0xA3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
1752 "bt{q}\t{$src2, $src1|$src1, $src2}",
1753 [(set EFLAGS, (X86bt GR64:$src1, GR64:$src2))]>, TB,
1757 // Unlike with the register+register form, the memory+register form of the
1758 // bt instruction does not ignore the high bits of the index. From ISel's
1759 // perspective, this is pretty bizarre. Make these instructions disassembly
1760 // only for now. These instructions are also slow on modern CPUs so that's
1761 // another reason to avoid generating them.
1763 let mayLoad = 1, hasSideEffects = 0, SchedRW = [WriteBitTestRegLd] in {
1764 def BT16mr : I<0xA3, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
1765 "bt{w}\t{$src2, $src1|$src1, $src2}",
1766 []>, OpSize16, TB, NotMemoryFoldable;
1767 def BT32mr : I<0xA3, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
1768 "bt{l}\t{$src2, $src1|$src1, $src2}",
1769 []>, OpSize32, TB, NotMemoryFoldable;
1770 def BT64mr : RI<0xA3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
1771 "bt{q}\t{$src2, $src1|$src1, $src2}",
1772 []>, TB, NotMemoryFoldable;
1775 let SchedRW = [WriteBitTest] in {
1776 def BT16ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR16:$src1, i16i8imm:$src2),
1777 "bt{w}\t{$src2, $src1|$src1, $src2}",
1778 [(set EFLAGS, (X86bt GR16:$src1, i16immSExt8:$src2))]>,
1780 def BT32ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR32:$src1, i32i8imm:$src2),
1781 "bt{l}\t{$src2, $src1|$src1, $src2}",
1782 [(set EFLAGS, (X86bt GR32:$src1, i32immSExt8:$src2))]>,
1784 def BT64ri8 : RIi8<0xBA, MRM4r, (outs), (ins GR64:$src1, i64i8imm:$src2),
1785 "bt{q}\t{$src2, $src1|$src1, $src2}",
1786 [(set EFLAGS, (X86bt GR64:$src1, i64immSExt8:$src2))]>, TB;
1789 // Note that these instructions aren't slow because that only applies when the
1790 // other operand is in a register. When it's an immediate, bt is still fast.
1791 let SchedRW = [WriteBitTestImmLd] in {
1792 def BT16mi8 : Ii8<0xBA, MRM4m, (outs), (ins i16mem:$src1, i16i8imm:$src2),
1793 "bt{w}\t{$src2, $src1|$src1, $src2}",
1794 [(set EFLAGS, (X86bt (loadi16 addr:$src1),
1795 i16immSExt8:$src2))]>,
1797 def BT32mi8 : Ii8<0xBA, MRM4m, (outs), (ins i32mem:$src1, i32i8imm:$src2),
1798 "bt{l}\t{$src2, $src1|$src1, $src2}",
1799 [(set EFLAGS, (X86bt (loadi32 addr:$src1),
1800 i32immSExt8:$src2))]>,
1802 def BT64mi8 : RIi8<0xBA, MRM4m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
1803 "bt{q}\t{$src2, $src1|$src1, $src2}",
1804 [(set EFLAGS, (X86bt (loadi64 addr:$src1),
1805 i64immSExt8:$src2))]>, TB,
1806 Requires<[In64BitMode]>;
1809 let hasSideEffects = 0 in {
1810 let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
1811 def BTC16rr : I<0xBB, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
1812 "btc{w}\t{$src2, $src1|$src1, $src2}", []>,
1813 OpSize16, TB, NotMemoryFoldable;
1814 def BTC32rr : I<0xBB, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
1815 "btc{l}\t{$src2, $src1|$src1, $src2}", []>,
1816 OpSize32, TB, NotMemoryFoldable;
1817 def BTC64rr : RI<0xBB, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1818 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
1822 let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetRegRMW] in {
1823 def BTC16mr : I<0xBB, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
1824 "btc{w}\t{$src2, $src1|$src1, $src2}", []>,
1825 OpSize16, TB, NotMemoryFoldable;
1826 def BTC32mr : I<0xBB, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
1827 "btc{l}\t{$src2, $src1|$src1, $src2}", []>,
1828 OpSize32, TB, NotMemoryFoldable;
1829 def BTC64mr : RI<0xBB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
1830 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
1834 let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
1835 def BTC16ri8 : Ii8<0xBA, MRM7r, (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
1836 "btc{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize16, TB;
1837 def BTC32ri8 : Ii8<0xBA, MRM7r, (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
1838 "btc{l}\t{$src2, $src1|$src1, $src2}", []>, OpSize32, TB;
1839 def BTC64ri8 : RIi8<0xBA, MRM7r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
1840 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
1843 let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetImmRMW] in {
1844 def BTC16mi8 : Ii8<0xBA, MRM7m, (outs), (ins i16mem:$src1, i16i8imm:$src2),
1845 "btc{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize16, TB;
1846 def BTC32mi8 : Ii8<0xBA, MRM7m, (outs), (ins i32mem:$src1, i32i8imm:$src2),
1847 "btc{l}\t{$src2, $src1|$src1, $src2}", []>, OpSize32, TB;
1848 def BTC64mi8 : RIi8<0xBA, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
1849 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
1850 Requires<[In64BitMode]>;
1853 let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
1854 def BTR16rr : I<0xB3, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
1855 "btr{w}\t{$src2, $src1|$src1, $src2}", []>,
1856 OpSize16, TB, NotMemoryFoldable;
1857 def BTR32rr : I<0xB3, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
1858 "btr{l}\t{$src2, $src1|$src1, $src2}", []>,
1859 OpSize32, TB, NotMemoryFoldable;
1860 def BTR64rr : RI<0xB3, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1861 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
1865 let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetRegRMW] in {
1866 def BTR16mr : I<0xB3, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
1867 "btr{w}\t{$src2, $src1|$src1, $src2}", []>,
1868 OpSize16, TB, NotMemoryFoldable;
1869 def BTR32mr : I<0xB3, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
1870 "btr{l}\t{$src2, $src1|$src1, $src2}", []>,
1871 OpSize32, TB, NotMemoryFoldable;
1872 def BTR64mr : RI<0xB3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
1873 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
1877 let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
1878 def BTR16ri8 : Ii8<0xBA, MRM6r, (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
1879 "btr{w}\t{$src2, $src1|$src1, $src2}", []>,
1881 def BTR32ri8 : Ii8<0xBA, MRM6r, (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
1882 "btr{l}\t{$src2, $src1|$src1, $src2}", []>,
1884 def BTR64ri8 : RIi8<0xBA, MRM6r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
1885 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
1888 let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetImmRMW] in {
1889 def BTR16mi8 : Ii8<0xBA, MRM6m, (outs), (ins i16mem:$src1, i16i8imm:$src2),
1890 "btr{w}\t{$src2, $src1|$src1, $src2}", []>,
1892 def BTR32mi8 : Ii8<0xBA, MRM6m, (outs), (ins i32mem:$src1, i32i8imm:$src2),
1893 "btr{l}\t{$src2, $src1|$src1, $src2}", []>,
1895 def BTR64mi8 : RIi8<0xBA, MRM6m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
1896 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
1897 Requires<[In64BitMode]>;
1900 let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
1901 def BTS16rr : I<0xAB, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
1902 "bts{w}\t{$src2, $src1|$src1, $src2}", []>,
1903 OpSize16, TB, NotMemoryFoldable;
1904 def BTS32rr : I<0xAB, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
1905 "bts{l}\t{$src2, $src1|$src1, $src2}", []>,
1906 OpSize32, TB, NotMemoryFoldable;
1907 def BTS64rr : RI<0xAB, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1908 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
1912 let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetRegRMW] in {
1913 def BTS16mr : I<0xAB, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
1914 "bts{w}\t{$src2, $src1|$src1, $src2}", []>,
1915 OpSize16, TB, NotMemoryFoldable;
1916 def BTS32mr : I<0xAB, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
1917 "bts{l}\t{$src2, $src1|$src1, $src2}", []>,
1918 OpSize32, TB, NotMemoryFoldable;
1919 def BTS64mr : RI<0xAB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
1920 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
1924 let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
1925 def BTS16ri8 : Ii8<0xBA, MRM5r, (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
1926 "bts{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize16, TB;
1927 def BTS32ri8 : Ii8<0xBA, MRM5r, (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
1928 "bts{l}\t{$src2, $src1|$src1, $src2}", []>, OpSize32, TB;
1929 def BTS64ri8 : RIi8<0xBA, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
1930 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
1933 let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetImmRMW] in {
1934 def BTS16mi8 : Ii8<0xBA, MRM5m, (outs), (ins i16mem:$src1, i16i8imm:$src2),
1935 "bts{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize16, TB;
1936 def BTS32mi8 : Ii8<0xBA, MRM5m, (outs), (ins i32mem:$src1, i32i8imm:$src2),
1937 "bts{l}\t{$src2, $src1|$src1, $src2}", []>, OpSize32, TB;
1938 def BTS64mi8 : RIi8<0xBA, MRM5m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
1939 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
1940 Requires<[In64BitMode]>;
1942 } // hasSideEffects = 0
1943 } // Defs = [EFLAGS]
1946 //===----------------------------------------------------------------------===//
1950 // Atomic swap. These are just normal xchg instructions. But since a memory
1951 // operand is referenced, the atomicity is ensured.
1952 multiclass ATOMIC_SWAP<bits<8> opc8, bits<8> opc, string mnemonic, string frag> {
1953 let Constraints = "$val = $dst", SchedRW = [WriteALULd, WriteRMW] in {
1954 def NAME#8rm : I<opc8, MRMSrcMem, (outs GR8:$dst),
1955 (ins GR8:$val, i8mem:$ptr),
1956 !strconcat(mnemonic, "{b}\t{$val, $ptr|$ptr, $val}"),
1959 (!cast<PatFrag>(frag # "_8") addr:$ptr, GR8:$val))]>;
1960 def NAME#16rm : I<opc, MRMSrcMem, (outs GR16:$dst),
1961 (ins GR16:$val, i16mem:$ptr),
1962 !strconcat(mnemonic, "{w}\t{$val, $ptr|$ptr, $val}"),
1965 (!cast<PatFrag>(frag # "_16") addr:$ptr, GR16:$val))]>,
1967 def NAME#32rm : I<opc, MRMSrcMem, (outs GR32:$dst),
1968 (ins GR32:$val, i32mem:$ptr),
1969 !strconcat(mnemonic, "{l}\t{$val, $ptr|$ptr, $val}"),
1972 (!cast<PatFrag>(frag # "_32") addr:$ptr, GR32:$val))]>,
1974 def NAME#64rm : RI<opc, MRMSrcMem, (outs GR64:$dst),
1975 (ins GR64:$val, i64mem:$ptr),
1976 !strconcat(mnemonic, "{q}\t{$val, $ptr|$ptr, $val}"),
1979 (!cast<PatFrag>(frag # "_64") addr:$ptr, GR64:$val))]>;
1983 defm XCHG : ATOMIC_SWAP<0x86, 0x87, "xchg", "atomic_swap">, NotMemoryFoldable;
1985 // Swap between registers.
1986 let SchedRW = [WriteXCHG] in {
1987 let Constraints = "$src1 = $dst1, $src2 = $dst2", hasSideEffects = 0 in {
1988 def XCHG8rr : I<0x86, MRMSrcReg, (outs GR8:$dst1, GR8:$dst2),
1989 (ins GR8:$src1, GR8:$src2),
1990 "xchg{b}\t{$src2, $src1|$src1, $src2}", []>, NotMemoryFoldable;
1991 def XCHG16rr : I<0x87, MRMSrcReg, (outs GR16:$dst1, GR16:$dst2),
1992 (ins GR16:$src1, GR16:$src2),
1993 "xchg{w}\t{$src2, $src1|$src1, $src2}", []>,
1994 OpSize16, NotMemoryFoldable;
1995 def XCHG32rr : I<0x87, MRMSrcReg, (outs GR32:$dst1, GR32:$dst2),
1996 (ins GR32:$src1, GR32:$src2),
1997 "xchg{l}\t{$src2, $src1|$src1, $src2}", []>,
1998 OpSize32, NotMemoryFoldable;
1999 def XCHG64rr : RI<0x87, MRMSrcReg, (outs GR64:$dst1, GR64:$dst2),
2000 (ins GR64:$src1 ,GR64:$src2),
2001 "xchg{q}\t{$src2, $src1|$src1, $src2}", []>, NotMemoryFoldable;
2004 // Swap between EAX and other registers.
2005 let Constraints = "$src = $dst", hasSideEffects = 0 in {
2006 let Uses = [AX], Defs = [AX] in
2007 def XCHG16ar : I<0x90, AddRegFrm, (outs GR16:$dst), (ins GR16:$src),
2008 "xchg{w}\t{$src, %ax|ax, $src}", []>, OpSize16;
2009 let Uses = [EAX], Defs = [EAX] in
2010 def XCHG32ar : I<0x90, AddRegFrm, (outs GR32:$dst), (ins GR32:$src),
2011 "xchg{l}\t{$src, %eax|eax, $src}", []>, OpSize32;
2012 let Uses = [RAX], Defs = [RAX] in
2013 def XCHG64ar : RI<0x90, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
2014 "xchg{q}\t{$src, %rax|rax, $src}", []>;
2018 let hasSideEffects = 0, Constraints = "$src1 = $dst1, $src2 = $dst2",
2019 Defs = [EFLAGS], SchedRW = [WriteXCHG] in {
2020 def XADD8rr : I<0xC0, MRMDestReg, (outs GR8:$dst1, GR8:$dst2),
2021 (ins GR8:$src1, GR8:$src2),
2022 "xadd{b}\t{$src2, $src1|$src1, $src2}", []>, TB;
2023 def XADD16rr : I<0xC1, MRMDestReg, (outs GR16:$dst1, GR16:$dst2),
2024 (ins GR16:$src1, GR16:$src2),
2025 "xadd{w}\t{$src2, $src1|$src1, $src2}", []>, TB, OpSize16;
2026 def XADD32rr : I<0xC1, MRMDestReg, (outs GR32:$dst1, GR32:$dst2),
2027 (ins GR32:$src1, GR32:$src2),
2028 "xadd{l}\t{$src2, $src1|$src1, $src2}", []>, TB, OpSize32;
2029 def XADD64rr : RI<0xC1, MRMDestReg, (outs GR64:$dst1, GR64:$dst2),
2030 (ins GR64:$src1, GR64:$src2),
2031 "xadd{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
2034 let mayLoad = 1, mayStore = 1, hasSideEffects = 0, Constraints = "$val = $dst",
2035 Defs = [EFLAGS], SchedRW = [WriteALULd, WriteRMW] in {
2036 def XADD8rm : I<0xC0, MRMSrcMem, (outs GR8:$dst),
2037 (ins GR8:$val, i8mem:$ptr),
2038 "xadd{b}\t{$val, $ptr|$ptr, $val}", []>, TB;
2039 def XADD16rm : I<0xC1, MRMSrcMem, (outs GR16:$dst),
2040 (ins GR16:$val, i16mem:$ptr),
2041 "xadd{w}\t{$val, $ptr|$ptr, $val}", []>, TB,
2043 def XADD32rm : I<0xC1, MRMSrcMem, (outs GR32:$dst),
2044 (ins GR32:$val, i32mem:$ptr),
2045 "xadd{l}\t{$val, $ptr|$ptr, $val}", []>, TB,
2047 def XADD64rm : RI<0xC1, MRMSrcMem, (outs GR64:$dst),
2048 (ins GR64:$val, i64mem:$ptr),
2049 "xadd{q}\t{$val, $ptr|$ptr, $val}", []>, TB;
2053 let SchedRW = [WriteCMPXCHG], hasSideEffects = 0 in {
2054 let Defs = [AL, EFLAGS], Uses = [AL] in
2055 def CMPXCHG8rr : I<0xB0, MRMDestReg, (outs GR8:$dst), (ins GR8:$src),
2056 "cmpxchg{b}\t{$src, $dst|$dst, $src}", []>, TB,
2058 let Defs = [AX, EFLAGS], Uses = [AX] in
2059 def CMPXCHG16rr : I<0xB1, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
2060 "cmpxchg{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize16,
2062 let Defs = [EAX, EFLAGS], Uses = [EAX] in
2063 def CMPXCHG32rr : I<0xB1, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
2064 "cmpxchg{l}\t{$src, $dst|$dst, $src}", []>, TB, OpSize32,
2066 let Defs = [RAX, EFLAGS], Uses = [RAX] in
2067 def CMPXCHG64rr : RI<0xB1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
2068 "cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB,
2070 } // SchedRW, hasSideEffects
2072 let SchedRW = [WriteCMPXCHGRMW], mayLoad = 1, mayStore = 1,
2073 hasSideEffects = 0 in {
2074 let Defs = [AL, EFLAGS], Uses = [AL] in
2075 def CMPXCHG8rm : I<0xB0, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src),
2076 "cmpxchg{b}\t{$src, $dst|$dst, $src}", []>, TB,
2078 let Defs = [AX, EFLAGS], Uses = [AX] in
2079 def CMPXCHG16rm : I<0xB1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
2080 "cmpxchg{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize16,
2082 let Defs = [EAX, EFLAGS], Uses = [EAX] in
2083 def CMPXCHG32rm : I<0xB1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2084 "cmpxchg{l}\t{$src, $dst|$dst, $src}", []>, TB, OpSize32,
2086 let Defs = [RAX, EFLAGS], Uses = [RAX] in
2087 def CMPXCHG64rm : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
2088 "cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB,
2091 let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX] in
2092 def CMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i64mem:$dst),
2093 "cmpxchg8b\t$dst", []>, TB;
2095 let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX] in
2096 def CMPXCHG16B : RI<0xC7, MRM1m, (outs), (ins i128mem:$dst),
2097 "cmpxchg16b\t$dst", []>,
2098 TB, Requires<[HasCmpxchg16b, In64BitMode]>;
2099 } // SchedRW, mayLoad, mayStore, hasSideEffects
2102 // Lock instruction prefix
2103 let SchedRW = [WriteMicrocoded] in
2104 def LOCK_PREFIX : I<0xF0, RawFrm, (outs), (ins), "lock", []>;
2106 let SchedRW = [WriteNop] in {
2108 // Rex64 instruction prefix
2109 def REX64_PREFIX : I<0x48, RawFrm, (outs), (ins), "rex64", []>,
2110 Requires<[In64BitMode]>;
2112 // Data16 instruction prefix
2113 def DATA16_PREFIX : I<0x66, RawFrm, (outs), (ins), "data16", []>;
2116 // Repeat string operation instruction prefixes
2117 let Defs = [ECX], Uses = [ECX,DF], SchedRW = [WriteMicrocoded] in {
2118 // Repeat (used with INS, OUTS, MOVS, LODS and STOS)
2119 def REP_PREFIX : I<0xF3, RawFrm, (outs), (ins), "rep", []>;
2120 // Repeat while not equal (used with CMPS and SCAS)
2121 def REPNE_PREFIX : I<0xF2, RawFrm, (outs), (ins), "repne", []>;
2124 // String manipulation instructions
2125 let SchedRW = [WriteMicrocoded] in {
2126 let Defs = [AL,ESI], Uses = [ESI,DF] in
2127 def LODSB : I<0xAC, RawFrmSrc, (outs), (ins srcidx8:$src),
2128 "lodsb\t{$src, %al|al, $src}", []>;
2129 let Defs = [AX,ESI], Uses = [ESI,DF] in
2130 def LODSW : I<0xAD, RawFrmSrc, (outs), (ins srcidx16:$src),
2131 "lodsw\t{$src, %ax|ax, $src}", []>, OpSize16;
2132 let Defs = [EAX,ESI], Uses = [ESI,DF] in
2133 def LODSL : I<0xAD, RawFrmSrc, (outs), (ins srcidx32:$src),
2134 "lods{l|d}\t{$src, %eax|eax, $src}", []>, OpSize32;
2135 let Defs = [RAX,ESI], Uses = [ESI,DF] in
2136 def LODSQ : RI<0xAD, RawFrmSrc, (outs), (ins srcidx64:$src),
2137 "lodsq\t{$src, %rax|rax, $src}", []>,
2138 Requires<[In64BitMode]>;
2141 let SchedRW = [WriteSystem] in {
2142 let Defs = [ESI], Uses = [DX,ESI,DF] in {
2143 def OUTSB : I<0x6E, RawFrmSrc, (outs), (ins srcidx8:$src),
2144 "outsb\t{$src, %dx|dx, $src}", []>;
2145 def OUTSW : I<0x6F, RawFrmSrc, (outs), (ins srcidx16:$src),
2146 "outsw\t{$src, %dx|dx, $src}", []>, OpSize16;
2147 def OUTSL : I<0x6F, RawFrmSrc, (outs), (ins srcidx32:$src),
2148 "outs{l|d}\t{$src, %dx|dx, $src}", []>, OpSize32;
2151 let Defs = [EDI], Uses = [DX,EDI,DF] in {
2152 def INSB : I<0x6C, RawFrmDst, (outs), (ins dstidx8:$dst),
2153 "insb\t{%dx, $dst|$dst, dx}", []>;
2154 def INSW : I<0x6D, RawFrmDst, (outs), (ins dstidx16:$dst),
2155 "insw\t{%dx, $dst|$dst, dx}", []>, OpSize16;
2156 def INSL : I<0x6D, RawFrmDst, (outs), (ins dstidx32:$dst),
2157 "ins{l|d}\t{%dx, $dst|$dst, dx}", []>, OpSize32;
2161 // EFLAGS management instructions.
2162 let SchedRW = [WriteALU], Defs = [EFLAGS], Uses = [EFLAGS] in {
2163 def CLC : I<0xF8, RawFrm, (outs), (ins), "clc", []>;
2164 def STC : I<0xF9, RawFrm, (outs), (ins), "stc", []>;
2165 def CMC : I<0xF5, RawFrm, (outs), (ins), "cmc", []>;
2168 // DF management instructions.
2169 let SchedRW = [WriteALU], Defs = [DF] in {
2170 def CLD : I<0xFC, RawFrm, (outs), (ins), "cld", []>;
2171 def STD : I<0xFD, RawFrm, (outs), (ins), "std", []>;
2174 // Table lookup instructions
2175 let Uses = [AL,EBX], Defs = [AL], hasSideEffects = 0, mayLoad = 1 in
2176 def XLAT : I<0xD7, RawFrm, (outs), (ins), "xlatb", []>, Sched<[WriteLoad]>;
2178 let SchedRW = [WriteMicrocoded] in {
2179 // ASCII Adjust After Addition
2180 let Uses = [AL,EFLAGS], Defs = [AX,EFLAGS], hasSideEffects = 0 in
2181 def AAA : I<0x37, RawFrm, (outs), (ins), "aaa", []>,
2182 Requires<[Not64BitMode]>;
2184 // ASCII Adjust AX Before Division
2185 let Uses = [AX], Defs = [AX,EFLAGS], hasSideEffects = 0 in
2186 def AAD8i8 : Ii8<0xD5, RawFrm, (outs), (ins i8imm:$src),
2187 "aad\t$src", []>, Requires<[Not64BitMode]>;
2189 // ASCII Adjust AX After Multiply
2190 let Uses = [AL], Defs = [AX,EFLAGS], hasSideEffects = 0 in
2191 def AAM8i8 : Ii8<0xD4, RawFrm, (outs), (ins i8imm:$src),
2192 "aam\t$src", []>, Requires<[Not64BitMode]>;
2194 // ASCII Adjust AL After Subtraction - sets
2195 let Uses = [AL,EFLAGS], Defs = [AX,EFLAGS], hasSideEffects = 0 in
2196 def AAS : I<0x3F, RawFrm, (outs), (ins), "aas", []>,
2197 Requires<[Not64BitMode]>;
2199 // Decimal Adjust AL after Addition
2200 let Uses = [AL,EFLAGS], Defs = [AL,EFLAGS], hasSideEffects = 0 in
2201 def DAA : I<0x27, RawFrm, (outs), (ins), "daa", []>,
2202 Requires<[Not64BitMode]>;
2204 // Decimal Adjust AL after Subtraction
2205 let Uses = [AL,EFLAGS], Defs = [AL,EFLAGS], hasSideEffects = 0 in
2206 def DAS : I<0x2F, RawFrm, (outs), (ins), "das", []>,
2207 Requires<[Not64BitMode]>;
2210 let SchedRW = [WriteSystem] in {
2211 // Check Array Index Against Bounds
2212 // Note: "bound" does not have reversed operands in at&t syntax.
2213 def BOUNDS16rm : I<0x62, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
2214 "bound\t$dst, $src", []>, OpSize16,
2215 Requires<[Not64BitMode]>;
2216 def BOUNDS32rm : I<0x62, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
2217 "bound\t$dst, $src", []>, OpSize32,
2218 Requires<[Not64BitMode]>;
2220 // Adjust RPL Field of Segment Selector
2221 def ARPL16rr : I<0x63, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
2222 "arpl\t{$src, $dst|$dst, $src}", []>,
2223 Requires<[Not64BitMode]>, NotMemoryFoldable;
2225 def ARPL16mr : I<0x63, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
2226 "arpl\t{$src, $dst|$dst, $src}", []>,
2227 Requires<[Not64BitMode]>, NotMemoryFoldable;
2230 //===----------------------------------------------------------------------===//
2231 // MOVBE Instructions
2233 let Predicates = [HasMOVBE] in {
2234 let SchedRW = [WriteALULd] in {
2235 def MOVBE16rm : I<0xF0, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
2236 "movbe{w}\t{$src, $dst|$dst, $src}",
2237 [(set GR16:$dst, (bswap (loadi16 addr:$src)))]>,
2239 def MOVBE32rm : I<0xF0, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
2240 "movbe{l}\t{$src, $dst|$dst, $src}",
2241 [(set GR32:$dst, (bswap (loadi32 addr:$src)))]>,
2243 def MOVBE64rm : RI<0xF0, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
2244 "movbe{q}\t{$src, $dst|$dst, $src}",
2245 [(set GR64:$dst, (bswap (loadi64 addr:$src)))]>,
2248 let SchedRW = [WriteStore] in {
2249 def MOVBE16mr : I<0xF1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
2250 "movbe{w}\t{$src, $dst|$dst, $src}",
2251 [(store (bswap GR16:$src), addr:$dst)]>,
2253 def MOVBE32mr : I<0xF1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2254 "movbe{l}\t{$src, $dst|$dst, $src}",
2255 [(store (bswap GR32:$src), addr:$dst)]>,
2257 def MOVBE64mr : RI<0xF1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
2258 "movbe{q}\t{$src, $dst|$dst, $src}",
2259 [(store (bswap GR64:$src), addr:$dst)]>,
2264 //===----------------------------------------------------------------------===//
2265 // RDRAND Instruction
2267 let Predicates = [HasRDRAND], Defs = [EFLAGS], SchedRW = [WriteSystem] in {
2268 def RDRAND16r : I<0xC7, MRM6r, (outs GR16:$dst), (ins),
2269 "rdrand{w}\t$dst", [(set GR16:$dst, EFLAGS, (X86rdrand))]>,
2271 def RDRAND32r : I<0xC7, MRM6r, (outs GR32:$dst), (ins),
2272 "rdrand{l}\t$dst", [(set GR32:$dst, EFLAGS, (X86rdrand))]>,
2274 def RDRAND64r : RI<0xC7, MRM6r, (outs GR64:$dst), (ins),
2275 "rdrand{q}\t$dst", [(set GR64:$dst, EFLAGS, (X86rdrand))]>,
2279 //===----------------------------------------------------------------------===//
2280 // RDSEED Instruction
2282 let Predicates = [HasRDSEED], Defs = [EFLAGS], SchedRW = [WriteSystem] in {
2283 def RDSEED16r : I<0xC7, MRM7r, (outs GR16:$dst), (ins), "rdseed{w}\t$dst",
2284 [(set GR16:$dst, EFLAGS, (X86rdseed))]>, OpSize16, PS;
2285 def RDSEED32r : I<0xC7, MRM7r, (outs GR32:$dst), (ins), "rdseed{l}\t$dst",
2286 [(set GR32:$dst, EFLAGS, (X86rdseed))]>, OpSize32, PS;
2287 def RDSEED64r : RI<0xC7, MRM7r, (outs GR64:$dst), (ins), "rdseed{q}\t$dst",
2288 [(set GR64:$dst, EFLAGS, (X86rdseed))]>, PS;
2291 //===----------------------------------------------------------------------===//
2292 // LZCNT Instruction
// LZCNT is selected straight from the generic ctlz node (no target node
// needed). Every form also implicitly defines EFLAGS. Register forms use
// the WriteLZCNT scheduling class; memory (load-folded) forms use
// WriteLZCNTLd. All forms carry the XS (F3) prefix.
2294 let Predicates = [HasLZCNT], Defs = [EFLAGS] in {
2295 def LZCNT16rr : I<0xBD, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
2296 "lzcnt{w}\t{$src, $dst|$dst, $src}",
2297 [(set GR16:$dst, (ctlz GR16:$src)), (implicit EFLAGS)]>,
2298 XS, OpSize16, Sched<[WriteLZCNT]>;
2299 def LZCNT16rm : I<0xBD, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
2300 "lzcnt{w}\t{$src, $dst|$dst, $src}",
2301 [(set GR16:$dst, (ctlz (loadi16 addr:$src))),
2302 (implicit EFLAGS)]>, XS, OpSize16, Sched<[WriteLZCNTLd]>;
2304 def LZCNT32rr : I<0xBD, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
2305 "lzcnt{l}\t{$src, $dst|$dst, $src}",
2306 [(set GR32:$dst, (ctlz GR32:$src)), (implicit EFLAGS)]>,
2307 XS, OpSize32, Sched<[WriteLZCNT]>;
2308 def LZCNT32rm : I<0xBD, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
2309 "lzcnt{l}\t{$src, $dst|$dst, $src}",
2310 [(set GR32:$dst, (ctlz (loadi32 addr:$src))),
2311 (implicit EFLAGS)]>, XS, OpSize32, Sched<[WriteLZCNTLd]>;
// 64-bit forms use the RI class instead of I.
2313 def LZCNT64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
2314 "lzcnt{q}\t{$src, $dst|$dst, $src}",
2315 [(set GR64:$dst, (ctlz GR64:$src)), (implicit EFLAGS)]>,
2316 XS, Sched<[WriteLZCNT]>;
2317 def LZCNT64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
2318 "lzcnt{q}\t{$src, $dst|$dst, $src}",
2319 [(set GR64:$dst, (ctlz (loadi64 addr:$src))),
2320 (implicit EFLAGS)]>, XS, Sched<[WriteLZCNTLd]>;
2323 //===----------------------------------------------------------------------===//
// TZCNT (BMI): exact mirror of the LZCNT defs above — opcode 0xBC instead
// of 0xBD, selected from the generic cttz node, gated on HasBMI. Each form
// implicitly defines EFLAGS; register forms use WriteTZCNT, load-folded
// forms WriteTZCNTLd.
2326 let Predicates = [HasBMI], Defs = [EFLAGS] in {
2327 def TZCNT16rr : I<0xBC, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
2328 "tzcnt{w}\t{$src, $dst|$dst, $src}",
2329 [(set GR16:$dst, (cttz GR16:$src)), (implicit EFLAGS)]>,
2330 XS, OpSize16, Sched<[WriteTZCNT]>;
2331 def TZCNT16rm : I<0xBC, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
2332 "tzcnt{w}\t{$src, $dst|$dst, $src}",
2333 [(set GR16:$dst, (cttz (loadi16 addr:$src))),
2334 (implicit EFLAGS)]>, XS, OpSize16, Sched<[WriteTZCNTLd]>;
2336 def TZCNT32rr : I<0xBC, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
2337 "tzcnt{l}\t{$src, $dst|$dst, $src}",
2338 [(set GR32:$dst, (cttz GR32:$src)), (implicit EFLAGS)]>,
2339 XS, OpSize32, Sched<[WriteTZCNT]>;
2340 def TZCNT32rm : I<0xBC, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
2341 "tzcnt{l}\t{$src, $dst|$dst, $src}",
2342 [(set GR32:$dst, (cttz (loadi32 addr:$src))),
2343 (implicit EFLAGS)]>, XS, OpSize32, Sched<[WriteTZCNTLd]>;
// 64-bit forms use the RI class instead of I.
2345 def TZCNT64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
2346 "tzcnt{q}\t{$src, $dst|$dst, $src}",
2347 [(set GR64:$dst, (cttz GR64:$src)), (implicit EFLAGS)]>,
2348 XS, Sched<[WriteTZCNT]>;
2349 def TZCNT64rm : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
2350 "tzcnt{q}\t{$src, $dst|$dst, $src}",
2351 [(set GR64:$dst, (cttz (loadi64 addr:$src))),
2352 (implicit EFLAGS)]>, XS, Sched<[WriteTZCNTLd]>;
// bmi_bls - Shared skeleton for the BLS-family BMI ops (BLSR/BLSMSK/BLSI).
// All share opcode 0xF3; the caller's RegMRM/MemMRM format arguments select
// the operation via the ModRM reg field. The patterns are deliberately
// empty ([]) — selection is done by the explicit Pat<> records later in
// this file — and hasSideEffects = 0 marks the defs as pure.
2355 multiclass bmi_bls<string mnemonic, Format RegMRM, Format MemMRM,
2356 RegisterClass RC, X86MemOperand x86memop> {
2357 let hasSideEffects = 0 in {
// Register-register form.
2358 def rr : I<0xF3, RegMRM, (outs RC:$dst), (ins RC:$src),
2359 !strconcat(mnemonic, "\t{$src, $dst|$dst, $src}"), []>,
2360 T8PS, VEX_4V, Sched<[WriteBLS]>;
// Load-folded form; uses the folded variant of the WriteBLS class.
2362 def rm : I<0xF3, MemMRM, (outs RC:$dst), (ins x86memop:$src),
2363 !strconcat(mnemonic, "\t{$src, $dst|$dst, $src}"), []>,
2364 T8PS, VEX_4V, Sched<[WriteBLS.Folded]>;
// Instantiate the BLS family: ModRM reg field /1 = BLSR, /2 = BLSMSK,
// /3 = BLSI. 64-bit variants add VEX_W. All define EFLAGS.
2368 let Predicates = [HasBMI], Defs = [EFLAGS] in {
2369 defm BLSR32 : bmi_bls<"blsr{l}", MRM1r, MRM1m, GR32, i32mem>;
2370 defm BLSR64 : bmi_bls<"blsr{q}", MRM1r, MRM1m, GR64, i64mem>, VEX_W;
2371 defm BLSMSK32 : bmi_bls<"blsmsk{l}", MRM2r, MRM2m, GR32, i32mem>;
2372 defm BLSMSK64 : bmi_bls<"blsmsk{q}", MRM2r, MRM2m, GR64, i64mem>, VEX_W;
2373 defm BLSI32 : bmi_bls<"blsi{l}", MRM3r, MRM3m, GR32, i32mem>;
2374 defm BLSI64 : bmi_bls<"blsi{q}", MRM3r, MRM3m, GR64, i64mem>, VEX_W;
2377 //===----------------------------------------------------------------------===//
2378 // Pattern fragments to auto generate BMI instructions.
2379 //===----------------------------------------------------------------------===//
// Flag-producing OR that may only match when no user of the EFLAGS result
// (result #1 of the node) reads the carry flag.
2381 def or_flag_nocf : PatFrag<(ops node:$lhs, node:$rhs),
2382 (X86or_flag node:$lhs, node:$rhs), [{
2383 return hasNoCarryFlagUses(SDValue(N, 1));
// Same restriction for the flag-producing XOR.
2386 def xor_flag_nocf : PatFrag<(ops node:$lhs, node:$rhs),
2387 (X86xor_flag node:$lhs, node:$rhs), [{
2388 return hasNoCarryFlagUses(SDValue(N, 1));
// Selection patterns for the BLS family (the instruction defs above carry
// empty patterns). Identities matched:
//   x & (x - 1)  -> BLSR   (reset lowest set bit)
//   x ^ (x - 1)  -> BLSMSK (mask up to lowest set bit)
//   x & -x       -> BLSI   (isolate lowest set bit)
2391 let Predicates = [HasBMI] in {
2392 // FIXME: patterns for the load versions are not implemented
2393 def : Pat<(and GR32:$src, (add GR32:$src, -1)),
2394 (BLSR32rr GR32:$src)>;
2395 def : Pat<(and GR64:$src, (add GR64:$src, -1)),
2396 (BLSR64rr GR64:$src)>;
2398 def : Pat<(xor GR32:$src, (add GR32:$src, -1)),
2399 (BLSMSK32rr GR32:$src)>;
2400 def : Pat<(xor GR64:$src, (add GR64:$src, -1)),
2401 (BLSMSK64rr GR64:$src)>;
2403 def : Pat<(and GR32:$src, (ineg GR32:$src)),
2404 (BLSI32rr GR32:$src)>;
2405 def : Pat<(and GR64:$src, (ineg GR64:$src)),
2406 (BLSI64rr GR64:$src)>;
2408 // Versions to match flag producing ops.
2409 // X86and_flag nodes are rarely created. Those should use CMP+AND. We do
2410 // TESTrr matching in PostProcessISelDAG to allow BLSR/BLSI to be formed.
2411 def : Pat<(xor_flag_nocf GR32:$src, (add GR32:$src, -1)),
2412 (BLSMSK32rr GR32:$src)>;
2413 def : Pat<(xor_flag_nocf GR64:$src, (add GR64:$src, -1)),
2414 (BLSMSK64rr GR64:$src)>;
// bmi_bextr - VEX-encoded three-operand bit-field extract. The
// MRMSrcReg4VOp3/MRMSrcMem4VOp3 formats place the second source operand in
// VEX.vvvv. Both forms define EFLAGS implicitly and select from the SDNode
// passed in by the instantiation (X86bextr below).
2417 multiclass bmi_bextr<bits<8> opc, string mnemonic, RegisterClass RC,
2418 X86MemOperand x86memop, SDNode OpNode,
2419 PatFrag ld_frag, X86FoldableSchedWrite Sched> {
2420 def rr : I<opc, MRMSrcReg4VOp3, (outs RC:$dst), (ins RC:$src1, RC:$src2),
2421 !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2422 [(set RC:$dst, (OpNode RC:$src1, RC:$src2)), (implicit EFLAGS)]>,
2423 T8PS, VEX, Sched<[Sched]>;
// Load-folded form: the memory operand is the first source; default reads
// cover the address-operand slots ahead of the register source's
// ReadAfterFold.
2424 def rm : I<opc, MRMSrcMem4VOp3, (outs RC:$dst), (ins x86memop:$src1, RC:$src2),
2425 !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2426 [(set RC:$dst, (OpNode (ld_frag addr:$src1), RC:$src2)),
2427 (implicit EFLAGS)]>, T8PS, VEX,
2428 Sched<[Sched.Folded,
2430 ReadDefault, ReadDefault, ReadDefault, ReadDefault,
2433 Sched.ReadAfterFold]>;
// BEXTR instantiations: same 0xF7 opcode for both widths; the 64-bit
// variant adds VEX_W. Selected from the X86bextr node.
2436 let Predicates = [HasBMI], Defs = [EFLAGS] in {
2437 defm BEXTR32 : bmi_bextr<0xF7, "bextr{l}", GR32, i32mem,
2438 X86bextr, loadi32, WriteBEXTR>;
2439 defm BEXTR64 : bmi_bextr<0xF7, "bextr{q}", GR64, i64mem,
2440 X86bextr, loadi64, WriteBEXTR>, VEX_W;
// bmi_bzhi - Structurally identical to bmi_bextr above (VEX three-operand
// form, vvvv holds the second source, implicit EFLAGS def), but selects
// from the node/intrinsic passed as `Int` rather than a fixed SDNode.
2443 multiclass bmi_bzhi<bits<8> opc, string mnemonic, RegisterClass RC,
2444 X86MemOperand x86memop, Intrinsic Int,
2445 PatFrag ld_frag, X86FoldableSchedWrite Sched> {
2446 def rr : I<opc, MRMSrcReg4VOp3, (outs RC:$dst), (ins RC:$src1, RC:$src2),
2447 !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2448 [(set RC:$dst, (Int RC:$src1, RC:$src2)), (implicit EFLAGS)]>,
2449 T8PS, VEX, Sched<[Sched]>;
// Load-folded form with default reads for the address-operand slots.
2450 def rm : I<opc, MRMSrcMem4VOp3, (outs RC:$dst), (ins x86memop:$src1, RC:$src2),
2451 !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2452 [(set RC:$dst, (Int (ld_frag addr:$src1), RC:$src2)),
2453 (implicit EFLAGS)]>, T8PS, VEX,
2454 Sched<[Sched.Folded,
2456 ReadDefault, ReadDefault, ReadDefault, ReadDefault,
2459 Sched.ReadAfterFold]>;
// BZHI instantiations (BMI2): opcode 0xF5, 64-bit variant adds VEX_W,
// selected from the X86bzhi node.
2462 let Predicates = [HasBMI2], Defs = [EFLAGS] in {
2463 defm BZHI32 : bmi_bzhi<0xF5, "bzhi{l}", GR32, i32mem,
2464 X86bzhi, loadi32, WriteBZHI>;
2465 defm BZHI64 : bmi_bzhi<0xF5, "bzhi{q}", GR64, i64mem,
2466 X86bzhi, loadi64, WriteBZHI>, VEX_W;
// CountTrailingOnes - Transform an immediate into the (i8) count of its
// trailing one bits; used below to build BZHI's bit-index operand.
2469 def CountTrailingOnes : SDNodeXForm<imm, [{
2470 // Count the trailing ones in the immediate.
2471 return getI8Imm(countTrailingOnes(N->getZExtValue()), SDLoc(N));
// BEXTRMaskXForm - Build a BEXTR control value from a mask immediate: the
// trailing-ones count is shifted into bits 15:8 (the length field), leaving
// the start field (bits 7:0) as zero.
2474 def BEXTRMaskXForm : SDNodeXForm<imm, [{
2475 unsigned Length = countTrailingOnes(N->getZExtValue());
2476 return getI32Imm(Length << 8, SDLoc(N));
// AndMask64 - A 64-bit contiguous low-bit mask that does NOT fit in 32
// bits (so a plain 32-bit AND immediate cannot encode it).
2479 def AndMask64 : ImmLeaf<i64, [{
2480 return isMask_64(Imm) && !isUInt<32>(Imm);
2483 // Use BEXTR for 64-bit 'and' with large immediate 'mask'.
// Only when BMI2 and TBM are unavailable (those provide the cheaper BZHI /
// BEXTRI forms matched elsewhere). The mask is converted to a BEXTR control
// via BEXTRMaskXForm, materialized with a 32-bit move and widened with
// SUBREG_TO_REG.
2484 let Predicates = [HasBMI, NoBMI2, NoTBM] in {
2485 def : Pat<(and GR64:$src, AndMask64:$mask),
2486 (BEXTR64rr GR64:$src,
2487 (SUBREG_TO_REG (i64 0),
2488 (MOV32ri (BEXTRMaskXForm imm:$mask)), sub_32bit))>;
// Load-folded variant of the same transform.
2489 def : Pat<(and (loadi64 addr:$src), AndMask64:$mask),
2490 (BEXTR64rm addr:$src,
2491 (SUBREG_TO_REG (i64 0),
2492 (MOV32ri (BEXTRMaskXForm imm:$mask)), sub_32bit))>;
2495 // Use BZHI for 64-bit 'and' with large immediate 'mask'.
// BZHI takes a bit index rather than a mask, so the index (the number of
// trailing ones) is materialized as an 8-bit immediate and inserted into an
// undef 64-bit register.
2496 let Predicates = [HasBMI2, NoTBM] in {
2497 def : Pat<(and GR64:$src, AndMask64:$mask),
2498 (BZHI64rr GR64:$src,
2499 (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
2500 (MOV8ri (CountTrailingOnes imm:$mask)), sub_8bit))>;
// Load-folded variant.
2501 def : Pat<(and (loadi64 addr:$src), AndMask64:$mask),
2502 (BZHI64rm addr:$src,
2503 (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
2504 (MOV8ri (CountTrailingOnes imm:$mask)), sub_8bit))>;
// bmi_pdep_pext - Shared skeleton for PDEP/PEXT (both opcode 0xF5; the
// instantiations differ only in mandatory prefix, T8XD vs T8XS). Unlike the
// other BMI multiclasses these select directly from target intrinsics and
// do not touch EFLAGS.
2507 multiclass bmi_pdep_pext<string mnemonic, RegisterClass RC,
2508 X86MemOperand x86memop, Intrinsic Int,
2510 def rr : I<0xF5, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
2511 !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2512 [(set RC:$dst, (Int RC:$src1, RC:$src2))]>,
2513 VEX_4V, Sched<[WriteALU]>;
// Load-folded form: here the MEMORY operand is the second source.
2514 def rm : I<0xF5, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
2515 !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2516 [(set RC:$dst, (Int RC:$src1, (ld_frag addr:$src2)))]>,
2517 VEX_4V, Sched<[WriteALU.Folded, WriteALU.ReadAfterFold]>;
// PDEP uses the XD (F2) prefix, PEXT the XS (F3) prefix; 64-bit variants
// add VEX_W. Each maps 1:1 onto its x86 BMI2 intrinsic.
2520 let Predicates = [HasBMI2] in {
2521 defm PDEP32 : bmi_pdep_pext<"pdep{l}", GR32, i32mem,
2522 int_x86_bmi_pdep_32, loadi32>, T8XD;
2523 defm PDEP64 : bmi_pdep_pext<"pdep{q}", GR64, i64mem,
2524 int_x86_bmi_pdep_64, loadi64>, T8XD, VEX_W;
2525 defm PEXT32 : bmi_pdep_pext<"pext{l}", GR32, i32mem,
2526 int_x86_bmi_pext_32, loadi32>, T8XS;
2527 defm PEXT64 : bmi_pdep_pext<"pext{q}", GR64, i64mem,
2528 int_x86_bmi_pext_64, loadi64>, T8XS, VEX_W;
2531 //===----------------------------------------------------------------------===//
// TBM (AMD Trailing Bit Manipulation) instructions. All defs in this region
// are gated on HasTBM and define EFLAGS.
2534 let Predicates = [HasTBM], Defs = [EFLAGS] in {
// tbm_ternary_imm - XOP-encoded (map XOPA) op taking a register/memory
// source plus a 32-bit immediate control word; used for BEXTRI below. The
// `immoperator` parameter lets the 64-bit instantiation restrict matching
// to sign-extended-32-bit immediates.
2536 multiclass tbm_ternary_imm<bits<8> opc, RegisterClass RC, string OpcodeStr,
2537 X86MemOperand x86memop, PatFrag ld_frag,
2538 SDNode OpNode, Operand immtype,
2539 SDPatternOperator immoperator,
2540 X86FoldableSchedWrite Sched> {
2541 def ri : Ii32<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, immtype:$cntl),
2542 !strconcat(OpcodeStr,
2543 "\t{$cntl, $src1, $dst|$dst, $src1, $cntl}"),
2544 [(set RC:$dst, (OpNode RC:$src1, immoperator:$cntl))]>,
2545 XOP, XOPA, Sched<[Sched]>;
// Load-folded form.
2546 def mi : Ii32<opc, MRMSrcMem, (outs RC:$dst),
2547 (ins x86memop:$src1, immtype:$cntl),
2548 !strconcat(OpcodeStr,
2549 "\t{$cntl, $src1, $dst|$dst, $src1, $cntl}"),
2550 [(set RC:$dst, (OpNode (ld_frag addr:$src1), immoperator:$cntl))]>,
2551 XOP, XOPA, Sched<[Sched.Folded]>;
// BEXTRI: immediate-control form of BEXTR, selected from the same X86bextr
// node as the BMI register form.
2554 defm BEXTRI32 : tbm_ternary_imm<0x10, GR32, "bextr{l}", i32mem, loadi32,
2555 X86bextr, i32imm, imm, WriteBEXTR>;
// The 64-bit encoding still carries a 32-bit immediate (Imm32S), so only
// sign-extendable masks are matched (i64immSExt32).
2556 let ImmT = Imm32S in
2557 defm BEXTRI64 : tbm_ternary_imm<0x10, GR64, "bextr{q}", i64mem, loadi64,
2558 X86bextr, i64i32imm,
2559 i64immSExt32, WriteBEXTR>, VEX_W;
// tbm_binary_rm - One-source XOP (map XOP9) op; the Format arguments encode
// the operation in the ModRM reg field. Patterns are empty — these are
// selected by the explicit Pat<> records later in this file — and
// hasSideEffects = 0 marks the defs as pure.
2561 multiclass tbm_binary_rm<bits<8> opc, Format FormReg, Format FormMem,
2562 RegisterClass RC, string OpcodeStr,
2563 X86MemOperand x86memop, X86FoldableSchedWrite Sched> {
2564 let hasSideEffects = 0 in {
2565 def rr : I<opc, FormReg, (outs RC:$dst), (ins RC:$src),
2566 !strconcat(OpcodeStr,"\t{$src, $dst|$dst, $src}"), []>,
2567 XOP_4V, XOP9, Sched<[Sched]>;
2569 def rm : I<opc, FormMem, (outs RC:$dst), (ins x86memop:$src),
2570 !strconcat(OpcodeStr,"\t{$src, $dst|$dst, $src}"), []>,
2571 XOP_4V, XOP9, Sched<[Sched.Folded]>;
// tbm_binary_intr - Stamp out the 32- and 64-bit widths of one TBM binary
// op, deriving names (NAME#32/NAME#64) and mnemonic suffixes ({l}/{q}).
2575 multiclass tbm_binary_intr<bits<8> opc, string OpcodeStr,
2576 X86FoldableSchedWrite Sched,
2577 Format FormReg, Format FormMem> {
2578 defm NAME#32 : tbm_binary_rm<opc, FormReg, FormMem, GR32, OpcodeStr#"{l}",
2580 defm NAME#64 : tbm_binary_rm<opc, FormReg, FormMem, GR64, OpcodeStr#"{q}",
2581 i64mem, Sched>, VEX_W;
// The TBM binary family; each op is distinguished by its opcode byte plus
// the ModRM reg field carried by the MRMnr/MRMnm format arguments.
2584 defm BLCFILL : tbm_binary_intr<0x01, "blcfill", WriteALU, MRM1r, MRM1m>;
2585 defm BLCI : tbm_binary_intr<0x02, "blci", WriteALU, MRM6r, MRM6m>;
2586 defm BLCIC : tbm_binary_intr<0x01, "blcic", WriteALU, MRM5r, MRM5m>;
2587 defm BLCMSK : tbm_binary_intr<0x02, "blcmsk", WriteALU, MRM1r, MRM1m>;
2588 defm BLCS : tbm_binary_intr<0x01, "blcs", WriteALU, MRM3r, MRM3m>;
2589 defm BLSFILL : tbm_binary_intr<0x01, "blsfill", WriteALU, MRM2r, MRM2m>;
2590 defm BLSIC : tbm_binary_intr<0x01, "blsic", WriteALU, MRM6r, MRM6m>;
2591 defm T1MSKC : tbm_binary_intr<0x01, "t1mskc", WriteALU, MRM7r, MRM7m>;
2592 defm TZMSK : tbm_binary_intr<0x01, "tzmsk", WriteALU, MRM4r, MRM4m>;
2595 // Use BEXTRI for 64-bit 'and' with large immediate 'mask'.
// TBM version of the large-mask AND patterns: unlike the BEXTR/BZHI forms
// above, BEXTRI encodes the control word as an immediate, so no scratch
// register materialization is needed.
2596 let Predicates = [HasTBM] in {
2597 def : Pat<(and GR64:$src, AndMask64:$mask),
2598 (BEXTRI64ri GR64:$src, (BEXTRMaskXForm imm:$mask))>;
2600 def : Pat<(and (loadi64 addr:$src), AndMask64:$mask),
2601 (BEXTRI64mi addr:$src, (BEXTRMaskXForm imm:$mask))>;
2604 //===----------------------------------------------------------------------===//
2605 // Lightweight Profiling Instructions
// AMD LWP: XOP-encoded (map XOP9/XOPA), gated on HasLWP, scheduled as
// system instructions. Each instruction maps 1:1 onto an LWP intrinsic.
2607 let Predicates = [HasLWP], SchedRW = [WriteSystem] in {
// LLWPCB/SLWPCB - load/store the LWP control-block pointer (32-bit forms).
2609 def LLWPCB : I<0x12, MRM0r, (outs), (ins GR32:$src), "llwpcb\t$src",
2610 [(int_x86_llwpcb GR32:$src)]>, XOP, XOP9;
2611 def SLWPCB : I<0x12, MRM1r, (outs GR32:$dst), (ins), "slwpcb\t$dst",
2612 [(set GR32:$dst, (int_x86_slwpcb))]>, XOP, XOP9;
// 64-bit pointer variants (VEX_W).
2614 def LLWPCB64 : I<0x12, MRM0r, (outs), (ins GR64:$src), "llwpcb\t$src",
2615 [(int_x86_llwpcb GR64:$src)]>, XOP, XOP9, VEX_W;
2616 def SLWPCB64 : I<0x12, MRM1r, (outs GR64:$dst), (ins), "slwpcb\t$dst",
2617 [(set GR64:$dst, (int_x86_slwpcb))]>, XOP, XOP9, VEX_W;
// lwpins_intr - LWPINS sets EFLAGS from the X86lwpins node; takes a base
// register, a 32-bit event value (register or memory), and an i32 control.
2619 multiclass lwpins_intr<RegisterClass RC> {
2620 def rri : Ii32<0x12, MRM0r, (outs), (ins RC:$src0, GR32:$src1, i32imm:$cntl),
2621 "lwpins\t{$cntl, $src1, $src0|$src0, $src1, $cntl}",
2622 [(set EFLAGS, (X86lwpins RC:$src0, GR32:$src1, imm:$cntl))]>,
2625 def rmi : Ii32<0x12, MRM0m, (outs), (ins RC:$src0, i32mem:$src1, i32imm:$cntl),
2626 "lwpins\t{$cntl, $src1, $src0|$src0, $src1, $cntl}",
2627 [(set EFLAGS, (X86lwpins RC:$src0, (loadi32 addr:$src1), imm:$cntl))]>,
2631 let Defs = [EFLAGS] in {
2632 defm LWPINS32 : lwpins_intr<GR32>;
2633 defm LWPINS64 : lwpins_intr<GR64>, VEX_W;
// lwpval_intr - LWPVAL: same operand shape as LWPINS but no flag result;
// selects directly from the width-specific intrinsic.
2636 multiclass lwpval_intr<RegisterClass RC, Intrinsic Int> {
2637 def rri : Ii32<0x12, MRM1r, (outs), (ins RC:$src0, GR32:$src1, i32imm:$cntl),
2638 "lwpval\t{$cntl, $src1, $src0|$src0, $src1, $cntl}",
2639 [(Int RC:$src0, GR32:$src1, imm:$cntl)]>, XOP_4V, XOPA;
2641 def rmi : Ii32<0x12, MRM1m, (outs), (ins RC:$src0, i32mem:$src1, i32imm:$cntl),
2642 "lwpval\t{$cntl, $src1, $src0|$src0, $src1, $cntl}",
2643 [(Int RC:$src0, (loadi32 addr:$src1), imm:$cntl)]>,
2647 defm LWPVAL32 : lwpval_intr<GR32, int_x86_lwpval32>;
2648 defm LWPVAL64 : lwpval_intr<GR64, int_x86_lwpval64>, VEX_W;
2650 } // HasLWP, SchedRW
2652 //===----------------------------------------------------------------------===//
2653 // MONITORX/MWAITX Instructions
2655 let SchedRW = [ WriteSystem ] in {
// Pseudo expanded by a custom inserter: the intrinsic carries an explicit
// address plus two GR32 operands, which must be placed into the fixed
// registers the real instruction reads.
2656 let usesCustomInserter = 1 in {
2657 def MONITORX : PseudoI<(outs), (ins i32mem:$src1, GR32:$src2, GR32:$src3),
2658 [(int_x86_monitorx addr:$src1, GR32:$src2, GR32:$src3)]>,
2659 Requires<[ HasMWAITX ]>;
// Real encoding (0F 01 /FA); reads its operands implicitly from
// EAX/ECX/EDX, hence the empty pattern.
2662 let Uses = [ EAX, ECX, EDX ] in {
2663 def MONITORXrrr : I<0x01, MRM_FA, (outs), (ins), "monitorx", []>,
2664 TB, Requires<[ HasMWAITX ]>;
// MWAITX (0F 01 /FB) implicitly reads ECX/EAX/EBX and is selected directly
// from its intrinsic on those fixed registers.
2667 let Uses = [ ECX, EAX, EBX ] in {
2668 def MWAITXrrr : I<0x01, MRM_FB, (outs), (ins), "mwaitx",
2669 [(int_x86_mwaitx ECX, EAX, EBX)]>,
2670 TB, Requires<[ HasMWAITX ]>;
// Assembler aliases accepting the explicit-register spellings, per mode.
2674 def : InstAlias<"mwaitx\t{%eax, %ecx, %ebx|ebx, ecx, eax}", (MWAITXrrr)>,
2675 Requires<[ Not64BitMode ]>;
2676 def : InstAlias<"mwaitx\t{%rax, %rcx, %rbx|rbx, rcx, rax}", (MWAITXrrr)>,
2677 Requires<[ In64BitMode ]>;
2679 def : InstAlias<"monitorx\t{%eax, %ecx, %edx|edx, ecx, eax}", (MONITORXrrr)>,
2680 Requires<[ Not64BitMode ]>;
2681 def : InstAlias<"monitorx\t{%rax, %rcx, %rdx|rdx, rcx, rax}", (MONITORXrrr)>,
2682 Requires<[ In64BitMode ]>;
2684 //===----------------------------------------------------------------------===//
2685 // WAITPKG Instructions
2687 let SchedRW = [WriteSystem] in {
// UMONITOR: one def per address size (AdSize16/32/64), each gated on the
// mode in which that address size is legal.
2688 def UMONITOR16 : I<0xAE, MRM6r, (outs), (ins GR16:$src),
2689 "umonitor\t$src", [(int_x86_umonitor GR16:$src)]>,
2690 XS, AdSize16, Requires<[HasWAITPKG, Not64BitMode]>;
2691 def UMONITOR32 : I<0xAE, MRM6r, (outs), (ins GR32:$src),
2692 "umonitor\t$src", [(int_x86_umonitor GR32:$src)]>,
2693 XS, AdSize32, Requires<[HasWAITPKG]>;
2694 def UMONITOR64 : I<0xAE, MRM6r, (outs), (ins GR64:$src),
2695 "umonitor\t$src", [(int_x86_umonitor GR64:$src)]>,
2696 XS, AdSize64, Requires<[HasWAITPKG, In64BitMode]>;
// UMWAIT/TPAUSE implicitly read the EDX:EAX deadline pair and report via
// EFLAGS; they share the 0xAE /6 encoding and differ by mandatory prefix
// (XD vs PD).
2697 let Uses = [EAX, EDX], Defs = [EFLAGS] in {
2698 def UMWAIT : I<0xAE, MRM6r,
2699 (outs), (ins GR32orGR64:$src), "umwait\t$src",
2700 [(set EFLAGS, (X86umwait GR32orGR64:$src, EDX, EAX))]>,
2701 XD, Requires<[HasWAITPKG]>;
2702 def TPAUSE : I<0xAE, MRM6r,
2703 (outs), (ins GR32orGR64:$src), "tpause\t$src",
2704 [(set EFLAGS, (X86tpause GR32orGR64:$src, EDX, EAX))]>,
2705 PD, Requires<[HasWAITPKG]>, NotMemoryFoldable;
2709 //===----------------------------------------------------------------------===//
2710 // MOVDIRI - Move doubleword/quadword as direct store
// Direct-store moves, selected from the directstore intrinsics; modeled as
// plain stores for scheduling (WriteStore).
2712 let SchedRW = [WriteStore] in {
2713 def MOVDIRI32 : I<0xF9, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2714 "movdiri\t{$src, $dst|$dst, $src}",
2715 [(int_x86_directstore32 addr:$dst, GR32:$src)]>,
2716 T8, Requires<[HasMOVDIRI]>;
// 64-bit form additionally requires 64-bit mode.
2717 def MOVDIRI64 : RI<0xF9, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
2718 "movdiri\t{$src, $dst|$dst, $src}",
2719 [(int_x86_directstore64 addr:$dst, GR64:$src)]>,
2720 T8, Requires<[In64BitMode, HasMOVDIRI]>;
2723 //===----------------------------------------------------------------------===//
2724 // MOVDIR64B - Move 64 bytes as direct store
// One def per destination-register address size. The destination register
// appears on the `ins` list because the instruction encodes it as a source
// operand (ModRM reg) addressing the store target.
2726 let SchedRW = [WriteStore] in {
// 16-bit address form has no intrinsic pattern (empty []) and is only
// available outside 64-bit mode.
2727 def MOVDIR64B16 : I<0xF8, MRMSrcMem, (outs), (ins GR16:$dst, i512mem:$src),
2728 "movdir64b\t{$src, $dst|$dst, $src}", []>,
2729 T8PD, AdSize16, Requires<[HasMOVDIR64B, Not64BitMode]>;
2730 def MOVDIR64B32 : I<0xF8, MRMSrcMem, (outs), (ins GR32:$dst, i512mem:$src),
2731 "movdir64b\t{$src, $dst|$dst, $src}",
2732 [(int_x86_movdir64b GR32:$dst, addr:$src)]>,
2733 T8PD, AdSize32, Requires<[HasMOVDIR64B]>;
2734 def MOVDIR64B64 : I<0xF8, MRMSrcMem, (outs), (ins GR64:$dst, i512mem:$src),
2735 "movdir64b\t{$src, $dst|$dst, $src}",
2736 [(int_x86_movdir64b GR64:$dst, addr:$src)]>,
2737 T8PD, AdSize64, Requires<[HasMOVDIR64B, In64BitMode]>;
2740 //===----------------------------------------------------------------------===//
2741 // CLZERO Instruction
2743 let SchedRW = [WriteSystem] in {
// Real encoding (0F 01 /FC); no pattern — selected via the pseudo below,
// which lets the custom inserter put the intrinsic's explicit address into
// the register the instruction implicitly reads.
2745 def CLZEROr : I<0x01, MRM_FC, (outs), (ins), "clzero", []>,
2746 TB, Requires<[HasCLZERO]>;
2748 let usesCustomInserter = 1 in {
2749 def CLZERO : PseudoI<(outs), (ins i32mem:$src1),
2750 [(int_x86_clzero addr:$src1)]>, Requires<[HasCLZERO]>;
// Assembler aliases accepting the explicit-register spelling, per mode.
2754 def : InstAlias<"clzero\t{%eax|eax}", (CLZEROr)>, Requires<[Not64BitMode]>;
2755 def : InstAlias<"clzero\t{%rax|rax}", (CLZEROr)>, Requires<[In64BitMode]>;
2757 //===----------------------------------------------------------------------===//
2758 // Pattern fragments to auto generate TBM instructions.
2759 //===----------------------------------------------------------------------===//
// Selection patterns for the TBM binary family (their instruction defs
// carry empty patterns). Identities matched:
//   x & (x+1)   -> BLCFILL     x | ~(x+1)  -> BLCI
//   ~x & (x+1)  -> BLCIC       x ^ (x+1)   -> BLCMSK
//   x | (x+1)   -> BLCS        x | (x-1)   -> BLSFILL
//   ~x | (x-1)  -> BLSIC       ~x | (x+1)  -> T1MSKC
//   ~x & (x-1)  -> TZMSK
2761 let Predicates = [HasTBM] in {
2762 // FIXME: patterns for the load versions are not implemented
2763 def : Pat<(and GR32:$src, (add GR32:$src, 1)),
2764 (BLCFILL32rr GR32:$src)>;
2765 def : Pat<(and GR64:$src, (add GR64:$src, 1)),
2766 (BLCFILL64rr GR64:$src)>;
2768 def : Pat<(or GR32:$src, (not (add GR32:$src, 1))),
2769 (BLCI32rr GR32:$src)>;
2770 def : Pat<(or GR64:$src, (not (add GR64:$src, 1))),
2771 (BLCI64rr GR64:$src)>;
2773 // Extra patterns because opt can optimize the above patterns to this.
// (-2 - x) == ~(x + 1), so this is the canonicalized form of BLCI's RHS.
2774 def : Pat<(or GR32:$src, (sub -2, GR32:$src)),
2775 (BLCI32rr GR32:$src)>;
2776 def : Pat<(or GR64:$src, (sub -2, GR64:$src)),
2777 (BLCI64rr GR64:$src)>;
2779 def : Pat<(and (not GR32:$src), (add GR32:$src, 1)),
2780 (BLCIC32rr GR32:$src)>;
2781 def : Pat<(and (not GR64:$src), (add GR64:$src, 1)),
2782 (BLCIC64rr GR64:$src)>;
2784 def : Pat<(xor GR32:$src, (add GR32:$src, 1)),
2785 (BLCMSK32rr GR32:$src)>;
2786 def : Pat<(xor GR64:$src, (add GR64:$src, 1)),
2787 (BLCMSK64rr GR64:$src)>;
2789 def : Pat<(or GR32:$src, (add GR32:$src, 1)),
2790 (BLCS32rr GR32:$src)>;
2791 def : Pat<(or GR64:$src, (add GR64:$src, 1)),
2792 (BLCS64rr GR64:$src)>;
2794 def : Pat<(or GR32:$src, (add GR32:$src, -1)),
2795 (BLSFILL32rr GR32:$src)>;
2796 def : Pat<(or GR64:$src, (add GR64:$src, -1)),
2797 (BLSFILL64rr GR64:$src)>;
2799 def : Pat<(or (not GR32:$src), (add GR32:$src, -1)),
2800 (BLSIC32rr GR32:$src)>;
2801 def : Pat<(or (not GR64:$src), (add GR64:$src, -1)),
2802 (BLSIC64rr GR64:$src)>;
2804 def : Pat<(or (not GR32:$src), (add GR32:$src, 1)),
2805 (T1MSKC32rr GR32:$src)>;
2806 def : Pat<(or (not GR64:$src), (add GR64:$src, 1)),
2807 (T1MSKC64rr GR64:$src)>;
2809 def : Pat<(and (not GR32:$src), (add GR32:$src, -1)),
2810 (TZMSK32rr GR32:$src)>;
2811 def : Pat<(and (not GR64:$src), (add GR64:$src, -1)),
2812 (TZMSK64rr GR64:$src)>;
2814 // Patterns to match flag producing ops.
2815 // X86and_flag nodes are rarely created. Those should use CMP+AND. We do
2816 // TESTrr matching in PostProcessISelDAG to allow BLSR/BLSI to be formed.
// These duplicate the plain or/xor TBM patterns above, but match the
// carry-free flag-producing fragments (or_flag_nocf / xor_flag_nocf) so
// the EFLAGS result of the TBM instruction can be reused directly.
2817 def : Pat<(or_flag_nocf GR32:$src, (not (add GR32:$src, 1))),
2818 (BLCI32rr GR32:$src)>;
2819 def : Pat<(or_flag_nocf GR64:$src, (not (add GR64:$src, 1))),
2820 (BLCI64rr GR64:$src)>;
2822 // Extra patterns because opt can optimize the above patterns to this.
// (-2 - x) == ~(x + 1), the canonicalized form of BLCI's RHS.
2823 def : Pat<(or_flag_nocf GR32:$src, (sub -2, GR32:$src)),
2824 (BLCI32rr GR32:$src)>;
2825 def : Pat<(or_flag_nocf GR64:$src, (sub -2, GR64:$src)),
2826 (BLCI64rr GR64:$src)>;
2828 def : Pat<(xor_flag_nocf GR32:$src, (add GR32:$src, 1)),
2829 (BLCMSK32rr GR32:$src)>;
2830 def : Pat<(xor_flag_nocf GR64:$src, (add GR64:$src, 1)),
2831 (BLCMSK64rr GR64:$src)>;
2833 def : Pat<(or_flag_nocf GR32:$src, (add GR32:$src, 1)),
2834 (BLCS32rr GR32:$src)>;
2835 def : Pat<(or_flag_nocf GR64:$src, (add GR64:$src, 1)),
2836 (BLCS64rr GR64:$src)>;
2838 def : Pat<(or_flag_nocf GR32:$src, (add GR32:$src, -1)),
2839 (BLSFILL32rr GR32:$src)>;
2840 def : Pat<(or_flag_nocf GR64:$src, (add GR64:$src, -1)),
2841 (BLSFILL64rr GR64:$src)>;
2843 def : Pat<(or_flag_nocf (not GR32:$src), (add GR32:$src, -1)),
2844 (BLSIC32rr GR32:$src)>;
2845 def : Pat<(or_flag_nocf (not GR64:$src), (add GR64:$src, -1)),
2846 (BLSIC64rr GR64:$src)>;
2848 def : Pat<(or_flag_nocf (not GR32:$src), (add GR32:$src, 1)),
2849 (T1MSKC32rr GR32:$src)>;
2850 def : Pat<(or_flag_nocf (not GR64:$src), (add GR64:$src, 1)),
2851 (T1MSKC64rr GR64:$src)>;
2854 //===----------------------------------------------------------------------===//
2855 // Memory Instructions
// Cache-line control instructions (flush / write-back / demote), each
// selected from its intrinsic and gated on its own CPUID feature.
2858 let Predicates = [HasCLFLUSHOPT], SchedRW = [WriteLoad] in
2859 def CLFLUSHOPT : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
2860 "clflushopt\t$src", [(int_x86_clflushopt addr:$src)]>, PD;
2862 let Predicates = [HasCLWB], SchedRW = [WriteLoad] in
2863 def CLWB : I<0xAE, MRM6m, (outs), (ins i8mem:$src), "clwb\t$src",
2864 [(int_x86_clwb addr:$src)]>, PD, NotMemoryFoldable;
2866 let Predicates = [HasCLDEMOTE], SchedRW = [WriteLoad] in
2867 def CLDEMOTE : I<0x1C, MRM0m, (outs), (ins i8mem:$src), "cldemote\t$src",
2868 [(int_x86_cldemote addr:$src)]>, TB;
2870 //===----------------------------------------------------------------------===//
2872 //===----------------------------------------------------------------------===//
2874 include "X86InstrArithmetic.td"
2875 include "X86InstrCMovSetCC.td"
2876 include "X86InstrExtension.td"
2877 include "X86InstrControl.td"
2878 include "X86InstrShiftRotate.td"
2880 // X87 Floating Point Stack.
2881 include "X86InstrFPStack.td"
2883 // SIMD support (SSE, MMX and AVX)
2884 include "X86InstrFragmentsSIMD.td"
2886 // FMA - Fused Multiply-Add support (requires FMA)
2887 include "X86InstrFMA.td"
2890 include "X86InstrXOP.td"
2892 // SSE, MMX and 3DNow! vector support.
2893 include "X86InstrSSE.td"
2894 include "X86InstrAVX512.td"
2895 include "X86InstrMMX.td"
2896 include "X86Instr3DNow.td"
2899 include "X86InstrMPX.td"
2901 include "X86InstrVMX.td"
2902 include "X86InstrSVM.td"
2904 include "X86InstrTSX.td"
2905 include "X86InstrSGX.td"
2907 // System instructions.
2908 include "X86InstrSystem.td"
2910 // Compiler Pseudo Instructions and Pat Patterns
2911 include "X86InstrCompiler.td"
2912 include "X86InstrVecCompiler.td"
2914 //===----------------------------------------------------------------------===//
2915 // Assembler Mnemonic Aliases
2916 //===----------------------------------------------------------------------===//
// Canonicalize suffix-less AT&T spellings to the width implied by the
// current mode (16/32/64-bit).
2918 def : MnemonicAlias<"call", "callw", "att">, Requires<[In16BitMode]>;
2919 def : MnemonicAlias<"call", "calll", "att">, Requires<[In32BitMode]>;
2920 def : MnemonicAlias<"call", "callq", "att">, Requires<[In64BitMode]>;
// Intel sign-extension mnemonics -> AT&T equivalents.
2922 def : MnemonicAlias<"cbw", "cbtw", "att">;
2923 def : MnemonicAlias<"cwde", "cwtl", "att">;
2924 def : MnemonicAlias<"cwd", "cwtd", "att">;
2925 def : MnemonicAlias<"cdq", "cltd", "att">;
2926 def : MnemonicAlias<"cdqe", "cltq", "att">;
2927 def : MnemonicAlias<"cqo", "cqto", "att">;
2929 // In 64-bit mode lret maps to lretl; it is not ambiguous with lretq.
2930 def : MnemonicAlias<"lret", "lretw", "att">, Requires<[In16BitMode]>;
2931 def : MnemonicAlias<"lret", "lretl", "att">, Requires<[Not16BitMode]>;
2933 def : MnemonicAlias<"leavel", "leave", "att">, Requires<[Not64BitMode]>;
2934 def : MnemonicAlias<"leaveq", "leave", "att">, Requires<[In64BitMode]>;
2936 def : MnemonicAlias<"loopz", "loope">;
2937 def : MnemonicAlias<"loopnz", "loopne">;
// pop/popf follow the mode's default operand size.
2939 def : MnemonicAlias<"pop", "popw", "att">, Requires<[In16BitMode]>;
2940 def : MnemonicAlias<"pop", "popl", "att">, Requires<[In32BitMode]>;
2941 def : MnemonicAlias<"pop", "popq", "att">, Requires<[In64BitMode]>;
2942 def : MnemonicAlias<"popf", "popfw", "att">, Requires<[In16BitMode]>;
2943 def : MnemonicAlias<"popf", "popfl", "att">, Requires<[In32BitMode]>;
2944 def : MnemonicAlias<"popf", "popfq", "att">, Requires<[In64BitMode]>;
2945 def : MnemonicAlias<"popf", "popfq", "intel">, Requires<[In64BitMode]>;
2946 def : MnemonicAlias<"popfd", "popfl", "att">;
2947 def : MnemonicAlias<"popfw", "popf", "intel">, Requires<[In32BitMode]>;
2948 def : MnemonicAlias<"popfw", "popf", "intel">, Requires<[In64BitMode]>;
2950 // FIXME: This is wrong for "push reg". "push %bx" should turn into pushw in
2951 // all modes. However: "push (addr)" and "push $42" should default to
2952 // pushl/pushq depending on the current mode. Similar for "pop %bx"
2953 def : MnemonicAlias<"push", "pushw", "att">, Requires<[In16BitMode]>;
2954 def : MnemonicAlias<"push", "pushl", "att">, Requires<[In32BitMode]>;
2955 def : MnemonicAlias<"push", "pushq", "att">, Requires<[In64BitMode]>;
2956 def : MnemonicAlias<"pushf", "pushfw", "att">, Requires<[In16BitMode]>;
2957 def : MnemonicAlias<"pushf", "pushfl", "att">, Requires<[In32BitMode]>;
2958 def : MnemonicAlias<"pushf", "pushfq", "att">, Requires<[In64BitMode]>;
2959 def : MnemonicAlias<"pushf", "pushfq", "intel">, Requires<[In64BitMode]>;
2960 def : MnemonicAlias<"pushfd", "pushfl", "att">;
2961 def : MnemonicAlias<"pushfw", "pushf", "intel">, Requires<[In32BitMode]>;
2962 def : MnemonicAlias<"pushfw", "pushf", "intel">, Requires<[In64BitMode]>;
// popa/pusha are only valid outside 64-bit mode; width follows the mode.
2964 def : MnemonicAlias<"popad", "popal", "intel">, Requires<[Not64BitMode]>;
2965 def : MnemonicAlias<"pushad", "pushal", "intel">, Requires<[Not64BitMode]>;
2966 def : MnemonicAlias<"popa", "popaw", "intel">, Requires<[In16BitMode]>;
2967 def : MnemonicAlias<"pusha", "pushaw", "intel">, Requires<[In16BitMode]>;
2968 def : MnemonicAlias<"popa", "popal", "intel">, Requires<[In32BitMode]>;
2969 def : MnemonicAlias<"pusha", "pushal", "intel">, Requires<[In32BitMode]>;
2971 def : MnemonicAlias<"popa", "popaw", "att">, Requires<[In16BitMode]>;
2972 def : MnemonicAlias<"pusha", "pushaw", "att">, Requires<[In16BitMode]>;
2973 def : MnemonicAlias<"popa", "popal", "att">, Requires<[In32BitMode]>;
2974 def : MnemonicAlias<"pusha", "pushal", "att">, Requires<[In32BitMode]>;
2976 def : MnemonicAlias<"repe", "rep">;
2977 def : MnemonicAlias<"repz", "rep">;
2978 def : MnemonicAlias<"repnz", "repne">;
2980 def : MnemonicAlias<"ret", "retw", "att">, Requires<[In16BitMode]>;
2981 def : MnemonicAlias<"ret", "retl", "att">, Requires<[In32BitMode]>;
2982 def : MnemonicAlias<"ret", "retq", "att">, Requires<[In64BitMode]>;
2984 // Apply 'ret' behavior to 'retn'
2985 def : MnemonicAlias<"retn", "retw", "att">, Requires<[In16BitMode]>;
2986 def : MnemonicAlias<"retn", "retl", "att">, Requires<[In32BitMode]>;
2987 def : MnemonicAlias<"retn", "retq", "att">, Requires<[In64BitMode]>;
2988 def : MnemonicAlias<"retn", "ret", "intel">;
// sal is the same encoding as shl; accept both spellings.
2990 def : MnemonicAlias<"sal", "shl", "intel">;
2991 def : MnemonicAlias<"salb", "shlb", "att">;
2992 def : MnemonicAlias<"salw", "shlw", "att">;
2993 def : MnemonicAlias<"sall", "shll", "att">;
2994 def : MnemonicAlias<"salq", "shlq", "att">;
2996 def : MnemonicAlias<"smovb", "movsb", "att">;
2997 def : MnemonicAlias<"smovw", "movsw", "att">;
2998 def : MnemonicAlias<"smovl", "movsl", "att">;
2999 def : MnemonicAlias<"smovq", "movsq", "att">;
3001 def : MnemonicAlias<"ud2a", "ud2", "att">;
3002 def : MnemonicAlias<"verrw", "verr", "att">;
3004 // MS recognizes 'xacquire'/'xrelease' as 'acquire'/'release'
3005 def : MnemonicAlias<"acquire", "xacquire", "intel">;
3006 def : MnemonicAlias<"release", "xrelease", "intel">;
3008 // System instruction aliases.
3009 def : MnemonicAlias<"iret", "iretw", "att">, Requires<[In16BitMode]>;
3010 def : MnemonicAlias<"iret", "iretl", "att">, Requires<[Not16BitMode]>;
3011 def : MnemonicAlias<"sysret", "sysretl", "att">;
3012 def : MnemonicAlias<"sysexit", "sysexitl", "att">;
// Descriptor-table loads/stores: AT&T uses w/l/q suffixes per mode; Intel
// syntax uses w/d suffixes in 16/32-bit modes.
3014 def : MnemonicAlias<"lgdt", "lgdtw", "att">, Requires<[In16BitMode]>;
3015 def : MnemonicAlias<"lgdt", "lgdtl", "att">, Requires<[In32BitMode]>;
3016 def : MnemonicAlias<"lgdt", "lgdtq", "att">, Requires<[In64BitMode]>;
3017 def : MnemonicAlias<"lidt", "lidtw", "att">, Requires<[In16BitMode]>;
3018 def : MnemonicAlias<"lidt", "lidtl", "att">, Requires<[In32BitMode]>;
3019 def : MnemonicAlias<"lidt", "lidtq", "att">, Requires<[In64BitMode]>;
3020 def : MnemonicAlias<"sgdt", "sgdtw", "att">, Requires<[In16BitMode]>;
3021 def : MnemonicAlias<"sgdt", "sgdtl", "att">, Requires<[In32BitMode]>;
3022 def : MnemonicAlias<"sgdt", "sgdtq", "att">, Requires<[In64BitMode]>;
3023 def : MnemonicAlias<"sidt", "sidtw", "att">, Requires<[In16BitMode]>;
3024 def : MnemonicAlias<"sidt", "sidtl", "att">, Requires<[In32BitMode]>;
3025 def : MnemonicAlias<"sidt", "sidtq", "att">, Requires<[In64BitMode]>;
3026 def : MnemonicAlias<"lgdt", "lgdtw", "intel">, Requires<[In16BitMode]>;
3027 def : MnemonicAlias<"lgdt", "lgdtd", "intel">, Requires<[In32BitMode]>;
3028 def : MnemonicAlias<"lidt", "lidtw", "intel">, Requires<[In16BitMode]>;
3029 def : MnemonicAlias<"lidt", "lidtd", "intel">, Requires<[In32BitMode]>;
3030 def : MnemonicAlias<"sgdt", "sgdtw", "intel">, Requires<[In16BitMode]>;
3031 def : MnemonicAlias<"sgdt", "sgdtd", "intel">, Requires<[In32BitMode]>;
3032 def : MnemonicAlias<"sidt", "sidtw", "intel">, Requires<[In16BitMode]>;
3033 def : MnemonicAlias<"sidt", "sidtd", "intel">, Requires<[In32BitMode]>;
3036 // Floating point stack aliases.
// Canonicalize x87 condition-code spellings and width-suffixed mnemonics.
3037 def : MnemonicAlias<"fcmovz", "fcmove", "att">;
3038 def : MnemonicAlias<"fcmova", "fcmovnbe", "att">;
3039 def : MnemonicAlias<"fcmovnae", "fcmovb", "att">;
3040 def : MnemonicAlias<"fcmovna", "fcmovbe", "att">;
3041 def : MnemonicAlias<"fcmovae", "fcmovnb", "att">;
3042 def : MnemonicAlias<"fcomip", "fcompi">;
3043 def : MnemonicAlias<"fildq", "fildll", "att">;
3044 def : MnemonicAlias<"fistpq", "fistpll", "att">;
3045 def : MnemonicAlias<"fisttpq", "fisttpll", "att">;
3046 def : MnemonicAlias<"fldcww", "fldcw", "att">;
3047 def : MnemonicAlias<"fnstcww", "fnstcw", "att">;
3048 def : MnemonicAlias<"fnstsww", "fnstsw", "att">;
3049 def : MnemonicAlias<"fucomip", "fucompi">;
3050 def : MnemonicAlias<"fwait", "wait">;
// AT&T 'q'-suffixed xsave-family spellings map to the 64-suffixed forms.
3052 def : MnemonicAlias<"fxsaveq", "fxsave64", "att">;
3053 def : MnemonicAlias<"fxrstorq", "fxrstor64", "att">;
3054 def : MnemonicAlias<"xsaveq", "xsave64", "att">;
3055 def : MnemonicAlias<"xrstorq", "xrstor64", "att">;
3056 def : MnemonicAlias<"xsaveoptq", "xsaveopt64", "att">;
3057 def : MnemonicAlias<"xrstorsq", "xrstors64", "att">;
3058 def : MnemonicAlias<"xsavecq", "xsavec64", "att">;
3059 def : MnemonicAlias<"xsavesq", "xsaves64", "att">;
// CondCodeAlias - Build a MnemonicAlias that rewrites one condition-code
// spelling (OldCond) inside a mnemonic to another (NewCond), keeping the
// surrounding Prefix/Suffix, restricted to the given asm variant.
// NOTE(review): the parameter-list continuation was dropped in this chunk;
// restored to match the upstream definition.
class CondCodeAlias<string Prefix,string Suffix, string OldCond, string NewCond,
                    string VariantName>
  : MnemonicAlias<!strconcat(Prefix, OldCond, Suffix),
                  !strconcat(Prefix, NewCond, Suffix), VariantName>;
/// IntegerCondCodeMnemonicAlias - This multiclass defines a bunch of
/// MnemonicAlias's that canonicalize the condition code in a mnemonic, for
/// example "setz" -> "sete".
// NOTE(review): the header close ('string V = ""> {') and the trailing '}'
// were dropped in this chunk; restored to match the upstream definition.
multiclass IntegerCondCodeMnemonicAlias<string Prefix, string Suffix,
                                        string V = ""> {
  def C   : CondCodeAlias<Prefix, Suffix, "c",   "b",  V>; // setc   -> setb
  def Z   : CondCodeAlias<Prefix, Suffix, "z" ,  "e",  V>; // setz   -> sete
  def NA  : CondCodeAlias<Prefix, Suffix, "na",  "be", V>; // setna  -> setbe
  def NB  : CondCodeAlias<Prefix, Suffix, "nb",  "ae", V>; // setnb  -> setae
  def NC  : CondCodeAlias<Prefix, Suffix, "nc",  "ae", V>; // setnc  -> setae
  def NG  : CondCodeAlias<Prefix, Suffix, "ng",  "le", V>; // setng  -> setle
  def NL  : CondCodeAlias<Prefix, Suffix, "nl",  "ge", V>; // setnl  -> setge
  def NZ  : CondCodeAlias<Prefix, Suffix, "nz",  "ne", V>; // setnz  -> setne
  def PE  : CondCodeAlias<Prefix, Suffix, "pe",  "p",  V>; // setpe  -> setp
  def PO  : CondCodeAlias<Prefix, Suffix, "po",  "np", V>; // setpo  -> setnp

  def NAE : CondCodeAlias<Prefix, Suffix, "nae", "b",  V>; // setnae -> setb
  def NBE : CondCodeAlias<Prefix, Suffix, "nbe", "a",  V>; // setnbe -> seta
  def NGE : CondCodeAlias<Prefix, Suffix, "nge", "l",  V>; // setnge -> setl
  def NLE : CondCodeAlias<Prefix, Suffix, "nle", "g",  V>; // setnle -> setg
}
// Aliases for set<CC>
defm : IntegerCondCodeMnemonicAlias<"set", "">;
// Aliases for j<CC>
defm : IntegerCondCodeMnemonicAlias<"j", "">;
// Aliases for cmov<CC>{w,l,q}
defm : IntegerCondCodeMnemonicAlias<"cmov", "w", "att">;
defm : IntegerCondCodeMnemonicAlias<"cmov", "l", "att">;
defm : IntegerCondCodeMnemonicAlias<"cmov", "q", "att">;
// No size suffix for intel-style asm.
defm : IntegerCondCodeMnemonicAlias<"cmov", "", "intel">;
//===----------------------------------------------------------------------===//
// Assembler Instruction Aliases
//===----------------------------------------------------------------------===//

// aad/aam default to base 10 if no operand is specified.
def : InstAlias<"aad", (AAD8i8 10)>, Requires<[Not64BitMode]>;
def : InstAlias<"aam", (AAM8i8 10)>, Requires<[Not64BitMode]>;

// Disambiguate the mem/imm form of bt-without-a-suffix as btl.
// Likewise for btc/btr/bts.
def : InstAlias<"bt\t{$imm, $mem|$mem, $imm}",
                (BT32mi8 i32mem:$mem, i32i8imm:$imm), 0, "att">;
def : InstAlias<"btc\t{$imm, $mem|$mem, $imm}",
                (BTC32mi8 i32mem:$mem, i32i8imm:$imm), 0, "att">;
def : InstAlias<"btr\t{$imm, $mem|$mem, $imm}",
                (BTR32mi8 i32mem:$mem, i32i8imm:$imm), 0, "att">;
def : InstAlias<"bts\t{$imm, $mem|$mem, $imm}",
                (BTS32mi8 i32mem:$mem, i32i8imm:$imm), 0, "att">;

// clr aliases: clr{bwlq} reg is xor reg,reg.
def : InstAlias<"clr{b}\t$reg", (XOR8rr GR8 :$reg, GR8 :$reg), 0>;
def : InstAlias<"clr{w}\t$reg", (XOR16rr GR16:$reg, GR16:$reg), 0>;
def : InstAlias<"clr{l}\t$reg", (XOR32rr GR32:$reg, GR32:$reg), 0>;
def : InstAlias<"clr{q}\t$reg", (XOR64rr GR64:$reg, GR64:$reg), 0>;
// lods aliases. Accept the destination being omitted because it's implicit
// in the mnemonic, or the mnemonic suffix being omitted because it's implicit
// in the destination.
def : InstAlias<"lodsb\t$src", (LODSB srcidx8:$src), 0>;
def : InstAlias<"lodsw\t$src", (LODSW srcidx16:$src), 0>;
def : InstAlias<"lods{l|d}\t$src", (LODSL srcidx32:$src), 0>;
def : InstAlias<"lodsq\t$src", (LODSQ srcidx64:$src), 0>, Requires<[In64BitMode]>;
def : InstAlias<"lods\t{$src, %al|al, $src}", (LODSB srcidx8:$src), 0>;
def : InstAlias<"lods\t{$src, %ax|ax, $src}", (LODSW srcidx16:$src), 0>;
def : InstAlias<"lods\t{$src, %eax|eax, $src}", (LODSL srcidx32:$src), 0>;
def : InstAlias<"lods\t{$src, %rax|rax, $src}", (LODSQ srcidx64:$src), 0>, Requires<[In64BitMode]>;
def : InstAlias<"lods\t$src", (LODSB srcidx8:$src), 0, "intel">;
def : InstAlias<"lods\t$src", (LODSW srcidx16:$src), 0, "intel">;
def : InstAlias<"lods\t$src", (LODSL srcidx32:$src), 0, "intel">;
def : InstAlias<"lods\t$src", (LODSQ srcidx64:$src), 0, "intel">, Requires<[In64BitMode]>;

// stos aliases. Accept the source being omitted because it's implicit in
// the mnemonic, or the mnemonic suffix being omitted because it's implicit
// in the source.
def : InstAlias<"stosb\t$dst", (STOSB dstidx8:$dst), 0>;
def : InstAlias<"stosw\t$dst", (STOSW dstidx16:$dst), 0>;
def : InstAlias<"stos{l|d}\t$dst", (STOSL dstidx32:$dst), 0>;
def : InstAlias<"stosq\t$dst", (STOSQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
def : InstAlias<"stos\t{%al, $dst|$dst, al}", (STOSB dstidx8:$dst), 0>;
def : InstAlias<"stos\t{%ax, $dst|$dst, ax}", (STOSW dstidx16:$dst), 0>;
def : InstAlias<"stos\t{%eax, $dst|$dst, eax}", (STOSL dstidx32:$dst), 0>;
def : InstAlias<"stos\t{%rax, $dst|$dst, rax}", (STOSQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
def : InstAlias<"stos\t$dst", (STOSB dstidx8:$dst), 0, "intel">;
def : InstAlias<"stos\t$dst", (STOSW dstidx16:$dst), 0, "intel">;
def : InstAlias<"stos\t$dst", (STOSL dstidx32:$dst), 0, "intel">;
def : InstAlias<"stos\t$dst", (STOSQ dstidx64:$dst), 0, "intel">, Requires<[In64BitMode]>;

// scas aliases. Accept the destination being omitted because it's implicit
// in the mnemonic, or the mnemonic suffix being omitted because it's implicit
// in the destination.
def : InstAlias<"scasb\t$dst", (SCASB dstidx8:$dst), 0>;
def : InstAlias<"scasw\t$dst", (SCASW dstidx16:$dst), 0>;
def : InstAlias<"scas{l|d}\t$dst", (SCASL dstidx32:$dst), 0>;
def : InstAlias<"scasq\t$dst", (SCASQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
def : InstAlias<"scas\t{$dst, %al|al, $dst}", (SCASB dstidx8:$dst), 0>;
def : InstAlias<"scas\t{$dst, %ax|ax, $dst}", (SCASW dstidx16:$dst), 0>;
def : InstAlias<"scas\t{$dst, %eax|eax, $dst}", (SCASL dstidx32:$dst), 0>;
def : InstAlias<"scas\t{$dst, %rax|rax, $dst}", (SCASQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
def : InstAlias<"scas\t$dst", (SCASB dstidx8:$dst), 0, "intel">;
def : InstAlias<"scas\t$dst", (SCASW dstidx16:$dst), 0, "intel">;
def : InstAlias<"scas\t$dst", (SCASL dstidx32:$dst), 0, "intel">;
def : InstAlias<"scas\t$dst", (SCASQ dstidx64:$dst), 0, "intel">, Requires<[In64BitMode]>;

// cmps aliases. Mnemonic suffix being omitted because it's implicit
// in the destination.
def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSB dstidx8:$dst, srcidx8:$src), 0, "intel">;
def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSW dstidx16:$dst, srcidx16:$src), 0, "intel">;
def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSL dstidx32:$dst, srcidx32:$src), 0, "intel">;
def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSQ dstidx64:$dst, srcidx64:$src), 0, "intel">, Requires<[In64BitMode]>;

// movs aliases. Mnemonic suffix being omitted because it's implicit
// in the destination.
def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSB dstidx8:$dst, srcidx8:$src), 0, "intel">;
def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSW dstidx16:$dst, srcidx16:$src), 0, "intel">;
def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSL dstidx32:$dst, srcidx32:$src), 0, "intel">;
def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSQ dstidx64:$dst, srcidx64:$src), 0, "intel">, Requires<[In64BitMode]>;
// div and idiv aliases for explicit A register.
def : InstAlias<"div{b}\t{$src, %al|al, $src}", (DIV8r GR8 :$src)>;
def : InstAlias<"div{w}\t{$src, %ax|ax, $src}", (DIV16r GR16:$src)>;
def : InstAlias<"div{l}\t{$src, %eax|eax, $src}", (DIV32r GR32:$src)>;
def : InstAlias<"div{q}\t{$src, %rax|rax, $src}", (DIV64r GR64:$src)>;
def : InstAlias<"div{b}\t{$src, %al|al, $src}", (DIV8m i8mem :$src)>;
def : InstAlias<"div{w}\t{$src, %ax|ax, $src}", (DIV16m i16mem:$src)>;
def : InstAlias<"div{l}\t{$src, %eax|eax, $src}", (DIV32m i32mem:$src)>;
def : InstAlias<"div{q}\t{$src, %rax|rax, $src}", (DIV64m i64mem:$src)>;
def : InstAlias<"idiv{b}\t{$src, %al|al, $src}", (IDIV8r GR8 :$src)>;
def : InstAlias<"idiv{w}\t{$src, %ax|ax, $src}", (IDIV16r GR16:$src)>;
def : InstAlias<"idiv{l}\t{$src, %eax|eax, $src}", (IDIV32r GR32:$src)>;
def : InstAlias<"idiv{q}\t{$src, %rax|rax, $src}", (IDIV64r GR64:$src)>;
def : InstAlias<"idiv{b}\t{$src, %al|al, $src}", (IDIV8m i8mem :$src)>;
def : InstAlias<"idiv{w}\t{$src, %ax|ax, $src}", (IDIV16m i16mem:$src)>;
def : InstAlias<"idiv{l}\t{$src, %eax|eax, $src}", (IDIV32m i32mem:$src)>;
def : InstAlias<"idiv{q}\t{$src, %rax|rax, $src}", (IDIV64m i64mem:$src)>;
// Various unary fpstack operations default to operating on ST1.
// For example, "fxch" -> "fxch %st(1)"
def : InstAlias<"faddp", (ADD_FPrST0 ST1), 0>;
def : InstAlias<"fadd", (ADD_FPrST0 ST1), 0>;
def : InstAlias<"fsub{|r}p", (SUBR_FPrST0 ST1), 0>;
def : InstAlias<"fsub{r|}p", (SUB_FPrST0 ST1), 0>;
def : InstAlias<"fmul", (MUL_FPrST0 ST1), 0>;
def : InstAlias<"fmulp", (MUL_FPrST0 ST1), 0>;
def : InstAlias<"fdiv{|r}p", (DIVR_FPrST0 ST1), 0>;
def : InstAlias<"fdiv{r|}p", (DIV_FPrST0 ST1), 0>;
def : InstAlias<"fxch", (XCH_F ST1), 0>;
def : InstAlias<"fcom", (COM_FST0r ST1), 0>;
def : InstAlias<"fcomp", (COMP_FST0r ST1), 0>;
def : InstAlias<"fcomi", (COM_FIr ST1), 0>;
def : InstAlias<"fcompi", (COM_FIPr ST1), 0>;
def : InstAlias<"fucom", (UCOM_Fr ST1), 0>;
def : InstAlias<"fucomp", (UCOM_FPr ST1), 0>;
def : InstAlias<"fucomi", (UCOM_FIr ST1), 0>;
def : InstAlias<"fucompi", (UCOM_FIPr ST1), 0>;
// Handle fmul/fadd/fsub/fdiv instructions with explicitly written st(0) op.
// For example, "fadd %st(4), %st(0)" -> "fadd %st(4)". We also disambiguate
// instructions like "fadd %st(0), %st(0)" as "fadd %st(0)" for consistency with
// gas.
// NOTE(review): the closing '}' was dropped in this chunk; restored to match
// the upstream definition.
multiclass FpUnaryAlias<string Mnemonic, Instruction Inst, bit EmitAlias = 1> {
 def : InstAlias<!strconcat(Mnemonic, "\t{$op, %st(0)|st(0), $op}"),
                 (Inst RST:$op), EmitAlias>;
 def : InstAlias<!strconcat(Mnemonic, "\t{%st(0), %st(0)|st(0), st(0)}"),
                 (Inst ST0), EmitAlias>;
}
defm : FpUnaryAlias<"fadd", ADD_FST0r>;
defm : FpUnaryAlias<"faddp", ADD_FPrST0, 0>;
defm : FpUnaryAlias<"fsub", SUB_FST0r>;
defm : FpUnaryAlias<"fsub{|r}p", SUBR_FPrST0>;
defm : FpUnaryAlias<"fsubr", SUBR_FST0r>;
defm : FpUnaryAlias<"fsub{r|}p", SUB_FPrST0>;
defm : FpUnaryAlias<"fmul", MUL_FST0r>;
defm : FpUnaryAlias<"fmulp", MUL_FPrST0>;
defm : FpUnaryAlias<"fdiv", DIV_FST0r>;
defm : FpUnaryAlias<"fdiv{|r}p", DIVR_FPrST0>;
defm : FpUnaryAlias<"fdivr", DIVR_FST0r>;
defm : FpUnaryAlias<"fdiv{r|}p", DIV_FPrST0>;
defm : FpUnaryAlias<"fcomi", COM_FIr, 0>;
defm : FpUnaryAlias<"fucomi", UCOM_FIr, 0>;
defm : FpUnaryAlias<"fcompi", COM_FIPr>;
defm : FpUnaryAlias<"fucompi", UCOM_FIPr>;
// Handle "f{mulp,addp} st(0), $op" the same as "f{mulp,addp} $op", since they
// commute. We also allow fdiv[r]p/fsubrp even though they don't commute,
// solely because gas supports it.
def : InstAlias<"faddp\t{%st(0), $op|$op, st(0)}", (ADD_FPrST0 RST:$op), 0>;
def : InstAlias<"fmulp\t{%st(0), $op|$op, st(0)}", (MUL_FPrST0 RST:$op)>;
def : InstAlias<"fsub{|r}p\t{%st(0), $op|$op, st(0)}", (SUBR_FPrST0 RST:$op)>;
def : InstAlias<"fsub{r|}p\t{%st(0), $op|$op, st(0)}", (SUB_FPrST0 RST:$op)>;
def : InstAlias<"fdiv{|r}p\t{%st(0), $op|$op, st(0)}", (DIVR_FPrST0 RST:$op)>;
def : InstAlias<"fdiv{r|}p\t{%st(0), $op|$op, st(0)}", (DIV_FPrST0 RST:$op)>;

def : InstAlias<"fnstsw" , (FNSTSW16r), 0>;
// lcall and ljmp aliases. This seems to be an odd mapping in 64-bit mode, but
// this is compatible with what GAS does.
def : InstAlias<"lcall\t$seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg), 0>, Requires<[In32BitMode]>;
def : InstAlias<"ljmp\t$seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg), 0>, Requires<[In32BitMode]>;
def : InstAlias<"lcall\t{*}$dst", (FARCALL32m opaquemem:$dst), 0>, Requires<[Not16BitMode]>;
def : InstAlias<"ljmp\t{*}$dst", (FARJMP32m opaquemem:$dst), 0>, Requires<[Not16BitMode]>;
def : InstAlias<"lcall\t$seg, $off", (FARCALL16i i16imm:$off, i16imm:$seg), 0>, Requires<[In16BitMode]>;
def : InstAlias<"ljmp\t$seg, $off", (FARJMP16i i16imm:$off, i16imm:$seg), 0>, Requires<[In16BitMode]>;
def : InstAlias<"lcall\t{*}$dst", (FARCALL16m opaquemem:$dst), 0>, Requires<[In16BitMode]>;
def : InstAlias<"ljmp\t{*}$dst", (FARJMP16m opaquemem:$dst), 0>, Requires<[In16BitMode]>;

def : InstAlias<"jmp\t{*}$dst", (JMP64m i64mem:$dst), 0, "att">, Requires<[In64BitMode]>;
def : InstAlias<"jmp\t{*}$dst", (JMP32m i32mem:$dst), 0, "att">, Requires<[In32BitMode]>;
def : InstAlias<"jmp\t{*}$dst", (JMP16m i16mem:$dst), 0, "att">, Requires<[In16BitMode]>;

// "imul <imm>, B" is an alias for "imul <imm>, B, B".
def : InstAlias<"imul{w}\t{$imm, $r|$r, $imm}", (IMUL16rri GR16:$r, GR16:$r, i16imm:$imm), 0>;
def : InstAlias<"imul{w}\t{$imm, $r|$r, $imm}", (IMUL16rri8 GR16:$r, GR16:$r, i16i8imm:$imm), 0>;
def : InstAlias<"imul{l}\t{$imm, $r|$r, $imm}", (IMUL32rri GR32:$r, GR32:$r, i32imm:$imm), 0>;
def : InstAlias<"imul{l}\t{$imm, $r|$r, $imm}", (IMUL32rri8 GR32:$r, GR32:$r, i32i8imm:$imm), 0>;
def : InstAlias<"imul{q}\t{$imm, $r|$r, $imm}", (IMUL64rri32 GR64:$r, GR64:$r, i64i32imm:$imm), 0>;
def : InstAlias<"imul{q}\t{$imm, $r|$r, $imm}", (IMUL64rri8 GR64:$r, GR64:$r, i64i8imm:$imm), 0>;
// ins aliases. Accept the mnemonic suffix being omitted because it's implicit
// in the destination.
def : InstAlias<"ins\t{%dx, $dst|$dst, dx}", (INSB dstidx8:$dst), 0, "intel">;
def : InstAlias<"ins\t{%dx, $dst|$dst, dx}", (INSW dstidx16:$dst), 0, "intel">;
def : InstAlias<"ins\t{%dx, $dst|$dst, dx}", (INSL dstidx32:$dst), 0, "intel">;

// outs aliases. Accept the mnemonic suffix being omitted because it's implicit
// in the source.
def : InstAlias<"outs\t{$src, %dx|dx, $src}", (OUTSB srcidx8:$src), 0, "intel">;
def : InstAlias<"outs\t{$src, %dx|dx, $src}", (OUTSW srcidx16:$src), 0, "intel">;
def : InstAlias<"outs\t{$src, %dx|dx, $src}", (OUTSL srcidx32:$src), 0, "intel">;

// inb %dx -> inb %al, %dx
def : InstAlias<"inb\t{%dx|dx}", (IN8rr), 0>;
def : InstAlias<"inw\t{%dx|dx}", (IN16rr), 0>;
def : InstAlias<"inl\t{%dx|dx}", (IN32rr), 0>;
def : InstAlias<"inb\t$port", (IN8ri u8imm:$port), 0>;
def : InstAlias<"inw\t$port", (IN16ri u8imm:$port), 0>;
def : InstAlias<"inl\t$port", (IN32ri u8imm:$port), 0>;

// jmp and call aliases for lcall and ljmp. jmp $42,$5 -> ljmp
def : InstAlias<"call\t$seg, $off", (FARCALL16i i16imm:$off, i16imm:$seg)>, Requires<[In16BitMode]>;
def : InstAlias<"jmp\t$seg, $off", (FARJMP16i i16imm:$off, i16imm:$seg)>, Requires<[In16BitMode]>;
def : InstAlias<"call\t$seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg)>, Requires<[In32BitMode]>;
def : InstAlias<"jmp\t$seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg)>, Requires<[In32BitMode]>;
def : InstAlias<"callw\t$seg, $off", (FARCALL16i i16imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
def : InstAlias<"jmpw\t$seg, $off", (FARJMP16i i16imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
def : InstAlias<"calll\t$seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
def : InstAlias<"jmpl\t$seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
// Match 'movq <largeimm>, <reg>' as an alias for movabsq.
def : InstAlias<"mov{q}\t{$imm, $reg|$reg, $imm}", (MOV64ri GR64:$reg, i64imm:$imm), 0>;

// Match 'movd GR64, MMX' as an alias for movq to be compatible with gas,
// which supports this due to an old AMD documentation bug when 64-bit mode was
// introduced.
def : InstAlias<"movd\t{$src, $dst|$dst, $src}",
                (MMX_MOVD64to64rr VR64:$dst, GR64:$src), 0>;
def : InstAlias<"movd\t{$src, $dst|$dst, $src}",
                (MMX_MOVD64from64rr GR64:$dst, VR64:$src), 0>;

// movsx aliases
def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX16rr8 GR16:$dst, GR8:$src), 0, "att">;
def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX16rm8 GR16:$dst, i8mem:$src), 0, "att">;
def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX32rr8 GR32:$dst, GR8:$src), 0, "att">;
def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX32rr16 GR32:$dst, GR16:$src), 0, "att">;
def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX64rr8 GR64:$dst, GR8:$src), 0, "att">;
def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX64rr16 GR64:$dst, GR16:$src), 0, "att">;
def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX64rr32 GR64:$dst, GR32:$src), 0, "att">;

// movzx aliases
def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX16rr8 GR16:$dst, GR8:$src), 0, "att">;
def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX16rm8 GR16:$dst, i8mem:$src), 0, "att">;
def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX32rr8 GR32:$dst, GR8:$src), 0, "att">;
def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX32rr16 GR32:$dst, GR16:$src), 0, "att">;
def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX64rr8 GR64:$dst, GR8:$src), 0, "att">;
def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX64rr16 GR64:$dst, GR16:$src), 0, "att">;
// Note: No GR32->GR64 movzx form.
// outb %dx -> outb %al, %dx
def : InstAlias<"outb\t{%dx|dx}", (OUT8rr), 0>;
def : InstAlias<"outw\t{%dx|dx}", (OUT16rr), 0>;
def : InstAlias<"outl\t{%dx|dx}", (OUT32rr), 0>;
def : InstAlias<"outb\t$port", (OUT8ir u8imm:$port), 0>;
def : InstAlias<"outw\t$port", (OUT16ir u8imm:$port), 0>;
def : InstAlias<"outl\t$port", (OUT32ir u8imm:$port), 0>;

// 'sldt <mem>' can be encoded with either sldtw or sldtq with the same
// effect (both store to a 16-bit mem). Force to sldtw to avoid ambiguity
// errors, since its encoding is the most compact.
def : InstAlias<"sldt $mem", (SLDT16m i16mem:$mem), 0>;

// shld/shrd op,op -> shld op, op, CL
def : InstAlias<"shld{w}\t{$r2, $r1|$r1, $r2}", (SHLD16rrCL GR16:$r1, GR16:$r2), 0>;
def : InstAlias<"shld{l}\t{$r2, $r1|$r1, $r2}", (SHLD32rrCL GR32:$r1, GR32:$r2), 0>;
def : InstAlias<"shld{q}\t{$r2, $r1|$r1, $r2}", (SHLD64rrCL GR64:$r1, GR64:$r2), 0>;
def : InstAlias<"shrd{w}\t{$r2, $r1|$r1, $r2}", (SHRD16rrCL GR16:$r1, GR16:$r2), 0>;
def : InstAlias<"shrd{l}\t{$r2, $r1|$r1, $r2}", (SHRD32rrCL GR32:$r1, GR32:$r2), 0>;
def : InstAlias<"shrd{q}\t{$r2, $r1|$r1, $r2}", (SHRD64rrCL GR64:$r1, GR64:$r2), 0>;

def : InstAlias<"shld{w}\t{$reg, $mem|$mem, $reg}", (SHLD16mrCL i16mem:$mem, GR16:$reg), 0>;
def : InstAlias<"shld{l}\t{$reg, $mem|$mem, $reg}", (SHLD32mrCL i32mem:$mem, GR32:$reg), 0>;
def : InstAlias<"shld{q}\t{$reg, $mem|$mem, $reg}", (SHLD64mrCL i64mem:$mem, GR64:$reg), 0>;
def : InstAlias<"shrd{w}\t{$reg, $mem|$mem, $reg}", (SHRD16mrCL i16mem:$mem, GR16:$reg), 0>;
def : InstAlias<"shrd{l}\t{$reg, $mem|$mem, $reg}", (SHRD32mrCL i32mem:$mem, GR32:$reg), 0>;
def : InstAlias<"shrd{q}\t{$reg, $mem|$mem, $reg}", (SHRD64mrCL i64mem:$mem, GR64:$reg), 0>;
/* FIXME: This is disabled because the asm matcher is currently incapable of
 * matching a fixed immediate like $1.
// "shl X, $1" is an alias for "shl X".
multiclass ShiftRotateByOneAlias<string Mnemonic, string Opc> {
 def : InstAlias<!strconcat(Mnemonic, "b $op, $$1"),
                 (!cast<Instruction>(!strconcat(Opc, "8r1")) GR8:$op)>;
 def : InstAlias<!strconcat(Mnemonic, "w $op, $$1"),
                 (!cast<Instruction>(!strconcat(Opc, "16r1")) GR16:$op)>;
 def : InstAlias<!strconcat(Mnemonic, "l $op, $$1"),
                 (!cast<Instruction>(!strconcat(Opc, "32r1")) GR32:$op)>;
 def : InstAlias<!strconcat(Mnemonic, "q $op, $$1"),
                 (!cast<Instruction>(!strconcat(Opc, "64r1")) GR64:$op)>;
 def : InstAlias<!strconcat(Mnemonic, "b $op, $$1"),
                 (!cast<Instruction>(!strconcat(Opc, "8m1")) i8mem:$op)>;
 def : InstAlias<!strconcat(Mnemonic, "w $op, $$1"),
                 (!cast<Instruction>(!strconcat(Opc, "16m1")) i16mem:$op)>;
 def : InstAlias<!strconcat(Mnemonic, "l $op, $$1"),
                 (!cast<Instruction>(!strconcat(Opc, "32m1")) i32mem:$op)>;
 def : InstAlias<!strconcat(Mnemonic, "q $op, $$1"),
                 (!cast<Instruction>(!strconcat(Opc, "64m1")) i64mem:$op)>;
}

defm : ShiftRotateByOneAlias<"rcl", "RCL">;
defm : ShiftRotateByOneAlias<"rcr", "RCR">;
defm : ShiftRotateByOneAlias<"rol", "ROL">;
defm : ShiftRotateByOneAlias<"ror", "ROR">;
FIXME */
// test: We accept "testX <reg>, <mem>" and "testX <mem>, <reg>" as synonyms.
def : InstAlias<"test{b}\t{$mem, $val|$val, $mem}",
                (TEST8mr i8mem :$mem, GR8 :$val), 0>;
def : InstAlias<"test{w}\t{$mem, $val|$val, $mem}",
                (TEST16mr i16mem:$mem, GR16:$val), 0>;
def : InstAlias<"test{l}\t{$mem, $val|$val, $mem}",
                (TEST32mr i32mem:$mem, GR32:$val), 0>;
def : InstAlias<"test{q}\t{$mem, $val|$val, $mem}",
                (TEST64mr i64mem:$mem, GR64:$val), 0>;

// xchg: We accept "xchgX <reg>, <mem>" and "xchgX <mem>, <reg>" as synonyms.
def : InstAlias<"xchg{b}\t{$mem, $val|$val, $mem}",
                (XCHG8rm GR8 :$val, i8mem :$mem), 0>;
def : InstAlias<"xchg{w}\t{$mem, $val|$val, $mem}",
                (XCHG16rm GR16:$val, i16mem:$mem), 0>;
def : InstAlias<"xchg{l}\t{$mem, $val|$val, $mem}",
                (XCHG32rm GR32:$val, i32mem:$mem), 0>;
def : InstAlias<"xchg{q}\t{$mem, $val|$val, $mem}",
                (XCHG64rm GR64:$val, i64mem:$mem), 0>;

// xchg: We accept "xchgX <reg>, %eax" and "xchgX %eax, <reg>" as synonyms.
def : InstAlias<"xchg{w}\t{%ax, $src|$src, ax}", (XCHG16ar GR16:$src), 0>;
def : InstAlias<"xchg{l}\t{%eax, $src|$src, eax}", (XCHG32ar GR32:$src), 0>;
def : InstAlias<"xchg{q}\t{%rax, $src|$src, rax}", (XCHG64ar GR64:$src), 0>;

// In 64-bit mode, xchg %eax, %eax can't be encoded with the 0x90 opcode we
// would get by default because it's defined as NOP. But xchg %eax, %eax implies
// implicit zeroing of the upper 32 bits. So alias to the longer encoding.
def : InstAlias<"xchg{l}\t{%eax, %eax|eax, eax}",
                (XCHG32rr EAX, EAX), 0>, Requires<[In64BitMode]>;

// xchg %rax, %rax is a nop in x86-64 and can be encoded as such. Without this
// we emit an unneeded REX.w prefix.
def : InstAlias<"xchg{q}\t{%rax, %rax|rax, rax}", (NOOP), 0>;

// These aliases exist to get the parser to prioritize matching 8-bit
// immediate encodings over matching the implicit ax/eax/rax encodings. By
// explicitly mentioning the A register here, these entries will be ordered
// first due to the more explicit immediate type.
def : InstAlias<"adc{w}\t{$imm, %ax|ax, $imm}", (ADC16ri8 AX, i16i8imm:$imm), 0>;
def : InstAlias<"add{w}\t{$imm, %ax|ax, $imm}", (ADD16ri8 AX, i16i8imm:$imm), 0>;
def : InstAlias<"and{w}\t{$imm, %ax|ax, $imm}", (AND16ri8 AX, i16i8imm:$imm), 0>;
def : InstAlias<"cmp{w}\t{$imm, %ax|ax, $imm}", (CMP16ri8 AX, i16i8imm:$imm), 0>;
def : InstAlias<"or{w}\t{$imm, %ax|ax, $imm}", (OR16ri8 AX, i16i8imm:$imm), 0>;
def : InstAlias<"sbb{w}\t{$imm, %ax|ax, $imm}", (SBB16ri8 AX, i16i8imm:$imm), 0>;
def : InstAlias<"sub{w}\t{$imm, %ax|ax, $imm}", (SUB16ri8 AX, i16i8imm:$imm), 0>;
def : InstAlias<"xor{w}\t{$imm, %ax|ax, $imm}", (XOR16ri8 AX, i16i8imm:$imm), 0>;

def : InstAlias<"adc{l}\t{$imm, %eax|eax, $imm}", (ADC32ri8 EAX, i32i8imm:$imm), 0>;
def : InstAlias<"add{l}\t{$imm, %eax|eax, $imm}", (ADD32ri8 EAX, i32i8imm:$imm), 0>;
def : InstAlias<"and{l}\t{$imm, %eax|eax, $imm}", (AND32ri8 EAX, i32i8imm:$imm), 0>;
def : InstAlias<"cmp{l}\t{$imm, %eax|eax, $imm}", (CMP32ri8 EAX, i32i8imm:$imm), 0>;
def : InstAlias<"or{l}\t{$imm, %eax|eax, $imm}", (OR32ri8 EAX, i32i8imm:$imm), 0>;
def : InstAlias<"sbb{l}\t{$imm, %eax|eax, $imm}", (SBB32ri8 EAX, i32i8imm:$imm), 0>;
def : InstAlias<"sub{l}\t{$imm, %eax|eax, $imm}", (SUB32ri8 EAX, i32i8imm:$imm), 0>;
def : InstAlias<"xor{l}\t{$imm, %eax|eax, $imm}", (XOR32ri8 EAX, i32i8imm:$imm), 0>;

def : InstAlias<"adc{q}\t{$imm, %rax|rax, $imm}", (ADC64ri8 RAX, i64i8imm:$imm), 0>;
def : InstAlias<"add{q}\t{$imm, %rax|rax, $imm}", (ADD64ri8 RAX, i64i8imm:$imm), 0>;
def : InstAlias<"and{q}\t{$imm, %rax|rax, $imm}", (AND64ri8 RAX, i64i8imm:$imm), 0>;
def : InstAlias<"cmp{q}\t{$imm, %rax|rax, $imm}", (CMP64ri8 RAX, i64i8imm:$imm), 0>;
def : InstAlias<"or{q}\t{$imm, %rax|rax, $imm}", (OR64ri8 RAX, i64i8imm:$imm), 0>;
def : InstAlias<"sbb{q}\t{$imm, %rax|rax, $imm}", (SBB64ri8 RAX, i64i8imm:$imm), 0>;
def : InstAlias<"sub{q}\t{$imm, %rax|rax, $imm}", (SUB64ri8 RAX, i64i8imm:$imm), 0>;
def : InstAlias<"xor{q}\t{$imm, %rax|rax, $imm}", (XOR64ri8 RAX, i64i8imm:$imm), 0>;