//===-- X86InstrInfo.td - Main X86 Instruction Definition --*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 instruction set, defining the instructions, and
// properties of the instructions which are needed for code generation, machine
// code emission, and analysis.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// X86 specific DAG Nodes.
//
// SHLD/SHRD: result and both value operands share a type; the shift amount
// (operand 3) is an integer.
def SDTIntShiftDOp: SDTypeProfile<1, 3,
                                  [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
                                   SDTCisInt<0>, SDTCisInt<3>]>;

// CMP/TEST: produces i32 EFLAGS from two like-typed operands.
def SDTX86CmpTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisSameAs<1, 2>]>;

// FP compare with an i8 condition-code immediate.
def SDTX86Cmps : SDTypeProfile<1, 3, [SDTCisFP<0>, SDTCisSameAs<1, 2>, SDTCisVT<3, i8>]>;
//def SDTX86Cmpss : SDTypeProfile<1, 3, [SDTCisVT<0, f32>, SDTCisSameAs<1, 2>, SDTCisVT<3, i8>]>;

// CMOV: result/true/false values share a type; operand 3 is the i8 condition
// code and operand 4 is the i32 EFLAGS input.
def SDTX86Cmov    : SDTypeProfile<1, 4,
                                  [SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>,
                                   SDTCisVT<3, i8>, SDTCisVT<4, i32>]>;
33 // Unary and binary operator instructions that set EFLAGS as a side-effect.
34 def SDTUnaryArithWithFlags : SDTypeProfile<2, 1,
36 SDTCisInt<0>, SDTCisVT<1, i32>]>;
38 def SDTBinaryArithWithFlags : SDTypeProfile<2, 2,
41 SDTCisInt<0>, SDTCisVT<1, i32>]>;
43 // SDTBinaryArithWithFlagsInOut - RES1, EFLAGS = op LHS, RHS, EFLAGS
44 def SDTBinaryArithWithFlagsInOut : SDTypeProfile<2, 3,
50 // RES1, RES2, FLAGS = op LHS, RHS
51 def SDT2ResultBinaryArithWithFlags : SDTypeProfile<3, 2,
55 SDTCisInt<0>, SDTCisVT<1, i32>]>;
// Conditional branch: target block, i8 condition code, i32 EFLAGS input.
def SDTX86BrCond  : SDTypeProfile<0, 3,
                                  [SDTCisVT<0, OtherVT>,
                                   SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;
60 def SDTX86SetCC : SDTypeProfile<1, 2,
62 SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;
63 def SDTX86SetCC_C : SDTypeProfile<1, 2,
65 SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;
// SAHF: produces i32 EFLAGS from an i8 (AH) input.
def SDTX86sahf : SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisVT<1, i8>]>;

// RDRAND/RDSEED: random value plus i32 EFLAGS (CF indicates success).
def SDTX86rdrand : SDTypeProfile<2, 0, [SDTCisInt<0>, SDTCisVT<1, i32>]>;
71 def SDTX86cas : SDTypeProfile<0, 3, [SDTCisPtrTy<0>, SDTCisInt<1>,
// CMPXCHG8B/16B: only the pointer operand appears here; the data halves are
// communicated through fixed registers.
def SDTX86caspair : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
// CMPXCHG8B variant that preserves EBX: pointer plus the two i32 halves of
// the replacement value; returns saved EBX.
def SDTX86caspairSaveEbx8 : SDTypeProfile<1, 3,
                                          [SDTCisVT<0, i32>, SDTCisPtrTy<1>,
                                           SDTCisVT<2, i32>, SDTCisVT<3, i32>]>;
// CMPXCHG16B variant that preserves RBX: pointer plus the two i64 halves of
// the replacement value; returns saved RBX.
def SDTX86caspairSaveRbx16 : SDTypeProfile<1, 3,
                                           [SDTCisVT<0, i64>, SDTCisPtrTy<1>,
                                            SDTCisVT<2, i64>, SDTCisVT<3, i64>]>;
81 def SDTLockBinaryArithWithFlags : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
85 def SDTLockUnaryArithWithFlags : SDTypeProfile<1, 1, [SDTCisVT<0, i32>,
// Return: variadic; first operand is the i32 bytes-to-pop amount.
def SDTX86Ret : SDTypeProfile<0, -1, [SDTCisVT<0, i32>]>;
90 def SDT_X86CallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>,
92 def SDT_X86CallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>,
// Call / NOTRACK indirect branch: variadic with a pointer-typed target.
def SDT_X86Call : SDTypeProfile<0, -1, [SDTCisVT<0, iPTR>]>;

def SDT_X86NtBrind : SDTypeProfile<0, -1, [SDTCisVT<0, iPTR>]>;
99 def SDT_X86VASTART_SAVE_XMM_REGS : SDTypeProfile<0, -1, [SDTCisVT<0, i8>,
103 def SDT_X86VAARG_64 : SDTypeProfile<1, -1, [SDTCisPtrTy<0>,
// REP string ops carry only the value type of the element being moved/stored.
def SDTX86RepStr  : SDTypeProfile<0, 1, [SDTCisVT<0, OtherVT>]>;

def SDTX86Void    : SDTypeProfile<0, 0, []>;

def SDTX86Wrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>;

def SDT_X86TLSADDR : SDTypeProfile<0, 1, [SDTCisInt<0>]>;

def SDT_X86TLSBASEADDR : SDTypeProfile<0, 1, [SDTCisInt<0>]>;

def SDT_X86TLSCALL : SDTypeProfile<0, 1, [SDTCisInt<0>]>;

def SDT_X86WIN_ALLOCA : SDTypeProfile<0, 1, [SDTCisVT<0, iPTR>]>;

def SDT_X86SEG_ALLOCA : SDTypeProfile<1, 1, [SDTCisVT<0, iPTR>, SDTCisVT<1, iPTR>]>;

def SDT_X86EHRET : SDTypeProfile<0, 1, [SDTCisInt<0>]>;

def SDT_X86TCRET : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisVT<1, i32>]>;

def SDT_X86MEMBARRIER : SDTypeProfile<0, 0, []>;

def X86MemBarrier : SDNode<"X86ISD::MEMBARRIER", SDT_X86MEMBARRIER,
                           [SDNPHasChain, SDNPSideEffect]>;
133 def X86MFence : SDNode<"X86ISD::MFENCE", SDT_X86MEMBARRIER,
// Bit scans, double-precision shifts, compares, and CMOV.
def X86bsf     : SDNode<"X86ISD::BSF",      SDTUnaryArithWithFlags>;
def X86bsr     : SDNode<"X86ISD::BSR",      SDTUnaryArithWithFlags>;
def X86shld    : SDNode<"X86ISD::SHLD",     SDTIntShiftDOp>;
def X86shrd    : SDNode<"X86ISD::SHRD",     SDTIntShiftDOp>;

def X86cmp     : SDNode<"X86ISD::CMP" ,     SDTX86CmpTest>;
def X86bt      : SDNode<"X86ISD::BT",       SDTX86CmpTest>;

def X86cmov    : SDNode<"X86ISD::CMOV",     SDTX86Cmov>;
146 def X86brcond : SDNode<"X86ISD::BRCOND", SDTX86BrCond,
def X86setcc   : SDNode<"X86ISD::SETCC",  SDTX86SetCC>;
def X86setcc_c : SDNode<"X86ISD::SETCC_CARRY", SDTX86SetCC_C>;

def X86sahf    : SDNode<"X86ISD::SAHF",   SDTX86sahf>;

def X86rdrand  : SDNode<"X86ISD::RDRAND", SDTX86rdrand,
                        [SDNPHasChain, SDNPSideEffect]>;

def X86rdseed  : SDNode<"X86ISD::RDSEED", SDTX86rdrand,
                        [SDNPHasChain, SDNPSideEffect]>;

// Atomic compare-and-swap family; all touch memory and are glued.
def X86cas : SDNode<"X86ISD::LCMPXCHG_DAG", SDTX86cas,
                    [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
                     SDNPMayLoad, SDNPMemOperand]>;
def X86cas8 : SDNode<"X86ISD::LCMPXCHG8_DAG", SDTX86caspair,
                     [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
                      SDNPMayLoad, SDNPMemOperand]>;
def X86cas16 : SDNode<"X86ISD::LCMPXCHG16_DAG", SDTX86caspair,
                      [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
                       SDNPMayLoad, SDNPMemOperand]>;
def X86cas8save_ebx : SDNode<"X86ISD::LCMPXCHG8_SAVE_EBX_DAG",
                             SDTX86caspairSaveEbx8,
                             [SDNPHasChain, SDNPInGlue, SDNPOutGlue,
                              SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def X86cas16save_rbx : SDNode<"X86ISD::LCMPXCHG16_SAVE_RBX_DAG",
                              SDTX86caspairSaveRbx16,
                              [SDNPHasChain, SDNPInGlue, SDNPOutGlue,
                               SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;

def X86retflag : SDNode<"X86ISD::RET_FLAG", SDTX86Ret,
                        [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def X86iret : SDNode<"X86ISD::IRET", SDTX86Ret,
                     [SDNPHasChain, SDNPOptInGlue]>;

def X86vastart_save_xmm_regs :
                 SDNode<"X86ISD::VASTART_SAVE_XMM_REGS",
                        SDT_X86VASTART_SAVE_XMM_REGS,
                        [SDNPHasChain, SDNPVariadic]>;
187 SDNode<"X86ISD::VAARG_64", SDT_X86VAARG_64,
188 [SDNPHasChain, SDNPMayLoad, SDNPMayStore,
def X86callseq_start :
                 SDNode<"ISD::CALLSEQ_START", SDT_X86CallSeqStart,
                        [SDNPHasChain, SDNPOutGlue]>;
194 SDNode<"ISD::CALLSEQ_END", SDT_X86CallSeqEnd,
195 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
197 def X86call : SDNode<"X86ISD::CALL", SDT_X86Call,
198 [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
201 def X86NoTrackCall : SDNode<"X86ISD::NT_CALL", SDT_X86Call,
202 [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
204 def X86NoTrackBrind : SDNode<"X86ISD::NT_BRIND", SDT_X86NtBrind,
def X86rep_stos: SDNode<"X86ISD::REP_STOS", SDTX86RepStr,
                        [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore]>;
209 def X86rep_movs: SDNode<"X86ISD::REP_MOVS", SDTX86RepStr,
210 [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
// Timestamp/performance-counter reads: chained with side effects, results
// delivered via glue.
def X86rdtsc   : SDNode<"X86ISD::RDTSC_DAG", SDTX86Void,
                        [SDNPHasChain, SDNPOutGlue, SDNPSideEffect]>;
def X86rdtscp  : SDNode<"X86ISD::RDTSCP_DAG", SDTX86Void,
                        [SDNPHasChain, SDNPOutGlue, SDNPSideEffect]>;
def X86rdpmc   : SDNode<"X86ISD::RDPMC_DAG", SDTX86Void,
                        [SDNPHasChain, SDNPOutGlue, SDNPSideEffect]>;

def X86Wrapper    : SDNode<"X86ISD::Wrapper",    SDTX86Wrapper>;
def X86WrapperRIP : SDNode<"X86ISD::WrapperRIP", SDTX86Wrapper>;
223 def X86RecoverFrameAlloc : SDNode<"ISD::LOCAL_RECOVER",
224 SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
def X86tlsaddr : SDNode<"X86ISD::TLSADDR", SDT_X86TLSADDR,
                        [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;

def X86tlsbaseaddr : SDNode<"X86ISD::TLSBASEADDR", SDT_X86TLSBASEADDR,
                            [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
233 def X86ehret : SDNode<"X86ISD::EH_RETURN", SDT_X86EHRET,
236 def X86eh_sjlj_setjmp : SDNode<"X86ISD::EH_SJLJ_SETJMP",
237 SDTypeProfile<1, 1, [SDTCisInt<0>,
239 [SDNPHasChain, SDNPSideEffect]>;
def X86eh_sjlj_longjmp : SDNode<"X86ISD::EH_SJLJ_LONGJMP",
                                SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>,
                                [SDNPHasChain, SDNPSideEffect]>;
def X86eh_sjlj_setup_dispatch : SDNode<"X86ISD::EH_SJLJ_SETUP_DISPATCH",
                                       SDTypeProfile<0, 0, []>,
                                       [SDNPHasChain, SDNPSideEffect]>;

def X86tcret : SDNode<"X86ISD::TC_RETURN", SDT_X86TCRET,
                      [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
250 def X86add_flag : SDNode<"X86ISD::ADD", SDTBinaryArithWithFlags,
def X86sub_flag  : SDNode<"X86ISD::SUB",  SDTBinaryArithWithFlags>;
253 def X86smul_flag : SDNode<"X86ISD::SMUL", SDTBinaryArithWithFlags,
255 def X86umul_flag : SDNode<"X86ISD::UMUL", SDT2ResultBinaryArithWithFlags,
def X86adc_flag  : SDNode<"X86ISD::ADC",  SDTBinaryArithWithFlagsInOut>;
def X86sbb_flag  : SDNode<"X86ISD::SBB",  SDTBinaryArithWithFlagsInOut>;

def X86inc_flag  : SDNode<"X86ISD::INC",  SDTUnaryArithWithFlags>;
def X86dec_flag  : SDNode<"X86ISD::DEC",  SDTUnaryArithWithFlags>;
262 def X86or_flag : SDNode<"X86ISD::OR", SDTBinaryArithWithFlags,
264 def X86xor_flag : SDNode<"X86ISD::XOR", SDTBinaryArithWithFlags,
266 def X86and_flag : SDNode<"X86ISD::AND", SDTBinaryArithWithFlags,
269 def X86lock_add : SDNode<"X86ISD::LADD", SDTLockBinaryArithWithFlags,
270 [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
272 def X86lock_sub : SDNode<"X86ISD::LSUB", SDTLockBinaryArithWithFlags,
273 [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
275 def X86lock_or : SDNode<"X86ISD::LOR", SDTLockBinaryArithWithFlags,
276 [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
278 def X86lock_xor : SDNode<"X86ISD::LXOR", SDTLockBinaryArithWithFlags,
279 [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
281 def X86lock_and : SDNode<"X86ISD::LAND", SDTLockBinaryArithWithFlags,
282 [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
285 def X86lock_inc : SDNode<"X86ISD::LINC", SDTLockUnaryArithWithFlags,
286 [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
288 def X86lock_dec : SDNode<"X86ISD::LDEC", SDTLockUnaryArithWithFlags,
289 [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
def X86bextr  : SDNode<"X86ISD::BEXTR",  SDTIntBinOp>;

def X86mul_imm : SDNode<"X86ISD::MUL_IMM", SDTIntBinOp>;

def X86WinAlloca : SDNode<"X86ISD::WIN_ALLOCA", SDT_X86WIN_ALLOCA,
                          [SDNPHasChain, SDNPOutGlue]>;
299 def X86SegAlloca : SDNode<"X86ISD::SEG_ALLOCA", SDT_X86SEG_ALLOCA,
def X86TLSCall : SDNode<"X86ISD::TLSCALL", SDT_X86TLSCALL,
                        [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;

def X86lwpins : SDNode<"X86ISD::LWPINS",
                       SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisInt<1>,
                                            SDTCisVT<2, i32>, SDTCisVT<3, i32>]>,
                       [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPSideEffect]>;

def X86umwait : SDNode<"X86ISD::UMWAIT",
                       SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisInt<1>,
                                            SDTCisVT<2, i32>, SDTCisVT<3, i32>]>,
                       [SDNPHasChain, SDNPSideEffect]>;

def X86tpause : SDNode<"X86ISD::TPAUSE",
                       SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisInt<1>,
                                            SDTCisVT<2, i32>, SDTCisVT<3, i32>]>,
                       [SDNPHasChain, SDNPSideEffect]>;
//===----------------------------------------------------------------------===//
// X86 Operand Definitions.
//

// A version of ptr_rc which excludes SP, ESP, and RSP. This is used for
// the index operand of an address, to conform to x86 encoding restrictions.
def ptr_rc_nosp : PointerLikeRegClass<1>;
328 // *mem - Operand definitions for the funky X86 addressing mode operands.
330 def X86MemAsmOperand : AsmOperandClass {
333 let RenderMethod = "addMemOperands", SuperClasses = [X86MemAsmOperand] in {
334 def X86Mem8AsmOperand : AsmOperandClass { let Name = "Mem8"; }
335 def X86Mem16AsmOperand : AsmOperandClass { let Name = "Mem16"; }
336 def X86Mem32AsmOperand : AsmOperandClass { let Name = "Mem32"; }
337 def X86Mem64AsmOperand : AsmOperandClass { let Name = "Mem64"; }
338 def X86Mem80AsmOperand : AsmOperandClass { let Name = "Mem80"; }
339 def X86Mem128AsmOperand : AsmOperandClass { let Name = "Mem128"; }
340 def X86Mem256AsmOperand : AsmOperandClass { let Name = "Mem256"; }
341 def X86Mem512AsmOperand : AsmOperandClass { let Name = "Mem512"; }
342 // Gather mem operands
343 def X86Mem64_RC128Operand : AsmOperandClass { let Name = "Mem64_RC128"; }
344 def X86Mem128_RC128Operand : AsmOperandClass { let Name = "Mem128_RC128"; }
345 def X86Mem256_RC128Operand : AsmOperandClass { let Name = "Mem256_RC128"; }
346 def X86Mem128_RC256Operand : AsmOperandClass { let Name = "Mem128_RC256"; }
347 def X86Mem256_RC256Operand : AsmOperandClass { let Name = "Mem256_RC256"; }
349 def X86Mem64_RC128XOperand : AsmOperandClass { let Name = "Mem64_RC128X"; }
350 def X86Mem128_RC128XOperand : AsmOperandClass { let Name = "Mem128_RC128X"; }
351 def X86Mem256_RC128XOperand : AsmOperandClass { let Name = "Mem256_RC128X"; }
352 def X86Mem128_RC256XOperand : AsmOperandClass { let Name = "Mem128_RC256X"; }
353 def X86Mem256_RC256XOperand : AsmOperandClass { let Name = "Mem256_RC256X"; }
354 def X86Mem512_RC256XOperand : AsmOperandClass { let Name = "Mem512_RC256X"; }
355 def X86Mem256_RC512Operand : AsmOperandClass { let Name = "Mem256_RC512"; }
356 def X86Mem512_RC512Operand : AsmOperandClass { let Name = "Mem512_RC512"; }
359 def X86AbsMemAsmOperand : AsmOperandClass {
361 let SuperClasses = [X86MemAsmOperand];
364 class X86MemOperand<string printMethod,
365 AsmOperandClass parserMatchClass = X86MemAsmOperand> : Operand<iPTR> {
366 let PrintMethod = printMethod;
367 let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, SEGMENT_REG);
368 let ParserMatchClass = parserMatchClass;
369 let OperandType = "OPERAND_MEMORY";
372 // Gather mem operands
373 class X86VMemOperand<RegisterClass RC, string printMethod,
374 AsmOperandClass parserMatchClass>
375 : X86MemOperand<printMethod, parserMatchClass> {
376 let MIOperandInfo = (ops ptr_rc, i8imm, RC, i32imm, SEGMENT_REG);
def anymem : X86MemOperand<"printanymem">;

// FIXME: Right now we allow any size during parsing, but we might want to
// restrict to only unsized memory.
def opaquemem : X86MemOperand<"printopaquemem">;

def i8mem   : X86MemOperand<"printi8mem",   X86Mem8AsmOperand>;
def i16mem  : X86MemOperand<"printi16mem",  X86Mem16AsmOperand>;
def i32mem  : X86MemOperand<"printi32mem",  X86Mem32AsmOperand>;
def i64mem  : X86MemOperand<"printi64mem",  X86Mem64AsmOperand>;
def i128mem : X86MemOperand<"printi128mem", X86Mem128AsmOperand>;
def i256mem : X86MemOperand<"printi256mem", X86Mem256AsmOperand>;
def i512mem : X86MemOperand<"printi512mem", X86Mem512AsmOperand>;
def f32mem  : X86MemOperand<"printf32mem",  X86Mem32AsmOperand>;
def f64mem  : X86MemOperand<"printf64mem",  X86Mem64AsmOperand>;
def f80mem  : X86MemOperand<"printf80mem",  X86Mem80AsmOperand>;
def f128mem : X86MemOperand<"printf128mem", X86Mem128AsmOperand>;
def f256mem : X86MemOperand<"printf256mem", X86Mem256AsmOperand>;
def f512mem : X86MemOperand<"printf512mem", X86Mem512AsmOperand>;

def v512mem : X86VMemOperand<VR512, "printf512mem", X86Mem512AsmOperand>;

// Gather mem operands
def vx64mem   : X86VMemOperand<VR128,  "printi64mem",  X86Mem64_RC128Operand>;
def vx128mem  : X86VMemOperand<VR128,  "printi128mem", X86Mem128_RC128Operand>;
def vx256mem  : X86VMemOperand<VR128,  "printi256mem", X86Mem256_RC128Operand>;
def vy128mem  : X86VMemOperand<VR256,  "printi128mem", X86Mem128_RC256Operand>;
def vy256mem  : X86VMemOperand<VR256,  "printi256mem", X86Mem256_RC256Operand>;

def vx64xmem  : X86VMemOperand<VR128X, "printi64mem",  X86Mem64_RC128XOperand>;
def vx128xmem : X86VMemOperand<VR128X, "printi128mem", X86Mem128_RC128XOperand>;
def vx256xmem : X86VMemOperand<VR128X, "printi256mem", X86Mem256_RC128XOperand>;
def vy128xmem : X86VMemOperand<VR256X, "printi128mem", X86Mem128_RC256XOperand>;
def vy256xmem : X86VMemOperand<VR256X, "printi256mem", X86Mem256_RC256XOperand>;
def vy512xmem : X86VMemOperand<VR256X, "printi512mem", X86Mem512_RC256XOperand>;
def vz256mem  : X86VMemOperand<VR512,  "printi256mem", X86Mem256_RC512Operand>;
def vz512mem  : X86VMemOperand<VR512,  "printi512mem", X86Mem512_RC512Operand>;
417 // A version of i8mem for use on x86-64 and x32 that uses a NOREX GPR instead
418 // of a plain GPR, so that it doesn't potentially require a REX prefix.
def ptr_rc_norex : PointerLikeRegClass<2>;
def ptr_rc_norex_nosp : PointerLikeRegClass<3>;
422 def i8mem_NOREX : Operand<iPTR> {
423 let PrintMethod = "printi8mem";
424 let MIOperandInfo = (ops ptr_rc_norex, i8imm, ptr_rc_norex_nosp, i32imm,
426 let ParserMatchClass = X86Mem8AsmOperand;
427 let OperandType = "OPERAND_MEMORY";
430 // GPRs available for tailcall.
431 // It represents GR32_TC, GR64_TC or GR64_TCW64.
def ptr_rc_tailcall : PointerLikeRegClass<4>;
434 // Special i32mem for addresses of load folding tail calls. These are not
435 // allowed to use callee-saved registers since they must be scheduled
436 // after callee-saved register are popped.
437 def i32mem_TC : Operand<i32> {
438 let PrintMethod = "printi32mem";
439 let MIOperandInfo = (ops ptr_rc_tailcall, i8imm, ptr_rc_tailcall,
440 i32imm, SEGMENT_REG);
441 let ParserMatchClass = X86Mem32AsmOperand;
442 let OperandType = "OPERAND_MEMORY";
445 // Special i64mem for addresses of load folding tail calls. These are not
446 // allowed to use callee-saved registers since they must be scheduled
447 // after callee-saved register are popped.
448 def i64mem_TC : Operand<i64> {
449 let PrintMethod = "printi64mem";
450 let MIOperandInfo = (ops ptr_rc_tailcall, i8imm,
451 ptr_rc_tailcall, i32imm, SEGMENT_REG);
452 let ParserMatchClass = X86Mem64AsmOperand;
453 let OperandType = "OPERAND_MEMORY";
456 let OperandType = "OPERAND_PCREL",
457 ParserMatchClass = X86AbsMemAsmOperand,
458 PrintMethod = "printPCRelImm" in {
459 def i32imm_pcrel : Operand<i32>;
460 def i16imm_pcrel : Operand<i16>;
462 // Branch targets have OtherVT type and print as pc-relative values.
def brtarget : Operand<OtherVT>;
def brtarget8 : Operand<OtherVT>;
468 // Special parser to detect 16-bit mode to select 16-bit displacement.
469 def X86AbsMem16AsmOperand : AsmOperandClass {
470 let Name = "AbsMem16";
471 let RenderMethod = "addAbsMemOperands";
472 let SuperClasses = [X86AbsMemAsmOperand];
475 // Branch targets have OtherVT type and print as pc-relative values.
476 let OperandType = "OPERAND_PCREL",
477 PrintMethod = "printPCRelImm" in {
478 let ParserMatchClass = X86AbsMem16AsmOperand in
479 def brtarget16 : Operand<OtherVT>;
480 let ParserMatchClass = X86AbsMemAsmOperand in
481 def brtarget32 : Operand<OtherVT>;
484 let RenderMethod = "addSrcIdxOperands" in {
485 def X86SrcIdx8Operand : AsmOperandClass {
486 let Name = "SrcIdx8";
487 let SuperClasses = [X86Mem8AsmOperand];
489 def X86SrcIdx16Operand : AsmOperandClass {
490 let Name = "SrcIdx16";
491 let SuperClasses = [X86Mem16AsmOperand];
493 def X86SrcIdx32Operand : AsmOperandClass {
494 let Name = "SrcIdx32";
495 let SuperClasses = [X86Mem32AsmOperand];
497 def X86SrcIdx64Operand : AsmOperandClass {
498 let Name = "SrcIdx64";
499 let SuperClasses = [X86Mem64AsmOperand];
501 } // RenderMethod = "addSrcIdxOperands"
503 let RenderMethod = "addDstIdxOperands" in {
504 def X86DstIdx8Operand : AsmOperandClass {
505 let Name = "DstIdx8";
506 let SuperClasses = [X86Mem8AsmOperand];
508 def X86DstIdx16Operand : AsmOperandClass {
509 let Name = "DstIdx16";
510 let SuperClasses = [X86Mem16AsmOperand];
512 def X86DstIdx32Operand : AsmOperandClass {
513 let Name = "DstIdx32";
514 let SuperClasses = [X86Mem32AsmOperand];
516 def X86DstIdx64Operand : AsmOperandClass {
517 let Name = "DstIdx64";
518 let SuperClasses = [X86Mem64AsmOperand];
520 } // RenderMethod = "addDstIdxOperands"
522 let RenderMethod = "addMemOffsOperands" in {
523 def X86MemOffs16_8AsmOperand : AsmOperandClass {
524 let Name = "MemOffs16_8";
525 let SuperClasses = [X86Mem8AsmOperand];
527 def X86MemOffs16_16AsmOperand : AsmOperandClass {
528 let Name = "MemOffs16_16";
529 let SuperClasses = [X86Mem16AsmOperand];
531 def X86MemOffs16_32AsmOperand : AsmOperandClass {
532 let Name = "MemOffs16_32";
533 let SuperClasses = [X86Mem32AsmOperand];
535 def X86MemOffs32_8AsmOperand : AsmOperandClass {
536 let Name = "MemOffs32_8";
537 let SuperClasses = [X86Mem8AsmOperand];
539 def X86MemOffs32_16AsmOperand : AsmOperandClass {
540 let Name = "MemOffs32_16";
541 let SuperClasses = [X86Mem16AsmOperand];
543 def X86MemOffs32_32AsmOperand : AsmOperandClass {
544 let Name = "MemOffs32_32";
545 let SuperClasses = [X86Mem32AsmOperand];
547 def X86MemOffs32_64AsmOperand : AsmOperandClass {
548 let Name = "MemOffs32_64";
549 let SuperClasses = [X86Mem64AsmOperand];
551 def X86MemOffs64_8AsmOperand : AsmOperandClass {
552 let Name = "MemOffs64_8";
553 let SuperClasses = [X86Mem8AsmOperand];
555 def X86MemOffs64_16AsmOperand : AsmOperandClass {
556 let Name = "MemOffs64_16";
557 let SuperClasses = [X86Mem16AsmOperand];
559 def X86MemOffs64_32AsmOperand : AsmOperandClass {
560 let Name = "MemOffs64_32";
561 let SuperClasses = [X86Mem32AsmOperand];
563 def X86MemOffs64_64AsmOperand : AsmOperandClass {
564 let Name = "MemOffs64_64";
565 let SuperClasses = [X86Mem64AsmOperand];
567 } // RenderMethod = "addMemOffsOperands"
569 class X86SrcIdxOperand<string printMethod, AsmOperandClass parserMatchClass>
570 : X86MemOperand<printMethod, parserMatchClass> {
571 let MIOperandInfo = (ops ptr_rc, SEGMENT_REG);
574 class X86DstIdxOperand<string printMethod, AsmOperandClass parserMatchClass>
575 : X86MemOperand<printMethod, parserMatchClass> {
576 let MIOperandInfo = (ops ptr_rc);
def srcidx8  : X86SrcIdxOperand<"printSrcIdx8",  X86SrcIdx8Operand>;
def srcidx16 : X86SrcIdxOperand<"printSrcIdx16", X86SrcIdx16Operand>;
def srcidx32 : X86SrcIdxOperand<"printSrcIdx32", X86SrcIdx32Operand>;
def srcidx64 : X86SrcIdxOperand<"printSrcIdx64", X86SrcIdx64Operand>;
def dstidx8  : X86DstIdxOperand<"printDstIdx8",  X86DstIdx8Operand>;
def dstidx16 : X86DstIdxOperand<"printDstIdx16", X86DstIdx16Operand>;
def dstidx32 : X86DstIdxOperand<"printDstIdx32", X86DstIdx32Operand>;
def dstidx64 : X86DstIdxOperand<"printDstIdx64", X86DstIdx64Operand>;
588 class X86MemOffsOperand<Operand immOperand, string printMethod,
589 AsmOperandClass parserMatchClass>
590 : X86MemOperand<printMethod, parserMatchClass> {
591 let MIOperandInfo = (ops immOperand, SEGMENT_REG);
def offset16_8  : X86MemOffsOperand<i16imm, "printMemOffs8",
                                    X86MemOffs16_8AsmOperand>;
def offset16_16 : X86MemOffsOperand<i16imm, "printMemOffs16",
                                    X86MemOffs16_16AsmOperand>;
def offset16_32 : X86MemOffsOperand<i16imm, "printMemOffs32",
                                    X86MemOffs16_32AsmOperand>;
def offset32_8  : X86MemOffsOperand<i32imm, "printMemOffs8",
                                    X86MemOffs32_8AsmOperand>;
def offset32_16 : X86MemOffsOperand<i32imm, "printMemOffs16",
                                    X86MemOffs32_16AsmOperand>;
def offset32_32 : X86MemOffsOperand<i32imm, "printMemOffs32",
                                    X86MemOffs32_32AsmOperand>;
def offset32_64 : X86MemOffsOperand<i32imm, "printMemOffs64",
                                    X86MemOffs32_64AsmOperand>;
def offset64_8  : X86MemOffsOperand<i64imm, "printMemOffs8",
                                    X86MemOffs64_8AsmOperand>;
def offset64_16 : X86MemOffsOperand<i64imm, "printMemOffs16",
                                    X86MemOffs64_16AsmOperand>;
def offset64_32 : X86MemOffsOperand<i64imm, "printMemOffs32",
                                    X86MemOffs64_32AsmOperand>;
def offset64_64 : X86MemOffsOperand<i64imm, "printMemOffs64",
                                    X86MemOffs64_64AsmOperand>;
617 def SSECC : Operand<i8> {
618 let PrintMethod = "printSSEAVXCC";
619 let OperandType = "OPERAND_IMMEDIATE";
622 def AVXCC : Operand<i8> {
623 let PrintMethod = "printSSEAVXCC";
624 let OperandType = "OPERAND_IMMEDIATE";
627 def AVX512ICC : Operand<i8> {
628 let PrintMethod = "printSSEAVXCC";
629 let OperandType = "OPERAND_IMMEDIATE";
632 def XOPCC : Operand<i8> {
633 let PrintMethod = "printXOPCC";
634 let OperandType = "OPERAND_IMMEDIATE";
637 class ImmSExtAsmOperandClass : AsmOperandClass {
638 let SuperClasses = [ImmAsmOperand];
639 let RenderMethod = "addImmOperands";
642 def X86GR32orGR64AsmOperand : AsmOperandClass {
643 let Name = "GR32orGR64";
646 def GR32orGR64 : RegisterOperand<GR32> {
647 let ParserMatchClass = X86GR32orGR64AsmOperand;
649 def AVX512RCOperand : AsmOperandClass {
650 let Name = "AVX512RC";
652 def AVX512RC : Operand<i32> {
653 let PrintMethod = "printRoundingControl";
654 let OperandType = "OPERAND_IMMEDIATE";
655 let ParserMatchClass = AVX512RCOperand;
658 // Sign-extended immediate classes. We don't need to define the full lattice
659 // here because there is no instruction with an ambiguity between ImmSExti64i32
662 // The strange ranges come from the fact that the assembler always works with
663 // 64-bit immediates, but for a 16-bit target value we want to accept both "-1"
664 // (which will be a -1ULL), and "0xFF" (-1 in 16-bits).
667 // [0xFFFFFFFF80000000, 0xFFFFFFFFFFFFFFFF]
668 def ImmSExti64i32AsmOperand : ImmSExtAsmOperandClass {
669 let Name = "ImmSExti64i32";
672 // [0, 0x0000007F] | [0x000000000000FF80, 0x000000000000FFFF] |
673 // [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
674 def ImmSExti16i8AsmOperand : ImmSExtAsmOperandClass {
675 let Name = "ImmSExti16i8";
676 let SuperClasses = [ImmSExti64i32AsmOperand];
679 // [0, 0x0000007F] | [0x00000000FFFFFF80, 0x00000000FFFFFFFF] |
680 // [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
681 def ImmSExti32i8AsmOperand : ImmSExtAsmOperandClass {
682 let Name = "ImmSExti32i8";
686 // [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
687 def ImmSExti64i8AsmOperand : ImmSExtAsmOperandClass {
688 let Name = "ImmSExti64i8";
689 let SuperClasses = [ImmSExti16i8AsmOperand, ImmSExti32i8AsmOperand,
690 ImmSExti64i32AsmOperand];
693 // Unsigned immediate used by SSE/AVX instructions
695 // [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
696 def ImmUnsignedi8AsmOperand : AsmOperandClass {
697 let Name = "ImmUnsignedi8";
698 let RenderMethod = "addImmOperands";
701 // A couple of more descriptive operand definitions.
702 // 16-bits but only 8 bits are significant.
703 def i16i8imm : Operand<i16> {
704 let ParserMatchClass = ImmSExti16i8AsmOperand;
705 let OperandType = "OPERAND_IMMEDIATE";
707 // 32-bits but only 8 bits are significant.
708 def i32i8imm : Operand<i32> {
709 let ParserMatchClass = ImmSExti32i8AsmOperand;
710 let OperandType = "OPERAND_IMMEDIATE";
713 // 64-bits but only 32 bits are significant.
714 def i64i32imm : Operand<i64> {
715 let ParserMatchClass = ImmSExti64i32AsmOperand;
716 let OperandType = "OPERAND_IMMEDIATE";
719 // 64-bits but only 8 bits are significant.
720 def i64i8imm : Operand<i64> {
721 let ParserMatchClass = ImmSExti64i8AsmOperand;
722 let OperandType = "OPERAND_IMMEDIATE";
725 // Unsigned 8-bit immediate used by SSE/AVX instructions.
726 def u8imm : Operand<i8> {
727 let PrintMethod = "printU8Imm";
728 let ParserMatchClass = ImmUnsignedi8AsmOperand;
729 let OperandType = "OPERAND_IMMEDIATE";
732 // 32-bit immediate but only 8-bits are significant and they are unsigned.
733 // Used by some SSE/AVX instructions that use intrinsics.
734 def i32u8imm : Operand<i32> {
735 let PrintMethod = "printU8Imm";
736 let ParserMatchClass = ImmUnsignedi8AsmOperand;
737 let OperandType = "OPERAND_IMMEDIATE";
740 // 64-bits but only 32 bits are significant, and those bits are treated as being
742 def i64i32imm_pcrel : Operand<i64> {
743 let PrintMethod = "printPCRelImm";
744 let ParserMatchClass = X86AbsMemAsmOperand;
745 let OperandType = "OPERAND_PCREL";
748 def lea64_32mem : Operand<i32> {
749 let PrintMethod = "printanymem";
750 let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm, SEGMENT_REG);
751 let ParserMatchClass = X86MemAsmOperand;
754 // Memory operands that use 64-bit pointers in both ILP32 and LP64.
755 def lea64mem : Operand<i64> {
756 let PrintMethod = "printanymem";
757 let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm, SEGMENT_REG);
758 let ParserMatchClass = X86MemAsmOperand;
//===----------------------------------------------------------------------===//
// X86 Complex Pattern Definitions.
//

// Define X86-specific addressing mode.
def addr      : ComplexPattern<iPTR, 5, "selectAddr", [], [SDNPWantParent]>;
768 def lea32addr : ComplexPattern<i32, 5, "selectLEAAddr",
769 [add, sub, mul, X86mul_imm, shl, or, frameindex],
771 // In 64-bit mode 32-bit LEAs can use RIP-relative addressing.
772 def lea64_32addr : ComplexPattern<i32, 5, "selectLEA64_32Addr",
773 [add, sub, mul, X86mul_imm, shl, or,
774 frameindex, X86WrapperRIP],
def tls32addr : ComplexPattern<i32, 5, "selectTLSADDRAddr",
                               [tglobaltlsaddr], []>;

def tls32baseaddr : ComplexPattern<i32, 5, "selectTLSADDRAddr",
                                   [tglobaltlsaddr], []>;
783 def lea64addr : ComplexPattern<i64, 5, "selectLEAAddr",
784 [add, sub, mul, X86mul_imm, shl, or, frameindex,
def tls64addr : ComplexPattern<i64, 5, "selectTLSADDRAddr",
                               [tglobaltlsaddr], []>;

def tls64baseaddr : ComplexPattern<i64, 5, "selectTLSADDRAddr",
                                   [tglobaltlsaddr], []>;

def vectoraddr : ComplexPattern<iPTR, 5, "selectVectorAddr", [],[SDNPWantParent]>;
795 // A relocatable immediate is either an immediate operand or an operand that can
796 // be relocated by the linker to an immediate, such as a regular symbol in
798 def relocImm : ComplexPattern<iAny, 1, "selectRelocImm", [imm, X86Wrapper], [],
//===----------------------------------------------------------------------===//
// X86 Instruction Predicate Definitions.
//
def TruePredicate : Predicate<"true">;

def HasCMov      : Predicate<"Subtarget->hasCMov()">;
def NoCMov       : Predicate<"!Subtarget->hasCMov()">;

def HasMMX       : Predicate<"Subtarget->hasMMX()">;
def Has3DNow     : Predicate<"Subtarget->has3DNow()">;
def Has3DNowA    : Predicate<"Subtarget->has3DNowA()">;
def HasSSE1      : Predicate<"Subtarget->hasSSE1()">;
def UseSSE1      : Predicate<"Subtarget->hasSSE1() && !Subtarget->hasAVX()">;
def HasSSE2      : Predicate<"Subtarget->hasSSE2()">;
def UseSSE2      : Predicate<"Subtarget->hasSSE2() && !Subtarget->hasAVX()">;
def HasSSE3      : Predicate<"Subtarget->hasSSE3()">;
def UseSSE3      : Predicate<"Subtarget->hasSSE3() && !Subtarget->hasAVX()">;
def HasSSSE3     : Predicate<"Subtarget->hasSSSE3()">;
def UseSSSE3     : Predicate<"Subtarget->hasSSSE3() && !Subtarget->hasAVX()">;
def HasSSE41     : Predicate<"Subtarget->hasSSE41()">;
def NoSSE41      : Predicate<"!Subtarget->hasSSE41()">;
def UseSSE41     : Predicate<"Subtarget->hasSSE41() && !Subtarget->hasAVX()">;
def HasSSE42     : Predicate<"Subtarget->hasSSE42()">;
def UseSSE42     : Predicate<"Subtarget->hasSSE42() && !Subtarget->hasAVX()">;
def HasSSE4A     : Predicate<"Subtarget->hasSSE4A()">;
def NoAVX        : Predicate<"!Subtarget->hasAVX()">;
def HasAVX       : Predicate<"Subtarget->hasAVX()">;
def HasAVX2      : Predicate<"Subtarget->hasAVX2()">;
def HasAVX1Only  : Predicate<"Subtarget->hasAVX() && !Subtarget->hasAVX2()">;
def HasAVX512    : Predicate<"Subtarget->hasAVX512()">;
def UseAVX       : Predicate<"Subtarget->hasAVX() && !Subtarget->hasAVX512()">;
def UseAVX2      : Predicate<"Subtarget->hasAVX2() && !Subtarget->hasAVX512()">;
def NoAVX512     : Predicate<"!Subtarget->hasAVX512()">;
def HasCDI       : Predicate<"Subtarget->hasCDI()">;
def HasVPOPCNTDQ : Predicate<"Subtarget->hasVPOPCNTDQ()">;
def HasPFI       : Predicate<"Subtarget->hasPFI()">;
def HasERI       : Predicate<"Subtarget->hasERI()">;
def HasDQI       : Predicate<"Subtarget->hasDQI()">;
def NoDQI        : Predicate<"!Subtarget->hasDQI()">;
def HasBWI       : Predicate<"Subtarget->hasBWI()">;
def NoBWI        : Predicate<"!Subtarget->hasBWI()">;
def HasVLX       : Predicate<"Subtarget->hasVLX()">;
def NoVLX        : Predicate<"!Subtarget->hasVLX()">;
def NoVLX_Or_NoBWI : Predicate<"!Subtarget->hasVLX() || !Subtarget->hasBWI()">;
def NoVLX_Or_NoDQI : Predicate<"!Subtarget->hasVLX() || !Subtarget->hasDQI()">;
def PKU          : Predicate<"Subtarget->hasPKU()">;
def HasVNNI      : Predicate<"Subtarget->hasVNNI()">;

def HasBITALG    : Predicate<"Subtarget->hasBITALG()">;
def HasPOPCNT    : Predicate<"Subtarget->hasPOPCNT()">;
def HasAES       : Predicate<"Subtarget->hasAES()">;
def HasVAES      : Predicate<"Subtarget->hasVAES()">;
def NoVLX_Or_NoVAES : Predicate<"!Subtarget->hasVLX() || !Subtarget->hasVAES()">;
def HasFXSR      : Predicate<"Subtarget->hasFXSR()">;
def HasXSAVE     : Predicate<"Subtarget->hasXSAVE()">;
def HasXSAVEOPT  : Predicate<"Subtarget->hasXSAVEOPT()">;
def HasXSAVEC    : Predicate<"Subtarget->hasXSAVEC()">;
def HasXSAVES    : Predicate<"Subtarget->hasXSAVES()">;
def HasPCLMUL    : Predicate<"Subtarget->hasPCLMUL()">;
def NoVLX_Or_NoVPCLMULQDQ :
                    Predicate<"!Subtarget->hasVLX() || !Subtarget->hasVPCLMULQDQ()">;
def HasVPCLMULQDQ : Predicate<"Subtarget->hasVPCLMULQDQ()">;
def HasGFNI      : Predicate<"Subtarget->hasGFNI()">;
def HasFMA       : Predicate<"Subtarget->hasFMA()">;
def HasFMA4      : Predicate<"Subtarget->hasFMA4()">;
def NoFMA4       : Predicate<"!Subtarget->hasFMA4()">;
def HasXOP       : Predicate<"Subtarget->hasXOP()">;
def HasTBM       : Predicate<"Subtarget->hasTBM()">;
def NoTBM        : Predicate<"!Subtarget->hasTBM()">;
def HasLWP       : Predicate<"Subtarget->hasLWP()">;
def HasMOVBE     : Predicate<"Subtarget->hasMOVBE()">;
def HasRDRAND    : Predicate<"Subtarget->hasRDRAND()">;
def HasF16C      : Predicate<"Subtarget->hasF16C()">;
def HasFSGSBase  : Predicate<"Subtarget->hasFSGSBase()">;
def HasLZCNT     : Predicate<"Subtarget->hasLZCNT()">;
def HasBMI       : Predicate<"Subtarget->hasBMI()">;
def HasBMI2      : Predicate<"Subtarget->hasBMI2()">;
def NoBMI2       : Predicate<"!Subtarget->hasBMI2()">;
def HasVBMI      : Predicate<"Subtarget->hasVBMI()">;
def HasVBMI2     : Predicate<"Subtarget->hasVBMI2()">;
def HasIFMA      : Predicate<"Subtarget->hasIFMA()">;
def HasRTM       : Predicate<"Subtarget->hasRTM()">;
def HasADX       : Predicate<"Subtarget->hasADX()">;
def HasSHA       : Predicate<"Subtarget->hasSHA()">;
def HasSGX       : Predicate<"Subtarget->hasSGX()">;
def HasPRFCHW    : Predicate<"Subtarget->hasPRFCHW()">;
def HasRDSEED    : Predicate<"Subtarget->hasRDSEED()">;
def HasSSEPrefetch : Predicate<"Subtarget->hasSSEPrefetch()">;
888 def NoSSEPrefetch : Predicate<"!Subtarget->hasSSEPrefetch()">;
889 def HasPrefetchW : Predicate<"Subtarget->hasPRFCHW()">;
890 def HasPREFETCHWT1 : Predicate<"Subtarget->hasPREFETCHWT1()">;
891 def HasLAHFSAHF : Predicate<"Subtarget->hasLAHFSAHF()">;
892 def HasMWAITX : Predicate<"Subtarget->hasMWAITX()">;
893 def HasCLZERO : Predicate<"Subtarget->hasCLZERO()">;
894 def HasCLDEMOTE : Predicate<"Subtarget->hasCLDEMOTE()">;
895 def HasMOVDIRI : Predicate<"Subtarget->hasMOVDIRI()">;
896 def HasMOVDIR64B : Predicate<"Subtarget->hasMOVDIR64B()">;
897 def HasPTWRITE : Predicate<"Subtarget->hasPTWRITE()">;
898 def FPStackf32 : Predicate<"!Subtarget->hasSSE1()">;
899 def FPStackf64 : Predicate<"!Subtarget->hasSSE2()">;
900 def HasMPX : Predicate<"Subtarget->hasMPX()">;
901 def HasSHSTK : Predicate<"Subtarget->hasSHSTK()">;
902 def HasCLFLUSHOPT : Predicate<"Subtarget->hasCLFLUSHOPT()">;
903 def HasCLWB : Predicate<"Subtarget->hasCLWB()">;
904 def HasWBNOINVD : Predicate<"Subtarget->hasWBNOINVD()">;
905 def HasRDPID : Predicate<"Subtarget->hasRDPID()">;
906 def HasWAITPKG : Predicate<"Subtarget->hasWAITPKG()">;
907 def HasINVPCID : Predicate<"Subtarget->hasINVPCID()">;
908 def HasCmpxchg16b: Predicate<"Subtarget->hasCmpxchg16b()">;
909 def HasPCONFIG : Predicate<"Subtarget->hasPCONFIG()">;
910 def Not64BitMode : Predicate<"!Subtarget->is64Bit()">,
911 AssemblerPredicate<"!Mode64Bit", "Not 64-bit mode">;
912 def In64BitMode : Predicate<"Subtarget->is64Bit()">,
913 AssemblerPredicate<"Mode64Bit", "64-bit mode">;
914 def IsLP64 : Predicate<"Subtarget->isTarget64BitLP64()">;
915 def NotLP64 : Predicate<"!Subtarget->isTarget64BitLP64()">;
916 def In16BitMode : Predicate<"Subtarget->is16Bit()">,
917 AssemblerPredicate<"Mode16Bit", "16-bit mode">;
918 def Not16BitMode : Predicate<"!Subtarget->is16Bit()">,
919 AssemblerPredicate<"!Mode16Bit", "Not 16-bit mode">;
920 def In32BitMode : Predicate<"Subtarget->is32Bit()">,
921 AssemblerPredicate<"Mode32Bit", "32-bit mode">;
922 def IsWin64 : Predicate<"Subtarget->isTargetWin64()">;
923 def NotWin64 : Predicate<"!Subtarget->isTargetWin64()">;
924 def NotWin64WithoutFP : Predicate<"!Subtarget->isTargetWin64() ||"
925 "Subtarget->getFrameLowering()->hasFP(*MF)"> {
926 let RecomputePerFunction = 1;
928 def IsPS4 : Predicate<"Subtarget->isTargetPS4()">;
929 def NotPS4 : Predicate<"!Subtarget->isTargetPS4()">;
930 def IsNaCl : Predicate<"Subtarget->isTargetNaCl()">;
931 def NotNaCl : Predicate<"!Subtarget->isTargetNaCl()">;
932 def SmallCode : Predicate<"TM.getCodeModel() == CodeModel::Small">;
933 def KernelCode : Predicate<"TM.getCodeModel() == CodeModel::Kernel">;
934 def NearData : Predicate<"TM.getCodeModel() == CodeModel::Small ||"
935 "TM.getCodeModel() == CodeModel::Kernel">;
936 def IsNotPIC : Predicate<"!TM.isPositionIndependent()">;
938 // We could compute these on a per-module basis but doing so requires accessing
939 // the Function object through the <Target>Subtarget and objections were raised
940 // to that (see post-commit review comments for r301750).
941 let RecomputePerFunction = 1 in {
942 def OptForSize : Predicate<"MF->getFunction().optForSize()">;
943 def OptForMinSize : Predicate<"MF->getFunction().optForMinSize()">;
944 def OptForSpeed : Predicate<"!MF->getFunction().optForSize()">;
945 def UseIncDec : Predicate<"!Subtarget->slowIncDec() || "
946 "MF->getFunction().optForSize()">;
947 def NoSSE41_Or_OptForSize : Predicate<"MF->getFunction().optForSize() || "
948 "!Subtarget->hasSSE41()">;
951 def CallImmAddr : Predicate<"Subtarget->isLegalToCallImmediateAddr()">;
952 def FavorMemIndirectCall : Predicate<"!Subtarget->slowTwoMemOps()">;
953 def HasFastMem32 : Predicate<"!Subtarget->isUnalignedMem32Slow()">;
954 def HasFastLZCNT : Predicate<"Subtarget->hasFastLZCNT()">;
955 def HasFastSHLDRotate : Predicate<"Subtarget->hasFastSHLDRotate()">;
956 def HasERMSB : Predicate<"Subtarget->hasERMSB()">;
957 def HasMFence : Predicate<"Subtarget->hasMFence()">;
958 def UseRetpoline : Predicate<"Subtarget->useRetpoline()">;
959 def NotUseRetpoline : Predicate<"!Subtarget->useRetpoline()">;
961 //===----------------------------------------------------------------------===//
962 // X86 Instruction Format Definitions.
965 include "X86InstrFormats.td"
967 //===----------------------------------------------------------------------===//
968 // Pattern fragments.
971 // X86 specific condition code. These correspond to CondCode in
972 // X86InstrInfo.h. They must be kept in synch.
// Each leaf matches the i8 immediate whose value is the corresponding
// X86::CondCode enumerator (0..15); "alt." names the aliased mnemonic for
// the same encoding.
973 def X86_COND_A : PatLeaf<(i8 0)>; // alt. COND_NBE
974 def X86_COND_AE : PatLeaf<(i8 1)>; // alt. COND_NC
975 def X86_COND_B : PatLeaf<(i8 2)>; // alt. COND_C
976 def X86_COND_BE : PatLeaf<(i8 3)>; // alt. COND_NA
977 def X86_COND_E : PatLeaf<(i8 4)>; // alt. COND_Z
978 def X86_COND_G : PatLeaf<(i8 5)>; // alt. COND_NLE
979 def X86_COND_GE : PatLeaf<(i8 6)>; // alt. COND_NL
980 def X86_COND_L : PatLeaf<(i8 7)>; // alt. COND_NGE
981 def X86_COND_LE : PatLeaf<(i8 8)>; // alt. COND_NG
982 def X86_COND_NE : PatLeaf<(i8 9)>; // alt. COND_NZ
983 def X86_COND_NO : PatLeaf<(i8 10)>;
984 def X86_COND_NP : PatLeaf<(i8 11)>; // alt. COND_PO
985 def X86_COND_NS : PatLeaf<(i8 12)>;
986 def X86_COND_O : PatLeaf<(i8 13)>;
987 def X86_COND_P : PatLeaf<(i8 14)>; // alt. COND_PE
988 def X86_COND_S : PatLeaf<(i8 15)>;
// --- Immediate pattern fragments --------------------------------------------
// i*immSExt*: an iN immediate representable as a narrower sign-extended
// field (selects the short sign-extended-imm8/imm32 instruction encodings).
// The relocImm variants additionally accept relocatable expressions.
// NOTE(review): several '}]>;' record terminators appear to have been lost
// in extraction below — verify record boundaries against the original file.
990 def i16immSExt8 : ImmLeaf<i16, [{ return isInt<8>(Imm); }]>;
991 def i32immSExt8 : ImmLeaf<i32, [{ return isInt<8>(Imm); }]>;
992 def i64immSExt8 : ImmLeaf<i64, [{ return isInt<8>(Imm); }]>;
993 def i64immSExt32 : ImmLeaf<i64, [{ return isInt<32>(Imm); }]>;
995 // FIXME: Ideally we would just replace the above i*immSExt* matchers with
996 // relocImm-based matchers, but then FastISel would be unable to use them.
997 def i64relocImmSExt8 : PatLeaf<(i64 relocImm), [{
998 return isSExtRelocImm<8>(N);
1000 def i64relocImmSExt32 : PatLeaf<(i64 relocImm), [{
1001 return isSExtRelocImm<32>(N);
1004 // If we have multiple users of an immediate, it's much smaller to reuse
1005 // the register, rather than encode the immediate in every instruction.
1006 // This has the risk of increasing register pressure from stretched live
1007 // ranges, however, the immediates should be trivial to rematerialize by
1008 // the RA in the event of high register pressure.
1009 // TODO : This is currently enabled for stores and binary ops. There are more
1010 // cases for which this can be enabled, though this catches the bulk of the
1012 // TODO2 : This should really also be enabled under O2, but there's currently
1013 // an issue with RA where we don't pull the constants into their users
1014 // when we rematerialize them. I'll follow-up on enabling O2 after we fix that
1016 // TODO3 : This is currently limited to single basic blocks (DAG creation
1017 // pulls block immediates to the top and merges them if necessary).
1018 // Eventually, it would be nice to allow ConstantHoisting to merge constants
1019 // globally for potentially added savings.
// The '_su' wrappers reject immediates when
// shouldAvoidImmediateInstFormsForSize() says reusing a register is smaller
// (per the size-optimization rationale above).
1021 def imm8_su : PatLeaf<(i8 relocImm), [{
1022 return !shouldAvoidImmediateInstFormsForSize(N);
1024 def imm16_su : PatLeaf<(i16 relocImm), [{
1025 return !shouldAvoidImmediateInstFormsForSize(N);
1027 def imm32_su : PatLeaf<(i32 relocImm), [{
1028 return !shouldAvoidImmediateInstFormsForSize(N);
1030 def i64immSExt32_su : PatLeaf<(i64immSExt32), [{
1031 return !shouldAvoidImmediateInstFormsForSize(N);
1034 def i16immSExt8_su : PatLeaf<(i16immSExt8), [{
1035 return !shouldAvoidImmediateInstFormsForSize(N);
1037 def i32immSExt8_su : PatLeaf<(i32immSExt8), [{
1038 return !shouldAvoidImmediateInstFormsForSize(N);
1040 def i64immSExt8_su : PatLeaf<(i64immSExt8), [{
1041 return !shouldAvoidImmediateInstFormsForSize(N);
1044 def i64relocImmSExt8_su : PatLeaf<(i64relocImmSExt8), [{
1045 return !shouldAvoidImmediateInstFormsForSize(N);
1047 def i64relocImmSExt32_su : PatLeaf<(i64relocImmSExt32), [{
1048 return !shouldAvoidImmediateInstFormsForSize(N);
1051 // i64immZExt32 predicate - True if the 64-bit immediate fits in a 32-bit
1053 def i64immZExt32 : ImmLeaf<i64, [{ return isUInt<32>(Imm); }]>;
// Fits zero-extended in 32 bits AND its low 32 bits fit sign-extended in 8.
1055 def i64immZExt32SExt8 : ImmLeaf<i64, [{
1056 return isUInt<32>(Imm) && isInt<8>(static_cast<int32_t>(Imm));
1059 // Helper fragments for loads.
// Scalar-load PatFrags for each width, plus the sext/zext/anyext load
// combinations below (e.g. sextloadi32i8 = i32 result sign-extended from an
// i8 memory load).
// NOTE(review): some 'return true;' / '}]>;' lines inside the C++ predicate
// bodies below appear to have been lost in extraction — verify the predicate
// logic against the original file before relying on it.
1061 // It's safe to fold a zextload/extload from i1 as a regular i8 load. The
1062 // upper bits are guaranteed to be zero and we were going to emit a MOV8rm
1063 // which might get folded during peephole anyway.
1064 def loadi8 : PatFrag<(ops node:$ptr), (i8 (unindexedload node:$ptr)), [{
1065 LoadSDNode *LD = cast<LoadSDNode>(N);
1066 ISD::LoadExtType ExtType = LD->getExtensionType();
1067 return ExtType == ISD::NON_EXTLOAD || ExtType == ISD::EXTLOAD ||
1068 ExtType == ISD::ZEXTLOAD;
1071 // It's always safe to treat a anyext i16 load as a i32 load if the i16 is
1072 // known to be 32-bit aligned or better. Ditto for i8 to i16.
1073 def loadi16 : PatFrag<(ops node:$ptr), (i16 (unindexedload node:$ptr)), [{
1074 LoadSDNode *LD = cast<LoadSDNode>(N);
1075 ISD::LoadExtType ExtType = LD->getExtensionType();
1076 if (ExtType == ISD::NON_EXTLOAD)
1078 if (ExtType == ISD::EXTLOAD)
1079 return LD->getAlignment() >= 2 && !LD->isVolatile();
1083 def loadi32 : PatFrag<(ops node:$ptr), (i32 (unindexedload node:$ptr)), [{
1084 LoadSDNode *LD = cast<LoadSDNode>(N);
1085 ISD::LoadExtType ExtType = LD->getExtensionType();
1086 if (ExtType == ISD::NON_EXTLOAD)
1088 if (ExtType == ISD::EXTLOAD)
1089 return LD->getAlignment() >= 4 && !LD->isVolatile();
1093 def loadi64 : PatFrag<(ops node:$ptr), (i64 (load node:$ptr))>;
1094 def loadf32 : PatFrag<(ops node:$ptr), (f32 (load node:$ptr))>;
1095 def loadf64 : PatFrag<(ops node:$ptr), (f64 (load node:$ptr))>;
1096 def loadf80 : PatFrag<(ops node:$ptr), (f80 (load node:$ptr))>;
1097 def loadf128 : PatFrag<(ops node:$ptr), (f128 (load node:$ptr))>;
// f128 load that is at least naturally aligned (alignment >= store size).
1098 def alignedloadf128 : PatFrag<(ops node:$ptr), (f128 (load node:$ptr)), [{
1099 LoadSDNode *Ld = cast<LoadSDNode>(N);
1100 return Ld->getAlignment() >= Ld->getMemoryVT().getStoreSize();
// Like alignedloadf128, but also accepts unaligned loads when the subtarget
// reports fast unaligned SSE memory access.
1102 def memopf128 : PatFrag<(ops node:$ptr), (f128 (load node:$ptr)), [{
1103 LoadSDNode *Ld = cast<LoadSDNode>(N);
1104 return Subtarget->hasSSEUnalignedMem() ||
1105 Ld->getAlignment() >= Ld->getMemoryVT().getStoreSize();
1108 def sextloadi16i8 : PatFrag<(ops node:$ptr), (i16 (sextloadi8 node:$ptr))>;
1109 def sextloadi32i8 : PatFrag<(ops node:$ptr), (i32 (sextloadi8 node:$ptr))>;
1110 def sextloadi32i16 : PatFrag<(ops node:$ptr), (i32 (sextloadi16 node:$ptr))>;
1111 def sextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (sextloadi8 node:$ptr))>;
1112 def sextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (sextloadi16 node:$ptr))>;
1113 def sextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (sextloadi32 node:$ptr))>;
1115 def zextloadi8i1 : PatFrag<(ops node:$ptr), (i8 (zextloadi1 node:$ptr))>;
1116 def zextloadi16i1 : PatFrag<(ops node:$ptr), (i16 (zextloadi1 node:$ptr))>;
1117 def zextloadi32i1 : PatFrag<(ops node:$ptr), (i32 (zextloadi1 node:$ptr))>;
1118 def zextloadi16i8 : PatFrag<(ops node:$ptr), (i16 (zextloadi8 node:$ptr))>;
1119 def zextloadi32i8 : PatFrag<(ops node:$ptr), (i32 (zextloadi8 node:$ptr))>;
1120 def zextloadi32i16 : PatFrag<(ops node:$ptr), (i32 (zextloadi16 node:$ptr))>;
1121 def zextloadi64i1 : PatFrag<(ops node:$ptr), (i64 (zextloadi1 node:$ptr))>;
1122 def zextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (zextloadi8 node:$ptr))>;
1123 def zextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (zextloadi16 node:$ptr))>;
1124 def zextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (zextloadi32 node:$ptr))>;
1126 def extloadi8i1 : PatFrag<(ops node:$ptr), (i8 (extloadi1 node:$ptr))>;
1127 def extloadi16i1 : PatFrag<(ops node:$ptr), (i16 (extloadi1 node:$ptr))>;
1128 def extloadi32i1 : PatFrag<(ops node:$ptr), (i32 (extloadi1 node:$ptr))>;
1129 def extloadi16i8 : PatFrag<(ops node:$ptr), (i16 (extloadi8 node:$ptr))>;
1130 def extloadi32i8 : PatFrag<(ops node:$ptr), (i32 (extloadi8 node:$ptr))>;
1131 def extloadi32i16 : PatFrag<(ops node:$ptr), (i32 (extloadi16 node:$ptr))>;
1132 def extloadi64i1 : PatFrag<(ops node:$ptr), (i64 (extloadi1 node:$ptr))>;
1133 def extloadi64i8 : PatFrag<(ops node:$ptr), (i64 (extloadi8 node:$ptr))>;
1134 def extloadi64i16 : PatFrag<(ops node:$ptr), (i64 (extloadi16 node:$ptr))>;
1135 def extloadi64i32 : PatFrag<(ops node:$ptr), (i64 (extloadi32 node:$ptr))>;
// Single-use wrappers: match only when the node has exactly one user, so
// folding it into an instruction does not duplicate work.
1138 // An 'and' node with a single use.
1139 def and_su : PatFrag<(ops node:$lhs, node:$rhs), (and node:$lhs, node:$rhs), [{
1140 return N->hasOneUse();
1142 // An 'srl' node with a single use.
1143 def srl_su : PatFrag<(ops node:$lhs, node:$rhs), (srl node:$lhs, node:$rhs), [{
1144 return N->hasOneUse();
1146 // An 'trunc' node with a single use.
1147 def trunc_su : PatFrag<(ops node:$src), (trunc node:$src), [{
1148 return N->hasOneUse();
1151 //===----------------------------------------------------------------------===//
1152 // Instruction list.
// NOP family: 0x90 single-byte NOP plus the 0F 1F multi-byte NOP forms.
// The register (MRMXr) variants exist so the assembler/disassembler can
// round-trip those encodings; codegen has no use for them.
1156 let hasSideEffects = 0, SchedRW = [WriteNop] in {
1157 def NOOP : I<0x90, RawFrm, (outs), (ins), "nop", []>;
1158 def NOOPW : I<0x1f, MRMXm, (outs), (ins i16mem:$zero),
1159 "nop{w}\t$zero", []>, TB, OpSize16, NotMemoryFoldable;
1160 def NOOPL : I<0x1f, MRMXm, (outs), (ins i32mem:$zero),
1161 "nop{l}\t$zero", []>, TB, OpSize32, NotMemoryFoldable;
1162 def NOOPQ : RI<0x1f, MRMXm, (outs), (ins i64mem:$zero),
1163 "nop{q}\t$zero", []>, TB, NotMemoryFoldable,
1164 Requires<[In64BitMode]>;
1165 // Also allow register so we can assemble/disassemble
1166 def NOOPWr : I<0x1f, MRMXr, (outs), (ins GR16:$zero),
1167 "nop{w}\t$zero", []>, TB, OpSize16, NotMemoryFoldable;
1168 def NOOPLr : I<0x1f, MRMXr, (outs), (ins GR32:$zero),
1169 "nop{l}\t$zero", []>, TB, OpSize32, NotMemoryFoldable;
1170 def NOOPQr : RI<0x1f, MRMXr, (outs), (ins GR64:$zero),
1171 "nop{q}\t$zero", []>, TB, NotMemoryFoldable,
1172 Requires<[In64BitMode]>;
1176 // Constructing a stack frame.
1177 def ENTER : Ii16<0xC8, RawFrmImm8, (outs), (ins i16imm:$len, i8imm:$lvl),
1178 "enter\t$len, $lvl", []>, Sched<[WriteMicrocoded]>;
// LEAVE restores the frame/stack pointers; separate 32/64-bit records model
// the different implicit register pairs (EBP/ESP vs. RBP/RSP).
1180 let SchedRW = [WriteALU] in {
1181 let Defs = [EBP, ESP], Uses = [EBP, ESP], mayLoad = 1, hasSideEffects=0 in
1182 def LEAVE : I<0xC9, RawFrm, (outs), (ins), "leave", []>,
1183 Requires<[Not64BitMode]>;
1185 let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, hasSideEffects = 0 in
1186 def LEAVE64 : I<0xC9, RawFrm, (outs), (ins), "leave", []>,
1187 Requires<[In64BitMode]>;
1190 //===----------------------------------------------------------------------===//
1191 // Miscellaneous Instructions.
// Stack (PUSH/POP), EFLAGS access, and PUSHA/POPA instructions. The 0x8F /0
// and 0xFF /6 "long form" PUSH/POP records are isCodeGenOnly +
// ForceDisassemble: codegen always emits the short 0x50/0x58 encodings, the
// long forms exist only so the disassembler can decode them.
// NOTE(review): several closing '}' lines of the 'let' blocks below are
// missing from this extract — verify brace matching against the original.
1194 let isBarrier = 1, hasSideEffects = 1, usesCustomInserter = 1,
1195 SchedRW = [WriteSystem] in
1196 def Int_eh_sjlj_setup_dispatch
1197 : PseudoI<(outs), (ins), [(X86eh_sjlj_setup_dispatch)]>;
1199 let Defs = [ESP], Uses = [ESP], hasSideEffects=0 in {
1200 let mayLoad = 1, SchedRW = [WriteLoad] in {
1201 def POP16r : I<0x58, AddRegFrm, (outs GR16:$reg), (ins), "pop{w}\t$reg", []>,
1203 def POP32r : I<0x58, AddRegFrm, (outs GR32:$reg), (ins), "pop{l}\t$reg", []>,
1204 OpSize32, Requires<[Not64BitMode]>;
1205 // Long form for the disassembler.
1206 let isCodeGenOnly = 1, ForceDisassemble = 1 in {
1207 def POP16rmr: I<0x8F, MRM0r, (outs GR16:$reg), (ins), "pop{w}\t$reg", []>,
1208 OpSize16, NotMemoryFoldable;
1209 def POP32rmr: I<0x8F, MRM0r, (outs GR32:$reg), (ins), "pop{l}\t$reg", []>,
1210 OpSize32, Requires<[Not64BitMode]>, NotMemoryFoldable;
1211 } // isCodeGenOnly = 1, ForceDisassemble = 1
1212 } // mayLoad, SchedRW
1213 let mayStore = 1, mayLoad = 1, SchedRW = [WriteRMW] in {
1214 def POP16rmm: I<0x8F, MRM0m, (outs), (ins i16mem:$dst), "pop{w}\t$dst", []>,
1216 def POP32rmm: I<0x8F, MRM0m, (outs), (ins i32mem:$dst), "pop{l}\t$dst", []>,
1217 OpSize32, Requires<[Not64BitMode]>;
1218 } // mayStore, mayLoad, WriteRMW
1220 let mayStore = 1, SchedRW = [WriteStore] in {
1221 def PUSH16r : I<0x50, AddRegFrm, (outs), (ins GR16:$reg), "push{w}\t$reg",[]>,
1223 def PUSH32r : I<0x50, AddRegFrm, (outs), (ins GR32:$reg), "push{l}\t$reg",[]>,
1224 OpSize32, Requires<[Not64BitMode]>;
1225 // Long form for the disassembler.
1226 let isCodeGenOnly = 1, ForceDisassemble = 1 in {
1227 def PUSH16rmr: I<0xFF, MRM6r, (outs), (ins GR16:$reg), "push{w}\t$reg",[]>,
1228 OpSize16, NotMemoryFoldable;
1229 def PUSH32rmr: I<0xFF, MRM6r, (outs), (ins GR32:$reg), "push{l}\t$reg",[]>,
1230 OpSize32, Requires<[Not64BitMode]>, NotMemoryFoldable;
1231 } // isCodeGenOnly = 1, ForceDisassemble = 1
// Immediate pushes: 0x6A takes a sign-extended imm8, 0x68 a full-width imm.
1233 def PUSH16i8 : Ii8<0x6a, RawFrm, (outs), (ins i16i8imm:$imm),
1234 "push{w}\t$imm", []>, OpSize16;
1235 def PUSHi16 : Ii16<0x68, RawFrm, (outs), (ins i16imm:$imm),
1236 "push{w}\t$imm", []>, OpSize16;
1238 def PUSH32i8 : Ii8<0x6a, RawFrm, (outs), (ins i32i8imm:$imm),
1239 "push{l}\t$imm", []>, OpSize32,
1240 Requires<[Not64BitMode]>;
1241 def PUSHi32 : Ii32<0x68, RawFrm, (outs), (ins i32imm:$imm),
1242 "push{l}\t$imm", []>, OpSize32,
1243 Requires<[Not64BitMode]>;
1244 } // mayStore, SchedRW
1246 let mayLoad = 1, mayStore = 1, SchedRW = [WriteRMW] in {
1247 def PUSH16rmm: I<0xFF, MRM6m, (outs), (ins i16mem:$src), "push{w}\t$src", []>,
1249 def PUSH32rmm: I<0xFF, MRM6m, (outs), (ins i32mem:$src), "push{l}\t$src", []>,
1250 OpSize32, Requires<[Not64BitMode]>;
1251 } // mayLoad, mayStore, SchedRW
// RDFLAGS*/WRFLAGS* are pseudos matching the x86 flags read/write intrinsics;
// usesCustomInserter = 1 means expansion happens in C++ (presumably to
// pushf/pop and push/popf sequences — confirm in the target lowering code).
1255 let mayLoad = 1, mayStore = 1, usesCustomInserter = 1,
1256 SchedRW = [WriteRMW], Defs = [ESP] in {
1258 def RDFLAGS32 : PseudoI<(outs GR32:$dst), (ins),
1259 [(set GR32:$dst, (int_x86_flags_read_u32))]>,
1260 Requires<[Not64BitMode]>;
1263 def RDFLAGS64 : PseudoI<(outs GR64:$dst), (ins),
1264 [(set GR64:$dst, (int_x86_flags_read_u64))]>,
1265 Requires<[In64BitMode]>;
1268 let mayLoad = 1, mayStore = 1, usesCustomInserter = 1,
1269 SchedRW = [WriteRMW] in {
1270 let Defs = [ESP, EFLAGS, DF], Uses = [ESP] in
1271 def WRFLAGS32 : PseudoI<(outs), (ins GR32:$src),
1272 [(int_x86_flags_write_u32 GR32:$src)]>,
1273 Requires<[Not64BitMode]>;
1275 let Defs = [RSP, EFLAGS, DF], Uses = [RSP] in
1276 def WRFLAGS64 : PseudoI<(outs), (ins GR64:$src),
1277 [(int_x86_flags_write_u64 GR64:$src)]>,
1278 Requires<[In64BitMode]>;
1281 let Defs = [ESP, EFLAGS, DF], Uses = [ESP], mayLoad = 1, hasSideEffects=0,
1282 SchedRW = [WriteLoad] in {
1283 def POPF16 : I<0x9D, RawFrm, (outs), (ins), "popf{w}", []>, OpSize16;
1284 def POPF32 : I<0x9D, RawFrm, (outs), (ins), "popf{l|d}", []>, OpSize32,
1285 Requires<[Not64BitMode]>;
1288 let Defs = [ESP], Uses = [ESP, EFLAGS, DF], mayStore = 1, hasSideEffects=0,
1289 SchedRW = [WriteStore] in {
1290 def PUSHF16 : I<0x9C, RawFrm, (outs), (ins), "pushf{w}", []>, OpSize16;
1291 def PUSHF32 : I<0x9C, RawFrm, (outs), (ins), "pushf{l|d}", []>, OpSize32,
1292 Requires<[Not64BitMode]>;
// 64-bit PUSH/POP. Note these carry OpSize32: in 64-bit mode the stack
// operation defaults to 64 bits without a REX.W prefix.
1295 let Defs = [RSP], Uses = [RSP], hasSideEffects=0 in {
1296 let mayLoad = 1, SchedRW = [WriteLoad] in {
1297 def POP64r : I<0x58, AddRegFrm, (outs GR64:$reg), (ins), "pop{q}\t$reg", []>,
1298 OpSize32, Requires<[In64BitMode]>;
1299 // Long form for the disassembler.
1300 let isCodeGenOnly = 1, ForceDisassemble = 1 in {
1301 def POP64rmr: I<0x8F, MRM0r, (outs GR64:$reg), (ins), "pop{q}\t$reg", []>,
1302 OpSize32, Requires<[In64BitMode]>, NotMemoryFoldable;
1303 } // isCodeGenOnly = 1, ForceDisassemble = 1
1304 } // mayLoad, SchedRW
1305 let mayLoad = 1, mayStore = 1, SchedRW = [WriteRMW] in
1306 def POP64rmm: I<0x8F, MRM0m, (outs), (ins i64mem:$dst), "pop{q}\t$dst", []>,
1307 OpSize32, Requires<[In64BitMode]>;
1308 let mayStore = 1, SchedRW = [WriteStore] in {
1309 def PUSH64r : I<0x50, AddRegFrm, (outs), (ins GR64:$reg), "push{q}\t$reg", []>,
1310 OpSize32, Requires<[In64BitMode]>;
1311 // Long form for the disassembler.
1312 let isCodeGenOnly = 1, ForceDisassemble = 1 in {
1313 def PUSH64rmr: I<0xFF, MRM6r, (outs), (ins GR64:$reg), "push{q}\t$reg", []>,
1314 OpSize32, Requires<[In64BitMode]>, NotMemoryFoldable;
1315 } // isCodeGenOnly = 1, ForceDisassemble = 1
1316 } // mayStore, SchedRW
1317 let mayLoad = 1, mayStore = 1, SchedRW = [WriteRMW] in {
1318 def PUSH64rmm: I<0xFF, MRM6m, (outs), (ins i64mem:$src), "push{q}\t$src", []>,
1319 OpSize32, Requires<[In64BitMode]>;
1320 } // mayLoad, mayStore, SchedRW
1323 let Defs = [RSP], Uses = [RSP], hasSideEffects = 0, mayStore = 1,
1324 SchedRW = [WriteStore] in {
1325 def PUSH64i8 : Ii8<0x6a, RawFrm, (outs), (ins i64i8imm:$imm),
1326 "push{q}\t$imm", []>, OpSize32,
1327 Requires<[In64BitMode]>;
1328 def PUSH64i32 : Ii32S<0x68, RawFrm, (outs), (ins i64i32imm:$imm),
1329 "push{q}\t$imm", []>, OpSize32,
1330 Requires<[In64BitMode]>;
1333 let Defs = [RSP, EFLAGS, DF], Uses = [RSP], mayLoad = 1, hasSideEffects=0 in
1334 def POPF64 : I<0x9D, RawFrm, (outs), (ins), "popfq", []>,
1335 OpSize32, Requires<[In64BitMode]>, Sched<[WriteLoad]>;
1336 let Defs = [RSP], Uses = [RSP, EFLAGS, DF], mayStore = 1, hasSideEffects=0 in
1337 def PUSHF64 : I<0x9C, RawFrm, (outs), (ins), "pushfq", []>,
1338 OpSize32, Requires<[In64BitMode]>, Sched<[WriteStore]>;
// PUSHA/POPA save/restore all eight GPRs; invalid in 64-bit mode, hence
// Requires<[Not64BitMode]> and the explicit full Defs/Uses lists.
1340 let Defs = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP], Uses = [ESP],
1341 mayLoad = 1, hasSideEffects = 0, SchedRW = [WriteLoad] in {
1342 def POPA32 : I<0x61, RawFrm, (outs), (ins), "popal", []>,
1343 OpSize32, Requires<[Not64BitMode]>;
1344 def POPA16 : I<0x61, RawFrm, (outs), (ins), "popaw", []>,
1345 OpSize16, Requires<[Not64BitMode]>;
1347 let Defs = [ESP], Uses = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP],
1348 mayStore = 1, hasSideEffects = 0, SchedRW = [WriteStore] in {
1349 def PUSHA32 : I<0x60, RawFrm, (outs), (ins), "pushal", []>,
1350 OpSize32, Requires<[Not64BitMode]>;
1351 def PUSHA16 : I<0x60, RawFrm, (outs), (ins), "pushaw", []>,
1352 OpSize16, Requires<[Not64BitMode]>;
// BSWAP operates in place, hence the "$src = $dst" tied-operand constraint.
1355 let Constraints = "$src = $dst", SchedRW = [WriteBSWAP32] in {
1356 // This instruction is a consequence of BSWAP32r observing operand size. The
1357 // encoding is valid, but the behavior is undefined.
1358 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
1359 def BSWAP16r_BAD : I<0xC8, AddRegFrm, (outs GR16:$dst), (ins GR16:$src),
1360 "bswap{w}\t$dst", []>, OpSize16, TB;
1361 // GR32 = bswap GR32
1362 def BSWAP32r : I<0xC8, AddRegFrm, (outs GR32:$dst), (ins GR32:$src),
1364 [(set GR32:$dst, (bswap GR32:$src))]>, OpSize32, TB;
1366 let SchedRW = [WriteBSWAP64] in
1367 def BSWAP64r : RI<0xC8, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
1369 [(set GR64:$dst, (bswap GR64:$src))]>, TB;
1370 } // Constraints = "$src = $dst", SchedRW
1372 // Bit scan instructions.
// All forms produce both the scan result and EFLAGS: the X86bsf/X86bsr
// nodes have a flags result, bound to EFLAGS in each pattern, and the
// whole group is wrapped in Defs = [EFLAGS].
1373 let Defs = [EFLAGS] in {
1374 def BSF16rr : I<0xBC, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
1375 "bsf{w}\t{$src, $dst|$dst, $src}",
1376 [(set GR16:$dst, EFLAGS, (X86bsf GR16:$src))]>,
1377 PS, OpSize16, Sched<[WriteBSF]>;
1378 def BSF16rm : I<0xBC, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
1379 "bsf{w}\t{$src, $dst|$dst, $src}",
1380 [(set GR16:$dst, EFLAGS, (X86bsf (loadi16 addr:$src)))]>,
1381 PS, OpSize16, Sched<[WriteBSFLd]>;
1382 def BSF32rr : I<0xBC, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
1383 "bsf{l}\t{$src, $dst|$dst, $src}",
1384 [(set GR32:$dst, EFLAGS, (X86bsf GR32:$src))]>,
1385 PS, OpSize32, Sched<[WriteBSF]>;
1386 def BSF32rm : I<0xBC, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
1387 "bsf{l}\t{$src, $dst|$dst, $src}",
1388 [(set GR32:$dst, EFLAGS, (X86bsf (loadi32 addr:$src)))]>,
1389 PS, OpSize32, Sched<[WriteBSFLd]>;
1390 def BSF64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
1391 "bsf{q}\t{$src, $dst|$dst, $src}",
1392 [(set GR64:$dst, EFLAGS, (X86bsf GR64:$src))]>,
1393 PS, Sched<[WriteBSF]>;
1394 def BSF64rm : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
1395 "bsf{q}\t{$src, $dst|$dst, $src}",
1396 [(set GR64:$dst, EFLAGS, (X86bsf (loadi64 addr:$src)))]>,
1397 PS, Sched<[WriteBSFLd]>;
1399 def BSR16rr : I<0xBD, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
1400 "bsr{w}\t{$src, $dst|$dst, $src}",
1401 [(set GR16:$dst, EFLAGS, (X86bsr GR16:$src))]>,
1402 PS, OpSize16, Sched<[WriteBSR]>;
1403 def BSR16rm : I<0xBD, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
1404 "bsr{w}\t{$src, $dst|$dst, $src}",
1405 [(set GR16:$dst, EFLAGS, (X86bsr (loadi16 addr:$src)))]>,
1406 PS, OpSize16, Sched<[WriteBSRLd]>;
1407 def BSR32rr : I<0xBD, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
1408 "bsr{l}\t{$src, $dst|$dst, $src}",
1409 [(set GR32:$dst, EFLAGS, (X86bsr GR32:$src))]>,
1410 PS, OpSize32, Sched<[WriteBSR]>;
1411 def BSR32rm : I<0xBD, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
1412 "bsr{l}\t{$src, $dst|$dst, $src}",
1413 [(set GR32:$dst, EFLAGS, (X86bsr (loadi32 addr:$src)))]>,
1414 PS, OpSize32, Sched<[WriteBSRLd]>;
1415 def BSR64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
1416 "bsr{q}\t{$src, $dst|$dst, $src}",
1417 [(set GR64:$dst, EFLAGS, (X86bsr GR64:$src))]>,
1418 PS, Sched<[WriteBSR]>;
1419 def BSR64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
1420 "bsr{q}\t{$src, $dst|$dst, $src}",
1421 [(set GR64:$dst, EFLAGS, (X86bsr (loadi64 addr:$src)))]>,
1422 PS, Sched<[WriteBSRLd]>;
1423 } // Defs = [EFLAGS]
// String instructions (MOVS/STOS/SCAS/CMPS). The implicit index registers,
// the accumulator, and the direction flag DF are modeled via Defs/Uses;
// the whole group is scheduled as microcoded.
1425 let SchedRW = [WriteMicrocoded] in {
1426 let Defs = [EDI,ESI], Uses = [EDI,ESI,DF] in {
1427 def MOVSB : I<0xA4, RawFrmDstSrc, (outs), (ins dstidx8:$dst, srcidx8:$src),
1428 "movsb\t{$src, $dst|$dst, $src}", []>;
1429 def MOVSW : I<0xA5, RawFrmDstSrc, (outs), (ins dstidx16:$dst, srcidx16:$src),
1430 "movsw\t{$src, $dst|$dst, $src}", []>, OpSize16;
1431 def MOVSL : I<0xA5, RawFrmDstSrc, (outs), (ins dstidx32:$dst, srcidx32:$src),
1432 "movs{l|d}\t{$src, $dst|$dst, $src}", []>, OpSize32;
1433 def MOVSQ : RI<0xA5, RawFrmDstSrc, (outs), (ins dstidx64:$dst, srcidx64:$src),
1434 "movsq\t{$src, $dst|$dst, $src}", []>,
1435 Requires<[In64BitMode]>;
// STOS stores the accumulator (AL/AX/EAX/RAX) at the destination index.
1438 let Defs = [EDI], Uses = [AL,EDI,DF] in
1439 def STOSB : I<0xAA, RawFrmDst, (outs), (ins dstidx8:$dst),
1440 "stosb\t{%al, $dst|$dst, al}", []>;
1441 let Defs = [EDI], Uses = [AX,EDI,DF] in
1442 def STOSW : I<0xAB, RawFrmDst, (outs), (ins dstidx16:$dst),
1443 "stosw\t{%ax, $dst|$dst, ax}", []>, OpSize16;
1444 let Defs = [EDI], Uses = [EAX,EDI,DF] in
1445 def STOSL : I<0xAB, RawFrmDst, (outs), (ins dstidx32:$dst),
1446 "stos{l|d}\t{%eax, $dst|$dst, eax}", []>, OpSize32;
1447 let Defs = [RDI], Uses = [RAX,RDI,DF] in
1448 def STOSQ : RI<0xAB, RawFrmDst, (outs), (ins dstidx64:$dst),
1449 "stosq\t{%rax, $dst|$dst, rax}", []>,
1450 Requires<[In64BitMode]>;
// SCAS compares the accumulator against memory at the destination index and
// writes EFLAGS.
1452 let Defs = [EDI,EFLAGS], Uses = [AL,EDI,DF] in
1453 def SCASB : I<0xAE, RawFrmDst, (outs), (ins dstidx8:$dst),
1454 "scasb\t{$dst, %al|al, $dst}", []>;
1455 let Defs = [EDI,EFLAGS], Uses = [AX,EDI,DF] in
1456 def SCASW : I<0xAF, RawFrmDst, (outs), (ins dstidx16:$dst),
1457 "scasw\t{$dst, %ax|ax, $dst}", []>, OpSize16;
1458 let Defs = [EDI,EFLAGS], Uses = [EAX,EDI,DF] in
1459 def SCASL : I<0xAF, RawFrmDst, (outs), (ins dstidx32:$dst),
1460 "scas{l|d}\t{$dst, %eax|eax, $dst}", []>, OpSize32;
1461 let Defs = [EDI,EFLAGS], Uses = [RAX,EDI,DF] in
1462 def SCASQ : RI<0xAF, RawFrmDst, (outs), (ins dstidx64:$dst),
1463 "scasq\t{$dst, %rax|rax, $dst}", []>,
1464 Requires<[In64BitMode]>;
// CMPS compares the two indexed memory operands and writes EFLAGS.
1466 let Defs = [EDI,ESI,EFLAGS], Uses = [EDI,ESI,DF] in {
1467 def CMPSB : I<0xA6, RawFrmDstSrc, (outs), (ins dstidx8:$dst, srcidx8:$src),
1468 "cmpsb\t{$dst, $src|$src, $dst}", []>;
1469 def CMPSW : I<0xA7, RawFrmDstSrc, (outs), (ins dstidx16:$dst, srcidx16:$src),
1470 "cmpsw\t{$dst, $src|$src, $dst}", []>, OpSize16;
1471 def CMPSL : I<0xA7, RawFrmDstSrc, (outs), (ins dstidx32:$dst, srcidx32:$src),
1472 "cmps{l|d}\t{$dst, $src|$src, $dst}", []>, OpSize32;
1473 def CMPSQ : RI<0xA7, RawFrmDstSrc, (outs), (ins dstidx64:$dst, srcidx64:$src),
1474 "cmpsq\t{$dst, $src|$src, $dst}", []>,
1475 Requires<[In64BitMode]>;
1479 //===----------------------------------------------------------------------===//
1480 // Move Instructions.
1482 let SchedRW = [WriteMove] in {
1483 let hasSideEffects = 0, isMoveReg = 1 in {
1484 def MOV8rr : I<0x88, MRMDestReg, (outs GR8 :$dst), (ins GR8 :$src),
1485 "mov{b}\t{$src, $dst|$dst, $src}", []>;
1486 def MOV16rr : I<0x89, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
1487 "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize16;
1488 def MOV32rr : I<0x89, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
1489 "mov{l}\t{$src, $dst|$dst, $src}", []>, OpSize32;
1490 def MOV64rr : RI<0x89, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
1491 "mov{q}\t{$src, $dst|$dst, $src}", []>;
1494 let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
1495 def MOV8ri : Ii8 <0xB0, AddRegFrm, (outs GR8 :$dst), (ins i8imm :$src),
1496 "mov{b}\t{$src, $dst|$dst, $src}",
1497 [(set GR8:$dst, imm:$src)]>;
1498 def MOV16ri : Ii16<0xB8, AddRegFrm, (outs GR16:$dst), (ins i16imm:$src),
1499 "mov{w}\t{$src, $dst|$dst, $src}",
1500 [(set GR16:$dst, imm:$src)]>, OpSize16;
1501 def MOV32ri : Ii32<0xB8, AddRegFrm, (outs GR32:$dst), (ins i32imm:$src),
1502 "mov{l}\t{$src, $dst|$dst, $src}",
1503 [(set GR32:$dst, relocImm:$src)]>, OpSize32;
1504 def MOV64ri32 : RIi32S<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src),
1505 "mov{q}\t{$src, $dst|$dst, $src}",
1506 [(set GR64:$dst, i64immSExt32:$src)]>;
1508 let isReMaterializable = 1 in {
1509 def MOV64ri : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
1510 "movabs{q}\t{$src, $dst|$dst, $src}",
1511 [(set GR64:$dst, relocImm:$src)]>;
1514 // Longer forms that use a ModR/M byte. Needed for disassembler
1515 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
1516 def MOV8ri_alt : Ii8 <0xC6, MRM0r, (outs GR8 :$dst), (ins i8imm :$src),
1517 "mov{b}\t{$src, $dst|$dst, $src}", []>,
1518 FoldGenData<"MOV8ri">;
1519 def MOV16ri_alt : Ii16<0xC7, MRM0r, (outs GR16:$dst), (ins i16imm:$src),
1520 "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize16,
1521 FoldGenData<"MOV16ri">;
1522 def MOV32ri_alt : Ii32<0xC7, MRM0r, (outs GR32:$dst), (ins i32imm:$src),
1523 "mov{l}\t{$src, $dst|$dst, $src}", []>, OpSize32,
1524 FoldGenData<"MOV32ri">;
// Store immediate to memory. MOV64mi32 stores a sign-extended 32-bit
// immediate (i64immSExt32) and requires 64-bit mode.
1528 let SchedRW = [WriteStore] in {
1529 def MOV8mi : Ii8 <0xC6, MRM0m, (outs), (ins i8mem :$dst, i8imm :$src),
1530 "mov{b}\t{$src, $dst|$dst, $src}",
1531 [(store (i8 imm8_su:$src), addr:$dst)]>;
1532 def MOV16mi : Ii16<0xC7, MRM0m, (outs), (ins i16mem:$dst, i16imm:$src),
1533 "mov{w}\t{$src, $dst|$dst, $src}",
1534 [(store (i16 imm16_su:$src), addr:$dst)]>, OpSize16;
1535 def MOV32mi : Ii32<0xC7, MRM0m, (outs), (ins i32mem:$dst, i32imm:$src),
1536 "mov{l}\t{$src, $dst|$dst, $src}",
1537 [(store (i32 imm32_su:$src), addr:$dst)]>, OpSize32;
1538 def MOV64mi32 : RIi32S<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src),
1539 "mov{q}\t{$src, $dst|$dst, $src}",
1540 [(store i64immSExt32_su:$src, addr:$dst)]>,
1541 Requires<[In64BitMode]>;
1544 let hasSideEffects = 0 in {
1546 /// Memory offset versions of moves. The immediate is an address mode sized
1547 /// offset from the segment base.
// The a<->o naming: 'ao' = accumulator <- offset (load), 'oa' = offset <-
// accumulator (store); the trailing number is the address-size in bits.
// NOTE(review): several AdSize16/AdSize32/AdSize64 and Requires<> suffix
// lines appear to be elided in this extract (e.g. lines after "[]>," at 1552,
// 1556, ...) — confirm against the full file before editing encodings.
1548 let SchedRW = [WriteALU] in {
1549 let mayLoad = 1 in {
1551 def MOV8ao32 : Ii32<0xA0, RawFrmMemOffs, (outs), (ins offset32_8:$src),
1552 "mov{b}\t{$src, %al|al, $src}", []>,
1555 def MOV16ao32 : Ii32<0xA1, RawFrmMemOffs, (outs), (ins offset32_16:$src),
1556 "mov{w}\t{$src, %ax|ax, $src}", []>,
1559 def MOV32ao32 : Ii32<0xA1, RawFrmMemOffs, (outs), (ins offset32_32:$src),
1560 "mov{l}\t{$src, %eax|eax, $src}", []>,
1563 def MOV64ao32 : RIi32<0xA1, RawFrmMemOffs, (outs), (ins offset32_64:$src),
1564 "mov{q}\t{$src, %rax|rax, $src}", []>,
1568 def MOV8ao16 : Ii16<0xA0, RawFrmMemOffs, (outs), (ins offset16_8:$src),
1569 "mov{b}\t{$src, %al|al, $src}", []>, AdSize16;
1571 def MOV16ao16 : Ii16<0xA1, RawFrmMemOffs, (outs), (ins offset16_16:$src),
1572 "mov{w}\t{$src, %ax|ax, $src}", []>,
1575 def MOV32ao16 : Ii16<0xA1, RawFrmMemOffs, (outs), (ins offset16_32:$src),
1576 "mov{l}\t{$src, %eax|eax, $src}", []>,
1579 let mayStore = 1 in {
1581 def MOV8o32a : Ii32<0xA2, RawFrmMemOffs, (outs), (ins offset32_8:$dst),
1582 "mov{b}\t{%al, $dst|$dst, al}", []>, AdSize32;
1584 def MOV16o32a : Ii32<0xA3, RawFrmMemOffs, (outs), (ins offset32_16:$dst),
1585 "mov{w}\t{%ax, $dst|$dst, ax}", []>,
1588 def MOV32o32a : Ii32<0xA3, RawFrmMemOffs, (outs), (ins offset32_32:$dst),
1589 "mov{l}\t{%eax, $dst|$dst, eax}", []>,
1592 def MOV64o32a : RIi32<0xA3, RawFrmMemOffs, (outs), (ins offset32_64:$dst),
1593 "mov{q}\t{%rax, $dst|$dst, rax}", []>,
1597 def MOV8o16a : Ii16<0xA2, RawFrmMemOffs, (outs), (ins offset16_8:$dst),
1598 "mov{b}\t{%al, $dst|$dst, al}", []>, AdSize16;
1600 def MOV16o16a : Ii16<0xA3, RawFrmMemOffs, (outs), (ins offset16_16:$dst),
1601 "mov{w}\t{%ax, $dst|$dst, ax}", []>,
1604 def MOV32o16a : Ii16<0xA3, RawFrmMemOffs, (outs), (ins offset16_32:$dst),
1605 "mov{l}\t{%eax, $dst|$dst, eax}", []>,
1609 // These forms all have full 64-bit absolute addresses in their instructions
1610 // and use the movabs mnemonic to indicate this specific form.
1611 let mayLoad = 1 in {
1613 def MOV8ao64 : Ii64<0xA0, RawFrmMemOffs, (outs), (ins offset64_8:$src),
1614 "movabs{b}\t{$src, %al|al, $src}", []>,
1617 def MOV16ao64 : Ii64<0xA1, RawFrmMemOffs, (outs), (ins offset64_16:$src),
1618 "movabs{w}\t{$src, %ax|ax, $src}", []>,
1621 def MOV32ao64 : Ii64<0xA1, RawFrmMemOffs, (outs), (ins offset64_32:$src),
1622 "movabs{l}\t{$src, %eax|eax, $src}", []>,
1625 def MOV64ao64 : RIi64<0xA1, RawFrmMemOffs, (outs), (ins offset64_64:$src),
1626 "movabs{q}\t{$src, %rax|rax, $src}", []>,
1630 let mayStore = 1 in {
1632 def MOV8o64a : Ii64<0xA2, RawFrmMemOffs, (outs), (ins offset64_8:$dst),
1633 "movabs{b}\t{%al, $dst|$dst, al}", []>,
1636 def MOV16o64a : Ii64<0xA3, RawFrmMemOffs, (outs), (ins offset64_16:$dst),
1637 "movabs{w}\t{%ax, $dst|$dst, ax}", []>,
1640 def MOV32o64a : Ii64<0xA3, RawFrmMemOffs, (outs), (ins offset64_32:$dst),
1641 "movabs{l}\t{%eax, $dst|$dst, eax}", []>,
1644 def MOV64o64a : RIi64<0xA3, RawFrmMemOffs, (outs), (ins offset64_64:$dst),
1645 "movabs{q}\t{%rax, $dst|$dst, rax}", []>,
1649 } // hasSideEffects = 0
// Reversed-encoding (MRMSrcReg, 0x8A/0x8B) register-to-register moves.
// Disassembler-only (isCodeGenOnly + ForceDisassemble); marked isMoveReg.
1651 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
1652 SchedRW = [WriteMove], isMoveReg = 1 in {
1653 def MOV8rr_REV : I<0x8A, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src),
1654 "mov{b}\t{$src, $dst|$dst, $src}", []>,
1655 FoldGenData<"MOV8rr">;
1656 def MOV16rr_REV : I<0x8B, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
1657 "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize16,
1658 FoldGenData<"MOV16rr">;
1659 def MOV32rr_REV : I<0x8B, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
1660 "mov{l}\t{$src, $dst|$dst, $src}", []>, OpSize32,
1661 FoldGenData<"MOV32rr">;
1662 def MOV64rr_REV : RI<0x8B, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
1663 "mov{q}\t{$src, $dst|$dst, $src}", []>,
1664 FoldGenData<"MOV64rr">;
1667 // Reversed version with ".s" suffix for GAS compatibility.
// The trailing 0 marks these aliases as not used for printing; the "att"
// variants are only accepted in AT&T syntax.
1668 def : InstAlias<"mov{b}.s\t{$src, $dst|$dst, $src}",
1669 (MOV8rr_REV GR8:$dst, GR8:$src), 0>;
1670 def : InstAlias<"mov{w}.s\t{$src, $dst|$dst, $src}",
1671 (MOV16rr_REV GR16:$dst, GR16:$src), 0>;
1672 def : InstAlias<"mov{l}.s\t{$src, $dst|$dst, $src}",
1673 (MOV32rr_REV GR32:$dst, GR32:$src), 0>;
1674 def : InstAlias<"mov{q}.s\t{$src, $dst|$dst, $src}",
1675 (MOV64rr_REV GR64:$dst, GR64:$src), 0>;
1676 def : InstAlias<"mov.s\t{$src, $dst|$dst, $src}",
1677 (MOV8rr_REV GR8:$dst, GR8:$src), 0, "att">;
1678 def : InstAlias<"mov.s\t{$src, $dst|$dst, $src}",
1679 (MOV16rr_REV GR16:$dst, GR16:$src), 0, "att">;
1680 def : InstAlias<"mov.s\t{$src, $dst|$dst, $src}",
1681 (MOV32rr_REV GR32:$dst, GR32:$src), 0, "att">;
1682 def : InstAlias<"mov.s\t{$src, $dst|$dst, $src}",
1683 (MOV64rr_REV GR64:$dst, GR64:$src), 0, "att">;
// Load from memory into register; foldable as a load and rematerializable.
1685 let canFoldAsLoad = 1, isReMaterializable = 1, SchedRW = [WriteLoad] in {
1686 def MOV8rm : I<0x8A, MRMSrcMem, (outs GR8 :$dst), (ins i8mem :$src),
1687 "mov{b}\t{$src, $dst|$dst, $src}",
1688 [(set GR8:$dst, (loadi8 addr:$src))]>;
1689 def MOV16rm : I<0x8B, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
1690 "mov{w}\t{$src, $dst|$dst, $src}",
1691 [(set GR16:$dst, (loadi16 addr:$src))]>, OpSize16;
1692 def MOV32rm : I<0x8B, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
1693 "mov{l}\t{$src, $dst|$dst, $src}",
1694 [(set GR32:$dst, (loadi32 addr:$src))]>, OpSize32;
1695 def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
1696 "mov{q}\t{$src, $dst|$dst, $src}",
1697 [(set GR64:$dst, (load addr:$src))]>;
// Store register to memory.
1700 let SchedRW = [WriteStore] in {
1701 def MOV8mr : I<0x88, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src),
1702 "mov{b}\t{$src, $dst|$dst, $src}",
1703 [(store GR8:$src, addr:$dst)]>;
1704 def MOV16mr : I<0x89, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
1705 "mov{w}\t{$src, $dst|$dst, $src}",
1706 [(store GR16:$src, addr:$dst)]>, OpSize16;
1707 def MOV32mr : I<0x89, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
1708 "mov{l}\t{$src, $dst|$dst, $src}",
1709 [(store GR32:$src, addr:$dst)]>, OpSize32;
1710 def MOV64mr : RI<0x89, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
1711 "mov{q}\t{$src, $dst|$dst, $src}",
1712 [(store GR64:$src, addr:$dst)]>;
1715 // Versions of MOV8rr, MOV8mr, and MOV8rm that use i8mem_NOREX and GR8_NOREX so
1716 // that they can be used for copying and storing h registers, which can't be
1717 // encoded when a REX prefix is present.
1718 let isCodeGenOnly = 1 in {
1719 let hasSideEffects = 0, isMoveReg = 1 in
1720 def MOV8rr_NOREX : I<0x88, MRMDestReg,
1721 (outs GR8_NOREX:$dst), (ins GR8_NOREX:$src),
1722 "mov{b}\t{$src, $dst|$dst, $src}", []>,
1724 let mayStore = 1, hasSideEffects = 0 in
1725 def MOV8mr_NOREX : I<0x88, MRMDestMem,
1726 (outs), (ins i8mem_NOREX:$dst, GR8_NOREX:$src),
1727 "mov{b}\t{$src, $dst|$dst, $src}", []>,
1728 Sched<[WriteStore]>;
1729 let mayLoad = 1, hasSideEffects = 0,
1730 canFoldAsLoad = 1, isReMaterializable = 1 in
1731 def MOV8rm_NOREX : I<0x8A, MRMSrcMem,
1732 (outs GR8_NOREX:$dst), (ins i8mem_NOREX:$src),
1733 "mov{b}\t{$src, $dst|$dst, $src}", []>,
1738 // Condition code ops, incl. set if equal/not equal/...
// SAHF loads EFLAGS from AH (via the X86sahf node); LAHF stores flags into AH.
// Both are gated on the LAHF/SAHF feature predicate.
1739 let SchedRW = [WriteLAHFSAHF] in {
1740 let Defs = [EFLAGS], Uses = [AH] in
1741 def SAHF : I<0x9E, RawFrm, (outs), (ins), "sahf",
1742 [(set EFLAGS, (X86sahf AH))]>,
1743 Requires<[HasLAHFSAHF]>;
1744 let Defs = [AH], Uses = [EFLAGS], hasSideEffects = 0 in
1745 def LAHF : I<0x9F, RawFrm, (outs), (ins), "lahf", []>, // AH = flags
1746 Requires<[HasLAHFSAHF]>;
1749 //===----------------------------------------------------------------------===//
1750 // Bit tests instructions: BT, BTS, BTR, BTC.
// BT only reads the operands and sets EFLAGS (CF = selected bit).
1752 let Defs = [EFLAGS] in {
1753 let SchedRW = [WriteBitTest] in {
1754 def BT16rr : I<0xA3, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2),
1755 "bt{w}\t{$src2, $src1|$src1, $src2}",
1756 [(set EFLAGS, (X86bt GR16:$src1, GR16:$src2))]>,
1757 OpSize16, TB, NotMemoryFoldable;
1758 def BT32rr : I<0xA3, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2),
1759 "bt{l}\t{$src2, $src1|$src1, $src2}",
1760 [(set EFLAGS, (X86bt GR32:$src1, GR32:$src2))]>,
1761 OpSize32, TB, NotMemoryFoldable;
1762 def BT64rr : RI<0xA3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
1763 "bt{q}\t{$src2, $src1|$src1, $src2}",
1764 [(set EFLAGS, (X86bt GR64:$src1, GR64:$src2))]>, TB,
1768 // Unlike with the register+register form, the memory+register form of the
1769 // bt instruction does not ignore the high bits of the index. From ISel's
1770 // perspective, this is pretty bizarre. Make these instructions disassembly
1771 // only for now. These instructions are also slow on modern CPUs so that's
1772 // another reason to avoid generating them.
1774 let mayLoad = 1, hasSideEffects = 0, SchedRW = [WriteALULd] in {
1775 def BT16mr : I<0xA3, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
1776 "bt{w}\t{$src2, $src1|$src1, $src2}",
1777 []>, OpSize16, TB, NotMemoryFoldable;
1778 def BT32mr : I<0xA3, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
1779 "bt{l}\t{$src2, $src1|$src1, $src2}",
1780 []>, OpSize32, TB, NotMemoryFoldable;
1781 def BT64mr : RI<0xA3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
1782 "bt{q}\t{$src2, $src1|$src1, $src2}",
1783 []>, TB, NotMemoryFoldable;
// Register + 8-bit immediate index forms (0xBA /4).
1786 let SchedRW = [WriteBitTest] in {
1787 def BT16ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR16:$src1, i16i8imm:$src2),
1788 "bt{w}\t{$src2, $src1|$src1, $src2}",
1789 [(set EFLAGS, (X86bt GR16:$src1, i16immSExt8:$src2))]>,
1791 def BT32ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR32:$src1, i32i8imm:$src2),
1792 "bt{l}\t{$src2, $src1|$src1, $src2}",
1793 [(set EFLAGS, (X86bt GR32:$src1, i32immSExt8:$src2))]>,
1795 def BT64ri8 : RIi8<0xBA, MRM4r, (outs), (ins GR64:$src1, i64i8imm:$src2),
1796 "bt{q}\t{$src2, $src1|$src1, $src2}",
1797 [(set EFLAGS, (X86bt GR64:$src1, i64immSExt8:$src2))]>, TB;
1800 // Note that these instructions aren't slow because that only applies when the
1801 // other operand is in a register. When it's an immediate, bt is still fast.
1802 let SchedRW = [WriteALU] in {
1803 def BT16mi8 : Ii8<0xBA, MRM4m, (outs), (ins i16mem:$src1, i16i8imm:$src2),
1804 "bt{w}\t{$src2, $src1|$src1, $src2}",
1805 [(set EFLAGS, (X86bt (loadi16 addr:$src1),
1806 i16immSExt8:$src2))]>,
1808 def BT32mi8 : Ii8<0xBA, MRM4m, (outs), (ins i32mem:$src1, i32i8imm:$src2),
1809 "bt{l}\t{$src2, $src1|$src1, $src2}",
1810 [(set EFLAGS, (X86bt (loadi32 addr:$src1),
1811 i32immSExt8:$src2))]>,
1813 def BT64mi8 : RIi8<0xBA, MRM4m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
1814 "bt{q}\t{$src2, $src1|$src1, $src2}",
1815 [(set EFLAGS, (X86bt (loadi64 addr:$src1),
1816 i64immSExt8:$src2))]>, TB,
1817 Requires<[In64BitMode]>;
// BTC: bit test and complement. Register forms tie $src1 to $dst; memory
// forms are load+store RMW (no ISel patterns — all bodies are []).
1820 let hasSideEffects = 0 in {
1821 let SchedRW = [WriteBitTest], Constraints = "$src1 = $dst" in {
1822 def BTC16rr : I<0xBB, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
1823 "btc{w}\t{$src2, $src1|$src1, $src2}", []>,
1824 OpSize16, TB, NotMemoryFoldable;
1825 def BTC32rr : I<0xBB, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
1826 "btc{l}\t{$src2, $src1|$src1, $src2}", []>,
1827 OpSize32, TB, NotMemoryFoldable;
1828 def BTC64rr : RI<0xBB, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1829 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
1833 let mayLoad = 1, mayStore = 1, SchedRW = [WriteALULd, WriteRMW] in {
1834 def BTC16mr : I<0xBB, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
1835 "btc{w}\t{$src2, $src1|$src1, $src2}", []>,
1836 OpSize16, TB, NotMemoryFoldable;
1837 def BTC32mr : I<0xBB, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
1838 "btc{l}\t{$src2, $src1|$src1, $src2}", []>,
1839 OpSize32, TB, NotMemoryFoldable;
1840 def BTC64mr : RI<0xBB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
1841 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
// Immediate-index forms (0xBA /7).
1845 let SchedRW = [WriteBitTest], Constraints = "$src1 = $dst" in {
1846 def BTC16ri8 : Ii8<0xBA, MRM7r, (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
1847 "btc{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize16, TB;
1848 def BTC32ri8 : Ii8<0xBA, MRM7r, (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
1849 "btc{l}\t{$src2, $src1|$src1, $src2}", []>, OpSize32, TB;
1850 def BTC64ri8 : RIi8<0xBA, MRM7r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
1851 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
1854 let mayLoad = 1, mayStore = 1, SchedRW = [WriteALULd, WriteRMW] in {
1855 def BTC16mi8 : Ii8<0xBA, MRM7m, (outs), (ins i16mem:$src1, i16i8imm:$src2),
1856 "btc{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize16, TB;
1857 def BTC32mi8 : Ii8<0xBA, MRM7m, (outs), (ins i32mem:$src1, i32i8imm:$src2),
1858 "btc{l}\t{$src2, $src1|$src1, $src2}", []>, OpSize32, TB;
1859 def BTC64mi8 : RIi8<0xBA, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
1860 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
1861 Requires<[In64BitMode]>;
// BTR: bit test and reset. Same structure as BTC above (0xB3 reg forms,
// 0xBA /6 immediate forms); no ISel patterns.
1864 let SchedRW = [WriteBitTest], Constraints = "$src1 = $dst" in {
1865 def BTR16rr : I<0xB3, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
1866 "btr{w}\t{$src2, $src1|$src1, $src2}", []>,
1867 OpSize16, TB, NotMemoryFoldable;
1868 def BTR32rr : I<0xB3, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
1869 "btr{l}\t{$src2, $src1|$src1, $src2}", []>,
1870 OpSize32, TB, NotMemoryFoldable;
1871 def BTR64rr : RI<0xB3, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1872 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
1876 let mayLoad = 1, mayStore = 1, SchedRW = [WriteALULd, WriteRMW] in {
1877 def BTR16mr : I<0xB3, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
1878 "btr{w}\t{$src2, $src1|$src1, $src2}", []>,
1879 OpSize16, TB, NotMemoryFoldable;
1880 def BTR32mr : I<0xB3, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
1881 "btr{l}\t{$src2, $src1|$src1, $src2}", []>,
1882 OpSize32, TB, NotMemoryFoldable;
1883 def BTR64mr : RI<0xB3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
1884 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
1888 let SchedRW = [WriteBitTest], Constraints = "$src1 = $dst" in {
1889 def BTR16ri8 : Ii8<0xBA, MRM6r, (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
1890 "btr{w}\t{$src2, $src1|$src1, $src2}", []>,
1892 def BTR32ri8 : Ii8<0xBA, MRM6r, (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
1893 "btr{l}\t{$src2, $src1|$src1, $src2}", []>,
1895 def BTR64ri8 : RIi8<0xBA, MRM6r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
1896 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
1899 let mayLoad = 1, mayStore = 1, SchedRW = [WriteALULd, WriteRMW] in {
1900 def BTR16mi8 : Ii8<0xBA, MRM6m, (outs), (ins i16mem:$src1, i16i8imm:$src2),
1901 "btr{w}\t{$src2, $src1|$src1, $src2}", []>,
1903 def BTR32mi8 : Ii8<0xBA, MRM6m, (outs), (ins i32mem:$src1, i32i8imm:$src2),
1904 "btr{l}\t{$src2, $src1|$src1, $src2}", []>,
1906 def BTR64mi8 : RIi8<0xBA, MRM6m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
1907 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
1908 Requires<[In64BitMode]>;
// BTS: bit test and set. Same structure as BTC/BTR (0xAB reg forms,
// 0xBA /5 immediate forms); no ISel patterns.
1911 let SchedRW = [WriteBitTest], Constraints = "$src1 = $dst" in {
1912 def BTS16rr : I<0xAB, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
1913 "bts{w}\t{$src2, $src1|$src1, $src2}", []>,
1914 OpSize16, TB, NotMemoryFoldable;
1915 def BTS32rr : I<0xAB, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
1916 "bts{l}\t{$src2, $src1|$src1, $src2}", []>,
1917 OpSize32, TB, NotMemoryFoldable;
1918 def BTS64rr : RI<0xAB, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1919 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
1923 let mayLoad = 1, mayStore = 1, SchedRW = [WriteALULd, WriteRMW] in {
1924 def BTS16mr : I<0xAB, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
1925 "bts{w}\t{$src2, $src1|$src1, $src2}", []>,
1926 OpSize16, TB, NotMemoryFoldable;
1927 def BTS32mr : I<0xAB, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
1928 "bts{l}\t{$src2, $src1|$src1, $src2}", []>,
1929 OpSize32, TB, NotMemoryFoldable;
1930 def BTS64mr : RI<0xAB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
1931 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
1935 let SchedRW = [WriteBitTest], Constraints = "$src1 = $dst" in {
1936 def BTS16ri8 : Ii8<0xBA, MRM5r, (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
1937 "bts{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize16, TB;
1938 def BTS32ri8 : Ii8<0xBA, MRM5r, (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
1939 "bts{l}\t{$src2, $src1|$src1, $src2}", []>, OpSize32, TB;
1940 def BTS64ri8 : RIi8<0xBA, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
1941 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
1944 let mayLoad = 1, mayStore = 1, SchedRW = [WriteALULd, WriteRMW] in {
1945 def BTS16mi8 : Ii8<0xBA, MRM5m, (outs), (ins i16mem:$src1, i16i8imm:$src2),
1946 "bts{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize16, TB;
1947 def BTS32mi8 : Ii8<0xBA, MRM5m, (outs), (ins i32mem:$src1, i32i8imm:$src2),
1948 "bts{l}\t{$src2, $src1|$src1, $src2}", []>, OpSize32, TB;
1949 def BTS64mi8 : RIi8<0xBA, MRM5m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
1950 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
1951 Requires<[In64BitMode]>;
1953 } // hasSideEffects = 0
1954 } // Defs = [EFLAGS]
1957 //===----------------------------------------------------------------------===//
1961 // Atomic swap. These are just normal xchg instructions. But since a memory
1962 // operand is referenced, the atomicity is ensured.
// Multiclass parameters: opc8 = 8-bit opcode, opc = 16/32/64-bit opcode,
// mnemonic = asm name, frag = PatFrag name prefix ("frag_8" .. "frag_64").
// NOTE(review): the "[(set ...," pattern lines appear partially elided in
// this extract — confirm against the full file.
1963 multiclass ATOMIC_SWAP<bits<8> opc8, bits<8> opc, string mnemonic, string frag> {
1964 let Constraints = "$val = $dst", SchedRW = [WriteALULd, WriteRMW] in {
1965 def NAME#8rm : I<opc8, MRMSrcMem, (outs GR8:$dst),
1966 (ins GR8:$val, i8mem:$ptr),
1967 !strconcat(mnemonic, "{b}\t{$val, $ptr|$ptr, $val}"),
1970 (!cast<PatFrag>(frag # "_8") addr:$ptr, GR8:$val))]>;
1971 def NAME#16rm : I<opc, MRMSrcMem, (outs GR16:$dst),
1972 (ins GR16:$val, i16mem:$ptr),
1973 !strconcat(mnemonic, "{w}\t{$val, $ptr|$ptr, $val}"),
1976 (!cast<PatFrag>(frag # "_16") addr:$ptr, GR16:$val))]>,
1978 def NAME#32rm : I<opc, MRMSrcMem, (outs GR32:$dst),
1979 (ins GR32:$val, i32mem:$ptr),
1980 !strconcat(mnemonic, "{l}\t{$val, $ptr|$ptr, $val}"),
1983 (!cast<PatFrag>(frag # "_32") addr:$ptr, GR32:$val))]>,
1985 def NAME#64rm : RI<opc, MRMSrcMem, (outs GR64:$dst),
1986 (ins GR64:$val, i64mem:$ptr),
1987 !strconcat(mnemonic, "{q}\t{$val, $ptr|$ptr, $val}"),
1990 (!cast<PatFrag>(frag # "_64") addr:$ptr, GR64:$val))]>;
// Instantiate the atomic-swap multiclass for xchg (0x86 byte / 0x87 word+).
1994 defm XCHG : ATOMIC_SWAP<0x86, 0x87, "xchg", "atomic_swap">, NotMemoryFoldable;
1996 // Swap between registers.
// Both operands are read and written, hence the tied dst1/dst2 outputs.
1998 let Constraints = "$src1 = $dst1, $src2 = $dst2", hasSideEffects = 0 in {
1999 def XCHG8rr : I<0x86, MRMSrcReg, (outs GR8:$dst1, GR8:$dst2),
2000 (ins GR8:$src1, GR8:$src2),
2001 "xchg{b}\t{$src2, $src1|$src1, $src2}", []>, NotMemoryFoldable;
2002 def XCHG16rr : I<0x87, MRMSrcReg, (outs GR16:$dst1, GR16:$dst2),
2003 (ins GR16:$src1, GR16:$src2),
2004 "xchg{w}\t{$src2, $src1|$src1, $src2}", []>,
2005 OpSize16, NotMemoryFoldable;
2006 def XCHG32rr : I<0x87, MRMSrcReg, (outs GR32:$dst1, GR32:$dst2),
2007 (ins GR32:$src1, GR32:$src2),
2008 "xchg{l}\t{$src2, $src1|$src1, $src2}", []>,
2009 OpSize32, NotMemoryFoldable;
2010 def XCHG64rr : RI<0x87, MRMSrcReg, (outs GR64:$dst1, GR64:$dst2),
2011 (ins GR64:$src1 ,GR64:$src2),
2012 "xchg{q}\t{$src2, $src1|$src1, $src2}", []>, NotMemoryFoldable;
2015 // Swap between EAX and other registers.
// Short AddRegFrm encodings (0x90+reg) with the accumulator implicit.
2016 let Constraints = "$src = $dst", hasSideEffects = 0 in {
2017 let Uses = [AX], Defs = [AX] in
2018 def XCHG16ar : I<0x90, AddRegFrm, (outs GR16:$dst), (ins GR16:$src),
2019 "xchg{w}\t{$src, %ax|ax, $src}", []>, OpSize16;
2020 let Uses = [EAX], Defs = [EAX] in
2021 def XCHG32ar : I<0x90, AddRegFrm, (outs GR32:$dst), (ins GR32:$src),
2022 "xchg{l}\t{$src, %eax|eax, $src}", []>, OpSize32;
2023 let Uses = [RAX], Defs = [RAX] in
2024 def XCHG64ar : RI<0x90, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
2025 "xchg{q}\t{$src, %rax|rax, $src}", []>;
// XADD: exchange and add (0F C0/C1). Writes EFLAGS; register forms tie both
// operands, memory forms are load+store RMW. No ISel patterns here.
2029 let hasSideEffects = 0, Constraints = "$src1 = $dst1, $src2 = $dst2",
2030 Defs = [EFLAGS], SchedRW = [WriteALU] in {
2031 def XADD8rr : I<0xC0, MRMDestReg, (outs GR8:$dst1, GR8:$dst2),
2032 (ins GR8:$src1, GR8:$src2),
2033 "xadd{b}\t{$src2, $src1|$src1, $src2}", []>, TB;
2034 def XADD16rr : I<0xC1, MRMDestReg, (outs GR16:$dst1, GR16:$dst2),
2035 (ins GR16:$src1, GR16:$src2),
2036 "xadd{w}\t{$src2, $src1|$src1, $src2}", []>, TB, OpSize16;
2037 def XADD32rr : I<0xC1, MRMDestReg, (outs GR32:$dst1, GR32:$dst2),
2038 (ins GR32:$src1, GR32:$src2),
2039 "xadd{l}\t{$src2, $src1|$src1, $src2}", []>, TB, OpSize32;
2040 def XADD64rr : RI<0xC1, MRMDestReg, (outs GR64:$dst1, GR64:$dst2),
2041 (ins GR64:$src1, GR64:$src2),
2042 "xadd{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
2045 let mayLoad = 1, mayStore = 1, hasSideEffects = 0, Constraints = "$val = $dst",
2046 Defs = [EFLAGS], SchedRW = [WriteALULd, WriteRMW] in {
2047 def XADD8rm : I<0xC0, MRMSrcMem, (outs GR8:$dst),
2048 (ins GR8:$val, i8mem:$ptr),
2049 "xadd{b}\t{$val, $ptr|$ptr, $val}", []>, TB;
2050 def XADD16rm : I<0xC1, MRMSrcMem, (outs GR16:$dst),
2051 (ins GR16:$val, i16mem:$ptr),
2052 "xadd{w}\t{$val, $ptr|$ptr, $val}", []>, TB,
2054 def XADD32rm : I<0xC1, MRMSrcMem, (outs GR32:$dst),
2055 (ins GR32:$val, i32mem:$ptr),
2056 "xadd{l}\t{$val, $ptr|$ptr, $val}", []>, TB,
2058 def XADD64rm : RI<0xC1, MRMSrcMem, (outs GR64:$dst),
2059 (ins GR64:$val, i64mem:$ptr),
2060 "xadd{q}\t{$val, $ptr|$ptr, $val}", []>, TB;
// CMPXCHG: compare-and-exchange against the implicit accumulator
// (AL/AX/EAX/RAX), which is both used and redefined along with EFLAGS.
2064 let SchedRW = [WriteALU], hasSideEffects = 0 in {
2065 let Defs = [AL, EFLAGS], Uses = [AL] in
2066 def CMPXCHG8rr : I<0xB0, MRMDestReg, (outs GR8:$dst), (ins GR8:$src),
2067 "cmpxchg{b}\t{$src, $dst|$dst, $src}", []>, TB,
2069 let Defs = [AX, EFLAGS], Uses = [AX] in
2070 def CMPXCHG16rr : I<0xB1, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
2071 "cmpxchg{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize16,
2073 let Defs = [EAX, EFLAGS], Uses = [EAX] in
2074 def CMPXCHG32rr : I<0xB1, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
2075 "cmpxchg{l}\t{$src, $dst|$dst, $src}", []>, TB, OpSize32,
2077 let Defs = [RAX, EFLAGS], Uses = [RAX] in
2078 def CMPXCHG64rr : RI<0xB1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
2079 "cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB,
2081 } // SchedRW, hasSideEffects
// Memory-destination forms (RMW).
2083 let SchedRW = [WriteALULd, WriteRMW], mayLoad = 1, mayStore = 1,
2084 hasSideEffects = 0 in {
2085 let Defs = [AL, EFLAGS], Uses = [AL] in
2086 def CMPXCHG8rm : I<0xB0, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src),
2087 "cmpxchg{b}\t{$src, $dst|$dst, $src}", []>, TB,
2089 let Defs = [AX, EFLAGS], Uses = [AX] in
2090 def CMPXCHG16rm : I<0xB1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
2091 "cmpxchg{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize16,
2093 let Defs = [EAX, EFLAGS], Uses = [EAX] in
2094 def CMPXCHG32rm : I<0xB1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2095 "cmpxchg{l}\t{$src, $dst|$dst, $src}", []>, TB, OpSize32,
2097 let Defs = [RAX, EFLAGS], Uses = [RAX] in
2098 def CMPXCHG64rm : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
2099 "cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB,
// 8B/16B wide forms use the DX:AX / BX:CX register pairs implicitly;
// CMPXCHG16B additionally requires the cmpxchg16b feature and 64-bit mode.
2102 let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX] in
2103 def CMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i64mem:$dst),
2104 "cmpxchg8b\t$dst", []>, TB;
2106 let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX] in
2107 def CMPXCHG16B : RI<0xC7, MRM1m, (outs), (ins i128mem:$dst),
2108 "cmpxchg16b\t$dst", []>,
2109 TB, Requires<[HasCmpxchg16b, In64BitMode]>;
2110 } // SchedRW, mayLoad, mayStore, hasSideEffects
2113 // Lock instruction prefix
// Standalone prefix-byte "instructions", used by the assembler/disassembler.
2114 let SchedRW = [WriteMicrocoded] in
2115 def LOCK_PREFIX : I<0xF0, RawFrm, (outs), (ins), "lock", []>;
2117 let SchedRW = [WriteNop] in {
2119 // Rex64 instruction prefix
2120 def REX64_PREFIX : I<0x48, RawFrm, (outs), (ins), "rex64", []>,
2121 Requires<[In64BitMode]>;
2123 // Data16 instruction prefix
2124 def DATA16_PREFIX : I<0x66, RawFrm, (outs), (ins), "data16", []>;
2127 // Repeat string operation instruction prefixes
2128 let Defs = [ECX], Uses = [ECX,DF], SchedRW = [WriteMicrocoded] in {
2129 // Repeat (used with INS, OUTS, MOVS, LODS and STOS)
2130 def REP_PREFIX : I<0xF3, RawFrm, (outs), (ins), "rep", []>;
2131 // Repeat while not equal (used with CMPS and SCAS)
2132 def REPNE_PREFIX : I<0xF2, RawFrm, (outs), (ins), "repne", []>;
2135 // String manipulation instructions
// LODS loads from [rsi] into the accumulator and advances ESI per DF.
2136 let SchedRW = [WriteMicrocoded] in {
2137 let Defs = [AL,ESI], Uses = [ESI,DF] in
2138 def LODSB : I<0xAC, RawFrmSrc, (outs), (ins srcidx8:$src),
2139 "lodsb\t{$src, %al|al, $src}", []>;
2140 let Defs = [AX,ESI], Uses = [ESI,DF] in
2141 def LODSW : I<0xAD, RawFrmSrc, (outs), (ins srcidx16:$src),
2142 "lodsw\t{$src, %ax|ax, $src}", []>, OpSize16;
2143 let Defs = [EAX,ESI], Uses = [ESI,DF] in
2144 def LODSL : I<0xAD, RawFrmSrc, (outs), (ins srcidx32:$src),
2145 "lods{l|d}\t{$src, %eax|eax, $src}", []>, OpSize32;
2146 let Defs = [RAX,ESI], Uses = [ESI,DF] in
2147 def LODSQ : RI<0xAD, RawFrmSrc, (outs), (ins srcidx64:$src),
2148 "lodsq\t{$src, %rax|rax, $src}", []>,
2149 Requires<[In64BitMode]>;
// OUTS writes [esi] to port DX; INS reads port DX into [edi].
2152 let SchedRW = [WriteSystem] in {
2153 let Defs = [ESI], Uses = [DX,ESI,DF] in {
2154 def OUTSB : I<0x6E, RawFrmSrc, (outs), (ins srcidx8:$src),
2155 "outsb\t{$src, %dx|dx, $src}", []>;
2156 def OUTSW : I<0x6F, RawFrmSrc, (outs), (ins srcidx16:$src),
2157 "outsw\t{$src, %dx|dx, $src}", []>, OpSize16;
2158 def OUTSL : I<0x6F, RawFrmSrc, (outs), (ins srcidx32:$src),
2159 "outs{l|d}\t{$src, %dx|dx, $src}", []>, OpSize32;
2162 let Defs = [EDI], Uses = [DX,EDI,DF] in {
2163 def INSB : I<0x6C, RawFrmDst, (outs), (ins dstidx8:$dst),
2164 "insb\t{%dx, $dst|$dst, dx}", []>;
2165 def INSW : I<0x6D, RawFrmDst, (outs), (ins dstidx16:$dst),
2166 "insw\t{%dx, $dst|$dst, dx}", []>, OpSize16;
2167 def INSL : I<0x6D, RawFrmDst, (outs), (ins dstidx32:$dst),
2168 "ins{l|d}\t{%dx, $dst|$dst, dx}", []>, OpSize32;
2172 // EFLAGS management instructions.
// CLC/STC clear/set CF; CMC complements CF. CLD/STD manage the separately
// modeled DF register.
2173 let SchedRW = [WriteALU], Defs = [EFLAGS], Uses = [EFLAGS] in {
2174 def CLC : I<0xF8, RawFrm, (outs), (ins), "clc", []>;
2175 def STC : I<0xF9, RawFrm, (outs), (ins), "stc", []>;
2176 def CMC : I<0xF5, RawFrm, (outs), (ins), "cmc", []>;
2179 // DF management instructions.
2180 let SchedRW = [WriteALU], Defs = [DF] in {
2181 def CLD : I<0xFC, RawFrm, (outs), (ins), "cld", []>;
2182 def STD : I<0xFD, RawFrm, (outs), (ins), "std", []>;
2185 // Table lookup instructions
2186 let Uses = [AL,EBX], Defs = [AL], hasSideEffects = 0, mayLoad = 1 in
2187 def XLAT : I<0xD7, RawFrm, (outs), (ins), "xlatb", []>, Sched<[WriteLoad]>;
// BCD/ASCII adjust instructions; all are invalid in 64-bit mode
// (Requires<[Not64BitMode]>) and modeled with implicit AL/AX + EFLAGS.
2189 let SchedRW = [WriteMicrocoded] in {
2190 // ASCII Adjust After Addition
2191 let Uses = [AL,EFLAGS], Defs = [AX,EFLAGS], hasSideEffects = 0 in
2192 def AAA : I<0x37, RawFrm, (outs), (ins), "aaa", []>,
2193 Requires<[Not64BitMode]>;
2195 // ASCII Adjust AX Before Division
2196 let Uses = [AX], Defs = [AX,EFLAGS], hasSideEffects = 0 in
2197 def AAD8i8 : Ii8<0xD5, RawFrm, (outs), (ins i8imm:$src),
2198 "aad\t$src", []>, Requires<[Not64BitMode]>;
2200 // ASCII Adjust AX After Multiply
2201 let Uses = [AL], Defs = [AX,EFLAGS], hasSideEffects = 0 in
2202 def AAM8i8 : Ii8<0xD4, RawFrm, (outs), (ins i8imm:$src),
2203 "aam\t$src", []>, Requires<[Not64BitMode]>;
2205 // ASCII Adjust AL After Subtraction - sets
2206 let Uses = [AL,EFLAGS], Defs = [AX,EFLAGS], hasSideEffects = 0 in
2207 def AAS : I<0x3F, RawFrm, (outs), (ins), "aas", []>,
2208 Requires<[Not64BitMode]>;
2210 // Decimal Adjust AL after Addition
2211 let Uses = [AL,EFLAGS], Defs = [AL,EFLAGS], hasSideEffects = 0 in
2212 def DAA : I<0x27, RawFrm, (outs), (ins), "daa", []>,
2213 Requires<[Not64BitMode]>;
2215 // Decimal Adjust AL after Subtraction
2216 let Uses = [AL,EFLAGS], Defs = [AL,EFLAGS], hasSideEffects = 0 in
2217 def DAS : I<0x2F, RawFrm, (outs), (ins), "das", []>,
2218 Requires<[Not64BitMode]>;
// Legacy 16/32-bit-mode-only instructions: BOUND and ARPL.
2221 let SchedRW = [WriteSystem] in {
2222 // Check Array Index Against Bounds
2223 // Note: "bound" does not have reversed operands in at&t syntax.
2224 def BOUNDS16rm : I<0x62, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
2225 "bound\t$dst, $src", []>, OpSize16,
2226 Requires<[Not64BitMode]>;
2227 def BOUNDS32rm : I<0x62, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
2228 "bound\t$dst, $src", []>, OpSize32,
2229 Requires<[Not64BitMode]>;
2231 // Adjust RPL Field of Segment Selector
2232 def ARPL16rr : I<0x63, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
2233 "arpl\t{$src, $dst|$dst, $src}", []>,
2234 Requires<[Not64BitMode]>, NotMemoryFoldable;
2236 def ARPL16mr : I<0x63, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
2237 "arpl\t{$src, $dst|$dst, $src}", []>,
2238 Requires<[Not64BitMode]>, NotMemoryFoldable;
2241 //===----------------------------------------------------------------------===//
2242 // MOVBE Instructions
// Byte-swapping load/store: selected from (bswap (load ...)) and
// (store (bswap ...)) patterns, gated on the MOVBE feature.
2244 let Predicates = [HasMOVBE] in {
2245 let SchedRW = [WriteALULd] in {
2246 def MOVBE16rm : I<0xF0, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
2247 "movbe{w}\t{$src, $dst|$dst, $src}",
2248 [(set GR16:$dst, (bswap (loadi16 addr:$src)))]>,
2250 def MOVBE32rm : I<0xF0, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
2251 "movbe{l}\t{$src, $dst|$dst, $src}",
2252 [(set GR32:$dst, (bswap (loadi32 addr:$src)))]>,
2254 def MOVBE64rm : RI<0xF0, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
2255 "movbe{q}\t{$src, $dst|$dst, $src}",
2256 [(set GR64:$dst, (bswap (loadi64 addr:$src)))]>,
2259 let SchedRW = [WriteStore] in {
2260 def MOVBE16mr : I<0xF1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
2261 "movbe{w}\t{$src, $dst|$dst, $src}",
2262 [(store (bswap GR16:$src), addr:$dst)]>,
2264 def MOVBE32mr : I<0xF1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2265 "movbe{l}\t{$src, $dst|$dst, $src}",
2266 [(store (bswap GR32:$src), addr:$dst)]>,
2268 def MOVBE64mr : RI<0xF1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
2269 "movbe{q}\t{$src, $dst|$dst, $src}",
2270 [(store (bswap GR64:$src), addr:$dst)]>,
2275 //===----------------------------------------------------------------------===//
2276 // RDRAND Instruction
// Both RDRAND and RDSEED produce a value plus EFLAGS (success flag in CF per
// the two-result set pattern), gated on their respective feature predicates.
2278 let Predicates = [HasRDRAND], Defs = [EFLAGS], SchedRW = [WriteSystem] in {
2279 def RDRAND16r : I<0xC7, MRM6r, (outs GR16:$dst), (ins),
2280 "rdrand{w}\t$dst", [(set GR16:$dst, EFLAGS, (X86rdrand))]>,
2282 def RDRAND32r : I<0xC7, MRM6r, (outs GR32:$dst), (ins),
2283 "rdrand{l}\t$dst", [(set GR32:$dst, EFLAGS, (X86rdrand))]>,
2285 def RDRAND64r : RI<0xC7, MRM6r, (outs GR64:$dst), (ins),
2286 "rdrand{q}\t$dst", [(set GR64:$dst, EFLAGS, (X86rdrand))]>,
2290 //===----------------------------------------------------------------------===//
2291 // RDSEED Instruction
2293 let Predicates = [HasRDSEED], Defs = [EFLAGS], SchedRW = [WriteSystem] in {
2294 def RDSEED16r : I<0xC7, MRM7r, (outs GR16:$dst), (ins), "rdseed{w}\t$dst",
2295 [(set GR16:$dst, EFLAGS, (X86rdseed))]>, OpSize16, PS;
2296 def RDSEED32r : I<0xC7, MRM7r, (outs GR32:$dst), (ins), "rdseed{l}\t$dst",
2297 [(set GR32:$dst, EFLAGS, (X86rdseed))]>, OpSize32, PS;
2298 def RDSEED64r : RI<0xC7, MRM7r, (outs GR64:$dst), (ins), "rdseed{q}\t$dst",
2299 [(set GR64:$dst, EFLAGS, (X86rdseed))]>, PS;
2302 //===----------------------------------------------------------------------===//
2303 // LZCNT Instruction
// Selected from ctlz; also clobbers EFLAGS (implicit EFLAGS in the pattern).
2305 let Predicates = [HasLZCNT], Defs = [EFLAGS] in {
2306 def LZCNT16rr : I<0xBD, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
2307 "lzcnt{w}\t{$src, $dst|$dst, $src}",
2308 [(set GR16:$dst, (ctlz GR16:$src)), (implicit EFLAGS)]>,
2309 XS, OpSize16, Sched<[WriteLZCNT]>;
2310 def LZCNT16rm : I<0xBD, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
2311 "lzcnt{w}\t{$src, $dst|$dst, $src}",
2312 [(set GR16:$dst, (ctlz (loadi16 addr:$src))),
2313 (implicit EFLAGS)]>, XS, OpSize16, Sched<[WriteLZCNTLd]>;
2315 def LZCNT32rr : I<0xBD, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
2316 "lzcnt{l}\t{$src, $dst|$dst, $src}",
2317 [(set GR32:$dst, (ctlz GR32:$src)), (implicit EFLAGS)]>,
2318 XS, OpSize32, Sched<[WriteLZCNT]>;
2319 def LZCNT32rm : I<0xBD, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
2320 "lzcnt{l}\t{$src, $dst|$dst, $src}",
2321 [(set GR32:$dst, (ctlz (loadi32 addr:$src))),
2322 (implicit EFLAGS)]>, XS, OpSize32, Sched<[WriteLZCNTLd]>;
2324 def LZCNT64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
2325 "lzcnt{q}\t{$src, $dst|$dst, $src}",
2326 [(set GR64:$dst, (ctlz GR64:$src)), (implicit EFLAGS)]>,
2327 XS, Sched<[WriteLZCNT]>;
2328 def LZCNT64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
2329 "lzcnt{q}\t{$src, $dst|$dst, $src}",
2330 [(set GR64:$dst, (ctlz (loadi64 addr:$src))),
2331 (implicit EFLAGS)]>, XS, Sched<[WriteLZCNTLd]>;
2334 //===----------------------------------------------------------------------===//
// TZCNT (BMI1) — count-trailing-zeros, F3-prefixed 0xBC.  Mirrors the LZCNT
// definitions above but selects the generic cttz node and is gated on HasBMI.
2337 let Predicates = [HasBMI], Defs = [EFLAGS] in {
2338 def TZCNT16rr : I<0xBC, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
2339 "tzcnt{w}\t{$src, $dst|$dst, $src}",
2340 [(set GR16:$dst, (cttz GR16:$src)), (implicit EFLAGS)]>,
2341 XS, OpSize16, Sched<[WriteTZCNT]>;
2342 def TZCNT16rm : I<0xBC, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
2343 "tzcnt{w}\t{$src, $dst|$dst, $src}",
2344 [(set GR16:$dst, (cttz (loadi16 addr:$src))),
2345 (implicit EFLAGS)]>, XS, OpSize16, Sched<[WriteTZCNTLd]>;
2347 def TZCNT32rr : I<0xBC, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
2348 "tzcnt{l}\t{$src, $dst|$dst, $src}",
2349 [(set GR32:$dst, (cttz GR32:$src)), (implicit EFLAGS)]>,
2350 XS, OpSize32, Sched<[WriteTZCNT]>;
2351 def TZCNT32rm : I<0xBC, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
2352 "tzcnt{l}\t{$src, $dst|$dst, $src}",
2353 [(set GR32:$dst, (cttz (loadi32 addr:$src))),
2354 (implicit EFLAGS)]>, XS, OpSize32, Sched<[WriteTZCNTLd]>;
2356 def TZCNT64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
2357 "tzcnt{q}\t{$src, $dst|$dst, $src}",
2358 [(set GR64:$dst, (cttz GR64:$src)), (implicit EFLAGS)]>,
2359 XS, Sched<[WriteTZCNT]>;
2360 def TZCNT64rm : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
2361 "tzcnt{q}\t{$src, $dst|$dst, $src}",
2362 [(set GR64:$dst, (cttz (loadi64 addr:$src))),
2363 (implicit EFLAGS)]>, XS, Sched<[WriteTZCNTLd]>;
// Helper multiclass for the BMI1 "BLS" family (BLSR/BLSMSK/BLSI): one VEX
// reg-reg form and one reg-mem form per instantiation, opcode 0xF3 with the
// ModRM reg field (RegMRM/MemMRM) selecting the operation.  No ISel patterns
// here (hasSideEffects = 0, empty pattern lists); selection is done by the
// explicit Pat<> records later in this file.
2366 multiclass bmi_bls<string mnemonic, Format RegMRM, Format MemMRM,
2367 RegisterClass RC, X86MemOperand x86memop> {
2368 let hasSideEffects = 0 in {
2369 def rr : I<0xF3, RegMRM, (outs RC:$dst), (ins RC:$src),
2370 !strconcat(mnemonic, "\t{$src, $dst|$dst, $src}"), []>,
2371 T8PS, VEX_4V, Sched<[WriteALU]>;
2373 def rm : I<0xF3, MemMRM, (outs RC:$dst), (ins x86memop:$src),
2374 !strconcat(mnemonic, "\t{$src, $dst|$dst, $src}"), []>,
2375 T8PS, VEX_4V, Sched<[WriteALULd]>;
// Instantiations: /1 = BLSR, /2 = BLSMSK, /3 = BLSI; 64-bit forms add VEX_W.
2379 let Predicates = [HasBMI], Defs = [EFLAGS] in {
2380 defm BLSR32 : bmi_bls<"blsr{l}", MRM1r, MRM1m, GR32, i32mem>;
2381 defm BLSR64 : bmi_bls<"blsr{q}", MRM1r, MRM1m, GR64, i64mem>, VEX_W;
2382 defm BLSMSK32 : bmi_bls<"blsmsk{l}", MRM2r, MRM2m, GR32, i32mem>;
2383 defm BLSMSK64 : bmi_bls<"blsmsk{q}", MRM2r, MRM2m, GR64, i64mem>, VEX_W;
2384 defm BLSI32 : bmi_bls<"blsi{l}", MRM3r, MRM3m, GR32, i32mem>;
2385 defm BLSI64 : bmi_bls<"blsi{q}", MRM3r, MRM3m, GR64, i64mem>, VEX_W;
2388 //===----------------------------------------------------------------------===//
2389 // Pattern fragments to auto generate BMI instructions.
2390 //===----------------------------------------------------------------------===//
2392 let Predicates = [HasBMI] in {
2393 // FIXME: patterns for the load versions are not implemented
// BLSR: reset lowest set bit, i.e. x & (x - 1).
2394 def : Pat<(and GR32:$src, (add GR32:$src, -1)),
2395 (BLSR32rr GR32:$src)>;
2396 def : Pat<(and GR64:$src, (add GR64:$src, -1)),
2397 (BLSR64rr GR64:$src)>;
// BLSMSK: mask up to (and including) lowest set bit, i.e. x ^ (x - 1).
2399 def : Pat<(xor GR32:$src, (add GR32:$src, -1)),
2400 (BLSMSK32rr GR32:$src)>;
2401 def : Pat<(xor GR64:$src, (add GR64:$src, -1)),
2402 (BLSMSK64rr GR64:$src)>;
// BLSI: isolate lowest set bit, i.e. x & -x.
2404 def : Pat<(and GR32:$src, (ineg GR32:$src)),
2405 (BLSI32rr GR32:$src)>;
2406 def : Pat<(and GR64:$src, (ineg GR64:$src)),
2407 (BLSI64rr GR64:$src)>;
// BEXTR (BMI1): bit-field extract, selected from the target-specific
// X86bextr node.  Uses the 4VOp3 ModRM forms (the VEX.vvvv operand is the
// *third* assembly operand); both forms clobber EFLAGS.
2410 multiclass bmi_bextr<bits<8> opc, string mnemonic, RegisterClass RC,
2411 X86MemOperand x86memop, SDNode OpNode,
2412 PatFrag ld_frag, X86FoldableSchedWrite Sched> {
2413 def rr : I<opc, MRMSrcReg4VOp3, (outs RC:$dst), (ins RC:$src1, RC:$src2),
2414 !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2415 [(set RC:$dst, (OpNode RC:$src1, RC:$src2)), (implicit EFLAGS)]>,
2416 T8PS, VEX, Sched<[Sched]>;
2417 def rm : I<opc, MRMSrcMem4VOp3, (outs RC:$dst), (ins x86memop:$src1, RC:$src2),
2418 !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2419 [(set RC:$dst, (OpNode (ld_frag addr:$src1), RC:$src2)),
2420 (implicit EFLAGS)]>, T8PS, VEX,
2421 Sched<[Sched.Folded,
// ReadDefault entries cover the memory-operand sub-operands in the
// scheduling model.
2423 ReadDefault, ReadDefault, ReadDefault, ReadDefault,
2429 let Predicates = [HasBMI], Defs = [EFLAGS] in {
2430 defm BEXTR32 : bmi_bextr<0xF7, "bextr{l}", GR32, i32mem,
2431 X86bextr, loadi32, WriteBEXTR>;
2432 defm BEXTR64 : bmi_bextr<0xF7, "bextr{q}", GR64, i64mem,
2433 X86bextr, loadi64, WriteBEXTR>, VEX_W;
// BZHI (BMI2): zero the high bits starting at a given index.  Structured
// exactly like bmi_bextr above, but selected from the bzhi intrinsics
// rather than a DAG node.
2436 multiclass bmi_bzhi<bits<8> opc, string mnemonic, RegisterClass RC,
2437 X86MemOperand x86memop, Intrinsic Int,
2438 PatFrag ld_frag, X86FoldableSchedWrite Sched> {
2439 def rr : I<opc, MRMSrcReg4VOp3, (outs RC:$dst), (ins RC:$src1, RC:$src2),
2440 !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2441 [(set RC:$dst, (Int RC:$src1, RC:$src2)), (implicit EFLAGS)]>,
2442 T8PS, VEX, Sched<[Sched]>;
2443 def rm : I<opc, MRMSrcMem4VOp3, (outs RC:$dst), (ins x86memop:$src1, RC:$src2),
2444 !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2445 [(set RC:$dst, (Int (ld_frag addr:$src1), RC:$src2)),
2446 (implicit EFLAGS)]>, T8PS, VEX,
2447 Sched<[Sched.Folded,
2449 ReadDefault, ReadDefault, ReadDefault, ReadDefault,
2455 let Predicates = [HasBMI2], Defs = [EFLAGS] in {
2456 defm BZHI32 : bmi_bzhi<0xF5, "bzhi{l}", GR32, i32mem,
2457 int_x86_bmi_bzhi_32, loadi32, WriteBZHI>;
2458 defm BZHI64 : bmi_bzhi<0xF5, "bzhi{q}", GR64, i64mem,
2459 int_x86_bmi_bzhi_64, loadi64, WriteBZHI>, VEX_W;
// Immediate transforms used by the large-mask 'and' patterns below.
// CountTrailingOnes: mask immediate -> number of trailing one bits (i8).
2462 def CountTrailingOnes : SDNodeXForm<imm, [{
2463 // Count the trailing ones in the immediate.
2464 return getI8Imm(countTrailingOnes(N->getZExtValue()), SDLoc(N));
// BEXTRMaskXForm: build a BEXTR control word — length in bits [15:8],
// start bit 0 (implicitly zero) in bits [7:0].
2467 def BEXTRMaskXForm : SDNodeXForm<imm, [{
2468 unsigned Length = countTrailingOnes(N->getZExtValue());
2469 return getI32Imm(Length << 8, SDLoc(N));
// AndMask64: a 64-bit contiguous low-bit mask too wide to encode as a
// sign-extended 32-bit immediate (so a plain AND64ri32 can't be used).
2472 def AndMask64 : ImmLeaf<i64, [{
2473 return isMask_64(Imm) && !isUInt<32>(Imm);
2476 // Use BEXTR for 64-bit 'and' with large immediate 'mask'.
// Only when BMI2/TBM are absent — those provide the cheaper BZHI/BEXTRI
// alternatives selected by the predicate sets below.
2477 let Predicates = [HasBMI, NoBMI2, NoTBM] in {
2478 def : Pat<(and GR64:$src, AndMask64:$mask),
2479 (BEXTR64rr GR64:$src,
2480 (SUBREG_TO_REG (i64 0),
2481 (MOV32ri (BEXTRMaskXForm imm:$mask)), sub_32bit))>;
2482 def : Pat<(and (loadi64 addr:$src), AndMask64:$mask),
2483 (BEXTR64rm addr:$src,
2484 (SUBREG_TO_REG (i64 0),
2485 (MOV32ri (BEXTRMaskXForm imm:$mask)), sub_32bit))>;
2488 // Use BZHI for 64-bit 'and' with large immediate 'mask'.
2489 let Predicates = [HasBMI2, NoTBM] in {
2490 def : Pat<(and GR64:$src, AndMask64:$mask),
2491 (BZHI64rr GR64:$src,
2492 (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
2493 (MOV8ri (CountTrailingOnes imm:$mask)), sub_8bit))>;
2494 def : Pat<(and (loadi64 addr:$src), AndMask64:$mask),
2495 (BZHI64rm addr:$src,
2496 (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
2497 (MOV8ri (CountTrailingOnes imm:$mask)), sub_8bit))>;
2500 let Predicates = [HasBMI2] in {
// Inner helper: given a reg pattern and the matching load pattern, emit
// the BZHI rr/rm selections with the i8 count widened into the full
// register via INSERT_SUBREG.
2501 multiclass _bmi_bzhi_pattern<dag regpattern, dag mempattern, RegisterClass RC,
2502 ValueType VT, Instruction DstInst,
2503 Instruction DstMemInst> {
2504 def : Pat<regpattern,
2506 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$lz, sub_8bit))>;
2507 def : Pat<mempattern,
2508 (DstMemInst addr:$src,
2509 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$lz, sub_8bit))>;
// Each idiom below computes "keep the low $lz bits of x", which is exactly
// what BZHI does; instantiate _bmi_bzhi_pattern once per canonical form.
2512 multiclass bmi_bzhi_patterns<RegisterClass RC, int bitwidth, ValueType VT,
2513 Instruction DstInst, X86MemOperand x86memop,
2514 Instruction DstMemInst> {
2515 // x & ((1 << y) - 1)
2516 defm : _bmi_bzhi_pattern<(and RC:$src, (add (shl 1, GR8:$lz), -1)),
2517 (and (x86memop addr:$src),
2518 (add (shl 1, GR8:$lz), -1)),
2519 RC, VT, DstInst, DstMemInst>;
// x & ~(-1 << y)
2522 defm : _bmi_bzhi_pattern<(and RC:$src, (xor (shl -1, GR8:$lz), -1)),
2523 (and (x86memop addr:$src),
2524 (xor (shl -1, GR8:$lz), -1)),
2525 RC, VT, DstInst, DstMemInst>;
2527 // x & (-1 >> (bitwidth - y))
2528 defm : _bmi_bzhi_pattern<(and RC:$src, (srl -1, (sub bitwidth, GR8:$lz))),
2529 (and (x86memop addr:$src),
2530 (srl -1, (sub bitwidth, GR8:$lz))),
2531 RC, VT, DstInst, DstMemInst>;
2533 // x << (bitwidth - y) >> (bitwidth - y)
2534 defm : _bmi_bzhi_pattern<(srl (shl RC:$src, (sub bitwidth, GR8:$lz)),
2535 (sub bitwidth, GR8:$lz)),
2536 (srl (shl (x86memop addr:$src),
2537 (sub bitwidth, GR8:$lz)),
2538 (sub bitwidth, GR8:$lz)),
2539 RC, VT, DstInst, DstMemInst>;
2542 defm : bmi_bzhi_patterns<GR32, 32, i32, BZHI32rr, loadi32, BZHI32rm>;
2543 defm : bmi_bzhi_patterns<GR64, 64, i64, BZHI64rr, loadi64, BZHI64rm>;
// Variants of the same idioms where the shift count arrives as a truncated
// GR32 instead of a GR8; 64-bit cases widen $lz with INSERT_SUBREG.
2545 // x & (-1 >> (32 - y))
2546 def : Pat<(and GR32:$src, (srl -1, (i8 (trunc (sub 32, GR32:$lz))))),
2547 (BZHI32rr GR32:$src, GR32:$lz)>;
2548 def : Pat<(and (loadi32 addr:$src), (srl -1, (i8 (trunc (sub 32, GR32:$lz))))),
2549 (BZHI32rm addr:$src, GR32:$lz)>;
2551 // x & (-1 >> (64 - y))
2552 def : Pat<(and GR64:$src, (srl -1, (i8 (trunc (sub 64, GR32:$lz))))),
2553 (BZHI64rr GR64:$src,
2554 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$lz, sub_32bit))>;
2555 def : Pat<(and (loadi64 addr:$src), (srl -1, (i8 (trunc (sub 64, GR32:$lz))))),
2556 (BZHI64rm addr:$src,
2557 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$lz, sub_32bit))>;
2559 // x << (32 - y) >> (32 - y)
2560 def : Pat<(srl (shl GR32:$src, (i8 (trunc (sub 32, GR32:$lz)))),
2561 (i8 (trunc (sub 32, GR32:$lz)))),
2562 (BZHI32rr GR32:$src, GR32:$lz)>;
2563 def : Pat<(srl (shl (loadi32 addr:$src), (i8 (trunc (sub 32, GR32:$lz)))),
2564 (i8 (trunc (sub 32, GR32:$lz)))),
2565 (BZHI32rm addr:$src, GR32:$lz)>;
2567 // x << (64 - y) >> (64 - y)
2568 def : Pat<(srl (shl GR64:$src, (i8 (trunc (sub 64, GR32:$lz)))),
2569 (i8 (trunc (sub 64, GR32:$lz)))),
2570 (BZHI64rr GR64:$src,
2571 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$lz, sub_32bit))>;
2572 def : Pat<(srl (shl (loadi64 addr:$src), (i8 (trunc (sub 64, GR32:$lz)))),
2573 (i8 (trunc (sub 64, GR32:$lz)))),
2574 (BZHI64rm addr:$src,
2575 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$lz, sub_32bit))>;
// PDEP/PEXT (BMI2): parallel bit deposit/extract, opcode 0xF5.  The two
// mnemonics differ only in mandatory prefix (T8XD = pdep, T8XS = pext),
// supplied at instantiation; selection is from the matching intrinsics.
2578 multiclass bmi_pdep_pext<string mnemonic, RegisterClass RC,
2579 X86MemOperand x86memop, Intrinsic Int,
2581 def rr : I<0xF5, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
2582 !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2583 [(set RC:$dst, (Int RC:$src1, RC:$src2))]>,
2584 VEX_4V, Sched<[WriteALU]>;
2585 def rm : I<0xF5, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
2586 !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2587 [(set RC:$dst, (Int RC:$src1, (ld_frag addr:$src2)))]>,
2588 VEX_4V, Sched<[WriteALULd, ReadAfterLd]>;
2591 let Predicates = [HasBMI2] in {
2592 defm PDEP32 : bmi_pdep_pext<"pdep{l}", GR32, i32mem,
2593 int_x86_bmi_pdep_32, loadi32>, T8XD;
2594 defm PDEP64 : bmi_pdep_pext<"pdep{q}", GR64, i64mem,
2595 int_x86_bmi_pdep_64, loadi64>, T8XD, VEX_W;
2596 defm PEXT32 : bmi_pdep_pext<"pext{l}", GR32, i32mem,
2597 int_x86_bmi_pext_32, loadi32>, T8XS;
2598 defm PEXT64 : bmi_pdep_pext<"pext{q}", GR64, i64mem,
2599 int_x86_bmi_pext_64, loadi64>, T8XS, VEX_W;
2602 //===----------------------------------------------------------------------===//
// TBM (AMD Trailing Bit Manipulation) instructions, XOP-encoded.
// All of them clobber EFLAGS.
2605 let Predicates = [HasTBM], Defs = [EFLAGS] in {
// BEXTRI: bit-field extract with an immediate control word instead of a
// register (compare bmi_bextr above).
2607 multiclass tbm_ternary_imm<bits<8> opc, RegisterClass RC, string OpcodeStr,
2608 X86MemOperand x86memop, PatFrag ld_frag,
2609 SDNode OpNode, Operand immtype,
2610 SDPatternOperator immoperator,
2611 X86FoldableSchedWrite Sched> {
2612 def ri : Ii32<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, immtype:$cntl),
2613 !strconcat(OpcodeStr,
2614 "\t{$cntl, $src1, $dst|$dst, $src1, $cntl}"),
2615 [(set RC:$dst, (OpNode RC:$src1, immoperator:$cntl))]>,
2616 XOP, XOPA, Sched<[Sched]>;
2617 def mi : Ii32<opc, MRMSrcMem, (outs RC:$dst),
2618 (ins x86memop:$src1, immtype:$cntl),
2619 !strconcat(OpcodeStr,
2620 "\t{$cntl, $src1, $dst|$dst, $src1, $cntl}"),
2621 [(set RC:$dst, (OpNode (ld_frag addr:$src1), immoperator:$cntl))]>,
2622 XOP, XOPA, Sched<[Sched.Folded]>;
2625 defm BEXTRI32 : tbm_ternary_imm<0x10, GR32, "bextr{l}", i32mem, loadi32,
2626 X86bextr, i32imm, imm, WriteBEXTR>;
// The 64-bit form still takes a 32-bit (sign-extended) immediate.
2627 let ImmT = Imm32S in
2628 defm BEXTRI64 : tbm_ternary_imm<0x10, GR64, "bextr{q}", i64mem, loadi64,
2629 X86bextr, i64i32imm,
2630 i64immSExt32, WriteBEXTR>, VEX_W;
// Unary TBM bit-twiddling ops: no ISel patterns here (hasSideEffects = 0,
// empty pattern lists); the Pat<> records later in the file select them.
2632 multiclass tbm_binary_rm<bits<8> opc, Format FormReg, Format FormMem,
2633 RegisterClass RC, string OpcodeStr,
2634 X86MemOperand x86memop, X86FoldableSchedWrite Sched> {
2635 let hasSideEffects = 0 in {
2636 def rr : I<opc, FormReg, (outs RC:$dst), (ins RC:$src),
2637 !strconcat(OpcodeStr,"\t{$src, $dst|$dst, $src}"), []>,
2638 XOP_4V, XOP9, Sched<[Sched]>;
2640 def rm : I<opc, FormMem, (outs RC:$dst), (ins x86memop:$src),
2641 !strconcat(OpcodeStr,"\t{$src, $dst|$dst, $src}"), []>,
2642 XOP_4V, XOP9, Sched<[Sched.Folded]>;
// Wrapper producing the 32- and 64-bit variants of one mnemonic.
2646 multiclass tbm_binary_intr<bits<8> opc, string OpcodeStr,
2647 X86FoldableSchedWrite Sched,
2648 Format FormReg, Format FormMem> {
2649 defm NAME#32 : tbm_binary_rm<opc, FormReg, FormMem, GR32, OpcodeStr#"{l}",
2651 defm NAME#64 : tbm_binary_rm<opc, FormReg, FormMem, GR64, OpcodeStr#"{q}",
2652 i64mem, Sched>, VEX_W;
// Each op is distinguished by opcode byte + ModRM reg field (MRMn).
2655 defm BLCFILL : tbm_binary_intr<0x01, "blcfill", WriteALU, MRM1r, MRM1m>;
2656 defm BLCI : tbm_binary_intr<0x02, "blci", WriteALU, MRM6r, MRM6m>;
2657 defm BLCIC : tbm_binary_intr<0x01, "blcic", WriteALU, MRM5r, MRM5m>;
2658 defm BLCMSK : tbm_binary_intr<0x02, "blcmsk", WriteALU, MRM1r, MRM1m>;
2659 defm BLCS : tbm_binary_intr<0x01, "blcs", WriteALU, MRM3r, MRM3m>;
2660 defm BLSFILL : tbm_binary_intr<0x01, "blsfill", WriteALU, MRM2r, MRM2m>;
2661 defm BLSIC : tbm_binary_intr<0x01, "blsic", WriteALU, MRM6r, MRM6m>;
2662 defm T1MSKC : tbm_binary_intr<0x01, "t1mskc", WriteALU, MRM7r, MRM7m>;
2663 defm TZMSK : tbm_binary_intr<0x01, "tzmsk", WriteALU, MRM4r, MRM4m>;
2666 // Use BEXTRI for 64-bit 'and' with large immediate 'mask'.
// Preferred over the BEXTR/BZHI expansions above when TBM is available,
// since the control word is an immediate (no extra register materialized).
2667 let Predicates = [HasTBM] in {
2668 def : Pat<(and GR64:$src, AndMask64:$mask),
2669 (BEXTRI64ri GR64:$src, (BEXTRMaskXForm imm:$mask))>;
2671 def : Pat<(and (loadi64 addr:$src), AndMask64:$mask),
2672 (BEXTRI64mi addr:$src, (BEXTRMaskXForm imm:$mask))>;
2675 //===----------------------------------------------------------------------===//
2676 // Lightweight Profiling Instructions
// AMD LWP, XOP-encoded (map 9 for LLWPCB/SLWPCB, map A for LWPINS/LWPVAL).
2678 let Predicates = [HasLWP], SchedRW = [WriteSystem] in {
// Load/store the LWP control block pointer.
2680 def LLWPCB : I<0x12, MRM0r, (outs), (ins GR32:$src), "llwpcb\t$src",
2681 [(int_x86_llwpcb GR32:$src)]>, XOP, XOP9;
2682 def SLWPCB : I<0x12, MRM1r, (outs GR32:$dst), (ins), "slwpcb\t$dst",
2683 [(set GR32:$dst, (int_x86_slwpcb))]>, XOP, XOP9;
2685 def LLWPCB64 : I<0x12, MRM0r, (outs), (ins GR64:$src), "llwpcb\t$src",
2686 [(int_x86_llwpcb GR64:$src)]>, XOP, XOP9, VEX_W;
2687 def SLWPCB64 : I<0x12, MRM1r, (outs GR64:$dst), (ins), "slwpcb\t$dst",
2688 [(set GR64:$dst, (int_x86_slwpcb))]>, XOP, XOP9, VEX_W;
// LWPINS: insert an event record; the X86lwpins node's result is bound to
// EFLAGS (the instruction reports success via a flag).
2690 multiclass lwpins_intr<RegisterClass RC> {
2691 def rri : Ii32<0x12, MRM0r, (outs), (ins RC:$src0, GR32:$src1, i32imm:$cntl),
2692 "lwpins\t{$cntl, $src1, $src0|$src0, $src1, $cntl}",
2693 [(set EFLAGS, (X86lwpins RC:$src0, GR32:$src1, imm:$cntl))]>,
2696 def rmi : Ii32<0x12, MRM0m, (outs), (ins RC:$src0, i32mem:$src1, i32imm:$cntl),
2697 "lwpins\t{$cntl, $src1, $src0|$src0, $src1, $cntl}",
2698 [(set EFLAGS, (X86lwpins RC:$src0, (loadi32 addr:$src1), imm:$cntl))]>,
2702 let Defs = [EFLAGS] in {
2703 defm LWPINS32 : lwpins_intr<GR32>;
2704 defm LWPINS64 : lwpins_intr<GR64>, VEX_W;
// LWPVAL: like LWPINS but produces no flag result, hence a plain
// intrinsic call pattern with no 'set'.
2707 multiclass lwpval_intr<RegisterClass RC, Intrinsic Int> {
2708 def rri : Ii32<0x12, MRM1r, (outs), (ins RC:$src0, GR32:$src1, i32imm:$cntl),
2709 "lwpval\t{$cntl, $src1, $src0|$src0, $src1, $cntl}",
2710 [(Int RC:$src0, GR32:$src1, imm:$cntl)]>, XOP_4V, XOPA;
2712 def rmi : Ii32<0x12, MRM1m, (outs), (ins RC:$src0, i32mem:$src1, i32imm:$cntl),
2713 "lwpval\t{$cntl, $src1, $src0|$src0, $src1, $cntl}",
2714 [(Int RC:$src0, (loadi32 addr:$src1), imm:$cntl)]>,
2718 defm LWPVAL32 : lwpval_intr<GR32, int_x86_lwpval32>;
2719 defm LWPVAL64 : lwpval_intr<GR64, int_x86_lwpval64>, VEX_W;
2721 } // HasLWP, SchedRW
2723 //===----------------------------------------------------------------------===//
2724 // MONITORX/MWAITX Instructions
2726 let SchedRW = [ WriteSystem ] in {
// Pseudo expanded by a custom inserter: the intrinsic's generic operands
// must be marshalled into the fixed EAX/ECX/EDX registers the real
// instruction reads.
2727 let usesCustomInserter = 1 in {
2728 def MONITORX : PseudoI<(outs), (ins i32mem:$src1, GR32:$src2, GR32:$src3),
2729 [(int_x86_monitorx addr:$src1, GR32:$src2, GR32:$src3)]>,
2730 Requires<[ HasMWAITX ]>;
2733 let Uses = [ EAX, ECX, EDX ] in {
2734 def MONITORXrrr : I<0x01, MRM_FA, (outs), (ins), "monitorx", []>,
2735 TB, Requires<[ HasMWAITX ]>;
2738 let Uses = [ ECX, EAX, EBX ] in {
2739 def MWAITXrrr : I<0x01, MRM_FB, (outs), (ins), "mwaitx",
2740 [(int_x86_mwaitx ECX, EAX, EBX)]>,
2741 TB, Requires<[ HasMWAITX ]>;
// Assembler aliases spelling out the implicit register operands; the
// register width in the spelling tracks the current mode.
2745 def : InstAlias<"mwaitx\t{%eax, %ecx, %ebx|ebx, ecx, eax}", (MWAITXrrr)>,
2746 Requires<[ Not64BitMode ]>;
2747 def : InstAlias<"mwaitx\t{%rax, %rcx, %rbx|rbx, rcx, rax}", (MWAITXrrr)>,
2748 Requires<[ In64BitMode ]>;
2750 def : InstAlias<"monitorx\t{%eax, %ecx, %edx|edx, ecx, eax}", (MONITORXrrr)>,
2751 Requires<[ Not64BitMode ]>;
2752 def : InstAlias<"monitorx\t{%rax, %rcx, %rdx|rdx, rcx, rax}", (MONITORXrrr)>,
2753 Requires<[ In64BitMode ]>;
2755 //===----------------------------------------------------------------------===//
2756 // WAITPKG Instructions
2758 let SchedRW = [WriteSystem] in {
// UMONITOR takes its address in a register whose width follows the address
// size, hence three defs distinguished only by AdSize and mode predicates.
2759 def UMONITOR16 : I<0xAE, MRM6r, (outs), (ins GR16:$src),
2760 "umonitor\t$src", [(int_x86_umonitor GR16:$src)]>,
2761 XS, AdSize16, Requires<[HasWAITPKG, Not64BitMode]>;
2762 def UMONITOR32 : I<0xAE, MRM6r, (outs), (ins GR32:$src),
2763 "umonitor\t$src", [(int_x86_umonitor GR32:$src)]>,
2764 XS, AdSize32, Requires<[HasWAITPKG]>;
2765 def UMONITOR64 : I<0xAE, MRM6r, (outs), (ins GR64:$src),
2766 "umonitor\t$src", [(int_x86_umonitor GR64:$src)]>,
2767 XS, AdSize64, Requires<[HasWAITPKG, In64BitMode]>;
// UMWAIT/TPAUSE share the encoding (0xAE /6) and differ by mandatory prefix
// (XD vs PD); both read the EDX:EAX deadline and report via EFLAGS.
2768 let Uses = [EAX, EDX], Defs = [EFLAGS] in {
2769 def UMWAIT : I<0xAE, MRM6r,
2770 (outs), (ins GR32orGR64:$src), "umwait\t$src",
2771 [(set EFLAGS, (X86umwait GR32orGR64:$src, EDX, EAX))]>,
2772 XD, Requires<[HasWAITPKG]>;
2773 def TPAUSE : I<0xAE, MRM6r,
2774 (outs), (ins GR32orGR64:$src), "tpause\t$src",
2775 [(set EFLAGS, (X86tpause GR32orGR64:$src, EDX, EAX))]>,
2776 PD, Requires<[HasWAITPKG]>, NotMemoryFoldable;
2780 //===----------------------------------------------------------------------===//
2781 // MOVDIRI - Move doubleword/quadword as direct store
2783 let SchedRW = [WriteStore] in {
2784 def MOVDIRI32 : I<0xF9, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2785 "movdiri\t{$src, $dst|$dst, $src}",
2786 [(int_x86_directstore32 addr:$dst, GR32:$src)]>,
2787 T8, Requires<[HasMOVDIRI]>;
2788 def MOVDIRI64 : RI<0xF9, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
2789 "movdiri\t{$src, $dst|$dst, $src}",
2790 [(int_x86_directstore64 addr:$dst, GR64:$src)]>,
2791 T8, Requires<[In64BitMode, HasMOVDIRI]>;
2794 //===----------------------------------------------------------------------===//
2795 // MOVDIR64B - Move 64 bytes as direct store
// The destination is held in a register sized by the address size (AdSize),
// so there are three variants; the 16-bit one has no intrinsic pattern.
2797 let SchedRW = [WriteStore] in {
2798 def MOVDIR64B16 : I<0xF8, MRMSrcMem, (outs), (ins GR16:$dst, i512mem:$src),
2799 "movdir64b\t{$src, $dst|$dst, $src}", []>,
2800 T8PD, AdSize16, Requires<[HasMOVDIR64B, Not64BitMode]>;
2801 def MOVDIR64B32 : I<0xF8, MRMSrcMem, (outs), (ins GR32:$dst, i512mem:$src),
2802 "movdir64b\t{$src, $dst|$dst, $src}",
2803 [(int_x86_movdir64b GR32:$dst, addr:$src)]>,
2804 T8PD, AdSize32, Requires<[HasMOVDIR64B]>;
2805 def MOVDIR64B64 : I<0xF8, MRMSrcMem, (outs), (ins GR64:$dst, i512mem:$src),
2806 "movdir64b\t{$src, $dst|$dst, $src}",
2807 [(int_x86_movdir64b GR64:$dst, addr:$src)]>,
2808 T8PD, AdSize64, Requires<[HasMOVDIR64B, In64BitMode]>;
2811 //===----------------------------------------------------------------------===//
2812 // CLZERO Instruction
2814 let SchedRW = [WriteSystem] in {
// Real instruction (implicit address register) plus a custom-inserted
// pseudo that lets the intrinsic pass the address generically.
2816 def CLZEROr : I<0x01, MRM_FC, (outs), (ins), "clzero", []>,
2817 TB, Requires<[HasCLZERO]>;
2819 let usesCustomInserter = 1 in {
2820 def CLZERO : PseudoI<(outs), (ins i32mem:$src1),
2821 [(int_x86_clzero addr:$src1)]>, Requires<[HasCLZERO]>;
2825 def : InstAlias<"clzero\t{%eax|eax}", (CLZEROr)>, Requires<[Not64BitMode]>;
2826 def : InstAlias<"clzero\t{%rax|rax}", (CLZEROr)>, Requires<[In64BitMode]>;
2828 //===----------------------------------------------------------------------===//
2829 // Pattern fragments to auto generate TBM instructions.
2830 //===----------------------------------------------------------------------===//
2832 let Predicates = [HasTBM] in {
2833 // FIXME: patterns for the load versions are not implemented
// BLCFILL: x & (x + 1) — fill from lowest clear bit.
2834 def : Pat<(and GR32:$src, (add GR32:$src, 1)),
2835 (BLCFILL32rr GR32:$src)>;
2836 def : Pat<(and GR64:$src, (add GR64:$src, 1)),
2837 (BLCFILL64rr GR64:$src)>;
// BLCI: x | ~(x + 1) — isolate lowest clear bit.
2839 def : Pat<(or GR32:$src, (not (add GR32:$src, 1))),
2840 (BLCI32rr GR32:$src)>;
2841 def : Pat<(or GR64:$src, (not (add GR64:$src, 1))),
2842 (BLCI64rr GR64:$src)>;
2844 // Extra patterns because opt can optimize the above patterns to this.
// ~(x + 1) == -2 - x, so match the canonicalized (sub -2, x) form too.
2845 def : Pat<(or GR32:$src, (sub -2, GR32:$src)),
2846 (BLCI32rr GR32:$src)>;
2847 def : Pat<(or GR64:$src, (sub -2, GR64:$src)),
2848 (BLCI64rr GR64:$src)>;
// BLCIC: ~x & (x + 1).
2850 def : Pat<(and (not GR32:$src), (add GR32:$src, 1)),
2851 (BLCIC32rr GR32:$src)>;
2852 def : Pat<(and (not GR64:$src), (add GR64:$src, 1)),
2853 (BLCIC64rr GR64:$src)>;
// BLCMSK: x ^ (x + 1).
2855 def : Pat<(xor GR32:$src, (add GR32:$src, 1)),
2856 (BLCMSK32rr GR32:$src)>;
2857 def : Pat<(xor GR64:$src, (add GR64:$src, 1)),
2858 (BLCMSK64rr GR64:$src)>;
// BLCS: x | (x + 1).
2860 def : Pat<(or GR32:$src, (add GR32:$src, 1)),
2861 (BLCS32rr GR32:$src)>;
2862 def : Pat<(or GR64:$src, (add GR64:$src, 1)),
2863 (BLCS64rr GR64:$src)>;
// BLSFILL: x | (x - 1).
2865 def : Pat<(or GR32:$src, (add GR32:$src, -1)),
2866 (BLSFILL32rr GR32:$src)>;
2867 def : Pat<(or GR64:$src, (add GR64:$src, -1)),
2868 (BLSFILL64rr GR64:$src)>;
// BLSIC: ~x | (x - 1).
2870 def : Pat<(or (not GR32:$src), (add GR32:$src, -1)),
2871 (BLSIC32rr GR32:$src)>;
2872 def : Pat<(or (not GR64:$src), (add GR64:$src, -1)),
2873 (BLSIC64rr GR64:$src)>;
// T1MSKC: ~x | (x + 1).
2875 def : Pat<(or (not GR32:$src), (add GR32:$src, 1)),
2876 (T1MSKC32rr GR32:$src)>;
2877 def : Pat<(or (not GR64:$src), (add GR64:$src, 1)),
2878 (T1MSKC64rr GR64:$src)>;
// TZMSK: ~x & (x - 1).
2880 def : Pat<(and (not GR32:$src), (add GR32:$src, -1)),
2881 (TZMSK32rr GR32:$src)>;
2882 def : Pat<(and (not GR64:$src), (add GR64:$src, -1)),
2883 (TZMSK64rr GR64:$src)>;
2886 //===----------------------------------------------------------------------===//
2887 // Memory Instructions
// Cache-line management, all encoded in the 0x0F group with a memory
// operand selecting the line.
2890 let Predicates = [HasCLFLUSHOPT], SchedRW = [WriteLoad] in
2891 def CLFLUSHOPT : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
2892 "clflushopt\t$src", [(int_x86_clflushopt addr:$src)]>, PD;
2894 let Predicates = [HasCLWB], SchedRW = [WriteLoad] in
2895 def CLWB : I<0xAE, MRM6m, (outs), (ins i8mem:$src), "clwb\t$src",
2896 [(int_x86_clwb addr:$src)]>, PD, NotMemoryFoldable;
2898 let Predicates = [HasCLDEMOTE], SchedRW = [WriteLoad] in
2899 def CLDEMOTE : I<0x1C, MRM0m, (outs), (ins i8mem:$src), "cldemote\t$src",
2900 [(int_x86_cldemote addr:$src)]>, TB;
2902 //===----------------------------------------------------------------------===//
2904 //===----------------------------------------------------------------------===//
2906 include "X86InstrArithmetic.td"
2907 include "X86InstrCMovSetCC.td"
2908 include "X86InstrExtension.td"
2909 include "X86InstrControl.td"
2910 include "X86InstrShiftRotate.td"
2912 // X87 Floating Point Stack.
2913 include "X86InstrFPStack.td"
2915 // SIMD support (SSE, MMX and AVX)
2916 include "X86InstrFragmentsSIMD.td"
2918 // FMA - Fused Multiply-Add support (requires FMA)
2919 include "X86InstrFMA.td"
2922 include "X86InstrXOP.td"
2924 // SSE, MMX and 3DNow! vector support.
2925 include "X86InstrSSE.td"
2926 include "X86InstrAVX512.td"
2927 include "X86InstrMMX.td"
2928 include "X86Instr3DNow.td"
2931 include "X86InstrMPX.td"
2933 include "X86InstrVMX.td"
2934 include "X86InstrSVM.td"
2936 include "X86InstrTSX.td"
2937 include "X86InstrSGX.td"
2939 // System instructions.
2940 include "X86InstrSystem.td"
2942 // Compiler Pseudo Instructions and Pat Patterns
2943 include "X86InstrCompiler.td"
2944 include "X86InstrVecCompiler.td"
2946 //===----------------------------------------------------------------------===//
2947 // Assembler Mnemonic Aliases
2948 //===----------------------------------------------------------------------===//
// Mode-sensitive spellings: the suffix-less mnemonic maps to the form whose
// operand size matches the current mode.
2950 def : MnemonicAlias<"call", "callw", "att">, Requires<[In16BitMode]>;
2951 def : MnemonicAlias<"call", "calll", "att">, Requires<[In32BitMode]>;
2952 def : MnemonicAlias<"call", "callq", "att">, Requires<[In64BitMode]>;
// Intel sign-extension mnemonics -> AT&T spellings.
2954 def : MnemonicAlias<"cbw", "cbtw", "att">;
2955 def : MnemonicAlias<"cwde", "cwtl", "att">;
2956 def : MnemonicAlias<"cwd", "cwtd", "att">;
2957 def : MnemonicAlias<"cdq", "cltd", "att">;
2958 def : MnemonicAlias<"cdqe", "cltq", "att">;
2959 def : MnemonicAlias<"cqo", "cqto", "att">;
2961 // In 64-bit mode lret maps to lretl; it is not ambiguous with lretq.
2962 def : MnemonicAlias<"lret", "lretw", "att">, Requires<[In16BitMode]>;
2963 def : MnemonicAlias<"lret", "lretl", "att">, Requires<[Not16BitMode]>;
2965 def : MnemonicAlias<"leavel", "leave", "att">, Requires<[Not64BitMode]>;
2966 def : MnemonicAlias<"leaveq", "leave", "att">, Requires<[In64BitMode]>;
2968 def : MnemonicAlias<"loopz", "loope">;
2969 def : MnemonicAlias<"loopnz", "loopne">;
2971 def : MnemonicAlias<"pop", "popw", "att">, Requires<[In16BitMode]>;
2972 def : MnemonicAlias<"pop", "popl", "att">, Requires<[In32BitMode]>;
2973 def : MnemonicAlias<"pop", "popq", "att">, Requires<[In64BitMode]>;
2974 def : MnemonicAlias<"popf", "popfw", "att">, Requires<[In16BitMode]>;
2975 def : MnemonicAlias<"popf", "popfl", "att">, Requires<[In32BitMode]>;
2976 def : MnemonicAlias<"popf", "popfq", "att">, Requires<[In64BitMode]>;
2977 def : MnemonicAlias<"popf", "popfq", "intel">, Requires<[In64BitMode]>;
2978 def : MnemonicAlias<"popfd", "popfl", "att">;
2980 // FIXME: This is wrong for "push reg". "push %bx" should turn into pushw in
2981 // all modes. However: "push (addr)" and "push $42" should default to
2982 // pushl/pushq depending on the current mode. Similar for "pop %bx"
2983 def : MnemonicAlias<"push", "pushw", "att">, Requires<[In16BitMode]>;
2984 def : MnemonicAlias<"push", "pushl", "att">, Requires<[In32BitMode]>;
2985 def : MnemonicAlias<"push", "pushq", "att">, Requires<[In64BitMode]>;
2986 def : MnemonicAlias<"pushf", "pushfw", "att">, Requires<[In16BitMode]>;
2987 def : MnemonicAlias<"pushf", "pushfl", "att">, Requires<[In32BitMode]>;
2988 def : MnemonicAlias<"pushf", "pushfq", "att">, Requires<[In64BitMode]>;
2989 def : MnemonicAlias<"pushf", "pushfq", "intel">, Requires<[In64BitMode]>;
2990 def : MnemonicAlias<"pushfd", "pushfl", "att">;
2992 def : MnemonicAlias<"popad", "popal", "intel">, Requires<[Not64BitMode]>;
2993 def : MnemonicAlias<"pushad", "pushal", "intel">, Requires<[Not64BitMode]>;
2994 def : MnemonicAlias<"popa", "popaw", "intel">, Requires<[In16BitMode]>;
2995 def : MnemonicAlias<"pusha", "pushaw", "intel">, Requires<[In16BitMode]>;
2996 def : MnemonicAlias<"popa", "popal", "intel">, Requires<[In32BitMode]>;
2997 def : MnemonicAlias<"pusha", "pushal", "intel">, Requires<[In32BitMode]>;
2999 def : MnemonicAlias<"popa", "popaw", "att">, Requires<[In16BitMode]>;
3000 def : MnemonicAlias<"pusha", "pushaw", "att">, Requires<[In16BitMode]>;
3001 def : MnemonicAlias<"popa", "popal", "att">, Requires<[In32BitMode]>;
3002 def : MnemonicAlias<"pusha", "pushal", "att">, Requires<[In32BitMode]>;
3004 def : MnemonicAlias<"repe", "rep">;
3005 def : MnemonicAlias<"repz", "rep">;
3006 def : MnemonicAlias<"repnz", "repne">;
3008 def : MnemonicAlias<"ret", "retw", "att">, Requires<[In16BitMode]>;
3009 def : MnemonicAlias<"ret", "retl", "att">, Requires<[In32BitMode]>;
3010 def : MnemonicAlias<"ret", "retq", "att">, Requires<[In64BitMode]>;
3012 // Apply 'ret' behavior to 'retn'
3013 def : MnemonicAlias<"retn", "retw", "att">, Requires<[In16BitMode]>;
3014 def : MnemonicAlias<"retn", "retl", "att">, Requires<[In32BitMode]>;
3015 def : MnemonicAlias<"retn", "retq", "att">, Requires<[In64BitMode]>;
3016 def : MnemonicAlias<"retn", "ret", "intel">;
// 'sal' and 'shl' are the same encoding.
3018 def : MnemonicAlias<"sal", "shl", "intel">;
3019 def : MnemonicAlias<"salb", "shlb", "att">;
3020 def : MnemonicAlias<"salw", "shlw", "att">;
3021 def : MnemonicAlias<"sall", "shll", "att">;
3022 def : MnemonicAlias<"salq", "shlq", "att">;
3024 def : MnemonicAlias<"smovb", "movsb", "att">;
3025 def : MnemonicAlias<"smovw", "movsw", "att">;
3026 def : MnemonicAlias<"smovl", "movsl", "att">;
3027 def : MnemonicAlias<"smovq", "movsq", "att">;
3029 def : MnemonicAlias<"ud2a", "ud2", "att">;
3030 def : MnemonicAlias<"verrw", "verr", "att">;
3032 // MS recognizes 'xacquire'/'xrelease' as 'acquire'/'release'
3033 def : MnemonicAlias<"acquire", "xacquire", "intel">;
3034 def : MnemonicAlias<"release", "xrelease", "intel">;
3036 // System instruction aliases.
3037 def : MnemonicAlias<"iret", "iretw", "att">, Requires<[In16BitMode]>;
3038 def : MnemonicAlias<"iret", "iretl", "att">, Requires<[Not16BitMode]>;
3039 def : MnemonicAlias<"sysret", "sysretl", "att">;
3040 def : MnemonicAlias<"sysexit", "sysexitl", "att">;
// Descriptor-table instructions: suffix follows mode; Intel syntax uses
// 'd' where AT&T uses 'l' for the 32-bit forms.
3042 def : MnemonicAlias<"lgdt", "lgdtw", "att">, Requires<[In16BitMode]>;
3043 def : MnemonicAlias<"lgdt", "lgdtl", "att">, Requires<[In32BitMode]>;
3044 def : MnemonicAlias<"lgdt", "lgdtq", "att">, Requires<[In64BitMode]>;
3045 def : MnemonicAlias<"lidt", "lidtw", "att">, Requires<[In16BitMode]>;
3046 def : MnemonicAlias<"lidt", "lidtl", "att">, Requires<[In32BitMode]>;
3047 def : MnemonicAlias<"lidt", "lidtq", "att">, Requires<[In64BitMode]>;
3048 def : MnemonicAlias<"sgdt", "sgdtw", "att">, Requires<[In16BitMode]>;
3049 def : MnemonicAlias<"sgdt", "sgdtl", "att">, Requires<[In32BitMode]>;
3050 def : MnemonicAlias<"sgdt", "sgdtq", "att">, Requires<[In64BitMode]>;
3051 def : MnemonicAlias<"sidt", "sidtw", "att">, Requires<[In16BitMode]>;
3052 def : MnemonicAlias<"sidt", "sidtl", "att">, Requires<[In32BitMode]>;
3053 def : MnemonicAlias<"sidt", "sidtq", "att">, Requires<[In64BitMode]>;
3054 def : MnemonicAlias<"lgdt", "lgdtw", "intel">, Requires<[In16BitMode]>;
3055 def : MnemonicAlias<"lgdt", "lgdtd", "intel">, Requires<[In32BitMode]>;
3056 def : MnemonicAlias<"lidt", "lidtw", "intel">, Requires<[In16BitMode]>;
3057 def : MnemonicAlias<"lidt", "lidtd", "intel">, Requires<[In32BitMode]>;
3058 def : MnemonicAlias<"sgdt", "sgdtw", "intel">, Requires<[In16BitMode]>;
3059 def : MnemonicAlias<"sgdt", "sgdtd", "intel">, Requires<[In32BitMode]>;
3060 def : MnemonicAlias<"sidt", "sidtw", "intel">, Requires<[In16BitMode]>;
3061 def : MnemonicAlias<"sidt", "sidtd", "intel">, Requires<[In32BitMode]>;
3064 // Floating point stack aliases.
3065 def : MnemonicAlias<"fcmovz", "fcmove", "att">;
3066 def : MnemonicAlias<"fcmova", "fcmovnbe", "att">;
3067 def : MnemonicAlias<"fcmovnae", "fcmovb", "att">;
3068 def : MnemonicAlias<"fcmovna", "fcmovbe", "att">;
3069 def : MnemonicAlias<"fcmovae", "fcmovnb", "att">;
3070 def : MnemonicAlias<"fcomip", "fcompi">;
3071 def : MnemonicAlias<"fildq", "fildll", "att">;
3072 def : MnemonicAlias<"fistpq", "fistpll", "att">;
3073 def : MnemonicAlias<"fisttpq", "fisttpll", "att">;
3074 def : MnemonicAlias<"fldcww", "fldcw", "att">;
3075 def : MnemonicAlias<"fnstcww", "fnstcw", "att">;
3076 def : MnemonicAlias<"fnstsww", "fnstsw", "att">;
3077 def : MnemonicAlias<"fucomip", "fucompi">;
3078 def : MnemonicAlias<"fwait", "wait">;
// 'q'-suffixed spellings of the 64-bit save/restore instructions.
3080 def : MnemonicAlias<"fxsaveq", "fxsave64", "att">;
3081 def : MnemonicAlias<"fxrstorq", "fxrstor64", "att">;
3082 def : MnemonicAlias<"xsaveq", "xsave64", "att">;
3083 def : MnemonicAlias<"xrstorq", "xrstor64", "att">;
3084 def : MnemonicAlias<"xsaveoptq", "xsaveopt64", "att">;
3085 def : MnemonicAlias<"xrstorsq", "xrstors64", "att">;
3086 def : MnemonicAlias<"xsavecq", "xsavec64", "att">;
3087 def : MnemonicAlias<"xsavesq", "xsaves64", "att">;
// CondCodeAlias - Helper that builds a MnemonicAlias mapping
// Prefix+OldCond+Suffix onto Prefix+NewCond+Suffix for a given asm variant.
// NOTE(review): this excerpt appears to be missing the line declaring the
// VariantName parameter between the class header and the ':' — confirm
// against the full file before editing.
3089 class CondCodeAlias<string Prefix,string Suffix, string OldCond, string NewCond,
3091 : MnemonicAlias<!strconcat(Prefix, OldCond, Suffix),
3092 !strconcat(Prefix, NewCond, Suffix), VariantName>;
3094 /// IntegerCondCodeMnemonicAlias - This multiclass defines a bunch of
3095 /// MnemonicAlias's that canonicalize the condition code in a mnemonic, for
3096 /// example "setz" -> "sete".
// NOTE(review): the tail of the multiclass header (declaring V) and the
// closing brace also appear to be missing from this excerpt.
3097 multiclass IntegerCondCodeMnemonicAlias<string Prefix, string Suffix,
3099 def C : CondCodeAlias<Prefix, Suffix, "c", "b", V>; // setc -> setb
3100 def Z : CondCodeAlias<Prefix, Suffix, "z" , "e", V>; // setz -> sete
3101 def NA : CondCodeAlias<Prefix, Suffix, "na", "be", V>; // setna -> setbe
3102 def NB : CondCodeAlias<Prefix, Suffix, "nb", "ae", V>; // setnb -> setae
3103 def NC : CondCodeAlias<Prefix, Suffix, "nc", "ae", V>; // setnc -> setae
3104 def NG : CondCodeAlias<Prefix, Suffix, "ng", "le", V>; // setng -> setle
3105 def NL : CondCodeAlias<Prefix, Suffix, "nl", "ge", V>; // setnl -> setge
3106 def NZ : CondCodeAlias<Prefix, Suffix, "nz", "ne", V>; // setnz -> setne
3107 def PE : CondCodeAlias<Prefix, Suffix, "pe", "p", V>; // setpe -> setp
3108 def PO : CondCodeAlias<Prefix, Suffix, "po", "np", V>; // setpo -> setnp
3110 def NAE : CondCodeAlias<Prefix, Suffix, "nae", "b", V>; // setnae -> setb
3111 def NBE : CondCodeAlias<Prefix, Suffix, "nbe", "a", V>; // setnbe -> seta
3112 def NGE : CondCodeAlias<Prefix, Suffix, "nge", "l", V>; // setnge -> setl
3113 def NLE : CondCodeAlias<Prefix, Suffix, "nle", "g", V>; // setnle -> setg
3116 // Aliases for set<CC>
3117 defm : IntegerCondCodeMnemonicAlias<"set", "">;
3118 // Aliases for j<CC>
3119 defm : IntegerCondCodeMnemonicAlias<"j", "">;
3120 // Aliases for cmov<CC>{w,l,q}
3121 defm : IntegerCondCodeMnemonicAlias<"cmov", "w", "att">;
3122 defm : IntegerCondCodeMnemonicAlias<"cmov", "l", "att">;
3123 defm : IntegerCondCodeMnemonicAlias<"cmov", "q", "att">;
3124 // No size suffix for intel-style asm.
3125 defm : IntegerCondCodeMnemonicAlias<"cmov", "", "intel">;
3128 //===----------------------------------------------------------------------===//
3129 // Assembler Instruction Aliases
3130 //===----------------------------------------------------------------------===//
3132 // aad/aam default to base 10 if no operand is specified.
3133 def : InstAlias<"aad", (AAD8i8 10)>, Requires<[Not64BitMode]>;
3134 def : InstAlias<"aam", (AAM8i8 10)>, Requires<[Not64BitMode]>;
3136 // Disambiguate the mem/imm form of bt-without-a-suffix as btl.
3137 // Likewise for btc/btr/bts.
3138 def : InstAlias<"bt\t{$imm, $mem|$mem, $imm}",
3139 (BT32mi8 i32mem:$mem, i32i8imm:$imm), 0, "att">;
3140 def : InstAlias<"btc\t{$imm, $mem|$mem, $imm}",
3141 (BTC32mi8 i32mem:$mem, i32i8imm:$imm), 0, "att">;
3142 def : InstAlias<"btr\t{$imm, $mem|$mem, $imm}",
3143 (BTR32mi8 i32mem:$mem, i32i8imm:$imm), 0, "att">;
3144 def : InstAlias<"bts\t{$imm, $mem|$mem, $imm}",
3145 (BTS32mi8 i32mem:$mem, i32i8imm:$imm), 0, "att">;
// clr{b,w,l,q} reg: accepted as the classic xor reg,reg zeroing idiom.
3148 def : InstAlias<"clr{b}\t$reg", (XOR8rr GR8 :$reg, GR8 :$reg), 0>;
3149 def : InstAlias<"clr{w}\t$reg", (XOR16rr GR16:$reg, GR16:$reg), 0>;
3150 def : InstAlias<"clr{l}\t$reg", (XOR32rr GR32:$reg, GR32:$reg), 0>;
3151 def : InstAlias<"clr{q}\t$reg", (XOR64rr GR64:$reg, GR64:$reg), 0>;
3153 // lods aliases. Accept the destination being omitted because it's implicit
3154 // in the mnemonic, or the mnemonic suffix being omitted because it's implicit
3155 // in the destination.
3156 def : InstAlias<"lodsb\t$src", (LODSB srcidx8:$src), 0>;
3157 def : InstAlias<"lodsw\t$src", (LODSW srcidx16:$src), 0>;
3158 def : InstAlias<"lods{l|d}\t$src", (LODSL srcidx32:$src), 0>;
3159 def : InstAlias<"lodsq\t$src", (LODSQ srcidx64:$src), 0>, Requires<[In64BitMode]>;
3160 def : InstAlias<"lods\t{$src, %al|al, $src}", (LODSB srcidx8:$src), 0>;
3161 def : InstAlias<"lods\t{$src, %ax|ax, $src}", (LODSW srcidx16:$src), 0>;
3162 def : InstAlias<"lods\t{$src, %eax|eax, $src}", (LODSL srcidx32:$src), 0>;
3163 def : InstAlias<"lods\t{$src, %rax|rax, $src}", (LODSQ srcidx64:$src), 0>, Requires<[In64BitMode]>;
3164 def : InstAlias<"lods\t$src", (LODSB srcidx8:$src), 0, "intel">;
3165 def : InstAlias<"lods\t$src", (LODSW srcidx16:$src), 0, "intel">;
3166 def : InstAlias<"lods\t$src", (LODSL srcidx32:$src), 0, "intel">;
3167 def : InstAlias<"lods\t$src", (LODSQ srcidx64:$src), 0, "intel">, Requires<[In64BitMode]>;
3170 // stos aliases. Accept the source being omitted because it's implicit in
3171 // the mnemonic, or the mnemonic suffix being omitted because it's implicit
// in the destination.
3173 def : InstAlias<"stosb\t$dst", (STOSB dstidx8:$dst), 0>;
3174 def : InstAlias<"stosw\t$dst", (STOSW dstidx16:$dst), 0>;
3175 def : InstAlias<"stos{l|d}\t$dst", (STOSL dstidx32:$dst), 0>;
3176 def : InstAlias<"stosq\t$dst", (STOSQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
3177 def : InstAlias<"stos\t{%al, $dst|$dst, al}", (STOSB dstidx8:$dst), 0>;
3178 def : InstAlias<"stos\t{%ax, $dst|$dst, ax}", (STOSW dstidx16:$dst), 0>;
3179 def : InstAlias<"stos\t{%eax, $dst|$dst, eax}", (STOSL dstidx32:$dst), 0>;
3180 def : InstAlias<"stos\t{%rax, $dst|$dst, rax}", (STOSQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
3181 def : InstAlias<"stos\t$dst", (STOSB dstidx8:$dst), 0, "intel">;
3182 def : InstAlias<"stos\t$dst", (STOSW dstidx16:$dst), 0, "intel">;
3183 def : InstAlias<"stos\t$dst", (STOSL dstidx32:$dst), 0, "intel">;
3184 def : InstAlias<"stos\t$dst", (STOSQ dstidx64:$dst), 0, "intel">, Requires<[In64BitMode]>;
3187 // scas aliases. Accept the destination being omitted because it's implicit
3188 // in the mnemonic, or the mnemonic suffix being omitted because it's implicit
3189 // in the destination.
3190 def : InstAlias<"scasb\t$dst", (SCASB dstidx8:$dst), 0>;
3191 def : InstAlias<"scasw\t$dst", (SCASW dstidx16:$dst), 0>;
3192 def : InstAlias<"scas{l|d}\t$dst", (SCASL dstidx32:$dst), 0>;
3193 def : InstAlias<"scasq\t$dst", (SCASQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
3194 def : InstAlias<"scas\t{$dst, %al|al, $dst}", (SCASB dstidx8:$dst), 0>;
3195 def : InstAlias<"scas\t{$dst, %ax|ax, $dst}", (SCASW dstidx16:$dst), 0>;
3196 def : InstAlias<"scas\t{$dst, %eax|eax, $dst}", (SCASL dstidx32:$dst), 0>;
3197 def : InstAlias<"scas\t{$dst, %rax|rax, $dst}", (SCASQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
3198 def : InstAlias<"scas\t$dst", (SCASB dstidx8:$dst), 0, "intel">;
3199 def : InstAlias<"scas\t$dst", (SCASW dstidx16:$dst), 0, "intel">;
3200 def : InstAlias<"scas\t$dst", (SCASL dstidx32:$dst), 0, "intel">;
3201 def : InstAlias<"scas\t$dst", (SCASQ dstidx64:$dst), 0, "intel">, Requires<[In64BitMode]>;
3203 // cmps aliases. Mnemonic suffix being omitted because it's implicit
3204 // in the destination.
3205 def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSB dstidx8:$dst, srcidx8:$src), 0, "intel">;
3206 def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSW dstidx16:$dst, srcidx16:$src), 0, "intel">;
3207 def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSL dstidx32:$dst, srcidx32:$src), 0, "intel">;
3208 def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSQ dstidx64:$dst, srcidx64:$src), 0, "intel">, Requires<[In64BitMode]>;
3210 // movs aliases. Mnemonic suffix being omitted because it's implicit
3211 // in the destination.
3212 def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSB dstidx8:$dst, srcidx8:$src), 0, "intel">;
3213 def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSW dstidx16:$dst, srcidx16:$src), 0, "intel">;
3214 def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSL dstidx32:$dst, srcidx32:$src), 0, "intel">;
3215 def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSQ dstidx64:$dst, srcidx64:$src), 0, "intel">, Requires<[In64BitMode]>;
3217 // div and idiv aliases for explicit A register.
3218 def : InstAlias<"div{b}\t{$src, %al|al, $src}", (DIV8r GR8 :$src)>;
3219 def : InstAlias<"div{w}\t{$src, %ax|ax, $src}", (DIV16r GR16:$src)>;
3220 def : InstAlias<"div{l}\t{$src, %eax|eax, $src}", (DIV32r GR32:$src)>;
3221 def : InstAlias<"div{q}\t{$src, %rax|rax, $src}", (DIV64r GR64:$src)>;
3222 def : InstAlias<"div{b}\t{$src, %al|al, $src}", (DIV8m i8mem :$src)>;
3223 def : InstAlias<"div{w}\t{$src, %ax|ax, $src}", (DIV16m i16mem:$src)>;
3224 def : InstAlias<"div{l}\t{$src, %eax|eax, $src}", (DIV32m i32mem:$src)>;
3225 def : InstAlias<"div{q}\t{$src, %rax|rax, $src}", (DIV64m i64mem:$src)>;
3226 def : InstAlias<"idiv{b}\t{$src, %al|al, $src}", (IDIV8r GR8 :$src)>;
3227 def : InstAlias<"idiv{w}\t{$src, %ax|ax, $src}", (IDIV16r GR16:$src)>;
3228 def : InstAlias<"idiv{l}\t{$src, %eax|eax, $src}", (IDIV32r GR32:$src)>;
3229 def : InstAlias<"idiv{q}\t{$src, %rax|rax, $src}", (IDIV64r GR64:$src)>;
3230 def : InstAlias<"idiv{b}\t{$src, %al|al, $src}", (IDIV8m i8mem :$src)>;
3231 def : InstAlias<"idiv{w}\t{$src, %ax|ax, $src}", (IDIV16m i16mem:$src)>;
3232 def : InstAlias<"idiv{l}\t{$src, %eax|eax, $src}", (IDIV32m i32mem:$src)>;
3233 def : InstAlias<"idiv{q}\t{$src, %rax|rax, $src}", (IDIV64m i64mem:$src)>;
3237 // Various unary fpstack operations default to operating on ST1.
3238 // For example, "fxch" -> "fxch %st(1)"
// These entries are parse-only (the trailing 0 clears the EmitAlias bit, so
// the printer never uses them): the assembler accepts the bare mnemonic and
// supplies %st(1) as the implicit operand.
3239 def : InstAlias<"faddp", (ADD_FPrST0 ST1), 0>;
3240 def : InstAlias<"fadd", (ADD_FPrST0 ST1), 0>;
3241 def : InstAlias<"fsub{|r}p", (SUBR_FPrST0 ST1), 0>;
3242 def : InstAlias<"fsub{r|}p", (SUB_FPrST0 ST1), 0>;
3243 def : InstAlias<"fmul", (MUL_FPrST0 ST1), 0>;
3244 def : InstAlias<"fmulp", (MUL_FPrST0 ST1), 0>;
3245 def : InstAlias<"fdiv{|r}p", (DIVR_FPrST0 ST1), 0>;
3246 def : InstAlias<"fdiv{r|}p", (DIV_FPrST0 ST1), 0>;
3247 def : InstAlias<"fxch", (XCH_F ST1), 0>;
3248 def : InstAlias<"fcom", (COM_FST0r ST1), 0>;
3249 def : InstAlias<"fcomp", (COMP_FST0r ST1), 0>;
3250 def : InstAlias<"fcomi", (COM_FIr ST1), 0>;
3251 def : InstAlias<"fcompi", (COM_FIPr ST1), 0>;
3252 def : InstAlias<"fucom", (UCOM_Fr ST1), 0>;
3253 def : InstAlias<"fucomp", (UCOM_FPr ST1), 0>;
3254 def : InstAlias<"fucomi", (UCOM_FIr ST1), 0>;
3255 def : InstAlias<"fucompi", (UCOM_FIPr ST1), 0>;
3257 // Handle fmul/fadd/fsub/fdiv instructions with explicitly written st(0) op.
3258 // For example, "fadd %st(4), %st(0)" -> "fadd %st(4)". We also disambiguate
3259 // instructions like "fadd %st(0), %st(0)" as "fadd %st(0)" for consistency with
// gas.
// FpUnaryAlias - For Mnemonic, accept both "<mnem> $op, %st(0)" and
// "<mnem> %st(0), %st(0)" as spellings of Inst; EmitAlias controls whether
// the printer may use the alias form.
// NOTE(review): the closing brace of this multiclass appears to be missing
// from this excerpt — confirm against the full file.
3261 multiclass FpUnaryAlias<string Mnemonic, Instruction Inst, bit EmitAlias = 1> {
3262 def : InstAlias<!strconcat(Mnemonic, "\t{$op, %st(0)|st(0), $op}"),
3263 (Inst RST:$op), EmitAlias>;
3264 def : InstAlias<!strconcat(Mnemonic, "\t{%st(0), %st(0)|st(0), st(0)}"),
3265 (Inst ST0), EmitAlias>;
3268 defm : FpUnaryAlias<"fadd", ADD_FST0r>;
3269 defm : FpUnaryAlias<"faddp", ADD_FPrST0, 0>;
3270 defm : FpUnaryAlias<"fsub", SUB_FST0r>;
3271 defm : FpUnaryAlias<"fsub{|r}p", SUBR_FPrST0>;
3272 defm : FpUnaryAlias<"fsubr", SUBR_FST0r>;
3273 defm : FpUnaryAlias<"fsub{r|}p", SUB_FPrST0>;
3274 defm : FpUnaryAlias<"fmul", MUL_FST0r>;
3275 defm : FpUnaryAlias<"fmulp", MUL_FPrST0>;
3276 defm : FpUnaryAlias<"fdiv", DIV_FST0r>;
3277 defm : FpUnaryAlias<"fdiv{|r}p", DIVR_FPrST0>;
3278 defm : FpUnaryAlias<"fdivr", DIVR_FST0r>;
3279 defm : FpUnaryAlias<"fdiv{r|}p", DIV_FPrST0>;
3280 defm : FpUnaryAlias<"fcomi", COM_FIr, 0>;
3281 defm : FpUnaryAlias<"fucomi", UCOM_FIr, 0>;
3282 defm : FpUnaryAlias<"fcompi", COM_FIPr>;
3283 defm : FpUnaryAlias<"fucompi", UCOM_FIPr>;
3286 // Handle "f{mulp,addp} st(0), $op" the same as "f{mulp,addp} $op", since they
3287 // commute. We also allow fdiv[r]p/fsubrp even though they don't commute,
3288 // solely because gas supports it.
3289 def : InstAlias<"faddp\t{%st(0), $op|$op, st(0)}", (ADD_FPrST0 RST:$op), 0>;
3290 def : InstAlias<"fmulp\t{%st(0), $op|$op, st(0)}", (MUL_FPrST0 RST:$op)>;
3291 def : InstAlias<"fsub{|r}p\t{%st(0), $op|$op, st(0)}", (SUBR_FPrST0 RST:$op)>;
3292 def : InstAlias<"fsub{r|}p\t{%st(0), $op|$op, st(0)}", (SUB_FPrST0 RST:$op)>;
3293 def : InstAlias<"fdiv{|r}p\t{%st(0), $op|$op, st(0)}", (DIVR_FPrST0 RST:$op)>;
3294 def : InstAlias<"fdiv{r|}p\t{%st(0), $op|$op, st(0)}", (DIV_FPrST0 RST:$op)>;
// Bare "fnstsw" stores the status word to AX.
3296 def : InstAlias<"fnstsw" , (FNSTSW16r), 0>;
3298 // lcall and ljmp aliases. This seems to be an odd mapping in 64-bit mode, but
3299 // this is compatible with what GAS does.
3300 def : InstAlias<"lcall\t$seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg), 0>, Requires<[In32BitMode]>;
3301 def : InstAlias<"ljmp\t$seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg), 0>, Requires<[In32BitMode]>;
3302 def : InstAlias<"lcall\t{*}$dst", (FARCALL32m opaquemem:$dst), 0>, Requires<[Not16BitMode]>;
3303 def : InstAlias<"ljmp\t{*}$dst", (FARJMP32m opaquemem:$dst), 0>, Requires<[Not16BitMode]>;
3304 def : InstAlias<"lcall\t$seg, $off", (FARCALL16i i16imm:$off, i16imm:$seg), 0>, Requires<[In16BitMode]>;
3305 def : InstAlias<"ljmp\t$seg, $off", (FARJMP16i i16imm:$off, i16imm:$seg), 0>, Requires<[In16BitMode]>;
3306 def : InstAlias<"lcall\t{*}$dst", (FARCALL16m opaquemem:$dst), 0>, Requires<[In16BitMode]>;
3307 def : InstAlias<"ljmp\t{*}$dst", (FARJMP16m opaquemem:$dst), 0>, Requires<[In16BitMode]>;
// Indirect near jmp through memory, selected per code-size mode.
3309 def : InstAlias<"jmp\t{*}$dst", (JMP64m i64mem:$dst), 0, "att">, Requires<[In64BitMode]>;
3310 def : InstAlias<"jmp\t{*}$dst", (JMP32m i32mem:$dst), 0, "att">, Requires<[In32BitMode]>;
3311 def : InstAlias<"jmp\t{*}$dst", (JMP16m i16mem:$dst), 0, "att">, Requires<[In16BitMode]>;
3314 // "imul <imm>, B" is an alias for "imul <imm>, B, B".
3315 def : InstAlias<"imul{w}\t{$imm, $r|$r, $imm}", (IMUL16rri GR16:$r, GR16:$r, i16imm:$imm), 0>;
3316 def : InstAlias<"imul{w}\t{$imm, $r|$r, $imm}", (IMUL16rri8 GR16:$r, GR16:$r, i16i8imm:$imm), 0>;
3317 def : InstAlias<"imul{l}\t{$imm, $r|$r, $imm}", (IMUL32rri GR32:$r, GR32:$r, i32imm:$imm), 0>;
3318 def : InstAlias<"imul{l}\t{$imm, $r|$r, $imm}", (IMUL32rri8 GR32:$r, GR32:$r, i32i8imm:$imm), 0>;
3319 def : InstAlias<"imul{q}\t{$imm, $r|$r, $imm}", (IMUL64rri32 GR64:$r, GR64:$r, i64i32imm:$imm), 0>;
3320 def : InstAlias<"imul{q}\t{$imm, $r|$r, $imm}", (IMUL64rri8 GR64:$r, GR64:$r, i64i8imm:$imm), 0>;
3322 // ins aliases. Accept the mnemonic suffix being omitted because it's implicit
3323 // in the destination.
3324 def : InstAlias<"ins\t{%dx, $dst|$dst, dx}", (INSB dstidx8:$dst), 0, "intel">;
3325 def : InstAlias<"ins\t{%dx, $dst|$dst, dx}", (INSW dstidx16:$dst), 0, "intel">;
3326 def : InstAlias<"ins\t{%dx, $dst|$dst, dx}", (INSL dstidx32:$dst), 0, "intel">;
3328 // outs aliases. Accept the mnemonic suffix being omitted because it's implicit
// in the source.
3330 def : InstAlias<"outs\t{$src, %dx|dx, $src}", (OUTSB srcidx8:$src), 0, "intel">;
3331 def : InstAlias<"outs\t{$src, %dx|dx, $src}", (OUTSW srcidx16:$src), 0, "intel">;
3332 def : InstAlias<"outs\t{$src, %dx|dx, $src}", (OUTSL srcidx32:$src), 0, "intel">;
3334 // inb %dx -> inb %al, %dx
3335 def : InstAlias<"inb\t{%dx|dx}", (IN8rr), 0>;
3336 def : InstAlias<"inw\t{%dx|dx}", (IN16rr), 0>;
3337 def : InstAlias<"inl\t{%dx|dx}", (IN32rr), 0>;
3338 def : InstAlias<"inb\t$port", (IN8ri u8imm:$port), 0>;
3339 def : InstAlias<"inw\t$port", (IN16ri u8imm:$port), 0>;
3340 def : InstAlias<"inl\t$port", (IN32ri u8imm:$port), 0>;
3343 // jmp and call aliases for lcall and ljmp. jmp $42,$5 -> ljmp
3344 def : InstAlias<"call\t$seg, $off", (FARCALL16i i16imm:$off, i16imm:$seg)>, Requires<[In16BitMode]>;
3345 def : InstAlias<"jmp\t$seg, $off", (FARJMP16i i16imm:$off, i16imm:$seg)>, Requires<[In16BitMode]>;
3346 def : InstAlias<"call\t$seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg)>, Requires<[In32BitMode]>;
3347 def : InstAlias<"jmp\t$seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg)>, Requires<[In32BitMode]>;
3348 def : InstAlias<"callw\t$seg, $off", (FARCALL16i i16imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
3349 def : InstAlias<"jmpw\t$seg, $off", (FARJMP16i i16imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
3350 def : InstAlias<"calll\t$seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
3351 def : InstAlias<"jmpl\t$seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
3353 // Match 'movq <largeimm>, <reg>' as an alias for movabsq.
3354 def : InstAlias<"mov{q}\t{$imm, $reg|$reg, $imm}", (MOV64ri GR64:$reg, i64imm:$imm), 0>;
3356 // Match 'movd GR64, MMX' as an alias for movq to be compatible with gas,
3357 // which supports this due to an old AMD documentation bug when 64-bit mode was
// first introduced.
3359 def : InstAlias<"movd\t{$src, $dst|$dst, $src}",
3360 (MMX_MOVD64to64rr VR64:$dst, GR64:$src), 0>;
3361 def : InstAlias<"movd\t{$src, $dst|$dst, $src}",
3362 (MMX_MOVD64from64rr GR64:$dst, VR64:$src), 0>;
// movsx without a suffix: AT&T-only aliases onto the explicit MOVSX forms.
3365 def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX16rr8 GR16:$dst, GR8:$src), 0, "att">;
3366 def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX16rm8 GR16:$dst, i8mem:$src), 0, "att">;
3367 def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX32rr8 GR32:$dst, GR8:$src), 0, "att">;
3368 def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX32rr16 GR32:$dst, GR16:$src), 0, "att">;
3369 def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX64rr8 GR64:$dst, GR8:$src), 0, "att">;
3370 def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX64rr16 GR64:$dst, GR16:$src), 0, "att">;
3371 def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX64rr32 GR64:$dst, GR32:$src), 0, "att">;
// movzx without a suffix: AT&T-only aliases onto the explicit MOVZX forms.
3374 def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX16rr8 GR16:$dst, GR8:$src), 0, "att">;
3375 def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX16rm8 GR16:$dst, i8mem:$src), 0, "att">;
3376 def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX32rr8 GR32:$dst, GR8:$src), 0, "att">;
3377 def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX32rr16 GR32:$dst, GR16:$src), 0, "att">;
3378 def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX64rr8 GR64:$dst, GR8:$src), 0, "att">;
3379 def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX64rr16 GR64:$dst, GR16:$src), 0, "att">;
3380 // Note: No GR32->GR64 movzx form.
3382 // outb %dx -> outb %al, %dx
3383 def : InstAlias<"outb\t{%dx|dx}", (OUT8rr), 0>;
3384 def : InstAlias<"outw\t{%dx|dx}", (OUT16rr), 0>;
3385 def : InstAlias<"outl\t{%dx|dx}", (OUT32rr), 0>;
3386 def : InstAlias<"outb\t$port", (OUT8ir u8imm:$port), 0>;
3387 def : InstAlias<"outw\t$port", (OUT16ir u8imm:$port), 0>;
3388 def : InstAlias<"outl\t$port", (OUT32ir u8imm:$port), 0>;
3390 // 'sldt <mem>' can be encoded with either sldtw or sldtq with the same
3391 // effect (both store to a 16-bit mem). Force to sldtw to avoid ambiguity
3392 // errors, since its encoding is the most compact.
3393 def : InstAlias<"sldt $mem", (SLDT16m i16mem:$mem), 0>;
3395 // shld/shrd op,op -> shld op, op, CL
3396 def : InstAlias<"shld{w}\t{$r2, $r1|$r1, $r2}", (SHLD16rrCL GR16:$r1, GR16:$r2), 0>;
3397 def : InstAlias<"shld{l}\t{$r2, $r1|$r1, $r2}", (SHLD32rrCL GR32:$r1, GR32:$r2), 0>;
3398 def : InstAlias<"shld{q}\t{$r2, $r1|$r1, $r2}", (SHLD64rrCL GR64:$r1, GR64:$r2), 0>;
3399 def : InstAlias<"shrd{w}\t{$r2, $r1|$r1, $r2}", (SHRD16rrCL GR16:$r1, GR16:$r2), 0>;
3400 def : InstAlias<"shrd{l}\t{$r2, $r1|$r1, $r2}", (SHRD32rrCL GR32:$r1, GR32:$r2), 0>;
3401 def : InstAlias<"shrd{q}\t{$r2, $r1|$r1, $r2}", (SHRD64rrCL GR64:$r1, GR64:$r2), 0>;
// Memory-destination forms of the same two-operand shld/shrd shorthand.
3403 def : InstAlias<"shld{w}\t{$reg, $mem|$mem, $reg}", (SHLD16mrCL i16mem:$mem, GR16:$reg), 0>;
3404 def : InstAlias<"shld{l}\t{$reg, $mem|$mem, $reg}", (SHLD32mrCL i32mem:$mem, GR32:$reg), 0>;
3405 def : InstAlias<"shld{q}\t{$reg, $mem|$mem, $reg}", (SHLD64mrCL i64mem:$mem, GR64:$reg), 0>;
3406 def : InstAlias<"shrd{w}\t{$reg, $mem|$mem, $reg}", (SHRD16mrCL i16mem:$mem, GR16:$reg), 0>;
3407 def : InstAlias<"shrd{l}\t{$reg, $mem|$mem, $reg}", (SHRD32mrCL i32mem:$mem, GR32:$reg), 0>;
3408 def : InstAlias<"shrd{q}\t{$reg, $mem|$mem, $reg}", (SHRD64mrCL i64mem:$mem, GR64:$reg), 0>;
// The multiclass below is intentionally disabled (wrapped in /* ... */); see
// the FIXME.  NOTE(review): the closing "*/" does not appear in this
// excerpt — confirm it is present in the full file before editing here.
3410 /* FIXME: This is disabled because the asm matcher is currently incapable of
3411 * matching a fixed immediate like $1.
3412 // "shl X, $1" is an alias for "shl X".
3413 multiclass ShiftRotateByOneAlias<string Mnemonic, string Opc> {
3414 def : InstAlias<!strconcat(Mnemonic, "b $op, $$1"),
3415 (!cast<Instruction>(!strconcat(Opc, "8r1")) GR8:$op)>;
3416 def : InstAlias<!strconcat(Mnemonic, "w $op, $$1"),
3417 (!cast<Instruction>(!strconcat(Opc, "16r1")) GR16:$op)>;
3418 def : InstAlias<!strconcat(Mnemonic, "l $op, $$1"),
3419 (!cast<Instruction>(!strconcat(Opc, "32r1")) GR32:$op)>;
3420 def : InstAlias<!strconcat(Mnemonic, "q $op, $$1"),
3421 (!cast<Instruction>(!strconcat(Opc, "64r1")) GR64:$op)>;
3422 def : InstAlias<!strconcat(Mnemonic, "b $op, $$1"),
3423 (!cast<Instruction>(!strconcat(Opc, "8m1")) i8mem:$op)>;
3424 def : InstAlias<!strconcat(Mnemonic, "w $op, $$1"),
3425 (!cast<Instruction>(!strconcat(Opc, "16m1")) i16mem:$op)>;
3426 def : InstAlias<!strconcat(Mnemonic, "l $op, $$1"),
3427 (!cast<Instruction>(!strconcat(Opc, "32m1")) i32mem:$op)>;
3428 def : InstAlias<!strconcat(Mnemonic, "q $op, $$1"),
3429 (!cast<Instruction>(!strconcat(Opc, "64m1")) i64mem:$op)>;
3432 defm : ShiftRotateByOneAlias<"rcl", "RCL">;
3433 defm : ShiftRotateByOneAlias<"rcr", "RCR">;
3434 defm : ShiftRotateByOneAlias<"rol", "ROL">;
3435 defm : ShiftRotateByOneAlias<"ror", "ROR">;
3438 // test: We accept "testX <reg>, <mem>" and "testX <mem>, <reg>" as synonyms.
3439 def : InstAlias<"test{b}\t{$mem, $val|$val, $mem}",
3440 (TEST8mr i8mem :$mem, GR8 :$val), 0>;
3441 def : InstAlias<"test{w}\t{$mem, $val|$val, $mem}",
3442 (TEST16mr i16mem:$mem, GR16:$val), 0>;
3443 def : InstAlias<"test{l}\t{$mem, $val|$val, $mem}",
3444 (TEST32mr i32mem:$mem, GR32:$val), 0>;
3445 def : InstAlias<"test{q}\t{$mem, $val|$val, $mem}",
3446 (TEST64mr i64mem:$mem, GR64:$val), 0>;
3448 // xchg: We accept "xchgX <reg>, <mem>" and "xchgX <mem>, <reg>" as synonyms.
3449 def : InstAlias<"xchg{b}\t{$mem, $val|$val, $mem}",
3450 (XCHG8rm GR8 :$val, i8mem :$mem), 0>;
3451 def : InstAlias<"xchg{w}\t{$mem, $val|$val, $mem}",
3452 (XCHG16rm GR16:$val, i16mem:$mem), 0>;
3453 def : InstAlias<"xchg{l}\t{$mem, $val|$val, $mem}",
3454 (XCHG32rm GR32:$val, i32mem:$mem), 0>;
3455 def : InstAlias<"xchg{q}\t{$mem, $val|$val, $mem}",
3456 (XCHG64rm GR64:$val, i64mem:$mem), 0>;
3458 // xchg: We accept "xchgX <reg>, %eax" and "xchgX %eax, <reg>" as synonyms.
3459 def : InstAlias<"xchg{w}\t{%ax, $src|$src, ax}", (XCHG16ar GR16:$src), 0>;
3460 def : InstAlias<"xchg{l}\t{%eax, $src|$src, eax}", (XCHG32ar GR32:$src), 0>;
3461 def : InstAlias<"xchg{q}\t{%rax, $src|$src, rax}", (XCHG64ar GR64:$src), 0>;
3463 // In 64-bit mode, xchg %eax, %eax can't be encoded with the 0x90 opcode we
3464 // would get by default because it's defined as NOP. But xchg %eax, %eax implies
3465 // implicit zeroing of the upper 32 bits. So alias to the longer encoding.
3466 def : InstAlias<"xchg{l}\t{%eax, %eax|eax, eax}",
3467 (XCHG32rr EAX, EAX), 0>, Requires<[In64BitMode]>;
3469 // xchg %rax, %rax is a nop in x86-64 and can be encoded as such. Without this
3470 // we emit an unneeded REX.w prefix.
3471 def : InstAlias<"xchg{q}\t{%rax, %rax|rax, rax}", (NOOP), 0>;
3473 // These aliases exist to get the parser to prioritize matching 8-bit
3474 // immediate encodings over matching the implicit ax/eax/rax encodings. By
3475 // explicitly mentioning the A register here, these entries will be ordered
3476 // first due to the more explicit immediate type.
3477 def : InstAlias<"adc{w}\t{$imm, %ax|ax, $imm}", (ADC16ri8 AX, i16i8imm:$imm), 0>;
3478 def : InstAlias<"add{w}\t{$imm, %ax|ax, $imm}", (ADD16ri8 AX, i16i8imm:$imm), 0>;
3479 def : InstAlias<"and{w}\t{$imm, %ax|ax, $imm}", (AND16ri8 AX, i16i8imm:$imm), 0>;
3480 def : InstAlias<"cmp{w}\t{$imm, %ax|ax, $imm}", (CMP16ri8 AX, i16i8imm:$imm), 0>;
3481 def : InstAlias<"or{w}\t{$imm, %ax|ax, $imm}", (OR16ri8 AX, i16i8imm:$imm), 0>;
3482 def : InstAlias<"sbb{w}\t{$imm, %ax|ax, $imm}", (SBB16ri8 AX, i16i8imm:$imm), 0>;
3483 def : InstAlias<"sub{w}\t{$imm, %ax|ax, $imm}", (SUB16ri8 AX, i16i8imm:$imm), 0>;
3484 def : InstAlias<"xor{w}\t{$imm, %ax|ax, $imm}", (XOR16ri8 AX, i16i8imm:$imm), 0>;
// Same, for the 32-bit forms against EAX.
3486 def : InstAlias<"adc{l}\t{$imm, %eax|eax, $imm}", (ADC32ri8 EAX, i32i8imm:$imm), 0>;
3487 def : InstAlias<"add{l}\t{$imm, %eax|eax, $imm}", (ADD32ri8 EAX, i32i8imm:$imm), 0>;
3488 def : InstAlias<"and{l}\t{$imm, %eax|eax, $imm}", (AND32ri8 EAX, i32i8imm:$imm), 0>;
3489 def : InstAlias<"cmp{l}\t{$imm, %eax|eax, $imm}", (CMP32ri8 EAX, i32i8imm:$imm), 0>;
3490 def : InstAlias<"or{l}\t{$imm, %eax|eax, $imm}", (OR32ri8 EAX, i32i8imm:$imm), 0>;
3491 def : InstAlias<"sbb{l}\t{$imm, %eax|eax, $imm}", (SBB32ri8 EAX, i32i8imm:$imm), 0>;
3492 def : InstAlias<"sub{l}\t{$imm, %eax|eax, $imm}", (SUB32ri8 EAX, i32i8imm:$imm), 0>;
3493 def : InstAlias<"xor{l}\t{$imm, %eax|eax, $imm}", (XOR32ri8 EAX, i32i8imm:$imm), 0>;
// Same, for the 64-bit forms against RAX.
3495 def : InstAlias<"adc{q}\t{$imm, %rax|rax, $imm}", (ADC64ri8 RAX, i64i8imm:$imm), 0>;
3496 def : InstAlias<"add{q}\t{$imm, %rax|rax, $imm}", (ADD64ri8 RAX, i64i8imm:$imm), 0>;
3497 def : InstAlias<"and{q}\t{$imm, %rax|rax, $imm}", (AND64ri8 RAX, i64i8imm:$imm), 0>;
3498 def : InstAlias<"cmp{q}\t{$imm, %rax|rax, $imm}", (CMP64ri8 RAX, i64i8imm:$imm), 0>;
3499 def : InstAlias<"or{q}\t{$imm, %rax|rax, $imm}", (OR64ri8 RAX, i64i8imm:$imm), 0>;
3500 def : InstAlias<"sbb{q}\t{$imm, %rax|rax, $imm}", (SBB64ri8 RAX, i64i8imm:$imm), 0>;
3501 def : InstAlias<"sub{q}\t{$imm, %rax|rax, $imm}", (SUB64ri8 RAX, i64i8imm:$imm), 0>;
3502 def : InstAlias<"xor{q}\t{$imm, %rax|rax, $imm}", (XOR64ri8 RAX, i64i8imm:$imm), 0>;