1 //===-- X86InstrInfo.td - Main X86 Instruction Definition --*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the X86 instruction set, defining the instructions, and
11 // properties of the instructions which are needed for code generation, machine
12 // code emission, and analysis.
14 //===----------------------------------------------------------------------===//
16 //===----------------------------------------------------------------------===//
17 // X86 specific DAG Nodes.
// SelectionDAG type profiles for X86 arithmetic/compare nodes.
// NOTE(review): this copy of the file has some original lines missing, so a
// few constraint lists below appear truncated — verify against the upstream
// X86InstrInfo.td before editing these records.

// Double-shift (SHLD/SHRD): 1 result, 3 operands; result and both value
// operands share one integer type, operand 3 (shift amount) is any integer.
20 def SDTIntShiftDOp: SDTypeProfile<1, 3,
21 [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
22 SDTCisInt<0>, SDTCisInt<3>]>;
// Compare/test: i32 result (the flags value) from two like-typed operands.
24 def SDTX86CmpTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisSameAs<1, 2>]>;
// FP compare taking an i8 condition-code immediate as operand 3.
26 def SDTX86Cmps : SDTypeProfile<1, 3, [SDTCisFP<0>, SDTCisSameAs<1, 2>, SDTCisVT<3, i8>]>;
27 //def SDTX86Cmpss : SDTypeProfile<1, 3, [SDTCisVT<0, f32>, SDTCisSameAs<1, 2>, SDTCisVT<3, i8>]>;
// CMOV: result and the two value operands share a type; operand 3 is the i8
// condition code, operand 4 the i32 flags input.
29 def SDTX86Cmov : SDTypeProfile<1, 4,
30 [SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>,
31 SDTCisVT<3, i8>, SDTCisVT<4, i32>]>;
33 // Unary and binary operator instructions that set EFLAGS as a side-effect.
// Each produces two results: the integer value and an i32 flags result.
// NOTE(review): the opening of each constraint list below is missing in this
// copy (inner line numbering skips) — records are incomplete as shown.
34 def SDTUnaryArithWithFlags : SDTypeProfile<2, 1,
36 SDTCisInt<0>, SDTCisVT<1, i32>]>;
38 def SDTBinaryArithWithFlags : SDTypeProfile<2, 2,
41 SDTCisInt<0>, SDTCisVT<1, i32>]>;
43 // SDTBinaryArithWithFlagsInOut - RES1, EFLAGS = op LHS, RHS, EFLAGS
44 def SDTBinaryArithWithFlagsInOut : SDTypeProfile<2, 3,
50 // RES1, RES2, FLAGS = op LHS, RHS
51 def SDT2ResultBinaryArithWithFlags : SDTypeProfile<3, 2,
55 SDTCisInt<0>, SDTCisVT<1, i32>]>;
// Conditional branch: no results; operands are a basic-block target
// (OtherVT), an i8 condition code, and an i32 flags value.
56 def SDTX86BrCond : SDTypeProfile<0, 3,
57 [SDTCisVT<0, OtherVT>,
58 SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;
// SETCC variants: i8 condition code plus i32 flags input.
// NOTE(review): the first constraint-list line of each record is missing in
// this copy — verify against upstream before editing.
60 def SDTX86SetCC : SDTypeProfile<1, 2,
62 SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;
63 def SDTX86SetCC_C : SDTypeProfile<1, 2,
65 SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;
// SAHF: i32 result from an i8 operand.
67 def SDTX86sahf : SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisVT<1, i8>]>;
// RDRAND/RDSEED-style profile: two results (an integer and an i32), no operands.
69 def SDTX86rdrand : SDTypeProfile<2, 0, [SDTCisInt<0>, SDTCisVT<1, i32>]>;
// Compare-and-swap profiles. SDTX86cas is truncated in this copy.
71 def SDTX86cas : SDTypeProfile<0, 3, [SDTCisPtrTy<0>, SDTCisInt<1>,
73 def SDTX86caspair : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
// cmpxchg8b/16b with explicit EBX/RBX save: result plus pointer and two
// i32/i64 value operands.
74 def SDTX86caspairSaveEbx8 : SDTypeProfile<1, 3,
75 [SDTCisVT<0, i32>, SDTCisPtrTy<1>,
76 SDTCisVT<2, i32>, SDTCisVT<3, i32>]>;
77 def SDTX86caspairSaveRbx16 : SDTypeProfile<1, 3,
78 [SDTCisVT<0, i64>, SDTCisPtrTy<1>,
79 SDTCisVT<2, i64>, SDTCisVT<3, i64>]>;
// Locked RMW arithmetic: i32 result plus two operands.
// NOTE(review): record truncated in this copy (constraint tail missing).
81 def SDTLockBinaryArithWithFlags : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
// Return: variadic operand list beginning with an i32 (bytes to pop).
85 def SDTX86Ret : SDTypeProfile<0, -1, [SDTCisVT<0, i32>]>;
// Call-sequence markers. NOTE(review): both truncated in this copy.
87 def SDT_X86CallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>,
89 def SDT_X86CallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>,
// Call: variadic operands, first being the pointer-sized callee.
92 def SDT_X86Call : SDTypeProfile<0, -1, [SDTCisVT<0, iPTR>]>;
// va_start XMM-save / va_arg profiles. NOTE(review): both truncated here.
94 def SDT_X86VASTART_SAVE_XMM_REGS : SDTypeProfile<0, -1, [SDTCisVT<0, i8>,
98 def SDT_X86VAARG_64 : SDTypeProfile<1, -1, [SDTCisPtrTy<0>,
// REP string ops take only a basic-block-typed operand.
104 def SDTX86RepStr : SDTypeProfile<0, 1, [SDTCisVT<0, OtherVT>]>;
// No results, no operands (e.g. RDTSC-style chained nodes).
106 def SDTX86Void : SDTypeProfile<0, 0, []>;
// Wrapper around a pointer-typed symbol operand.
108 def SDTX86Wrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>;
// TLS / alloca / EH / tail-call profiles: single (or two) typed operands.
110 def SDT_X86TLSADDR : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
112 def SDT_X86TLSBASEADDR : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
114 def SDT_X86TLSCALL : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
116 def SDT_X86WIN_ALLOCA : SDTypeProfile<0, 1, [SDTCisVT<0, iPTR>]>;
118 def SDT_X86SEG_ALLOCA : SDTypeProfile<1, 1, [SDTCisVT<0, iPTR>, SDTCisVT<1, iPTR>]>;
120 def SDT_X86EHRET : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
122 def SDT_X86TCRET : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisVT<1, i32>]>;
// Memory barrier: no results, no operands.
124 def SDT_X86MEMBARRIER : SDTypeProfile<0, 0, []>;
// X86-specific SelectionDAG nodes. Each def binds an X86ISD opcode to a
// type profile and (optionally) node properties (chain, glue, memory).
126 def X86MemBarrier : SDNode<"X86ISD::MEMBARRIER", SDT_X86MEMBARRIER,
127 [SDNPHasChain,SDNPSideEffect]>;
// NOTE(review): property list for X86MFence is missing in this copy.
128 def X86MFence : SDNode<"X86ISD::MFENCE", SDT_X86MEMBARRIER,
// Bit-scan forward/reverse; also produce a flags result (see profile).
132 def X86bsf : SDNode<"X86ISD::BSF", SDTUnaryArithWithFlags>;
133 def X86bsr : SDNode<"X86ISD::BSR", SDTUnaryArithWithFlags>;
// Double-precision shifts.
134 def X86shld : SDNode<"X86ISD::SHLD", SDTIntShiftDOp>;
135 def X86shrd : SDNode<"X86ISD::SHRD", SDTIntShiftDOp>;
137 def X86cmp : SDNode<"X86ISD::CMP" , SDTX86CmpTest>;
138 def X86bt : SDNode<"X86ISD::BT", SDTX86CmpTest>;
140 def X86cmov : SDNode<"X86ISD::CMOV", SDTX86Cmov>;
// NOTE(review): X86brcond's property list is missing in this copy.
141 def X86brcond : SDNode<"X86ISD::BRCOND", SDTX86BrCond,
143 def X86setcc : SDNode<"X86ISD::SETCC", SDTX86SetCC>;
144 def X86setcc_c : SDNode<"X86ISD::SETCC_CARRY", SDTX86SetCC_C>;
146 def X86sahf : SDNode<"X86ISD::SAHF", SDTX86sahf>;
// Hardware random number sources: chained with side effects so they are
// not CSE'd or dropped.
148 def X86rdrand : SDNode<"X86ISD::RDRAND", SDTX86rdrand,
149 [SDNPHasChain, SDNPSideEffect]>;
151 def X86rdseed : SDNode<"X86ISD::RDSEED", SDTX86rdrand,
152 [SDNPHasChain, SDNPSideEffect]>;
// Locked compare-and-exchange family: all touch memory and use glue.
154 def X86cas : SDNode<"X86ISD::LCMPXCHG_DAG", SDTX86cas,
155 [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
156 SDNPMayLoad, SDNPMemOperand]>;
157 def X86cas8 : SDNode<"X86ISD::LCMPXCHG8_DAG", SDTX86caspair,
158 [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
159 SDNPMayLoad, SDNPMemOperand]>;
160 def X86cas16 : SDNode<"X86ISD::LCMPXCHG16_DAG", SDTX86caspair,
161 [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
162 SDNPMayLoad, SDNPMemOperand]>;
163 def X86cas8save_ebx : SDNode<"X86ISD::LCMPXCHG8_SAVE_EBX_DAG",
164 SDTX86caspairSaveEbx8,
165 [SDNPHasChain, SDNPInGlue, SDNPOutGlue,
166 SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
167 def X86cas16save_rbx : SDNode<"X86ISD::LCMPXCHG16_SAVE_RBX_DAG",
168 SDTX86caspairSaveRbx16,
169 [SDNPHasChain, SDNPInGlue, SDNPOutGlue,
170 SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
// Function return (variadic: may carry return-value registers) and IRET.
172 def X86retflag : SDNode<"X86ISD::RET_FLAG", SDTX86Ret,
173 [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
174 def X86iret : SDNode<"X86ISD::IRET", SDTX86Ret,
175 [SDNPHasChain, SDNPOptInGlue]>;
// varargs support: save XMM argument registers at va_start.
177 def X86vastart_save_xmm_regs :
178 SDNode<"X86ISD::VASTART_SAVE_XMM_REGS",
179 SDT_X86VASTART_SAVE_XMM_REGS,
180 [SDNPHasChain, SDNPVariadic]>;
// NOTE(review): the "def X86vaarg_64 :" header line is missing in this copy;
// the record below is the tail of that definition.
182 SDNode<"X86ISD::VAARG_64", SDT_X86VAARG_64,
183 [SDNPHasChain, SDNPMayLoad, SDNPMayStore,
// Call-sequence bracketing nodes (target-independent opcodes).
185 def X86callseq_start :
186 SDNode<"ISD::CALLSEQ_START", SDT_X86CallSeqStart,
187 [SDNPHasChain, SDNPOutGlue]>;
// NOTE(review): the "def X86callseq_end :" header line is missing here.
189 SDNode<"ISD::CALLSEQ_END", SDT_X86CallSeqEnd,
190 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
// Direct/indirect call. NOTE(review): property list truncated in this copy.
192 def X86call : SDNode<"X86ISD::CALL", SDT_X86Call,
193 [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
// REP STOS/MOVS string operations; glued, touch memory.
196 def X86rep_stos: SDNode<"X86ISD::REP_STOS", SDTX86RepStr,
197 [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore]>;
198 def X86rep_movs: SDNode<"X86ISD::REP_MOVS", SDTX86RepStr,
199 [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
// Time-stamp / performance counter reads: side-effecting, chained.
202 def X86rdtsc : SDNode<"X86ISD::RDTSC_DAG", SDTX86Void,
203 [SDNPHasChain, SDNPOutGlue, SDNPSideEffect]>;
204 def X86rdtscp : SDNode<"X86ISD::RDTSCP_DAG", SDTX86Void,
205 [SDNPHasChain, SDNPOutGlue, SDNPSideEffect]>;
206 def X86rdpmc : SDNode<"X86ISD::RDPMC_DAG", SDTX86Void,
207 [SDNPHasChain, SDNPOutGlue, SDNPSideEffect]>;
// Address wrappers for symbols (absolute and RIP-relative forms).
209 def X86Wrapper : SDNode<"X86ISD::Wrapper", SDTX86Wrapper>;
210 def X86WrapperRIP : SDNode<"X86ISD::WrapperRIP", SDTX86Wrapper>;
// NOTE(review): X86RecoverFrameAlloc's record is truncated in this copy.
212 def X86RecoverFrameAlloc : SDNode<"ISD::LOCAL_RECOVER",
213 SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
// TLS address computation nodes.
216 def X86tlsaddr : SDNode<"X86ISD::TLSADDR", SDT_X86TLSADDR,
217 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
219 def X86tlsbaseaddr : SDNode<"X86ISD::TLSBASEADDR", SDT_X86TLSBASEADDR,
220 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
// Exception-handling return / SjLj nodes.
// NOTE(review): X86ehret's property list is missing in this copy.
222 def X86ehret : SDNode<"X86ISD::EH_RETURN", SDT_X86EHRET,
225 def X86eh_sjlj_setjmp : SDNode<"X86ISD::EH_SJLJ_SETJMP",
226 SDTypeProfile<1, 1, [SDTCisInt<0>,
228 [SDNPHasChain, SDNPSideEffect]>;
229 def X86eh_sjlj_longjmp : SDNode<"X86ISD::EH_SJLJ_LONGJMP",
230 SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>,
231 [SDNPHasChain, SDNPSideEffect]>;
232 def X86eh_sjlj_setup_dispatch : SDNode<"X86ISD::EH_SJLJ_SETUP_DISPATCH",
233 SDTypeProfile<0, 0, []>,
234 [SDNPHasChain, SDNPSideEffect]>;
// Tail-call return.
236 def X86tcret : SDNode<"X86ISD::TC_RETURN", SDT_X86TCRET,
237 [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
// Arithmetic nodes that also produce a flags result.
// NOTE(review): several property lists below are missing in this copy
// (inner line numbering skips) — verify against upstream before editing.
239 def X86add_flag : SDNode<"X86ISD::ADD", SDTBinaryArithWithFlags,
241 def X86sub_flag : SDNode<"X86ISD::SUB", SDTBinaryArithWithFlags>;
242 def X86smul_flag : SDNode<"X86ISD::SMUL", SDTBinaryArithWithFlags,
244 def X86umul_flag : SDNode<"X86ISD::UMUL", SDT2ResultBinaryArithWithFlags,
// Add/sub with carry-in (flags in and out).
246 def X86adc_flag : SDNode<"X86ISD::ADC", SDTBinaryArithWithFlagsInOut>;
247 def X86sbb_flag : SDNode<"X86ISD::SBB", SDTBinaryArithWithFlagsInOut>;
249 def X86inc_flag : SDNode<"X86ISD::INC", SDTUnaryArithWithFlags>;
250 def X86dec_flag : SDNode<"X86ISD::DEC", SDTUnaryArithWithFlags>;
251 def X86or_flag : SDNode<"X86ISD::OR", SDTBinaryArithWithFlags,
253 def X86xor_flag : SDNode<"X86ISD::XOR", SDTBinaryArithWithFlags,
255 def X86and_flag : SDNode<"X86ISD::AND", SDTBinaryArithWithFlags,
// Locked (atomic RMW) arithmetic: all chained and touch memory.
// NOTE(review): each property list's closing line is missing in this copy.
258 def X86lock_add : SDNode<"X86ISD::LADD", SDTLockBinaryArithWithFlags,
259 [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
261 def X86lock_sub : SDNode<"X86ISD::LSUB", SDTLockBinaryArithWithFlags,
262 [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
264 def X86lock_or : SDNode<"X86ISD::LOR", SDTLockBinaryArithWithFlags,
265 [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
267 def X86lock_xor : SDNode<"X86ISD::LXOR", SDTLockBinaryArithWithFlags,
268 [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
270 def X86lock_and : SDNode<"X86ISD::LAND", SDTLockBinaryArithWithFlags,
271 [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
// Misc binary ops and stack-allocation helpers.
274 def X86bextr : SDNode<"X86ISD::BEXTR", SDTIntBinOp>;
276 def X86mul_imm : SDNode<"X86ISD::MUL_IMM", SDTIntBinOp>;
278 def X86WinAlloca : SDNode<"X86ISD::WIN_ALLOCA", SDT_X86WIN_ALLOCA,
279 [SDNPHasChain, SDNPOutGlue]>;
// NOTE(review): X86SegAlloca's property list is missing in this copy.
281 def X86SegAlloca : SDNode<"X86ISD::SEG_ALLOCA", SDT_X86SEG_ALLOCA,
284 def X86TLSCall : SDNode<"X86ISD::TLSCALL", SDT_X86TLSCALL,
285 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
// LWP (Lightweight Profiling) insert-record node.
287 def X86lwpins : SDNode<"X86ISD::LWPINS",
288 SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisInt<1>,
289 SDTCisVT<2, i32>, SDTCisVT<3, i32>]>,
290 [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPSideEffect]>;
292 //===----------------------------------------------------------------------===//
293 // X86 Operand Definitions.
296 // A version of ptr_rc which excludes SP, ESP, and RSP. This is used for
297 // the index operand of an address, to conform to x86 encoding restrictions.
298 def ptr_rc_nosp : PointerLikeRegClass<1>;
300 // *mem - Operand definitions for the funky X86 addressing mode operands.
// Base asm-parser class for memory operands.
// NOTE(review): this record's body is truncated in this copy.
302 def X86MemAsmOperand : AsmOperandClass {
// Size-specific memory asm-operand classes, all rendered the same way and
// all subclasses of X86MemAsmOperand.
305 let RenderMethod = "addMemOperands", SuperClasses = [X86MemAsmOperand] in {
306 def X86Mem8AsmOperand : AsmOperandClass { let Name = "Mem8"; }
307 def X86Mem16AsmOperand : AsmOperandClass { let Name = "Mem16"; }
308 def X86Mem32AsmOperand : AsmOperandClass { let Name = "Mem32"; }
309 def X86Mem64AsmOperand : AsmOperandClass { let Name = "Mem64"; }
310 def X86Mem80AsmOperand : AsmOperandClass { let Name = "Mem80"; }
311 def X86Mem128AsmOperand : AsmOperandClass { let Name = "Mem128"; }
312 def X86Mem256AsmOperand : AsmOperandClass { let Name = "Mem256"; }
313 def X86Mem512AsmOperand : AsmOperandClass { let Name = "Mem512"; }
314 // Gather mem operands
// Names encode memory width and the vector-index register class, e.g.
// Mem64_RC128 = 64-bit memory access indexed by a 128-bit vector register.
315 def X86Mem64_RC128Operand : AsmOperandClass { let Name = "Mem64_RC128"; }
316 def X86Mem128_RC128Operand : AsmOperandClass { let Name = "Mem128_RC128"; }
317 def X86Mem256_RC128Operand : AsmOperandClass { let Name = "Mem256_RC128"; }
318 def X86Mem128_RC256Operand : AsmOperandClass { let Name = "Mem128_RC256"; }
319 def X86Mem256_RC256Operand : AsmOperandClass { let Name = "Mem256_RC256"; }
321 def X86Mem64_RC128XOperand : AsmOperandClass { let Name = "Mem64_RC128X"; }
322 def X86Mem128_RC128XOperand : AsmOperandClass { let Name = "Mem128_RC128X"; }
323 def X86Mem256_RC128XOperand : AsmOperandClass { let Name = "Mem256_RC128X"; }
324 def X86Mem128_RC256XOperand : AsmOperandClass { let Name = "Mem128_RC256X"; }
325 def X86Mem256_RC256XOperand : AsmOperandClass { let Name = "Mem256_RC256X"; }
326 def X86Mem512_RC256XOperand : AsmOperandClass { let Name = "Mem512_RC256X"; }
327 def X86Mem256_RC512Operand : AsmOperandClass { let Name = "Mem256_RC512"; }
328 def X86Mem512_RC512Operand : AsmOperandClass { let Name = "Mem512_RC512"; }
// Absolute-memory asm operand. NOTE(review): record truncated in this copy.
331 def X86AbsMemAsmOperand : AsmOperandClass {
333 let SuperClasses = [X86MemAsmOperand];
// Base class for printed memory operands: standard x86 5-part address
// (base, scale, index, displacement, segment).
336 class X86MemOperand<string printMethod,
337 AsmOperandClass parserMatchClass = X86MemAsmOperand> : Operand<iPTR> {
338 let PrintMethod = printMethod;
339 let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, SEGMENT_REG);
340 let ParserMatchClass = parserMatchClass;
341 let OperandType = "OPERAND_MEMORY";
344 // Gather mem operands
// Same as X86MemOperand, but the index register comes from a vector
// register class RC (for gather/scatter addressing).
345 class X86VMemOperand<RegisterClass RC, string printMethod,
346 AsmOperandClass parserMatchClass>
347 : X86MemOperand<printMethod, parserMatchClass> {
348 let MIOperandInfo = (ops ptr_rc, i8imm, RC, i32imm, SEGMENT_REG);
// Concrete memory operand definitions, by access width.
351 def anymem : X86MemOperand<"printanymem">;
// Opaque operands: printed without a size hint.
353 def opaque32mem : X86MemOperand<"printopaquemem">;
354 def opaque48mem : X86MemOperand<"printopaquemem">;
355 def opaque80mem : X86MemOperand<"printopaquemem">;
356 def opaque512mem : X86MemOperand<"printopaquemem">;
358 def i8mem : X86MemOperand<"printi8mem", X86Mem8AsmOperand>;
359 def i16mem : X86MemOperand<"printi16mem", X86Mem16AsmOperand>;
360 def i32mem : X86MemOperand<"printi32mem", X86Mem32AsmOperand>;
361 def i64mem : X86MemOperand<"printi64mem", X86Mem64AsmOperand>;
362 def i128mem : X86MemOperand<"printi128mem", X86Mem128AsmOperand>;
363 def i256mem : X86MemOperand<"printi256mem", X86Mem256AsmOperand>;
364 def i512mem : X86MemOperand<"printi512mem", X86Mem512AsmOperand>;
365 def f32mem : X86MemOperand<"printf32mem", X86Mem32AsmOperand>;
366 def f64mem : X86MemOperand<"printf64mem", X86Mem64AsmOperand>;
367 def f80mem : X86MemOperand<"printf80mem", X86Mem80AsmOperand>;
368 def f128mem : X86MemOperand<"printf128mem", X86Mem128AsmOperand>;
369 def f256mem : X86MemOperand<"printf256mem", X86Mem256AsmOperand>;
370 def f512mem : X86MemOperand<"printf512mem", X86Mem512AsmOperand>;
372 def v512mem : X86VMemOperand<VR512, "printf512mem", X86Mem512AsmOperand>;
374 // Gather mem operands
// vx*/vy*/vz* = gather/scatter operands indexed by XMM/YMM/ZMM registers;
// the number is the memory access width in bits.
375 def vx64mem : X86VMemOperand<VR128, "printi64mem", X86Mem64_RC128Operand>;
376 def vx128mem : X86VMemOperand<VR128, "printi128mem", X86Mem128_RC128Operand>;
377 def vx256mem : X86VMemOperand<VR128, "printi256mem", X86Mem256_RC128Operand>;
378 def vy128mem : X86VMemOperand<VR256, "printi128mem", X86Mem128_RC256Operand>;
379 def vy256mem : X86VMemOperand<VR256, "printi256mem", X86Mem256_RC256Operand>;
381 def vx64xmem : X86VMemOperand<VR128X, "printi64mem", X86Mem64_RC128XOperand>;
382 def vx128xmem : X86VMemOperand<VR128X, "printi128mem", X86Mem128_RC128XOperand>;
383 def vx256xmem : X86VMemOperand<VR128X, "printi256mem", X86Mem256_RC128XOperand>;
384 def vy128xmem : X86VMemOperand<VR256X, "printi128mem", X86Mem128_RC256XOperand>;
385 def vy256xmem : X86VMemOperand<VR256X, "printi256mem", X86Mem256_RC256XOperand>;
386 def vy512mem : X86VMemOperand<VR256X, "printi512mem", X86Mem512_RC256XOperand>;
387 def vz256xmem : X86VMemOperand<VR512, "printi256mem", X86Mem256_RC512Operand>;
388 def vz512mem : X86VMemOperand<VR512, "printi512mem", X86Mem512_RC512Operand>;
390 // A version of i8mem for use on x86-64 and x32 that uses a NOREX GPR instead
391 // of a plain GPR, so that it doesn't potentially require a REX prefix.
392 def ptr_rc_norex : PointerLikeRegClass<2>;
393 def ptr_rc_norex_nosp : PointerLikeRegClass<3>;
// NOTE(review): i8mem_NOREX's MIOperandInfo tail and closing brace are
// missing in this copy.
395 def i8mem_NOREX : Operand<iPTR> {
396 let PrintMethod = "printi8mem";
397 let MIOperandInfo = (ops ptr_rc_norex, i8imm, ptr_rc_norex_nosp, i32imm,
399 let ParserMatchClass = X86Mem8AsmOperand;
400 let OperandType = "OPERAND_MEMORY";
403 // GPRs available for tailcall.
404 // It represents GR32_TC, GR64_TC or GR64_TCW64.
405 def ptr_rc_tailcall : PointerLikeRegClass<4>;
407 // Special i32mem for addresses of load folding tail calls. These are not
408 // allowed to use callee-saved registers since they must be scheduled
409 // after callee-saved registers are popped.
410 def i32mem_TC : Operand<i32> {
411 let PrintMethod = "printi32mem";
412 let MIOperandInfo = (ops ptr_rc_tailcall, i8imm, ptr_rc_tailcall,
413 i32imm, SEGMENT_REG);
414 let ParserMatchClass = X86Mem32AsmOperand;
415 let OperandType = "OPERAND_MEMORY";
418 // Special i64mem for addresses of load folding tail calls. These are not
419 // allowed to use callee-saved registers since they must be scheduled
420 // after callee-saved registers are popped.
421 def i64mem_TC : Operand<i64> {
422 let PrintMethod = "printi64mem";
423 let MIOperandInfo = (ops ptr_rc_tailcall, i8imm,
424 ptr_rc_tailcall, i32imm, SEGMENT_REG);
425 let ParserMatchClass = X86Mem64AsmOperand;
426 let OperandType = "OPERAND_MEMORY";
// PC-relative immediates, printed as relative displacements.
429 let OperandType = "OPERAND_PCREL",
430 ParserMatchClass = X86AbsMemAsmOperand,
431 PrintMethod = "printPCRelImm" in {
432 def i32imm_pcrel : Operand<i32>;
433 def i16imm_pcrel : Operand<i16>;
435 // Branch targets have OtherVT type and print as pc-relative values.
436 def brtarget : Operand<OtherVT>;
437 def brtarget8 : Operand<OtherVT>;
441 // Special parser to detect 16-bit mode to select 16-bit displacement.
442 def X86AbsMem16AsmOperand : AsmOperandClass {
443 let Name = "AbsMem16";
444 let RenderMethod = "addAbsMemOperands";
445 let SuperClasses = [X86AbsMemAsmOperand];
448 // Branch targets have OtherVT type and print as pc-relative values.
449 let OperandType = "OPERAND_PCREL",
450 PrintMethod = "printPCRelImm" in {
// brtarget16 uses the 16-bit-mode parser class; brtarget32 the generic one.
451 let ParserMatchClass = X86AbsMem16AsmOperand in
452 def brtarget16 : Operand<OtherVT>;
453 let ParserMatchClass = X86AbsMemAsmOperand in
454 def brtarget32 : Operand<OtherVT>;
// Source-index (e.g. [ESI]) asm operand classes for string instructions,
// one per access width, each refining the plain memory class.
457 let RenderMethod = "addSrcIdxOperands" in {
458 def X86SrcIdx8Operand : AsmOperandClass {
459 let Name = "SrcIdx8";
460 let SuperClasses = [X86Mem8AsmOperand];
462 def X86SrcIdx16Operand : AsmOperandClass {
463 let Name = "SrcIdx16";
464 let SuperClasses = [X86Mem16AsmOperand];
466 def X86SrcIdx32Operand : AsmOperandClass {
467 let Name = "SrcIdx32";
468 let SuperClasses = [X86Mem32AsmOperand];
470 def X86SrcIdx64Operand : AsmOperandClass {
471 let Name = "SrcIdx64";
472 let SuperClasses = [X86Mem64AsmOperand];
474 } // RenderMethod = "addSrcIdxOperands"
// Destination-index (e.g. [EDI]) asm operand classes, same structure.
476 let RenderMethod = "addDstIdxOperands" in {
477 def X86DstIdx8Operand : AsmOperandClass {
478 let Name = "DstIdx8";
479 let SuperClasses = [X86Mem8AsmOperand];
481 def X86DstIdx16Operand : AsmOperandClass {
482 let Name = "DstIdx16";
483 let SuperClasses = [X86Mem16AsmOperand];
485 def X86DstIdx32Operand : AsmOperandClass {
486 let Name = "DstIdx32";
487 let SuperClasses = [X86Mem32AsmOperand];
489 def X86DstIdx64Operand : AsmOperandClass {
490 let Name = "DstIdx64";
491 let SuperClasses = [X86Mem64AsmOperand];
493 } // RenderMethod = "addDstIdxOperands"
// Memory-offset (moffs) asm operand classes; names are
// MemOffs<address-size>_<operand-size>.
495 let RenderMethod = "addMemOffsOperands" in {
496 def X86MemOffs16_8AsmOperand : AsmOperandClass {
497 let Name = "MemOffs16_8";
498 let SuperClasses = [X86Mem8AsmOperand];
500 def X86MemOffs16_16AsmOperand : AsmOperandClass {
501 let Name = "MemOffs16_16";
502 let SuperClasses = [X86Mem16AsmOperand];
504 def X86MemOffs16_32AsmOperand : AsmOperandClass {
505 let Name = "MemOffs16_32";
506 let SuperClasses = [X86Mem32AsmOperand];
508 def X86MemOffs32_8AsmOperand : AsmOperandClass {
509 let Name = "MemOffs32_8";
510 let SuperClasses = [X86Mem8AsmOperand];
512 def X86MemOffs32_16AsmOperand : AsmOperandClass {
513 let Name = "MemOffs32_16";
514 let SuperClasses = [X86Mem16AsmOperand];
516 def X86MemOffs32_32AsmOperand : AsmOperandClass {
517 let Name = "MemOffs32_32";
518 let SuperClasses = [X86Mem32AsmOperand];
520 def X86MemOffs32_64AsmOperand : AsmOperandClass {
521 let Name = "MemOffs32_64";
522 let SuperClasses = [X86Mem64AsmOperand];
524 def X86MemOffs64_8AsmOperand : AsmOperandClass {
525 let Name = "MemOffs64_8";
526 let SuperClasses = [X86Mem8AsmOperand];
528 def X86MemOffs64_16AsmOperand : AsmOperandClass {
529 let Name = "MemOffs64_16";
530 let SuperClasses = [X86Mem16AsmOperand];
532 def X86MemOffs64_32AsmOperand : AsmOperandClass {
533 let Name = "MemOffs64_32";
534 let SuperClasses = [X86Mem32AsmOperand];
536 def X86MemOffs64_64AsmOperand : AsmOperandClass {
537 let Name = "MemOffs64_64";
538 let SuperClasses = [X86Mem64AsmOperand];
540 } // RenderMethod = "addMemOffsOperands"
// Printed operand for source-index addressing: just a pointer register
// plus a segment override.
542 class X86SrcIdxOperand<string printMethod, AsmOperandClass parserMatchClass>
543 : X86MemOperand<printMethod, parserMatchClass> {
544 let MIOperandInfo = (ops ptr_rc, SEGMENT_REG);
// Destination-index operand: pointer register only (ES segment is implied
// by the instruction, so no segment operand here).
547 class X86DstIdxOperand<string printMethod, AsmOperandClass parserMatchClass>
548 : X86MemOperand<printMethod, parserMatchClass> {
549 let MIOperandInfo = (ops ptr_rc);
// Concrete src/dst-index operands, one per access width.
552 def srcidx8 : X86SrcIdxOperand<"printSrcIdx8", X86SrcIdx8Operand>;
553 def srcidx16 : X86SrcIdxOperand<"printSrcIdx16", X86SrcIdx16Operand>;
554 def srcidx32 : X86SrcIdxOperand<"printSrcIdx32", X86SrcIdx32Operand>;
555 def srcidx64 : X86SrcIdxOperand<"printSrcIdx64", X86SrcIdx64Operand>;
556 def dstidx8 : X86DstIdxOperand<"printDstIdx8", X86DstIdx8Operand>;
557 def dstidx16 : X86DstIdxOperand<"printDstIdx16", X86DstIdx16Operand>;
558 def dstidx32 : X86DstIdxOperand<"printDstIdx32", X86DstIdx32Operand>;
559 def dstidx64 : X86DstIdxOperand<"printDstIdx64", X86DstIdx64Operand>;
// Memory-offset operand: a bare immediate address plus segment register.
561 class X86MemOffsOperand<Operand immOperand, string printMethod,
562 AsmOperandClass parserMatchClass>
563 : X86MemOperand<printMethod, parserMatchClass> {
564 let MIOperandInfo = (ops immOperand, SEGMENT_REG);
// offset<address-size>_<operand-size> instantiations.
567 def offset16_8 : X86MemOffsOperand<i16imm, "printMemOffs8",
568 X86MemOffs16_8AsmOperand>;
569 def offset16_16 : X86MemOffsOperand<i16imm, "printMemOffs16",
570 X86MemOffs16_16AsmOperand>;
571 def offset16_32 : X86MemOffsOperand<i16imm, "printMemOffs32",
572 X86MemOffs16_32AsmOperand>;
573 def offset32_8 : X86MemOffsOperand<i32imm, "printMemOffs8",
574 X86MemOffs32_8AsmOperand>;
575 def offset32_16 : X86MemOffsOperand<i32imm, "printMemOffs16",
576 X86MemOffs32_16AsmOperand>;
577 def offset32_32 : X86MemOffsOperand<i32imm, "printMemOffs32",
578 X86MemOffs32_32AsmOperand>;
579 def offset32_64 : X86MemOffsOperand<i32imm, "printMemOffs64",
580 X86MemOffs32_64AsmOperand>;
581 def offset64_8 : X86MemOffsOperand<i64imm, "printMemOffs8",
582 X86MemOffs64_8AsmOperand>;
583 def offset64_16 : X86MemOffsOperand<i64imm, "printMemOffs16",
584 X86MemOffs64_16AsmOperand>;
585 def offset64_32 : X86MemOffsOperand<i64imm, "printMemOffs32",
586 X86MemOffs64_32AsmOperand>;
587 def offset64_64 : X86MemOffsOperand<i64imm, "printMemOffs64",
588 X86MemOffs64_64AsmOperand>;
// SSE/AVX/AVX-512/XOP condition-code immediate operands.
590 def SSECC : Operand<i8> {
591 let PrintMethod = "printSSEAVXCC";
592 let OperandType = "OPERAND_IMMEDIATE";
// i8 immediate in [0, 8): 3-bit zero-extended value (SSE condition codes).
// NOTE(review): the ImmLeaf closing "}]>;" lines are missing in this copy.
595 def i8immZExt3 : ImmLeaf<i8, [{
596 return Imm >= 0 && Imm < 8;
599 def AVXCC : Operand<i8> {
600 let PrintMethod = "printSSEAVXCC";
601 let OperandType = "OPERAND_IMMEDIATE";
// i8 immediate in [0, 32): 5-bit zero-extended value (AVX condition codes).
604 def i8immZExt5 : ImmLeaf<i8, [{
605 return Imm >= 0 && Imm < 32;
608 def AVX512ICC : Operand<i8> {
609 let PrintMethod = "printSSEAVXCC";
610 let OperandType = "OPERAND_IMMEDIATE";
613 def XOPCC : Operand<i8> {
614 let PrintMethod = "printXOPCC";
615 let OperandType = "OPERAND_IMMEDIATE";
// Common base for sign-extended immediate asm-operand classes.
618 class ImmSExtAsmOperandClass : AsmOperandClass {
619 let SuperClasses = [ImmAsmOperand];
620 let RenderMethod = "addImmOperands";
// Register operand that the parser accepts as either GR32 or GR64.
623 def X86GR32orGR64AsmOperand : AsmOperandClass {
624 let Name = "GR32orGR64";
627 def GR32orGR64 : RegisterOperand<GR32> {
628 let ParserMatchClass = X86GR32orGR64AsmOperand;
// AVX-512 embedded rounding-control operand.
630 def AVX512RCOperand : AsmOperandClass {
631 let Name = "AVX512RC";
633 def AVX512RC : Operand<i32> {
634 let PrintMethod = "printRoundingControl";
635 let OperandType = "OPERAND_IMMEDIATE";
636 let ParserMatchClass = AVX512RCOperand;
639 // Sign-extended immediate classes. We don't need to define the full lattice
640 // here because there is no instruction with an ambiguity between ImmSExti64i32
643 // The strange ranges come from the fact that the assembler always works with
644 // 64-bit immediates, but for a 16-bit target value we want to accept both "-1"
645 // (which will be a -1ULL), and "0xFF" (-1 in 16-bits).
648 // [0xFFFFFFFF80000000, 0xFFFFFFFFFFFFFFFF]
649 def ImmSExti64i32AsmOperand : ImmSExtAsmOperandClass {
650 let Name = "ImmSExti64i32";
653 // [0, 0x0000007F] | [0x000000000000FF80, 0x000000000000FFFF] |
654 // [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
655 def ImmSExti16i8AsmOperand : ImmSExtAsmOperandClass {
656 let Name = "ImmSExti16i8";
657 let SuperClasses = [ImmSExti64i32AsmOperand];
660 // [0, 0x0000007F] | [0x00000000FFFFFF80, 0x00000000FFFFFFFF] |
661 // [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
662 def ImmSExti32i8AsmOperand : ImmSExtAsmOperandClass {
663 let Name = "ImmSExti32i8";
667 // [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
668 def ImmSExti64i8AsmOperand : ImmSExtAsmOperandClass {
669 let Name = "ImmSExti64i8";
670 let SuperClasses = [ImmSExti16i8AsmOperand, ImmSExti32i8AsmOperand,
671 ImmSExti64i32AsmOperand];
674 // Unsigned immediate used by SSE/AVX instructions
676 // [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
677 def ImmUnsignedi8AsmOperand : AsmOperandClass {
678 let Name = "ImmUnsignedi8";
679 let RenderMethod = "addImmOperands";
682 // A couple of more descriptive operand definitions.
683 // 16-bits but only 8 bits are significant.
684 def i16i8imm : Operand<i16> {
685 let ParserMatchClass = ImmSExti16i8AsmOperand;
686 let OperandType = "OPERAND_IMMEDIATE";
688 // 32-bits but only 8 bits are significant.
689 def i32i8imm : Operand<i32> {
690 let ParserMatchClass = ImmSExti32i8AsmOperand;
691 let OperandType = "OPERAND_IMMEDIATE";
694 // 64-bits but only 32 bits are significant.
695 def i64i32imm : Operand<i64> {
696 let ParserMatchClass = ImmSExti64i32AsmOperand;
697 let OperandType = "OPERAND_IMMEDIATE";
700 // 64-bits but only 8 bits are significant.
701 def i64i8imm : Operand<i64> {
702 let ParserMatchClass = ImmSExti64i8AsmOperand;
703 let OperandType = "OPERAND_IMMEDIATE";
706 // Unsigned 8-bit immediate used by SSE/AVX instructions.
707 def u8imm : Operand<i8> {
708 let PrintMethod = "printU8Imm";
709 let ParserMatchClass = ImmUnsignedi8AsmOperand;
710 let OperandType = "OPERAND_IMMEDIATE";
713 // 32-bit immediate but only 8-bits are significant and they are unsigned.
714 // Used by some SSE/AVX instructions that use intrinsics.
715 def i32u8imm : Operand<i32> {
716 let PrintMethod = "printU8Imm";
717 let ParserMatchClass = ImmUnsignedi8AsmOperand;
718 let OperandType = "OPERAND_IMMEDIATE";
721 // 64-bits but only 32 bits are significant, and those bits are treated as being
// pc-relative (used for call/branch targets).
723 def i64i32imm_pcrel : Operand<i64> {
724 let PrintMethod = "printPCRelImm";
725 let ParserMatchClass = X86AbsMemAsmOperand;
726 let OperandType = "OPERAND_PCREL";
// 32-bit LEA destination addressed through 64-bit registers.
729 def lea64_32mem : Operand<i32> {
730 let PrintMethod = "printanymem";
731 let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm, SEGMENT_REG);
732 let ParserMatchClass = X86MemAsmOperand;
735 // Memory operands that use 64-bit pointers in both ILP32 and LP64.
736 def lea64mem : Operand<i64> {
737 let PrintMethod = "printanymem";
738 let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm, SEGMENT_REG);
739 let ParserMatchClass = X86MemAsmOperand;
743 //===----------------------------------------------------------------------===//
744 // X86 Complex Pattern Definitions.
747 // Define X86-specific addressing mode.
// All address patterns select the 5-part x86 address via C++ select*
// functions in the DAG ISel.
748 def addr : ComplexPattern<iPTR, 5, "selectAddr", [], [SDNPWantParent]>;
// NOTE(review): the closing line of the lea32addr record is missing in
// this copy.
749 def lea32addr : ComplexPattern<i32, 5, "selectLEAAddr",
750 [add, sub, mul, X86mul_imm, shl, or, frameindex],
752 // In 64-bit mode 32-bit LEAs can use RIP-relative addressing.
753 def lea64_32addr : ComplexPattern<i32, 5, "selectLEA64_32Addr",
754 [add, sub, mul, X86mul_imm, shl, or,
755 frameindex, X86WrapperRIP],
// TLS address selection (32- and 64-bit variants).
758 def tls32addr : ComplexPattern<i32, 5, "selectTLSADDRAddr",
759 [tglobaltlsaddr], []>;
761 def tls32baseaddr : ComplexPattern<i32, 5, "selectTLSADDRAddr",
762 [tglobaltlsaddr], []>;
764 def lea64addr : ComplexPattern<i64, 5, "selectLEAAddr",
765 [add, sub, mul, X86mul_imm, shl, or, frameindex,
768 def tls64addr : ComplexPattern<i64, 5, "selectTLSADDRAddr",
769 [tglobaltlsaddr], []>;
771 def tls64baseaddr : ComplexPattern<i64, 5, "selectTLSADDRAddr",
772 [tglobaltlsaddr], []>;
// Gather/scatter vector address selection.
774 def vectoraddr : ComplexPattern<iPTR, 5, "selectVectorAddr", [],[SDNPWantParent]>;
776 // A relocatable immediate is either an immediate operand or an operand that can
777 // be relocated by the linker to an immediate, such as a regular symbol in
// non-PIC code. NOTE(review): the relocImm record is truncated in this copy.
779 def relocImm : ComplexPattern<iAny, 1, "selectRelocImm", [imm, X86Wrapper], [],
782 //===----------------------------------------------------------------------===//
783 // X86 Instruction Predicate Definitions.
784 def TruePredicate : Predicate<"true">;
786 def HasCMov : Predicate<"Subtarget->hasCMov()">;
787 def NoCMov : Predicate<"!Subtarget->hasCMov()">;
789 def HasMMX : Predicate<"Subtarget->hasMMX()">;
790 def Has3DNow : Predicate<"Subtarget->has3DNow()">;
791 def Has3DNowA : Predicate<"Subtarget->has3DNowA()">;
792 def HasSSE1 : Predicate<"Subtarget->hasSSE1()">;
793 def UseSSE1 : Predicate<"Subtarget->hasSSE1() && !Subtarget->hasAVX()">;
794 def HasSSE2 : Predicate<"Subtarget->hasSSE2()">;
795 def UseSSE2 : Predicate<"Subtarget->hasSSE2() && !Subtarget->hasAVX()">;
796 def HasSSE3 : Predicate<"Subtarget->hasSSE3()">;
797 def UseSSE3 : Predicate<"Subtarget->hasSSE3() && !Subtarget->hasAVX()">;
798 def HasSSSE3 : Predicate<"Subtarget->hasSSSE3()">;
799 def UseSSSE3 : Predicate<"Subtarget->hasSSSE3() && !Subtarget->hasAVX()">;
800 def HasSSE41 : Predicate<"Subtarget->hasSSE41()">;
801 def NoSSE41 : Predicate<"!Subtarget->hasSSE41()">;
802 def UseSSE41 : Predicate<"Subtarget->hasSSE41() && !Subtarget->hasAVX()">;
803 def HasSSE42 : Predicate<"Subtarget->hasSSE42()">;
804 def UseSSE42 : Predicate<"Subtarget->hasSSE42() && !Subtarget->hasAVX()">;
805 def HasSSE4A : Predicate<"Subtarget->hasSSE4A()">;
806 def HasAVX : Predicate<"Subtarget->hasAVX()">;
807 def HasAVX2 : Predicate<"Subtarget->hasAVX2()">;
808 def HasAVX1Only : Predicate<"Subtarget->hasAVX() && !Subtarget->hasAVX2()">;
809 def HasAVX512 : Predicate<"Subtarget->hasAVX512()">,
810 AssemblerPredicate<"FeatureAVX512", "AVX-512 ISA">;
811 def UseAVX : Predicate<"Subtarget->hasAVX() && !Subtarget->hasAVX512()">;
812 def UseAVX2 : Predicate<"Subtarget->hasAVX2() && !Subtarget->hasAVX512()">;
813 def NoAVX512 : Predicate<"!Subtarget->hasAVX512()">;
814 def HasCDI : Predicate<"Subtarget->hasCDI()">,
815 AssemblerPredicate<"FeatureCDI", "AVX-512 CD ISA">;
816 def HasPFI : Predicate<"Subtarget->hasPFI()">,
817 AssemblerPredicate<"FeaturePFI", "AVX-512 PF ISA">;
818 def HasERI : Predicate<"Subtarget->hasERI()">,
819 AssemblerPredicate<"FeatureERI", "AVX-512 ER ISA">;
820 def HasDQI : Predicate<"Subtarget->hasDQI()">,
821 AssemblerPredicate<"FeatureDQI", "AVX-512 DQ ISA">;
822 def NoDQI : Predicate<"!Subtarget->hasDQI()">;
823 def HasBWI : Predicate<"Subtarget->hasBWI()">,
824 AssemblerPredicate<"FeatureBWI", "AVX-512 BW ISA">;
825 def NoBWI : Predicate<"!Subtarget->hasBWI()">;
826 def HasVLX : Predicate<"Subtarget->hasVLX()">,
827 AssemblerPredicate<"FeatureVLX", "AVX-512 VL ISA">;
828 def NoVLX : Predicate<"!Subtarget->hasVLX()">;
829 def NoVLX_Or_NoBWI : Predicate<"!Subtarget->hasVLX() || !Subtarget->hasBWI()">;
830 def NoVLX_Or_NoDQI : Predicate<"!Subtarget->hasVLX() || !Subtarget->hasDQI()">;
831 def PKU : Predicate<"Subtarget->hasPKU()">;
// Simple one-feature predicates: each mirrors an X86Subtarget feature query
// and gates the instruction definitions that require that ISA extension.
833 def HasPOPCNT : Predicate<"Subtarget->hasPOPCNT()">;
834 def HasAES : Predicate<"Subtarget->hasAES()">;
835 def HasFXSR : Predicate<"Subtarget->hasFXSR()">;
836 def HasXSAVE : Predicate<"Subtarget->hasXSAVE()">;
837 def HasXSAVEOPT : Predicate<"Subtarget->hasXSAVEOPT()">;
838 def HasXSAVEC : Predicate<"Subtarget->hasXSAVEC()">;
839 def HasXSAVES : Predicate<"Subtarget->hasXSAVES()">;
840 def HasPCLMUL : Predicate<"Subtarget->hasPCLMUL()">;
841 def HasFMA : Predicate<"Subtarget->hasFMA()">;
842 def HasFMA4 : Predicate<"Subtarget->hasFMA4()">;
843 def HasXOP : Predicate<"Subtarget->hasXOP()">;
844 def HasTBM : Predicate<"Subtarget->hasTBM()">;
845 def HasLWP : Predicate<"Subtarget->hasLWP()">;
846 def HasMOVBE : Predicate<"Subtarget->hasMOVBE()">;
847 def HasRDRAND : Predicate<"Subtarget->hasRDRAND()">;
848 def HasF16C : Predicate<"Subtarget->hasF16C()">;
849 def NoF16C : Predicate<"!Subtarget->hasF16C()">;
850 def HasFSGSBase : Predicate<"Subtarget->hasFSGSBase()">;
851 def HasLZCNT : Predicate<"Subtarget->hasLZCNT()">;
852 def HasBMI : Predicate<"Subtarget->hasBMI()">;
853 def HasBMI2 : Predicate<"Subtarget->hasBMI2()">;
// VBMI and IFMA are AVX-512 subsets, so they also carry assembler
// predicates with AVX-512 diagnostics.
854 def HasVBMI : Predicate<"Subtarget->hasVBMI()">,
855 AssemblerPredicate<"FeatureVBMI", "AVX-512 VBMI ISA">;
856 def HasIFMA : Predicate<"Subtarget->hasIFMA()">,
857 AssemblerPredicate<"FeatureIFMA", "AVX-512 IFMA ISA">;
858 def HasRTM : Predicate<"Subtarget->hasRTM()">;
859 def HasADX : Predicate<"Subtarget->hasADX()">;
860 def HasSHA : Predicate<"Subtarget->hasSHA()">;
861 def HasPRFCHW : Predicate<"Subtarget->hasPRFCHW()">;
862 def HasRDSEED : Predicate<"Subtarget->hasRDSEED()">;
// NOTE(review): HasPrefetchW is currently identical to HasPRFCHW above;
// both query hasPRFCHW(). Kept as-is since patterns reference both names.
863 def HasPrefetchW : Predicate<"Subtarget->hasPRFCHW()">;
864 def HasLAHFSAHF : Predicate<"Subtarget->hasLAHFSAHF()">;
865 def HasMWAITX : Predicate<"Subtarget->hasMWAITX()">;
866 def HasCLZERO : Predicate<"Subtarget->hasCLZERO()">;
// x87 stack is used for f32/f64 only when the corresponding SSE level
// (which would otherwise handle that type) is absent.
867 def FPStackf32 : Predicate<"!Subtarget->hasSSE1()">;
868 def FPStackf64 : Predicate<"!Subtarget->hasSSE2()">;
869 def HasMPX : Predicate<"Subtarget->hasMPX()">;
870 def HasCLFLUSHOPT : Predicate<"Subtarget->hasCLFLUSHOPT()">;
871 def HasCmpxchg16b: Predicate<"Subtarget->hasCmpxchg16b()">;
// Operating-mode predicates. The AssemblerPredicate forms key off the
// Mode16Bit/Mode32Bit/Mode64Bit feature bits so the assembler can reject
// encodings that are invalid in the current mode.
872 def Not64BitMode : Predicate<"!Subtarget->is64Bit()">,
873 AssemblerPredicate<"!Mode64Bit", "Not 64-bit mode">;
874 def In64BitMode : Predicate<"Subtarget->is64Bit()">,
875 AssemblerPredicate<"Mode64Bit", "64-bit mode">;
// LP64 vs. ILP32 (x32): distinguishes pointer width within 64-bit mode.
876 def IsLP64 : Predicate<"Subtarget->isTarget64BitLP64()">;
877 def NotLP64 : Predicate<"!Subtarget->isTarget64BitLP64()">;
878 def In16BitMode : Predicate<"Subtarget->is16Bit()">,
879 AssemblerPredicate<"Mode16Bit", "16-bit mode">;
880 def Not16BitMode : Predicate<"!Subtarget->is16Bit()">,
881 AssemblerPredicate<"!Mode16Bit", "Not 16-bit mode">;
882 def In32BitMode : Predicate<"Subtarget->is32Bit()">,
883 AssemblerPredicate<"Mode32Bit", "32-bit mode">;
884 def IsWin64 : Predicate<"Subtarget->isTargetWin64()">;
885 def NotWin64 : Predicate<"!Subtarget->isTargetWin64()">;
// Depends on per-function frame-lowering state (hasFP), so it must be
// re-evaluated for each MachineFunction rather than cached per-module.
886 def NotWin64WithoutFP : Predicate<"!Subtarget->isTargetWin64() ||"
887 "Subtarget->getFrameLowering()->hasFP(*MF)"> {
888 let RecomputePerFunction = 1;
890 def IsPS4 : Predicate<"Subtarget->isTargetPS4()">;
891 def NotPS4 : Predicate<"!Subtarget->isTargetPS4()">;
892 def IsNaCl : Predicate<"Subtarget->isTargetNaCl()">;
893 def NotNaCl : Predicate<"!Subtarget->isTargetNaCl()">;
// Code-model predicates (evaluated against the TargetMachine, not the
// subtarget): gate patterns that assume 32-bit-reachable code/data.
894 def SmallCode : Predicate<"TM.getCodeModel() == CodeModel::Small">;
895 def KernelCode : Predicate<"TM.getCodeModel() == CodeModel::Kernel">;
896 def NearData : Predicate<"TM.getCodeModel() == CodeModel::Small ||"
897 "TM.getCodeModel() == CodeModel::Kernel">;
898 def IsNotPIC : Predicate<"!TM.isPositionIndependent()">;
900 // We could compute these on a per-module basis but doing so requires accessing
901 // the Function object through the <Target>Subtarget and objections were raised
902 // to that (see post-commit review comments for r301750).
// These read function attributes (optsize/minsize) via MF, hence
// RecomputePerFunction.
903 let RecomputePerFunction = 1 in {
904 def OptForSize : Predicate<"MF->getFunction()->optForSize()">;
905 def OptForMinSize : Predicate<"MF->getFunction()->optForMinSize()">;
906 def OptForSpeed : Predicate<"!MF->getFunction()->optForSize()">;
// Microarchitectural tuning predicates: prefer/avoid particular encodings
// or instruction forms based on subtarget cost hints, not ISA availability.
909 def FastBTMem : Predicate<"!Subtarget->isBTMemSlow()">;
910 def CallImmAddr : Predicate<"Subtarget->isLegalToCallImmediateAddr()">;
911 def FavorMemIndirectCall : Predicate<"!Subtarget->callRegIndirect()">;
912 def NotSlowIncDec : Predicate<"!Subtarget->slowIncDec()">;
913 def HasFastMem32 : Predicate<"!Subtarget->isUnalignedMem32Slow()">;
914 def HasFastLZCNT : Predicate<"Subtarget->hasFastLZCNT()">;
915 def HasFastSHLDRotate : Predicate<"Subtarget->hasFastSHLDRotate()">;
916 def HasERMSB : Predicate<"Subtarget->hasERMSB()">;
917 def HasMFence : Predicate<"Subtarget->hasMFence()">;
919 //===----------------------------------------------------------------------===//
920 // X86 Instruction Format Definitions.
923 include "X86InstrFormats.td"
925 //===----------------------------------------------------------------------===//
926 // Pattern fragments.
929 // X86 specific condition code. These correspond to CondCode in
930 // X86InstrInfo.h. They must be kept in synch.
// Each condition code is matched as an i8 immediate leaf; the numeric value
// is the enum value in X86InstrInfo.h, NOT the hardware cc encoding.
931 def X86_COND_A : PatLeaf<(i8 0)>; // alt. COND_NBE
932 def X86_COND_AE : PatLeaf<(i8 1)>; // alt. COND_NC
933 def X86_COND_B : PatLeaf<(i8 2)>; // alt. COND_C
934 def X86_COND_BE : PatLeaf<(i8 3)>; // alt. COND_NA
935 def X86_COND_E : PatLeaf<(i8 4)>; // alt. COND_Z
936 def X86_COND_G : PatLeaf<(i8 5)>; // alt. COND_NLE
937 def X86_COND_GE : PatLeaf<(i8 6)>; // alt. COND_NL
938 def X86_COND_L : PatLeaf<(i8 7)>; // alt. COND_NGE
939 def X86_COND_LE : PatLeaf<(i8 8)>; // alt. COND_NG
940 def X86_COND_NE : PatLeaf<(i8 9)>; // alt. COND_NZ
941 def X86_COND_NO : PatLeaf<(i8 10)>;
942 def X86_COND_NP : PatLeaf<(i8 11)>; // alt. COND_PO
943 def X86_COND_NS : PatLeaf<(i8 12)>;
944 def X86_COND_O : PatLeaf<(i8 13)>;
945 def X86_COND_P : PatLeaf<(i8 14)>; // alt. COND_PE
946 def X86_COND_S : PatLeaf<(i8 15)>;
// Immediates that fit in a sign-extended 8-bit field, allowing the short
// imm8 encodings of 16/32/64-bit ALU instructions.
948 def i16immSExt8 : ImmLeaf<i16, [{ return isInt<8>(Imm); }]>;
949 def i32immSExt8 : ImmLeaf<i32, [{ return isInt<8>(Imm); }]>;
950 def i64immSExt8 : ImmLeaf<i64, [{ return isInt<8>(Imm); }]>;
951 def i64immSExt32 : ImmLeaf<i64, [{ return isInt<32>(Imm); }]>;
953 // FIXME: Ideally we would just replace the above i*immSExt* matchers with
954 // relocImm-based matchers, but then FastISel would be unable to use them.
// relocImm variants additionally accept relocatable (symbolic) immediates.
955 def i64relocImmSExt8 : PatLeaf<(i64 relocImm), [{
956 return isSExtRelocImm<8>(N);
958 def i64relocImmSExt32 : PatLeaf<(i64 relocImm), [{
959 return isSExtRelocImm<32>(N);
962 // If we have multiple users of an immediate, it's much smaller to reuse
963 // the register, rather than encode the immediate in every instruction.
964 // This has the risk of increasing register pressure from stretched live
965 // ranges, however, the immediates should be trivial to rematerialize by
966 // the RA in the event of high register pressure.
967 // TODO : This is currently enabled for stores and binary ops. There are more
968 // cases for which this can be enabled, though this catches the bulk of the
970 // TODO2 : This should really also be enabled under O2, but there's currently
971 // an issue with RA where we don't pull the constants into their users
972 // when we rematerialize them. I'll follow-up on enabling O2 after we fix that
974 // TODO3 : This is currently limited to single basic blocks (DAG creation
975 // pulls block immediates to the top and merges them if necessary).
976 // Eventually, it would be nice to allow ConstantHoisting to merge constants
977 // globally for potentially added savings.
// "_su" (size-use) fragments: match only when using the immediate form is
// not expected to hurt code size, per shouldAvoidImmediateInstFormsForSize.
979 def imm8_su : PatLeaf<(i8 relocImm), [{
980 return !shouldAvoidImmediateInstFormsForSize(N);
982 def imm16_su : PatLeaf<(i16 relocImm), [{
983 return !shouldAvoidImmediateInstFormsForSize(N);
985 def imm32_su : PatLeaf<(i32 relocImm), [{
986 return !shouldAvoidImmediateInstFormsForSize(N);
988 def i64immSExt32_su : PatLeaf<(i64immSExt32), [{
989 return !shouldAvoidImmediateInstFormsForSize(N);
992 def i16immSExt8_su : PatLeaf<(i16immSExt8), [{
993 return !shouldAvoidImmediateInstFormsForSize(N);
995 def i32immSExt8_su : PatLeaf<(i32immSExt8), [{
996 return !shouldAvoidImmediateInstFormsForSize(N);
998 def i64immSExt8_su : PatLeaf<(i64immSExt8), [{
999 return !shouldAvoidImmediateInstFormsForSize(N);
1002 def i64relocImmSExt8_su : PatLeaf<(i64relocImmSExt8), [{
1003 return !shouldAvoidImmediateInstFormsForSize(N);
1005 def i64relocImmSExt32_su : PatLeaf<(i64relocImmSExt32), [{
1006 return !shouldAvoidImmediateInstFormsForSize(N);
1009 // i64immZExt32 predicate - True if the 64-bit immediate fits in a 32-bit
1011 def i64immZExt32 : ImmLeaf<i64, [{ return isUInt<32>(Imm); }]>;
// Fits both: zero-extends from 32 bits AND its low 32 bits sign-extend
// from 8 bits.
1013 def i64immZExt32SExt8 : ImmLeaf<i64, [{
1014 return isUInt<32>(Imm) && isInt<8>(static_cast<int32_t>(Imm));
1017 // Helper fragments for loads.
1018 // It's always safe to treat a anyext i16 load as a i32 load if the i16 is
1019 // known to be 32-bit aligned or better. Ditto for i8 to i16.
// loadi16: plain i16 loads, plus non-volatile extloads that are at least
// 2-byte aligned (safe to widen).
1020 def loadi16 : PatFrag<(ops node:$ptr), (i16 (unindexedload node:$ptr)), [{
1021 LoadSDNode *LD = cast<LoadSDNode>(N);
1022 ISD::LoadExtType ExtType = LD->getExtensionType();
1023 if (ExtType == ISD::NON_EXTLOAD)
1025 if (ExtType == ISD::EXTLOAD)
1026 return LD->getAlignment() >= 2 && !LD->isVolatile();
// loadi16_anyext: an i32-typed extload from a 16-bit location, accepted
// only when aligned and non-volatile.
1030 def loadi16_anyext : PatFrag<(ops node:$ptr), (i32 (unindexedload node:$ptr)),[{
1031 LoadSDNode *LD = cast<LoadSDNode>(N);
1032 ISD::LoadExtType ExtType = LD->getExtensionType();
1033 if (ExtType == ISD::EXTLOAD)
1034 return LD->getAlignment() >= 2 && !LD->isVolatile();
// loadi32: same policy as loadi16 with a 4-byte alignment requirement.
1038 def loadi32 : PatFrag<(ops node:$ptr), (i32 (unindexedload node:$ptr)), [{
1039 LoadSDNode *LD = cast<LoadSDNode>(N);
1040 ISD::LoadExtType ExtType = LD->getExtensionType();
1041 if (ExtType == ISD::NON_EXTLOAD)
1043 if (ExtType == ISD::EXTLOAD)
1044 return LD->getAlignment() >= 4 && !LD->isVolatile();
// Plain typed loads with no extension handling.
1048 def loadi8 : PatFrag<(ops node:$ptr), (i8 (load node:$ptr))>;
1049 def loadi64 : PatFrag<(ops node:$ptr), (i64 (load node:$ptr))>;
1050 def loadf32 : PatFrag<(ops node:$ptr), (f32 (load node:$ptr))>;
1051 def loadf64 : PatFrag<(ops node:$ptr), (f64 (load node:$ptr))>;
1052 def loadf80 : PatFrag<(ops node:$ptr), (f80 (load node:$ptr))>;
1053 def loadf128 : PatFrag<(ops node:$ptr), (f128 (load node:$ptr))>;
// Naming convention for the extending loads below: sextloadi<DST>i<SRC>
// loads an i<SRC> from memory and sign-extends it to i<DST>; likewise for
// zextload (zero-extend) and extload (any-extend).
1055 def sextloadi16i8 : PatFrag<(ops node:$ptr), (i16 (sextloadi8 node:$ptr))>;
1056 def sextloadi32i8 : PatFrag<(ops node:$ptr), (i32 (sextloadi8 node:$ptr))>;
1057 def sextloadi32i16 : PatFrag<(ops node:$ptr), (i32 (sextloadi16 node:$ptr))>;
1058 def sextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (sextloadi8 node:$ptr))>;
1059 def sextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (sextloadi16 node:$ptr))>;
1060 def sextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (sextloadi32 node:$ptr))>;
1062 def zextloadi8i1 : PatFrag<(ops node:$ptr), (i8 (zextloadi1 node:$ptr))>;
1063 def zextloadi16i1 : PatFrag<(ops node:$ptr), (i16 (zextloadi1 node:$ptr))>;
1064 def zextloadi32i1 : PatFrag<(ops node:$ptr), (i32 (zextloadi1 node:$ptr))>;
1065 def zextloadi16i8 : PatFrag<(ops node:$ptr), (i16 (zextloadi8 node:$ptr))>;
1066 def zextloadi32i8 : PatFrag<(ops node:$ptr), (i32 (zextloadi8 node:$ptr))>;
1067 def zextloadi32i16 : PatFrag<(ops node:$ptr), (i32 (zextloadi16 node:$ptr))>;
1068 def zextloadi64i1 : PatFrag<(ops node:$ptr), (i64 (zextloadi1 node:$ptr))>;
1069 def zextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (zextloadi8 node:$ptr))>;
1070 def zextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (zextloadi16 node:$ptr))>;
1071 def zextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (zextloadi32 node:$ptr))>;
1073 def extloadi8i1 : PatFrag<(ops node:$ptr), (i8 (extloadi1 node:$ptr))>;
1074 def extloadi16i1 : PatFrag<(ops node:$ptr), (i16 (extloadi1 node:$ptr))>;
1075 def extloadi32i1 : PatFrag<(ops node:$ptr), (i32 (extloadi1 node:$ptr))>;
1076 def extloadi16i8 : PatFrag<(ops node:$ptr), (i16 (extloadi8 node:$ptr))>;
1077 def extloadi32i8 : PatFrag<(ops node:$ptr), (i32 (extloadi8 node:$ptr))>;
1078 def extloadi32i16 : PatFrag<(ops node:$ptr), (i32 (extloadi16 node:$ptr))>;
1079 def extloadi64i1 : PatFrag<(ops node:$ptr), (i64 (extloadi1 node:$ptr))>;
1080 def extloadi64i8 : PatFrag<(ops node:$ptr), (i64 (extloadi8 node:$ptr))>;
1081 def extloadi64i16 : PatFrag<(ops node:$ptr), (i64 (extloadi16 node:$ptr))>;
1082 def extloadi64i32 : PatFrag<(ops node:$ptr), (i64 (extloadi32 node:$ptr))>;
// Single-use ("_su") node fragments: match only when the node has exactly
// one use, so folding it into an instruction does not duplicate work.
1085 // An 'and' node with a single use.
1086 def and_su : PatFrag<(ops node:$lhs, node:$rhs), (and node:$lhs, node:$rhs), [{
1087 return N->hasOneUse();
1089 // An 'srl' node with a single use.
1090 def srl_su : PatFrag<(ops node:$lhs, node:$rhs), (srl node:$lhs, node:$rhs), [{
1091 return N->hasOneUse();
1093 // A 'trunc' node with a single use.
1094 def trunc_su : PatFrag<(ops node:$src), (trunc node:$src), [{
1095 return N->hasOneUse();
1098 //===----------------------------------------------------------------------===//
1099 // Instruction list.
// NOP family: 0x90 single-byte nop and the multi-byte 0F 1F /0 forms
// taking a dummy memory operand. All are effect-free (hasSideEffects = 0).
1103 let hasSideEffects = 0, SchedRW = [WriteZero] in {
1104 def NOOP : I<0x90, RawFrm, (outs), (ins), "nop", [], IIC_NOP>;
1105 def NOOPW : I<0x1f, MRMXm, (outs), (ins i16mem:$zero),
1106 "nop{w}\t$zero", [], IIC_NOP>, TB, OpSize16;
1107 def NOOPL : I<0x1f, MRMXm, (outs), (ins i32mem:$zero),
1108 "nop{l}\t$zero", [], IIC_NOP>, TB, OpSize32;
1112 // Constructing a stack frame.
// ENTER imm16, imm8: allocate stack frame with optional nesting level.
1113 def ENTER : Ii16<0xC8, RawFrmImm8, (outs), (ins i16imm:$len, i8imm:$lvl),
1114 "enter\t$len, $lvl", [], IIC_ENTER>, Sched<[WriteMicrocoded]>;
// LEAVE: tear down the frame. Same 0xC9 encoding in both modes; the two
// records differ only in which BP/SP registers they implicitly def/use.
1116 let SchedRW = [WriteALU] in {
1117 let Defs = [EBP, ESP], Uses = [EBP, ESP], mayLoad = 1, hasSideEffects=0 in
1118 def LEAVE : I<0xC9, RawFrm,
1119 (outs), (ins), "leave", [], IIC_LEAVE>,
1120 Requires<[Not64BitMode]>;
1122 let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, hasSideEffects = 0 in
1123 def LEAVE64 : I<0xC9, RawFrm,
1124 (outs), (ins), "leave", [], IIC_LEAVE>,
1125 Requires<[In64BitMode]>;
1128 //===----------------------------------------------------------------------===//
1129 // Miscellaneous Instructions.
// Pseudo expanded by a custom inserter for SjLj exception dispatch setup.
1132 let isBarrier = 1, hasSideEffects = 1, usesCustomInserter = 1 in
1133 def Int_eh_sjlj_setup_dispatch
1134 : PseudoI<(outs), (ins), [(X86eh_sjlj_setup_dispatch)]>;
// 16/32-bit PUSH/POP for non-64-bit modes. All implicitly adjust ESP;
// the 0x58/0x50 AddRegFrm forms encode the register in the opcode, while
// the 0x8F /0 and 0xFF /6 forms are the ModR/M-encoded equivalents ("rmr")
// and the memory-operand forms ("rmm").
1136 let Defs = [ESP], Uses = [ESP], hasSideEffects=0 in {
1137 let mayLoad = 1, SchedRW = [WriteLoad] in {
1138 def POP16r : I<0x58, AddRegFrm, (outs GR16:$reg), (ins), "pop{w}\t$reg", [],
1139 IIC_POP_REG16>, OpSize16;
1140 def POP32r : I<0x58, AddRegFrm, (outs GR32:$reg), (ins), "pop{l}\t$reg", [],
1141 IIC_POP_REG>, OpSize32, Requires<[Not64BitMode]>;
1142 def POP16rmr: I<0x8F, MRM0r, (outs GR16:$reg), (ins), "pop{w}\t$reg", [],
1143 IIC_POP_REG>, OpSize16;
1144 def POP32rmr: I<0x8F, MRM0r, (outs GR32:$reg), (ins), "pop{l}\t$reg", [],
1145 IIC_POP_REG>, OpSize32, Requires<[Not64BitMode]>;
1146 } // mayLoad, SchedRW
// pop-to-memory both loads (from the stack) and stores (to $dst).
1147 let mayStore = 1, mayLoad = 1, SchedRW = [WriteRMW] in {
1148 def POP16rmm: I<0x8F, MRM0m, (outs), (ins i16mem:$dst), "pop{w}\t$dst", [],
1149 IIC_POP_MEM>, OpSize16;
1150 def POP32rmm: I<0x8F, MRM0m, (outs), (ins i32mem:$dst), "pop{l}\t$dst", [],
1151 IIC_POP_MEM>, OpSize32, Requires<[Not64BitMode]>;
1152 } // mayStore, mayLoad, WriteRMW
1154 let mayStore = 1, SchedRW = [WriteStore] in {
1155 def PUSH16r : I<0x50, AddRegFrm, (outs), (ins GR16:$reg), "push{w}\t$reg",[],
1156 IIC_PUSH_REG>, OpSize16;
1157 def PUSH32r : I<0x50, AddRegFrm, (outs), (ins GR32:$reg), "push{l}\t$reg",[],
1158 IIC_PUSH_REG>, OpSize32, Requires<[Not64BitMode]>;
1159 def PUSH16rmr: I<0xFF, MRM6r, (outs), (ins GR16:$reg), "push{w}\t$reg",[],
1160 IIC_PUSH_REG>, OpSize16;
1161 def PUSH32rmr: I<0xFF, MRM6r, (outs), (ins GR32:$reg), "push{l}\t$reg",[],
1162 IIC_PUSH_REG>, OpSize32, Requires<[Not64BitMode]>;
// Immediate pushes: 0x6A takes a sign-extended imm8, 0x68 a full imm16/32.
1164 def PUSH16i8 : Ii8<0x6a, RawFrm, (outs), (ins i16i8imm:$imm),
1165 "push{w}\t$imm", [], IIC_PUSH_IMM>, OpSize16;
1166 def PUSHi16 : Ii16<0x68, RawFrm, (outs), (ins i16imm:$imm),
1167 "push{w}\t$imm", [], IIC_PUSH_IMM>, OpSize16;
1169 def PUSH32i8 : Ii8<0x6a, RawFrm, (outs), (ins i32i8imm:$imm),
1170 "push{l}\t$imm", [], IIC_PUSH_IMM>, OpSize32,
1171 Requires<[Not64BitMode]>;
1172 def PUSHi32 : Ii32<0x68, RawFrm, (outs), (ins i32imm:$imm),
1173 "push{l}\t$imm", [], IIC_PUSH_IMM>, OpSize32,
1174 Requires<[Not64BitMode]>;
1175 } // mayStore, SchedRW
1177 let mayLoad = 1, mayStore = 1, SchedRW = [WriteRMW] in {
1178 def PUSH16rmm: I<0xFF, MRM6m, (outs), (ins i16mem:$src), "push{w}\t$src",[],
1179 IIC_PUSH_MEM>, OpSize16;
1180 def PUSH32rmm: I<0xFF, MRM6m, (outs), (ins i32mem:$src), "push{l}\t$src",[],
1181 IIC_PUSH_MEM>, OpSize32, Requires<[Not64BitMode]>;
1182 } // mayLoad, mayStore, SchedRW
// Pseudos that read/write the whole EFLAGS word via the stack
// (expanded by a custom inserter into pushf/pop or push/popf sequences).
1186 let mayLoad = 1, mayStore = 1, usesCustomInserter = 1,
1187 SchedRW = [WriteRMW], Defs = [ESP] in {
1189 def RDFLAGS32 : PseudoI<(outs GR32:$dst), (ins),
1190 [(set GR32:$dst, (int_x86_flags_read_u32))]>,
1191 Requires<[Not64BitMode]>;
1194 def RDFLAGS64 : PseudoI<(outs GR64:$dst), (ins),
1195 [(set GR64:$dst, (int_x86_flags_read_u64))]>,
1196 Requires<[In64BitMode]>;
1199 let mayLoad = 1, mayStore = 1, usesCustomInserter = 1,
1200 SchedRW = [WriteRMW] in {
1201 let Defs = [ESP, EFLAGS], Uses = [ESP] in
1202 def WRFLAGS32 : PseudoI<(outs), (ins GR32:$src),
1203 [(int_x86_flags_write_u32 GR32:$src)]>,
1204 Requires<[Not64BitMode]>;
1206 let Defs = [RSP, EFLAGS], Uses = [RSP] in
1207 def WRFLAGS64 : PseudoI<(outs), (ins GR64:$src),
1208 [(int_x86_flags_write_u64 GR64:$src)]>,
1209 Requires<[In64BitMode]>;
// POPF/PUSHF: pop into / push from EFLAGS (non-64-bit forms).
1212 let Defs = [ESP, EFLAGS], Uses = [ESP], mayLoad = 1, hasSideEffects=0,
1213 SchedRW = [WriteLoad] in {
1214 def POPF16 : I<0x9D, RawFrm, (outs), (ins), "popf{w}", [], IIC_POP_F>,
1216 def POPF32 : I<0x9D, RawFrm, (outs), (ins), "popf{l|d}", [], IIC_POP_FD>,
1217 OpSize32, Requires<[Not64BitMode]>;
1220 let Defs = [ESP], Uses = [ESP, EFLAGS], mayStore = 1, hasSideEffects=0,
1221 SchedRW = [WriteStore] in {
1222 def PUSHF16 : I<0x9C, RawFrm, (outs), (ins), "pushf{w}", [], IIC_PUSH_F>,
1224 def PUSHF32 : I<0x9C, RawFrm, (outs), (ins), "pushf{l|d}", [], IIC_PUSH_F>,
1225 OpSize32, Requires<[Not64BitMode]>;
// 64-bit mode PUSH/POP: operand size defaults to 64 bits; OpSize32 here
// marks the default operand-size attribute, not a 32-bit operation.
1228 let Defs = [RSP], Uses = [RSP], hasSideEffects=0 in {
1229 let mayLoad = 1, SchedRW = [WriteLoad] in {
1230 def POP64r : I<0x58, AddRegFrm, (outs GR64:$reg), (ins), "pop{q}\t$reg", [],
1231 IIC_POP_REG>, OpSize32, Requires<[In64BitMode]>;
1232 def POP64rmr: I<0x8F, MRM0r, (outs GR64:$reg), (ins), "pop{q}\t$reg", [],
1233 IIC_POP_REG>, OpSize32, Requires<[In64BitMode]>;
1234 } // mayLoad, SchedRW
1235 let mayLoad = 1, mayStore = 1, SchedRW = [WriteRMW] in
1236 def POP64rmm: I<0x8F, MRM0m, (outs), (ins i64mem:$dst), "pop{q}\t$dst", [],
1237 IIC_POP_MEM>, OpSize32, Requires<[In64BitMode]>;
1238 let mayStore = 1, SchedRW = [WriteStore] in {
1239 def PUSH64r : I<0x50, AddRegFrm, (outs), (ins GR64:$reg), "push{q}\t$reg", [],
1240 IIC_PUSH_REG>, OpSize32, Requires<[In64BitMode]>;
1241 def PUSH64rmr: I<0xFF, MRM6r, (outs), (ins GR64:$reg), "push{q}\t$reg", [],
1242 IIC_PUSH_REG>, OpSize32, Requires<[In64BitMode]>;
1243 } // mayStore, SchedRW
1244 let mayLoad = 1, mayStore = 1, SchedRW = [WriteRMW] in {
1245 def PUSH64rmm: I<0xFF, MRM6m, (outs), (ins i64mem:$src), "push{q}\t$src", [],
1246 IIC_PUSH_MEM>, OpSize32, Requires<[In64BitMode]>;
1247 } // mayLoad, mayStore, SchedRW
// Immediate pushes in 64-bit mode: imm8 and imm32 are sign-extended to 64.
1250 let Defs = [RSP], Uses = [RSP], hasSideEffects = 0, mayStore = 1,
1251 SchedRW = [WriteStore] in {
1252 def PUSH64i8 : Ii8<0x6a, RawFrm, (outs), (ins i64i8imm:$imm),
1253 "push{q}\t$imm", [], IIC_PUSH_IMM>, OpSize32,
1254 Requires<[In64BitMode]>;
1255 def PUSH64i32 : Ii32S<0x68, RawFrm, (outs), (ins i64i32imm:$imm),
1256 "push{q}\t$imm", [], IIC_PUSH_IMM>, OpSize32,
1257 Requires<[In64BitMode]>;
1260 let Defs = [RSP, EFLAGS], Uses = [RSP], mayLoad = 1, hasSideEffects=0 in
1261 def POPF64 : I<0x9D, RawFrm, (outs), (ins), "popfq", [], IIC_POP_FD>,
1262 OpSize32, Requires<[In64BitMode]>, Sched<[WriteLoad]>;
1263 let Defs = [RSP], Uses = [RSP, EFLAGS], mayStore = 1, hasSideEffects=0 in
1264 def PUSHF64 : I<0x9C, RawFrm, (outs), (ins), "pushfq", [], IIC_PUSH_F>,
1265 OpSize32, Requires<[In64BitMode]>, Sched<[WriteStore]>;
// POPA/PUSHA: pop/push all eight GPRs; invalid in 64-bit mode.
1267 let Defs = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP], Uses = [ESP],
1268 mayLoad = 1, hasSideEffects = 0, SchedRW = [WriteLoad] in {
1269 def POPA32 : I<0x61, RawFrm, (outs), (ins), "popal", [], IIC_POP_A>,
1270 OpSize32, Requires<[Not64BitMode]>;
1271 def POPA16 : I<0x61, RawFrm, (outs), (ins), "popaw", [], IIC_POP_A>,
1272 OpSize16, Requires<[Not64BitMode]>;
1274 let Defs = [ESP], Uses = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP],
1275 mayStore = 1, hasSideEffects = 0, SchedRW = [WriteStore] in {
1276 def PUSHA32 : I<0x60, RawFrm, (outs), (ins), "pushal", [], IIC_PUSH_A>,
1277 OpSize32, Requires<[Not64BitMode]>;
1278 def PUSHA16 : I<0x60, RawFrm, (outs), (ins), "pushaw", [], IIC_PUSH_A>,
1279 OpSize16, Requires<[Not64BitMode]>;
// BSWAP: byte-reverse a register in place (tied $src = $dst).
1282 let Constraints = "$src = $dst", SchedRW = [WriteALU] in {
1283 // GR32 = bswap GR32
1284 def BSWAP32r : I<0xC8, AddRegFrm,
1285 (outs GR32:$dst), (ins GR32:$src),
1287 [(set GR32:$dst, (bswap GR32:$src))], IIC_BSWAP>, OpSize32, TB;
1289 def BSWAP64r : RI<0xC8, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
1291 [(set GR64:$dst, (bswap GR64:$src))], IIC_BSWAP>, TB;
1292 } // Constraints = "$src = $dst", SchedRW
1294 // Bit scan instructions.
// BSF (scan forward) / BSR (scan reverse) produce both a result register
// and EFLAGS (ZF set when the source is zero), modeled as a second result.
1295 let Defs = [EFLAGS] in {
1296 def BSF16rr : I<0xBC, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
1297 "bsf{w}\t{$src, $dst|$dst, $src}",
1298 [(set GR16:$dst, EFLAGS, (X86bsf GR16:$src))],
1299 IIC_BIT_SCAN_REG>, PS, OpSize16, Sched<[WriteShift]>;
1300 def BSF16rm : I<0xBC, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
1301 "bsf{w}\t{$src, $dst|$dst, $src}",
1302 [(set GR16:$dst, EFLAGS, (X86bsf (loadi16 addr:$src)))],
1303 IIC_BIT_SCAN_MEM>, PS, OpSize16, Sched<[WriteShiftLd]>;
1304 def BSF32rr : I<0xBC, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
1305 "bsf{l}\t{$src, $dst|$dst, $src}",
1306 [(set GR32:$dst, EFLAGS, (X86bsf GR32:$src))],
1307 IIC_BIT_SCAN_REG>, PS, OpSize32, Sched<[WriteShift]>;
1308 def BSF32rm : I<0xBC, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
1309 "bsf{l}\t{$src, $dst|$dst, $src}",
1310 [(set GR32:$dst, EFLAGS, (X86bsf (loadi32 addr:$src)))],
1311 IIC_BIT_SCAN_MEM>, PS, OpSize32, Sched<[WriteShiftLd]>;
1312 def BSF64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
1313 "bsf{q}\t{$src, $dst|$dst, $src}",
1314 [(set GR64:$dst, EFLAGS, (X86bsf GR64:$src))],
1315 IIC_BIT_SCAN_REG>, PS, Sched<[WriteShift]>;
1316 def BSF64rm : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
1317 "bsf{q}\t{$src, $dst|$dst, $src}",
1318 [(set GR64:$dst, EFLAGS, (X86bsf (loadi64 addr:$src)))],
1319 IIC_BIT_SCAN_MEM>, PS, Sched<[WriteShiftLd]>;
1321 def BSR16rr : I<0xBD, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
1322 "bsr{w}\t{$src, $dst|$dst, $src}",
1323 [(set GR16:$dst, EFLAGS, (X86bsr GR16:$src))],
1324 IIC_BIT_SCAN_REG>, PS, OpSize16, Sched<[WriteShift]>;
1325 def BSR16rm : I<0xBD, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
1326 "bsr{w}\t{$src, $dst|$dst, $src}",
1327 [(set GR16:$dst, EFLAGS, (X86bsr (loadi16 addr:$src)))],
1328 IIC_BIT_SCAN_MEM>, PS, OpSize16, Sched<[WriteShiftLd]>;
1329 def BSR32rr : I<0xBD, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
1330 "bsr{l}\t{$src, $dst|$dst, $src}",
1331 [(set GR32:$dst, EFLAGS, (X86bsr GR32:$src))],
1332 IIC_BIT_SCAN_REG>, PS, OpSize32, Sched<[WriteShift]>;
1333 def BSR32rm : I<0xBD, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
1334 "bsr{l}\t{$src, $dst|$dst, $src}",
1335 [(set GR32:$dst, EFLAGS, (X86bsr (loadi32 addr:$src)))],
1336 IIC_BIT_SCAN_MEM>, PS, OpSize32, Sched<[WriteShiftLd]>;
1337 def BSR64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
1338 "bsr{q}\t{$src, $dst|$dst, $src}",
1339 [(set GR64:$dst, EFLAGS, (X86bsr GR64:$src))],
1340 IIC_BIT_SCAN_REG>, PS, Sched<[WriteShift]>;
1341 def BSR64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
1342 "bsr{q}\t{$src, $dst|$dst, $src}",
1343 [(set GR64:$dst, EFLAGS, (X86bsr (loadi64 addr:$src)))],
1344 IIC_BIT_SCAN_MEM>, PS, Sched<[WriteShiftLd]>;
1345 } // Defs = [EFLAGS]
1347 let SchedRW = [WriteMicrocoded] in {
1348 // These use the DF flag in the EFLAGS register to inc or dec EDI and ESI
// MOVS: copy [ESI] -> [EDI], stepping both pointers by the element size.
1349 let Defs = [EDI,ESI], Uses = [EDI,ESI,EFLAGS] in {
1350 def MOVSB : I<0xA4, RawFrmDstSrc, (outs), (ins dstidx8:$dst, srcidx8:$src),
1351 "movsb\t{$src, $dst|$dst, $src}", [], IIC_MOVS>;
1352 def MOVSW : I<0xA5, RawFrmDstSrc, (outs), (ins dstidx16:$dst, srcidx16:$src),
1353 "movsw\t{$src, $dst|$dst, $src}", [], IIC_MOVS>, OpSize16;
1354 def MOVSL : I<0xA5, RawFrmDstSrc, (outs), (ins dstidx32:$dst, srcidx32:$src),
1355 "movs{l|d}\t{$src, $dst|$dst, $src}", [], IIC_MOVS>, OpSize32;
1356 def MOVSQ : RI<0xA5, RawFrmDstSrc, (outs), (ins dstidx64:$dst, srcidx64:$src),
1357 "movsq\t{$src, $dst|$dst, $src}", [], IIC_MOVS>;
1360 // These use the DF flag in the EFLAGS register to inc or dec EDI
// STOS: store AL/AX/EAX/RAX to [EDI], stepping EDI only.
1361 let Defs = [EDI], Uses = [AL,EDI,EFLAGS] in
1362 def STOSB : I<0xAA, RawFrmDst, (outs), (ins dstidx8:$dst),
1363 "stosb\t{%al, $dst|$dst, al}", [], IIC_STOS>;
1364 let Defs = [EDI], Uses = [AX,EDI,EFLAGS] in
1365 def STOSW : I<0xAB, RawFrmDst, (outs), (ins dstidx16:$dst),
1366 "stosw\t{%ax, $dst|$dst, ax}", [], IIC_STOS>, OpSize16;
1367 let Defs = [EDI], Uses = [EAX,EDI,EFLAGS] in
1368 def STOSL : I<0xAB, RawFrmDst, (outs), (ins dstidx32:$dst),
1369 "stos{l|d}\t{%eax, $dst|$dst, eax}", [], IIC_STOS>, OpSize32;
1370 let Defs = [RDI], Uses = [RAX,RDI,EFLAGS] in
1371 def STOSQ : RI<0xAB, RawFrmDst, (outs), (ins dstidx64:$dst),
1372 "stosq\t{%rax, $dst|$dst, rax}", [], IIC_STOS>;
1374 // These use the DF flag in the EFLAGS register to inc or dec EDI
// SCAS: compare accumulator with [EDI]; also sets EFLAGS from the compare.
1375 let Defs = [EDI,EFLAGS], Uses = [AL,EDI,EFLAGS] in
1376 def SCASB : I<0xAE, RawFrmDst, (outs), (ins dstidx8:$dst),
1377 "scasb\t{$dst, %al|al, $dst}", [], IIC_SCAS>;
1378 let Defs = [EDI,EFLAGS], Uses = [AX,EDI,EFLAGS] in
1379 def SCASW : I<0xAF, RawFrmDst, (outs), (ins dstidx16:$dst),
1380 "scasw\t{$dst, %ax|ax, $dst}", [], IIC_SCAS>, OpSize16;
1381 let Defs = [EDI,EFLAGS], Uses = [EAX,EDI,EFLAGS] in
1382 def SCASL : I<0xAF, RawFrmDst, (outs), (ins dstidx32:$dst),
1383 "scas{l|d}\t{$dst, %eax|eax, $dst}", [], IIC_SCAS>, OpSize32;
1384 let Defs = [EDI,EFLAGS], Uses = [RAX,EDI,EFLAGS] in
1385 def SCASQ : RI<0xAF, RawFrmDst, (outs), (ins dstidx64:$dst),
1386 "scasq\t{$dst, %rax|rax, $dst}", [], IIC_SCAS>;
1388 // These use the DF flag in the EFLAGS register to inc or dec EDI and ESI
// CMPS: compare [ESI] with [EDI]; sets EFLAGS and steps both pointers.
1389 let Defs = [EDI,ESI,EFLAGS], Uses = [EDI,ESI,EFLAGS] in {
1390 def CMPSB : I<0xA6, RawFrmDstSrc, (outs), (ins dstidx8:$dst, srcidx8:$src),
1391 "cmpsb\t{$dst, $src|$src, $dst}", [], IIC_CMPS>;
1392 def CMPSW : I<0xA7, RawFrmDstSrc, (outs), (ins dstidx16:$dst, srcidx16:$src),
1393 "cmpsw\t{$dst, $src|$src, $dst}", [], IIC_CMPS>, OpSize16;
1394 def CMPSL : I<0xA7, RawFrmDstSrc, (outs), (ins dstidx32:$dst, srcidx32:$src),
1395 "cmps{l|d}\t{$dst, $src|$src, $dst}", [], IIC_CMPS>, OpSize32;
1396 def CMPSQ : RI<0xA7, RawFrmDstSrc, (outs), (ins dstidx64:$dst, srcidx64:$src),
1397 "cmpsq\t{$dst, $src|$src, $dst}", [], IIC_CMPS>;
1401 //===----------------------------------------------------------------------===//
1402 // Move Instructions.
// Register-to-register moves. No patterns: the register allocator / copy
// lowering emits these directly, so they carry hasSideEffects = 0 only.
1404 let SchedRW = [WriteMove] in {
1405 let hasSideEffects = 0 in {
1406 def MOV8rr : I<0x88, MRMDestReg, (outs GR8 :$dst), (ins GR8 :$src),
1407 "mov{b}\t{$src, $dst|$dst, $src}", [], IIC_MOV>;
1408 def MOV16rr : I<0x89, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
1409 "mov{w}\t{$src, $dst|$dst, $src}", [], IIC_MOV>, OpSize16;
1410 def MOV32rr : I<0x89, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
1411 "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV>, OpSize32;
1412 def MOV64rr : RI<0x89, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
1413 "mov{q}\t{$src, $dst|$dst, $src}", [], IIC_MOV>;
// Immediate-to-register moves: rematerializable and as cheap as a copy,
// so the register allocator may re-emit them instead of spilling.
1416 let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
1417 def MOV8ri : Ii8 <0xB0, AddRegFrm, (outs GR8 :$dst), (ins i8imm :$src),
1418 "mov{b}\t{$src, $dst|$dst, $src}",
1419 [(set GR8:$dst, imm:$src)], IIC_MOV>;
1420 def MOV16ri : Ii16<0xB8, AddRegFrm, (outs GR16:$dst), (ins i16imm:$src),
1421 "mov{w}\t{$src, $dst|$dst, $src}",
1422 [(set GR16:$dst, imm:$src)], IIC_MOV>, OpSize16;
1423 def MOV32ri : Ii32<0xB8, AddRegFrm, (outs GR32:$dst), (ins i32imm:$src),
1424 "mov{l}\t{$src, $dst|$dst, $src}",
1425 [(set GR32:$dst, relocImm:$src)], IIC_MOV>, OpSize32;
// 64-bit move of a sign-extended 32-bit immediate (C7 /0 form).
1426 def MOV64ri32 : RIi32S<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src),
1427 "mov{q}\t{$src, $dst|$dst, $src}",
1428 [(set GR64:$dst, i64immSExt32:$src)], IIC_MOV>;
// Full 64-bit immediate (movabs): rematerializable but 10 bytes long, so
// not marked isAsCheapAsAMove.
1430 let isReMaterializable = 1 in {
1431 def MOV64ri : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
1432 "movabs{q}\t{$src, $dst|$dst, $src}",
1433 [(set GR64:$dst, relocImm:$src)], IIC_MOV>;
1436 // Longer forms that use a ModR/M byte. Needed for disassembler
1437 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
1438 def MOV8ri_alt : Ii8 <0xC6, MRM0r, (outs GR8 :$dst), (ins i8imm :$src),
1439 "mov{b}\t{$src, $dst|$dst, $src}", [], IIC_MOV>;
1440 def MOV16ri_alt : Ii16<0xC7, MRM0r, (outs GR16:$dst), (ins i16imm:$src),
1441 "mov{w}\t{$src, $dst|$dst, $src}", [], IIC_MOV>, OpSize16;
1442 def MOV32ri_alt : Ii32<0xC7, MRM0r, (outs GR32:$dst), (ins i32imm:$src),
1443 "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV>, OpSize32;
// Immediate-to-memory stores; the _su immediate fragments avoid these
// forms when optimizing for size would prefer a register.
1447 let SchedRW = [WriteStore] in {
1448 def MOV8mi : Ii8 <0xC6, MRM0m, (outs), (ins i8mem :$dst, i8imm :$src),
1449 "mov{b}\t{$src, $dst|$dst, $src}",
1450 [(store (i8 imm8_su:$src), addr:$dst)], IIC_MOV_MEM>;
1451 def MOV16mi : Ii16<0xC7, MRM0m, (outs), (ins i16mem:$dst, i16imm:$src),
1452 "mov{w}\t{$src, $dst|$dst, $src}",
1453 [(store (i16 imm16_su:$src), addr:$dst)], IIC_MOV_MEM>, OpSize16;
1454 def MOV32mi : Ii32<0xC7, MRM0m, (outs), (ins i32mem:$dst, i32imm:$src),
1455 "mov{l}\t{$src, $dst|$dst, $src}",
1456 [(store (i32 imm32_su:$src), addr:$dst)], IIC_MOV_MEM>, OpSize32;
1457 def MOV64mi32 : RIi32S<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src),
1458 "mov{q}\t{$src, $dst|$dst, $src}",
1459 [(store i64immSExt32_su:$src, addr:$dst)], IIC_MOV_MEM>;
1462 let hasSideEffects = 0 in {
1464 /// Memory offset versions of moves. The immediate is an address mode sized
1465 /// offset from the segment base.
// Naming: MOV<op-size>ao<addr-size> loads the accumulator from an absolute
// offset; MOV<op-size>o<addr-size>a stores the accumulator to one. The
// AdSize16/32/64 modifiers select the address-size of the offset operand.
1466 let SchedRW = [WriteALU] in {
1467 let mayLoad = 1 in {
1469 def MOV8ao32 : Ii32<0xA0, RawFrmMemOffs, (outs), (ins offset32_8:$src),
1470 "mov{b}\t{$src, %al|al, $src}", [], IIC_MOV_MEM>,
1473 def MOV16ao32 : Ii32<0xA1, RawFrmMemOffs, (outs), (ins offset32_16:$src),
1474 "mov{w}\t{$src, %ax|ax, $src}", [], IIC_MOV_MEM>,
1477 def MOV32ao32 : Ii32<0xA1, RawFrmMemOffs, (outs), (ins offset32_32:$src),
1478 "mov{l}\t{$src, %eax|eax, $src}", [], IIC_MOV_MEM>,
1481 def MOV64ao32 : RIi32<0xA1, RawFrmMemOffs, (outs), (ins offset32_64:$src),
1482 "mov{q}\t{$src, %rax|rax, $src}", [], IIC_MOV_MEM>,
1486 def MOV8ao16 : Ii16<0xA0, RawFrmMemOffs, (outs), (ins offset16_8:$src),
1487 "mov{b}\t{$src, %al|al, $src}", [], IIC_MOV_MEM>, AdSize16;
1489 def MOV16ao16 : Ii16<0xA1, RawFrmMemOffs, (outs), (ins offset16_16:$src),
1490 "mov{w}\t{$src, %ax|ax, $src}", [], IIC_MOV_MEM>,
1493 def MOV32ao16 : Ii16<0xA1, RawFrmMemOffs, (outs), (ins offset16_32:$src),
1494 "mov{l}\t{$src, %eax|eax, $src}", [], IIC_MOV_MEM>,
1497 let mayStore = 1 in {
1499 def MOV8o32a : Ii32<0xA2, RawFrmMemOffs, (outs), (ins offset32_8:$dst),
1500 "mov{b}\t{%al, $dst|$dst, al}", [], IIC_MOV_MEM>, AdSize32;
1502 def MOV16o32a : Ii32<0xA3, RawFrmMemOffs, (outs), (ins offset32_16:$dst),
1503 "mov{w}\t{%ax, $dst|$dst, ax}", [], IIC_MOV_MEM>,
1506 def MOV32o32a : Ii32<0xA3, RawFrmMemOffs, (outs), (ins offset32_32:$dst),
1507 "mov{l}\t{%eax, $dst|$dst, eax}", [], IIC_MOV_MEM>,
1510 def MOV64o32a : RIi32<0xA3, RawFrmMemOffs, (outs), (ins offset32_64:$dst),
1511 "mov{q}\t{%rax, $dst|$dst, rax}", [], IIC_MOV_MEM>,
1515 def MOV8o16a : Ii16<0xA2, RawFrmMemOffs, (outs), (ins offset16_8:$dst),
1516 "mov{b}\t{%al, $dst|$dst, al}", [], IIC_MOV_MEM>, AdSize16;
1518 def MOV16o16a : Ii16<0xA3, RawFrmMemOffs, (outs), (ins offset16_16:$dst),
1519 "mov{w}\t{%ax, $dst|$dst, ax}", [], IIC_MOV_MEM>,
1522 def MOV32o16a : Ii16<0xA3, RawFrmMemOffs, (outs), (ins offset16_32:$dst),
1523 "mov{l}\t{%eax, $dst|$dst, eax}", [], IIC_MOV_MEM>,
1528 // These forms all have full 64-bit absolute addresses in their instructions
1529 // and use the movabs mnemonic to indicate this specific form.
1530 let mayLoad = 1 in {
1532 def MOV8ao64 : RIi64_NOREX<0xA0, RawFrmMemOffs, (outs), (ins offset64_8:$src),
1533 "movabs{b}\t{$src, %al|al, $src}", []>, AdSize64;
1535 def MOV16ao64 : RIi64_NOREX<0xA1, RawFrmMemOffs, (outs), (ins offset64_16:$src),
1536 "movabs{w}\t{$src, %ax|ax, $src}", []>, OpSize16, AdSize64;
1538 def MOV32ao64 : RIi64_NOREX<0xA1, RawFrmMemOffs, (outs), (ins offset64_32:$src),
1539 "movabs{l}\t{$src, %eax|eax, $src}", []>, OpSize32,
1542 def MOV64ao64 : RIi64<0xA1, RawFrmMemOffs, (outs), (ins offset64_64:$src),
1543 "movabs{q}\t{$src, %rax|rax, $src}", []>, AdSize64;
1546 let mayStore = 1 in {
1548 def MOV8o64a : RIi64_NOREX<0xA2, RawFrmMemOffs, (outs), (ins offset64_8:$dst),
1549 "movabs{b}\t{%al, $dst|$dst, al}", []>, AdSize64;
1551 def MOV16o64a : RIi64_NOREX<0xA3, RawFrmMemOffs, (outs), (ins offset64_16:$dst),
1552 "movabs{w}\t{%ax, $dst|$dst, ax}", []>, OpSize16, AdSize64;
1554 def MOV32o64a : RIi64_NOREX<0xA3, RawFrmMemOffs, (outs), (ins offset64_32:$dst),
1555 "movabs{l}\t{%eax, $dst|$dst, eax}", []>, OpSize32,
1558 def MOV64o64a : RIi64<0xA3, RawFrmMemOffs, (outs), (ins offset64_64:$dst),
1559 "movabs{q}\t{%rax, $dst|$dst, rax}", []>, AdSize64;
1561 } // hasSideEffects = 0
// Alternate register-to-register MOV encodings (0x8A/0x8B, MRMSrcReg form
// instead of the MRMDestReg 0x88/0x89 form).  isCodeGenOnly keeps them out
// of assembly matching; ForceDisassemble keeps the disassembler able to
// decode them.  No patterns: these are encoding aliases of MOV*rr.
1563 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
1564     SchedRW = [WriteMove] in {
1565 def MOV8rr_REV : I<0x8A, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src),
1566                    "mov{b}\t{$src, $dst|$dst, $src}", [], IIC_MOV>;
1567 def MOV16rr_REV : I<0x8B, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
1568                     "mov{w}\t{$src, $dst|$dst, $src}", [], IIC_MOV>, OpSize16;
1569 def MOV32rr_REV : I<0x8B, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
1570                     "mov{l}\t{$src, $dst|$dst, $src}", [], IIC_MOV>, OpSize32;
1571 def MOV64rr_REV : RI<0x8B, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
1572                      "mov{q}\t{$src, $dst|$dst, $src}", [], IIC_MOV>;
// Plain MOV loads (0x8A/0x8B) with ISel patterns.  canFoldAsLoad lets the
// peephole fold these into users; isReMaterializable lets the register
// allocator re-execute the load instead of spilling.
1575 let canFoldAsLoad = 1, isReMaterializable = 1, SchedRW = [WriteLoad] in {
1576 def MOV8rm : I<0x8A, MRMSrcMem, (outs GR8 :$dst), (ins i8mem :$src),
1577                "mov{b}\t{$src, $dst|$dst, $src}",
1578                [(set GR8:$dst, (loadi8 addr:$src))], IIC_MOV_MEM>;
1579 def MOV16rm : I<0x8B, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
1580                 "mov{w}\t{$src, $dst|$dst, $src}",
1581                 [(set GR16:$dst, (loadi16 addr:$src))], IIC_MOV_MEM>, OpSize16;
1582 def MOV32rm : I<0x8B, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
1583                 "mov{l}\t{$src, $dst|$dst, $src}",
1584                 [(set GR32:$dst, (loadi32 addr:$src))], IIC_MOV_MEM>, OpSize32;
1585 def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
1586                  "mov{q}\t{$src, $dst|$dst, $src}",
1587                  [(set GR64:$dst, (load addr:$src))], IIC_MOV_MEM>;
// Plain MOV stores (0x88/0x89) with ISel patterns.
1590 let SchedRW = [WriteStore] in {
1591 def MOV8mr : I<0x88, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src),
1592                "mov{b}\t{$src, $dst|$dst, $src}",
1593                [(store GR8:$src, addr:$dst)], IIC_MOV_MEM>;
1594 def MOV16mr : I<0x89, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
1595                 "mov{w}\t{$src, $dst|$dst, $src}",
1596                 [(store GR16:$src, addr:$dst)], IIC_MOV_MEM>, OpSize16;
1597 def MOV32mr : I<0x89, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
1598                 "mov{l}\t{$src, $dst|$dst, $src}",
1599                 [(store GR32:$src, addr:$dst)], IIC_MOV_MEM>, OpSize32;
1600 def MOV64mr : RI<0x89, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
1601                  "mov{q}\t{$src, $dst|$dst, $src}",
1602                  [(store GR64:$src, addr:$dst)], IIC_MOV_MEM>;
1605 // Versions of MOV8rr, MOV8mr, and MOV8rm that use i8mem_NOREX and GR8_NOREX so
1606 // that they can be used for copying and storing h registers, which can't be
1607 // encoded when a REX prefix is present.
// isCodeGenOnly: these share encodings with the plain MOV8 forms and exist
// only so the register allocator can constrain operands to GR8_NOREX.
1608 let isCodeGenOnly = 1 in {
1609 let hasSideEffects = 0 in
1610 def MOV8rr_NOREX : I<0x88, MRMDestReg,
1611                      (outs GR8_NOREX:$dst), (ins GR8_NOREX:$src),
1612                      "mov{b}\t{$src, $dst|$dst, $src} # NOREX", [], IIC_MOV>,
1614 let mayStore = 1, hasSideEffects = 0 in
1615 def MOV8mr_NOREX : I<0x88, MRMDestMem,
1616                      (outs), (ins i8mem_NOREX:$dst, GR8_NOREX:$src),
1617                      "mov{b}\t{$src, $dst|$dst, $src} # NOREX", [],
1618                      IIC_MOV_MEM>, Sched<[WriteStore]>;
1619 let mayLoad = 1, hasSideEffects = 0,
1620     canFoldAsLoad = 1, isReMaterializable = 1 in
1621 def MOV8rm_NOREX : I<0x8A, MRMSrcMem,
1622                      (outs GR8_NOREX:$dst), (ins i8mem_NOREX:$src),
1623                      "mov{b}\t{$src, $dst|$dst, $src} # NOREX", [],
1624                      IIC_MOV_MEM>, Sched<[WriteLoad]>;
1628 // Condition code ops, incl. set if equal/not equal/...
// SAHF: loads SF/ZF/AF/PF/CF from AH into EFLAGS (modelled via X86sahf).
// LAHF: stores those flags into AH.  Both are gated on the LAHF/SAHF
// feature predicate because they are not universally available in 64-bit
// mode.
1629 let SchedRW = [WriteALU] in {
1630 let Defs = [EFLAGS], Uses = [AH] in
1631 def SAHF     : I<0x9E, RawFrm, (outs),  (ins), "sahf",
1632                  [(set EFLAGS, (X86sahf AH))], IIC_AHF>,
1633                  Requires<[HasLAHFSAHF]>;
1634 let Defs = [AH], Uses = [EFLAGS], hasSideEffects = 0 in
1635 def LAHF     : I<0x9F, RawFrm, (outs),  (ins), "lahf", [],
1636                  IIC_AHF>, // AH = flags
1637                Requires<[HasLAHFSAHF]>;
1640 //===----------------------------------------------------------------------===//
1641 // Bit tests instructions: BT, BTS, BTR, BTC.
// BT copies the selected bit of the first operand into CF; only EFLAGS is
// written, so the register forms have ISel patterns via X86bt.
1643 let Defs = [EFLAGS] in {
1644 let SchedRW = [WriteALU] in {
1645 def BT16rr : I<0xA3, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2),
1646                "bt{w}\t{$src2, $src1|$src1, $src2}",
1647                [(set EFLAGS, (X86bt GR16:$src1, GR16:$src2))], IIC_BT_RR>,
1649 def BT32rr : I<0xA3, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2),
1650                "bt{l}\t{$src2, $src1|$src1, $src2}",
1651                [(set EFLAGS, (X86bt GR32:$src1, GR32:$src2))], IIC_BT_RR>,
1653 def BT64rr : RI<0xA3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
1654                 "bt{q}\t{$src2, $src1|$src1, $src2}",
1655                 [(set EFLAGS, (X86bt GR64:$src1, GR64:$src2))], IIC_BT_RR>, TB;
1658 // Unlike with the register+register form, the memory+register form of the
1659 // bt instruction does not ignore the high bits of the index. From ISel's
1660 // perspective, this is pretty bizarre. Make these instructions disassembly-only,
// i.e. keep the intentionally-disabled patterns below as comments.
1663 let mayLoad = 1, hasSideEffects = 0, SchedRW = [WriteALULd] in {
1664   def BT16mr : I<0xA3, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
1665                  "bt{w}\t{$src2, $src1|$src1, $src2}",
1666 //               [(X86bt (loadi16 addr:$src1), GR16:$src2),
1667 //                (implicit EFLAGS)]
1669                  >, OpSize16, TB, Requires<[FastBTMem]>;
1670   def BT32mr : I<0xA3, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
1671                  "bt{l}\t{$src2, $src1|$src1, $src2}",
1672 //               [(X86bt (loadi32 addr:$src1), GR32:$src2),
1673 //                (implicit EFLAGS)]
1675                  >, OpSize32, TB, Requires<[FastBTMem]>;
1676   def BT64mr : RI<0xA3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
1677                  "bt{q}\t{$src2, $src1|$src1, $src2}",
1678 //               [(X86bt (loadi64 addr:$src1), GR64:$src2),
1679 //                (implicit EFLAGS)]
// Register + sign-extended 8-bit-immediate forms (0xBA /4).
1684 let SchedRW = [WriteALU] in {
1685 def BT16ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR16:$src1, i16i8imm:$src2),
1686                 "bt{w}\t{$src2, $src1|$src1, $src2}",
1687                 [(set EFLAGS, (X86bt GR16:$src1, i16immSExt8:$src2))],
1688                 IIC_BT_RI>, OpSize16, TB;
1689 def BT32ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR32:$src1, i32i8imm:$src2),
1690                 "bt{l}\t{$src2, $src1|$src1, $src2}",
1691                 [(set EFLAGS, (X86bt GR32:$src1, i32immSExt8:$src2))],
1692                 IIC_BT_RI>, OpSize32, TB;
1693 def BT64ri8 : RIi8<0xBA, MRM4r, (outs), (ins GR64:$src1, i64i8imm:$src2),
1694                 "bt{q}\t{$src2, $src1|$src1, $src2}",
1695                 [(set EFLAGS, (X86bt GR64:$src1, i64immSExt8:$src2))],
1699 // Note that these instructions don't need FastBTMem because that
1700 // only applies when the other operand is in a register. When it's
1701 // an immediate, bt is still fast.
1702 let SchedRW = [WriteALU] in {
1703 def BT16mi8 : Ii8<0xBA, MRM4m, (outs), (ins i16mem:$src1, i16i8imm:$src2),
1704                 "bt{w}\t{$src2, $src1|$src1, $src2}",
1705                 [(set EFLAGS, (X86bt (loadi16 addr:$src1), i16immSExt8:$src2))
1706                  ], IIC_BT_MI>, OpSize16, TB;
1707 def BT32mi8 : Ii8<0xBA, MRM4m, (outs), (ins i32mem:$src1, i32i8imm:$src2),
1708                 "bt{l}\t{$src2, $src1|$src1, $src2}",
1709                 [(set EFLAGS, (X86bt (loadi32 addr:$src1), i32immSExt8:$src2))
1710                  ], IIC_BT_MI>, OpSize32, TB;
1711 def BT64mi8 : RIi8<0xBA, MRM4m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
1712                 "bt{q}\t{$src2, $src1|$src1, $src2}",
1713                 [(set EFLAGS, (X86bt (loadi64 addr:$src1),
1714                                      i64immSExt8:$src2))], IIC_BT_MI>, TB;
// BTC: bit test and complement — copies the selected bit to CF, then flips
// it in the destination.  No ISel patterns (hasSideEffects = 0, assembler/
// disassembler only); memory forms are read-modify-write.
1717 let hasSideEffects = 0 in {
1718 let SchedRW = [WriteALU] in {
1719 def BTC16rr : I<0xBB, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2),
1720                 "btc{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>,
1722 def BTC32rr : I<0xBB, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2),
1723                 "btc{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>,
1725 def BTC64rr : RI<0xBB, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
1726                  "btc{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>, TB;
1729 let mayLoad = 1, mayStore = 1, SchedRW = [WriteALULd, WriteRMW] in {
1730 def BTC16mr : I<0xBB, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
1731                 "btc{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>,
1733 def BTC32mr : I<0xBB, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
1734                 "btc{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>,
1736 def BTC64mr : RI<0xBB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
1737                  "btc{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>, TB;
// Immediate forms share opcode 0xBA with BT/BTR/BTS; /7 selects BTC.
1740 let SchedRW = [WriteALU] in {
1741 def BTC16ri8 : Ii8<0xBA, MRM7r, (outs), (ins GR16:$src1, i16i8imm:$src2),
1742                    "btc{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>,
1744 def BTC32ri8 : Ii8<0xBA, MRM7r, (outs), (ins GR32:$src1, i32i8imm:$src2),
1745                    "btc{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>,
1747 def BTC64ri8 : RIi8<0xBA, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2),
1748                     "btc{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>, TB;
1751 let mayLoad = 1, mayStore = 1, SchedRW = [WriteALULd, WriteRMW] in {
1752 def BTC16mi8 : Ii8<0xBA, MRM7m, (outs), (ins i16mem:$src1, i16i8imm:$src2),
1753                    "btc{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>,
1755 def BTC32mi8 : Ii8<0xBA, MRM7m, (outs), (ins i32mem:$src1, i32i8imm:$src2),
1756                    "btc{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>,
1758 def BTC64mi8 : RIi8<0xBA, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
1759                     "btc{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>, TB;
// BTR: bit test and reset — copies the selected bit to CF, then clears it
// in the destination register.  EFLAGS is written via the enclosing
// `let Defs = [EFLAGS]` region; no ISel patterns (assembler-only).
1762 let SchedRW = [WriteALU] in {
1763 def BTR16rr : I<0xB3, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2),
1764                 "btr{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>,
1766 def BTR32rr : I<0xB3, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2),
1767                 "btr{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>,
// Fix: BTR64rr previously omitted the IIC_BTX_RR itinerary that BTR16rr,
// BTR32rr and all of the BTC/BTS register forms specify, leaving it with
// the default itinerary.  Supplying it makes the scheduling model
// consistent across the family; encoding is unchanged.
1769 def BTR64rr : RI<0xB3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
1770                  "btr{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>, TB;
// BTR memory+register forms (read-modify-write) and 8-bit-immediate forms
// (opcode 0xBA /6).  Like BTC, these have no ISel patterns.
1773 let mayLoad = 1, mayStore = 1, SchedRW = [WriteALULd, WriteRMW] in {
1774 def BTR16mr : I<0xB3, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
1775                 "btr{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>,
1777 def BTR32mr : I<0xB3, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
1778                 "btr{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>,
1780 def BTR64mr : RI<0xB3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
1781                  "btr{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>, TB;
1784 let SchedRW = [WriteALU] in {
1785 def BTR16ri8 : Ii8<0xBA, MRM6r, (outs), (ins GR16:$src1, i16i8imm:$src2),
1786                    "btr{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>,
1788 def BTR32ri8 : Ii8<0xBA, MRM6r, (outs), (ins GR32:$src1, i32i8imm:$src2),
1789                    "btr{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>,
1791 def BTR64ri8 : RIi8<0xBA, MRM6r, (outs), (ins GR64:$src1, i64i8imm:$src2),
1792                     "btr{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>, TB;
1795 let mayLoad = 1, mayStore = 1, SchedRW = [WriteALULd, WriteRMW] in {
1796 def BTR16mi8 : Ii8<0xBA, MRM6m, (outs), (ins i16mem:$src1, i16i8imm:$src2),
1797                    "btr{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>,
1799 def BTR32mi8 : Ii8<0xBA, MRM6m, (outs), (ins i32mem:$src1, i32i8imm:$src2),
1800                    "btr{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>,
1802 def BTR64mi8 : RIi8<0xBA, MRM6m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
1803                     "btr{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>, TB;
// BTS: bit test and set — copies the selected bit to CF, then sets it in
// the destination.  Same four-form layout as BTC/BTR: rr, mr (RMW), ri8
// (0xBA /5), mi8 (RMW).
1806 let SchedRW = [WriteALU] in {
1807 def BTS16rr : I<0xAB, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2),
1808                 "bts{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>,
1810 def BTS32rr : I<0xAB, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2),
1811                 "bts{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>,
1813 def BTS64rr : RI<0xAB, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
1814                 "bts{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RR>, TB;
1817 let mayLoad = 1, mayStore = 1, SchedRW = [WriteALULd, WriteRMW] in {
1818 def BTS16mr : I<0xAB, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
1819               "bts{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>,
1821 def BTS32mr : I<0xAB, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
1822               "bts{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>,
1824 def BTS64mr : RI<0xAB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
1825                  "bts{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MR>, TB;
1828 let SchedRW = [WriteALU] in {
1829 def BTS16ri8 : Ii8<0xBA, MRM5r, (outs), (ins GR16:$src1, i16i8imm:$src2),
1830                    "bts{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>,
1832 def BTS32ri8 : Ii8<0xBA, MRM5r, (outs), (ins GR32:$src1, i32i8imm:$src2),
1833                    "bts{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>,
1835 def BTS64ri8 : RIi8<0xBA, MRM5r, (outs), (ins GR64:$src1, i64i8imm:$src2),
1836                     "bts{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_RI>, TB;
1839 let mayLoad = 1, mayStore = 1, SchedRW = [WriteALULd, WriteRMW] in {
1840 def BTS16mi8 : Ii8<0xBA, MRM5m, (outs), (ins i16mem:$src1, i16i8imm:$src2),
1841                    "bts{w}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>,
1843 def BTS32mi8 : Ii8<0xBA, MRM5m, (outs), (ins i32mem:$src1, i32i8imm:$src2),
1844                    "bts{l}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>,
1846 def BTS64mi8 : RIi8<0xBA, MRM5m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
1847                     "bts{q}\t{$src2, $src1|$src1, $src2}", [], IIC_BTX_MI>, TB;
1849 } // hasSideEffects = 0
1850 } // Defs = [EFLAGS]
1853 //===----------------------------------------------------------------------===//
1857 // Atomic swap. These are just normal xchg instructions. But since a memory
1858 // operand is referenced, the atomicity is ensured.
// The multiclass instantiates the four register widths; each def's pattern
// is built by !cast-ing `frag` plus a width suffix (e.g. "atomic_swap_8")
// into a PatFrag.  The "$val = $dst" constraint models xchg's in/out
// register operand.
1859 multiclass ATOMIC_SWAP<bits<8> opc8, bits<8> opc, string mnemonic, string frag,
1860                        InstrItinClass itin> {
1861   let Constraints = "$val = $dst", SchedRW = [WriteALULd, WriteRMW] in {
1862     def NAME#8rm  : I<opc8, MRMSrcMem, (outs GR8:$dst),
1863                       (ins GR8:$val, i8mem:$ptr),
1864                       !strconcat(mnemonic, "{b}\t{$val, $ptr|$ptr, $val}"),
1867                          (!cast<PatFrag>(frag # "_8") addr:$ptr, GR8:$val))],
1869     def NAME#16rm : I<opc, MRMSrcMem, (outs GR16:$dst),
1870                       (ins GR16:$val, i16mem:$ptr),
1871                       !strconcat(mnemonic, "{w}\t{$val, $ptr|$ptr, $val}"),
1874                          (!cast<PatFrag>(frag # "_16") addr:$ptr, GR16:$val))],
1876     def NAME#32rm : I<opc, MRMSrcMem, (outs GR32:$dst),
1877                       (ins GR32:$val, i32mem:$ptr),
1878                       !strconcat(mnemonic, "{l}\t{$val, $ptr|$ptr, $val}"),
1881                          (!cast<PatFrag>(frag # "_32") addr:$ptr, GR32:$val))],
1883     def NAME#64rm : RI<opc, MRMSrcMem, (outs GR64:$dst),
1884                        (ins GR64:$val, i64mem:$ptr),
1885                        !strconcat(mnemonic, "{q}\t{$val, $ptr|$ptr, $val}"),
1888                          (!cast<PatFrag>(frag # "_64") addr:$ptr, GR64:$val))],
// XCHG8rm/16rm/32rm/64rm: atomic swaps lowered from the atomic_swap ISD node.
1893 defm XCHG : ATOMIC_SWAP<0x86, 0x87, "xchg", "atomic_swap", IIC_XCHG_MEM>;
1895 // Swap between registers.
// Register-register xchg; "$val = $dst" ties the swapped register to the
// output.  No patterns — the compiler never needs a pure register swap.
1896 let SchedRW = [WriteALU] in {
1897 let Constraints = "$val = $dst" in {
1898 def XCHG8rr : I<0x86, MRMSrcReg, (outs GR8:$dst), (ins GR8:$val, GR8:$src),
1899                 "xchg{b}\t{$val, $src|$src, $val}", [], IIC_XCHG_REG>;
1900 def XCHG16rr : I<0x87, MRMSrcReg, (outs GR16:$dst), (ins GR16:$val, GR16:$src),
1901                  "xchg{w}\t{$val, $src|$src, $val}", [], IIC_XCHG_REG>,
1903 def XCHG32rr : I<0x87, MRMSrcReg, (outs GR32:$dst), (ins GR32:$val, GR32:$src),
1904                  "xchg{l}\t{$val, $src|$src, $val}", [], IIC_XCHG_REG>,
1906 def XCHG64rr : RI<0x87, MRMSrcReg, (outs GR64:$dst), (ins GR64:$val,GR64:$src),
1907                   "xchg{q}\t{$val, $src|$src, $val}", [], IIC_XCHG_REG>;
1910 // Swap between EAX and other registers.
// Short-form 0x90+rd encodings against the accumulator.
1911 let Uses = [AX], Defs = [AX] in
1912 def XCHG16ar : I<0x90, AddRegFrm, (outs), (ins GR16:$src),
1913                   "xchg{w}\t{$src, %ax|ax, $src}", [], IIC_XCHG_REG>, OpSize16;
1914 let Uses = [EAX], Defs = [EAX] in
1915 def XCHG32ar : I<0x90, AddRegFrm, (outs), (ins GR32:$src),
1916                  "xchg{l}\t{$src, %eax|eax, $src}", [], IIC_XCHG_REG>,
1917                OpSize32, Requires<[Not64BitMode]>;
1918 let Uses = [EAX], Defs = [EAX] in
1919 // Uses GR32_NOAX in 64-bit mode to prevent encoding using the 0x90 NOP encoding.
1920 // xchg %eax, %eax needs to clear upper 32-bits of RAX so is not a NOP.
1921 def XCHG32ar64 : I<0x90, AddRegFrm, (outs), (ins GR32_NOAX:$src),
1922                    "xchg{l}\t{$src, %eax|eax, $src}", [], IIC_XCHG_REG>,
1923                  OpSize32, Requires<[In64BitMode]>;
1924 let Uses = [RAX], Defs = [RAX] in
1925 def XCHG64ar : RI<0x90, AddRegFrm, (outs), (ins GR64:$src),
1926                   "xchg{q}\t{$src, %rax|rax, $src}", [], IIC_XCHG_REG>;
// XADD: exchange-and-add (0F C0/C1).  Register forms and RMW memory forms;
// no ISel patterns here (atomic lowering selects these elsewhere).
1929 let SchedRW = [WriteALU] in {
1930 def XADD8rr : I<0xC0, MRMDestReg, (outs GR8:$dst), (ins GR8:$src),
1931                 "xadd{b}\t{$src, $dst|$dst, $src}", [], IIC_XADD_REG>, TB;
1932 def XADD16rr : I<0xC1, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
1933                  "xadd{w}\t{$src, $dst|$dst, $src}", [], IIC_XADD_REG>, TB,
1935 def XADD32rr  : I<0xC1, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
1936                  "xadd{l}\t{$src, $dst|$dst, $src}", [], IIC_XADD_REG>, TB,
1938 def XADD64rr  : RI<0xC1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
1939                    "xadd{q}\t{$src, $dst|$dst, $src}", [], IIC_XADD_REG>, TB;
1942 let mayLoad = 1, mayStore = 1, SchedRW = [WriteALULd, WriteRMW] in {
1943 def XADD8rm   : I<0xC0, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src),
1944                  "xadd{b}\t{$src, $dst|$dst, $src}", [], IIC_XADD_MEM>, TB;
1945 def XADD16rm  : I<0xC1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
1946                   "xadd{w}\t{$src, $dst|$dst, $src}", [], IIC_XADD_MEM>, TB,
1948 def XADD32rm  : I<0xC1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
1949                   "xadd{l}\t{$src, $dst|$dst, $src}", [], IIC_XADD_MEM>, TB,
1951 def XADD64rm  : RI<0xC1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
1952                    "xadd{q}\t{$src, $dst|$dst, $src}", [], IIC_XADD_MEM>, TB;
// CMPXCHG: compare-and-exchange against the accumulator (0F B0/B1), plus
// the 8-byte/16-byte variants (0F C7 /1).  No patterns — atomic cmpxchg
// lowering selects these via custom code.
1956 let SchedRW = [WriteALU] in {
1957 def CMPXCHG8rr : I<0xB0, MRMDestReg, (outs GR8:$dst), (ins GR8:$src),
1958                    "cmpxchg{b}\t{$src, $dst|$dst, $src}", [],
1959                    IIC_CMPXCHG_REG8>, TB;
1960 def CMPXCHG16rr : I<0xB1, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
1961                     "cmpxchg{w}\t{$src, $dst|$dst, $src}", [],
1962                     IIC_CMPXCHG_REG>, TB, OpSize16;
1963 def CMPXCHG32rr  : I<0xB1, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
1964                      "cmpxchg{l}\t{$src, $dst|$dst, $src}", [],
1965                      IIC_CMPXCHG_REG>, TB, OpSize32;
1966 def CMPXCHG64rr  : RI<0xB1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
1967                       "cmpxchg{q}\t{$src, $dst|$dst, $src}", [],
1968                       IIC_CMPXCHG_REG>, TB;
1971 let SchedRW = [WriteALULd, WriteRMW] in {
1972 let mayLoad = 1, mayStore = 1 in {
1973 def CMPXCHG8rm   : I<0xB0, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src),
1974                      "cmpxchg{b}\t{$src, $dst|$dst, $src}", [],
1975                      IIC_CMPXCHG_MEM8>, TB;
1976 def CMPXCHG16rm  : I<0xB1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
1977                      "cmpxchg{w}\t{$src, $dst|$dst, $src}", [],
1978                      IIC_CMPXCHG_MEM>, TB, OpSize16;
1979 def CMPXCHG32rm  : I<0xB1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
1980                      "cmpxchg{l}\t{$src, $dst|$dst, $src}", [],
1981                      IIC_CMPXCHG_MEM>, TB, OpSize32;
1982 def CMPXCHG64rm  : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
1983                       "cmpxchg{q}\t{$src, $dst|$dst, $src}", [],
1984                       IIC_CMPXCHG_MEM>, TB;
// CMPXCHG8B compares EDX:EAX with m64 (EBX:ECX is the replacement);
// CMPXCHG16B is the 16-byte analogue and requires the cmpxchg16b feature.
1987 let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX] in
1988 def CMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i64mem:$dst),
1989                   "cmpxchg8b\t$dst", [], IIC_CMPXCHG_8B>, TB;
1991 let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX] in
1992 def CMPXCHG16B : RI<0xC7, MRM1m, (outs), (ins i128mem:$dst),
1993                     "cmpxchg16b\t$dst", [], IIC_CMPXCHG_16B>,
1994                     TB, Requires<[HasCmpxchg16b]>;
// Standalone prefix-byte "instructions".  These exist so the assembler and
// disassembler can handle a bare prefix byte; codegen emits fused forms.
1998 // Lock instruction prefix
1999 def LOCK_PREFIX : I<0xF0, RawFrm, (outs),  (ins), "lock", []>;
2001 // Rex64 instruction prefix
2002 def REX64_PREFIX : I<0x48, RawFrm, (outs),  (ins), "rex64", []>,
2003                    Requires<[In64BitMode]>;
2005 // Data16 instruction prefix
2006 def DATA16_PREFIX : I<0x66, RawFrm, (outs),  (ins), "data16", []>,
2007                     Requires<[Not16BitMode]>;
2009 // Data instruction prefix
2010 def DATA32_PREFIX : I<0x66, RawFrm, (outs),  (ins), "data32", []>,
2011                     Requires<[In16BitMode]>;
2013 // Repeat string operation instruction prefixes
2014 // These use ECX as the repeat count (hence Uses/Defs of ECX; EFLAGS is
// listed for the DF flag consulted by the prefixed string instruction).
2015 let Defs = [ECX], Uses = [ECX,EFLAGS] in {
2016 // Repeat (used with INS, OUTS, MOVS, LODS and STOS)
2017 def REP_PREFIX : I<0xF3, RawFrm, (outs),  (ins), "rep", []>;
2018 // Repeat while not equal (used with CMPS and SCAS)
2019 def REPNE_PREFIX : I<0xF2, RawFrm, (outs),  (ins), "repne", []>;
2023 // String manipulation instructions
// LODS loads from [SI/ESI/RSI] into the accumulator and advances the index
// per the DF flag.  NOTE(review): the implicit index register is modelled
// as ESI even for LODSQ — confirm against the full file / later fixes.
2024 let SchedRW = [WriteMicrocoded] in {
2025 // These use the DF flag in the EFLAGS register to inc or dec EDI and ESI
2026 let Defs = [AL,ESI], Uses = [ESI,EFLAGS] in
2027 def LODSB : I<0xAC, RawFrmSrc, (outs), (ins srcidx8:$src),
2028               "lodsb\t{$src, %al|al, $src}", [], IIC_LODS>;
2029 let Defs = [AX,ESI], Uses = [ESI,EFLAGS] in
2030 def LODSW : I<0xAD, RawFrmSrc, (outs), (ins srcidx16:$src),
2031               "lodsw\t{$src, %ax|ax, $src}", [], IIC_LODS>, OpSize16;
2032 let Defs = [EAX,ESI], Uses = [ESI,EFLAGS] in
2033 def LODSL : I<0xAD, RawFrmSrc, (outs), (ins srcidx32:$src),
2034               "lods{l|d}\t{$src, %eax|eax, $src}", [], IIC_LODS>, OpSize32;
2035 let Defs = [RAX,ESI], Uses = [ESI,EFLAGS] in
2036 def LODSQ : RI<0xAD, RawFrmSrc, (outs), (ins srcidx64:$src),
2037                "lodsq\t{$src, %rax|rax, $src}", [], IIC_LODS>;
// OUTS writes [ESI] to the port in DX, advancing ESI per DF.
2040 let SchedRW = [WriteSystem] in {
2041 // These use the DF flag in the EFLAGS register to inc or dec EDI and ESI
2042 let Defs = [ESI], Uses = [DX,ESI,EFLAGS] in {
2043 def OUTSB : I<0x6E, RawFrmSrc, (outs), (ins srcidx8:$src),
2044               "outsb\t{$src, %dx|dx, $src}", [], IIC_OUTS>;
2045 def OUTSW : I<0x6F, RawFrmSrc, (outs), (ins srcidx16:$src),
2046               "outsw\t{$src, %dx|dx, $src}", [], IIC_OUTS>, OpSize16;
2047 def OUTSL : I<0x6F, RawFrmSrc, (outs), (ins srcidx32:$src),
2048               "outs{l|d}\t{$src, %dx|dx, $src}", [], IIC_OUTS>, OpSize32;
// INS reads the port in DX into [EDI], advancing EDI per DF.
2051 // These use the DF flag in the EFLAGS register to inc or dec EDI and ESI
2052 let Defs = [EDI], Uses = [DX,EDI,EFLAGS] in {
2053 def INSB : I<0x6C, RawFrmDst, (outs), (ins dstidx8:$dst),
2054              "insb\t{%dx, $dst|$dst, dx}", [], IIC_INS>;
2055 def INSW : I<0x6D, RawFrmDst, (outs), (ins dstidx16:$dst),
2056              "insw\t{%dx, $dst|$dst, dx}", [], IIC_INS>,  OpSize16;
2057 def INSL : I<0x6D, RawFrmDst, (outs), (ins dstidx32:$dst),
2058              "ins{l|d}\t{%dx, $dst|$dst, dx}", [], IIC_INS>, OpSize32;
2062 // Flag instructions
// Single-byte flag manipulation (carry, interrupt, direction flags) plus
// CMC (complement carry) and CLTS (clear TS in CR0, privileged).
2063 let SchedRW = [WriteALU] in {
2064 def CLC : I<0xF8, RawFrm, (outs), (ins), "clc", [], IIC_CLC>;
2065 def STC : I<0xF9, RawFrm, (outs), (ins), "stc", [], IIC_STC>;
2066 def CLI : I<0xFA, RawFrm, (outs), (ins), "cli", [], IIC_CLI>;
2067 def STI : I<0xFB, RawFrm, (outs), (ins), "sti", [], IIC_STI>;
2068 def CLD : I<0xFC, RawFrm, (outs), (ins), "cld", [], IIC_CLD>;
2069 def STD : I<0xFD, RawFrm, (outs), (ins), "std", [], IIC_STD>;
2070 def CMC : I<0xF5, RawFrm, (outs), (ins), "cmc", [], IIC_CMC>;
2072 def CLTS : I<0x06, RawFrm, (outs), (ins), "clts", [], IIC_CLTS>, TB;
2075 // Table lookup instructions
// XLAT: AL = [EBX + AL] (a one-byte table lookup).
2076 let Uses = [AL,EBX], Defs = [AL], hasSideEffects = 0, mayLoad = 1 in
2077 def XLAT : I<0xD7, RawFrm, (outs), (ins), "xlatb", [], IIC_XLAT>,
// Legacy BCD/ASCII adjust instructions; all invalid in 64-bit mode, hence
// the Not64BitMode predicates.
2080 let SchedRW = [WriteMicrocoded] in {
2081 // ASCII Adjust After Addition
2082 let Uses = [AL,EFLAGS], Defs = [AX,EFLAGS], hasSideEffects = 0 in
2083 def AAA : I<0x37, RawFrm, (outs), (ins), "aaa", [], IIC_AAA>,
2084             Requires<[Not64BitMode]>;
2086 // ASCII Adjust AX Before Division
2087 let Uses = [AX], Defs = [AX,EFLAGS], hasSideEffects = 0 in
2088 def AAD8i8 : Ii8<0xD5, RawFrm, (outs), (ins i8imm:$src),
2089                  "aad\t$src", [], IIC_AAD>, Requires<[Not64BitMode]>;
2091 // ASCII Adjust AX After Multiply
2092 let Uses = [AL], Defs = [AX,EFLAGS], hasSideEffects = 0 in
2093 def AAM8i8 : Ii8<0xD4, RawFrm, (outs), (ins i8imm:$src),
2094                  "aam\t$src", [], IIC_AAM>, Requires<[Not64BitMode]>;
2096 // ASCII Adjust AL After Subtraction - sets
2097 let Uses = [AL,EFLAGS], Defs = [AX,EFLAGS], hasSideEffects = 0 in
2098 def AAS : I<0x3F, RawFrm, (outs), (ins), "aas", [], IIC_AAS>,
2099             Requires<[Not64BitMode]>;
2101 // Decimal Adjust AL after Addition
2102 let Uses = [AL,EFLAGS], Defs = [AL,EFLAGS], hasSideEffects = 0 in
2103 def DAA : I<0x27, RawFrm, (outs), (ins), "daa", [], IIC_DAA>,
2104             Requires<[Not64BitMode]>;
2106 // Decimal Adjust AL after Subtraction
2107 let Uses = [AL,EFLAGS], Defs = [AL,EFLAGS], hasSideEffects = 0 in
2108 def DAS : I<0x2F, RawFrm, (outs), (ins), "das", [], IIC_DAS>,
2109             Requires<[Not64BitMode]>;
// BOUND and ARPL share their opcode space with 64-bit-mode instructions
// (MOVSXD uses 0x63), so both are restricted to Not64BitMode.
2112 let SchedRW = [WriteSystem] in {
2113 // Check Array Index Against Bounds
2114 def BOUNDS16rm : I<0x62, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
2115                    "bound\t{$src, $dst|$dst, $src}", [], IIC_BOUND>, OpSize16,
2116                    Requires<[Not64BitMode]>;
2117 def BOUNDS32rm : I<0x62, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
2118                    "bound\t{$src, $dst|$dst, $src}", [], IIC_BOUND>, OpSize32,
2119                    Requires<[Not64BitMode]>;
2121 // Adjust RPL Field of Segment Selector
2122 def ARPL16rr : I<0x63, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
2123                  "arpl\t{$src, $dst|$dst, $src}", [], IIC_ARPL_REG>,
2124                  Requires<[Not64BitMode]>;
2126 def ARPL16mr : I<0x63, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
2127                  "arpl\t{$src, $dst|$dst, $src}", [], IIC_ARPL_MEM>,
2128                  Requires<[Not64BitMode]>;
2131 //===----------------------------------------------------------------------===//
2132 // MOVBE Instructions
// MOVBE: byte-swapping load/store.  The ISel patterns select these for
// (bswap (load ...)) and (store (bswap ...), ...), replacing MOV+BSWAP
// pairs when the MOVBE feature is available.
2134 let Predicates = [HasMOVBE] in {
2135   let SchedRW = [WriteALULd] in {
2136   def MOVBE16rm : I<0xF0, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
2137                     "movbe{w}\t{$src, $dst|$dst, $src}",
2138                     [(set GR16:$dst, (bswap (loadi16 addr:$src)))], IIC_MOVBE>,
2140   def MOVBE32rm : I<0xF0, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
2141                     "movbe{l}\t{$src, $dst|$dst, $src}",
2142                     [(set GR32:$dst, (bswap (loadi32 addr:$src)))], IIC_MOVBE>,
2144   def MOVBE64rm : RI<0xF0, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
2145                      "movbe{q}\t{$src, $dst|$dst, $src}",
2146                      [(set GR64:$dst, (bswap (loadi64 addr:$src)))], IIC_MOVBE>,
2149   let SchedRW = [WriteStore] in {
2150   def MOVBE16mr : I<0xF1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
2151                     "movbe{w}\t{$src, $dst|$dst, $src}",
2152                     [(store (bswap GR16:$src), addr:$dst)], IIC_MOVBE>,
2154   def MOVBE32mr : I<0xF1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2155                     "movbe{l}\t{$src, $dst|$dst, $src}",
2156                     [(store (bswap GR32:$src), addr:$dst)], IIC_MOVBE>,
2158   def MOVBE64mr : RI<0xF1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
2159                      "movbe{q}\t{$src, $dst|$dst, $src}",
2160                      [(store (bswap GR64:$src), addr:$dst)], IIC_MOVBE>,
2165 //===----------------------------------------------------------------------===//
2166 // RDRAND Instruction
// RDRAND writes a hardware random value to the destination and reports
// success in CF — hence each pattern sets both the register and EFLAGS.
2168 let Predicates = [HasRDRAND], Defs = [EFLAGS] in {
2169   def RDRAND16r : I<0xC7, MRM6r, (outs GR16:$dst), (ins),
2171                     [(set GR16:$dst, EFLAGS, (X86rdrand))]>, OpSize16, TB;
2172   def RDRAND32r : I<0xC7, MRM6r, (outs GR32:$dst), (ins),
2174                     [(set GR32:$dst, EFLAGS, (X86rdrand))]>, OpSize32, TB;
2175   def RDRAND64r : RI<0xC7, MRM6r, (outs GR64:$dst), (ins),
2177                      [(set GR64:$dst, EFLAGS, (X86rdrand))]>, TB;
2180 //===----------------------------------------------------------------------===//
2181 // RDSEED Instruction
// RDSEED: same shape as RDRAND (CF signals success) but sources the
// hardware entropy conditioner; ModRM reg field /7 instead of /6.
2183 let Predicates = [HasRDSEED], Defs = [EFLAGS] in {
2184   def RDSEED16r : I<0xC7, MRM7r, (outs GR16:$dst), (ins),
2186                     [(set GR16:$dst, EFLAGS, (X86rdseed))]>, OpSize16, TB;
2187   def RDSEED32r : I<0xC7, MRM7r, (outs GR32:$dst), (ins),
2189                     [(set GR32:$dst, EFLAGS, (X86rdseed))]>, OpSize32, TB;
2190   def RDSEED64r : RI<0xC7, MRM7r, (outs GR64:$dst), (ins),
2192                      [(set GR64:$dst, EFLAGS, (X86rdseed))]>, TB;
2195 //===----------------------------------------------------------------------===//
2196 // LZCNT Instruction
// LZCNT selects for ctlz (count leading zeros); the instruction also sets
// EFLAGS, modelled with (implicit EFLAGS) in each pattern.
2198 let Predicates = [HasLZCNT], Defs = [EFLAGS] in {
2199   def LZCNT16rr : I<0xBD, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
2200                     "lzcnt{w}\t{$src, $dst|$dst, $src}",
2201                     [(set GR16:$dst, (ctlz GR16:$src)), (implicit EFLAGS)]>, XS,
2203   def LZCNT16rm : I<0xBD, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
2204                     "lzcnt{w}\t{$src, $dst|$dst, $src}",
2205                     [(set GR16:$dst, (ctlz (loadi16 addr:$src))),
2206                      (implicit EFLAGS)]>, XS, OpSize16;
2208   def LZCNT32rr : I<0xBD, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
2209                     "lzcnt{l}\t{$src, $dst|$dst, $src}",
2210                     [(set GR32:$dst, (ctlz GR32:$src)), (implicit EFLAGS)]>, XS,
2212   def LZCNT32rm : I<0xBD, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
2213                     "lzcnt{l}\t{$src, $dst|$dst, $src}",
2214                     [(set GR32:$dst, (ctlz (loadi32 addr:$src))),
2215                      (implicit EFLAGS)]>, XS, OpSize32;
2217   def LZCNT64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
2218                      "lzcnt{q}\t{$src, $dst|$dst, $src}",
2219                      [(set GR64:$dst, (ctlz GR64:$src)), (implicit EFLAGS)]>,
2221   def LZCNT64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
2222                      "lzcnt{q}\t{$src, $dst|$dst, $src}",
2223                      [(set GR64:$dst, (ctlz (loadi64 addr:$src))),
2224                       (implicit EFLAGS)]>, XS;
2227 //===----------------------------------------------------------------------===//
// TZCNT (BMI1): selects for cttz (count trailing zeros); mirrors the
// LZCNT definitions with opcode 0xBC.
2230 let Predicates = [HasBMI], Defs = [EFLAGS] in {
2231   def TZCNT16rr : I<0xBC, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
2232                     "tzcnt{w}\t{$src, $dst|$dst, $src}",
2233                     [(set GR16:$dst, (cttz GR16:$src)), (implicit EFLAGS)]>, XS,
2235   def TZCNT16rm : I<0xBC, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
2236                     "tzcnt{w}\t{$src, $dst|$dst, $src}",
2237                     [(set GR16:$dst, (cttz (loadi16 addr:$src))),
2238                      (implicit EFLAGS)]>, XS, OpSize16;
2240   def TZCNT32rr : I<0xBC, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
2241                     "tzcnt{l}\t{$src, $dst|$dst, $src}",
2242                     [(set GR32:$dst, (cttz GR32:$src)), (implicit EFLAGS)]>, XS,
2244   def TZCNT32rm : I<0xBC, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
2245                     "tzcnt{l}\t{$src, $dst|$dst, $src}",
2246                     [(set GR32:$dst, (cttz (loadi32 addr:$src))),
2247                      (implicit EFLAGS)]>, XS, OpSize32;
2249   def TZCNT64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
2250                      "tzcnt{q}\t{$src, $dst|$dst, $src}",
2251                      [(set GR64:$dst, (cttz GR64:$src)), (implicit EFLAGS)]>,
2253   def TZCNT64rm : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
2254                      "tzcnt{q}\t{$src, $dst|$dst, $src}",
2255                      [(set GR64:$dst, (cttz (loadi64 addr:$src))),
2256                       (implicit EFLAGS)]>, XS;
// BMI1 BLSR/BLSMSK/BLSI share VEX opcode 0xF3, distinguished only by the
// ModRM reg field (/1, /2, /3) passed in via RegMRM/MemMRM.  The defs carry
// no patterns; matching is done by the Pat<> entries below.
2259 multiclass bmi_bls<string mnemonic, Format RegMRM, Format MemMRM,
2260                   RegisterClass RC, X86MemOperand x86memop> {
2261 let hasSideEffects = 0 in {
2262   def rr : I<0xF3, RegMRM, (outs RC:$dst), (ins RC:$src),
2263              !strconcat(mnemonic, "\t{$src, $dst|$dst, $src}"),
2266   def rm : I<0xF3, MemMRM, (outs RC:$dst), (ins x86memop:$src),
2267              !strconcat(mnemonic, "\t{$src, $dst|$dst, $src}"),
// 64-bit variants add VEX_W to select the wide form of the same opcode.
2272 let Predicates = [HasBMI], Defs = [EFLAGS] in {
2273   defm BLSR32 : bmi_bls<"blsr{l}", MRM1r, MRM1m, GR32, i32mem>;
2274   defm BLSR64 : bmi_bls<"blsr{q}", MRM1r, MRM1m, GR64, i64mem>, VEX_W;
2275   defm BLSMSK32 : bmi_bls<"blsmsk{l}", MRM2r, MRM2m, GR32, i32mem>;
2276   defm BLSMSK64 : bmi_bls<"blsmsk{q}", MRM2r, MRM2m, GR64, i64mem>, VEX_W;
2277   defm BLSI32 : bmi_bls<"blsi{l}", MRM3r, MRM3m, GR32, i32mem>;
2278   defm BLSI64 : bmi_bls<"blsi{q}", MRM3r, MRM3m, GR64, i64mem>, VEX_W;
2281 //===----------------------------------------------------------------------===//
2282 // Pattern fragments to auto generate BMI instructions.
2283 //===----------------------------------------------------------------------===//
// Selection patterns mapping the canonical DAG idioms onto BMI1:
//   x & (x - 1)  -> BLSR  (reset lowest set bit)
//   x ^ (x - 1)  -> BLSMSK (mask up to lowest set bit)
//   x & -x       -> BLSI  (isolate lowest set bit)
2285 let Predicates = [HasBMI] in {
2286   // FIXME: patterns for the load versions are not implemented
2287   def : Pat<(and GR32:$src, (add GR32:$src, -1)),
2288             (BLSR32rr GR32:$src)>;
2289   def : Pat<(and GR64:$src, (add GR64:$src, -1)),
2290             (BLSR64rr GR64:$src)>;
2292   def : Pat<(xor GR32:$src, (add GR32:$src, -1)),
2293             (BLSMSK32rr GR32:$src)>;
2294   def : Pat<(xor GR64:$src, (add GR64:$src, -1)),
2295             (BLSMSK64rr GR64:$src)>;
2297   def : Pat<(and GR32:$src, (ineg GR32:$src)),
2298             (BLSI32rr GR32:$src)>;
2299   def : Pat<(and GR64:$src, (ineg GR64:$src)),
2300             (BLSI64rr GR64:$src)>;
// Shared multiclass for BEXTR (BMI1) and BZHI (BMI2): VEX-encoded, with
// the second source register carried in VEX.vvvv (MRMSrcReg4VOp3 /
// MRMSrcMem4VOp3).  Patterns select from the corresponding intrinsics.
2304 multiclass bmi_bextr_bzhi<bits<8> opc, string mnemonic, RegisterClass RC,
2305                           X86MemOperand x86memop, Intrinsic Int,
2307   def rr : I<opc, MRMSrcReg4VOp3, (outs RC:$dst), (ins RC:$src1, RC:$src2),
2308              !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2309              [(set RC:$dst, (Int RC:$src1, RC:$src2)), (implicit EFLAGS)]>,
2311   def rm : I<opc, MRMSrcMem4VOp3, (outs RC:$dst), (ins x86memop:$src1, RC:$src2),
2312              !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2313              [(set RC:$dst, (Int (ld_frag addr:$src1), RC:$src2)),
2314               (implicit EFLAGS)]>, T8PS, VEX;
2317 let Predicates = [HasBMI], Defs = [EFLAGS] in {
2318   defm BEXTR32 : bmi_bextr_bzhi<0xF7, "bextr{l}", GR32, i32mem,
2319                                 int_x86_bmi_bextr_32, loadi32>;
2320   defm BEXTR64 : bmi_bextr_bzhi<0xF7, "bextr{q}", GR64, i64mem,
2321                                 int_x86_bmi_bextr_64, loadi64>, VEX_W;
2324 let Predicates = [HasBMI2], Defs = [EFLAGS] in {
2325   defm BZHI32 : bmi_bextr_bzhi<0xF5, "bzhi{l}", GR32, i32mem,
2326                                int_x86_bmi_bzhi_32, loadi32>;
2327   defm BZHI64 : bmi_bextr_bzhi<0xF5, "bzhi{q}", GR64, i64mem,
2328                                int_x86_bmi_bzhi_64, loadi64>, VEX_W;
// Transform: fold a mask immediate into its i8 trailing-ones count,
// i.e. the bit index BZHI should zero from.
2332 def CountTrailingOnes : SDNodeXForm<imm, [{
2333 // Count the trailing ones in the immediate.
2334 return getI8Imm(countTrailingOnes(N->getZExtValue()), SDLoc(N));
// Match only contiguous low masks wider than 32 ones; narrower masks
// are handled by other (32-bit immediate) forms.
2337 def BZHIMask : ImmLeaf<i64, [{
2338 return isMask_64(Imm) && (countTrailingOnes<uint64_t>(Imm) > 32);
2341 let Predicates = [HasBMI2] in {
// x & mask  ->  bzhi x, popcount(mask); the i8 count is widened to the
// register operand via INSERT_SUBREG into an undef i64.
2342 def : Pat<(and GR64:$src, BZHIMask:$mask),
2343 (BZHI64rr GR64:$src,
2344 (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
2345 (MOV8ri (CountTrailingOnes imm:$mask)), sub_8bit))>;
// x & ((1 << y) - 1)  ->  bzhi x, y   (variable-width low mask).
2347 def : Pat<(and GR32:$src, (add (shl 1, GR8:$lz), -1)),
2348 (BZHI32rr GR32:$src,
2349 (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR8:$lz, sub_8bit))>;
2351 def : Pat<(and (loadi32 addr:$src), (add (shl 1, GR8:$lz), -1)),
2352 (BZHI32rm addr:$src,
2353 (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR8:$lz, sub_8bit))>;
2355 def : Pat<(and GR64:$src, (add (shl 1, GR8:$lz), -1)),
2356 (BZHI64rr GR64:$src,
2357 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR8:$lz, sub_8bit))>;
2359 def : Pat<(and (loadi64 addr:$src), (add (shl 1, GR8:$lz), -1)),
2360 (BZHI64rm addr:$src,
2361 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR8:$lz, sub_8bit))>;
2363 // x & (-1 >> (32 - y))
2364 def : Pat<(and GR32:$src, (srl -1, (i8 (trunc (sub 32, GR32:$lz))))),
2365 (BZHI32rr GR32:$src, GR32:$lz)>;
2366 def : Pat<(and (loadi32 addr:$src), (srl -1, (i8 (trunc (sub 32, GR32:$lz))))),
2367 (BZHI32rm addr:$src, GR32:$lz)>;
2369 // x & (-1 >> (64 - y))
// Note the shift amount is a GR32 here; it is widened to 64 bits with
// INSERT_SUBREG before feeding the 64-bit BZHI.
2370 def : Pat<(and GR64:$src, (srl -1, (i8 (trunc (sub 64, GR32:$lz))))),
2371 (BZHI64rr GR64:$src,
2372 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$lz, sub_32bit))>;
2373 def : Pat<(and (loadi64 addr:$src), (srl -1, (i8 (trunc (sub 64, GR32:$lz))))),
2374 (BZHI64rm addr:$src,
2375 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$lz, sub_32bit))>;
2377 // x << (32 - y) >> (32 - y)
2378 def : Pat<(srl (shl GR32:$src, (i8 (trunc (sub 32, GR32:$lz)))),
2379 (i8 (trunc (sub 32, GR32:$lz)))),
2380 (BZHI32rr GR32:$src, GR32:$lz)>;
2381 def : Pat<(srl (shl (loadi32 addr:$src), (i8 (trunc (sub 32, GR32:$lz)))),
2382 (i8 (trunc (sub 32, GR32:$lz)))),
2383 (BZHI32rm addr:$src, GR32:$lz)>;
2385 // x << (64 - y) >> (64 - y)
2386 def : Pat<(srl (shl GR64:$src, (i8 (trunc (sub 64, GR32:$lz)))),
2387 (i8 (trunc (sub 64, GR32:$lz)))),
2388 (BZHI64rr GR64:$src,
2389 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$lz, sub_32bit))>;
2390 def : Pat<(srl (shl (loadi64 addr:$src), (i8 (trunc (sub 64, GR32:$lz)))),
2391 (i8 (trunc (sub 64, GR32:$lz)))),
2392 (BZHI64rm addr:$src,
2393 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$lz, sub_32bit))>;
// Select the target-specific X86bextr node to BEXTR (reg and mem forms).
2396 let Predicates = [HasBMI] in {
2397 def : Pat<(X86bextr GR32:$src1, GR32:$src2),
2398 (BEXTR32rr GR32:$src1, GR32:$src2)>;
2399 def : Pat<(X86bextr (loadi32 addr:$src1), GR32:$src2),
2400 (BEXTR32rm addr:$src1, GR32:$src2)>;
2401 def : Pat<(X86bextr GR64:$src1, GR64:$src2),
2402 (BEXTR64rr GR64:$src1, GR64:$src2)>;
2403 def : Pat<(X86bextr (loadi64 addr:$src1), GR64:$src2),
2404 (BEXTR64rm addr:$src1, GR64:$src2)>;
// PDEP/PEXT (BMI2): parallel bit deposit/extract. Both share opcode
// 0xF5 and are distinguished by mandatory prefix: T8XD (F2) for PDEP,
// T8XS (F3) for PEXT. Selected from the matching intrinsic; unlike the
// BLS/BEXTR families these do not write EFLAGS (no Defs here).
2407 multiclass bmi_pdep_pext<string mnemonic, RegisterClass RC,
2408 X86MemOperand x86memop, Intrinsic Int,
2410 def rr : I<0xF5, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
2411 !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2412 [(set RC:$dst, (Int RC:$src1, RC:$src2))]>,
2414 def rm : I<0xF5, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
2415 !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2416 [(set RC:$dst, (Int RC:$src1, (ld_frag addr:$src2)))]>, VEX_4V;
2419 let Predicates = [HasBMI2] in {
2420 defm PDEP32 : bmi_pdep_pext<"pdep{l}", GR32, i32mem,
2421 int_x86_bmi_pdep_32, loadi32>, T8XD;
2422 defm PDEP64 : bmi_pdep_pext<"pdep{q}", GR64, i64mem,
2423 int_x86_bmi_pdep_64, loadi64>, T8XD, VEX_W;
2424 defm PEXT32 : bmi_pdep_pext<"pext{l}", GR32, i32mem,
2425 int_x86_bmi_pext_32, loadi32>, T8XS;
2426 defm PEXT64 : bmi_pdep_pext<"pext{q}", GR64, i64mem,
2427 int_x86_bmi_pext_64, loadi64>, T8XS, VEX_W;
2430 //===----------------------------------------------------------------------===//
// TBM (AMD Trailing Bit Manipulation). All TBM instructions defined in
// this region write EFLAGS.
2433 let Predicates = [HasTBM], Defs = [EFLAGS] in {
// BEXTRI-style ops: register-or-memory source plus a 32-bit immediate
// control field ($cntl), selected from the matching intrinsic.
2435 multiclass tbm_ternary_imm_intr<bits<8> opc, RegisterClass RC, string OpcodeStr,
2436 X86MemOperand x86memop, PatFrag ld_frag,
2437 Intrinsic Int, Operand immtype,
2438 SDPatternOperator immoperator> {
2439 def ri : Ii32<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, immtype:$cntl),
2440 !strconcat(OpcodeStr,
2441 "\t{$cntl, $src1, $dst|$dst, $src1, $cntl}"),
2442 [(set RC:$dst, (Int RC:$src1, immoperator:$cntl))]>,
2444 def mi : Ii32<opc, MRMSrcMem, (outs RC:$dst),
2445 (ins x86memop:$src1, immtype:$cntl),
2446 !strconcat(OpcodeStr,
2447 "\t{$cntl, $src1, $dst|$dst, $src1, $cntl}"),
2448 [(set RC:$dst, (Int (ld_frag addr:$src1), immoperator:$cntl))]>,
2452 defm BEXTRI32 : tbm_ternary_imm_intr<0x10, GR32, "bextr", i32mem, loadi32,
2453 int_x86_tbm_bextri_u32, i32imm, imm>;
// 64-bit form still takes a 32-bit (sign-extended) immediate, hence
// ImmT = Imm32S and the i64i32imm / i64immSExt32 operand kinds.
2454 let ImmT = Imm32S in
2455 defm BEXTRI64 : tbm_ternary_imm_intr<0x10, GR64, "bextr", i64mem, loadi64,
2456 int_x86_tbm_bextri_u64, i64i32imm,
2457 i64immSExt32>, VEX_W;
// One-source TBM ops (blcfill, blci, ...): reg-reg and reg-mem forms.
// The instruction within an opcode group is selected by the MRMn /n
// field, not the opcode byte (note several share opcode 0x01).
2459 multiclass tbm_binary_rm<bits<8> opc, Format FormReg, Format FormMem,
2460 RegisterClass RC, string OpcodeStr,
2461 X86MemOperand x86memop, PatFrag ld_frag> {
// No patterns on the defs themselves (hasSideEffects = 0); selection
// patterns live in the "Pattern fragments to auto generate TBM
// instructions" section below.
2462 let hasSideEffects = 0 in {
2463 def rr : I<opc, FormReg, (outs RC:$dst), (ins RC:$src),
2464 !strconcat(OpcodeStr,"\t{$src, $dst|$dst, $src}"),
2467 def rm : I<opc, FormMem, (outs RC:$dst), (ins x86memop:$src),
2468 !strconcat(OpcodeStr,"\t{$src, $dst|$dst, $src}"),
// Instantiate both the 32- and 64-bit variants of a TBM unary op.
2473 multiclass tbm_binary_intr<bits<8> opc, string OpcodeStr,
2474 Format FormReg, Format FormMem> {
2475 defm NAME#32 : tbm_binary_rm<opc, FormReg, FormMem, GR32, OpcodeStr, i32mem,
2477 defm NAME#64 : tbm_binary_rm<opc, FormReg, FormMem, GR64, OpcodeStr, i64mem,
2481 defm BLCFILL : tbm_binary_intr<0x01, "blcfill", MRM1r, MRM1m>;
2482 defm BLCI : tbm_binary_intr<0x02, "blci", MRM6r, MRM6m>;
2483 defm BLCIC : tbm_binary_intr<0x01, "blcic", MRM5r, MRM5m>;
2484 defm BLCMSK : tbm_binary_intr<0x02, "blcmsk", MRM1r, MRM1m>;
2485 defm BLCS : tbm_binary_intr<0x01, "blcs", MRM3r, MRM3m>;
2486 defm BLSFILL : tbm_binary_intr<0x01, "blsfill", MRM2r, MRM2m>;
2487 defm BLSIC : tbm_binary_intr<0x01, "blsic", MRM6r, MRM6m>;
2488 defm T1MSKC : tbm_binary_intr<0x01, "t1mskc", MRM7r, MRM7m>;
2489 defm TZMSK : tbm_binary_intr<0x01, "tzmsk", MRM4r, MRM4m>;
2492 //===----------------------------------------------------------------------===//
2493 // Lightweight Profiling Instructions
2495 let Predicates = [HasLWP] in {
// LLWPCB/SLWPCB: load/store the LWP control-block pointer. XOP-encoded
// (map XOP9); separate GR32/GR64 defs gated on the current mode, with
// the 64-bit forms adding VEX_W.
2497 def LLWPCB : I<0x12, MRM0r, (outs), (ins GR32:$src), "llwpcb\t$src",
2498 [(int_x86_llwpcb GR32:$src)], IIC_LWP>,
2499 XOP, XOP9, Requires<[Not64BitMode]>;
2500 def SLWPCB : I<0x12, MRM1r, (outs GR32:$dst), (ins), "slwpcb\t$dst",
2501 [(set GR32:$dst, (int_x86_slwpcb))], IIC_LWP>,
2502 XOP, XOP9, Requires<[Not64BitMode]>;
2504 def LLWPCB64 : I<0x12, MRM0r, (outs), (ins GR64:$src), "llwpcb\t$src",
2505 [(int_x86_llwpcb GR64:$src)], IIC_LWP>,
2506 XOP, XOP9, VEX_W, Requires<[In64BitMode]>;
2507 def SLWPCB64 : I<0x12, MRM1r, (outs GR64:$dst), (ins), "slwpcb\t$dst",
2508 [(set GR64:$dst, (int_x86_slwpcb))], IIC_LWP>,
2509 XOP, XOP9, VEX_W, Requires<[In64BitMode]>;
// LWPINS: takes a register, a GR32-or-memory event operand and a 32-bit
// immediate; its result is modeled as a write to EFLAGS.
2511 multiclass lwpins_intr<RegisterClass RC> {
2512 def rri : Ii32<0x12, MRM0r, (outs), (ins RC:$src0, GR32:$src1, i32imm:$cntl),
2513 "lwpins\t{$cntl, $src1, $src0|$src0, $src1, $cntl}",
2514 [(set EFLAGS, (X86lwpins RC:$src0, GR32:$src1, imm:$cntl))]>,
2517 def rmi : Ii32<0x12, MRM0m, (outs), (ins RC:$src0, i32mem:$src1, i32imm:$cntl),
2518 "lwpins\t{$cntl, $src1, $src0|$src0, $src1, $cntl}",
2519 [(set EFLAGS, (X86lwpins RC:$src0, (loadi32 addr:$src1), imm:$cntl))]>,
2523 let Defs = [EFLAGS] in {
2524 defm LWPINS32 : lwpins_intr<GR32>;
2525 defm LWPINS64 : lwpins_intr<GR64>, VEX_W;
// LWPVAL: same operand shape as LWPINS but no result (void intrinsic,
// no EFLAGS def).
2528 multiclass lwpval_intr<RegisterClass RC, Intrinsic Int> {
2529 def rri : Ii32<0x12, MRM1r, (outs), (ins RC:$src0, GR32:$src1, i32imm:$cntl),
2530 "lwpval\t{$cntl, $src1, $src0|$src0, $src1, $cntl}",
2531 [(Int RC:$src0, GR32:$src1, imm:$cntl)], IIC_LWP>,
2534 def rmi : Ii32<0x12, MRM1m, (outs), (ins RC:$src0, i32mem:$src1, i32imm:$cntl),
2535 "lwpval\t{$cntl, $src1, $src0|$src0, $src1, $cntl}",
2536 [(Int RC:$src0, (loadi32 addr:$src1), imm:$cntl)], IIC_LWP>,
2540 defm LWPVAL32 : lwpval_intr<GR32, int_x86_lwpval32>;
2541 defm LWPVAL64 : lwpval_intr<GR64, int_x86_lwpval64>, VEX_W;
2545 //===----------------------------------------------------------------------===//
2546 // MONITORX/MWAITX Instructions
2548 let SchedRW = [ WriteSystem ] in {
// Pseudo form of MONITORX: takes explicit operands and is expanded by a
// custom inserter (which must marshal them into the fixed registers the
// real encoding below reads).
2549 let usesCustomInserter = 1 in {
2550 def MONITORX : PseudoI<(outs), (ins i32mem:$src1, GR32:$src2, GR32:$src3),
2551 [(int_x86_monitorx addr:$src1, GR32:$src2, GR32:$src3)]>,
2552 Requires<[ HasMWAITX ]>;
// Real encodings: MRM_FA/MRM_FB fixed forms with implicit register uses
// (EAX/ECX/EDX for monitorx, ECX/EAX/EBX for mwaitx).
2555 let Uses = [ EAX, ECX, EDX ] in {
2556 def MONITORXrrr : I<0x01, MRM_FA, (outs), (ins), "monitorx", [], IIC_SSE_MONITORX>,
2557 TB, Requires<[ HasMWAITX ]>;
2560 let Uses = [ ECX, EAX, EBX ] in {
2561 def MWAITXrrr : I<0x01, MRM_FB, (outs), (ins), "mwaitx",
2562 [(int_x86_mwaitx ECX, EAX, EBX)], IIC_SSE_MWAITX>,
2563 TB, Requires<[ HasMWAITX ]>;
// Assembler-only aliases accepting the implicit operands spelled out
// explicitly, per the current mode's register width.
2567 def : InstAlias<"mwaitx\t{%eax, %ecx, %ebx|ebx, ecx, eax}", (MWAITXrrr)>,
2568 Requires<[ Not64BitMode ]>;
2569 def : InstAlias<"mwaitx\t{%rax, %rcx, %rbx|rbx, rcx, rax}", (MWAITXrrr)>,
2570 Requires<[ In64BitMode ]>;
2572 def : InstAlias<"monitorx\t{%eax, %ecx, %edx|edx, ecx, eax}", (MONITORXrrr)>,
2573 Requires<[ Not64BitMode ]>;
2574 def : InstAlias<"monitorx\t{%rax, %rcx, %rdx|rdx, rcx, rax}", (MONITORXrrr)>,
2575 Requires<[ In64BitMode ]>;
2577 //===----------------------------------------------------------------------===//
2578 // CLZERO Instruction
2580 let SchedRW = [WriteSystem] in {
// Real encoding (fixed MRM_FC form, no explicit operands)...
2582 def CLZEROr : I<0x01, MRM_FC, (outs), (ins), "clzero", [], IIC_SSE_CLZERO>,
2583 TB, Requires<[HasCLZERO]>;
// ...and a pseudo carrying the intrinsic's explicit address operand,
// expanded via custom inserter.
2585 let usesCustomInserter = 1 in {
2586 def CLZERO : PseudoI<(outs), (ins i32mem:$src1),
2587 [(int_x86_clzero addr:$src1)]>, Requires<[HasCLZERO]>;
// Accept "clzero %eax/%rax" spellings for the implicit operand.
2591 def : InstAlias<"clzero\t{%eax|eax}", (CLZEROr)>, Requires<[Not64BitMode]>;
2592 def : InstAlias<"clzero\t{%rax|rax}", (CLZEROr)>, Requires<[In64BitMode]>;
2594 //===----------------------------------------------------------------------===//
2595 // Pattern fragments to auto generate TBM instructions.
2596 //===----------------------------------------------------------------------===//
2598 let Predicates = [HasTBM] in {
// BEXTRI: X86bextr with an immediate control operand (reg and mem).
2599 def : Pat<(X86bextr GR32:$src1, (i32 imm:$src2)),
2600 (BEXTRI32ri GR32:$src1, imm:$src2)>;
2601 def : Pat<(X86bextr (loadi32 addr:$src1), (i32 imm:$src2)),
2602 (BEXTRI32mi addr:$src1, imm:$src2)>;
2603 def : Pat<(X86bextr GR64:$src1, i64immSExt32:$src2),
2604 (BEXTRI64ri GR64:$src1, i64immSExt32:$src2)>;
2605 def : Pat<(X86bextr (loadi64 addr:$src1), i64immSExt32:$src2),
2606 (BEXTRI64mi addr:$src1, i64immSExt32:$src2)>;
2608 // FIXME: patterns for the load versions are not implemented
// BLCFILL: x & (x + 1)
2609 def : Pat<(and GR32:$src, (add GR32:$src, 1)),
2610 (BLCFILL32rr GR32:$src)>;
2611 def : Pat<(and GR64:$src, (add GR64:$src, 1)),
2612 (BLCFILL64rr GR64:$src)>;
// BLCI: x | ~(x + 1)
2614 def : Pat<(or GR32:$src, (not (add GR32:$src, 1))),
2615 (BLCI32rr GR32:$src)>;
2616 def : Pat<(or GR64:$src, (not (add GR64:$src, 1))),
2617 (BLCI64rr GR64:$src)>;
2619 // Extra patterns because opt can optimize the above patterns to this.
// (~(x + 1) == -2 - x, so x | (-2 - x) is the same computation.)
2620 def : Pat<(or GR32:$src, (sub -2, GR32:$src)),
2621 (BLCI32rr GR32:$src)>;
2622 def : Pat<(or GR64:$src, (sub -2, GR64:$src)),
2623 (BLCI64rr GR64:$src)>;
// BLCIC: ~x & (x + 1)
2625 def : Pat<(and (not GR32:$src), (add GR32:$src, 1)),
2626 (BLCIC32rr GR32:$src)>;
2627 def : Pat<(and (not GR64:$src), (add GR64:$src, 1)),
2628 (BLCIC64rr GR64:$src)>;
// BLCMSK: x ^ (x + 1)
2630 def : Pat<(xor GR32:$src, (add GR32:$src, 1)),
2631 (BLCMSK32rr GR32:$src)>;
2632 def : Pat<(xor GR64:$src, (add GR64:$src, 1)),
2633 (BLCMSK64rr GR64:$src)>;
// BLCS: x | (x + 1)
2635 def : Pat<(or GR32:$src, (add GR32:$src, 1)),
2636 (BLCS32rr GR32:$src)>;
2637 def : Pat<(or GR64:$src, (add GR64:$src, 1)),
2638 (BLCS64rr GR64:$src)>;
// BLSFILL: x | (x - 1)
2640 def : Pat<(or GR32:$src, (add GR32:$src, -1)),
2641 (BLSFILL32rr GR32:$src)>;
2642 def : Pat<(or GR64:$src, (add GR64:$src, -1)),
2643 (BLSFILL64rr GR64:$src)>;
// BLSIC: ~x | (x - 1)
2645 def : Pat<(or (not GR32:$src), (add GR32:$src, -1)),
2646 (BLSIC32rr GR32:$src)>;
2647 def : Pat<(or (not GR64:$src), (add GR64:$src, -1)),
2648 (BLSIC64rr GR64:$src)>;
// T1MSKC: ~x | (x + 1)
2650 def : Pat<(or (not GR32:$src), (add GR32:$src, 1)),
2651 (T1MSKC32rr GR32:$src)>;
2652 def : Pat<(or (not GR64:$src), (add GR64:$src, 1)),
2653 (T1MSKC64rr GR64:$src)>;
// TZMSK: ~x & (x - 1)
2655 def : Pat<(and (not GR32:$src), (add GR32:$src, -1)),
2656 (TZMSK32rr GR32:$src)>;
2657 def : Pat<(and (not GR64:$src), (add GR64:$src, -1)),
2658 (TZMSK64rr GR64:$src)>;
2661 //===----------------------------------------------------------------------===//
2662 // Memory Instructions
// CLFLUSHOPT has a selection pattern from its intrinsic; CLWB is
// currently assembler-only (empty pattern list). Both use 0F AE with a
// /6 or /7 memory form and the PD (66) prefix.
2665 let Predicates = [HasCLFLUSHOPT] in
2666 def CLFLUSHOPT : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
2667 "clflushopt\t$src", [(int_x86_clflushopt addr:$src)]>, PD;
2668 def CLWB : I<0xAE, MRM6m, (outs), (ins i8mem:$src), "clwb\t$src", []>, PD;
2671 //===----------------------------------------------------------------------===//
2673 //===----------------------------------------------------------------------===//
2675 include "X86InstrArithmetic.td"
2676 include "X86InstrCMovSetCC.td"
2677 include "X86InstrExtension.td"
2678 include "X86InstrControl.td"
2679 include "X86InstrShiftRotate.td"
2681 // X87 Floating Point Stack.
2682 include "X86InstrFPStack.td"
2684 // SIMD support (SSE, MMX and AVX)
2685 include "X86InstrFragmentsSIMD.td"
2687 // FMA - Fused Multiply-Add support (requires FMA)
2688 include "X86InstrFMA.td"
2691 include "X86InstrXOP.td"
2693 // SSE, MMX and 3DNow! vector support.
2694 include "X86InstrSSE.td"
2695 include "X86InstrAVX512.td"
2696 include "X86InstrMMX.td"
2697 include "X86Instr3DNow.td"
2700 include "X86InstrMPX.td"
2702 include "X86InstrVMX.td"
2703 include "X86InstrSVM.td"
2705 include "X86InstrTSX.td"
2706 include "X86InstrSGX.td"
2708 // System instructions.
2709 include "X86InstrSystem.td"
2711 // Compiler Pseudo Instructions and Pat Patterns
2712 include "X86InstrCompiler.td"
2714 //===----------------------------------------------------------------------===//
2715 // Assembler Mnemonic Aliases
2716 //===----------------------------------------------------------------------===//
// Suffix-less AT&T mnemonics resolve to the size implied by the
// current assembler mode (16/32/64-bit).
2718 def : MnemonicAlias<"call", "callw", "att">, Requires<[In16BitMode]>;
2719 def : MnemonicAlias<"call", "calll", "att">, Requires<[In32BitMode]>;
2720 def : MnemonicAlias<"call", "callq", "att">, Requires<[In64BitMode]>;
// Intel sign-extension mnemonics mapped to their AT&T spellings.
2722 def : MnemonicAlias<"cbw", "cbtw", "att">;
2723 def : MnemonicAlias<"cwde", "cwtl", "att">;
2724 def : MnemonicAlias<"cwd", "cwtd", "att">;
2725 def : MnemonicAlias<"cdq", "cltd", "att">;
2726 def : MnemonicAlias<"cdqe", "cltq", "att">;
2727 def : MnemonicAlias<"cqo", "cqto", "att">;
2729 // In 64-bit mode lret maps to lretl; it is not ambiguous with lretq.
2730 def : MnemonicAlias<"lret", "lretw", "att">, Requires<[In16BitMode]>;
2731 def : MnemonicAlias<"lret", "lretl", "att">, Requires<[Not16BitMode]>;
// Suffixed "leave" spellings collapse to the unsuffixed mnemonic.
2733 def : MnemonicAlias<"leavel", "leave", "att">, Requires<[Not64BitMode]>;
2734 def : MnemonicAlias<"leaveq", "leave", "att">, Requires<[In64BitMode]>;
2736 def : MnemonicAlias<"loopz", "loope">;
2737 def : MnemonicAlias<"loopnz", "loopne">;
2739 def : MnemonicAlias<"pop", "popw", "att">, Requires<[In16BitMode]>;
2740 def : MnemonicAlias<"pop", "popl", "att">, Requires<[In32BitMode]>;
2741 def : MnemonicAlias<"pop", "popq", "att">, Requires<[In64BitMode]>;
2742 def : MnemonicAlias<"popf", "popfw", "att">, Requires<[In16BitMode]>;
2743 def : MnemonicAlias<"popf", "popfl", "att">, Requires<[In32BitMode]>;
2744 def : MnemonicAlias<"popf", "popfq", "att">, Requires<[In64BitMode]>;
2745 def : MnemonicAlias<"popfd", "popfl", "att">;
2747 // FIXME: This is wrong for "push reg". "push %bx" should turn into pushw in
2748 // all modes. However: "push (addr)" and "push $42" should default to
2749 // pushl/pushq depending on the current mode. Similar for "pop %bx"
2750 def : MnemonicAlias<"push", "pushw", "att">, Requires<[In16BitMode]>;
2751 def : MnemonicAlias<"push", "pushl", "att">, Requires<[In32BitMode]>;
2752 def : MnemonicAlias<"push", "pushq", "att">, Requires<[In64BitMode]>;
2753 def : MnemonicAlias<"pushf", "pushfw", "att">, Requires<[In16BitMode]>;
2754 def : MnemonicAlias<"pushf", "pushfl", "att">, Requires<[In32BitMode]>;
2755 def : MnemonicAlias<"pushf", "pushfq", "att">, Requires<[In64BitMode]>;
2756 def : MnemonicAlias<"pushfd", "pushfl", "att">;
// popa/pusha: Intel and AT&T variants get separate aliases because the
// canonical suffixed names differ per mode.
2758 def : MnemonicAlias<"popad", "popal", "intel">, Requires<[Not64BitMode]>;
2759 def : MnemonicAlias<"pushad", "pushal", "intel">, Requires<[Not64BitMode]>;
2760 def : MnemonicAlias<"popa", "popaw", "intel">, Requires<[In16BitMode]>;
2761 def : MnemonicAlias<"pusha", "pushaw", "intel">, Requires<[In16BitMode]>;
2762 def : MnemonicAlias<"popa", "popal", "intel">, Requires<[In32BitMode]>;
2763 def : MnemonicAlias<"pusha", "pushal", "intel">, Requires<[In32BitMode]>;
2765 def : MnemonicAlias<"popa", "popaw", "att">, Requires<[In16BitMode]>;
2766 def : MnemonicAlias<"pusha", "pushaw", "att">, Requires<[In16BitMode]>;
2767 def : MnemonicAlias<"popa", "popal", "att">, Requires<[In32BitMode]>;
2768 def : MnemonicAlias<"pusha", "pushal", "att">, Requires<[In32BitMode]>;
// Equivalent REP-prefix spellings.
2770 def : MnemonicAlias<"repe", "rep">;
2771 def : MnemonicAlias<"repz", "rep">;
2772 def : MnemonicAlias<"repnz", "repne">;
2774 def : MnemonicAlias<"ret", "retw", "att">, Requires<[In16BitMode]>;
2775 def : MnemonicAlias<"ret", "retl", "att">, Requires<[In32BitMode]>;
2776 def : MnemonicAlias<"ret", "retq", "att">, Requires<[In64BitMode]>;
2778 // Apply 'ret' behavior to 'retn'
2779 def : MnemonicAlias<"retn", "retw", "att">, Requires<[In16BitMode]>;
2780 def : MnemonicAlias<"retn", "retl", "att">, Requires<[In32BitMode]>;
2781 def : MnemonicAlias<"retn", "retq", "att">, Requires<[In64BitMode]>;
2782 def : MnemonicAlias<"retn", "ret", "intel">;
// sal is the same operation as shl; canonicalize on shl.
2784 def : MnemonicAlias<"sal", "shl", "intel">;
2785 def : MnemonicAlias<"salb", "shlb", "att">;
2786 def : MnemonicAlias<"salw", "shlw", "att">;
2787 def : MnemonicAlias<"sall", "shll", "att">;
2788 def : MnemonicAlias<"salq", "shlq", "att">;
// Historical "smov" spellings of the movs string instructions.
2790 def : MnemonicAlias<"smovb", "movsb", "att">;
2791 def : MnemonicAlias<"smovw", "movsw", "att">;
2792 def : MnemonicAlias<"smovl", "movsl", "att">;
2793 def : MnemonicAlias<"smovq", "movsq", "att">;
2795 def : MnemonicAlias<"ud2a", "ud2", "att">;
2796 def : MnemonicAlias<"verrw", "verr", "att">;
2798 // System instruction aliases.
2799 def : MnemonicAlias<"iret", "iretw", "att">, Requires<[In16BitMode]>;
2800 def : MnemonicAlias<"iret", "iretl", "att">, Requires<[Not16BitMode]>;
// Unsuffixed sysret/sysexit always map to the 'l' form (no mode check).
2801 def : MnemonicAlias<"sysret", "sysretl", "att">;
2802 def : MnemonicAlias<"sysexit", "sysexitl", "att">;
// Descriptor-table load/store mnemonics take the size suffix implied by
// the current mode.
2804 def : MnemonicAlias<"lgdt", "lgdtw", "att">, Requires<[In16BitMode]>;
2805 def : MnemonicAlias<"lgdt", "lgdtl", "att">, Requires<[In32BitMode]>;
2806 def : MnemonicAlias<"lgdt", "lgdtq", "att">, Requires<[In64BitMode]>;
2807 def : MnemonicAlias<"lidt", "lidtw", "att">, Requires<[In16BitMode]>;
2808 def : MnemonicAlias<"lidt", "lidtl", "att">, Requires<[In32BitMode]>;
2809 def : MnemonicAlias<"lidt", "lidtq", "att">, Requires<[In64BitMode]>;
2810 def : MnemonicAlias<"sgdt", "sgdtw", "att">, Requires<[In16BitMode]>;
2811 def : MnemonicAlias<"sgdt", "sgdtl", "att">, Requires<[In32BitMode]>;
2812 def : MnemonicAlias<"sgdt", "sgdtq", "att">, Requires<[In64BitMode]>;
2813 def : MnemonicAlias<"sidt", "sidtw", "att">, Requires<[In16BitMode]>;
2814 def : MnemonicAlias<"sidt", "sidtl", "att">, Requires<[In32BitMode]>;
2815 def : MnemonicAlias<"sidt", "sidtq", "att">, Requires<[In64BitMode]>;
2818 // Floating point stack aliases.
// Alternate condition-code spellings for fcmov.
2819 def : MnemonicAlias<"fcmovz", "fcmove", "att">;
2820 def : MnemonicAlias<"fcmova", "fcmovnbe", "att">;
2821 def : MnemonicAlias<"fcmovnae", "fcmovb", "att">;
2822 def : MnemonicAlias<"fcmovna", "fcmovbe", "att">;
2823 def : MnemonicAlias<"fcmovae", "fcmovnb", "att">;
2824 def : MnemonicAlias<"fcomip", "fcompi">;
// 'q' integer-size suffix spelled as 'll' in the canonical form.
2825 def : MnemonicAlias<"fildq", "fildll", "att">;
2826 def : MnemonicAlias<"fistpq", "fistpll", "att">;
2827 def : MnemonicAlias<"fisttpq", "fisttpll", "att">;
// Redundant 'w' suffix on control/status-word ops is dropped.
2828 def : MnemonicAlias<"fldcww", "fldcw", "att">;
2829 def : MnemonicAlias<"fnstcww", "fnstcw", "att">;
2830 def : MnemonicAlias<"fnstsww", "fnstsw", "att">;
2831 def : MnemonicAlias<"fucomip", "fucompi">;
2832 def : MnemonicAlias<"fwait", "wait">;
// 'q'-suffixed spellings of the 64-bit x87/XSAVE state-management ops.
2834 def : MnemonicAlias<"fxsaveq", "fxsave64", "att">;
2835 def : MnemonicAlias<"fxrstorq", "fxrstor64", "att">;
2836 def : MnemonicAlias<"xsaveq", "xsave64", "att">;
2837 def : MnemonicAlias<"xrstorq", "xrstor64", "att">;
2838 def : MnemonicAlias<"xsaveoptq", "xsaveopt64", "att">;
2839 def : MnemonicAlias<"xrstorsq", "xrstors64", "att">;
2840 def : MnemonicAlias<"xsavecq", "xsavec64", "att">;
2841 def : MnemonicAlias<"xsavesq", "xsaves64", "att">;
// Helper: alias Prefix+OldCond+Suffix to Prefix+NewCond+Suffix within a
// given assembler variant.
2843 class CondCodeAlias<string Prefix,string Suffix, string OldCond, string NewCond,
2845 : MnemonicAlias<!strconcat(Prefix, OldCond, Suffix),
2846 !strconcat(Prefix, NewCond, Suffix), VariantName>;
2848 /// IntegerCondCodeMnemonicAlias - This multiclass defines a bunch of
2849 /// MnemonicAlias's that canonicalize the condition code in a mnemonic, for
2850 /// example "setz" -> "sete".
2851 multiclass IntegerCondCodeMnemonicAlias<string Prefix, string Suffix,
2853 def C : CondCodeAlias<Prefix, Suffix, "c", "b", V>; // setc -> setb
2854 def Z : CondCodeAlias<Prefix, Suffix, "z" , "e", V>; // setz -> sete
2855 def NA : CondCodeAlias<Prefix, Suffix, "na", "be", V>; // setna -> setbe
2856 def NB : CondCodeAlias<Prefix, Suffix, "nb", "ae", V>; // setnb -> setae
2857 def NC : CondCodeAlias<Prefix, Suffix, "nc", "ae", V>; // setnc -> setae
2858 def NG : CondCodeAlias<Prefix, Suffix, "ng", "le", V>; // setng -> setle
2859 def NL : CondCodeAlias<Prefix, Suffix, "nl", "ge", V>; // setnl -> setge
2860 def NZ : CondCodeAlias<Prefix, Suffix, "nz", "ne", V>; // setnz -> setne
2861 def PE : CondCodeAlias<Prefix, Suffix, "pe", "p", V>; // setpe -> setp
2862 def PO : CondCodeAlias<Prefix, Suffix, "po", "np", V>; // setpo -> setnp
2864 def NAE : CondCodeAlias<Prefix, Suffix, "nae", "b", V>; // setnae -> setb
2865 def NBE : CondCodeAlias<Prefix, Suffix, "nbe", "a", V>; // setnbe -> seta
2866 def NGE : CondCodeAlias<Prefix, Suffix, "nge", "l", V>; // setnge -> setl
2867 def NLE : CondCodeAlias<Prefix, Suffix, "nle", "g", V>; // setnle -> setg
// Instantiate the condition-code canonicalizations for each mnemonic
// family that takes a <CC> suffix.
2870 // Aliases for set<CC>
2871 defm : IntegerCondCodeMnemonicAlias<"set", "">;
2872 // Aliases for j<CC>
2873 defm : IntegerCondCodeMnemonicAlias<"j", "">;
2874 // Aliases for cmov<CC>{w,l,q}
2875 defm : IntegerCondCodeMnemonicAlias<"cmov", "w", "att">;
2876 defm : IntegerCondCodeMnemonicAlias<"cmov", "l", "att">;
2877 defm : IntegerCondCodeMnemonicAlias<"cmov", "q", "att">;
2878 // No size suffix for intel-style asm.
2879 defm : IntegerCondCodeMnemonicAlias<"cmov", "", "intel">;
2882 //===----------------------------------------------------------------------===//
2883 // Assembler Instruction Aliases
2884 //===----------------------------------------------------------------------===//
2886 // aad/aam default to base 10 if no operand is specified.
2887 def : InstAlias<"aad", (AAD8i8 10)>, Requires<[Not64BitMode]>;
2888 def : InstAlias<"aam", (AAM8i8 10)>, Requires<[Not64BitMode]>;
2890 // Disambiguate the mem/imm form of bt-without-a-suffix as btl.
2891 // Likewise for btc/btr/bts.
// Trailing 0 = do not use this alias when printing, only when parsing.
2892 def : InstAlias<"bt\t{$imm, $mem|$mem, $imm}",
2893 (BT32mi8 i32mem:$mem, i32i8imm:$imm), 0>;
2894 def : InstAlias<"btc\t{$imm, $mem|$mem, $imm}",
2895 (BTC32mi8 i32mem:$mem, i32i8imm:$imm), 0>;
2896 def : InstAlias<"btr\t{$imm, $mem|$mem, $imm}",
2897 (BTR32mi8 i32mem:$mem, i32i8imm:$imm), 0>;
2898 def : InstAlias<"bts\t{$imm, $mem|$mem, $imm}",
2899 (BTS32mi8 i32mem:$mem, i32i8imm:$imm), 0>;
// clr* pseudo-mnemonics assemble to self-XOR of the register.
2902 def : InstAlias<"clrb\t$reg", (XOR8rr GR8 :$reg, GR8 :$reg), 0>;
2903 def : InstAlias<"clrw\t$reg", (XOR16rr GR16:$reg, GR16:$reg), 0>;
2904 def : InstAlias<"clrl\t$reg", (XOR32rr GR32:$reg, GR32:$reg), 0>;
2905 def : InstAlias<"clrq\t$reg", (XOR64rr GR64:$reg, GR64:$reg), 0>;
2907 // lods aliases. Accept the destination being omitted because it's implicit
2908 // in the mnemonic, or the mnemonic suffix being omitted because it's implicit
2909 // in the destination.
2910 def : InstAlias<"lodsb\t$src", (LODSB srcidx8:$src), 0>;
2911 def : InstAlias<"lodsw\t$src", (LODSW srcidx16:$src), 0>;
2912 def : InstAlias<"lods{l|d}\t$src", (LODSL srcidx32:$src), 0>;
2913 def : InstAlias<"lodsq\t$src", (LODSQ srcidx64:$src), 0>, Requires<[In64BitMode]>;
// Suffix-less forms with an explicit accumulator destination.
2914 def : InstAlias<"lods\t{$src, %al|al, $src}", (LODSB srcidx8:$src), 0>;
2915 def : InstAlias<"lods\t{$src, %ax|ax, $src}", (LODSW srcidx16:$src), 0>;
2916 def : InstAlias<"lods\t{$src, %eax|eax, $src}", (LODSL srcidx32:$src), 0>;
2917 def : InstAlias<"lods\t{$src, %rax|rax, $src}", (LODSQ srcidx64:$src), 0>, Requires<[In64BitMode]>;
// Suffix-less forms disambiguated purely by the source operand's size.
2918 def : InstAlias<"lods\t$src", (LODSB srcidx8:$src), 0>;
2919 def : InstAlias<"lods\t$src", (LODSW srcidx16:$src), 0>;
2920 def : InstAlias<"lods\t$src", (LODSL srcidx32:$src), 0>;
2921 def : InstAlias<"lods\t$src", (LODSQ srcidx64:$src), 0>, Requires<[In64BitMode]>;
2924 // stos aliases. Accept the source being omitted because it's implicit in
2925 // the mnemonic, or the mnemonic suffix being omitted because it's implicit
2927 def : InstAlias<"stosb\t$dst", (STOSB dstidx8:$dst), 0>;
2928 def : InstAlias<"stosw\t$dst", (STOSW dstidx16:$dst), 0>;
2929 def : InstAlias<"stos{l|d}\t$dst", (STOSL dstidx32:$dst), 0>;
2930 def : InstAlias<"stosq\t$dst", (STOSQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
// Suffix-less forms with an explicit accumulator source.
2931 def : InstAlias<"stos\t{%al, $dst|$dst, al}", (STOSB dstidx8:$dst), 0>;
2932 def : InstAlias<"stos\t{%ax, $dst|$dst, ax}", (STOSW dstidx16:$dst), 0>;
2933 def : InstAlias<"stos\t{%eax, $dst|$dst, eax}", (STOSL dstidx32:$dst), 0>;
2934 def : InstAlias<"stos\t{%rax, $dst|$dst, rax}", (STOSQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
// Suffix-less forms disambiguated by the destination operand's size.
2935 def : InstAlias<"stos\t$dst", (STOSB dstidx8:$dst), 0>;
2936 def : InstAlias<"stos\t$dst", (STOSW dstidx16:$dst), 0>;
2937 def : InstAlias<"stos\t$dst", (STOSL dstidx32:$dst), 0>;
2938 def : InstAlias<"stos\t$dst", (STOSQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
2941 // scas aliases. Accept the destination being omitted because it's implicit
2942 // in the mnemonic, or the mnemonic suffix being omitted because it's implicit
2943 // in the destination.
2944 def : InstAlias<"scasb\t$dst", (SCASB dstidx8:$dst), 0>;
2945 def : InstAlias<"scasw\t$dst", (SCASW dstidx16:$dst), 0>;
2946 def : InstAlias<"scas{l|d}\t$dst", (SCASL dstidx32:$dst), 0>;
2947 def : InstAlias<"scasq\t$dst", (SCASQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
// Suffix-less forms with an explicit accumulator operand.
2948 def : InstAlias<"scas\t{$dst, %al|al, $dst}", (SCASB dstidx8:$dst), 0>;
2949 def : InstAlias<"scas\t{$dst, %ax|ax, $dst}", (SCASW dstidx16:$dst), 0>;
2950 def : InstAlias<"scas\t{$dst, %eax|eax, $dst}", (SCASL dstidx32:$dst), 0>;
2951 def : InstAlias<"scas\t{$dst, %rax|rax, $dst}", (SCASQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
// Suffix-less forms disambiguated by the operand's size.
2952 def : InstAlias<"scas\t$dst", (SCASB dstidx8:$dst), 0>;
2953 def : InstAlias<"scas\t$dst", (SCASW dstidx16:$dst), 0>;
2954 def : InstAlias<"scas\t$dst", (SCASL dstidx32:$dst), 0>;
2955 def : InstAlias<"scas\t$dst", (SCASQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
2957 // cmps aliases. Mnemonic suffix being omitted because it's implicit
2958 // in the destination.
2959 def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSB dstidx8:$dst, srcidx8:$src), 0>;
2960 def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSW dstidx16:$dst, srcidx16:$src), 0>;
2961 def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSL dstidx32:$dst, srcidx32:$src), 0>;
2962 def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSQ dstidx64:$dst, srcidx64:$src), 0>, Requires<[In64BitMode]>;
2964 // movs aliases. Mnemonic suffix being omitted because it's implicit
2965 // in the destination.
2966 def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSB dstidx8:$dst, srcidx8:$src), 0>;
2967 def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSW dstidx16:$dst, srcidx16:$src), 0>;
2968 def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSL dstidx32:$dst, srcidx32:$src), 0>;
2969 def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSQ dstidx64:$dst, srcidx64:$src), 0>, Requires<[In64BitMode]>;
2971 // div and idiv aliases for explicit A register.
// The accumulator is an implicit operand of DIV/IDIV; these accept it
// being written out explicitly, for both register and memory divisors.
2972 def : InstAlias<"div{b}\t{$src, %al|al, $src}", (DIV8r GR8 :$src)>;
2973 def : InstAlias<"div{w}\t{$src, %ax|ax, $src}", (DIV16r GR16:$src)>;
2974 def : InstAlias<"div{l}\t{$src, %eax|eax, $src}", (DIV32r GR32:$src)>;
2975 def : InstAlias<"div{q}\t{$src, %rax|rax, $src}", (DIV64r GR64:$src)>;
2976 def : InstAlias<"div{b}\t{$src, %al|al, $src}", (DIV8m i8mem :$src)>;
2977 def : InstAlias<"div{w}\t{$src, %ax|ax, $src}", (DIV16m i16mem:$src)>;
2978 def : InstAlias<"div{l}\t{$src, %eax|eax, $src}", (DIV32m i32mem:$src)>;
2979 def : InstAlias<"div{q}\t{$src, %rax|rax, $src}", (DIV64m i64mem:$src)>;
2980 def : InstAlias<"idiv{b}\t{$src, %al|al, $src}", (IDIV8r GR8 :$src)>;
2981 def : InstAlias<"idiv{w}\t{$src, %ax|ax, $src}", (IDIV16r GR16:$src)>;
2982 def : InstAlias<"idiv{l}\t{$src, %eax|eax, $src}", (IDIV32r GR32:$src)>;
2983 def : InstAlias<"idiv{q}\t{$src, %rax|rax, $src}", (IDIV64r GR64:$src)>;
2984 def : InstAlias<"idiv{b}\t{$src, %al|al, $src}", (IDIV8m i8mem :$src)>;
2985 def : InstAlias<"idiv{w}\t{$src, %ax|ax, $src}", (IDIV16m i16mem:$src)>;
2986 def : InstAlias<"idiv{l}\t{$src, %eax|eax, $src}", (IDIV32m i32mem:$src)>;
2987 def : InstAlias<"idiv{q}\t{$src, %rax|rax, $src}", (IDIV64m i64mem:$src)>;
2991 // Various unary fpstack operations default to operating on ST1.
2992 // For example, "fxch" -> "fxch %st(1)"
2993 def : InstAlias<"faddp", (ADD_FPrST0 ST1), 0>;
2994 def: InstAlias<"fadd", (ADD_FPrST0 ST1), 0>;
// The {|r} / {r|} markers swap the 'r' between AT&T and Intel
// spellings, since the two syntaxes disagree on fsubp vs fsubrp naming.
2995 def : InstAlias<"fsub{|r}p", (SUBR_FPrST0 ST1), 0>;
2996 def : InstAlias<"fsub{r|}p", (SUB_FPrST0 ST1), 0>;
2997 def : InstAlias<"fmul", (MUL_FPrST0 ST1), 0>;
2998 def : InstAlias<"fmulp", (MUL_FPrST0 ST1), 0>;
2999 def : InstAlias<"fdiv{|r}p", (DIVR_FPrST0 ST1), 0>;
3000 def : InstAlias<"fdiv{r|}p", (DIV_FPrST0 ST1), 0>;
3001 def : InstAlias<"fxch", (XCH_F ST1), 0>;
3002 def : InstAlias<"fcom", (COM_FST0r ST1), 0>;
3003 def : InstAlias<"fcomp", (COMP_FST0r ST1), 0>;
3004 def : InstAlias<"fcomi", (COM_FIr ST1), 0>;
3005 def : InstAlias<"fcompi", (COM_FIPr ST1), 0>;
3006 def : InstAlias<"fucom", (UCOM_Fr ST1), 0>;
3007 def : InstAlias<"fucomp", (UCOM_FPr ST1), 0>;
3008 def : InstAlias<"fucomi", (UCOM_FIr ST1), 0>;
3009 def : InstAlias<"fucompi", (UCOM_FIPr ST1), 0>;
3011 // Handle fmul/fadd/fsub/fdiv instructions with explicitly written st(0) op.
3012 // For example, "fadd %st(4), %st(0)" -> "fadd %st(4)". We also disambiguate
3013 // instructions like "fadd %st(0), %st(0)" as "fadd %st(0)" for consistency with
// FpUnaryAlias - for a two-operand FP mnemonic, accept an explicitly written
// %st(0) operand: both "mnemonic $op, %st(0)" (mapped to Inst with $op) and
// the degenerate "mnemonic %st(0), %st(0)" (mapped to Inst with ST0).
// EmitAlias controls whether the alias is also used when printing.
3015 multiclass FpUnaryAlias<string Mnemonic, Instruction Inst, bit EmitAlias = 1> {
3016 def : InstAlias<!strconcat(Mnemonic, "\t{$op, %st(0)|st(0), $op}"),
3017 (Inst RST:$op), EmitAlias>;
3018 def : InstAlias<!strconcat(Mnemonic, "\t{%st(0), %st(0)|st(0), st(0)}"),
3019 (Inst ST0), EmitAlias>;
// Instantiate the explicit-%st(0) aliases for each FP arithmetic/compare
// mnemonic.  faddp, fcomi and fucomi pass EmitAlias = 0, so those spellings
// are accepted by the assembler but never selected by the printer.
3022 defm : FpUnaryAlias<"fadd", ADD_FST0r>;
3023 defm : FpUnaryAlias<"faddp", ADD_FPrST0, 0>;
3024 defm : FpUnaryAlias<"fsub", SUB_FST0r>;
3025 defm : FpUnaryAlias<"fsub{|r}p", SUBR_FPrST0>;
3026 defm : FpUnaryAlias<"fsubr", SUBR_FST0r>;
3027 defm : FpUnaryAlias<"fsub{r|}p", SUB_FPrST0>;
3028 defm : FpUnaryAlias<"fmul", MUL_FST0r>;
3029 defm : FpUnaryAlias<"fmulp", MUL_FPrST0>;
3030 defm : FpUnaryAlias<"fdiv", DIV_FST0r>;
3031 defm : FpUnaryAlias<"fdiv{|r}p", DIVR_FPrST0>;
3032 defm : FpUnaryAlias<"fdivr", DIVR_FST0r>;
3033 defm : FpUnaryAlias<"fdiv{r|}p", DIV_FPrST0>;
3034 defm : FpUnaryAlias<"fcomi", COM_FIr, 0>;
3035 defm : FpUnaryAlias<"fucomi", UCOM_FIr, 0>;
3036 defm : FpUnaryAlias<"fcompi", COM_FIPr>;
3037 defm : FpUnaryAlias<"fucompi", UCOM_FIPr>;
3040 // Handle "f{mulp,addp} st(0), $op" the same as "f{mulp,addp} $op", since they
3041 // commute. We also allow fdiv[r]p/fsubrp even though they don't commute,
3042 // solely because gas supports it.
3043 def : InstAlias<"faddp\t{%st(0), $op|$op, st(0)}", (ADD_FPrST0 RST:$op), 0>;
// NOTE(review): faddp above is the only entry in this group with
// EmitAlias = 0 — confirm the asymmetry with fmulp below is intentional.
3044 def : InstAlias<"fmulp\t{%st(0), $op|$op, st(0)}", (MUL_FPrST0 RST:$op)>;
3045 def : InstAlias<"fsub{|r}p\t{%st(0), $op|$op, st(0)}", (SUBR_FPrST0 RST:$op)>;
3046 def : InstAlias<"fsub{r|}p\t{%st(0), $op|$op, st(0)}", (SUB_FPrST0 RST:$op)>;
3047 def : InstAlias<"fdiv{|r}p\t{%st(0), $op|$op, st(0)}", (DIVR_FPrST0 RST:$op)>;
3048 def : InstAlias<"fdiv{r|}p\t{%st(0), $op|$op, st(0)}", (DIV_FPrST0 RST:$op)>;
// fnstsw: accept a wider (or omitted) accumulator operand; all three
// spellings map to the same FNSTSW16r instruction.
3050 // We accept "fnstsw %eax" even though it only writes %ax.
3051 def : InstAlias<"fnstsw\t{%eax|eax}", (FNSTSW16r)>;
3052 def : InstAlias<"fnstsw\t{%al|al}" , (FNSTSW16r)>;
3053 def : InstAlias<"fnstsw" , (FNSTSW16r)>;
3055 // lcall and ljmp aliases. This seems to be an odd mapping in 64-bit mode, but
3056 // this is compatible with what GAS does.
3057 def : InstAlias<"lcall\t$seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg), 0>, Requires<[In32BitMode]>;
3058 def : InstAlias<"ljmp\t$seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg), 0>, Requires<[In32BitMode]>;
3059 def : InstAlias<"lcall\t{*}$dst", (FARCALL32m opaque48mem:$dst), 0>, Requires<[Not16BitMode]>;
3060 def : InstAlias<"ljmp\t{*}$dst", (FARJMP32m opaque48mem:$dst), 0>, Requires<[Not16BitMode]>;
3061 def : InstAlias<"lcall\t$seg, $off", (FARCALL16i i16imm:$off, i16imm:$seg), 0>, Requires<[In16BitMode]>;
3062 def : InstAlias<"ljmp\t$seg, $off", (FARJMP16i i16imm:$off, i16imm:$seg), 0>, Requires<[In16BitMode]>;
3063 def : InstAlias<"lcall\t{*}$dst", (FARCALL16m opaque32mem:$dst), 0>, Requires<[In16BitMode]>;
3064 def : InstAlias<"ljmp\t{*}$dst", (FARJMP16m opaque32mem:$dst), 0>, Requires<[In16BitMode]>;
// Indirect near call/jmp through memory: the operand width is chosen by the
// current mode predicate.
3066 def : InstAlias<"call\t{*}$dst", (CALL64m i64mem:$dst), 0>, Requires<[In64BitMode]>;
3067 def : InstAlias<"jmp\t{*}$dst", (JMP64m i64mem:$dst), 0>, Requires<[In64BitMode]>;
3068 def : InstAlias<"call\t{*}$dst", (CALL32m i32mem:$dst), 0>, Requires<[In32BitMode]>;
3069 def : InstAlias<"jmp\t{*}$dst", (JMP32m i32mem:$dst), 0>, Requires<[In32BitMode]>;
3070 def : InstAlias<"call\t{*}$dst", (CALL16m i16mem:$dst), 0>, Requires<[In16BitMode]>;
3071 def : InstAlias<"jmp\t{*}$dst", (JMP16m i16mem:$dst), 0>, Requires<[In16BitMode]>;
3074 // "imul <imm>, B" is an alias for "imul <imm>, B, B".
3075 def : InstAlias<"imul{w}\t{$imm, $r|$r, $imm}", (IMUL16rri GR16:$r, GR16:$r, i16imm:$imm), 0>;
3076 def : InstAlias<"imul{w}\t{$imm, $r|$r, $imm}", (IMUL16rri8 GR16:$r, GR16:$r, i16i8imm:$imm), 0>;
3077 def : InstAlias<"imul{l}\t{$imm, $r|$r, $imm}", (IMUL32rri GR32:$r, GR32:$r, i32imm:$imm), 0>;
3078 def : InstAlias<"imul{l}\t{$imm, $r|$r, $imm}", (IMUL32rri8 GR32:$r, GR32:$r, i32i8imm:$imm), 0>;
3079 def : InstAlias<"imul{q}\t{$imm, $r|$r, $imm}", (IMUL64rri32 GR64:$r, GR64:$r, i64i32imm:$imm), 0>;
3080 def : InstAlias<"imul{q}\t{$imm, $r|$r, $imm}", (IMUL64rri8 GR64:$r, GR64:$r, i64i8imm:$imm), 0>;
3082 // ins aliases. Accept the mnemonic suffix being omitted because it's implicit
3083 // in the destination.
3084 def : InstAlias<"ins\t{%dx, $dst|$dst, dx}", (INSB dstidx8:$dst), 0>;
3085 def : InstAlias<"ins\t{%dx, $dst|$dst, dx}", (INSW dstidx16:$dst), 0>;
3086 def : InstAlias<"ins\t{%dx, $dst|$dst, dx}", (INSL dstidx32:$dst), 0>;
3088 // outs aliases. Accept the mnemonic suffix being omitted because it's implicit in the source.
3090 def : InstAlias<"outs\t{$src, %dx|dx, $src}", (OUTSB srcidx8:$src), 0>;
3091 def : InstAlias<"outs\t{$src, %dx|dx, $src}", (OUTSW srcidx16:$src), 0>;
3092 def : InstAlias<"outs\t{$src, %dx|dx, $src}", (OUTSL srcidx32:$src), 0>;
3094 // inb %dx -> inb %al, %dx
3095 def : InstAlias<"inb\t{%dx|dx}", (IN8rr), 0>;
3096 def : InstAlias<"inw\t{%dx|dx}", (IN16rr), 0>;
3097 def : InstAlias<"inl\t{%dx|dx}", (IN32rr), 0>;
// Fixed-port forms: "inb $port" with an 8-bit immediate port number.
3098 def : InstAlias<"inb\t$port", (IN8ri u8imm:$port), 0>;
3099 def : InstAlias<"inw\t$port", (IN16ri u8imm:$port), 0>;
3100 def : InstAlias<"inl\t$port", (IN32ri u8imm:$port), 0>;
3103 // jmp and call aliases for lcall and ljmp. jmp $42,$5 -> ljmp
3104 def : InstAlias<"call\t$seg, $off", (FARCALL16i i16imm:$off, i16imm:$seg)>, Requires<[In16BitMode]>;
3105 def : InstAlias<"jmp\t$seg, $off", (FARJMP16i i16imm:$off, i16imm:$seg)>, Requires<[In16BitMode]>;
3106 def : InstAlias<"call\t$seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg)>, Requires<[In32BitMode]>;
3107 def : InstAlias<"jmp\t$seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg)>, Requires<[In32BitMode]>;
// Explicitly-suffixed variants (callw/calll, jmpw/jmpl), valid in any
// non-64-bit mode.
3108 def : InstAlias<"callw\t$seg, $off", (FARCALL16i i16imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
3109 def : InstAlias<"jmpw\t$seg, $off", (FARJMP16i i16imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
3110 def : InstAlias<"calll\t$seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
3111 def : InstAlias<"jmpl\t$seg, $off", (FARJMP32i i32imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
3113 // Force mov without a suffix with a segment and mem to prefer the 'l' form of
3114 // the move. All segment/mem forms are equivalent; this has the shortest encoding.
3116 def : InstAlias<"mov\t{$mem, $seg|$seg, $mem}", (MOV32sm SEGMENT_REG:$seg, i32mem:$mem), 0>;
3117 def : InstAlias<"mov\t{$seg, $mem|$mem, $seg}", (MOV32ms i32mem:$mem, SEGMENT_REG:$seg), 0>;
3119 // Match 'movq <largeimm>, <reg>' as an alias for movabsq.
3120 def : InstAlias<"mov{q}\t{$imm, $reg|$reg, $imm}", (MOV64ri GR64:$reg, i64imm:$imm), 0>;
3122 // Match 'movq GR64, MMX' as an alias for movd.
3123 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
3124 (MMX_MOVD64to64rr VR64:$dst, GR64:$src), 0>;
3125 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
3126 (MMX_MOVD64from64rr GR64:$dst, VR64:$src), 0>;
// movsx aliases: accept the suffix-less mnemonic; the widths are implied by
// the register/memory operands.
3129 def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX16rr8 GR16:$dst, GR8:$src), 0>;
3130 def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX16rm8 GR16:$dst, i8mem:$src), 0>;
3131 def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX32rr8 GR32:$dst, GR8:$src), 0>;
3132 def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX32rr16 GR32:$dst, GR16:$src), 0>;
3133 def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX64rr8 GR64:$dst, GR8:$src), 0>;
3134 def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX64rr16 GR64:$dst, GR16:$src), 0>;
3135 def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX64rr32 GR64:$dst, GR32:$src), 0>;
// movzx aliases: same scheme as movsx above.
3138 def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX16rr8 GR16:$dst, GR8:$src), 0>;
3139 def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX16rm8 GR16:$dst, i8mem:$src), 0>;
3140 def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX32rr8 GR32:$dst, GR8:$src), 0>;
3141 def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX32rr16 GR32:$dst, GR16:$src), 0>;
3142 def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX64rr8 GR64:$dst, GR8:$src), 0>;
3143 def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX64rr16 GR64:$dst, GR16:$src), 0>;
3144 // Note: No GR32->GR64 movzx form.
3146 // outb %dx -> outb %al, %dx
3147 def : InstAlias<"outb\t{%dx|dx}", (OUT8rr), 0>;
3148 def : InstAlias<"outw\t{%dx|dx}", (OUT16rr), 0>;
3149 def : InstAlias<"outl\t{%dx|dx}", (OUT32rr), 0>;
// Fixed-port forms: "outb $port" with an 8-bit immediate port number.
3150 def : InstAlias<"outb\t$port", (OUT8ir u8imm:$port), 0>;
3151 def : InstAlias<"outw\t$port", (OUT16ir u8imm:$port), 0>;
3152 def : InstAlias<"outl\t$port", (OUT32ir u8imm:$port), 0>;
3154 // 'sldt <mem>' can be encoded with either sldtw or sldtq with the same
3155 // effect (both store to a 16-bit mem). Force to sldtw to avoid ambiguity
3156 // errors, since its encoding is the most compact.
3157 def : InstAlias<"sldt $mem", (SLDT16m i16mem:$mem), 0>;
3159 // shld/shrd op,op -> shld op, op, CL
3160 def : InstAlias<"shld{w}\t{$r2, $r1|$r1, $r2}", (SHLD16rrCL GR16:$r1, GR16:$r2), 0>;
3161 def : InstAlias<"shld{l}\t{$r2, $r1|$r1, $r2}", (SHLD32rrCL GR32:$r1, GR32:$r2), 0>;
3162 def : InstAlias<"shld{q}\t{$r2, $r1|$r1, $r2}", (SHLD64rrCL GR64:$r1, GR64:$r2), 0>;
3163 def : InstAlias<"shrd{w}\t{$r2, $r1|$r1, $r2}", (SHRD16rrCL GR16:$r1, GR16:$r2), 0>;
3164 def : InstAlias<"shrd{l}\t{$r2, $r1|$r1, $r2}", (SHRD32rrCL GR32:$r1, GR32:$r2), 0>;
3165 def : InstAlias<"shrd{q}\t{$r2, $r1|$r1, $r2}", (SHRD64rrCL GR64:$r1, GR64:$r2), 0>;
// Same two-operand shorthand, memory-destination forms.
3167 def : InstAlias<"shld{w}\t{$reg, $mem|$mem, $reg}", (SHLD16mrCL i16mem:$mem, GR16:$reg), 0>;
3168 def : InstAlias<"shld{l}\t{$reg, $mem|$mem, $reg}", (SHLD32mrCL i32mem:$mem, GR32:$reg), 0>;
3169 def : InstAlias<"shld{q}\t{$reg, $mem|$mem, $reg}", (SHLD64mrCL i64mem:$mem, GR64:$reg), 0>;
3170 def : InstAlias<"shrd{w}\t{$reg, $mem|$mem, $reg}", (SHRD16mrCL i16mem:$mem, GR16:$reg), 0>;
3171 def : InstAlias<"shrd{l}\t{$reg, $mem|$mem, $reg}", (SHRD32mrCL i32mem:$mem, GR32:$reg), 0>;
3172 def : InstAlias<"shrd{q}\t{$reg, $mem|$mem, $reg}", (SHRD64mrCL i64mem:$mem, GR64:$reg), 0>;
3174 /* FIXME: This is disabled because the asm matcher is currently incapable of
3175 * matching a fixed immediate like $1.
3176 // "shl X, $1" is an alias for "shl X".
3177 multiclass ShiftRotateByOneAlias<string Mnemonic, string Opc> {
3178 def : InstAlias<!strconcat(Mnemonic, "b $op, $$1"),
3179 (!cast<Instruction>(!strconcat(Opc, "8r1")) GR8:$op)>;
3180 def : InstAlias<!strconcat(Mnemonic, "w $op, $$1"),
3181 (!cast<Instruction>(!strconcat(Opc, "16r1")) GR16:$op)>;
3182 def : InstAlias<!strconcat(Mnemonic, "l $op, $$1"),
3183 (!cast<Instruction>(!strconcat(Opc, "32r1")) GR32:$op)>;
3184 def : InstAlias<!strconcat(Mnemonic, "q $op, $$1"),
3185 (!cast<Instruction>(!strconcat(Opc, "64r1")) GR64:$op)>;
3186 def : InstAlias<!strconcat(Mnemonic, "b $op, $$1"),
3187 (!cast<Instruction>(!strconcat(Opc, "8m1")) i8mem:$op)>;
3188 def : InstAlias<!strconcat(Mnemonic, "w $op, $$1"),
3189 (!cast<Instruction>(!strconcat(Opc, "16m1")) i16mem:$op)>;
3190 def : InstAlias<!strconcat(Mnemonic, "l $op, $$1"),
3191 (!cast<Instruction>(!strconcat(Opc, "32m1")) i32mem:$op)>;
3192 def : InstAlias<!strconcat(Mnemonic, "q $op, $$1"),
3193 (!cast<Instruction>(!strconcat(Opc, "64m1")) i64mem:$op)>;
3196 defm : ShiftRotateByOneAlias<"rcl", "RCL">;
3197 defm : ShiftRotateByOneAlias<"rcr", "RCR">;
3198 defm : ShiftRotateByOneAlias<"rol", "ROL">;
3199 defm : ShiftRotateByOneAlias<"ror", "ROR">;
3202 // test: We accept "testX <reg>, <mem>" and "testX <mem>, <reg>" as synonyms.
3203 def : InstAlias<"test{b}\t{$val, $mem|$mem, $val}",
3204 (TEST8rm GR8 :$val, i8mem :$mem), 0>;
3205 def : InstAlias<"test{w}\t{$val, $mem|$mem, $val}",
3206 (TEST16rm GR16:$val, i16mem:$mem), 0>;
3207 def : InstAlias<"test{l}\t{$val, $mem|$mem, $val}",
3208 (TEST32rm GR32:$val, i32mem:$mem), 0>;
3209 def : InstAlias<"test{q}\t{$val, $mem|$mem, $val}",
3210 (TEST64rm GR64:$val, i64mem:$mem), 0>;
3212 // xchg: We accept "xchgX <reg>, <mem>" and "xchgX <mem>, <reg>" as synonyms.
3213 def : InstAlias<"xchg{b}\t{$mem, $val|$val, $mem}",
3214 (XCHG8rm GR8 :$val, i8mem :$mem), 0>;
3215 def : InstAlias<"xchg{w}\t{$mem, $val|$val, $mem}",
3216 (XCHG16rm GR16:$val, i16mem:$mem), 0>;
3217 def : InstAlias<"xchg{l}\t{$mem, $val|$val, $mem}",
3218 (XCHG32rm GR32:$val, i32mem:$mem), 0>;
3219 def : InstAlias<"xchg{q}\t{$mem, $val|$val, $mem}",
3220 (XCHG64rm GR64:$val, i64mem:$mem), 0>;
3222 // xchg: We accept "xchgX <reg>, %eax" and "xchgX %eax, <reg>" as synonyms.
3223 def : InstAlias<"xchg{w}\t{%ax, $src|$src, ax}", (XCHG16ar GR16:$src), 0>;
3224 def : InstAlias<"xchg{l}\t{%eax, $src|$src, eax}",
3225 (XCHG32ar GR32:$src), 0>, Requires<[Not64BitMode]>;
// NOTE(review): the 64-bit-mode variant uses GR32_NOAX, which by its name
// excludes EAX — presumably to avoid matching "xchg %eax, %eax" to the
// short-form encoding in 64-bit mode; confirm against the register-class def.
3226 def : InstAlias<"xchg{l}\t{%eax, $src|$src, eax}",
3227 (XCHG32ar64 GR32_NOAX:$src), 0>, Requires<[In64BitMode]>;
3228 def : InstAlias<"xchg{q}\t{%rax, $src|$src, rax}", (XCHG64ar GR64:$src), 0>;
3230 // These aliases exist to get the parser to prioritize matching 8-bit
3231 // immediate encodings over matching the implicit ax/eax/rax encodings. By
3232 // explicitly mentioning the A register here, these entries will be ordered
3233 // first due to the more explicit immediate type.
// 16-bit forms: "op $imm8, %ax" -> opri8 with AX pinned.
3234 def : InstAlias<"adc{w}\t{$imm, %ax|ax, $imm}", (ADC16ri8 AX, i16i8imm:$imm), 0>;
3235 def : InstAlias<"add{w}\t{$imm, %ax|ax, $imm}", (ADD16ri8 AX, i16i8imm:$imm), 0>;
3236 def : InstAlias<"and{w}\t{$imm, %ax|ax, $imm}", (AND16ri8 AX, i16i8imm:$imm), 0>;
3237 def : InstAlias<"cmp{w}\t{$imm, %ax|ax, $imm}", (CMP16ri8 AX, i16i8imm:$imm), 0>;
3238 def : InstAlias<"or{w}\t{$imm, %ax|ax, $imm}", (OR16ri8 AX, i16i8imm:$imm), 0>;
3239 def : InstAlias<"sbb{w}\t{$imm, %ax|ax, $imm}", (SBB16ri8 AX, i16i8imm:$imm), 0>;
3240 def : InstAlias<"sub{w}\t{$imm, %ax|ax, $imm}", (SUB16ri8 AX, i16i8imm:$imm), 0>;
3241 def : InstAlias<"xor{w}\t{$imm, %ax|ax, $imm}", (XOR16ri8 AX, i16i8imm:$imm), 0>;
// 32-bit forms: "op $imm8, %eax" -> opri8 with EAX pinned.
3243 def : InstAlias<"adc{l}\t{$imm, %eax|eax, $imm}", (ADC32ri8 EAX, i32i8imm:$imm), 0>;
3244 def : InstAlias<"add{l}\t{$imm, %eax|eax, $imm}", (ADD32ri8 EAX, i32i8imm:$imm), 0>;
3245 def : InstAlias<"and{l}\t{$imm, %eax|eax, $imm}", (AND32ri8 EAX, i32i8imm:$imm), 0>;
3246 def : InstAlias<"cmp{l}\t{$imm, %eax|eax, $imm}", (CMP32ri8 EAX, i32i8imm:$imm), 0>;
3247 def : InstAlias<"or{l}\t{$imm, %eax|eax, $imm}", (OR32ri8 EAX, i32i8imm:$imm), 0>;
3248 def : InstAlias<"sbb{l}\t{$imm, %eax|eax, $imm}", (SBB32ri8 EAX, i32i8imm:$imm), 0>;
3249 def : InstAlias<"sub{l}\t{$imm, %eax|eax, $imm}", (SUB32ri8 EAX, i32i8imm:$imm), 0>;
3250 def : InstAlias<"xor{l}\t{$imm, %eax|eax, $imm}", (XOR32ri8 EAX, i32i8imm:$imm), 0>;
// 64-bit forms: "op $imm8, %rax" -> opri8 with RAX pinned.
3252 def : InstAlias<"adc{q}\t{$imm, %rax|rax, $imm}", (ADC64ri8 RAX, i64i8imm:$imm), 0>;
3253 def : InstAlias<"add{q}\t{$imm, %rax|rax, $imm}", (ADD64ri8 RAX, i64i8imm:$imm), 0>;
3254 def : InstAlias<"and{q}\t{$imm, %rax|rax, $imm}", (AND64ri8 RAX, i64i8imm:$imm), 0>;
3255 def : InstAlias<"cmp{q}\t{$imm, %rax|rax, $imm}", (CMP64ri8 RAX, i64i8imm:$imm), 0>;
3256 def : InstAlias<"or{q}\t{$imm, %rax|rax, $imm}", (OR64ri8 RAX, i64i8imm:$imm), 0>;
3257 def : InstAlias<"sbb{q}\t{$imm, %rax|rax, $imm}", (SBB64ri8 RAX, i64i8imm:$imm), 0>;
3258 def : InstAlias<"sub{q}\t{$imm, %rax|rax, $imm}", (SUB64ri8 RAX, i64i8imm:$imm), 0>;
3259 def : InstAlias<"xor{q}\t{$imm, %rax|rax, $imm}", (XOR64ri8 RAX, i64i8imm:$imm), 0>;