1 //===-- X86InstrInfo.td - Main X86 Instruction Definition --*- tablegen -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file describes the X86 instruction set, defining the instructions, and
10 // properties of the instructions which are needed for code generation, machine
11 // code emission, and analysis.
13 //===----------------------------------------------------------------------===//
15 //===----------------------------------------------------------------------===//
16 // X86 specific DAG Nodes.
// NOTE(review): this excerpt carries extraction artifacts — each line has the
// original file's line number fused onto the front, and gaps in that embedded
// numbering show several multi-line definitions are missing continuation
// lines. Code left byte-identical; only comments added.
//
// SDTypeProfile definitions: result/operand type constraints for the
// X86-specific SelectionDAG nodes defined further below in this file.
19 def SDTX86CmpTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisInt<1>,
21 def SDTX86FCmp : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisFP<1>,
// CMOV: result and both value operands share a type; operand 3 is the i8
// condition code and operand 4 the i32 EFLAGS value.
24 def SDTX86Cmov : SDTypeProfile<1, 4,
25 [SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>,
26 SDTCisVT<3, i8>, SDTCisVT<4, i32>]>;
28 // Unary and binary operator instructions that set EFLAGS as a side-effect.
29 def SDTUnaryArithWithFlags : SDTypeProfile<2, 1,
31 SDTCisInt<0>, SDTCisVT<1, i32>]>;
33 def SDTBinaryArithWithFlags : SDTypeProfile<2, 2,
36 SDTCisInt<0>, SDTCisVT<1, i32>]>;
38 // SDTBinaryArithWithFlagsInOut - RES1, EFLAGS = op LHS, RHS, EFLAGS
39 def SDTBinaryArithWithFlagsInOut : SDTypeProfile<2, 3,
45 // RES1, RES2, FLAGS = op LHS, RHS
46 def SDT2ResultBinaryArithWithFlags : SDTypeProfile<3, 2,
50 SDTCisInt<0>, SDTCisVT<1, i32>]>;
// Conditional branch / setcc profiles: an i8 condition-code operand plus an
// i32 EFLAGS input.
51 def SDTX86BrCond : SDTypeProfile<0, 3,
52 [SDTCisVT<0, OtherVT>,
53 SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;
55 def SDTX86SetCC : SDTypeProfile<1, 2,
57 SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;
58 def SDTX86SetCC_C : SDTypeProfile<1, 2,
60 SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;
// Profiles for SAHF-style flag transfer, RDRAND/RDSEED-style reads, PKRU
// read/write, and the cmpxchg-style profiles used by the LCMPXCHG* nodes
// defined below.
62 def SDTX86sahf : SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisVT<1, i8>]>;
64 def SDTX86rdrand : SDTypeProfile<2, 0, [SDTCisInt<0>, SDTCisVT<1, i32>]>;
66 def SDTX86rdpkru : SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;
67 def SDTX86wrpkru : SDTypeProfile<0, 3, [SDTCisVT<0, i32>, SDTCisVT<1, i32>,
70 def SDTX86cas : SDTypeProfile<0, 3, [SDTCisPtrTy<0>, SDTCisInt<1>,
72 def SDTX86caspair : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
73 def SDTX86caspairSaveEbx8 : SDTypeProfile<1, 3,
74 [SDTCisVT<0, i32>, SDTCisPtrTy<1>,
75 SDTCisVT<2, i32>, SDTCisVT<3, i32>]>;
76 def SDTX86caspairSaveRbx16 : SDTypeProfile<1, 3,
77 [SDTCisVT<0, i64>, SDTCisPtrTy<1>,
78 SDTCisVT<2, i64>, SDTCisVT<3, i64>]>;
80 def SDTLockBinaryArithWithFlags : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
84 def SDTLockUnaryArithWithFlags : SDTypeProfile<1, 1, [SDTCisVT<0, i32>,
// Profiles for returns, call sequences, calls, varargs lowering, string ops,
// TLS address computation, dynamic stack allocation, and EH returns.
87 def SDTX86Ret : SDTypeProfile<0, -1, [SDTCisVT<0, i32>]>;
89 def SDT_X86CallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>,
91 def SDT_X86CallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>,
94 def SDT_X86Call : SDTypeProfile<0, -1, [SDTCisVT<0, iPTR>]>;
96 def SDT_X86NtBrind : SDTypeProfile<0, -1, [SDTCisVT<0, iPTR>]>;
98 def SDT_X86VASTART_SAVE_XMM_REGS : SDTypeProfile<0, -1, [SDTCisVT<0, i8>,
102 def SDT_X86VAARG_64 : SDTypeProfile<1, -1, [SDTCisPtrTy<0>,
108 def SDTX86RepStr : SDTypeProfile<0, 1, [SDTCisVT<0, OtherVT>]>;
110 def SDTX86Void : SDTypeProfile<0, 0, []>;
112 def SDTX86Wrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>;
114 def SDT_X86TLSADDR : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
116 def SDT_X86TLSBASEADDR : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
118 def SDT_X86TLSCALL : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
120 def SDT_X86WIN_ALLOCA : SDTypeProfile<0, 1, [SDTCisVT<0, iPTR>]>;
122 def SDT_X86SEG_ALLOCA : SDTypeProfile<1, 1, [SDTCisVT<0, iPTR>, SDTCisVT<1, iPTR>]>;
124 def SDT_X86PROBED_ALLOCA : SDTypeProfile<1, 1, [SDTCisVT<0, iPTR>, SDTCisVT<1, iPTR>]>;
126 def SDT_X86EHRET : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
128 def SDT_X86TCRET : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisVT<1, i32>]>;
130 def SDT_X86MEMBARRIER : SDTypeProfile<0, 0, []>;
132 def SDT_X86ENQCMD : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
133 SDTCisPtrTy<1>, SDTCisSameAs<1, 2>]>;
// X86-specific SelectionDAG node definitions, part 1: memory barriers, bit
// scans, funnel shifts, compares, CMOV/SETCC, hardware RNG, PKRU access,
// lock-cmpxchg nodes, and returns.
// NOTE(review): lines carry fused original line numbers and some continuation
// lines are missing in this excerpt; code left byte-identical.
135 def X86MemBarrier : SDNode<"X86ISD::MEMBARRIER", SDT_X86MEMBARRIER,
136 [SDNPHasChain,SDNPSideEffect]>;
137 def X86MFence : SDNode<"X86ISD::MFENCE", SDT_X86MEMBARRIER,
141 def X86bsf : SDNode<"X86ISD::BSF", SDTUnaryArithWithFlags>;
142 def X86bsr : SDNode<"X86ISD::BSR", SDTUnaryArithWithFlags>;
143 def X86fshl : SDNode<"X86ISD::FSHL", SDTIntShiftDOp>;
144 def X86fshr : SDNode<"X86ISD::FSHR", SDTIntShiftDOp>;
146 def X86cmp : SDNode<"X86ISD::CMP" , SDTX86CmpTest>;
147 def X86fcmp : SDNode<"X86ISD::FCMP", SDTX86FCmp>;
// Strict (exception-aware) FP compares are chained so they are not reordered.
148 def X86strict_fcmp : SDNode<"X86ISD::STRICT_FCMP", SDTX86FCmp, [SDNPHasChain]>;
149 def X86strict_fcmps : SDNode<"X86ISD::STRICT_FCMPS", SDTX86FCmp, [SDNPHasChain]>;
150 def X86bt : SDNode<"X86ISD::BT", SDTX86CmpTest>;
152 def X86cmov : SDNode<"X86ISD::CMOV", SDTX86Cmov>;
153 def X86brcond : SDNode<"X86ISD::BRCOND", SDTX86BrCond,
155 def X86setcc : SDNode<"X86ISD::SETCC", SDTX86SetCC>;
156 def X86setcc_c : SDNode<"X86ISD::SETCC_CARRY", SDTX86SetCC_C>;
// RDRAND/RDSEED/PKRU nodes are chained and have side effects so they are
// never CSE'd or dropped.
158 def X86rdrand : SDNode<"X86ISD::RDRAND", SDTX86rdrand,
159 [SDNPHasChain, SDNPSideEffect]>;
161 def X86rdseed : SDNode<"X86ISD::RDSEED", SDTX86rdrand,
162 [SDNPHasChain, SDNPSideEffect]>;
164 def X86rdpkru : SDNode<"X86ISD::RDPKRU", SDTX86rdpkru,
165 [SDNPHasChain, SDNPSideEffect]>;
166 def X86wrpkru : SDNode<"X86ISD::WRPKRU", SDTX86wrpkru,
167 [SDNPHasChain, SDNPSideEffect]>;
// LCMPXCHG* nodes: chained, glued, and marked as loading/storing through a
// memory operand.
169 def X86cas : SDNode<"X86ISD::LCMPXCHG_DAG", SDTX86cas,
170 [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
171 SDNPMayLoad, SDNPMemOperand]>;
172 def X86cas8 : SDNode<"X86ISD::LCMPXCHG8_DAG", SDTX86caspair,
173 [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
174 SDNPMayLoad, SDNPMemOperand]>;
175 def X86cas16 : SDNode<"X86ISD::LCMPXCHG16_DAG", SDTX86caspair,
176 [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
177 SDNPMayLoad, SDNPMemOperand]>;
178 def X86cas8save_ebx : SDNode<"X86ISD::LCMPXCHG8_SAVE_EBX_DAG",
179 SDTX86caspairSaveEbx8,
180 [SDNPHasChain, SDNPInGlue, SDNPOutGlue,
181 SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
182 def X86cas16save_rbx : SDNode<"X86ISD::LCMPXCHG16_SAVE_RBX_DAG",
183 SDTX86caspairSaveRbx16,
184 [SDNPHasChain, SDNPInGlue, SDNPOutGlue,
185 SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
187 def X86retflag : SDNode<"X86ISD::RET_FLAG", SDTX86Ret,
188 [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
189 def X86iret : SDNode<"X86ISD::IRET", SDTX86Ret,
190 [SDNPHasChain, SDNPOptInGlue]>;
// X86-specific SelectionDAG node definitions, part 2: varargs lowering, call
// sequence markers, calls, REP string ops, address wrappers, frame recovery,
// TLS address nodes, EH returns/SjLj, and tail-call returns.
// NOTE(review): lines carry fused original line numbers and some continuation
// lines are missing in this excerpt; code left byte-identical.
192 def X86vastart_save_xmm_regs :
193 SDNode<"X86ISD::VASTART_SAVE_XMM_REGS",
194 SDT_X86VASTART_SAVE_XMM_REGS,
195 [SDNPHasChain, SDNPVariadic]>;
// NOTE(review): the "def" opener line for the VAARG_64 node is missing here
// (gap in the embedded numbering).
197 SDNode<"X86ISD::VAARG_64", SDT_X86VAARG_64,
198 [SDNPHasChain, SDNPMayLoad, SDNPMayStore,
200 def X86callseq_start :
201 SDNode<"ISD::CALLSEQ_START", SDT_X86CallSeqStart,
202 [SDNPHasChain, SDNPOutGlue]>;
// NOTE(review): the "def" opener line for the CALLSEQ_END node is missing
// here (gap in the embedded numbering).
204 SDNode<"ISD::CALLSEQ_END", SDT_X86CallSeqEnd,
205 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
207 def X86call : SDNode<"X86ISD::CALL", SDT_X86Call,
208 [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
// NT_CALL / NT_BRIND: "no-track" call and indirect-branch variants.
211 def X86NoTrackCall : SDNode<"X86ISD::NT_CALL", SDT_X86Call,
212 [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
214 def X86NoTrackBrind : SDNode<"X86ISD::NT_BRIND", SDT_X86NtBrind,
217 def X86rep_stos: SDNode<"X86ISD::REP_STOS", SDTX86RepStr,
218 [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore]>;
219 def X86rep_movs: SDNode<"X86ISD::REP_MOVS", SDTX86RepStr,
220 [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
// Address wrappers; WrapperRIP is the RIP-relative form.
223 def X86Wrapper : SDNode<"X86ISD::Wrapper", SDTX86Wrapper>;
224 def X86WrapperRIP : SDNode<"X86ISD::WrapperRIP", SDTX86Wrapper>;
226 def X86RecoverFrameAlloc : SDNode<"ISD::LOCAL_RECOVER",
227 SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
230 def X86tlsaddr : SDNode<"X86ISD::TLSADDR", SDT_X86TLSADDR,
231 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
233 def X86tlsbaseaddr : SDNode<"X86ISD::TLSBASEADDR", SDT_X86TLSBASEADDR,
234 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
236 def X86ehret : SDNode<"X86ISD::EH_RETURN", SDT_X86EHRET,
239 def X86eh_sjlj_setjmp : SDNode<"X86ISD::EH_SJLJ_SETJMP",
240 SDTypeProfile<1, 1, [SDTCisInt<0>,
242 [SDNPHasChain, SDNPSideEffect]>;
243 def X86eh_sjlj_longjmp : SDNode<"X86ISD::EH_SJLJ_LONGJMP",
244 SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>,
245 [SDNPHasChain, SDNPSideEffect]>;
246 def X86eh_sjlj_setup_dispatch : SDNode<"X86ISD::EH_SJLJ_SETUP_DISPATCH",
247 SDTypeProfile<0, 0, []>,
248 [SDNPHasChain, SDNPSideEffect]>;
250 def X86tcret : SDNode<"X86ISD::TC_RETURN", SDT_X86TCRET,
251 [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
// X86-specific SelectionDAG node definitions, part 3: EFLAGS-producing
// arithmetic, atomic locked read-modify-write ops, BMI/TBM bit manipulation,
// dynamic stack allocation, TLS calls, and LWP/UMWAIT/TPAUSE/ENQCMD nodes.
// NOTE(review): lines carry fused original line numbers and some continuation
// lines are missing in this excerpt; code left byte-identical.
253 def X86add_flag : SDNode<"X86ISD::ADD", SDTBinaryArithWithFlags,
255 def X86sub_flag : SDNode<"X86ISD::SUB", SDTBinaryArithWithFlags>;
256 def X86smul_flag : SDNode<"X86ISD::SMUL", SDTBinaryArithWithFlags,
257 def X86umul_flag : SDNode<"X86ISD::UMUL", SDT2ResultBinaryArithWithFlags,
260 def X86adc_flag : SDNode<"X86ISD::ADC", SDTBinaryArithWithFlagsInOut>;
261 def X86sbb_flag : SDNode<"X86ISD::SBB", SDTBinaryArithWithFlagsInOut>;
263 def X86or_flag : SDNode<"X86ISD::OR", SDTBinaryArithWithFlags,
265 def X86xor_flag : SDNode<"X86ISD::XOR", SDTBinaryArithWithFlags,
267 def X86and_flag : SDNode<"X86ISD::AND", SDTBinaryArithWithFlags,
// Locked RMW nodes: chained and marked MayStore/MayLoad.
270 def X86lock_add : SDNode<"X86ISD::LADD", SDTLockBinaryArithWithFlags,
271 [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
273 def X86lock_sub : SDNode<"X86ISD::LSUB", SDTLockBinaryArithWithFlags,
274 [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
276 def X86lock_or : SDNode<"X86ISD::LOR", SDTLockBinaryArithWithFlags,
277 [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
279 def X86lock_xor : SDNode<"X86ISD::LXOR", SDTLockBinaryArithWithFlags,
280 [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
282 def X86lock_and : SDNode<"X86ISD::LAND", SDTLockBinaryArithWithFlags,
283 [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
// Bit-manipulation nodes (BEXTR/BZHI/PDEP/PEXT) are plain binary int ops.
286 def X86bextr : SDNode<"X86ISD::BEXTR", SDTIntBinOp>;
288 def X86bzhi : SDNode<"X86ISD::BZHI", SDTIntBinOp>;
290 def X86pdep : SDNode<"X86ISD::PDEP", SDTIntBinOp>;
291 def X86pext : SDNode<"X86ISD::PEXT", SDTIntBinOp>;
293 def X86mul_imm : SDNode<"X86ISD::MUL_IMM", SDTIntBinOp>;
295 def X86WinAlloca : SDNode<"X86ISD::WIN_ALLOCA", SDT_X86WIN_ALLOCA,
296 [SDNPHasChain, SDNPOutGlue]>;
298 def X86SegAlloca : SDNode<"X86ISD::SEG_ALLOCA", SDT_X86SEG_ALLOCA,
301 def X86ProbedAlloca : SDNode<"X86ISD::PROBED_ALLOCA", SDT_X86PROBED_ALLOCA,
304 def X86TLSCall : SDNode<"X86ISD::TLSCALL", SDT_X86TLSCALL,
305 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
307 def X86lwpins : SDNode<"X86ISD::LWPINS",
308 SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisInt<1>,
309 SDTCisVT<2, i32>, SDTCisVT<3, i32>]>,
310 [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPSideEffect]>;
312 def X86umwait : SDNode<"X86ISD::UMWAIT",
313 SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisInt<1>,
314 SDTCisVT<2, i32>, SDTCisVT<3, i32>]>,
315 [SDNPHasChain, SDNPSideEffect]>;
317 def X86tpause : SDNode<"X86ISD::TPAUSE",
318 SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisInt<1>,
319 SDTCisVT<2, i32>, SDTCisVT<3, i32>]>,
320 [SDNPHasChain, SDNPSideEffect]>;
322 def X86enqcmd : SDNode<"X86ISD::ENQCMD", SDT_X86ENQCMD,
323 [SDNPHasChain, SDNPSideEffect]>;
324 def X86enqcmds : SDNode<"X86ISD::ENQCMDS", SDT_X86ENQCMD,
325 [SDNPHasChain, SDNPSideEffect]>;
// NOTE(review): lines carry fused original line numbers and some continuation
// lines (including closing braces of several records) are missing in this
// excerpt; code left byte-identical.
327 //===----------------------------------------------------------------------===//
328 // X86 Operand Definitions.
331 // A version of ptr_rc which excludes SP, ESP, and RSP. This is used for
332 // the index operand of an address, to conform to x86 encoding restrictions.
333 def ptr_rc_nosp : PointerLikeRegClass<1>;
335 // *mem - Operand definitions for the funky X86 addressing mode operands.
337 def X86MemAsmOperand : AsmOperandClass {
// Per-size memory AsmOperandClasses; all render through addMemOperands and
// are subclasses of X86MemAsmOperand.
340 let RenderMethod = "addMemOperands", SuperClasses = [X86MemAsmOperand] in {
341 def X86Mem8AsmOperand : AsmOperandClass { let Name = "Mem8"; }
342 def X86Mem16AsmOperand : AsmOperandClass { let Name = "Mem16"; }
343 def X86Mem32AsmOperand : AsmOperandClass { let Name = "Mem32"; }
344 def X86Mem64AsmOperand : AsmOperandClass { let Name = "Mem64"; }
345 def X86Mem80AsmOperand : AsmOperandClass { let Name = "Mem80"; }
346 def X86Mem128AsmOperand : AsmOperandClass { let Name = "Mem128"; }
347 def X86Mem256AsmOperand : AsmOperandClass { let Name = "Mem256"; }
348 def X86Mem512AsmOperand : AsmOperandClass { let Name = "Mem512"; }
349 // Gather mem operands
350 def X86Mem64_RC128Operand : AsmOperandClass { let Name = "Mem64_RC128"; }
351 def X86Mem128_RC128Operand : AsmOperandClass { let Name = "Mem128_RC128"; }
352 def X86Mem256_RC128Operand : AsmOperandClass { let Name = "Mem256_RC128"; }
353 def X86Mem128_RC256Operand : AsmOperandClass { let Name = "Mem128_RC256"; }
354 def X86Mem256_RC256Operand : AsmOperandClass { let Name = "Mem256_RC256"; }
356 def X86Mem64_RC128XOperand : AsmOperandClass { let Name = "Mem64_RC128X"; }
357 def X86Mem128_RC128XOperand : AsmOperandClass { let Name = "Mem128_RC128X"; }
358 def X86Mem256_RC128XOperand : AsmOperandClass { let Name = "Mem256_RC128X"; }
359 def X86Mem128_RC256XOperand : AsmOperandClass { let Name = "Mem128_RC256X"; }
360 def X86Mem256_RC256XOperand : AsmOperandClass { let Name = "Mem256_RC256X"; }
361 def X86Mem512_RC256XOperand : AsmOperandClass { let Name = "Mem512_RC256X"; }
362 def X86Mem256_RC512Operand : AsmOperandClass { let Name = "Mem256_RC512"; }
363 def X86Mem512_RC512Operand : AsmOperandClass { let Name = "Mem512_RC512"; }
365 def X86SibMemOperand : AsmOperandClass { let Name = "SibMem"; }
368 def X86AbsMemAsmOperand : AsmOperandClass {
370 let SuperClasses = [X86MemAsmOperand];
// Common base for X86 memory operands: 5-part MI operand encoding
// (base reg, scale imm, index reg, displacement imm, segment reg).
373 class X86MemOperand<string printMethod,
374 AsmOperandClass parserMatchClass = X86MemAsmOperand> : Operand<iPTR> {
375 let PrintMethod = printMethod;
376 let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, SEGMENT_REG);
377 let ParserMatchClass = parserMatchClass;
378 let OperandType = "OPERAND_MEMORY";
381 // Gather mem operands
// Vector-indexed variant: the index operand is a vector register class RC.
382 class X86VMemOperand<RegisterClass RC, string printMethod,
383 AsmOperandClass parserMatchClass>
384 : X86MemOperand<printMethod, parserMatchClass> {
385 let MIOperandInfo = (ops ptr_rc, i8imm, RC, i32imm, SEGMENT_REG);
// Concrete memory operand definitions for each access width, plus the
// gather/scatter vector-indexed memory operands (vx*/vy*/vz*).
// NOTE(review): lines carry fused original line numbers; code byte-identical.
388 def anymem : X86MemOperand<"printMemReference">;
// PatFrags matching either the strict (chained) or ordinary FP compare node.
389 def X86any_fcmp : PatFrags<(ops node:$lhs, node:$rhs),
390 [(X86strict_fcmp node:$lhs, node:$rhs),
391 (X86fcmp node:$lhs, node:$rhs)]>;
393 // FIXME: Right now we allow any size during parsing, but we might want to
394 // restrict to only unsized memory.
395 def opaquemem : X86MemOperand<"printMemReference">;
397 def sibmem: X86MemOperand<"printMemReference", X86SibMemOperand>;
399 def i8mem : X86MemOperand<"printbytemem", X86Mem8AsmOperand>;
400 def i16mem : X86MemOperand<"printwordmem", X86Mem16AsmOperand>;
401 def i32mem : X86MemOperand<"printdwordmem", X86Mem32AsmOperand>;
402 def i64mem : X86MemOperand<"printqwordmem", X86Mem64AsmOperand>;
403 def i128mem : X86MemOperand<"printxmmwordmem", X86Mem128AsmOperand>;
404 def i256mem : X86MemOperand<"printymmwordmem", X86Mem256AsmOperand>;
405 def i512mem : X86MemOperand<"printzmmwordmem", X86Mem512AsmOperand>;
406 def f32mem : X86MemOperand<"printdwordmem", X86Mem32AsmOperand>;
407 def f64mem : X86MemOperand<"printqwordmem", X86Mem64AsmOperand>;
408 def f80mem : X86MemOperand<"printtbytemem", X86Mem80AsmOperand>;
409 def f128mem : X86MemOperand<"printxmmwordmem", X86Mem128AsmOperand>;
410 def f256mem : X86MemOperand<"printymmwordmem", X86Mem256AsmOperand>;
411 def f512mem : X86MemOperand<"printzmmwordmem", X86Mem512AsmOperand>;
413 // Gather mem operands
// Naming: vx*/vy*/vz* = XMM/YMM/ZMM index register; the trailing number is
// the memory access width; the "x" suffix selects the AVX-512 (RC...X)
// register classes.
414 def vx64mem : X86VMemOperand<VR128, "printqwordmem", X86Mem64_RC128Operand>;
415 def vx128mem : X86VMemOperand<VR128, "printxmmwordmem", X86Mem128_RC128Operand>;
416 def vx256mem : X86VMemOperand<VR128, "printymmwordmem", X86Mem256_RC128Operand>;
417 def vy128mem : X86VMemOperand<VR256, "printxmmwordmem", X86Mem128_RC256Operand>;
418 def vy256mem : X86VMemOperand<VR256, "printymmwordmem", X86Mem256_RC256Operand>;
420 def vx64xmem : X86VMemOperand<VR128X, "printqwordmem", X86Mem64_RC128XOperand>;
421 def vx128xmem : X86VMemOperand<VR128X, "printxmmwordmem", X86Mem128_RC128XOperand>;
422 def vx256xmem : X86VMemOperand<VR128X, "printymmwordmem", X86Mem256_RC128XOperand>;
423 def vy128xmem : X86VMemOperand<VR256X, "printxmmwordmem", X86Mem128_RC256XOperand>;
424 def vy256xmem : X86VMemOperand<VR256X, "printymmwordmem", X86Mem256_RC256XOperand>;
425 def vy512xmem : X86VMemOperand<VR256X, "printzmmwordmem", X86Mem512_RC256XOperand>;
426 def vz256mem : X86VMemOperand<VR512, "printymmwordmem", X86Mem256_RC512Operand>;
427 def vz512mem : X86VMemOperand<VR512, "printzmmwordmem", X86Mem512_RC512Operand>;
// Special-purpose memory operands (NOREX byte memory, tail-call memory) and
// branch-target operands.
// NOTE(review): lines carry fused original line numbers and some closing
// braces are missing in this excerpt; code left byte-identical.
429 // A version of i8mem for use on x86-64 and x32 that uses a NOREX GPR instead
430 // of a plain GPR, so that it doesn't potentially require a REX prefix.
431 def ptr_rc_norex : PointerLikeRegClass<2>;
432 def ptr_rc_norex_nosp : PointerLikeRegClass<3>;
434 def i8mem_NOREX : Operand<iPTR> {
435 let PrintMethod = "printbytemem";
436 let MIOperandInfo = (ops ptr_rc_norex, i8imm, ptr_rc_norex_nosp, i32imm,
438 let ParserMatchClass = X86Mem8AsmOperand;
439 let OperandType = "OPERAND_MEMORY";
442 // GPRs available for tailcall.
443 // It represents GR32_TC, GR64_TC or GR64_TCW64.
444 def ptr_rc_tailcall : PointerLikeRegClass<4>;
446 // Special i32mem for addresses of load folding tail calls. These are not
447 // allowed to use callee-saved registers since they must be scheduled
448 // after callee-saved register are popped.
449 def i32mem_TC : Operand<i32> {
450 let PrintMethod = "printdwordmem";
451 let MIOperandInfo = (ops ptr_rc_tailcall, i8imm, ptr_rc_tailcall,
452 i32imm, SEGMENT_REG);
453 let ParserMatchClass = X86Mem32AsmOperand;
454 let OperandType = "OPERAND_MEMORY";
457 // Special i64mem for addresses of load folding tail calls. These are not
458 // allowed to use callee-saved registers since they must be scheduled
459 // after callee-saved register are popped.
460 def i64mem_TC : Operand<i64> {
461 let PrintMethod = "printqwordmem";
462 let MIOperandInfo = (ops ptr_rc_tailcall, i8imm,
463 ptr_rc_tailcall, i32imm, SEGMENT_REG);
464 let ParserMatchClass = X86Mem64AsmOperand;
465 let OperandType = "OPERAND_MEMORY";
468 // Special parser to detect 16-bit mode to select 16-bit displacement.
469 def X86AbsMem16AsmOperand : AsmOperandClass {
470 let Name = "AbsMem16";
471 let RenderMethod = "addAbsMemOperands";
472 let SuperClasses = [X86AbsMemAsmOperand];
475 // Branch targets print as pc-relative values.
476 class BranchTargetOperand<ValueType ty> : Operand<ty> {
477 let OperandType = "OPERAND_PCREL";
478 let PrintMethod = "printPCRelImm";
479 let ParserMatchClass = X86AbsMemAsmOperand;
482 def i32imm_brtarget : BranchTargetOperand<i32>;
483 def i16imm_brtarget : BranchTargetOperand<i16>;
485 // 64-bits but only 32 bits are significant, and those bits are treated as being
487 def i64i32imm_brtarget : BranchTargetOperand<i64>;
489 def brtarget : BranchTargetOperand<OtherVT>;
490 def brtarget8 : BranchTargetOperand<OtherVT>;
// brtarget16 overrides the parser class to get 16-bit displacements.
491 def brtarget16 : BranchTargetOperand<OtherVT> {
492 let ParserMatchClass = X86AbsMem16AsmOperand;
494 def brtarget32 : BranchTargetOperand<OtherVT>;
// AsmOperandClass definitions for string-instruction source/destination
// index operands (SrcIdx*/DstIdx*) and absolute memory-offset (MemOffs*)
// operands. MemOffsA_B means address size A bits, operand size B bits.
// NOTE(review): lines carry fused original line numbers and the closing
// braces of individual records are missing in this excerpt; code left
// byte-identical.
496 let RenderMethod = "addSrcIdxOperands" in {
497 def X86SrcIdx8Operand : AsmOperandClass {
498 let Name = "SrcIdx8";
499 let SuperClasses = [X86Mem8AsmOperand];
501 def X86SrcIdx16Operand : AsmOperandClass {
502 let Name = "SrcIdx16";
503 let SuperClasses = [X86Mem16AsmOperand];
505 def X86SrcIdx32Operand : AsmOperandClass {
506 let Name = "SrcIdx32";
507 let SuperClasses = [X86Mem32AsmOperand];
509 def X86SrcIdx64Operand : AsmOperandClass {
510 let Name = "SrcIdx64";
511 let SuperClasses = [X86Mem64AsmOperand];
513 } // RenderMethod = "addSrcIdxOperands"
515 let RenderMethod = "addDstIdxOperands" in {
516 def X86DstIdx8Operand : AsmOperandClass {
517 let Name = "DstIdx8";
518 let SuperClasses = [X86Mem8AsmOperand];
520 def X86DstIdx16Operand : AsmOperandClass {
521 let Name = "DstIdx16";
522 let SuperClasses = [X86Mem16AsmOperand];
524 def X86DstIdx32Operand : AsmOperandClass {
525 let Name = "DstIdx32";
526 let SuperClasses = [X86Mem32AsmOperand];
528 def X86DstIdx64Operand : AsmOperandClass {
529 let Name = "DstIdx64";
530 let SuperClasses = [X86Mem64AsmOperand];
532 } // RenderMethod = "addDstIdxOperands"
534 let RenderMethod = "addMemOffsOperands" in {
535 def X86MemOffs16_8AsmOperand : AsmOperandClass {
536 let Name = "MemOffs16_8";
537 let SuperClasses = [X86Mem8AsmOperand];
539 def X86MemOffs16_16AsmOperand : AsmOperandClass {
540 let Name = "MemOffs16_16";
541 let SuperClasses = [X86Mem16AsmOperand];
543 def X86MemOffs16_32AsmOperand : AsmOperandClass {
544 let Name = "MemOffs16_32";
545 let SuperClasses = [X86Mem32AsmOperand];
547 def X86MemOffs32_8AsmOperand : AsmOperandClass {
548 let Name = "MemOffs32_8";
549 let SuperClasses = [X86Mem8AsmOperand];
551 def X86MemOffs32_16AsmOperand : AsmOperandClass {
552 let Name = "MemOffs32_16";
553 let SuperClasses = [X86Mem16AsmOperand];
555 def X86MemOffs32_32AsmOperand : AsmOperandClass {
556 let Name = "MemOffs32_32";
557 let SuperClasses = [X86Mem32AsmOperand];
559 def X86MemOffs32_64AsmOperand : AsmOperandClass {
560 let Name = "MemOffs32_64";
561 let SuperClasses = [X86Mem64AsmOperand];
563 def X86MemOffs64_8AsmOperand : AsmOperandClass {
564 let Name = "MemOffs64_8";
565 let SuperClasses = [X86Mem8AsmOperand];
567 def X86MemOffs64_16AsmOperand : AsmOperandClass {
568 let Name = "MemOffs64_16";
569 let SuperClasses = [X86Mem16AsmOperand];
571 def X86MemOffs64_32AsmOperand : AsmOperandClass {
572 let Name = "MemOffs64_32";
573 let SuperClasses = [X86Mem32AsmOperand];
575 def X86MemOffs64_64AsmOperand : AsmOperandClass {
576 let Name = "MemOffs64_64";
577 let SuperClasses = [X86Mem64AsmOperand];
579 } // RenderMethod = "addMemOffsOperands"
// Helper operand classes and concrete operands for string-op source index,
// destination index, and absolute memory-offset forms.
// NOTE(review): lines carry fused original line numbers and some closing
// braces are missing in this excerpt; code left byte-identical.
// Source index: pointer register + segment register.
581 class X86SrcIdxOperand<string printMethod, AsmOperandClass parserMatchClass>
582 : X86MemOperand<printMethod, parserMatchClass> {
583 let MIOperandInfo = (ops ptr_rc, SEGMENT_REG);
// Destination index: pointer register only (no segment override).
586 class X86DstIdxOperand<string printMethod, AsmOperandClass parserMatchClass>
587 : X86MemOperand<printMethod, parserMatchClass> {
588 let MIOperandInfo = (ops ptr_rc);
591 def srcidx8 : X86SrcIdxOperand<"printSrcIdx8", X86SrcIdx8Operand>;
592 def srcidx16 : X86SrcIdxOperand<"printSrcIdx16", X86SrcIdx16Operand>;
593 def srcidx32 : X86SrcIdxOperand<"printSrcIdx32", X86SrcIdx32Operand>;
594 def srcidx64 : X86SrcIdxOperand<"printSrcIdx64", X86SrcIdx64Operand>;
595 def dstidx8 : X86DstIdxOperand<"printDstIdx8", X86DstIdx8Operand>;
596 def dstidx16 : X86DstIdxOperand<"printDstIdx16", X86DstIdx16Operand>;
597 def dstidx32 : X86DstIdxOperand<"printDstIdx32", X86DstIdx32Operand>;
598 def dstidx64 : X86DstIdxOperand<"printDstIdx64", X86DstIdx64Operand>;
// Memory offset (moffs) form: immediate address + segment register.
600 class X86MemOffsOperand<Operand immOperand, string printMethod,
601 AsmOperandClass parserMatchClass>
602 : X86MemOperand<printMethod, parserMatchClass> {
603 let MIOperandInfo = (ops immOperand, SEGMENT_REG);
// offsetA_B: A-bit address immediate for a B-bit memory access.
606 def offset16_8 : X86MemOffsOperand<i16imm, "printMemOffs8",
607 X86MemOffs16_8AsmOperand>;
608 def offset16_16 : X86MemOffsOperand<i16imm, "printMemOffs16",
609 X86MemOffs16_16AsmOperand>;
610 def offset16_32 : X86MemOffsOperand<i16imm, "printMemOffs32",
611 X86MemOffs16_32AsmOperand>;
612 def offset32_8 : X86MemOffsOperand<i32imm, "printMemOffs8",
613 X86MemOffs32_8AsmOperand>;
614 def offset32_16 : X86MemOffsOperand<i32imm, "printMemOffs16",
615 X86MemOffs32_16AsmOperand>;
616 def offset32_32 : X86MemOffsOperand<i32imm, "printMemOffs32",
617 X86MemOffs32_32AsmOperand>;
618 def offset32_64 : X86MemOffsOperand<i32imm, "printMemOffs64",
619 X86MemOffs32_64AsmOperand>;
620 def offset64_8 : X86MemOffsOperand<i64imm, "printMemOffs8",
621 X86MemOffs64_8AsmOperand>;
622 def offset64_16 : X86MemOffsOperand<i64imm, "printMemOffs16",
623 X86MemOffs64_16AsmOperand>;
624 def offset64_32 : X86MemOffsOperand<i64imm, "printMemOffs32",
625 X86MemOffs64_32AsmOperand>;
626 def offset64_64 : X86MemOffsOperand<i64imm, "printMemOffs64",
627 X86MemOffs64_64AsmOperand>;
// Condition-code operand, multi-size GPR register operands, AVX-512 rounding
// control, and the sign-extended / unsigned immediate AsmOperandClasses.
// NOTE(review): lines carry fused original line numbers and some closing
// braces are missing in this excerpt; code left byte-identical.
// i8 operand holding an X86 condition code, printed symbolically.
629 def ccode : Operand<i8> {
630 let PrintMethod = "printCondCode";
631 let OperandNamespace = "X86";
632 let OperandType = "OPERAND_COND_CODE";
// Base class for the sign-extended immediate classes defined below.
635 class ImmSExtAsmOperandClass : AsmOperandClass {
636 let SuperClasses = [ImmAsmOperand];
637 let RenderMethod = "addImmOperands";
// Register operands that accept more than one GPR width at parse time.
640 def X86GR32orGR64AsmOperand : AsmOperandClass {
641 let Name = "GR32orGR64";
643 def GR32orGR64 : RegisterOperand<GR32> {
644 let ParserMatchClass = X86GR32orGR64AsmOperand;
647 def X86GR16orGR32orGR64AsmOperand : AsmOperandClass {
648 let Name = "GR16orGR32orGR64";
650 def GR16orGR32orGR64 : RegisterOperand<GR16> {
651 let ParserMatchClass = X86GR16orGR32orGR64AsmOperand;
// AVX-512 static rounding-control operand.
654 def AVX512RCOperand : AsmOperandClass {
655 let Name = "AVX512RC";
657 def AVX512RC : Operand<i32> {
658 let PrintMethod = "printRoundingControl";
659 let OperandNamespace = "X86";
660 let OperandType = "OPERAND_ROUNDING_CONTROL";
661 let ParserMatchClass = AVX512RCOperand;
664 // Sign-extended immediate classes. We don't need to define the full lattice
665 // here because there is no instruction with an ambiguity between ImmSExti64i32
668 // The strange ranges come from the fact that the assembler always works with
669 // 64-bit immediates, but for a 16-bit target value we want to accept both "-1"
670 // (which will be a -1ULL), and "0xFF" (-1 in 16-bits).
673 // [0xFFFFFFFF80000000, 0xFFFFFFFFFFFFFFFF]
674 def ImmSExti64i32AsmOperand : ImmSExtAsmOperandClass {
675 let Name = "ImmSExti64i32";
678 // [0, 0x0000007F] | [0x000000000000FF80, 0x000000000000FFFF] |
679 // [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
680 def ImmSExti16i8AsmOperand : ImmSExtAsmOperandClass {
681 let Name = "ImmSExti16i8";
682 let SuperClasses = [ImmSExti64i32AsmOperand];
685 // [0, 0x0000007F] | [0x00000000FFFFFF80, 0x00000000FFFFFFFF] |
686 // [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
687 def ImmSExti32i8AsmOperand : ImmSExtAsmOperandClass {
688 let Name = "ImmSExti32i8";
692 // [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
693 def ImmSExti64i8AsmOperand : ImmSExtAsmOperandClass {
694 let Name = "ImmSExti64i8";
695 let SuperClasses = [ImmSExti16i8AsmOperand, ImmSExti32i8AsmOperand,
696 ImmSExti64i32AsmOperand];
699 // 4-bit immediate used by some XOP instructions
701 def ImmUnsignedi4AsmOperand : AsmOperandClass {
702 let Name = "ImmUnsignedi4";
703 let RenderMethod = "addImmOperands";
704 let DiagnosticType = "InvalidImmUnsignedi4";
707 // Unsigned immediate used by SSE/AVX instructions
709 // [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
710 def ImmUnsignedi8AsmOperand : AsmOperandClass {
711 let Name = "ImmUnsignedi8";
712 let RenderMethod = "addImmOperands";
// Concrete immediate operand definitions: sign-extended 8/32-bit immediates
// at wider operand widths, and unsigned 4/8-bit immediates.
// NOTE(review): lines carry fused original line numbers and some closing
// braces are missing in this excerpt; code left byte-identical.
715 // A couple of more descriptive operand definitions.
716 // 16-bits but only 8 bits are significant.
717 def i16i8imm : Operand<i16> {
718 let ParserMatchClass = ImmSExti16i8AsmOperand;
719 let OperandType = "OPERAND_IMMEDIATE";
721 // 32-bits but only 8 bits are significant.
722 def i32i8imm : Operand<i32> {
723 let ParserMatchClass = ImmSExti32i8AsmOperand;
724 let OperandType = "OPERAND_IMMEDIATE";
727 // 64-bits but only 32 bits are significant.
728 def i64i32imm : Operand<i64> {
729 let ParserMatchClass = ImmSExti64i32AsmOperand;
730 let OperandType = "OPERAND_IMMEDIATE";
733 // 64-bits but only 8 bits are significant.
734 def i64i8imm : Operand<i64> {
735 let ParserMatchClass = ImmSExti64i8AsmOperand;
736 let OperandType = "OPERAND_IMMEDIATE";
739 // Unsigned 4-bit immediate used by some XOP instructions.
740 def u4imm : Operand<i8> {
741 let PrintMethod = "printU8Imm";
742 let ParserMatchClass = ImmUnsignedi4AsmOperand;
743 let OperandType = "OPERAND_IMMEDIATE";
746 // Unsigned 8-bit immediate used by SSE/AVX instructions.
747 def u8imm : Operand<i8> {
748 let PrintMethod = "printU8Imm";
749 let ParserMatchClass = ImmUnsignedi8AsmOperand;
750 let OperandType = "OPERAND_IMMEDIATE";
753 // 16-bit immediate but only 8-bits are significant and they are unsigned.
754 // Used by BT instructions.
755 def i16u8imm : Operand<i16> {
756 let PrintMethod = "printU8Imm";
757 let ParserMatchClass = ImmUnsignedi8AsmOperand;
758 let OperandType = "OPERAND_IMMEDIATE";
761 // 32-bit immediate but only 8-bits are significant and they are unsigned.
762 // Used by some SSE/AVX instructions that use intrinsics.
763 def i32u8imm : Operand<i32> {
764 let PrintMethod = "printU8Imm";
765 let ParserMatchClass = ImmUnsignedi8AsmOperand;
766 let OperandType = "OPERAND_IMMEDIATE";
769 // 64-bit immediate but only 8-bits are significant and they are unsigned.
770 // Used by BT instructions.
771 def i64u8imm : Operand<i64> {
772 let PrintMethod = "printU8Imm";
773 let ParserMatchClass = ImmUnsignedi8AsmOperand;
774 let OperandType = "OPERAND_IMMEDIATE";
// LEA memory operands using 64-bit base/index registers, and AVX-512 mask
// pair register operands (VK*Pair).
// NOTE(review): lines carry fused original line numbers and some closing
// braces are missing in this excerpt; code left byte-identical.
// 32-bit result LEA with 64-bit base/index registers.
777 def lea64_32mem : Operand<i32> {
778 let PrintMethod = "printMemReference";
779 let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm, SEGMENT_REG);
780 let ParserMatchClass = X86MemAsmOperand;
783 // Memory operands that use 64-bit pointers in both ILP32 and LP64.
784 def lea64mem : Operand<i64> {
785 let PrintMethod = "printMemReference";
786 let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm, SEGMENT_REG);
787 let ParserMatchClass = X86MemAsmOperand;
// Mask-pair asm operand classes, all rendered via addMaskPairOperands.
790 let RenderMethod = "addMaskPairOperands" in {
791 def VK1PairAsmOperand : AsmOperandClass { let Name = "VK1Pair"; }
792 def VK2PairAsmOperand : AsmOperandClass { let Name = "VK2Pair"; }
793 def VK4PairAsmOperand : AsmOperandClass { let Name = "VK4Pair"; }
794 def VK8PairAsmOperand : AsmOperandClass { let Name = "VK8Pair"; }
795 def VK16PairAsmOperand : AsmOperandClass { let Name = "VK16Pair"; }
798 def VK1Pair : RegisterOperand<VK1PAIR, "printVKPair"> {
799 let ParserMatchClass = VK1PairAsmOperand;
802 def VK2Pair : RegisterOperand<VK2PAIR, "printVKPair"> {
803 let ParserMatchClass = VK2PairAsmOperand;
806 def VK4Pair : RegisterOperand<VK4PAIR, "printVKPair"> {
807 let ParserMatchClass = VK4PairAsmOperand;
810 def VK8Pair : RegisterOperand<VK8PAIR, "printVKPair"> {
811 let ParserMatchClass = VK8PairAsmOperand;
814 def VK16Pair : RegisterOperand<VK16PAIR, "printVKPair"> {
815 let ParserMatchClass = VK16PairAsmOperand;
// NOTE(review): lines carry fused original line numbers and some continuation
// lines are missing in this excerpt; code left byte-identical.
818 //===----------------------------------------------------------------------===//
819 // X86 Complex Pattern Definitions.
822 // Define X86-specific addressing mode.
// addr selects the full 5-operand X86 addressing mode via selectAddr.
823 def addr : ComplexPattern<iPTR, 5, "selectAddr", [], [SDNPWantParent]>;
// LEA address selection: matches arithmetic roots so address math folds
// into the LEA.
824 def lea32addr : ComplexPattern<i32, 5, "selectLEAAddr",
825 [add, sub, mul, X86mul_imm, shl, or, frameindex],
827 // In 64-bit mode 32-bit LEAs can use RIP-relative addressing.
828 def lea64_32addr : ComplexPattern<i32, 5, "selectLEA64_32Addr",
829 [add, sub, mul, X86mul_imm, shl, or,
830 frameindex, X86WrapperRIP],
833 def tls32addr : ComplexPattern<i32, 5, "selectTLSADDRAddr",
834 [tglobaltlsaddr], []>;
836 def tls32baseaddr : ComplexPattern<i32, 5, "selectTLSADDRAddr",
837 [tglobaltlsaddr], []>;
839 def lea64addr : ComplexPattern<i64, 5, "selectLEAAddr",
840 [add, sub, mul, X86mul_imm, shl, or, frameindex,
843 def tls64addr : ComplexPattern<i64, 5, "selectTLSADDRAddr",
844 [tglobaltlsaddr], []>;
846 def tls64baseaddr : ComplexPattern<i64, 5, "selectTLSADDRAddr",
847 [tglobaltlsaddr], []>;
849 def vectoraddr : ComplexPattern<iPTR, 5, "selectVectorAddr", [],[SDNPWantParent]>;
851 // A relocatable immediate is an operand that can be relocated by the linker to
852 // an immediate, such as a regular symbol in non-PIC code.
853 def relocImm : ComplexPattern<iAny, 1, "selectRelocImm",
854 [X86Wrapper], [], 0>;
856 //===----------------------------------------------------------------------===//
857 // X86 Instruction Predicate Definitions.
// Subtarget feature predicates. "HasX" gates patterns/instructions on the
// feature; "NoX" on its absence; "UseX" additionally requires that a superset
// feature (e.g. AVX over SSE) is NOT available, so the legacy-encoded form is
// the one that should be selected.
858 def TruePredicate : Predicate<"true">;
860 def HasCMov : Predicate<"Subtarget->hasCMov()">;
861 def NoCMov : Predicate<"!Subtarget->hasCMov()">;
863 def HasMMX : Predicate<"Subtarget->hasMMX()">;
864 def Has3DNow : Predicate<"Subtarget->has3DNow()">;
865 def Has3DNowA : Predicate<"Subtarget->has3DNowA()">;
866 def HasSSE1 : Predicate<"Subtarget->hasSSE1()">;
867 def UseSSE1 : Predicate<"Subtarget->hasSSE1() && !Subtarget->hasAVX()">;
868 def HasSSE2 : Predicate<"Subtarget->hasSSE2()">;
869 def UseSSE2 : Predicate<"Subtarget->hasSSE2() && !Subtarget->hasAVX()">;
870 def HasSSE3 : Predicate<"Subtarget->hasSSE3()">;
871 def UseSSE3 : Predicate<"Subtarget->hasSSE3() && !Subtarget->hasAVX()">;
872 def HasSSSE3 : Predicate<"Subtarget->hasSSSE3()">;
873 def UseSSSE3 : Predicate<"Subtarget->hasSSSE3() && !Subtarget->hasAVX()">;
874 def HasSSE41 : Predicate<"Subtarget->hasSSE41()">;
875 def NoSSE41 : Predicate<"!Subtarget->hasSSE41()">;
876 def UseSSE41 : Predicate<"Subtarget->hasSSE41() && !Subtarget->hasAVX()">;
877 def HasSSE42 : Predicate<"Subtarget->hasSSE42()">;
878 def UseSSE42 : Predicate<"Subtarget->hasSSE42() && !Subtarget->hasAVX()">;
879 def HasSSE4A : Predicate<"Subtarget->hasSSE4A()">;
880 def NoAVX : Predicate<"!Subtarget->hasAVX()">;
881 def HasAVX : Predicate<"Subtarget->hasAVX()">;
882 def HasAVX2 : Predicate<"Subtarget->hasAVX2()">;
883 def HasAVX1Only : Predicate<"Subtarget->hasAVX() && !Subtarget->hasAVX2()">;
884 def HasAVX512 : Predicate<"Subtarget->hasAVX512()">;
885 def UseAVX : Predicate<"Subtarget->hasAVX() && !Subtarget->hasAVX512()">;
886 def UseAVX2 : Predicate<"Subtarget->hasAVX2() && !Subtarget->hasAVX512()">;
887 def NoAVX512 : Predicate<"!Subtarget->hasAVX512()">;
// AVX-512 sub-feature predicates (conflict detection, vector popcount,
// prefetch, exponential/reciprocal, doubleword/quadword, byte/word, and
// 128/256-bit vector-length extensions).
888 def HasCDI : Predicate<"Subtarget->hasCDI()">;
889 def HasVPOPCNTDQ : Predicate<"Subtarget->hasVPOPCNTDQ()">;
890 def HasPFI : Predicate<"Subtarget->hasPFI()">;
891 def HasERI : Predicate<"Subtarget->hasERI()">;
892 def HasDQI : Predicate<"Subtarget->hasDQI()">;
893 def NoDQI : Predicate<"!Subtarget->hasDQI()">;
894 def HasBWI : Predicate<"Subtarget->hasBWI()">;
895 def NoBWI : Predicate<"!Subtarget->hasBWI()">;
896 def HasVLX : Predicate<"Subtarget->hasVLX()">;
897 def NoVLX : Predicate<"!Subtarget->hasVLX()">;
898 def NoVLX_Or_NoBWI : Predicate<"!Subtarget->hasVLX() || !Subtarget->hasBWI()">;
899 def NoVLX_Or_NoDQI : Predicate<"!Subtarget->hasVLX() || !Subtarget->hasDQI()">;
900 def PKU : Predicate<"Subtarget->hasPKU()">;
901 def HasVNNI : Predicate<"Subtarget->hasVNNI()">;
902 def HasVP2INTERSECT : Predicate<"Subtarget->hasVP2INTERSECT()">;
903 def HasBF16 : Predicate<"Subtarget->hasBF16()">;
// Further ISA-extension predicates: crypto (AES/VAES/PCLMUL/SHA/GFNI),
// state save/restore (FXSR/XSAVE*), FMA variants, bit-manipulation
// (BMI/BMI2/TBM/LZCNT), and assorted platform extensions.
905 def HasBITALG : Predicate<"Subtarget->hasBITALG()">;
906 def HasPOPCNT : Predicate<"Subtarget->hasPOPCNT()">;
907 def HasAES : Predicate<"Subtarget->hasAES()">;
908 def HasVAES : Predicate<"Subtarget->hasVAES()">;
909 def NoVLX_Or_NoVAES : Predicate<"!Subtarget->hasVLX() || !Subtarget->hasVAES()">;
910 def HasFXSR : Predicate<"Subtarget->hasFXSR()">;
911 def HasXSAVE : Predicate<"Subtarget->hasXSAVE()">;
912 def HasXSAVEOPT : Predicate<"Subtarget->hasXSAVEOPT()">;
913 def HasXSAVEC : Predicate<"Subtarget->hasXSAVEC()">;
914 def HasXSAVES : Predicate<"Subtarget->hasXSAVES()">;
915 def HasPCLMUL : Predicate<"Subtarget->hasPCLMUL()">;
916 def NoVLX_Or_NoVPCLMULQDQ :
917 Predicate<"!Subtarget->hasVLX() || !Subtarget->hasVPCLMULQDQ()">;
918 def HasVPCLMULQDQ : Predicate<"Subtarget->hasVPCLMULQDQ()">;
919 def HasGFNI : Predicate<"Subtarget->hasGFNI()">;
920 def HasFMA : Predicate<"Subtarget->hasFMA()">;
921 def HasFMA4 : Predicate<"Subtarget->hasFMA4()">;
922 def NoFMA4 : Predicate<"!Subtarget->hasFMA4()">;
923 def HasXOP : Predicate<"Subtarget->hasXOP()">;
924 def HasTBM : Predicate<"Subtarget->hasTBM()">;
925 def NoTBM : Predicate<"!Subtarget->hasTBM()">;
926 def HasLWP : Predicate<"Subtarget->hasLWP()">;
927 def HasMOVBE : Predicate<"Subtarget->hasMOVBE()">;
928 def HasRDRAND : Predicate<"Subtarget->hasRDRAND()">;
929 def HasF16C : Predicate<"Subtarget->hasF16C()">;
930 def HasFSGSBase : Predicate<"Subtarget->hasFSGSBase()">;
931 def HasLZCNT : Predicate<"Subtarget->hasLZCNT()">;
932 def HasBMI : Predicate<"Subtarget->hasBMI()">;
933 def HasBMI2 : Predicate<"Subtarget->hasBMI2()">;
934 def NoBMI2 : Predicate<"!Subtarget->hasBMI2()">;
935 def HasVBMI : Predicate<"Subtarget->hasVBMI()">;
936 def HasVBMI2 : Predicate<"Subtarget->hasVBMI2()">;
937 def HasIFMA : Predicate<"Subtarget->hasIFMA()">;
938 def HasRTM : Predicate<"Subtarget->hasRTM()">;
939 def HasADX : Predicate<"Subtarget->hasADX()">;
940 def HasSHA : Predicate<"Subtarget->hasSHA()">;
941 def HasSGX : Predicate<"Subtarget->hasSGX()">;
942 def HasRDSEED : Predicate<"Subtarget->hasRDSEED()">;
943 def HasSSEPrefetch : Predicate<"Subtarget->hasSSEPrefetch()">;
944 def NoSSEPrefetch : Predicate<"!Subtarget->hasSSEPrefetch()">;
945 def HasPrefetchW : Predicate<"Subtarget->hasPrefetchW()">;
946 def HasPREFETCHWT1 : Predicate<"Subtarget->hasPREFETCHWT1()">;
947 def HasLAHFSAHF : Predicate<"Subtarget->hasLAHFSAHF()">;
948 def HasMWAITX : Predicate<"Subtarget->hasMWAITX()">;
949 def HasCLZERO : Predicate<"Subtarget->hasCLZERO()">;
950 def HasCLDEMOTE : Predicate<"Subtarget->hasCLDEMOTE()">;
951 def HasMOVDIRI : Predicate<"Subtarget->hasMOVDIRI()">;
952 def HasMOVDIR64B : Predicate<"Subtarget->hasMOVDIR64B()">;
953 def HasPTWRITE : Predicate<"Subtarget->hasPTWRITE()">;
// FPStackf32/f64 select x87 stack lowering when the corresponding SSE level
// is unavailable; the remainder gate control-flow-protection, cache-control,
// serialization, and AMX extensions.
954 def FPStackf32 : Predicate<"!Subtarget->hasSSE1()">;
955 def FPStackf64 : Predicate<"!Subtarget->hasSSE2()">;
956 def HasSHSTK : Predicate<"Subtarget->hasSHSTK()">;
957 def HasCLFLUSHOPT : Predicate<"Subtarget->hasCLFLUSHOPT()">;
958 def HasCLWB : Predicate<"Subtarget->hasCLWB()">;
959 def HasWBNOINVD : Predicate<"Subtarget->hasWBNOINVD()">;
960 def HasRDPID : Predicate<"Subtarget->hasRDPID()">;
961 def HasWAITPKG : Predicate<"Subtarget->hasWAITPKG()">;
962 def HasINVPCID : Predicate<"Subtarget->hasINVPCID()">;
963 def HasCmpxchg8b : Predicate<"Subtarget->hasCmpxchg8b()">;
964 def HasCmpxchg16b: Predicate<"Subtarget->hasCmpxchg16b()">;
965 def HasPCONFIG : Predicate<"Subtarget->hasPCONFIG()">;
966 def HasENQCMD : Predicate<"Subtarget->hasENQCMD()">;
967 def HasSERIALIZE : Predicate<"Subtarget->hasSERIALIZE()">;
968 def HasTSXLDTRK : Predicate<"Subtarget->hasTSXLDTRK()">;
969 def HasAMXTILE : Predicate<"Subtarget->hasAMXTILE()">;
970 def HasAMXBF16 : Predicate<"Subtarget->hasAMXBF16()">;
971 def HasAMXINT8 : Predicate<"Subtarget->hasAMXINT8()">;
// Operating-mode predicates. These carry an AssemblerPredicate as well so the
// assembler/disassembler can enforce the same constraint as codegen.
972 def Not64BitMode : Predicate<"!Subtarget->is64Bit()">,
973 AssemblerPredicate<(all_of (not Mode64Bit)), "Not 64-bit mode">;
974 def In64BitMode : Predicate<"Subtarget->is64Bit()">,
975 AssemblerPredicate<(all_of Mode64Bit), "64-bit mode">;
976 def IsLP64 : Predicate<"Subtarget->isTarget64BitLP64()">;
977 def NotLP64 : Predicate<"!Subtarget->isTarget64BitLP64()">;
978 def In16BitMode : Predicate<"Subtarget->is16Bit()">,
979 AssemblerPredicate<(all_of Mode16Bit), "16-bit mode">;
980 def Not16BitMode : Predicate<"!Subtarget->is16Bit()">,
981 AssemblerPredicate<(all_of (not Mode16Bit)), "Not 16-bit mode">;
982 def In32BitMode : Predicate<"Subtarget->is32Bit()">,
983 AssemblerPredicate<(all_of Mode32Bit), "32-bit mode">;
// Target-OS and code-model predicates.
984 def IsWin64 : Predicate<"Subtarget->isTargetWin64()">;
985 def NotWin64 : Predicate<"!Subtarget->isTargetWin64()">;
// RecomputePerFunction: hasFP depends on the current MachineFunction, so this
// predicate cannot be cached across functions.
986 def NotWin64WithoutFP : Predicate<"!Subtarget->isTargetWin64() ||"
987 "Subtarget->getFrameLowering()->hasFP(*MF)"> {
988 let RecomputePerFunction = 1;
990 def IsPS4 : Predicate<"Subtarget->isTargetPS4()">;
991 def NotPS4 : Predicate<"!Subtarget->isTargetPS4()">;
992 def IsNaCl : Predicate<"Subtarget->isTargetNaCl()">;
993 def NotNaCl : Predicate<"!Subtarget->isTargetNaCl()">;
994 def SmallCode : Predicate<"TM.getCodeModel() == CodeModel::Small">;
995 def KernelCode : Predicate<"TM.getCodeModel() == CodeModel::Kernel">;
996 def NearData : Predicate<"TM.getCodeModel() == CodeModel::Small ||"
997 "TM.getCodeModel() == CodeModel::Kernel">;
998 def IsNotPIC : Predicate<"!TM.isPositionIndependent()">;
1000 // We could compute these on a per-module basis but doing so requires accessing
1001 // the Function object through the <Target>Subtarget and objections were raised
1002 // to that (see post-commit review comments for r301750).
// These predicates query per-function attributes (min-size / opt-for-size),
// so every one of them must be re-evaluated for each MachineFunction.
1003 let RecomputePerFunction = 1 in {
1004 def OptForSize : Predicate<"shouldOptForSize(MF)">;
1005 def OptForMinSize : Predicate<"MF->getFunction().hasMinSize()">;
1006 def OptForSpeed : Predicate<"!shouldOptForSize(MF)">;
1007 def UseIncDec : Predicate<"!Subtarget->slowIncDec() || "
1008 "shouldOptForSize(MF)">;
1009 def NoSSE41_Or_OptForSize : Predicate<"shouldOptForSize(MF) || "
1010 "!Subtarget->hasSSE41()">;
// Tuning predicates: selection hints driven by microarchitectural properties
// rather than ISA features.
1013 def CallImmAddr : Predicate<"Subtarget->isLegalToCallImmediateAddr()">;
1014 def FavorMemIndirectCall : Predicate<"!Subtarget->slowTwoMemOps()">;
1015 def HasFastMem32 : Predicate<"!Subtarget->isUnalignedMem32Slow()">;
1016 def HasFastLZCNT : Predicate<"Subtarget->hasFastLZCNT()">;
1017 def HasFastSHLDRotate : Predicate<"Subtarget->hasFastSHLDRotate()">;
1018 def HasERMSB : Predicate<"Subtarget->hasERMSB()">;
1019 def HasMFence : Predicate<"Subtarget->hasMFence()">;
1020 def UseIndirectThunkCalls : Predicate<"Subtarget->useIndirectThunkCalls()">;
1021 def NotUseIndirectThunkCalls : Predicate<"!Subtarget->useIndirectThunkCalls()">;
1023 //===----------------------------------------------------------------------===//
1024 // X86 Instruction Format Definitions.
1027 include "X86InstrFormats.td"
1029 //===----------------------------------------------------------------------===//
1030 // Pattern fragments.
1033 // X86 specific condition code. These correspond to CondCode in
1034 // X86InstrInfo.h. They must be kept in sync.
// Each leaf matches the i8 immediate encoding of one condition code; the
// "alt." notes give the alternate assembler mnemonic for the same encoding.
1035 def X86_COND_O : PatLeaf<(i8 0)>;
1036 def X86_COND_NO : PatLeaf<(i8 1)>;
1037 def X86_COND_B : PatLeaf<(i8 2)>; // alt. COND_C
1038 def X86_COND_AE : PatLeaf<(i8 3)>; // alt. COND_NC
1039 def X86_COND_E : PatLeaf<(i8 4)>; // alt. COND_Z
1040 def X86_COND_NE : PatLeaf<(i8 5)>; // alt. COND_NZ
1041 def X86_COND_BE : PatLeaf<(i8 6)>; // alt. COND_NA
1042 def X86_COND_A : PatLeaf<(i8 7)>; // alt. COND_NBE
1043 def X86_COND_S : PatLeaf<(i8 8)>;
1044 def X86_COND_NS : PatLeaf<(i8 9)>;
1045 def X86_COND_P : PatLeaf<(i8 10)>; // alt. COND_PE
1046 def X86_COND_NP : PatLeaf<(i8 11)>; // alt. COND_PO
1047 def X86_COND_L : PatLeaf<(i8 12)>; // alt. COND_NGE
1048 def X86_COND_GE : PatLeaf<(i8 13)>; // alt. COND_NL
1049 def X86_COND_LE : PatLeaf<(i8 14)>; // alt. COND_NG
1050 def X86_COND_G : PatLeaf<(i8 15)>; // alt. COND_NLE
// Immediates that fit the sign-extended 8-bit (or 32-bit) short-immediate
// instruction encodings.
1052 def i16immSExt8 : ImmLeaf<i16, [{ return isInt<8>(Imm); }]>;
1053 def i32immSExt8 : ImmLeaf<i32, [{ return isInt<8>(Imm); }]>;
1054 def i64immSExt8 : ImmLeaf<i64, [{ return isInt<8>(Imm); }]>;
1055 def i64immSExt32 : ImmLeaf<i64, [{ return isInt<32>(Imm); }]>;
// Relocatable immediates known (via absolute-symbol range info) to fit the
// sign-extended narrow encodings; see isSExtAbsoluteSymbolRef in
// X86ISelDAGToDAG. NOTE(review): the closing "}]>;" lines of these leaves
// fall outside this excerpt.
1057 def i16relocImmSExt8 : PatLeaf<(i16 relocImm), [{
1058 return isSExtAbsoluteSymbolRef(8, N);
1060 def i32relocImmSExt8 : PatLeaf<(i32 relocImm), [{
1061 return isSExtAbsoluteSymbolRef(8, N);
1063 def i64relocImmSExt8 : PatLeaf<(i64 relocImm), [{
1064 return isSExtAbsoluteSymbolRef(8, N);
1066 def i64relocImmSExt32 : PatLeaf<(i64 relocImm), [{
1067 return isSExtAbsoluteSymbolRef(32, N);
1070 // If we have multiple users of an immediate, it's much smaller to reuse
1071 // the register, rather than encode the immediate in every instruction.
1072 // This has the risk of increasing register pressure from stretched live
1073 // ranges, however, the immediates should be trivial to rematerialize by
1074 // the RA in the event of high register pressure.
1075 // TODO : This is currently enabled for stores and binary ops. There are more
1076 // cases for which this can be enabled, though this catches the bulk of the
1078 // TODO2 : This should really also be enabled under O2, but there's currently
1079 // an issue with RA where we don't pull the constants into their users
1080 // when we rematerialize them. I'll follow-up on enabling O2 after we fix that
1082 // TODO3 : This is currently limited to single basic blocks (DAG creation
1083 // pulls block immediates to the top and merges them if necessary).
1084 // Eventually, it would be nice to allow ConstantHoisting to merge constants
1085 // globally for potentially added savings.
// "_su" (size-aware use) immediate leaves: match only when folding the
// immediate into the instruction is not expected to hurt code size
// (shouldAvoidImmediateInstFormsForSize); otherwise the immediate is kept in
// a register and reused.
1087 def imm_su : PatLeaf<(imm), [{
1088 return !shouldAvoidImmediateInstFormsForSize(N);
1090 def i64immSExt32_su : PatLeaf<(i64immSExt32), [{
1091 return !shouldAvoidImmediateInstFormsForSize(N);
1094 def relocImm8_su : PatLeaf<(i8 relocImm), [{
1095 return !shouldAvoidImmediateInstFormsForSize(N);
1097 def relocImm16_su : PatLeaf<(i16 relocImm), [{
1098 return !shouldAvoidImmediateInstFormsForSize(N);
1100 def relocImm32_su : PatLeaf<(i32 relocImm), [{
1101 return !shouldAvoidImmediateInstFormsForSize(N);
1104 def i16relocImmSExt8_su : PatLeaf<(i16relocImmSExt8), [{
1105 return !shouldAvoidImmediateInstFormsForSize(N);
1107 def i32relocImmSExt8_su : PatLeaf<(i32relocImmSExt8), [{
1108 return !shouldAvoidImmediateInstFormsForSize(N);
1110 def i64relocImmSExt8_su : PatLeaf<(i64relocImmSExt8), [{
1111 return !shouldAvoidImmediateInstFormsForSize(N);
1113 def i64relocImmSExt32_su : PatLeaf<(i64relocImmSExt32), [{
1114 return !shouldAvoidImmediateInstFormsForSize(N);
1117 def i16immSExt8_su : PatLeaf<(i16immSExt8), [{
1118 return !shouldAvoidImmediateInstFormsForSize(N);
1120 def i32immSExt8_su : PatLeaf<(i32immSExt8), [{
1121 return !shouldAvoidImmediateInstFormsForSize(N);
1123 def i64immSExt8_su : PatLeaf<(i64immSExt8), [{
1124 return !shouldAvoidImmediateInstFormsForSize(N);
1127 // i64immZExt32 predicate - True if the 64-bit immediate fits in a 32-bit
1129 def i64immZExt32 : ImmLeaf<i64, [{ return isUInt<32>(Imm); }]>;
// Fits zero-extended in 32 bits AND the low 32 bits fit sign-extended in 8.
1131 def i64immZExt32SExt8 : ImmLeaf<i64, [{
1132 return isUInt<32>(Imm) && isInt<8>(static_cast<int32_t>(Imm));
1135 // Helper fragments for loads.
1137 // It's safe to fold a zextload/extload from i1 as a regular i8 load. The
1138 // upper bits are guaranteed to be zero and we were going to emit a MOV8rm
1139 // which might get folded during peephole anyway.
1140 def loadi8 : PatFrag<(ops node:$ptr), (i8 (unindexedload node:$ptr)), [{
1141 LoadSDNode *LD = cast<LoadSDNode>(N);
1142 ISD::LoadExtType ExtType = LD->getExtensionType();
1143 return ExtType == ISD::NON_EXTLOAD || ExtType == ISD::EXTLOAD ||
1144 ExtType == ISD::ZEXTLOAD;
1147 // It's always safe to treat an anyext i16 load as an i32 load if the i16 is
1148 // known to be 32-bit aligned or better. Ditto for i8 to i16.
1149 def loadi16 : PatFrag<(ops node:$ptr), (i16 (unindexedload node:$ptr)), [{
1150 LoadSDNode *LD = cast<LoadSDNode>(N);
1151 ISD::LoadExtType ExtType = LD->getExtensionType();
1152 if (ExtType == ISD::NON_EXTLOAD)
1154 if (ExtType == ISD::EXTLOAD && EnablePromoteAnyextLoad)
1155 return LD->getAlignment() >= 2 && LD->isSimple();
1159 def loadi32 : PatFrag<(ops node:$ptr), (i32 (unindexedload node:$ptr)), [{
1160 LoadSDNode *LD = cast<LoadSDNode>(N);
1161 ISD::LoadExtType ExtType = LD->getExtensionType();
1162 if (ExtType == ISD::NON_EXTLOAD)
1164 if (ExtType == ISD::EXTLOAD && EnablePromoteAnyextLoad)
1165 return LD->getAlignment() >= 4 && LD->isSimple();
// Plain (non-extending) loads of the remaining scalar types.
1169 def loadi64 : PatFrag<(ops node:$ptr), (i64 (load node:$ptr))>;
1170 def loadf32 : PatFrag<(ops node:$ptr), (f32 (load node:$ptr))>;
1171 def loadf64 : PatFrag<(ops node:$ptr), (f64 (load node:$ptr))>;
1172 def loadf80 : PatFrag<(ops node:$ptr), (f80 (load node:$ptr))>;
1173 def loadf128 : PatFrag<(ops node:$ptr), (f128 (load node:$ptr))>;
// f128 load that is known to be at least naturally aligned.
1174 def alignedloadf128 : PatFrag<(ops node:$ptr), (f128 (load node:$ptr)), [{
1175 LoadSDNode *Ld = cast<LoadSDNode>(N);
1176 return Ld->getAlignment() >= Ld->getMemoryVT().getStoreSize();
// f128 "memop": aligned, or the subtarget tolerates unaligned SSE memory ops.
1178 def memopf128 : PatFrag<(ops node:$ptr), (f128 (load node:$ptr)), [{
1179 LoadSDNode *Ld = cast<LoadSDNode>(N);
1180 return Subtarget->hasSSEUnalignedMem() ||
1181 Ld->getAlignment() >= Ld->getMemoryVT().getStoreSize();
// Extending-load fragments, named <kind>load<result-type><memory-type>,
// e.g. sextloadi32i8 = sign-extending load of an i8 in memory to an i32.
1184 def sextloadi16i8 : PatFrag<(ops node:$ptr), (i16 (sextloadi8 node:$ptr))>;
1185 def sextloadi32i8 : PatFrag<(ops node:$ptr), (i32 (sextloadi8 node:$ptr))>;
1186 def sextloadi32i16 : PatFrag<(ops node:$ptr), (i32 (sextloadi16 node:$ptr))>;
1187 def sextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (sextloadi8 node:$ptr))>;
1188 def sextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (sextloadi16 node:$ptr))>;
1189 def sextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (sextloadi32 node:$ptr))>;
1191 def zextloadi8i1 : PatFrag<(ops node:$ptr), (i8 (zextloadi1 node:$ptr))>;
1192 def zextloadi16i1 : PatFrag<(ops node:$ptr), (i16 (zextloadi1 node:$ptr))>;
1193 def zextloadi32i1 : PatFrag<(ops node:$ptr), (i32 (zextloadi1 node:$ptr))>;
1194 def zextloadi16i8 : PatFrag<(ops node:$ptr), (i16 (zextloadi8 node:$ptr))>;
1195 def zextloadi32i8 : PatFrag<(ops node:$ptr), (i32 (zextloadi8 node:$ptr))>;
1196 def zextloadi32i16 : PatFrag<(ops node:$ptr), (i32 (zextloadi16 node:$ptr))>;
1197 def zextloadi64i1 : PatFrag<(ops node:$ptr), (i64 (zextloadi1 node:$ptr))>;
1198 def zextloadi64i8 : PatFrag<(ops node:$ptr), (i64 (zextloadi8 node:$ptr))>;
1199 def zextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (zextloadi16 node:$ptr))>;
1200 def zextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (zextloadi32 node:$ptr))>;
1202 def extloadi8i1 : PatFrag<(ops node:$ptr), (i8 (extloadi1 node:$ptr))>;
1203 def extloadi16i1 : PatFrag<(ops node:$ptr), (i16 (extloadi1 node:$ptr))>;
1204 def extloadi32i1 : PatFrag<(ops node:$ptr), (i32 (extloadi1 node:$ptr))>;
1205 def extloadi16i8 : PatFrag<(ops node:$ptr), (i16 (extloadi8 node:$ptr))>;
1206 def extloadi32i8 : PatFrag<(ops node:$ptr), (i32 (extloadi8 node:$ptr))>;
1207 def extloadi32i16 : PatFrag<(ops node:$ptr), (i32 (extloadi16 node:$ptr))>;
1208 def extloadi64i1 : PatFrag<(ops node:$ptr), (i64 (extloadi1 node:$ptr))>;
1209 def extloadi64i8 : PatFrag<(ops node:$ptr), (i64 (extloadi8 node:$ptr))>;
1210 def extloadi64i16 : PatFrag<(ops node:$ptr), (i64 (extloadi16 node:$ptr))>;
1212 // We can treat an i8/i16 extending load to i64 as a 32 bit load if it's known
1213 // to be 4 byte aligned or better.
1214 def extloadi64i32 : PatFrag<(ops node:$ptr), (i64 (unindexedload node:$ptr)), [{
1215 LoadSDNode *LD = cast<LoadSDNode>(N);
1216 ISD::LoadExtType ExtType = LD->getExtensionType();
1217 if (ExtType != ISD::EXTLOAD)
1219 if (LD->getMemoryVT() == MVT::i32)
1222 return LD->getAlignment() >= 4 && LD->isSimple();
// Single-use fragments: these restrict matching to nodes with exactly one
// use, so folding them into an instruction does not duplicate work.
1226 // An 'and' node with a single use.
1227 def and_su : PatFrag<(ops node:$lhs, node:$rhs), (and node:$lhs, node:$rhs), [{
1228 return N->hasOneUse();
1230 // An 'srl' node with a single use.
1231 def srl_su : PatFrag<(ops node:$lhs, node:$rhs), (srl node:$lhs, node:$rhs), [{
1232 return N->hasOneUse();
1234 // A 'trunc' node with a single use.
1235 def trunc_su : PatFrag<(ops node:$src), (trunc node:$src), [{
1236 return N->hasOneUse();
1239 //===----------------------------------------------------------------------===//
1240 // Instruction list.
// NOP family: the single-byte 0x90 NOP plus the recommended multi-byte
// 0F 1F forms. The memory-operand variants take an operand only for
// encoding purposes (named $zero); register forms exist so the
// assembler/disassembler can round-trip them.
1244 let hasSideEffects = 0, SchedRW = [WriteNop] in {
1245 def NOOP : I<0x90, RawFrm, (outs), (ins), "nop", []>;
1246 def NOOPW : I<0x1f, MRMXm, (outs), (ins i16mem:$zero),
1247 "nop{w}\t$zero", []>, TB, OpSize16, NotMemoryFoldable;
1248 def NOOPL : I<0x1f, MRMXm, (outs), (ins i32mem:$zero),
1249 "nop{l}\t$zero", []>, TB, OpSize32, NotMemoryFoldable;
1250 def NOOPQ : RI<0x1f, MRMXm, (outs), (ins i64mem:$zero),
1251 "nop{q}\t$zero", []>, TB, NotMemoryFoldable,
1252 Requires<[In64BitMode]>;
1253 // Also allow register so we can assemble/disassemble
1254 def NOOPWr : I<0x1f, MRMXr, (outs), (ins GR16:$zero),
1255 "nop{w}\t$zero", []>, TB, OpSize16, NotMemoryFoldable;
1256 def NOOPLr : I<0x1f, MRMXr, (outs), (ins GR32:$zero),
1257 "nop{l}\t$zero", []>, TB, OpSize32, NotMemoryFoldable;
1258 def NOOPQr : RI<0x1f, MRMXr, (outs), (ins GR64:$zero),
1259 "nop{q}\t$zero", []>, TB, NotMemoryFoldable,
1260 Requires<[In64BitMode]>;
1264 // Constructing a stack frame.
1265 def ENTER : Ii16<0xC8, RawFrmImm8, (outs), (ins i16imm:$len, i8imm:$lvl),
1266 "enter\t$len, $lvl", []>, Sched<[WriteMicrocoded]>;
// LEAVE tears down the frame; it implicitly reads and writes the frame and
// stack pointers, modeled here via Defs/Uses (E/R BP and SP per mode).
1268 let SchedRW = [WriteALU] in {
1269 let Defs = [EBP, ESP], Uses = [EBP, ESP], mayLoad = 1, hasSideEffects=0 in
1270 def LEAVE : I<0xC9, RawFrm, (outs), (ins), "leave", []>,
1271 Requires<[Not64BitMode]>;
1273 let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, hasSideEffects = 0 in
1274 def LEAVE64 : I<0xC9, RawFrm, (outs), (ins), "leave", []>,
1275 Requires<[In64BitMode]>;
1278 //===----------------------------------------------------------------------===//
1279 // Miscellaneous Instructions.
// Pseudo expanded by a custom inserter to set up the SjLj exception-handling
// dispatch block; acts as a barrier with side effects.
1282 let isBarrier = 1, hasSideEffects = 1, usesCustomInserter = 1,
1283 SchedRW = [WriteSystem] in
1284 def Int_eh_sjlj_setup_dispatch
1285 : PseudoI<(outs), (ins), [(X86eh_sjlj_setup_dispatch)]>;
// 16/32-bit POP forms. All implicitly adjust ESP (Defs/Uses below). The
// 0x8F "rmr" register forms duplicate the short 0x58 encodings and exist
// only so the disassembler can represent them (isCodeGenOnly).
1287 let Defs = [ESP], Uses = [ESP], hasSideEffects=0 in {
1288 let mayLoad = 1, SchedRW = [WriteLoad] in {
1289 def POP16r : I<0x58, AddRegFrm, (outs GR16:$reg), (ins), "pop{w}\t$reg", []>,
1291 def POP32r : I<0x58, AddRegFrm, (outs GR32:$reg), (ins), "pop{l}\t$reg", []>,
1292 OpSize32, Requires<[Not64BitMode]>;
1293 // Long form for the disassembler.
1294 let isCodeGenOnly = 1, ForceDisassemble = 1 in {
1295 def POP16rmr: I<0x8F, MRM0r, (outs GR16:$reg), (ins), "pop{w}\t$reg", []>,
1296 OpSize16, NotMemoryFoldable;
1297 def POP32rmr: I<0x8F, MRM0r, (outs GR32:$reg), (ins), "pop{l}\t$reg", []>,
1298 OpSize32, Requires<[Not64BitMode]>, NotMemoryFoldable;
1299 } // isCodeGenOnly = 1, ForceDisassemble = 1
1300 } // mayLoad, SchedRW
// Pop directly to memory: loads from the stack and stores to $dst.
1301 let mayStore = 1, mayLoad = 1, SchedRW = [WriteCopy] in {
1302 def POP16rmm: I<0x8F, MRM0m, (outs), (ins i16mem:$dst), "pop{w}\t$dst", []>,
1304 def POP32rmm: I<0x8F, MRM0m, (outs), (ins i32mem:$dst), "pop{l}\t$dst", []>,
1305 OpSize32, Requires<[Not64BitMode]>;
1306 } // mayStore, mayLoad, SchedRW
// 16/32-bit PUSH forms: register (short 0x50 and long 0xFF /6 encodings),
// immediate (0x6A sign-extended 8-bit and full-width 0x68), and memory.
1308 let mayStore = 1, SchedRW = [WriteStore] in {
1309 def PUSH16r : I<0x50, AddRegFrm, (outs), (ins GR16:$reg), "push{w}\t$reg",[]>,
1311 def PUSH32r : I<0x50, AddRegFrm, (outs), (ins GR32:$reg), "push{l}\t$reg",[]>,
1312 OpSize32, Requires<[Not64BitMode]>;
1313 // Long form for the disassembler.
1314 let isCodeGenOnly = 1, ForceDisassemble = 1 in {
1315 def PUSH16rmr: I<0xFF, MRM6r, (outs), (ins GR16:$reg), "push{w}\t$reg",[]>,
1316 OpSize16, NotMemoryFoldable;
1317 def PUSH32rmr: I<0xFF, MRM6r, (outs), (ins GR32:$reg), "push{l}\t$reg",[]>,
1318 OpSize32, Requires<[Not64BitMode]>, NotMemoryFoldable;
1319 } // isCodeGenOnly = 1, ForceDisassemble = 1
1321 def PUSH16i8 : Ii8<0x6a, RawFrm, (outs), (ins i16i8imm:$imm),
1322 "push{w}\t$imm", []>, OpSize16;
1323 def PUSHi16 : Ii16<0x68, RawFrm, (outs), (ins i16imm:$imm),
1324 "push{w}\t$imm", []>, OpSize16;
1326 def PUSH32i8 : Ii8<0x6a, RawFrm, (outs), (ins i32i8imm:$imm),
1327 "push{l}\t$imm", []>, OpSize32,
1328 Requires<[Not64BitMode]>;
1329 def PUSHi32 : Ii32<0x68, RawFrm, (outs), (ins i32imm:$imm),
1330 "push{l}\t$imm", []>, OpSize32,
1331 Requires<[Not64BitMode]>;
1332 } // mayStore, SchedRW
// Push from memory: loads $src and stores it to the stack.
1334 let mayLoad = 1, mayStore = 1, SchedRW = [WriteCopy] in {
1335 def PUSH16rmm: I<0xFF, MRM6m, (outs), (ins i16mem:$src), "push{w}\t$src", []>,
1337 def PUSH32rmm: I<0xFF, MRM6m, (outs), (ins i32mem:$src), "push{l}\t$src", []>,
1338 OpSize32, Requires<[Not64BitMode]>;
1339 } // mayLoad, mayStore, SchedRW
// Pseudos backing the x86 flags read/write intrinsics; expanded by a custom
// inserter into pushf/pop and push/popf sequences (hence the stack-pointer
// Defs and the RMW scheduling class).
1343 let mayLoad = 1, mayStore = 1, usesCustomInserter = 1,
1344 SchedRW = [WriteRMW], Defs = [ESP] in {
1346 def RDFLAGS32 : PseudoI<(outs GR32:$dst), (ins),
1347 [(set GR32:$dst, (int_x86_flags_read_u32))]>,
1348 Requires<[Not64BitMode]>;
1351 def RDFLAGS64 : PseudoI<(outs GR64:$dst), (ins),
1352 [(set GR64:$dst, (int_x86_flags_read_u64))]>,
1353 Requires<[In64BitMode]>;
// The write variants additionally clobber EFLAGS and DF by design.
1356 let mayLoad = 1, mayStore = 1, usesCustomInserter = 1,
1357 SchedRW = [WriteRMW] in {
1358 let Defs = [ESP, EFLAGS, DF], Uses = [ESP] in
1359 def WRFLAGS32 : PseudoI<(outs), (ins GR32:$src),
1360 [(int_x86_flags_write_u32 GR32:$src)]>,
1361 Requires<[Not64BitMode]>;
1363 let Defs = [RSP, EFLAGS, DF], Uses = [RSP] in
1364 def WRFLAGS64 : PseudoI<(outs), (ins GR64:$src),
1365 [(int_x86_flags_write_u64 GR64:$src)]>,
1366 Requires<[In64BitMode]>;
// POPF loads EFLAGS (and DF) from the stack; PUSHF stores them. Both adjust
// ESP, modeled via the surrounding Defs/Uses.
1369 let Defs = [ESP, EFLAGS, DF], Uses = [ESP], mayLoad = 1, hasSideEffects=0,
1370 SchedRW = [WriteLoad] in {
1371 def POPF16 : I<0x9D, RawFrm, (outs), (ins), "popf{w}", []>, OpSize16;
1372 def POPF32 : I<0x9D, RawFrm, (outs), (ins), "popf{l|d}", []>, OpSize32,
1373 Requires<[Not64BitMode]>;
1376 let Defs = [ESP], Uses = [ESP, EFLAGS, DF], mayStore = 1, hasSideEffects=0,
1377 SchedRW = [WriteStore] in {
1378 def PUSHF16 : I<0x9C, RawFrm, (outs), (ins), "pushf{w}", []>, OpSize16;
1379 def PUSHF32 : I<0x9C, RawFrm, (outs), (ins), "pushf{l|d}", []>, OpSize32,
1380 Requires<[Not64BitMode]>;
// 64-bit POP/PUSH. These carry OpSize32 because in 64-bit mode the default
// operand size for push/pop is 64 bits and no REX.W is needed; the long
// 0x8F/0xFF forms again exist only for the disassembler.
1383 let Defs = [RSP], Uses = [RSP], hasSideEffects=0 in {
1384 let mayLoad = 1, SchedRW = [WriteLoad] in {
1385 def POP64r : I<0x58, AddRegFrm, (outs GR64:$reg), (ins), "pop{q}\t$reg", []>,
1386 OpSize32, Requires<[In64BitMode]>;
1387 // Long form for the disassembler.
1388 let isCodeGenOnly = 1, ForceDisassemble = 1 in {
1389 def POP64rmr: I<0x8F, MRM0r, (outs GR64:$reg), (ins), "pop{q}\t$reg", []>,
1390 OpSize32, Requires<[In64BitMode]>, NotMemoryFoldable;
1391 } // isCodeGenOnly = 1, ForceDisassemble = 1
1392 } // mayLoad, SchedRW
1393 let mayLoad = 1, mayStore = 1, SchedRW = [WriteCopy] in
1394 def POP64rmm: I<0x8F, MRM0m, (outs), (ins i64mem:$dst), "pop{q}\t$dst", []>,
1395 OpSize32, Requires<[In64BitMode]>;
1396 let mayStore = 1, SchedRW = [WriteStore] in {
1397 def PUSH64r : I<0x50, AddRegFrm, (outs), (ins GR64:$reg), "push{q}\t$reg", []>,
1398 OpSize32, Requires<[In64BitMode]>;
1399 // Long form for the disassembler.
1400 let isCodeGenOnly = 1, ForceDisassemble = 1 in {
1401 def PUSH64rmr: I<0xFF, MRM6r, (outs), (ins GR64:$reg), "push{q}\t$reg", []>,
1402 OpSize32, Requires<[In64BitMode]>, NotMemoryFoldable;
1403 } // isCodeGenOnly = 1, ForceDisassemble = 1
1404 } // mayStore, SchedRW
1405 let mayLoad = 1, mayStore = 1, SchedRW = [WriteCopy] in {
1406 def PUSH64rmm: I<0xFF, MRM6m, (outs), (ins i64mem:$src), "push{q}\t$src", []>,
1407 OpSize32, Requires<[In64BitMode]>;
1408 } // mayLoad, mayStore, SchedRW
// 64-bit push-immediate (sign-extended 8- and 32-bit encodings) and the
// 64-bit flag push/pop (pushfq/popfq).
1411 let Defs = [RSP], Uses = [RSP], hasSideEffects = 0, mayStore = 1,
1412 SchedRW = [WriteStore] in {
1413 def PUSH64i8 : Ii8<0x6a, RawFrm, (outs), (ins i64i8imm:$imm),
1414 "push{q}\t$imm", []>, OpSize32,
1415 Requires<[In64BitMode]>;
1416 def PUSH64i32 : Ii32S<0x68, RawFrm, (outs), (ins i64i32imm:$imm),
1417 "push{q}\t$imm", []>, OpSize32,
1418 Requires<[In64BitMode]>;
1421 let Defs = [RSP, EFLAGS, DF], Uses = [RSP], mayLoad = 1, hasSideEffects=0 in
1422 def POPF64 : I<0x9D, RawFrm, (outs), (ins), "popfq", []>,
1423 OpSize32, Requires<[In64BitMode]>, Sched<[WriteLoad]>;
1424 let Defs = [RSP], Uses = [RSP, EFLAGS, DF], mayStore = 1, hasSideEffects=0 in
1425 def PUSHF64 : I<0x9C, RawFrm, (outs), (ins), "pushfq", []>,
1426 OpSize32, Requires<[In64BitMode]>, Sched<[WriteStore]>;
// POPA/PUSHA: pop/push all general-purpose registers; not encodable in
// 64-bit mode. The full register set appears in Defs/Uses accordingly.
1428 let Defs = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP], Uses = [ESP],
1429 mayLoad = 1, hasSideEffects = 0, SchedRW = [WriteLoad] in {
1430 def POPA32 : I<0x61, RawFrm, (outs), (ins), "popal", []>,
1431 OpSize32, Requires<[Not64BitMode]>;
1432 def POPA16 : I<0x61, RawFrm, (outs), (ins), "popaw", []>,
1433 OpSize16, Requires<[Not64BitMode]>;
1435 let Defs = [ESP], Uses = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP],
1436 mayStore = 1, hasSideEffects = 0, SchedRW = [WriteStore] in {
1437 def PUSHA32 : I<0x60, RawFrm, (outs), (ins), "pushal", []>,
1438 OpSize32, Requires<[Not64BitMode]>;
1439 def PUSHA16 : I<0x60, RawFrm, (outs), (ins), "pushaw", []>,
1440 OpSize16, Requires<[Not64BitMode]>;
// BSWAP: byte-swap a register in place ($src tied to $dst).
1443 let Constraints = "$src = $dst", SchedRW = [WriteBSWAP32] in {
1444 // This instruction is a consequence of BSWAP32r observing operand size. The
1445 // encoding is valid, but the behavior is undefined.
1446 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
1447 def BSWAP16r_BAD : I<0xC8, AddRegFrm, (outs GR16:$dst), (ins GR16:$src),
1448 "bswap{w}\t$dst", []>, OpSize16, TB;
1449 // GR32 = bswap GR32
1450 def BSWAP32r : I<0xC8, AddRegFrm, (outs GR32:$dst), (ins GR32:$src),
1452 [(set GR32:$dst, (bswap GR32:$src))]>, OpSize32, TB;
1454 let SchedRW = [WriteBSWAP64] in
1455 def BSWAP64r : RI<0xC8, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
1457 [(set GR64:$dst, (bswap GR64:$src))]>, TB;
1458 } // Constraints = "$src = $dst", SchedRW
1460 // Bit scan instructions.
// BSF/BSR scan for the lowest/highest set bit. Each has rr (register) and
// rm (memory) forms for 16/32/64-bit operands; all set EFLAGS as modeled by
// the X86bsf/X86bsr nodes and the surrounding Defs.
1461 let Defs = [EFLAGS] in {
1462 def BSF16rr : I<0xBC, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
1463 "bsf{w}\t{$src, $dst|$dst, $src}",
1464 [(set GR16:$dst, EFLAGS, (X86bsf GR16:$src))]>,
1465 PS, OpSize16, Sched<[WriteBSF]>;
1466 def BSF16rm : I<0xBC, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
1467 "bsf{w}\t{$src, $dst|$dst, $src}",
1468 [(set GR16:$dst, EFLAGS, (X86bsf (loadi16 addr:$src)))]>,
1469 PS, OpSize16, Sched<[WriteBSFLd]>;
1470 def BSF32rr : I<0xBC, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
1471 "bsf{l}\t{$src, $dst|$dst, $src}",
1472 [(set GR32:$dst, EFLAGS, (X86bsf GR32:$src))]>,
1473 PS, OpSize32, Sched<[WriteBSF]>;
1474 def BSF32rm : I<0xBC, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
1475 "bsf{l}\t{$src, $dst|$dst, $src}",
1476 [(set GR32:$dst, EFLAGS, (X86bsf (loadi32 addr:$src)))]>,
1477 PS, OpSize32, Sched<[WriteBSFLd]>;
1478 def BSF64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
1479 "bsf{q}\t{$src, $dst|$dst, $src}",
1480 [(set GR64:$dst, EFLAGS, (X86bsf GR64:$src))]>,
1481 PS, Sched<[WriteBSF]>;
1482 def BSF64rm : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
1483 "bsf{q}\t{$src, $dst|$dst, $src}",
1484 [(set GR64:$dst, EFLAGS, (X86bsf (loadi64 addr:$src)))]>,
1485 PS, Sched<[WriteBSFLd]>;
1487 def BSR16rr : I<0xBD, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
1488 "bsr{w}\t{$src, $dst|$dst, $src}",
1489 [(set GR16:$dst, EFLAGS, (X86bsr GR16:$src))]>,
1490 PS, OpSize16, Sched<[WriteBSR]>;
1491 def BSR16rm : I<0xBD, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
1492 "bsr{w}\t{$src, $dst|$dst, $src}",
1493 [(set GR16:$dst, EFLAGS, (X86bsr (loadi16 addr:$src)))]>,
1494 PS, OpSize16, Sched<[WriteBSRLd]>;
1495 def BSR32rr : I<0xBD, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
1496 "bsr{l}\t{$src, $dst|$dst, $src}",
1497 [(set GR32:$dst, EFLAGS, (X86bsr GR32:$src))]>,
1498 PS, OpSize32, Sched<[WriteBSR]>;
1499 def BSR32rm : I<0xBD, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
1500 "bsr{l}\t{$src, $dst|$dst, $src}",
1501 [(set GR32:$dst, EFLAGS, (X86bsr (loadi32 addr:$src)))]>,
1502 PS, OpSize32, Sched<[WriteBSRLd]>;
1503 def BSR64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
1504 "bsr{q}\t{$src, $dst|$dst, $src}",
1505 [(set GR64:$dst, EFLAGS, (X86bsr GR64:$src))]>,
1506 PS, Sched<[WriteBSR]>;
1507 def BSR64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
1508 "bsr{q}\t{$src, $dst|$dst, $src}",
1509 [(set GR64:$dst, EFLAGS, (X86bsr (loadi64 addr:$src)))]>,
1510 PS, Sched<[WriteBSRLd]>;
1511 } // Defs = [EFLAGS]
// String instructions (MOVS/STOS/SCAS/CMPS). The implicit index registers,
// accumulator, and direction flag are modeled through Defs/Uses on each
// group; the dstidx/srcidx operands carry segment/addressing info only.
1513 let SchedRW = [WriteMicrocoded] in {
1514 let Defs = [EDI,ESI], Uses = [EDI,ESI,DF] in {
1515 def MOVSB : I<0xA4, RawFrmDstSrc, (outs), (ins dstidx8:$dst, srcidx8:$src),
1516 "movsb\t{$src, $dst|$dst, $src}", []>;
1517 def MOVSW : I<0xA5, RawFrmDstSrc, (outs), (ins dstidx16:$dst, srcidx16:$src),
1518 "movsw\t{$src, $dst|$dst, $src}", []>, OpSize16;
1519 def MOVSL : I<0xA5, RawFrmDstSrc, (outs), (ins dstidx32:$dst, srcidx32:$src),
1520 "movs{l|d}\t{$src, $dst|$dst, $src}", []>, OpSize32;
1521 def MOVSQ : RI<0xA5, RawFrmDstSrc, (outs), (ins dstidx64:$dst, srcidx64:$src),
1522 "movsq\t{$src, $dst|$dst, $src}", []>,
1523 Requires<[In64BitMode]>;
// STOS stores the accumulator (AL/AX/EAX/RAX) at [EDI/RDI].
1526 let Defs = [EDI], Uses = [AL,EDI,DF] in
1527 def STOSB : I<0xAA, RawFrmDst, (outs), (ins dstidx8:$dst),
1528 "stosb\t{%al, $dst|$dst, al}", []>;
1529 let Defs = [EDI], Uses = [AX,EDI,DF] in
1530 def STOSW : I<0xAB, RawFrmDst, (outs), (ins dstidx16:$dst),
1531 "stosw\t{%ax, $dst|$dst, ax}", []>, OpSize16;
1532 let Defs = [EDI], Uses = [EAX,EDI,DF] in
1533 def STOSL : I<0xAB, RawFrmDst, (outs), (ins dstidx32:$dst),
1534 "stos{l|d}\t{%eax, $dst|$dst, eax}", []>, OpSize32;
1535 let Defs = [RDI], Uses = [RAX,RDI,DF] in
1536 def STOSQ : RI<0xAB, RawFrmDst, (outs), (ins dstidx64:$dst),
1537 "stosq\t{%rax, $dst|$dst, rax}", []>,
1538 Requires<[In64BitMode]>;
// SCAS compares the accumulator against [EDI/RDI], setting EFLAGS.
1540 let Defs = [EDI,EFLAGS], Uses = [AL,EDI,DF] in
1541 def SCASB : I<0xAE, RawFrmDst, (outs), (ins dstidx8:$dst),
1542 "scasb\t{$dst, %al|al, $dst}", []>;
1543 let Defs = [EDI,EFLAGS], Uses = [AX,EDI,DF] in
1544 def SCASW : I<0xAF, RawFrmDst, (outs), (ins dstidx16:$dst),
1545 "scasw\t{$dst, %ax|ax, $dst}", []>, OpSize16;
1546 let Defs = [EDI,EFLAGS], Uses = [EAX,EDI,DF] in
1547 def SCASL : I<0xAF, RawFrmDst, (outs), (ins dstidx32:$dst),
1548 "scas{l|d}\t{$dst, %eax|eax, $dst}", []>, OpSize32;
1549 let Defs = [EDI,EFLAGS], Uses = [RAX,EDI,DF] in
1550 def SCASQ : RI<0xAF, RawFrmDst, (outs), (ins dstidx64:$dst),
1551 "scasq\t{$dst, %rax|rax, $dst}", []>,
1552 Requires<[In64BitMode]>;
// CMPS compares [ESI] with [EDI], setting EFLAGS and advancing both indices.
1554 let Defs = [EDI,ESI,EFLAGS], Uses = [EDI,ESI,DF] in {
1555 def CMPSB : I<0xA6, RawFrmDstSrc, (outs), (ins dstidx8:$dst, srcidx8:$src),
1556 "cmpsb\t{$dst, $src|$src, $dst}", []>;
1557 def CMPSW : I<0xA7, RawFrmDstSrc, (outs), (ins dstidx16:$dst, srcidx16:$src),
1558 "cmpsw\t{$dst, $src|$src, $dst}", []>, OpSize16;
1559 def CMPSL : I<0xA7, RawFrmDstSrc, (outs), (ins dstidx32:$dst, srcidx32:$src),
1560 "cmps{l|d}\t{$dst, $src|$src, $dst}", []>, OpSize32;
1561 def CMPSQ : RI<0xA7, RawFrmDstSrc, (outs), (ins dstidx64:$dst, srcidx64:$src),
1562 "cmpsq\t{$dst, $src|$src, $dst}", []>,
1563 Requires<[In64BitMode]>;
1567 //===----------------------------------------------------------------------===//
1568 // Move Instructions.
// Plain register-to-register moves. hasSideEffects = 0 and isMoveReg = 1 let
// the register allocator/copy propagation treat these as pure copies.
1570 let SchedRW = [WriteMove] in {
1571 let hasSideEffects = 0, isMoveReg = 1 in {
1572 def MOV8rr : I<0x88, MRMDestReg, (outs GR8 :$dst), (ins GR8 :$src),
1573 "mov{b}\t{$src, $dst|$dst, $src}", []>;
1574 def MOV16rr : I<0x89, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
1575 "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize16;
1576 def MOV32rr : I<0x89, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
1577 "mov{l}\t{$src, $dst|$dst, $src}", []>, OpSize32;
1578 def MOV64rr : RI<0x89, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
1579 "mov{q}\t{$src, $dst|$dst, $src}", []>;
// Immediate moves: rematerializable and as cheap as a register move, so the
// compiler may freely duplicate them instead of spilling.
1582 let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in {
1583 def MOV8ri : Ii8 <0xB0, AddRegFrm, (outs GR8 :$dst), (ins i8imm :$src),
1584 "mov{b}\t{$src, $dst|$dst, $src}",
1585 [(set GR8:$dst, imm:$src)]>;
1586 def MOV16ri : Ii16<0xB8, AddRegFrm, (outs GR16:$dst), (ins i16imm:$src),
1587 "mov{w}\t{$src, $dst|$dst, $src}",
1588 [(set GR16:$dst, imm:$src)]>, OpSize16;
1589 def MOV32ri : Ii32<0xB8, AddRegFrm, (outs GR32:$dst), (ins i32imm:$src),
1590 "mov{l}\t{$src, $dst|$dst, $src}",
1591 [(set GR32:$dst, imm:$src)]>, OpSize32;
// 64-bit move of a sign-extended 32-bit immediate (C7 /0 encoding).
1592 def MOV64ri32 : RIi32S<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src),
1593 "mov{q}\t{$src, $dst|$dst, $src}",
1594 [(set GR64:$dst, i64immSExt32:$src)]>;
// Full 64-bit immediate ("movabs"); not as-cheap-as-a-move (10-byte encoding).
1596 let isReMaterializable = 1, isMoveImm = 1 in {
1597 def MOV64ri : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
1598 "movabs{q}\t{$src, $dst|$dst, $src}",
1599 [(set GR64:$dst, imm:$src)]>;
1602 // Longer forms that use a ModR/M byte. Needed for disassembler
1603 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
1604 def MOV8ri_alt : Ii8 <0xC6, MRM0r, (outs GR8 :$dst), (ins i8imm :$src),
1605 "mov{b}\t{$src, $dst|$dst, $src}", []>,
1606 FoldGenData<"MOV8ri">;
1607 def MOV16ri_alt : Ii16<0xC7, MRM0r, (outs GR16:$dst), (ins i16imm:$src),
1608 "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize16,
1609 FoldGenData<"MOV16ri">;
1610 def MOV32ri_alt : Ii32<0xC7, MRM0r, (outs GR32:$dst), (ins i32imm:$src),
1611 "mov{l}\t{$src, $dst|$dst, $src}", []>, OpSize32,
1612 FoldGenData<"MOV32ri">;
// Store-immediate-to-memory forms. imm_su / i64immSExt32_su allow matching
// even when the immediate has multiple uses.
1616 let SchedRW = [WriteStore] in {
1617 def MOV8mi : Ii8 <0xC6, MRM0m, (outs), (ins i8mem :$dst, i8imm :$src),
1618 "mov{b}\t{$src, $dst|$dst, $src}",
1619 [(store (i8 imm_su:$src), addr:$dst)]>;
1620 def MOV16mi : Ii16<0xC7, MRM0m, (outs), (ins i16mem:$dst, i16imm:$src),
1621 "mov{w}\t{$src, $dst|$dst, $src}",
1622 [(store (i16 imm_su:$src), addr:$dst)]>, OpSize16;
1623 def MOV32mi : Ii32<0xC7, MRM0m, (outs), (ins i32mem:$dst, i32imm:$src),
1624 "mov{l}\t{$src, $dst|$dst, $src}",
1625 [(store (i32 imm_su:$src), addr:$dst)]>, OpSize32;
1626 def MOV64mi32 : RIi32S<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src),
1627 "mov{q}\t{$src, $dst|$dst, $src}",
1628 [(store i64immSExt32_su:$src, addr:$dst)]>,
1629 Requires<[In64BitMode]>;
// Patterns for relocatable immediates (symbol addresses etc.): select the
// plain immediate-move/store instructions.
1632 def : Pat<(i32 relocImm:$src), (MOV32ri relocImm:$src)>;
1633 def : Pat<(i64 relocImm:$src), (MOV64ri relocImm:$src)>;
1635 def : Pat<(store (i8 relocImm8_su:$src), addr:$dst),
1636 (MOV8mi addr:$dst, relocImm8_su:$src)>;
1637 def : Pat<(store (i16 relocImm16_su:$src), addr:$dst),
1638 (MOV16mi addr:$dst, relocImm16_su:$src)>;
1639 def : Pat<(store (i32 relocImm32_su:$src), addr:$dst),
1640 (MOV32mi addr:$dst, relocImm32_su:$src)>;
1641 def : Pat<(store (i64 i64relocImmSExt32_su:$src), addr:$dst),
1642 (MOV64mi32 addr:$dst, i64immSExt32_su:$src)>;
// Moves between the accumulator and an absolute "memory offset" operand
// (moffs encodings A0-A3): the immediate is an address-size-sized offset from
// the segment base. Variants exist for 16/32/64-bit address size; 64-bit
// forms use the movabs mnemonic. Assembler/disassembler only (no patterns).
// NOTE(review): some AdSize* continuation lines appear to be missing from
// this excerpt — confirm against the full file.
1644 let hasSideEffects = 0 in {
1646 /// Memory offset versions of moves. The immediate is an address mode sized
1647 /// offset from the segment base.
1648 let SchedRW = [WriteALU] in {
1649 let mayLoad = 1 in {
1651 def MOV8ao32 : Ii32<0xA0, RawFrmMemOffs, (outs), (ins offset32_8:$src),
1652 "mov{b}\t{$src, %al|al, $src}", []>,
1655 def MOV16ao32 : Ii32<0xA1, RawFrmMemOffs, (outs), (ins offset32_16:$src),
1656 "mov{w}\t{$src, %ax|ax, $src}", []>,
1659 def MOV32ao32 : Ii32<0xA1, RawFrmMemOffs, (outs), (ins offset32_32:$src),
1660 "mov{l}\t{$src, %eax|eax, $src}", []>,
1663 def MOV64ao32 : RIi32<0xA1, RawFrmMemOffs, (outs), (ins offset32_64:$src),
1664 "mov{q}\t{$src, %rax|rax, $src}", []>,
1668 def MOV8ao16 : Ii16<0xA0, RawFrmMemOffs, (outs), (ins offset16_8:$src),
1669 "mov{b}\t{$src, %al|al, $src}", []>, AdSize16;
1671 def MOV16ao16 : Ii16<0xA1, RawFrmMemOffs, (outs), (ins offset16_16:$src),
1672 "mov{w}\t{$src, %ax|ax, $src}", []>,
1675 def MOV32ao16 : Ii16<0xA1, RawFrmMemOffs, (outs), (ins offset16_32:$src),
1676 "mov{l}\t{$src, %eax|eax, $src}", []>,
// Accumulator-to-memory direction (opcodes A2/A3).
1679 let mayStore = 1 in {
1681 def MOV8o32a : Ii32<0xA2, RawFrmMemOffs, (outs), (ins offset32_8:$dst),
1682 "mov{b}\t{%al, $dst|$dst, al}", []>, AdSize32;
1684 def MOV16o32a : Ii32<0xA3, RawFrmMemOffs, (outs), (ins offset32_16:$dst),
1685 "mov{w}\t{%ax, $dst|$dst, ax}", []>,
1688 def MOV32o32a : Ii32<0xA3, RawFrmMemOffs, (outs), (ins offset32_32:$dst),
1689 "mov{l}\t{%eax, $dst|$dst, eax}", []>,
1692 def MOV64o32a : RIi32<0xA3, RawFrmMemOffs, (outs), (ins offset32_64:$dst),
1693 "mov{q}\t{%rax, $dst|$dst, rax}", []>,
1697 def MOV8o16a : Ii16<0xA2, RawFrmMemOffs, (outs), (ins offset16_8:$dst),
1698 "mov{b}\t{%al, $dst|$dst, al}", []>, AdSize16;
1700 def MOV16o16a : Ii16<0xA3, RawFrmMemOffs, (outs), (ins offset16_16:$dst),
1701 "mov{w}\t{%ax, $dst|$dst, ax}", []>,
1704 def MOV32o16a : Ii16<0xA3, RawFrmMemOffs, (outs), (ins offset16_32:$dst),
1705 "mov{l}\t{%eax, $dst|$dst, eax}", []>,
1709 // These forms all have full 64-bit absolute addresses in their instructions
1710 // and use the movabs mnemonic to indicate this specific form.
1711 let mayLoad = 1 in {
1713 def MOV8ao64 : Ii64<0xA0, RawFrmMemOffs, (outs), (ins offset64_8:$src),
1714 "movabs{b}\t{$src, %al|al, $src}", []>,
1717 def MOV16ao64 : Ii64<0xA1, RawFrmMemOffs, (outs), (ins offset64_16:$src),
1718 "movabs{w}\t{$src, %ax|ax, $src}", []>,
1721 def MOV32ao64 : Ii64<0xA1, RawFrmMemOffs, (outs), (ins offset64_32:$src),
1722 "movabs{l}\t{$src, %eax|eax, $src}", []>,
1725 def MOV64ao64 : RIi64<0xA1, RawFrmMemOffs, (outs), (ins offset64_64:$src),
1726 "movabs{q}\t{$src, %rax|rax, $src}", []>,
1730 let mayStore = 1 in {
1732 def MOV8o64a : Ii64<0xA2, RawFrmMemOffs, (outs), (ins offset64_8:$dst),
1733 "movabs{b}\t{%al, $dst|$dst, al}", []>,
1736 def MOV16o64a : Ii64<0xA3, RawFrmMemOffs, (outs), (ins offset64_16:$dst),
1737 "movabs{w}\t{%ax, $dst|$dst, ax}", []>,
1740 def MOV32o64a : Ii64<0xA3, RawFrmMemOffs, (outs), (ins offset64_32:$dst),
1741 "movabs{l}\t{%eax, $dst|$dst, eax}", []>,
1744 def MOV64o64a : RIi64<0xA3, RawFrmMemOffs, (outs), (ins offset64_64:$dst),
1745 "movabs{q}\t{%rax, $dst|$dst, rax}", []>,
1749 } // hasSideEffects = 0
// Reversed-encoding register-to-register moves (8A/8B, MRMSrcReg). Same
// semantics as MOV*rr but a distinct byte encoding; kept codegen-only so the
// disassembler can round-trip both encodings. FoldGenData maps each back to
// the canonical form for memory-folding tables.
1751 let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
1752 SchedRW = [WriteMove], isMoveReg = 1 in {
1753 def MOV8rr_REV : I<0x8A, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src),
1754 "mov{b}\t{$src, $dst|$dst, $src}", []>,
1755 FoldGenData<"MOV8rr">;
1756 def MOV16rr_REV : I<0x8B, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
1757 "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize16,
1758 FoldGenData<"MOV16rr">;
1759 def MOV32rr_REV : I<0x8B, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
1760 "mov{l}\t{$src, $dst|$dst, $src}", []>, OpSize32,
1761 FoldGenData<"MOV32rr">;
1762 def MOV64rr_REV : RI<0x8B, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
1763 "mov{q}\t{$src, $dst|$dst, $src}", []>,
1764 FoldGenData<"MOV64rr">;
1767 // Reversed version with ".s" suffix for GAS compatibility.
1768 def : InstAlias<"mov{b}.s\t{$src, $dst|$dst, $src}",
1769 (MOV8rr_REV GR8:$dst, GR8:$src), 0>;
1770 def : InstAlias<"mov{w}.s\t{$src, $dst|$dst, $src}",
1771 (MOV16rr_REV GR16:$dst, GR16:$src), 0>;
1772 def : InstAlias<"mov{l}.s\t{$src, $dst|$dst, $src}",
1773 (MOV32rr_REV GR32:$dst, GR32:$src), 0>;
1774 def : InstAlias<"mov{q}.s\t{$src, $dst|$dst, $src}",
1775 (MOV64rr_REV GR64:$dst, GR64:$src), 0>;
// Suffix-less ".s" aliases, AT&T syntax only; operand width disambiguates.
1776 def : InstAlias<"mov.s\t{$src, $dst|$dst, $src}",
1777 (MOV8rr_REV GR8:$dst, GR8:$src), 0, "att">;
1778 def : InstAlias<"mov.s\t{$src, $dst|$dst, $src}",
1779 (MOV16rr_REV GR16:$dst, GR16:$src), 0, "att">;
1780 def : InstAlias<"mov.s\t{$src, $dst|$dst, $src}",
1781 (MOV32rr_REV GR32:$dst, GR32:$src), 0, "att">;
1782 def : InstAlias<"mov.s\t{$src, $dst|$dst, $src}",
1783 (MOV64rr_REV GR64:$dst, GR64:$src), 0, "att">;
// MOV loads: foldable into using instructions and rematerializable.
1785 let canFoldAsLoad = 1, isReMaterializable = 1, SchedRW = [WriteLoad] in {
1786 def MOV8rm : I<0x8A, MRMSrcMem, (outs GR8 :$dst), (ins i8mem :$src),
1787 "mov{b}\t{$src, $dst|$dst, $src}",
1788 [(set GR8:$dst, (loadi8 addr:$src))]>;
1789 def MOV16rm : I<0x8B, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
1790 "mov{w}\t{$src, $dst|$dst, $src}",
1791 [(set GR16:$dst, (loadi16 addr:$src))]>, OpSize16;
1792 def MOV32rm : I<0x8B, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
1793 "mov{l}\t{$src, $dst|$dst, $src}",
1794 [(set GR32:$dst, (loadi32 addr:$src))]>, OpSize32;
1795 def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
1796 "mov{q}\t{$src, $dst|$dst, $src}",
1797 [(set GR64:$dst, (load addr:$src))]>;
// MOV stores (register to memory).
1800 let SchedRW = [WriteStore] in {
1801 def MOV8mr : I<0x88, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src),
1802 "mov{b}\t{$src, $dst|$dst, $src}",
1803 [(store GR8:$src, addr:$dst)]>;
1804 def MOV16mr : I<0x89, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
1805 "mov{w}\t{$src, $dst|$dst, $src}",
1806 [(store GR16:$src, addr:$dst)]>, OpSize16;
1807 def MOV32mr : I<0x89, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
1808 "mov{l}\t{$src, $dst|$dst, $src}",
1809 [(store GR32:$src, addr:$dst)]>, OpSize32;
1810 def MOV64mr : RI<0x89, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
1811 "mov{q}\t{$src, $dst|$dst, $src}",
1812 [(store GR64:$src, addr:$dst)]>;
1815 // Versions of MOV8rr, MOV8mr, and MOV8rm that use i8mem_NOREX and GR8_NOREX so
1816 // that they can be used for copying and storing h registers, which can't be
1817 // encoded when a REX prefix is present.
1818 let isCodeGenOnly = 1 in {
1819 let hasSideEffects = 0, isMoveReg = 1 in
1820 def MOV8rr_NOREX : I<0x88, MRMDestReg,
1821 (outs GR8_NOREX:$dst), (ins GR8_NOREX:$src),
1822 "mov{b}\t{$src, $dst|$dst, $src}", []>,
1824 let mayStore = 1, hasSideEffects = 0 in
1825 def MOV8mr_NOREX : I<0x88, MRMDestMem,
1826 (outs), (ins i8mem_NOREX:$dst, GR8_NOREX:$src),
1827 "mov{b}\t{$src, $dst|$dst, $src}", []>,
1828 Sched<[WriteStore]>;
1829 let mayLoad = 1, hasSideEffects = 0,
1830 canFoldAsLoad = 1, isReMaterializable = 1 in
1831 def MOV8rm_NOREX : I<0x8A, MRMSrcMem,
1832 (outs GR8_NOREX:$dst), (ins i8mem_NOREX:$src),
1833 "mov{b}\t{$src, $dst|$dst, $src}", []>,
1838 // Condition code ops, incl. set if equal/not equal/...
// SAHF copies AH into the low byte of EFLAGS; LAHF does the reverse. Gated on
// HasLAHFSAHF (not available on all 64-bit CPUs).
1839 let SchedRW = [WriteLAHFSAHF] in {
1840 let Defs = [EFLAGS], Uses = [AH], hasSideEffects = 0 in
1841 def SAHF : I<0x9E, RawFrm, (outs), (ins), "sahf", []>, // flags = AH
1842 Requires<[HasLAHFSAHF]>;
1843 let Defs = [AH], Uses = [EFLAGS], hasSideEffects = 0 in
1844 def LAHF : I<0x9F, RawFrm, (outs), (ins), "lahf", []>, // AH = flags
1845 Requires<[HasLAHFSAHF]>;
1848 //===----------------------------------------------------------------------===//
1849 // Bit tests instructions: BT, BTS, BTR, BTC.
// BT copies the selected bit into CF (modeled via X86bt setting EFLAGS).
1851 let Defs = [EFLAGS] in {
1852 let SchedRW = [WriteBitTest] in {
1853 def BT16rr : I<0xA3, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2),
1854 "bt{w}\t{$src2, $src1|$src1, $src2}",
1855 [(set EFLAGS, (X86bt GR16:$src1, GR16:$src2))]>,
1856 OpSize16, TB, NotMemoryFoldable;
1857 def BT32rr : I<0xA3, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2),
1858 "bt{l}\t{$src2, $src1|$src1, $src2}",
1859 [(set EFLAGS, (X86bt GR32:$src1, GR32:$src2))]>,
1860 OpSize32, TB, NotMemoryFoldable;
1861 def BT64rr : RI<0xA3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
1862 "bt{q}\t{$src2, $src1|$src1, $src2}",
1863 [(set EFLAGS, (X86bt GR64:$src1, GR64:$src2))]>, TB,
1867 // Unlike with the register+register form, the memory+register form of the
1868 // bt instruction does not ignore the high bits of the index. From ISel's
1869 // perspective, this is pretty bizarre. Make these instructions disassembly
1870 // only for now. These instructions are also slow on modern CPUs so that's
1871 // another reason to avoid generating them.
1873 let mayLoad = 1, hasSideEffects = 0, SchedRW = [WriteBitTestRegLd] in {
1874 def BT16mr : I<0xA3, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
1875 "bt{w}\t{$src2, $src1|$src1, $src2}",
1876 []>, OpSize16, TB, NotMemoryFoldable;
1877 def BT32mr : I<0xA3, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
1878 "bt{l}\t{$src2, $src1|$src1, $src2}",
1879 []>, OpSize32, TB, NotMemoryFoldable;
1880 def BT64mr : RI<0xA3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
1881 "bt{q}\t{$src2, $src1|$src1, $src2}",
1882 []>, TB, NotMemoryFoldable;
// Immediate bit-index forms (BA /4); the index is an unsigned 8-bit imm.
1885 let SchedRW = [WriteBitTest] in {
1886 def BT16ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR16:$src1, i16u8imm:$src2),
1887 "bt{w}\t{$src2, $src1|$src1, $src2}",
1888 [(set EFLAGS, (X86bt GR16:$src1, imm:$src2))]>,
1890 def BT32ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR32:$src1, i32u8imm:$src2),
1891 "bt{l}\t{$src2, $src1|$src1, $src2}",
1892 [(set EFLAGS, (X86bt GR32:$src1, imm:$src2))]>,
1894 def BT64ri8 : RIi8<0xBA, MRM4r, (outs), (ins GR64:$src1, i64u8imm:$src2),
1895 "bt{q}\t{$src2, $src1|$src1, $src2}",
1896 [(set EFLAGS, (X86bt GR64:$src1, imm:$src2))]>, TB;
1899 // Note that these instructions aren't slow because that only applies when the
1900 // other operand is in a register. When it's an immediate, bt is still fast.
1901 let SchedRW = [WriteBitTestImmLd] in {
1902 def BT16mi8 : Ii8<0xBA, MRM4m, (outs), (ins i16mem:$src1, i16u8imm:$src2),
1903 "bt{w}\t{$src2, $src1|$src1, $src2}",
1904 [(set EFLAGS, (X86bt (loadi16 addr:$src1),
1907 def BT32mi8 : Ii8<0xBA, MRM4m, (outs), (ins i32mem:$src1, i32u8imm:$src2),
1908 "bt{l}\t{$src2, $src1|$src1, $src2}",
1909 [(set EFLAGS, (X86bt (loadi32 addr:$src1),
1912 def BT64mi8 : RIi8<0xBA, MRM4m, (outs), (ins i64mem:$src1, i64u8imm:$src2),
1913 "bt{q}\t{$src2, $src1|$src1, $src2}",
1914 [(set EFLAGS, (X86bt (loadi64 addr:$src1),
1916 Requires<[In64BitMode]>;
// BTC: bit test and complement. Register forms tie $src1 to $dst; memory
// forms are read-modify-write (mayLoad + mayStore) with no result register.
1919 let hasSideEffects = 0 in {
1920 let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
1921 def BTC16rr : I<0xBB, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
1922 "btc{w}\t{$src2, $src1|$src1, $src2}", []>,
1923 OpSize16, TB, NotMemoryFoldable;
1924 def BTC32rr : I<0xBB, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
1925 "btc{l}\t{$src2, $src1|$src1, $src2}", []>,
1926 OpSize32, TB, NotMemoryFoldable;
1927 def BTC64rr : RI<0xBB, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1928 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
1932 let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetRegRMW] in {
1933 def BTC16mr : I<0xBB, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
1934 "btc{w}\t{$src2, $src1|$src1, $src2}", []>,
1935 OpSize16, TB, NotMemoryFoldable;
1936 def BTC32mr : I<0xBB, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
1937 "btc{l}\t{$src2, $src1|$src1, $src2}", []>,
1938 OpSize32, TB, NotMemoryFoldable;
1939 def BTC64mr : RI<0xBB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
1940 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
// Immediate bit-index forms (BA /7).
1944 let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
1945 def BTC16ri8 : Ii8<0xBA, MRM7r, (outs GR16:$dst), (ins GR16:$src1, i16u8imm:$src2),
1946 "btc{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize16, TB;
1947 def BTC32ri8 : Ii8<0xBA, MRM7r, (outs GR32:$dst), (ins GR32:$src1, i32u8imm:$src2),
1948 "btc{l}\t{$src2, $src1|$src1, $src2}", []>, OpSize32, TB;
1949 def BTC64ri8 : RIi8<0xBA, MRM7r, (outs GR64:$dst), (ins GR64:$src1, i64u8imm:$src2),
1950 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
1953 let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetImmRMW] in {
1954 def BTC16mi8 : Ii8<0xBA, MRM7m, (outs), (ins i16mem:$src1, i16u8imm:$src2),
1955 "btc{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize16, TB;
1956 def BTC32mi8 : Ii8<0xBA, MRM7m, (outs), (ins i32mem:$src1, i32u8imm:$src2),
1957 "btc{l}\t{$src2, $src1|$src1, $src2}", []>, OpSize32, TB;
1958 def BTC64mi8 : RIi8<0xBA, MRM7m, (outs), (ins i64mem:$src1, i64u8imm:$src2),
1959 "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
1960 Requires<[In64BitMode]>;
// BTR: bit test and reset. Same four-variant structure as BTC
// (reg/reg, mem/reg RMW, reg/imm via BA /6, mem/imm RMW).
1963 let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
1964 def BTR16rr : I<0xB3, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
1965 "btr{w}\t{$src2, $src1|$src1, $src2}", []>,
1966 OpSize16, TB, NotMemoryFoldable;
1967 def BTR32rr : I<0xB3, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
1968 "btr{l}\t{$src2, $src1|$src1, $src2}", []>,
1969 OpSize32, TB, NotMemoryFoldable;
1970 def BTR64rr : RI<0xB3, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1971 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
1975 let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetRegRMW] in {
1976 def BTR16mr : I<0xB3, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
1977 "btr{w}\t{$src2, $src1|$src1, $src2}", []>,
1978 OpSize16, TB, NotMemoryFoldable;
1979 def BTR32mr : I<0xB3, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
1980 "btr{l}\t{$src2, $src1|$src1, $src2}", []>,
1981 OpSize32, TB, NotMemoryFoldable;
1982 def BTR64mr : RI<0xB3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
1983 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
1987 let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
1988 def BTR16ri8 : Ii8<0xBA, MRM6r, (outs GR16:$dst), (ins GR16:$src1, i16u8imm:$src2),
1989 "btr{w}\t{$src2, $src1|$src1, $src2}", []>,
1991 def BTR32ri8 : Ii8<0xBA, MRM6r, (outs GR32:$dst), (ins GR32:$src1, i32u8imm:$src2),
1992 "btr{l}\t{$src2, $src1|$src1, $src2}", []>,
1994 def BTR64ri8 : RIi8<0xBA, MRM6r, (outs GR64:$dst), (ins GR64:$src1, i64u8imm:$src2),
1995 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
1998 let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetImmRMW] in {
1999 def BTR16mi8 : Ii8<0xBA, MRM6m, (outs), (ins i16mem:$src1, i16u8imm:$src2),
2000 "btr{w}\t{$src2, $src1|$src1, $src2}", []>,
2002 def BTR32mi8 : Ii8<0xBA, MRM6m, (outs), (ins i32mem:$src1, i32u8imm:$src2),
2003 "btr{l}\t{$src2, $src1|$src1, $src2}", []>,
2005 def BTR64mi8 : RIi8<0xBA, MRM6m, (outs), (ins i64mem:$src1, i64u8imm:$src2),
2006 "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
2007 Requires<[In64BitMode]>;
// BTS: bit test and set. Same four-variant structure as BTC/BTR
// (reg/reg 0xAB, mem/reg RMW, reg/imm via BA /5, mem/imm RMW).
2010 let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
2011 def BTS16rr : I<0xAB, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
2012 "bts{w}\t{$src2, $src1|$src1, $src2}", []>,
2013 OpSize16, TB, NotMemoryFoldable;
2014 def BTS32rr : I<0xAB, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
2015 "bts{l}\t{$src2, $src1|$src1, $src2}", []>,
2016 OpSize32, TB, NotMemoryFoldable;
2017 def BTS64rr : RI<0xAB, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
2018 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
2022 let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetRegRMW] in {
2023 def BTS16mr : I<0xAB, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
2024 "bts{w}\t{$src2, $src1|$src1, $src2}", []>,
2025 OpSize16, TB, NotMemoryFoldable;
2026 def BTS32mr : I<0xAB, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
2027 "bts{l}\t{$src2, $src1|$src1, $src2}", []>,
2028 OpSize32, TB, NotMemoryFoldable;
2029 def BTS64mr : RI<0xAB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
2030 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
2034 let SchedRW = [WriteBitTestSet], Constraints = "$src1 = $dst" in {
2035 def BTS16ri8 : Ii8<0xBA, MRM5r, (outs GR16:$dst), (ins GR16:$src1, i16u8imm:$src2),
2036 "bts{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize16, TB;
2037 def BTS32ri8 : Ii8<0xBA, MRM5r, (outs GR32:$dst), (ins GR32:$src1, i32u8imm:$src2),
2038 "bts{l}\t{$src2, $src1|$src1, $src2}", []>, OpSize32, TB;
2039 def BTS64ri8 : RIi8<0xBA, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i64u8imm:$src2),
2040 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
2043 let mayLoad = 1, mayStore = 1, SchedRW = [WriteBitTestSetImmRMW] in {
2044 def BTS16mi8 : Ii8<0xBA, MRM5m, (outs), (ins i16mem:$src1, i16u8imm:$src2),
2045 "bts{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize16, TB;
2046 def BTS32mi8 : Ii8<0xBA, MRM5m, (outs), (ins i32mem:$src1, i32u8imm:$src2),
2047 "bts{l}\t{$src2, $src1|$src1, $src2}", []>, OpSize32, TB;
2048 def BTS64mi8 : RIi8<0xBA, MRM5m, (outs), (ins i64mem:$src1, i64u8imm:$src2),
2049 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB,
2050 Requires<[In64BitMode]>;
2052 } // hasSideEffects = 0
2053 } // Defs = [EFLAGS]
2056 //===----------------------------------------------------------------------===//
2060 // Atomic swap. These are just normal xchg instructions. But since a memory
2061 // operand is referenced, the atomicity is ensured.
// Multiclass instantiating the four width variants (8/16/32/64-bit) of a
// register<->memory swap; `frag` names the atomic PatFrag family to match
// (e.g. "atomic_swap" -> atomic_swap_8 / _16 / _32 / _64).
2062 multiclass ATOMIC_SWAP<bits<8> opc8, bits<8> opc, string mnemonic, string frag> {
2063 let Constraints = "$val = $dst", SchedRW = [WriteALULd, WriteRMW] in {
2064 def NAME#8rm : I<opc8, MRMSrcMem, (outs GR8:$dst),
2065 (ins GR8:$val, i8mem:$ptr),
2066 !strconcat(mnemonic, "{b}\t{$val, $ptr|$ptr, $val}"),
2069 (!cast<PatFrag>(frag # "_8") addr:$ptr, GR8:$val))]>;
2070 def NAME#16rm : I<opc, MRMSrcMem, (outs GR16:$dst),
2071 (ins GR16:$val, i16mem:$ptr),
2072 !strconcat(mnemonic, "{w}\t{$val, $ptr|$ptr, $val}"),
2075 (!cast<PatFrag>(frag # "_16") addr:$ptr, GR16:$val))]>,
2077 def NAME#32rm : I<opc, MRMSrcMem, (outs GR32:$dst),
2078 (ins GR32:$val, i32mem:$ptr),
2079 !strconcat(mnemonic, "{l}\t{$val, $ptr|$ptr, $val}"),
2082 (!cast<PatFrag>(frag # "_32") addr:$ptr, GR32:$val))]>,
2084 def NAME#64rm : RI<opc, MRMSrcMem, (outs GR64:$dst),
2085 (ins GR64:$val, i64mem:$ptr),
2086 !strconcat(mnemonic, "{q}\t{$val, $ptr|$ptr, $val}"),
2089 (!cast<PatFrag>(frag # "_64") addr:$ptr, GR64:$val))]>;
// XCHG with a memory operand, via ATOMIC_SWAP (implicitly atomic on x86).
2093 defm XCHG : ATOMIC_SWAP<0x86, 0x87, "xchg", "atomic_swap">, NotMemoryFoldable;
2095 // Swap between registers.
// Both operands are tied in/out ($srcN = $dstN) since XCHG updates both.
2096 let SchedRW = [WriteXCHG] in {
2097 let Constraints = "$src1 = $dst1, $src2 = $dst2", hasSideEffects = 0 in {
2098 def XCHG8rr : I<0x86, MRMSrcReg, (outs GR8:$dst1, GR8:$dst2),
2099 (ins GR8:$src1, GR8:$src2),
2100 "xchg{b}\t{$src2, $src1|$src1, $src2}", []>, NotMemoryFoldable;
2101 def XCHG16rr : I<0x87, MRMSrcReg, (outs GR16:$dst1, GR16:$dst2),
2102 (ins GR16:$src1, GR16:$src2),
2103 "xchg{w}\t{$src2, $src1|$src1, $src2}", []>,
2104 OpSize16, NotMemoryFoldable;
2105 def XCHG32rr : I<0x87, MRMSrcReg, (outs GR32:$dst1, GR32:$dst2),
2106 (ins GR32:$src1, GR32:$src2),
2107 "xchg{l}\t{$src2, $src1|$src1, $src2}", []>,
2108 OpSize32, NotMemoryFoldable;
2109 def XCHG64rr : RI<0x87, MRMSrcReg, (outs GR64:$dst1, GR64:$dst2),
2110 (ins GR64:$src1 ,GR64:$src2),
2111 "xchg{q}\t{$src2, $src1|$src1, $src2}", []>, NotMemoryFoldable;
2114 // Swap between EAX and other registers.
// Short 0x90+r encodings; the accumulator is an implicit def and use.
2115 let Constraints = "$src = $dst", hasSideEffects = 0 in {
2116 let Uses = [AX], Defs = [AX] in
2117 def XCHG16ar : I<0x90, AddRegFrm, (outs GR16:$dst), (ins GR16:$src),
2118 "xchg{w}\t{$src, %ax|ax, $src}", []>, OpSize16;
2119 let Uses = [EAX], Defs = [EAX] in
2120 def XCHG32ar : I<0x90, AddRegFrm, (outs GR32:$dst), (ins GR32:$src),
2121 "xchg{l}\t{$src, %eax|eax, $src}", []>, OpSize32;
2122 let Uses = [RAX], Defs = [RAX] in
2123 def XCHG64ar : RI<0x90, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
2124 "xchg{q}\t{$src, %rax|rax, $src}", []>;
// XADD: exchange-and-add (0F C0/C1). Register forms tie both operands in/out;
// memory forms are read-modify-write. All variants clobber EFLAGS.
2128 let hasSideEffects = 0, Constraints = "$src1 = $dst1, $src2 = $dst2",
2129 Defs = [EFLAGS], SchedRW = [WriteXCHG] in {
2130 def XADD8rr : I<0xC0, MRMDestReg, (outs GR8:$dst1, GR8:$dst2),
2131 (ins GR8:$src1, GR8:$src2),
2132 "xadd{b}\t{$src2, $src1|$src1, $src2}", []>, TB;
2133 def XADD16rr : I<0xC1, MRMDestReg, (outs GR16:$dst1, GR16:$dst2),
2134 (ins GR16:$src1, GR16:$src2),
2135 "xadd{w}\t{$src2, $src1|$src1, $src2}", []>, TB, OpSize16;
2136 def XADD32rr : I<0xC1, MRMDestReg, (outs GR32:$dst1, GR32:$dst2),
2137 (ins GR32:$src1, GR32:$src2),
2138 "xadd{l}\t{$src2, $src1|$src1, $src2}", []>, TB, OpSize32;
2139 def XADD64rr : RI<0xC1, MRMDestReg, (outs GR64:$dst1, GR64:$dst2),
2140 (ins GR64:$src1, GR64:$src2),
2141 "xadd{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
2144 let mayLoad = 1, mayStore = 1, hasSideEffects = 0, Constraints = "$val = $dst",
2145 Defs = [EFLAGS], SchedRW = [WriteALULd, WriteRMW] in {
2146 def XADD8rm : I<0xC0, MRMSrcMem, (outs GR8:$dst),
2147 (ins GR8:$val, i8mem:$ptr),
2148 "xadd{b}\t{$val, $ptr|$ptr, $val}", []>, TB;
2149 def XADD16rm : I<0xC1, MRMSrcMem, (outs GR16:$dst),
2150 (ins GR16:$val, i16mem:$ptr),
2151 "xadd{w}\t{$val, $ptr|$ptr, $val}", []>, TB,
2153 def XADD32rm : I<0xC1, MRMSrcMem, (outs GR32:$dst),
2154 (ins GR32:$val, i32mem:$ptr),
2155 "xadd{l}\t{$val, $ptr|$ptr, $val}", []>, TB,
2157 def XADD64rm : RI<0xC1, MRMSrcMem, (outs GR64:$dst),
2158 (ins GR64:$val, i64mem:$ptr),
2159 "xadd{q}\t{$val, $ptr|$ptr, $val}", []>, TB;
// CMPXCHG: compare the accumulator with the destination and conditionally
// exchange. The width-matched accumulator (AL/AX/EAX/RAX) is an implicit
// def and use; EFLAGS is written.
2163 let SchedRW = [WriteCMPXCHG], hasSideEffects = 0 in {
2164 let Defs = [AL, EFLAGS], Uses = [AL] in
2165 def CMPXCHG8rr : I<0xB0, MRMDestReg, (outs GR8:$dst), (ins GR8:$src),
2166 "cmpxchg{b}\t{$src, $dst|$dst, $src}", []>, TB,
2168 let Defs = [AX, EFLAGS], Uses = [AX] in
2169 def CMPXCHG16rr : I<0xB1, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
2170 "cmpxchg{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize16,
2172 let Defs = [EAX, EFLAGS], Uses = [EAX] in
2173 def CMPXCHG32rr : I<0xB1, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
2174 "cmpxchg{l}\t{$src, $dst|$dst, $src}", []>, TB, OpSize32,
2176 let Defs = [RAX, EFLAGS], Uses = [RAX] in
2177 def CMPXCHG64rr : RI<0xB1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
2178 "cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB,
2180 } // SchedRW, hasSideEffects
// Memory-destination forms: read-modify-write on $dst.
2182 let SchedRW = [WriteCMPXCHGRMW], mayLoad = 1, mayStore = 1,
2183 hasSideEffects = 0 in {
2184 let Defs = [AL, EFLAGS], Uses = [AL] in
2185 def CMPXCHG8rm : I<0xB0, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src),
2186 "cmpxchg{b}\t{$src, $dst|$dst, $src}", []>, TB,
2188 let Defs = [AX, EFLAGS], Uses = [AX] in
2189 def CMPXCHG16rm : I<0xB1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
2190 "cmpxchg{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize16,
2192 let Defs = [EAX, EFLAGS], Uses = [EAX] in
2193 def CMPXCHG32rm : I<0xB1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2194 "cmpxchg{l}\t{$src, $dst|$dst, $src}", []>, TB, OpSize32,
2196 let Defs = [RAX, EFLAGS], Uses = [RAX] in
2197 def CMPXCHG64rm : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
2198 "cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB,
// Double-width forms: EDX:EAX (resp. RDX:RAX) is the compare value,
// ECX:EBX (resp. RCX:RBX) the replacement.
2201 let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX] in
2202 def CMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i64mem:$dst),
2203 "cmpxchg8b\t$dst", []>, TB, Requires<[HasCmpxchg8b]>;
2205 let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX] in
2206 // NOTE: In64BitMode check needed for the AssemblerPredicate.
2207 def CMPXCHG16B : RI<0xC7, MRM1m, (outs), (ins i128mem:$dst),
2208 "cmpxchg16b\t$dst", []>,
2209 TB, Requires<[HasCmpxchg16b,In64BitMode]>;
2210 } // SchedRW, mayLoad, mayStore, hasSideEffects
// Standalone prefix bytes, modeled as instructions for the assembler.
2213 // Lock instruction prefix
2214 let SchedRW = [WriteMicrocoded] in
2215 def LOCK_PREFIX : I<0xF0, PrefixByte, (outs), (ins), "lock", []>;
2217 let SchedRW = [WriteNop] in {
2219 // Rex64 instruction prefix
2220 def REX64_PREFIX : I<0x48, PrefixByte, (outs), (ins), "rex64", []>,
2221 Requires<[In64BitMode]>;
2223 // Data16 instruction prefix
2224 def DATA16_PREFIX : I<0x66, PrefixByte, (outs), (ins), "data16", []>;
2227 // Repeat string operation instruction prefixes
2228 let Defs = [ECX], Uses = [ECX,DF], SchedRW = [WriteMicrocoded] in {
2229 // Repeat (used with INS, OUTS, MOVS, LODS and STOS)
2230 def REP_PREFIX : I<0xF3, PrefixByte, (outs), (ins), "rep", []>;
2231 // Repeat while not equal (used with CMPS and SCAS)
2232 def REPNE_PREFIX : I<0xF2, PrefixByte, (outs), (ins), "repne", []>;
2235 // String manipulation instructions
// LODS loads from [ESI] into the width-matched accumulator and advances ESI.
2236 let SchedRW = [WriteMicrocoded] in {
2237 let Defs = [AL,ESI], Uses = [ESI,DF] in
2238 def LODSB : I<0xAC, RawFrmSrc, (outs), (ins srcidx8:$src),
2239 "lodsb\t{$src, %al|al, $src}", []>;
2240 let Defs = [AX,ESI], Uses = [ESI,DF] in
2241 def LODSW : I<0xAD, RawFrmSrc, (outs), (ins srcidx16:$src),
2242 "lodsw\t{$src, %ax|ax, $src}", []>, OpSize16;
2243 let Defs = [EAX,ESI], Uses = [ESI,DF] in
2244 def LODSL : I<0xAD, RawFrmSrc, (outs), (ins srcidx32:$src),
2245 "lods{l|d}\t{$src, %eax|eax, $src}", []>, OpSize32;
2246 let Defs = [RAX,ESI], Uses = [ESI,DF] in
2247 def LODSQ : RI<0xAD, RawFrmSrc, (outs), (ins srcidx64:$src),
2248 "lodsq\t{$src, %rax|rax, $src}", []>,
2249 Requires<[In64BitMode]>;
// OUTS writes [ESI] to the I/O port in DX; INS reads port DX into [EDI].
2252 let SchedRW = [WriteSystem] in {
2253 let Defs = [ESI], Uses = [DX,ESI,DF] in {
2254 def OUTSB : I<0x6E, RawFrmSrc, (outs), (ins srcidx8:$src),
2255 "outsb\t{$src, %dx|dx, $src}", []>;
2256 def OUTSW : I<0x6F, RawFrmSrc, (outs), (ins srcidx16:$src),
2257 "outsw\t{$src, %dx|dx, $src}", []>, OpSize16;
2258 def OUTSL : I<0x6F, RawFrmSrc, (outs), (ins srcidx32:$src),
2259 "outs{l|d}\t{$src, %dx|dx, $src}", []>, OpSize32;
2262 let Defs = [EDI], Uses = [DX,EDI,DF] in {
2263 def INSB : I<0x6C, RawFrmDst, (outs), (ins dstidx8:$dst),
2264 "insb\t{%dx, $dst|$dst, dx}", []>;
2265 def INSW : I<0x6D, RawFrmDst, (outs), (ins dstidx16:$dst),
2266 "insw\t{%dx, $dst|$dst, dx}", []>, OpSize16;
2267 def INSL : I<0x6D, RawFrmDst, (outs), (ins dstidx32:$dst),
2268 "ins{l|d}\t{%dx, $dst|$dst, dx}", []>, OpSize32;
2272 // EFLAGS management instructions.
// CLC/STC/CMC manipulate CF; modeled as reading and writing EFLAGS.
2273 let SchedRW = [WriteALU], Defs = [EFLAGS], Uses = [EFLAGS] in {
2274 def CLC : I<0xF8, RawFrm, (outs), (ins), "clc", []>;
2275 def STC : I<0xF9, RawFrm, (outs), (ins), "stc", []>;
2276 def CMC : I<0xF5, RawFrm, (outs), (ins), "cmc", []>;
2279 // DF management instructions.
2280 let SchedRW = [WriteALU], Defs = [DF] in {
2281 def CLD : I<0xFC, RawFrm, (outs), (ins), "cld", []>;
2282 def STD : I<0xFD, RawFrm, (outs), (ins), "std", []>;
2285 // Table lookup instructions
// XLAT replaces AL with the byte at [EBX + AL].
2286 let Uses = [AL,EBX], Defs = [AL], hasSideEffects = 0, mayLoad = 1 in
2287 def XLAT : I<0xD7, RawFrm, (outs), (ins), "xlatb", []>, Sched<[WriteLoad]>;
// BCD/ASCII adjust instructions; all are invalid in 64-bit mode.
2289 let SchedRW = [WriteMicrocoded] in {
2290 // ASCII Adjust After Addition
2291 let Uses = [AL,EFLAGS], Defs = [AX,EFLAGS], hasSideEffects = 0 in
2292 def AAA : I<0x37, RawFrm, (outs), (ins), "aaa", []>,
2293 Requires<[Not64BitMode]>;
2295 // ASCII Adjust AX Before Division
2296 let Uses = [AX], Defs = [AX,EFLAGS], hasSideEffects = 0 in
2297 def AAD8i8 : Ii8<0xD5, RawFrm, (outs), (ins i8imm:$src),
2298 "aad\t$src", []>, Requires<[Not64BitMode]>;
2300 // ASCII Adjust AX After Multiply
2301 let Uses = [AL], Defs = [AX,EFLAGS], hasSideEffects = 0 in
2302 def AAM8i8 : Ii8<0xD4, RawFrm, (outs), (ins i8imm:$src),
2303 "aam\t$src", []>, Requires<[Not64BitMode]>;
2305 // ASCII Adjust AL After Subtraction - sets
2306 let Uses = [AL,EFLAGS], Defs = [AX,EFLAGS], hasSideEffects = 0 in
2307 def AAS : I<0x3F, RawFrm, (outs), (ins), "aas", []>,
2308 Requires<[Not64BitMode]>;
2310 // Decimal Adjust AL after Addition
2311 let Uses = [AL,EFLAGS], Defs = [AL,EFLAGS], hasSideEffects = 0 in
2312 def DAA : I<0x27, RawFrm, (outs), (ins), "daa", []>,
2313 Requires<[Not64BitMode]>;
2315 // Decimal Adjust AL after Subtraction
2316 let Uses = [AL,EFLAGS], Defs = [AL,EFLAGS], hasSideEffects = 0 in
2317 def DAS : I<0x2F, RawFrm, (outs), (ins), "das", []>,
2318 Requires<[Not64BitMode]>;
// Legacy 16/32-bit-mode-only system instructions (opcodes 0x62/0x63 are
// reused for EVEX / MOVSXD in 64-bit mode). Empty pattern lists: these exist
// for the assembler/disassembler only.
2321 let SchedRW = [WriteSystem] in {
2322 // Check Array Index Against Bounds
2323 // Note: "bound" does not have reversed operands in at&t syntax.
2324 def BOUNDS16rm : I<0x62, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
2325 "bound\t$dst, $src", []>, OpSize16,
2326 Requires<[Not64BitMode]>;
2327 def BOUNDS32rm : I<0x62, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
2328 "bound\t$dst, $src", []>, OpSize32,
2329 Requires<[Not64BitMode]>;
2331 // Adjust RPL Field of Segment Selector
2332 def ARPL16rr : I<0x63, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
2333 "arpl\t{$src, $dst|$dst, $src}", []>,
2334 Requires<[Not64BitMode]>, NotMemoryFoldable;
2336 def ARPL16mr : I<0x63, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
2337 "arpl\t{$src, $dst|$dst, $src}", []>,
2338 Requires<[Not64BitMode]>, NotMemoryFoldable;
2341 //===----------------------------------------------------------------------===//
2342 // MOVBE Instructions
2343 //
2344 // Byte-swapping load/store: the rm forms select (bswap (load ...)) and the
2345 // mr forms select (store (bswap ...)), so a bswap fused with a memory access
2346 // becomes a single instruction.
2344 let Predicates = [HasMOVBE] in {
2345 let SchedRW = [WriteALULd] in {
2346 def MOVBE16rm : I<0xF0, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
2347 "movbe{w}\t{$src, $dst|$dst, $src}",
2348 [(set GR16:$dst, (bswap (loadi16 addr:$src)))]>,
2350 def MOVBE32rm : I<0xF0, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
2351 "movbe{l}\t{$src, $dst|$dst, $src}",
2352 [(set GR32:$dst, (bswap (loadi32 addr:$src)))]>,
2354 def MOVBE64rm : RI<0xF0, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
2355 "movbe{q}\t{$src, $dst|$dst, $src}",
2356 [(set GR64:$dst, (bswap (loadi64 addr:$src)))]>,
2359 let SchedRW = [WriteStore] in {
2360 def MOVBE16mr : I<0xF1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
2361 "movbe{w}\t{$src, $dst|$dst, $src}",
2362 [(store (bswap GR16:$src), addr:$dst)]>,
2364 def MOVBE32mr : I<0xF1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2365 "movbe{l}\t{$src, $dst|$dst, $src}",
2366 [(store (bswap GR32:$src), addr:$dst)]>,
2368 def MOVBE64mr : RI<0xF1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
2369 "movbe{q}\t{$src, $dst|$dst, $src}",
2370 [(store (bswap GR64:$src), addr:$dst)]>,
2375 //===----------------------------------------------------------------------===//
2376 // RDRAND Instruction
2377 //
2378 // Hardware random number generator. The X86rdrand node produces two results:
2379 // the random value and EFLAGS (success is reported via the flags — see the
2380 // SDTX86rdrand profile at the top of this file).
2378 let Predicates = [HasRDRAND], Defs = [EFLAGS], SchedRW = [WriteSystem] in {
2379 def RDRAND16r : I<0xC7, MRM6r, (outs GR16:$dst), (ins),
2380 "rdrand{w}\t$dst", [(set GR16:$dst, EFLAGS, (X86rdrand))]>,
2382 def RDRAND32r : I<0xC7, MRM6r, (outs GR32:$dst), (ins),
2383 "rdrand{l}\t$dst", [(set GR32:$dst, EFLAGS, (X86rdrand))]>,
2385 def RDRAND64r : RI<0xC7, MRM6r, (outs GR64:$dst), (ins),
2386 "rdrand{q}\t$dst", [(set GR64:$dst, EFLAGS, (X86rdrand))]>,
2390 //===----------------------------------------------------------------------===//
2391 // RDSEED Instruction
2392 //
2393 // Like RDRAND but sources the entropy conditioner directly; same two-result
2394 // (value, EFLAGS) node structure.
2393 let Predicates = [HasRDSEED], Defs = [EFLAGS], SchedRW = [WriteSystem] in {
2394 def RDSEED16r : I<0xC7, MRM7r, (outs GR16:$dst), (ins), "rdseed{w}\t$dst",
2395 [(set GR16:$dst, EFLAGS, (X86rdseed))]>, OpSize16, PS;
2396 def RDSEED32r : I<0xC7, MRM7r, (outs GR32:$dst), (ins), "rdseed{l}\t$dst",
2397 [(set GR32:$dst, EFLAGS, (X86rdseed))]>, OpSize32, PS;
2398 def RDSEED64r : RI<0xC7, MRM7r, (outs GR64:$dst), (ins), "rdseed{q}\t$dst",
2399 [(set GR64:$dst, EFLAGS, (X86rdseed))]>, PS;
2402 //===----------------------------------------------------------------------===//
2403 // LZCNT Instruction
2404 //
2405 // Selected from the generic ctlz node; each pattern also records the
2406 // implicit EFLAGS def. rr and rm (folded-load) forms for 16/32/64 bits.
2405 let Predicates = [HasLZCNT], Defs = [EFLAGS] in {
2406 def LZCNT16rr : I<0xBD, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
2407 "lzcnt{w}\t{$src, $dst|$dst, $src}",
2408 [(set GR16:$dst, (ctlz GR16:$src)), (implicit EFLAGS)]>,
2409 XS, OpSize16, Sched<[WriteLZCNT]>;
2410 def LZCNT16rm : I<0xBD, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
2411 "lzcnt{w}\t{$src, $dst|$dst, $src}",
2412 [(set GR16:$dst, (ctlz (loadi16 addr:$src))),
2413 (implicit EFLAGS)]>, XS, OpSize16, Sched<[WriteLZCNTLd]>;
2415 def LZCNT32rr : I<0xBD, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
2416 "lzcnt{l}\t{$src, $dst|$dst, $src}",
2417 [(set GR32:$dst, (ctlz GR32:$src)), (implicit EFLAGS)]>,
2418 XS, OpSize32, Sched<[WriteLZCNT]>;
2419 def LZCNT32rm : I<0xBD, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
2420 "lzcnt{l}\t{$src, $dst|$dst, $src}",
2421 [(set GR32:$dst, (ctlz (loadi32 addr:$src))),
2422 (implicit EFLAGS)]>, XS, OpSize32, Sched<[WriteLZCNTLd]>;
2424 def LZCNT64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
2425 "lzcnt{q}\t{$src, $dst|$dst, $src}",
2426 [(set GR64:$dst, (ctlz GR64:$src)), (implicit EFLAGS)]>,
2427 XS, Sched<[WriteLZCNT]>;
2428 def LZCNT64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
2429 "lzcnt{q}\t{$src, $dst|$dst, $src}",
2430 [(set GR64:$dst, (ctlz (loadi64 addr:$src))),
2431 (implicit EFLAGS)]>, XS, Sched<[WriteLZCNTLd]>;
2434 //===----------------------------------------------------------------------===//
2435 // TZCNT (BMI1): selected from the generic cttz node; mirrors the LZCNT
2436 // definitions above with opcode 0xBC.
2437 let Predicates = [HasBMI], Defs = [EFLAGS] in {
2438 def TZCNT16rr : I<0xBC, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
2439 "tzcnt{w}\t{$src, $dst|$dst, $src}",
2440 [(set GR16:$dst, (cttz GR16:$src)), (implicit EFLAGS)]>,
2441 XS, OpSize16, Sched<[WriteTZCNT]>;
2442 def TZCNT16rm : I<0xBC, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
2443 "tzcnt{w}\t{$src, $dst|$dst, $src}",
2444 [(set GR16:$dst, (cttz (loadi16 addr:$src))),
2445 (implicit EFLAGS)]>, XS, OpSize16, Sched<[WriteTZCNTLd]>;
2447 def TZCNT32rr : I<0xBC, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
2448 "tzcnt{l}\t{$src, $dst|$dst, $src}",
2449 [(set GR32:$dst, (cttz GR32:$src)), (implicit EFLAGS)]>,
2450 XS, OpSize32, Sched<[WriteTZCNT]>;
2451 def TZCNT32rm : I<0xBC, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
2452 "tzcnt{l}\t{$src, $dst|$dst, $src}",
2453 [(set GR32:$dst, (cttz (loadi32 addr:$src))),
2454 (implicit EFLAGS)]>, XS, OpSize32, Sched<[WriteTZCNTLd]>;
2456 def TZCNT64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
2457 "tzcnt{q}\t{$src, $dst|$dst, $src}",
2458 [(set GR64:$dst, (cttz GR64:$src)), (implicit EFLAGS)]>,
2459 XS, Sched<[WriteTZCNT]>;
2460 def TZCNT64rm : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
2461 "tzcnt{q}\t{$src, $dst|$dst, $src}",
2462 [(set GR64:$dst, (cttz (loadi64 addr:$src))),
2463 (implicit EFLAGS)]>, XS, Sched<[WriteTZCNTLd]>;
2466 // bmi_bls - reg/reg and reg/mem forms for one VEX-encoded BLS* instruction
2467 // (BLSR/BLSMSK/BLSI). Pattern lists are empty; selection is done by the
2468 // Pat<> records that follow the instantiations.
2466 multiclass bmi_bls<string mnemonic, Format RegMRM, Format MemMRM,
2467 RegisterClass RC, X86MemOperand x86memop,
2468 X86FoldableSchedWrite sched> {
2469 let hasSideEffects = 0 in {
2470 def rr : I<0xF3, RegMRM, (outs RC:$dst), (ins RC:$src),
2471 !strconcat(mnemonic, "\t{$src, $dst|$dst, $src}"), []>,
2472 T8PS, VEX_4V, Sched<[sched]>;
2474 def rm : I<0xF3, MemMRM, (outs RC:$dst), (ins x86memop:$src),
2475 !strconcat(mnemonic, "\t{$src, $dst|$dst, $src}"), []>,
2476 T8PS, VEX_4V, Sched<[sched.Folded]>;
2480 // BLSR (reset lowest set bit), BLSMSK (mask up to lowest set bit),
2481 // BLSI (isolate lowest set bit) — distinguished by the ModRM /r field.
2480 let Predicates = [HasBMI], Defs = [EFLAGS] in {
2481 defm BLSR32 : bmi_bls<"blsr{l}", MRM1r, MRM1m, GR32, i32mem, WriteBLS>;
2482 defm BLSR64 : bmi_bls<"blsr{q}", MRM1r, MRM1m, GR64, i64mem, WriteBLS>, VEX_W;
2483 defm BLSMSK32 : bmi_bls<"blsmsk{l}", MRM2r, MRM2m, GR32, i32mem, WriteBLS>;
2484 defm BLSMSK64 : bmi_bls<"blsmsk{q}", MRM2r, MRM2m, GR64, i64mem, WriteBLS>, VEX_W;
2485 defm BLSI32 : bmi_bls<"blsi{l}", MRM3r, MRM3m, GR32, i32mem, WriteBLS>;
2486 defm BLSI64 : bmi_bls<"blsi{q}", MRM3r, MRM3m, GR64, i64mem, WriteBLS>, VEX_W;
2489 //===----------------------------------------------------------------------===//
2490 // Pattern fragments to auto generate BMI instructions.
2491 //===----------------------------------------------------------------------===//
2492 // Each *_flag_nocf fragment matches the flag-producing form of the op only
2493 // when no user of the flags reads CF (hasNoCarryFlagUses on result 1),
2494 // since the BMI instructions define CF differently than plain and/or/xor.
2493 def or_flag_nocf : PatFrag<(ops node:$lhs, node:$rhs),
2494 (X86or_flag node:$lhs, node:$rhs), [{
2495 return hasNoCarryFlagUses(SDValue(N, 1));
2498 def xor_flag_nocf : PatFrag<(ops node:$lhs, node:$rhs),
2499 (X86xor_flag node:$lhs, node:$rhs), [{
2500 return hasNoCarryFlagUses(SDValue(N, 1));
2503 def and_flag_nocf : PatFrag<(ops node:$lhs, node:$rhs),
2504 (X86and_flag node:$lhs, node:$rhs), [{
2505 return hasNoCarryFlagUses(SDValue(N, 1));
2508 // BLSR:   x & (x - 1);  BLSMSK: x ^ (x - 1);  BLSI: x & -x.
2508 let Predicates = [HasBMI] in {
2509 // FIXME: patterns for the load versions are not implemented
2510 def : Pat<(and GR32:$src, (add GR32:$src, -1)),
2511 (BLSR32rr GR32:$src)>;
2512 def : Pat<(and GR64:$src, (add GR64:$src, -1)),
2513 (BLSR64rr GR64:$src)>;
2515 def : Pat<(xor GR32:$src, (add GR32:$src, -1)),
2516 (BLSMSK32rr GR32:$src)>;
2517 def : Pat<(xor GR64:$src, (add GR64:$src, -1)),
2518 (BLSMSK64rr GR64:$src)>;
2520 def : Pat<(and GR32:$src, (ineg GR32:$src)),
2521 (BLSI32rr GR32:$src)>;
2522 def : Pat<(and GR64:$src, (ineg GR64:$src)),
2523 (BLSI64rr GR64:$src)>;
2525 // Versions to match flag producing ops.
2526 def : Pat<(and_flag_nocf GR32:$src, (add GR32:$src, -1)),
2527 (BLSR32rr GR32:$src)>;
2528 def : Pat<(and_flag_nocf GR64:$src, (add GR64:$src, -1)),
2529 (BLSR64rr GR64:$src)>;
2531 def : Pat<(xor_flag_nocf GR32:$src, (add GR32:$src, -1)),
2532 (BLSMSK32rr GR32:$src)>;
2533 def : Pat<(xor_flag_nocf GR64:$src, (add GR64:$src, -1)),
2534 (BLSMSK64rr GR64:$src)>;
2536 def : Pat<(and_flag_nocf GR32:$src, (ineg GR32:$src)),
2537 (BLSI32rr GR32:$src)>;
2538 def : Pat<(and_flag_nocf GR64:$src, (ineg GR64:$src)),
2539 (BLSI64rr GR64:$src)>;
2542 // bmi_bextr - BEXTR reg/reg and reg/mem forms. The memory form's Sched list
2543 // pads the five address operands with ReadDefault before the register read.
2542 multiclass bmi_bextr<bits<8> opc, string mnemonic, RegisterClass RC,
2543 X86MemOperand x86memop, SDNode OpNode,
2544 PatFrag ld_frag, X86FoldableSchedWrite Sched> {
2545 def rr : I<opc, MRMSrcReg4VOp3, (outs RC:$dst), (ins RC:$src1, RC:$src2),
2546 !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2547 [(set RC:$dst, (OpNode RC:$src1, RC:$src2)), (implicit EFLAGS)]>,
2548 T8PS, VEX, Sched<[Sched]>;
2549 def rm : I<opc, MRMSrcMem4VOp3, (outs RC:$dst), (ins x86memop:$src1, RC:$src2),
2550 !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2551 [(set RC:$dst, (OpNode (ld_frag addr:$src1), RC:$src2)),
2552 (implicit EFLAGS)]>, T8PS, VEX,
2553 Sched<[Sched.Folded,
2555 ReadDefault, ReadDefault, ReadDefault, ReadDefault,
2558 Sched.ReadAfterFold]>;
2561 let Predicates = [HasBMI], Defs = [EFLAGS] in {
2562 defm BEXTR32 : bmi_bextr<0xF7, "bextr{l}", GR32, i32mem,
2563 X86bextr, loadi32, WriteBEXTR>;
2564 defm BEXTR64 : bmi_bextr<0xF7, "bextr{q}", GR64, i64mem,
2565 X86bextr, loadi64, WriteBEXTR>, VEX_W;
2568 // bmi_bzhi - BZHI (BMI2) zero-high-bits; same rr/rm structure as bmi_bextr
2569 // but parameterized over an Intrinsic-style node instead of an SDNode.
2568 multiclass bmi_bzhi<bits<8> opc, string mnemonic, RegisterClass RC,
2569 X86MemOperand x86memop, Intrinsic Int,
2570 PatFrag ld_frag, X86FoldableSchedWrite Sched> {
2571 def rr : I<opc, MRMSrcReg4VOp3, (outs RC:$dst), (ins RC:$src1, RC:$src2),
2572 !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2573 [(set RC:$dst, (Int RC:$src1, RC:$src2)), (implicit EFLAGS)]>,
2574 T8PS, VEX, Sched<[Sched]>;
2575 def rm : I<opc, MRMSrcMem4VOp3, (outs RC:$dst), (ins x86memop:$src1, RC:$src2),
2576 !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2577 [(set RC:$dst, (Int (ld_frag addr:$src1), RC:$src2)),
2578 (implicit EFLAGS)]>, T8PS, VEX,
2579 Sched<[Sched.Folded,
2581 ReadDefault, ReadDefault, ReadDefault, ReadDefault,
2584 Sched.ReadAfterFold]>;
2587 let Predicates = [HasBMI2], Defs = [EFLAGS] in {
2588 defm BZHI32 : bmi_bzhi<0xF5, "bzhi{l}", GR32, i32mem,
2589 X86bzhi, loadi32, WriteBZHI>;
2590 defm BZHI64 : bmi_bzhi<0xF5, "bzhi{q}", GR64, i64mem,
2591 X86bzhi, loadi64, WriteBZHI>, VEX_W;
2594 // CountTrailingOnes - i8 immediate holding the number of trailing one bits
2595 // of the matched constant (i.e. the bit-length of a contiguous mask).
2594 def CountTrailingOnes : SDNodeXForm<imm, [{
2595 // Count the trailing ones in the immediate.
2596 return getI8Imm(countTrailingOnes(N->getZExtValue()), SDLoc(N));
2599 // BEXTRMaskXForm - BEXTR control word (length in bits 15:8, start = 0)
2600 // derived from a contiguous low mask.
2599 def BEXTRMaskXForm : SDNodeXForm<imm, [{
2600 unsigned Length = countTrailingOnes(N->getZExtValue());
2601 return getI32Imm(Length << 8, SDLoc(N));
2604 // AndMask64 - a contiguous low-bit mask that does not fit in 32 bits, so a
2605 // plain AND64ri32 cannot encode it.
2604 def AndMask64 : ImmLeaf<i64, [{
2605 return isMask_64(Imm) && !isUInt<32>(Imm);
2608 // Use BEXTR for 64-bit 'and' with large immediate 'mask'.
2609 let Predicates = [HasBMI, NoBMI2, NoTBM] in {
2610 def : Pat<(and GR64:$src, AndMask64:$mask),
2611 (BEXTR64rr GR64:$src,
2612 (SUBREG_TO_REG (i64 0),
2613 (MOV32ri (BEXTRMaskXForm imm:$mask)), sub_32bit))>;
2614 def : Pat<(and (loadi64 addr:$src), AndMask64:$mask),
2615 (BEXTR64rm addr:$src,
2616 (SUBREG_TO_REG (i64 0),
2617 (MOV32ri (BEXTRMaskXForm imm:$mask)), sub_32bit))>;
2620 // Use BZHI for 64-bit 'and' with large immediate 'mask'.
2621 // BZHI takes the bit index in a register's low byte, hence the MOV8ri +
2622 // INSERT_SUBREG materialization.
2621 let Predicates = [HasBMI2, NoTBM] in {
2622 def : Pat<(and GR64:$src, AndMask64:$mask),
2623 (BZHI64rr GR64:$src,
2624 (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
2625 (MOV8ri (CountTrailingOnes imm:$mask)), sub_8bit))>;
2626 def : Pat<(and (loadi64 addr:$src), AndMask64:$mask),
2627 (BZHI64rm addr:$src,
2628 (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
2629 (MOV8ri (CountTrailingOnes imm:$mask)), sub_8bit))>;
2632 // bmi_pdep_pext - PDEP/PEXT (BMI2) rr and rm forms, selected directly from
2633 // the X86pdep/X86pext nodes. Prefix (T8XD vs T8XS) distinguishes the two.
2632 multiclass bmi_pdep_pext<string mnemonic, RegisterClass RC,
2633 X86MemOperand x86memop, SDNode OpNode,
2635 def rr : I<0xF5, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
2636 !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2637 [(set RC:$dst, (OpNode RC:$src1, RC:$src2))]>,
2638 VEX_4V, Sched<[WriteALU]>;
2639 def rm : I<0xF5, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
2640 !strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2641 [(set RC:$dst, (OpNode RC:$src1, (ld_frag addr:$src2)))]>,
2642 VEX_4V, Sched<[WriteALU.Folded, WriteALU.ReadAfterFold]>;
2645 let Predicates = [HasBMI2] in {
2646 defm PDEP32 : bmi_pdep_pext<"pdep{l}", GR32, i32mem,
2647 X86pdep, loadi32>, T8XD;
2648 defm PDEP64 : bmi_pdep_pext<"pdep{q}", GR64, i64mem,
2649 X86pdep, loadi64>, T8XD, VEX_W;
2650 defm PEXT32 : bmi_pdep_pext<"pext{l}", GR32, i32mem,
2651 X86pext, loadi32>, T8XS;
2652 defm PEXT64 : bmi_pdep_pext<"pext{q}", GR64, i64mem,
2653 X86pext, loadi64>, T8XS, VEX_W;
2656 //===----------------------------------------------------------------------===//
2657 // TBM (AMD Trailing Bit Manipulation), XOP-encoded.
2659 let Predicates = [HasTBM], Defs = [EFLAGS] in {
2660 // tbm_ternary_imm - BEXTRI: register/memory source plus a 32-bit control
2661 // immediate ($cntl) selecting the bit field.
2661 multiclass tbm_ternary_imm<bits<8> opc, RegisterClass RC, string OpcodeStr,
2662 X86MemOperand x86memop, PatFrag ld_frag,
2663 SDNode OpNode, Operand immtype,
2664 SDPatternOperator immoperator,
2665 X86FoldableSchedWrite Sched> {
2666 def ri : Ii32<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, immtype:$cntl),
2667 !strconcat(OpcodeStr,
2668 "\t{$cntl, $src1, $dst|$dst, $src1, $cntl}"),
2669 [(set RC:$dst, (OpNode RC:$src1, immoperator:$cntl))]>,
2670 XOP, XOPA, Sched<[Sched]>;
2671 def mi : Ii32<opc, MRMSrcMem, (outs RC:$dst),
2672 (ins x86memop:$src1, immtype:$cntl),
2673 !strconcat(OpcodeStr,
2674 "\t{$cntl, $src1, $dst|$dst, $src1, $cntl}"),
2675 [(set RC:$dst, (OpNode (ld_frag addr:$src1), immoperator:$cntl))]>,
2676 XOP, XOPA, Sched<[Sched.Folded]>;
2679 defm BEXTRI32 : tbm_ternary_imm<0x10, GR32, "bextr{l}", i32mem, loadi32,
2680 X86bextr, i32imm, imm, WriteBEXTR>;
2681 // 64-bit form still takes a sign-extended 32-bit immediate (Imm32S).
2681 let ImmT = Imm32S in
2682 defm BEXTRI64 : tbm_ternary_imm<0x10, GR64, "bextr{q}", i64mem, loadi64,
2683 X86bextr, i64i32imm,
2684 i64immSExt32, WriteBEXTR>, VEX_W;
2686 // tbm_binary_rm - one-source TBM ops (rr + rm); no ISel patterns here,
2687 // selection is via the Pat<> records later in this file.
2686 multiclass tbm_binary_rm<bits<8> opc, Format FormReg, Format FormMem,
2687 RegisterClass RC, string OpcodeStr,
2688 X86MemOperand x86memop, X86FoldableSchedWrite Sched> {
2689 let hasSideEffects = 0 in {
2690 def rr : I<opc, FormReg, (outs RC:$dst), (ins RC:$src),
2691 !strconcat(OpcodeStr,"\t{$src, $dst|$dst, $src}"), []>,
2692 XOP_4V, XOP9, Sched<[Sched]>;
2694 def rm : I<opc, FormMem, (outs RC:$dst), (ins x86memop:$src),
2695 !strconcat(OpcodeStr,"\t{$src, $dst|$dst, $src}"), []>,
2696 XOP_4V, XOP9, Sched<[Sched.Folded]>;
2700 // tbm_binary_intr - instantiates the 32- and 64-bit pair for one mnemonic;
2701 // instructions are distinguished by opcode plus the ModRM /r field.
2700 multiclass tbm_binary_intr<bits<8> opc, string OpcodeStr,
2701 X86FoldableSchedWrite Sched,
2702 Format FormReg, Format FormMem> {
2703 defm NAME#32 : tbm_binary_rm<opc, FormReg, FormMem, GR32, OpcodeStr#"{l}",
2705 defm NAME#64 : tbm_binary_rm<opc, FormReg, FormMem, GR64, OpcodeStr#"{q}",
2706 i64mem, Sched>, VEX_W;
2709 defm BLCFILL : tbm_binary_intr<0x01, "blcfill", WriteALU, MRM1r, MRM1m>;
2710 defm BLCI : tbm_binary_intr<0x02, "blci", WriteALU, MRM6r, MRM6m>;
2711 defm BLCIC : tbm_binary_intr<0x01, "blcic", WriteALU, MRM5r, MRM5m>;
2712 defm BLCMSK : tbm_binary_intr<0x02, "blcmsk", WriteALU, MRM1r, MRM1m>;
2713 defm BLCS : tbm_binary_intr<0x01, "blcs", WriteALU, MRM3r, MRM3m>;
2714 defm BLSFILL : tbm_binary_intr<0x01, "blsfill", WriteALU, MRM2r, MRM2m>;
2715 defm BLSIC : tbm_binary_intr<0x01, "blsic", WriteALU, MRM6r, MRM6m>;
2716 defm T1MSKC : tbm_binary_intr<0x01, "t1mskc", WriteALU, MRM7r, MRM7m>;
2717 defm TZMSK : tbm_binary_intr<0x01, "tzmsk", WriteALU, MRM4r, MRM4m>;
2720 // Use BEXTRI for 64-bit 'and' with large immediate 'mask'.
2721 let Predicates = [HasTBM] in {
2722 def : Pat<(and GR64:$src, AndMask64:$mask),
2723 (BEXTRI64ri GR64:$src, (BEXTRMaskXForm imm:$mask))>;
2725 def : Pat<(and (loadi64 addr:$src), AndMask64:$mask),
2726 (BEXTRI64mi addr:$src, (BEXTRMaskXForm imm:$mask))>;
2729 //===----------------------------------------------------------------------===//
2730 // Lightweight Profiling Instructions (AMD LWP), XOP-encoded; selected
2731 // directly from the target intrinsics.
2732 let Predicates = [HasLWP], SchedRW = [WriteSystem] in {
2733 // LLWPCB loads / SLWPCB stores the LWP control-block pointer.
2734 def LLWPCB : I<0x12, MRM0r, (outs), (ins GR32:$src), "llwpcb\t$src",
2735 [(int_x86_llwpcb GR32:$src)]>, XOP, XOP9;
2736 def SLWPCB : I<0x12, MRM1r, (outs GR32:$dst), (ins), "slwpcb\t$dst",
2737 [(set GR32:$dst, (int_x86_slwpcb))]>, XOP, XOP9;
2739 def LLWPCB64 : I<0x12, MRM0r, (outs), (ins GR64:$src), "llwpcb\t$src",
2740 [(int_x86_llwpcb GR64:$src)]>, XOP, XOP9, VEX_W;
2741 def SLWPCB64 : I<0x12, MRM1r, (outs GR64:$dst), (ins), "slwpcb\t$dst",
2742 [(set GR64:$dst, (int_x86_slwpcb))]>, XOP, XOP9, VEX_W;
2744 // lwpins_intr - LWPINS inserts an event record; result comes back in EFLAGS.
2744 multiclass lwpins_intr<RegisterClass RC> {
2745 def rri : Ii32<0x12, MRM0r, (outs), (ins RC:$src0, GR32:$src1, i32imm:$cntl),
2746 "lwpins\t{$cntl, $src1, $src0|$src0, $src1, $cntl}",
2747 [(set EFLAGS, (X86lwpins RC:$src0, GR32:$src1, timm:$cntl))]>,
2750 def rmi : Ii32<0x12, MRM0m, (outs), (ins RC:$src0, i32mem:$src1, i32imm:$cntl),
2751 "lwpins\t{$cntl, $src1, $src0|$src0, $src1, $cntl}",
2752 [(set EFLAGS, (X86lwpins RC:$src0, (loadi32 addr:$src1), timm:$cntl))]>,
2756 let Defs = [EFLAGS] in {
2757 defm LWPINS32 : lwpins_intr<GR32>;
2758 defm LWPINS64 : lwpins_intr<GR64>, VEX_W;
2761 // lwpval_intr - LWPVAL records a value sample; no flag or register result.
2761 multiclass lwpval_intr<RegisterClass RC, Intrinsic Int> {
2762 def rri : Ii32<0x12, MRM1r, (outs), (ins RC:$src0, GR32:$src1, i32imm:$cntl),
2763 "lwpval\t{$cntl, $src1, $src0|$src0, $src1, $cntl}",
2764 [(Int RC:$src0, GR32:$src1, timm:$cntl)]>, XOP_4V, XOPA;
2766 def rmi : Ii32<0x12, MRM1m, (outs), (ins RC:$src0, i32mem:$src1, i32imm:$cntl),
2767 "lwpval\t{$cntl, $src1, $src0|$src0, $src1, $cntl}",
2768 [(Int RC:$src0, (loadi32 addr:$src1), timm:$cntl)]>,
2772 defm LWPVAL32 : lwpval_intr<GR32, int_x86_lwpval32>;
2773 defm LWPVAL64 : lwpval_intr<GR64, int_x86_lwpval64>, VEX_W;
2775 } // HasLWP, SchedRW
2777 //===----------------------------------------------------------------------===//
2778 // MONITORX/MWAITX Instructions
2779 //
2780 // MONITORX has mode-specific variants because the implicit address register
2781 // differs (EAX vs RAX); MWAITX only reads 32-bit registers so one def
2782 // suffices. The InstAliases accept the explicit-operand spellings.
2780 let SchedRW = [ WriteSystem ] in {
2781 let Uses = [ EAX, ECX, EDX ] in
2782 def MONITORX32rrr : I<0x01, MRM_FA, (outs), (ins), "monitorx", []>,
2783 TB, Requires<[ HasMWAITX, Not64BitMode ]>;
2784 let Uses = [ RAX, ECX, EDX ] in
2785 def MONITORX64rrr : I<0x01, MRM_FA, (outs), (ins), "monitorx", []>,
2786 TB, Requires<[ HasMWAITX, In64BitMode ]>;
2788 let Uses = [ ECX, EAX, EBX ] in {
2789 def MWAITXrrr : I<0x01, MRM_FB, (outs), (ins), "mwaitx",
2790 [(int_x86_mwaitx ECX, EAX, EBX)]>,
2791 TB, Requires<[ HasMWAITX ]>;
2795 def : InstAlias<"mwaitx\t{%eax, %ecx, %ebx|ebx, ecx, eax}", (MWAITXrrr)>,
2796 Requires<[ Not64BitMode ]>;
2797 def : InstAlias<"mwaitx\t{%rax, %rcx, %rbx|rbx, rcx, rax}", (MWAITXrrr)>,
2798 Requires<[ In64BitMode ]>;
2800 def : InstAlias<"monitorx\t{%eax, %ecx, %edx|edx, ecx, eax}", (MONITORX32rrr)>,
2801 Requires<[ Not64BitMode ]>;
2802 def : InstAlias<"monitorx\t{%rax, %rcx, %rdx|rdx, rcx, rax}", (MONITORX64rrr)>,
2803 Requires<[ In64BitMode ]>;
2805 //===----------------------------------------------------------------------===//
2806 // WAITPKG Instructions
2807 //
2808 // UMONITOR is split by address size (AdSize16/32/64) because the monitored
2809 // address register width follows the address-size attribute. UMWAIT/TPAUSE
2810 // read EDX:EAX implicitly and report their result in EFLAGS.
2808 let SchedRW = [WriteSystem] in {
2809 def UMONITOR16 : I<0xAE, MRM6r, (outs), (ins GR16:$src),
2810 "umonitor\t$src", [(int_x86_umonitor GR16:$src)]>,
2811 XS, AdSize16, Requires<[HasWAITPKG, Not64BitMode]>;
2812 def UMONITOR32 : I<0xAE, MRM6r, (outs), (ins GR32:$src),
2813 "umonitor\t$src", [(int_x86_umonitor GR32:$src)]>,
2814 XS, AdSize32, Requires<[HasWAITPKG]>;
2815 def UMONITOR64 : I<0xAE, MRM6r, (outs), (ins GR64:$src),
2816 "umonitor\t$src", [(int_x86_umonitor GR64:$src)]>,
2817 XS, AdSize64, Requires<[HasWAITPKG, In64BitMode]>;
2818 let Uses = [EAX, EDX], Defs = [EFLAGS] in {
2819 def UMWAIT : I<0xAE, MRM6r,
2820 (outs), (ins GR32orGR64:$src), "umwait\t$src",
2821 [(set EFLAGS, (X86umwait GR32orGR64:$src, EDX, EAX))]>,
2822 XD, Requires<[HasWAITPKG]>;
2823 def TPAUSE : I<0xAE, MRM6r,
2824 (outs), (ins GR32orGR64:$src), "tpause\t$src",
2825 [(set EFLAGS, (X86tpause GR32orGR64:$src, EDX, EAX))]>,
2826 PD, Requires<[HasWAITPKG]>, NotMemoryFoldable;
2830 //===----------------------------------------------------------------------===//
2831 // MOVDIRI - Move doubleword/quadword as direct store
2832 // Selected only from the int_x86_directstore{32,64} intrinsics (not from
2833 // ordinary stores), since the direct-store semantics are opt-in.
2833 let SchedRW = [WriteStore] in {
2834 def MOVDIRI32 : I<0xF9, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2835 "movdiri\t{$src, $dst|$dst, $src}",
2836 [(int_x86_directstore32 addr:$dst, GR32:$src)]>,
2837 T8PS, Requires<[HasMOVDIRI]>;
2838 def MOVDIRI64 : RI<0xF9, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
2839 "movdiri\t{$src, $dst|$dst, $src}",
2840 [(int_x86_directstore64 addr:$dst, GR64:$src)]>,
2841 T8PS, Requires<[In64BitMode, HasMOVDIRI]>;
2844 //===----------------------------------------------------------------------===//
2845 // MOVDIR64B - Move 64 bytes as direct store
2846 // $dst register supplies the destination address (width per AdSize);
2847 // the 16-bit form has no intrinsic pattern — assembler support only.
2847 let SchedRW = [WriteStore] in {
2848 def MOVDIR64B16 : I<0xF8, MRMSrcMem, (outs), (ins GR16:$dst, i512mem:$src),
2849 "movdir64b\t{$src, $dst|$dst, $src}", []>,
2850 T8PD, AdSize16, Requires<[HasMOVDIR64B, Not64BitMode]>;
2851 def MOVDIR64B32 : I<0xF8, MRMSrcMem, (outs), (ins GR32:$dst, i512mem:$src),
2852 "movdir64b\t{$src, $dst|$dst, $src}",
2853 [(int_x86_movdir64b GR32:$dst, addr:$src)]>,
2854 T8PD, AdSize32, Requires<[HasMOVDIR64B]>;
2855 def MOVDIR64B64 : I<0xF8, MRMSrcMem, (outs), (ins GR64:$dst, i512mem:$src),
2856 "movdir64b\t{$src, $dst|$dst, $src}",
2857 [(int_x86_movdir64b GR64:$dst, addr:$src)]>,
2858 T8PD, AdSize64, Requires<[HasMOVDIR64B, In64BitMode]>;
2861 //===----------------------------------------------------------------------===//
2862 // ENQCMD/S - Enqueue 64-byte command as user with 64-byte write atomicity
2863 // Variants differ only by address size and prefix (XD = enqcmd,
2864 // XS = enqcmds); the completion status is returned in EFLAGS.
2864 let SchedRW = [WriteStore], Defs = [EFLAGS] in {
2865 def ENQCMD16 : I<0xF8, MRMSrcMem, (outs), (ins GR16:$dst, i512mem:$src),
2866 "enqcmd\t{$src, $dst|$dst, $src}",
2867 [(set EFLAGS, (X86enqcmd GR16:$dst, addr:$src))]>,
2868 T8XD, AdSize16, Requires<[HasENQCMD, Not64BitMode]>;
2869 def ENQCMD32 : I<0xF8, MRMSrcMem, (outs), (ins GR32:$dst, i512mem:$src),
2870 "enqcmd\t{$src, $dst|$dst, $src}",
2871 [(set EFLAGS, (X86enqcmd GR32:$dst, addr:$src))]>,
2872 T8XD, AdSize32, Requires<[HasENQCMD]>;
2873 def ENQCMD64 : I<0xF8, MRMSrcMem, (outs), (ins GR64:$dst, i512mem:$src),
2874 "enqcmd\t{$src, $dst|$dst, $src}",
2875 [(set EFLAGS, (X86enqcmd GR64:$dst, addr:$src))]>,
2876 T8XD, AdSize64, Requires<[HasENQCMD, In64BitMode]>;
2878 def ENQCMDS16 : I<0xF8, MRMSrcMem, (outs), (ins GR16:$dst, i512mem:$src),
2879 "enqcmds\t{$src, $dst|$dst, $src}",
2880 [(set EFLAGS, (X86enqcmds GR16:$dst, addr:$src))]>,
2881 T8XS, AdSize16, Requires<[HasENQCMD, Not64BitMode]>;
2882 def ENQCMDS32 : I<0xF8, MRMSrcMem, (outs), (ins GR32:$dst, i512mem:$src),
2883 "enqcmds\t{$src, $dst|$dst, $src}",
2884 [(set EFLAGS, (X86enqcmds GR32:$dst, addr:$src))]>,
2885 T8XS, AdSize32, Requires<[HasENQCMD]>;
2886 def ENQCMDS64 : I<0xF8, MRMSrcMem, (outs), (ins GR64:$dst, i512mem:$src),
2887 "enqcmds\t{$src, $dst|$dst, $src}",
2888 [(set EFLAGS, (X86enqcmds GR64:$dst, addr:$src))]>,
2889 T8XS, AdSize64, Requires<[HasENQCMD, In64BitMode]>;
2892 //===----------------------------------------------------------------------===//
2893 // CLZERO Instruction
2894 // Mode-split because the implicit cache-line address register is EAX vs RAX
2895 // (see the explicit-operand InstAliases below).
2895 let SchedRW = [WriteLoad] in {
2897 def CLZERO32r : I<0x01, MRM_FC, (outs), (ins), "clzero", []>,
2898 TB, Requires<[HasCLZERO, Not64BitMode]>;
2900 def CLZERO64r : I<0x01, MRM_FC, (outs), (ins), "clzero", []>,
2901 TB, Requires<[HasCLZERO, In64BitMode]>;
2904 def : InstAlias<"clzero\t{%eax|eax}", (CLZERO32r)>, Requires<[Not64BitMode]>;
2905 def : InstAlias<"clzero\t{%rax|rax}", (CLZERO64r)>, Requires<[In64BitMode]>;
2907 //===----------------------------------------------------------------------===//
2908 // SERIALIZE Instruction
2909 // Selected from the int_x86_serialize intrinsic.
2910 def SERIALIZE : I<0x01, MRM_E8, (outs), (ins), "serialize",
2911 [(int_x86_serialize)]>, PS,
2912 Requires<[HasSERIALIZE]>;
2914 //===----------------------------------------------------------------------===//
2915 // TSXLDTRK - TSX Suspend Load Address Tracking
2916 // XSUSLDTRK suspends, XRESLDTRK resumes; same opcode byte, MRM_E8 vs MRM_E9.
2917 let Predicates = [HasTSXLDTRK] in {
2918 def XSUSLDTRK : I<0x01, MRM_E8, (outs), (ins), "xsusldtrk",
2919 [(int_x86_xsusldtrk)]>, XD;
2920 def XRESLDTRK : I<0x01, MRM_E9, (outs), (ins), "xresldtrk",
2921 [(int_x86_xresldtrk)]>, XD;
2924 //===----------------------------------------------------------------------===//
2925 // Pattern fragments to auto generate TBM instructions.
2926 //===----------------------------------------------------------------------===//
2927 // Each TBM one-source op is matched from its defining identity, e.g.
2928 // BLCFILL: x & (x+1), BLCI: x | ~(x+1), BLCIC: ~x & (x+1),
2929 // BLCMSK: x ^ (x+1), BLCS: x | (x+1), BLSFILL: x | (x-1),
2930 // BLSIC: ~x | (x-1), T1MSKC: ~x | (x+1), TZMSK: ~x & (x-1).
2928 let Predicates = [HasTBM] in {
2929 // FIXME: patterns for the load versions are not implemented
2930 def : Pat<(and GR32:$src, (add GR32:$src, 1)),
2931 (BLCFILL32rr GR32:$src)>;
2932 def : Pat<(and GR64:$src, (add GR64:$src, 1)),
2933 (BLCFILL64rr GR64:$src)>;
2935 def : Pat<(or GR32:$src, (not (add GR32:$src, 1))),
2936 (BLCI32rr GR32:$src)>;
2937 def : Pat<(or GR64:$src, (not (add GR64:$src, 1))),
2938 (BLCI64rr GR64:$src)>;
2940 // Extra patterns because opt can optimize the above patterns to this.
2941 def : Pat<(or GR32:$src, (sub -2, GR32:$src)),
2942 (BLCI32rr GR32:$src)>;
2943 def : Pat<(or GR64:$src, (sub -2, GR64:$src)),
2944 (BLCI64rr GR64:$src)>;
2946 def : Pat<(and (not GR32:$src), (add GR32:$src, 1)),
2947 (BLCIC32rr GR32:$src)>;
2948 def : Pat<(and (not GR64:$src), (add GR64:$src, 1)),
2949 (BLCIC64rr GR64:$src)>;
2951 def : Pat<(xor GR32:$src, (add GR32:$src, 1)),
2952 (BLCMSK32rr GR32:$src)>;
2953 def : Pat<(xor GR64:$src, (add GR64:$src, 1)),
2954 (BLCMSK64rr GR64:$src)>;
2956 def : Pat<(or GR32:$src, (add GR32:$src, 1)),
2957 (BLCS32rr GR32:$src)>;
2958 def : Pat<(or GR64:$src, (add GR64:$src, 1)),
2959 (BLCS64rr GR64:$src)>;
2961 def : Pat<(or GR32:$src, (add GR32:$src, -1)),
2962 (BLSFILL32rr GR32:$src)>;
2963 def : Pat<(or GR64:$src, (add GR64:$src, -1)),
2964 (BLSFILL64rr GR64:$src)>;
2966 def : Pat<(or (not GR32:$src), (add GR32:$src, -1)),
2967 (BLSIC32rr GR32:$src)>;
2968 def : Pat<(or (not GR64:$src), (add GR64:$src, -1)),
2969 (BLSIC64rr GR64:$src)>;
2971 def : Pat<(or (not GR32:$src), (add GR32:$src, 1)),
2972 (T1MSKC32rr GR32:$src)>;
2973 def : Pat<(or (not GR64:$src), (add GR64:$src, 1)),
2974 (T1MSKC64rr GR64:$src)>;
2976 def : Pat<(and (not GR32:$src), (add GR32:$src, -1)),
2977 (TZMSK32rr GR32:$src)>;
2978 def : Pat<(and (not GR64:$src), (add GR64:$src, -1)),
2979 (TZMSK64rr GR64:$src)>;
2981 // Patterns to match flag producing ops.
2982 // Same identities as above, but using the *_flag_nocf fragments so the
2983 // instruction's EFLAGS def can replace the generic flag-producing node.
2982 def : Pat<(and_flag_nocf GR32:$src, (add GR32:$src, 1)),
2983 (BLCFILL32rr GR32:$src)>;
2984 def : Pat<(and_flag_nocf GR64:$src, (add GR64:$src, 1)),
2985 (BLCFILL64rr GR64:$src)>;
2987 def : Pat<(or_flag_nocf GR32:$src, (not (add GR32:$src, 1))),
2988 (BLCI32rr GR32:$src)>;
2989 def : Pat<(or_flag_nocf GR64:$src, (not (add GR64:$src, 1))),
2990 (BLCI64rr GR64:$src)>;
2992 // Extra patterns because opt can optimize the above patterns to this.
2993 def : Pat<(or_flag_nocf GR32:$src, (sub -2, GR32:$src)),
2994 (BLCI32rr GR32:$src)>;
2995 def : Pat<(or_flag_nocf GR64:$src, (sub -2, GR64:$src)),
2996 (BLCI64rr GR64:$src)>;
2998 def : Pat<(and_flag_nocf (not GR32:$src), (add GR32:$src, 1)),
2999 (BLCIC32rr GR32:$src)>;
3000 def : Pat<(and_flag_nocf (not GR64:$src), (add GR64:$src, 1)),
3001 (BLCIC64rr GR64:$src)>;
3003 def : Pat<(xor_flag_nocf GR32:$src, (add GR32:$src, 1)),
3004 (BLCMSK32rr GR32:$src)>;
3005 def : Pat<(xor_flag_nocf GR64:$src, (add GR64:$src, 1)),
3006 (BLCMSK64rr GR64:$src)>;
3008 def : Pat<(or_flag_nocf GR32:$src, (add GR32:$src, 1)),
3009 (BLCS32rr GR32:$src)>;
3010 def : Pat<(or_flag_nocf GR64:$src, (add GR64:$src, 1)),
3011 (BLCS64rr GR64:$src)>;
3013 def : Pat<(or_flag_nocf GR32:$src, (add GR32:$src, -1)),
3014 (BLSFILL32rr GR32:$src)>;
3015 def : Pat<(or_flag_nocf GR64:$src, (add GR64:$src, -1)),
3016 (BLSFILL64rr GR64:$src)>;
3018 def : Pat<(or_flag_nocf (not GR32:$src), (add GR32:$src, -1)),
3019 (BLSIC32rr GR32:$src)>;
3020 def : Pat<(or_flag_nocf (not GR64:$src), (add GR64:$src, -1)),
3021 (BLSIC64rr GR64:$src)>;
3023 def : Pat<(or_flag_nocf (not GR32:$src), (add GR32:$src, 1)),
3024 (T1MSKC32rr GR32:$src)>;
3025 def : Pat<(or_flag_nocf (not GR64:$src), (add GR64:$src, 1)),
3026 (T1MSKC64rr GR64:$src)>;
3028 def : Pat<(and_flag_nocf (not GR32:$src), (add GR32:$src, -1)),
3029 (TZMSK32rr GR32:$src)>;
3030 def : Pat<(and_flag_nocf (not GR64:$src), (add GR64:$src, -1)),
3031 (TZMSK64rr GR64:$src)>;
3034 //===----------------------------------------------------------------------===//
3035 // Memory Instructions
3036 // Cache-line maintenance: optimized flush (CLFLUSHOPT), write-back without
3037 // invalidate (CLWB), and demote-to-shared-cache hint (CLDEMOTE); each is
3038 // selected from its intrinsic.
3038 let Predicates = [HasCLFLUSHOPT], SchedRW = [WriteLoad] in
3039 def CLFLUSHOPT : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
3040 "clflushopt\t$src", [(int_x86_clflushopt addr:$src)]>, PD;
3042 let Predicates = [HasCLWB], SchedRW = [WriteLoad] in
3043 def CLWB : I<0xAE, MRM6m, (outs), (ins i8mem:$src), "clwb\t$src",
3044 [(int_x86_clwb addr:$src)]>, PD, NotMemoryFoldable;
3046 let Predicates = [HasCLDEMOTE], SchedRW = [WriteLoad] in
3047 def CLDEMOTE : I<0x1C, MRM0m, (outs), (ins i8mem:$src), "cldemote\t$src",
3048 [(int_x86_cldemote addr:$src)]>, PS;
3050 //===----------------------------------------------------------------------===//
3052 //===----------------------------------------------------------------------===//
3054 include "X86InstrArithmetic.td"
3055 include "X86InstrCMovSetCC.td"
3056 include "X86InstrExtension.td"
3057 include "X86InstrControl.td"
3058 include "X86InstrShiftRotate.td"
3060 // X87 Floating Point Stack.
3061 include "X86InstrFPStack.td"
3063 // SIMD support (SSE, MMX and AVX)
3064 include "X86InstrFragmentsSIMD.td"
3066 // FMA - Fused Multiply-Add support (requires FMA)
3067 include "X86InstrFMA.td"
3070 include "X86InstrXOP.td"
3072 // SSE, MMX and 3DNow! vector support.
3073 include "X86InstrSSE.td"
3074 include "X86InstrAVX512.td"
3075 include "X86InstrMMX.td"
3076 include "X86Instr3DNow.td"
3079 include "X86InstrMPX.td"
3081 include "X86InstrVMX.td"
3082 include "X86InstrSVM.td"
3084 include "X86InstrTSX.td"
3085 include "X86InstrSGX.td"
3088 include "X86InstrAMX.td"
3090 // System instructions.
3091 include "X86InstrSystem.td"
3093 // Compiler Pseudo Instructions and Pat Patterns
3094 include "X86InstrCompiler.td"
3095 include "X86InstrVecCompiler.td"
3097 //===----------------------------------------------------------------------===//
3098 // Assembler Mnemonic Aliases
3099 //===----------------------------------------------------------------------===//
// In AT&T syntax, a bare "call" takes the width suffix implied by the
// current mode.
3101 def : MnemonicAlias<"call", "callw", "att">, Requires<[In16BitMode]>;
3102 def : MnemonicAlias<"call", "calll", "att">, Requires<[In32BitMode]>;
3103 def : MnemonicAlias<"call", "callq", "att">, Requires<[In64BitMode]>;
// Intel-style sign-extension mnemonics map to the AT&T spellings.
3105 def : MnemonicAlias<"cbw", "cbtw", "att">;
3106 def : MnemonicAlias<"cwde", "cwtl", "att">;
3107 def : MnemonicAlias<"cwd", "cwtd", "att">;
3108 def : MnemonicAlias<"cdq", "cltd", "att">;
3109 def : MnemonicAlias<"cdqe", "cltq", "att">;
3110 def : MnemonicAlias<"cqo", "cqto", "att">;
3112 // In 64-bit mode lret maps to lretl; it is not ambiguous with lretq.
3113 def : MnemonicAlias<"lret", "lretw", "att">, Requires<[In16BitMode]>;
3114 def : MnemonicAlias<"lret", "lretl", "att">, Requires<[Not16BitMode]>;
// leave has no width variants; fold the suffixed spellings back to it.
3116 def : MnemonicAlias<"leavel", "leave", "att">, Requires<[Not64BitMode]>;
3117 def : MnemonicAlias<"leaveq", "leave", "att">, Requires<[In64BitMode]>;
3119 def : MnemonicAlias<"loopz", "loope">;
3120 def : MnemonicAlias<"loopnz", "loopne">;
// pop/popf default to the mode's native operand width.
3122 def : MnemonicAlias<"pop", "popw", "att">, Requires<[In16BitMode]>;
3123 def : MnemonicAlias<"pop", "popl", "att">, Requires<[In32BitMode]>;
3124 def : MnemonicAlias<"pop", "popq", "att">, Requires<[In64BitMode]>;
3125 def : MnemonicAlias<"popf", "popfw", "att">, Requires<[In16BitMode]>;
3126 def : MnemonicAlias<"popf", "popfl", "att">, Requires<[In32BitMode]>;
3127 def : MnemonicAlias<"popf", "popfq", "att">, Requires<[In64BitMode]>;
3128 def : MnemonicAlias<"popf", "popfq", "intel">, Requires<[In64BitMode]>;
3129 def : MnemonicAlias<"popfd", "popfl", "att">;
3130 def : MnemonicAlias<"popfw", "popf", "intel">, Requires<[In32BitMode]>;
3131 def : MnemonicAlias<"popfw", "popf", "intel">, Requires<[In64BitMode]>;
3133 // FIXME: This is wrong for "push reg". "push %bx" should turn into pushw in
3134 // all modes. However: "push (addr)" and "push $42" should default to
3135 // pushl/pushq depending on the current mode. Similar for "pop %bx"
3136 def : MnemonicAlias<"push", "pushw", "att">, Requires<[In16BitMode]>;
3137 def : MnemonicAlias<"push", "pushl", "att">, Requires<[In32BitMode]>;
3138 def : MnemonicAlias<"push", "pushq", "att">, Requires<[In64BitMode]>;
3139 def : MnemonicAlias<"pushf", "pushfw", "att">, Requires<[In16BitMode]>;
3140 def : MnemonicAlias<"pushf", "pushfl", "att">, Requires<[In32BitMode]>;
3141 def : MnemonicAlias<"pushf", "pushfq", "att">, Requires<[In64BitMode]>;
3142 def : MnemonicAlias<"pushf", "pushfq", "intel">, Requires<[In64BitMode]>;
3143 def : MnemonicAlias<"pushfd", "pushfl", "att">;
3144 def : MnemonicAlias<"pushfw", "pushf", "intel">, Requires<[In32BitMode]>;
3145 def : MnemonicAlias<"pushfw", "pushf", "intel">, Requires<[In64BitMode]>;
// pusha/popa exist only outside 64-bit mode; width follows the mode.
3147 def : MnemonicAlias<"popad", "popal", "intel">, Requires<[Not64BitMode]>;
3148 def : MnemonicAlias<"pushad", "pushal", "intel">, Requires<[Not64BitMode]>;
3149 def : MnemonicAlias<"popa", "popaw", "intel">, Requires<[In16BitMode]>;
3150 def : MnemonicAlias<"pusha", "pushaw", "intel">, Requires<[In16BitMode]>;
3151 def : MnemonicAlias<"popa", "popal", "intel">, Requires<[In32BitMode]>;
3152 def : MnemonicAlias<"pusha", "pushal", "intel">, Requires<[In32BitMode]>;
3154 def : MnemonicAlias<"popa", "popaw", "att">, Requires<[In16BitMode]>;
3155 def : MnemonicAlias<"pusha", "pushaw", "att">, Requires<[In16BitMode]>;
3156 def : MnemonicAlias<"popa", "popal", "att">, Requires<[In32BitMode]>;
3157 def : MnemonicAlias<"pusha", "pushal", "att">, Requires<[In32BitMode]>;
// Alternate spellings of the rep/repne prefixes.
3159 def : MnemonicAlias<"repe", "rep">;
3160 def : MnemonicAlias<"repz", "rep">;
3161 def : MnemonicAlias<"repnz", "repne">;
// ret defaults to the mode's native width in AT&T syntax.
3163 def : MnemonicAlias<"ret", "retw", "att">, Requires<[In16BitMode]>;
3164 def : MnemonicAlias<"ret", "retl", "att">, Requires<[In32BitMode]>;
3165 def : MnemonicAlias<"ret", "retq", "att">, Requires<[In64BitMode]>;
3167 // Apply 'ret' behavior to 'retn'
3168 def : MnemonicAlias<"retn", "retw", "att">, Requires<[In16BitMode]>;
3169 def : MnemonicAlias<"retn", "retl", "att">, Requires<[In32BitMode]>;
3170 def : MnemonicAlias<"retn", "retq", "att">, Requires<[In64BitMode]>;
3171 def : MnemonicAlias<"retn", "ret", "intel">;
// sal is a synonym for shl (identical encoding).
3173 def : MnemonicAlias<"sal", "shl", "intel">;
3174 def : MnemonicAlias<"salb", "shlb", "att">;
3175 def : MnemonicAlias<"salw", "shlw", "att">;
3176 def : MnemonicAlias<"sall", "shll", "att">;
3177 def : MnemonicAlias<"salq", "shlq", "att">;
// Legacy "smov" spellings of the movs string instructions.
3179 def : MnemonicAlias<"smovb", "movsb", "att">;
3180 def : MnemonicAlias<"smovw", "movsw", "att">;
3181 def : MnemonicAlias<"smovl", "movsl", "att">;
3182 def : MnemonicAlias<"smovq", "movsq", "att">;
// Alternate ud2/ud1 spellings.
3184 def : MnemonicAlias<"ud2a", "ud2", "att">;
3185 def : MnemonicAlias<"ud2bw", "ud1w", "att">;
3186 def : MnemonicAlias<"ud2bl", "ud1l", "att">;
3187 def : MnemonicAlias<"ud2bq", "ud1q", "att">;
3188 def : MnemonicAlias<"verrw", "verr", "att">;
3190 // MS recognizes 'xacquire'/'xrelease' as 'acquire'/'release'
3191 def : MnemonicAlias<"acquire", "xacquire", "intel">;
3192 def : MnemonicAlias<"release", "xrelease", "intel">;
3194 // System instruction aliases.
3195 def : MnemonicAlias<"iret", "iretw", "att">, Requires<[In16BitMode]>;
3196 def : MnemonicAlias<"iret", "iretl", "att">, Requires<[Not16BitMode]>;
3197 def : MnemonicAlias<"sysret", "sysretl", "att">;
3198 def : MnemonicAlias<"sysexit", "sysexitl", "att">;
// Descriptor-table load/store mnemonics: AT&T picks a w/l/q suffix by
// mode; Intel syntax uses the w/d suffixed forms for 16/32-bit modes.
3200 def : MnemonicAlias<"lgdt", "lgdtw", "att">, Requires<[In16BitMode]>;
3201 def : MnemonicAlias<"lgdt", "lgdtl", "att">, Requires<[In32BitMode]>;
3202 def : MnemonicAlias<"lgdt", "lgdtq", "att">, Requires<[In64BitMode]>;
3203 def : MnemonicAlias<"lidt", "lidtw", "att">, Requires<[In16BitMode]>;
3204 def : MnemonicAlias<"lidt", "lidtl", "att">, Requires<[In32BitMode]>;
3205 def : MnemonicAlias<"lidt", "lidtq", "att">, Requires<[In64BitMode]>;
3206 def : MnemonicAlias<"sgdt", "sgdtw", "att">, Requires<[In16BitMode]>;
3207 def : MnemonicAlias<"sgdt", "sgdtl", "att">, Requires<[In32BitMode]>;
3208 def : MnemonicAlias<"sgdt", "sgdtq", "att">, Requires<[In64BitMode]>;
3209 def : MnemonicAlias<"sidt", "sidtw", "att">, Requires<[In16BitMode]>;
3210 def : MnemonicAlias<"sidt", "sidtl", "att">, Requires<[In32BitMode]>;
3211 def : MnemonicAlias<"sidt", "sidtq", "att">, Requires<[In64BitMode]>;
3212 def : MnemonicAlias<"lgdt", "lgdtw", "intel">, Requires<[In16BitMode]>;
3213 def : MnemonicAlias<"lgdt", "lgdtd", "intel">, Requires<[In32BitMode]>;
3214 def : MnemonicAlias<"lidt", "lidtw", "intel">, Requires<[In16BitMode]>;
3215 def : MnemonicAlias<"lidt", "lidtd", "intel">, Requires<[In32BitMode]>;
3216 def : MnemonicAlias<"sgdt", "sgdtw", "intel">, Requires<[In16BitMode]>;
3217 def : MnemonicAlias<"sgdt", "sgdtd", "intel">, Requires<[In32BitMode]>;
3218 def : MnemonicAlias<"sidt", "sidtw", "intel">, Requires<[In16BitMode]>;
3219 def : MnemonicAlias<"sidt", "sidtd", "intel">, Requires<[In32BitMode]>;
3222 // Floating point stack aliases.
// Alternate condition-code spellings for fcmov.
3223 def : MnemonicAlias<"fcmovz", "fcmove", "att">;
3224 def : MnemonicAlias<"fcmova", "fcmovnbe", "att">;
3225 def : MnemonicAlias<"fcmovnae", "fcmovb", "att">;
3226 def : MnemonicAlias<"fcmovna", "fcmovbe", "att">;
3227 def : MnemonicAlias<"fcmovae", "fcmovnb", "att">;
3228 def : MnemonicAlias<"fcomip", "fcompi">;
// 64-bit integer load/store spellings: the q suffix maps to the ll forms.
3229 def : MnemonicAlias<"fildq", "fildll", "att">;
3230 def : MnemonicAlias<"fistpq", "fistpll", "att">;
3231 def : MnemonicAlias<"fisttpq", "fisttpll", "att">;
// Redundant w suffix on the control/status-word instructions is accepted.
3232 def : MnemonicAlias<"fldcww", "fldcw", "att">;
3233 def : MnemonicAlias<"fnstcww", "fnstcw", "att">;
3234 def : MnemonicAlias<"fnstsww", "fnstsw", "att">;
3235 def : MnemonicAlias<"fucomip", "fucompi">;
3236 def : MnemonicAlias<"fwait", "wait">;
// x87/x-state save-restore: the AT&T "q" spellings map to the 64 forms.
3238 def : MnemonicAlias<"fxsaveq", "fxsave64", "att">;
3239 def : MnemonicAlias<"fxrstorq", "fxrstor64", "att">;
3240 def : MnemonicAlias<"xsaveq", "xsave64", "att">;
3241 def : MnemonicAlias<"xrstorq", "xrstor64", "att">;
3242 def : MnemonicAlias<"xsaveoptq", "xsaveopt64", "att">;
3243 def : MnemonicAlias<"xrstorsq", "xrstors64", "att">;
3244 def : MnemonicAlias<"xsavecq", "xsavec64", "att">;
3245 def : MnemonicAlias<"xsavesq", "xsaves64", "att">;
// CondCodeAlias - helper class: rewrites one spelling of a condition code
// into the canonical one by concatenating Prefix + cond + Suffix on both
// the alias and target mnemonics.
3247 class CondCodeAlias<string Prefix,string Suffix, string OldCond, string NewCond,
3249   : MnemonicAlias<!strconcat(Prefix, OldCond, Suffix),
3250                   !strconcat(Prefix, NewCond, Suffix), VariantName>;
3252 /// IntegerCondCodeMnemonicAlias - This multiclass defines a bunch of
3253 /// MnemonicAlias's that canonicalize the condition code in a mnemonic, for
3254 /// example "setz" -> "sete".
3255 multiclass IntegerCondCodeMnemonicAlias<string Prefix, string Suffix,
// One-letter and two-letter condition-code synonyms.
3257   def C   : CondCodeAlias<Prefix, Suffix, "c",   "b",  V>; // setc   -> setb
3258   def Z   : CondCodeAlias<Prefix, Suffix, "z" ,  "e",  V>; // setz   -> sete
3259   def NA  : CondCodeAlias<Prefix, Suffix, "na",  "be", V>; // setna  -> setbe
3260   def NB  : CondCodeAlias<Prefix, Suffix, "nb",  "ae", V>; // setnb  -> setae
3261   def NC  : CondCodeAlias<Prefix, Suffix, "nc",  "ae", V>; // setnc  -> setae
3262   def NG  : CondCodeAlias<Prefix, Suffix, "ng",  "le", V>; // setng  -> setle
3263   def NL  : CondCodeAlias<Prefix, Suffix, "nl",  "ge", V>; // setnl  -> setge
3264   def NZ  : CondCodeAlias<Prefix, Suffix, "nz",  "ne", V>; // setnz  -> setne
3265   def PE  : CondCodeAlias<Prefix, Suffix, "pe",  "p",  V>; // setpe  -> setp
3266   def PO  : CondCodeAlias<Prefix, Suffix, "po",  "np", V>; // setpo  -> setnp
// Three-letter condition-code synonyms.
3268   def NAE : CondCodeAlias<Prefix, Suffix, "nae", "b",  V>; // setnae -> setb
3269   def NBE : CondCodeAlias<Prefix, Suffix, "nbe", "a",  V>; // setnbe -> seta
3270   def NGE : CondCodeAlias<Prefix, Suffix, "nge", "l",  V>; // setnge -> setl
3271   def NLE : CondCodeAlias<Prefix, Suffix, "nle", "g",  V>; // setnle -> setg
// Instantiate the condition-code canonicalization for each family of
// conditional mnemonics.
3274 // Aliases for set<CC>
3275 defm : IntegerCondCodeMnemonicAlias<"set", "">;
3276 // Aliases for j<CC>
3277 defm : IntegerCondCodeMnemonicAlias<"j", "">;
3278 // Aliases for cmov<CC>{w,l,q}
3279 defm : IntegerCondCodeMnemonicAlias<"cmov", "w", "att">;
3280 defm : IntegerCondCodeMnemonicAlias<"cmov", "l", "att">;
3281 defm : IntegerCondCodeMnemonicAlias<"cmov", "q", "att">;
3282 // No size suffix for intel-style asm.
3283 defm : IntegerCondCodeMnemonicAlias<"cmov", "", "intel">;
3286 //===----------------------------------------------------------------------===//
3287 // Assembler Instruction Aliases
3288 //===----------------------------------------------------------------------===//
3290 // aad/aam default to base 10 if no operand is specified.
3291 def : InstAlias<"aad", (AAD8i8 10)>, Requires<[Not64BitMode]>;
3292 def : InstAlias<"aam", (AAM8i8 10)>, Requires<[Not64BitMode]>;
3294 // Disambiguate the mem/imm form of bt-without-a-suffix as btl.
3295 // Likewise for btc/btr/bts.
3296 def : InstAlias<"bt\t{$imm, $mem|$mem, $imm}",
3297                 (BT32mi8 i32mem:$mem, i32u8imm:$imm), 0, "att">;
3298 def : InstAlias<"btc\t{$imm, $mem|$mem, $imm}",
3299                 (BTC32mi8 i32mem:$mem, i32u8imm:$imm), 0, "att">;
3300 def : InstAlias<"btr\t{$imm, $mem|$mem, $imm}",
3301                 (BTR32mi8 i32mem:$mem, i32u8imm:$imm), 0, "att">;
3302 def : InstAlias<"bts\t{$imm, $mem|$mem, $imm}",
3303                 (BTS32mi8 i32mem:$mem, i32u8imm:$imm), 0, "att">;
// clr aliases: clearing a register is xoring it with itself.
3306 def : InstAlias<"clr{b}\t$reg", (XOR8rr  GR8 :$reg, GR8 :$reg), 0>;
3307 def : InstAlias<"clr{w}\t$reg", (XOR16rr GR16:$reg, GR16:$reg), 0>;
3308 def : InstAlias<"clr{l}\t$reg", (XOR32rr GR32:$reg, GR32:$reg), 0>;
3309 def : InstAlias<"clr{q}\t$reg", (XOR64rr GR64:$reg, GR64:$reg), 0>;
3311 // lods aliases. Accept the destination being omitted because it's implicit
3312 // in the mnemonic, or the mnemonic suffix being omitted because it's implicit
3313 // in the destination.
3314 def : InstAlias<"lodsb\t$src", (LODSB srcidx8:$src),  0>;
3315 def : InstAlias<"lodsw\t$src", (LODSW srcidx16:$src), 0>;
3316 def : InstAlias<"lods{l|d}\t$src", (LODSL srcidx32:$src), 0>;
3317 def : InstAlias<"lodsq\t$src", (LODSQ srcidx64:$src), 0>, Requires<[In64BitMode]>;
3318 def : InstAlias<"lods\t{$src, %al|al, $src}", (LODSB srcidx8:$src),  0>;
3319 def : InstAlias<"lods\t{$src, %ax|ax, $src}", (LODSW srcidx16:$src), 0>;
3320 def : InstAlias<"lods\t{$src, %eax|eax, $src}", (LODSL srcidx32:$src), 0>;
3321 def : InstAlias<"lods\t{$src, %rax|rax, $src}", (LODSQ srcidx64:$src), 0>, Requires<[In64BitMode]>;
3322 def : InstAlias<"lods\t$src", (LODSB srcidx8:$src),  0, "intel">;
3323 def : InstAlias<"lods\t$src", (LODSW srcidx16:$src), 0, "intel">;
3324 def : InstAlias<"lods\t$src", (LODSL srcidx32:$src), 0, "intel">;
3325 def : InstAlias<"lods\t$src", (LODSQ srcidx64:$src), 0, "intel">, Requires<[In64BitMode]>;
3328 // stos aliases. Accept the source being omitted because it's implicit in
3329 // the mnemonic, or the mnemonic suffix being omitted because it's implicit
// in the destination.
3331 def : InstAlias<"stosb\t$dst", (STOSB dstidx8:$dst),  0>;
3332 def : InstAlias<"stosw\t$dst", (STOSW dstidx16:$dst), 0>;
3333 def : InstAlias<"stos{l|d}\t$dst", (STOSL dstidx32:$dst), 0>;
3334 def : InstAlias<"stosq\t$dst", (STOSQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
3335 def : InstAlias<"stos\t{%al, $dst|$dst, al}", (STOSB dstidx8:$dst),  0>;
3336 def : InstAlias<"stos\t{%ax, $dst|$dst, ax}", (STOSW dstidx16:$dst), 0>;
3337 def : InstAlias<"stos\t{%eax, $dst|$dst, eax}", (STOSL dstidx32:$dst), 0>;
3338 def : InstAlias<"stos\t{%rax, $dst|$dst, rax}", (STOSQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
3339 def : InstAlias<"stos\t$dst", (STOSB dstidx8:$dst),  0, "intel">;
3340 def : InstAlias<"stos\t$dst", (STOSW dstidx16:$dst), 0, "intel">;
3341 def : InstAlias<"stos\t$dst", (STOSL dstidx32:$dst), 0, "intel">;
3342 def : InstAlias<"stos\t$dst", (STOSQ dstidx64:$dst), 0, "intel">, Requires<[In64BitMode]>;
3345 // scas aliases. Accept the destination being omitted because it's implicit
3346 // in the mnemonic, or the mnemonic suffix being omitted because it's implicit
3347 // in the destination.
3348 def : InstAlias<"scasb\t$dst", (SCASB dstidx8:$dst),  0>;
3349 def : InstAlias<"scasw\t$dst", (SCASW dstidx16:$dst), 0>;
3350 def : InstAlias<"scas{l|d}\t$dst", (SCASL dstidx32:$dst), 0>;
3351 def : InstAlias<"scasq\t$dst", (SCASQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
3352 def : InstAlias<"scas\t{$dst, %al|al, $dst}", (SCASB dstidx8:$dst),  0>;
3353 def : InstAlias<"scas\t{$dst, %ax|ax, $dst}", (SCASW dstidx16:$dst), 0>;
3354 def : InstAlias<"scas\t{$dst, %eax|eax, $dst}", (SCASL dstidx32:$dst), 0>;
3355 def : InstAlias<"scas\t{$dst, %rax|rax, $dst}", (SCASQ dstidx64:$dst), 0>, Requires<[In64BitMode]>;
3356 def : InstAlias<"scas\t$dst", (SCASB dstidx8:$dst),  0, "intel">;
3357 def : InstAlias<"scas\t$dst", (SCASW dstidx16:$dst), 0, "intel">;
3358 def : InstAlias<"scas\t$dst", (SCASL dstidx32:$dst), 0, "intel">;
3359 def : InstAlias<"scas\t$dst", (SCASQ dstidx64:$dst), 0, "intel">, Requires<[In64BitMode]>;
3361 // cmps aliases. Mnemonic suffix being omitted because it's implicit
3362 // in the destination.
3363 def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSB dstidx8:$dst, srcidx8:$src), 0, "intel">;
3364 def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSW dstidx16:$dst, srcidx16:$src), 0, "intel">;
3365 def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSL dstidx32:$dst, srcidx32:$src), 0, "intel">;
3366 def : InstAlias<"cmps\t{$dst, $src|$src, $dst}", (CMPSQ dstidx64:$dst, srcidx64:$src), 0, "intel">, Requires<[In64BitMode]>;
3368 // movs aliases. Mnemonic suffix being omitted because it's implicit
3369 // in the destination.
3370 def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSB dstidx8:$dst, srcidx8:$src), 0, "intel">;
3371 def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSW dstidx16:$dst, srcidx16:$src), 0, "intel">;
3372 def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSL dstidx32:$dst, srcidx32:$src), 0, "intel">;
3373 def : InstAlias<"movs\t{$src, $dst|$dst, $src}", (MOVSQ dstidx64:$dst, srcidx64:$src), 0, "intel">, Requires<[In64BitMode]>;
3375 // div and idiv aliases for explicit A register.
// The A register operand is implicit in the instruction; these aliases
// accept (and drop) an explicitly written %al/%ax/%eax/%rax.
3376 def : InstAlias<"div{b}\t{$src, %al|al, $src}", (DIV8r  GR8 :$src)>;
3377 def : InstAlias<"div{w}\t{$src, %ax|ax, $src}", (DIV16r GR16:$src)>;
3378 def : InstAlias<"div{l}\t{$src, %eax|eax, $src}", (DIV32r GR32:$src)>;
3379 def : InstAlias<"div{q}\t{$src, %rax|rax, $src}", (DIV64r GR64:$src)>;
3380 def : InstAlias<"div{b}\t{$src, %al|al, $src}", (DIV8m  i8mem :$src)>;
3381 def : InstAlias<"div{w}\t{$src, %ax|ax, $src}", (DIV16m i16mem:$src)>;
3382 def : InstAlias<"div{l}\t{$src, %eax|eax, $src}", (DIV32m i32mem:$src)>;
3383 def : InstAlias<"div{q}\t{$src, %rax|rax, $src}", (DIV64m i64mem:$src)>;
3384 def : InstAlias<"idiv{b}\t{$src, %al|al, $src}", (IDIV8r  GR8 :$src)>;
3385 def : InstAlias<"idiv{w}\t{$src, %ax|ax, $src}", (IDIV16r GR16:$src)>;
3386 def : InstAlias<"idiv{l}\t{$src, %eax|eax, $src}", (IDIV32r GR32:$src)>;
3387 def : InstAlias<"idiv{q}\t{$src, %rax|rax, $src}", (IDIV64r GR64:$src)>;
3388 def : InstAlias<"idiv{b}\t{$src, %al|al, $src}", (IDIV8m  i8mem :$src)>;
3389 def : InstAlias<"idiv{w}\t{$src, %ax|ax, $src}", (IDIV16m i16mem:$src)>;
3390 def : InstAlias<"idiv{l}\t{$src, %eax|eax, $src}", (IDIV32m i32mem:$src)>;
3391 def : InstAlias<"idiv{q}\t{$src, %rax|rax, $src}", (IDIV64m i64mem:$src)>;
3395 // Various unary fpstack operations default to operating on ST1.
3396 // For example, "fxch" -> "fxch %st(1)"
// Each alias supplies the implicit %st(1) operand; the trailing 0 marks
// the alias as parse-only (never preferred when printing).
3397 def : InstAlias<"faddp",        (ADD_FPrST0  ST1), 0>;
3398 def : InstAlias<"fadd",         (ADD_FPrST0  ST1), 0>;
3399 def : InstAlias<"fsub{|r}p",    (SUBR_FPrST0 ST1), 0>;
3400 def : InstAlias<"fsub{r|}p",    (SUB_FPrST0  ST1), 0>;
3401 def : InstAlias<"fmul",         (MUL_FPrST0  ST1), 0>;
3402 def : InstAlias<"fmulp",        (MUL_FPrST0  ST1), 0>;
3403 def : InstAlias<"fdiv{|r}p",    (DIVR_FPrST0 ST1), 0>;
3404 def : InstAlias<"fdiv{r|}p",    (DIV_FPrST0  ST1), 0>;
3405 def : InstAlias<"fxch",         (XCH_F       ST1), 0>;
3406 def : InstAlias<"fcom",         (COM_FST0r   ST1), 0>;
3407 def : InstAlias<"fcomp",        (COMP_FST0r  ST1), 0>;
3408 def : InstAlias<"fcomi",        (COM_FIr     ST1), 0>;
3409 def : InstAlias<"fcompi",       (COM_FIPr    ST1), 0>;
3410 def : InstAlias<"fucom",        (UCOM_Fr     ST1), 0>;
3411 def : InstAlias<"fucomp",       (UCOM_FPr    ST1), 0>;
3412 def : InstAlias<"fucomi",       (UCOM_FIr    ST1), 0>;
3413 def : InstAlias<"fucompi",      (UCOM_FIPr   ST1), 0>;
3415 // Handle fmul/fadd/fsub/fdiv instructions with explicitly written st(0) op.
3416 // For example, "fadd %st(4), %st(0)" -> "fadd %st(4)". We also disambiguate
3417 // instructions like "fadd %st(0), %st(0)" as "fadd %st(0)" for consistency with
// gas.
// FpUnaryAlias - emits two aliases per mnemonic: one taking a single
// explicit stack-register operand, and one for the st,st form that maps
// to ST0. EmitAlias controls whether the alias is also used for printing.
3419 multiclass FpUnaryAlias<string Mnemonic, Instruction Inst, bit EmitAlias = 1> {
3420  def : InstAlias<!strconcat(Mnemonic, "\t$op"),
3421                  (Inst RSTi:$op), EmitAlias>;
3422  def : InstAlias<!strconcat(Mnemonic, "\t{%st, %st|st, st}"),
3423                  (Inst ST0), EmitAlias>;
// Instantiate the explicit-st(0) aliases (all parse-only: EmitAlias = 0).
3426 defm : FpUnaryAlias<"fadd", ADD_FST0r, 0>;
3427 defm : FpUnaryAlias<"faddp", ADD_FPrST0, 0>;
3428 defm : FpUnaryAlias<"fsub", SUB_FST0r, 0>;
3429 defm : FpUnaryAlias<"fsub{|r}p", SUBR_FPrST0, 0>;
3430 defm : FpUnaryAlias<"fsubr", SUBR_FST0r, 0>;
3431 defm : FpUnaryAlias<"fsub{r|}p", SUB_FPrST0, 0>;
3432 defm : FpUnaryAlias<"fmul", MUL_FST0r, 0>;
3433 defm : FpUnaryAlias<"fmulp", MUL_FPrST0, 0>;
3434 defm : FpUnaryAlias<"fdiv", DIV_FST0r, 0>;
3435 defm : FpUnaryAlias<"fdiv{|r}p", DIVR_FPrST0, 0>;
3436 defm : FpUnaryAlias<"fdivr", DIVR_FST0r, 0>;
3437 defm : FpUnaryAlias<"fdiv{r|}p", DIV_FPrST0, 0>;
3438 defm : FpUnaryAlias<"fcomi", COM_FIr, 0>;
3439 defm : FpUnaryAlias<"fucomi", UCOM_FIr, 0>;
3440 defm : FpUnaryAlias<"fcompi", COM_FIPr, 0>;
3441 defm : FpUnaryAlias<"fucompi", UCOM_FIPr, 0>;
3444 // Handle "f{mulp,addp} $op, %st(0)" the same as "f{mulp,addp} $op", since they
3445 // commute. We also allow fdiv[r]p/fsubrp even though they don't commute,
3446 // solely because gas supports it.
3447 def : InstAlias<"faddp\t{$op, %st|st, $op}", (ADD_FPrST0 RSTi:$op), 0>;
3448 def : InstAlias<"fmulp\t{$op, %st|st, $op}", (MUL_FPrST0 RSTi:$op), 0>;
3449 def : InstAlias<"fsub{|r}p\t{$op, %st|st, $op}", (SUBR_FPrST0 RSTi:$op), 0>;
3450 def : InstAlias<"fsub{r|}p\t{$op, %st|st, $op}", (SUB_FPrST0 RSTi:$op), 0>;
3451 def : InstAlias<"fdiv{|r}p\t{$op, %st|st, $op}", (DIVR_FPrST0 RSTi:$op), 0>;
3452 def : InstAlias<"fdiv{r|}p\t{$op, %st|st, $op}", (DIV_FPrST0 RSTi:$op), 0>;
// fnstsw with no operand stores the status word to AX.
3454 def : InstAlias<"fnstsw"     , (FNSTSW16r), 0>;
3456 // lcall and ljmp aliases.  This seems to be an odd mapping in 64-bit mode, but
3457 // this is compatible with what GAS does.
3458 def : InstAlias<"lcall\t$seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg), 0>, Requires<[In32BitMode]>;
3459 def : InstAlias<"ljmp\t$seg, $off",  (FARJMP32i  i32imm:$off, i16imm:$seg), 0>, Requires<[In32BitMode]>;
3460 def : InstAlias<"lcall\t{*}$dst",    (FARCALL32m opaquemem:$dst), 0>, Requires<[Not16BitMode]>;
3461 def : InstAlias<"ljmp\t{*}$dst",     (FARJMP32m  opaquemem:$dst), 0>, Requires<[Not16BitMode]>;
3462 def : InstAlias<"lcall\t$seg, $off", (FARCALL16i i16imm:$off, i16imm:$seg), 0>, Requires<[In16BitMode]>;
3463 def : InstAlias<"ljmp\t$seg, $off",  (FARJMP16i  i16imm:$off, i16imm:$seg), 0>, Requires<[In16BitMode]>;
3464 def : InstAlias<"lcall\t{*}$dst",    (FARCALL16m opaquemem:$dst), 0>, Requires<[In16BitMode]>;
3465 def : InstAlias<"ljmp\t{*}$dst",     (FARJMP16m  opaquemem:$dst), 0>, Requires<[In16BitMode]>;
// Indirect jmp through memory picks the operand size of the current mode.
3467 def : InstAlias<"jmp\t{*}$dst",      (JMP64m  i64mem:$dst), 0, "att">, Requires<[In64BitMode]>;
3468 def : InstAlias<"jmp\t{*}$dst",      (JMP32m  i32mem:$dst), 0, "att">, Requires<[In32BitMode]>;
3469 def : InstAlias<"jmp\t{*}$dst",      (JMP16m  i16mem:$dst), 0, "att">, Requires<[In16BitMode]>;
3472 // "imul <imm>, B" is an alias for "imul <imm>, B, B".
3473 def : InstAlias<"imul{w}\t{$imm, $r|$r, $imm}", (IMUL16rri  GR16:$r, GR16:$r, i16imm:$imm), 0>;
3474 def : InstAlias<"imul{w}\t{$imm, $r|$r, $imm}", (IMUL16rri8 GR16:$r, GR16:$r, i16i8imm:$imm), 0>;
3475 def : InstAlias<"imul{l}\t{$imm, $r|$r, $imm}", (IMUL32rri  GR32:$r, GR32:$r, i32imm:$imm), 0>;
3476 def : InstAlias<"imul{l}\t{$imm, $r|$r, $imm}", (IMUL32rri8 GR32:$r, GR32:$r, i32i8imm:$imm), 0>;
3477 def : InstAlias<"imul{q}\t{$imm, $r|$r, $imm}", (IMUL64rri32 GR64:$r, GR64:$r, i64i32imm:$imm), 0>;
3478 def : InstAlias<"imul{q}\t{$imm, $r|$r, $imm}", (IMUL64rri8 GR64:$r, GR64:$r, i64i8imm:$imm), 0>;
3480 // ins aliases. Accept the mnemonic suffix being omitted because it's implicit
3481 // in the destination.
3482 def : InstAlias<"ins\t{%dx, $dst|$dst, dx}", (INSB dstidx8:$dst),  0, "intel">;
3483 def : InstAlias<"ins\t{%dx, $dst|$dst, dx}", (INSW dstidx16:$dst), 0, "intel">;
3484 def : InstAlias<"ins\t{%dx, $dst|$dst, dx}", (INSL dstidx32:$dst), 0, "intel">;
3486 // outs aliases. Accept the mnemonic suffix being omitted because it's implicit
// in the source.
3488 def : InstAlias<"outs\t{$src, %dx|dx, $src}", (OUTSB srcidx8:$src),  0, "intel">;
3489 def : InstAlias<"outs\t{$src, %dx|dx, $src}", (OUTSW srcidx16:$src), 0, "intel">;
3490 def : InstAlias<"outs\t{$src, %dx|dx, $src}", (OUTSL srcidx32:$src), 0, "intel">;
3492 // inb %dx -> inb %al, %dx
3493 def : InstAlias<"inb\t{%dx|dx}", (IN8rr), 0>;
3494 def : InstAlias<"inw\t{%dx|dx}", (IN16rr), 0>;
3495 def : InstAlias<"inl\t{%dx|dx}", (IN32rr), 0>;
// in with an immediate port, destination register implicit.
3496 def : InstAlias<"inb\t$port", (IN8ri u8imm:$port), 0>;
3497 def : InstAlias<"inw\t$port", (IN16ri u8imm:$port), 0>;
3498 def : InstAlias<"inl\t$port", (IN32ri u8imm:$port), 0>;
3501 // jmp and call aliases for lcall and ljmp.  jmp $42,$5 -> ljmp
3502 def : InstAlias<"call\t$seg, $off",  (FARCALL16i i16imm:$off, i16imm:$seg)>, Requires<[In16BitMode]>;
3503 def : InstAlias<"jmp\t$seg, $off",   (FARJMP16i  i16imm:$off, i16imm:$seg)>, Requires<[In16BitMode]>;
3504 def : InstAlias<"call\t$seg, $off",  (FARCALL32i i32imm:$off, i16imm:$seg)>, Requires<[In32BitMode]>;
3505 def : InstAlias<"jmp\t$seg, $off",   (FARJMP32i  i32imm:$off, i16imm:$seg)>, Requires<[In32BitMode]>;
3506 def : InstAlias<"callw\t$seg, $off", (FARCALL16i i16imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
3507 def : InstAlias<"jmpw\t$seg, $off",  (FARJMP16i  i16imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
3508 def : InstAlias<"calll\t$seg, $off", (FARCALL32i i32imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
3509 def : InstAlias<"jmpl\t$seg, $off",  (FARJMP32i  i32imm:$off, i16imm:$seg)>, Requires<[Not64BitMode]>;
3511 // Match 'movq <largeimm>, <reg>' as an alias for movabsq.
3512 def : InstAlias<"mov{q}\t{$imm, $reg|$reg, $imm}", (MOV64ri GR64:$reg, i64imm:$imm), 0>;
3514 // Match 'movd GR64, MMX' as an alias for movq to be compatible with gas,
3515 // which supports this due to an old AMD documentation bug when 64-bit mode was
// added.
3517 def : InstAlias<"movd\t{$src, $dst|$dst, $src}",
3518                 (MMX_MOVD64to64rr VR64:$dst, GR64:$src), 0>;
3519 def : InstAlias<"movd\t{$src, $dst|$dst, $src}",
3520                 (MMX_MOVD64from64rr GR64:$dst, VR64:$src), 0>;
// movsx aliases: suffix-less movsx disambiguated by operand sizes.
3523 def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX16rr8 GR16:$dst, GR8:$src), 0, "att">;
3524 def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX16rm8 GR16:$dst, i8mem:$src), 0, "att">;
3525 def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX32rr8 GR32:$dst, GR8:$src), 0, "att">;
3526 def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX32rr16 GR32:$dst, GR16:$src), 0, "att">;
3527 def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX64rr8 GR64:$dst, GR8:$src), 0, "att">;
3528 def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX64rr16 GR64:$dst, GR16:$src), 0, "att">;
3529 def : InstAlias<"movsx\t{$src, $dst|$dst, $src}", (MOVSX64rr32 GR64:$dst, GR32:$src), 0, "att">;
// movzx aliases: suffix-less movzx disambiguated by operand sizes.
3532 def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX16rr8 GR16:$dst, GR8:$src), 0, "att">;
3533 def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX16rm8 GR16:$dst, i8mem:$src), 0, "att">;
3534 def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX32rr8 GR32:$dst, GR8:$src), 0, "att">;
3535 def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX32rr16 GR32:$dst, GR16:$src), 0, "att">;
3536 def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX64rr8 GR64:$dst, GR8:$src), 0, "att">;
3537 def : InstAlias<"movzx\t{$src, $dst|$dst, $src}", (MOVZX64rr16 GR64:$dst, GR16:$src), 0, "att">;
3538 // Note: No GR32->GR64 movzx form.
3540 // outb %dx -> outb %al, %dx
3541 def : InstAlias<"outb\t{%dx|dx}", (OUT8rr),  0>;
3542 def : InstAlias<"outw\t{%dx|dx}", (OUT16rr), 0>;
3543 def : InstAlias<"outl\t{%dx|dx}", (OUT32rr), 0>;
// out with an immediate port, source register implicit.
3544 def : InstAlias<"outb\t$port", (OUT8ir  u8imm:$port), 0>;
3545 def : InstAlias<"outw\t$port", (OUT16ir u8imm:$port), 0>;
3546 def : InstAlias<"outl\t$port", (OUT32ir u8imm:$port), 0>;
3548 // 'sldt <mem>' can be encoded with either sldtw or sldtq with the same
3549 // effect (both store to a 16-bit mem).  Force to sldtw to avoid ambiguity
3550 // errors, since its encoding is the most compact.
3551 def : InstAlias<"sldt $mem", (SLDT16m i16mem:$mem), 0>;
3553 // shld/shrd op,op -> shld op, op, CL
3554 def : InstAlias<"shld{w}\t{$r2, $r1|$r1, $r2}", (SHLD16rrCL GR16:$r1, GR16:$r2), 0>;
3555 def : InstAlias<"shld{l}\t{$r2, $r1|$r1, $r2}", (SHLD32rrCL GR32:$r1, GR32:$r2), 0>;
3556 def : InstAlias<"shld{q}\t{$r2, $r1|$r1, $r2}", (SHLD64rrCL GR64:$r1, GR64:$r2), 0>;
3557 def : InstAlias<"shrd{w}\t{$r2, $r1|$r1, $r2}", (SHRD16rrCL GR16:$r1, GR16:$r2), 0>;
3558 def : InstAlias<"shrd{l}\t{$r2, $r1|$r1, $r2}", (SHRD32rrCL GR32:$r1, GR32:$r2), 0>;
3559 def : InstAlias<"shrd{q}\t{$r2, $r1|$r1, $r2}", (SHRD64rrCL GR64:$r1, GR64:$r2), 0>;
// Memory-destination forms of the same implicit-CL aliases.
3561 def : InstAlias<"shld{w}\t{$reg, $mem|$mem, $reg}", (SHLD16mrCL i16mem:$mem, GR16:$reg), 0>;
3562 def : InstAlias<"shld{l}\t{$reg, $mem|$mem, $reg}", (SHLD32mrCL i32mem:$mem, GR32:$reg), 0>;
3563 def : InstAlias<"shld{q}\t{$reg, $mem|$mem, $reg}", (SHLD64mrCL i64mem:$mem, GR64:$reg), 0>;
3564 def : InstAlias<"shrd{w}\t{$reg, $mem|$mem, $reg}", (SHRD16mrCL i16mem:$mem, GR16:$reg), 0>;
3565 def : InstAlias<"shrd{l}\t{$reg, $mem|$mem, $reg}", (SHRD32mrCL i32mem:$mem, GR32:$reg), 0>;
3566 def : InstAlias<"shrd{q}\t{$reg, $mem|$mem, $reg}", (SHRD64mrCL i64mem:$mem, GR64:$reg), 0>;
3568 /* FIXME: This is disabled because the asm matcher is currently incapable of
3569  * matching a fixed immediate like $1.
   NOTE(review): everything down to the closing marker of this C-style
   comment is intentionally disabled code, kept for when the matcher
   gains fixed-immediate support.
3570 // "shl X, $1" is an alias for "shl X".
3571 multiclass ShiftRotateByOneAlias<string Mnemonic, string Opc> {
3572  def : InstAlias<!strconcat(Mnemonic, "b $op, $$1"),
3573                  (!cast<Instruction>(!strconcat(Opc, "8r1")) GR8:$op)>;
3574  def : InstAlias<!strconcat(Mnemonic, "w $op, $$1"),
3575                  (!cast<Instruction>(!strconcat(Opc, "16r1")) GR16:$op)>;
3576  def : InstAlias<!strconcat(Mnemonic, "l $op, $$1"),
3577                  (!cast<Instruction>(!strconcat(Opc, "32r1")) GR32:$op)>;
3578  def : InstAlias<!strconcat(Mnemonic, "q $op, $$1"),
3579                  (!cast<Instruction>(!strconcat(Opc, "64r1")) GR64:$op)>;
3580  def : InstAlias<!strconcat(Mnemonic, "b $op, $$1"),
3581                  (!cast<Instruction>(!strconcat(Opc, "8m1")) i8mem:$op)>;
3582  def : InstAlias<!strconcat(Mnemonic, "w $op, $$1"),
3583                  (!cast<Instruction>(!strconcat(Opc, "16m1")) i16mem:$op)>;
3584  def : InstAlias<!strconcat(Mnemonic, "l $op, $$1"),
3585                  (!cast<Instruction>(!strconcat(Opc, "32m1")) i32mem:$op)>;
3586  def : InstAlias<!strconcat(Mnemonic, "q $op, $$1"),
3587                  (!cast<Instruction>(!strconcat(Opc, "64m1")) i64mem:$op)>;
3590 defm : ShiftRotateByOneAlias<"rcl", "RCL">;
3591 defm : ShiftRotateByOneAlias<"rcr", "RCR">;
3592 defm : ShiftRotateByOneAlias<"rol", "ROL">;
3593 defm : ShiftRotateByOneAlias<"ror", "ROR">;
3596 // test: We accept "testX <reg>, <mem>" and "testX <mem>, <reg>" as synonyms.
3597 def : InstAlias<"test{b}\t{$mem, $val|$val, $mem}",
3598                 (TEST8mr  i8mem :$mem, GR8 :$val), 0>;
3599 def : InstAlias<"test{w}\t{$mem, $val|$val, $mem}",
3600                 (TEST16mr i16mem:$mem, GR16:$val), 0>;
3601 def : InstAlias<"test{l}\t{$mem, $val|$val, $mem}",
3602                 (TEST32mr i32mem:$mem, GR32:$val), 0>;
3603 def : InstAlias<"test{q}\t{$mem, $val|$val, $mem}",
3604                 (TEST64mr i64mem:$mem, GR64:$val), 0>;
3606 // xchg: We accept "xchgX <reg>, <mem>" and "xchgX <mem>, <reg>" as synonyms.
3607 def : InstAlias<"xchg{b}\t{$mem, $val|$val, $mem}",
3608                 (XCHG8rm  GR8 :$val, i8mem :$mem), 0>;
3609 def : InstAlias<"xchg{w}\t{$mem, $val|$val, $mem}",
3610                 (XCHG16rm GR16:$val, i16mem:$mem), 0>;
3611 def : InstAlias<"xchg{l}\t{$mem, $val|$val, $mem}",
3612                 (XCHG32rm GR32:$val, i32mem:$mem), 0>;
3613 def : InstAlias<"xchg{q}\t{$mem, $val|$val, $mem}",
3614                 (XCHG64rm GR64:$val, i64mem:$mem), 0>;
3616 // xchg: We accept "xchgX <reg>, %eax" and "xchgX %eax, <reg>" as synonyms.
3617 def : InstAlias<"xchg{w}\t{%ax, $src|$src, ax}", (XCHG16ar GR16:$src), 0>;
3618 def : InstAlias<"xchg{l}\t{%eax, $src|$src, eax}", (XCHG32ar GR32:$src), 0>;
3619 def : InstAlias<"xchg{q}\t{%rax, $src|$src, rax}", (XCHG64ar GR64:$src), 0>;
3621 // In 64-bit mode, xchg %eax, %eax can't be encoded with the 0x90 opcode we
3622 // would get by default because it's defined as NOP. But xchg %eax, %eax implies
3623 // implicit zeroing of the upper 32 bits. So alias to the longer encoding.
3624 def : InstAlias<"xchg{l}\t{%eax, %eax|eax, eax}",
3625                 (XCHG32rr EAX, EAX), 0>, Requires<[In64BitMode]>;
3627 // xchg %rax, %rax is a nop in x86-64 and can be encoded as such. Without this
3628 // we emit an unneeded REX.w prefix.
3629 def : InstAlias<"xchg{q}\t{%rax, %rax|rax, rax}", (NOOP), 0>;
3631 // These aliases exist to get the parser to prioritize matching 8-bit
3632 // immediate encodings over matching the implicit ax/eax/rax encodings. By
3633 // explicitly mentioning the A register here, these entries will be ordered
3634 // first due to the more explicit immediate type.
// 16-bit forms (AX with an 8-bit sign-extended immediate).
3635 def : InstAlias<"adc{w}\t{$imm, %ax|ax, $imm}", (ADC16ri8 AX, i16i8imm:$imm), 0>;
3636 def : InstAlias<"add{w}\t{$imm, %ax|ax, $imm}", (ADD16ri8 AX, i16i8imm:$imm), 0>;
3637 def : InstAlias<"and{w}\t{$imm, %ax|ax, $imm}", (AND16ri8 AX, i16i8imm:$imm), 0>;
3638 def : InstAlias<"cmp{w}\t{$imm, %ax|ax, $imm}", (CMP16ri8 AX, i16i8imm:$imm), 0>;
3639 def : InstAlias<"or{w}\t{$imm, %ax|ax, $imm}",  (OR16ri8 AX,  i16i8imm:$imm), 0>;
3640 def : InstAlias<"sbb{w}\t{$imm, %ax|ax, $imm}", (SBB16ri8 AX, i16i8imm:$imm), 0>;
3641 def : InstAlias<"sub{w}\t{$imm, %ax|ax, $imm}", (SUB16ri8 AX, i16i8imm:$imm), 0>;
3642 def : InstAlias<"xor{w}\t{$imm, %ax|ax, $imm}", (XOR16ri8 AX, i16i8imm:$imm), 0>;
// 32-bit forms (EAX with an 8-bit sign-extended immediate).
3644 def : InstAlias<"adc{l}\t{$imm, %eax|eax, $imm}", (ADC32ri8 EAX, i32i8imm:$imm), 0>;
3645 def : InstAlias<"add{l}\t{$imm, %eax|eax, $imm}", (ADD32ri8 EAX, i32i8imm:$imm), 0>;
3646 def : InstAlias<"and{l}\t{$imm, %eax|eax, $imm}", (AND32ri8 EAX, i32i8imm:$imm), 0>;
3647 def : InstAlias<"cmp{l}\t{$imm, %eax|eax, $imm}", (CMP32ri8 EAX, i32i8imm:$imm), 0>;
3648 def : InstAlias<"or{l}\t{$imm, %eax|eax, $imm}",  (OR32ri8 EAX,  i32i8imm:$imm), 0>;
3649 def : InstAlias<"sbb{l}\t{$imm, %eax|eax, $imm}", (SBB32ri8 EAX, i32i8imm:$imm), 0>;
3650 def : InstAlias<"sub{l}\t{$imm, %eax|eax, $imm}", (SUB32ri8 EAX, i32i8imm:$imm), 0>;
3651 def : InstAlias<"xor{l}\t{$imm, %eax|eax, $imm}", (XOR32ri8 EAX, i32i8imm:$imm), 0>;
// 64-bit forms (RAX with an 8-bit sign-extended immediate).
3653 def : InstAlias<"adc{q}\t{$imm, %rax|rax, $imm}", (ADC64ri8 RAX, i64i8imm:$imm), 0>;
3654 def : InstAlias<"add{q}\t{$imm, %rax|rax, $imm}", (ADD64ri8 RAX, i64i8imm:$imm), 0>;
3655 def : InstAlias<"and{q}\t{$imm, %rax|rax, $imm}", (AND64ri8 RAX, i64i8imm:$imm), 0>;
3656 def : InstAlias<"cmp{q}\t{$imm, %rax|rax, $imm}", (CMP64ri8 RAX, i64i8imm:$imm), 0>;
3657 def : InstAlias<"or{q}\t{$imm, %rax|rax, $imm}",  (OR64ri8 RAX,  i64i8imm:$imm), 0>;
3658 def : InstAlias<"sbb{q}\t{$imm, %rax|rax, $imm}", (SBB64ri8 RAX, i64i8imm:$imm), 0>;
3659 def : InstAlias<"sub{q}\t{$imm, %rax|rax, $imm}", (SUB64ri8 RAX, i64i8imm:$imm), 0>;
3660 def : InstAlias<"xor{q}\t{$imm, %rax|rax, $imm}", (XOR64ri8 RAX, i64i8imm:$imm), 0>;