//===- X86InstrCompiler.td - Compiler Pseudos and Patterns -*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the various pseudo instructions used by the compiler,
// as well as Pat patterns used during instruction selection.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Pattern Matching Support

def GetLo32XForm : SDNodeXForm<imm, [{
  // Transformation function: get the low 32 bits.
  return getI32Imm((uint32_t)N->getZExtValue(), SDLoc(N));
}]>;
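// For example, the i64 immediate 0x1FFFFFFFF is transformed into the i32
// immediate 0xFFFFFFFF (only the low 32 bits are kept).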

//===----------------------------------------------------------------------===//
// Random Pseudo Instructions.

// PIC base construction.  This expands to code that looks like this:
//     call  $next_inst
//     popl %destreg
let hasSideEffects = 0, isNotDuplicable = 1, Uses = [ESP, SSP],
    SchedRW = [WriteJump] in
  def MOVPC32r : Ii32<0xE8, Pseudo, (outs GR32:$reg), (ins i32imm:$label),
                      "", []>;

// ADJCALLSTACKDOWN/UP implicitly use/def ESP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [ESP, EFLAGS, SSP], Uses = [ESP, SSP], SchedRW = [WriteALU] in {
def ADJCALLSTACKDOWN32 : I<0, Pseudo, (outs),
                           (ins i32imm:$amt1, i32imm:$amt2, i32imm:$amt3),
                           "#ADJCALLSTACKDOWN", []>, Requires<[NotLP64]>;
def ADJCALLSTACKUP32   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                           Requires<[NotLP64]>;
}
def : Pat<(X86callseq_start timm:$amt1, timm:$amt2),
          (ADJCALLSTACKDOWN32 i32imm:$amt1, i32imm:$amt2, 0)>, Requires<[NotLP64]>;
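
// For example, a call with 12 bytes of outgoing stack arguments typically
// expands to "subl $12, %esp" before the call and "addl $12, %esp" after it,
// unless a reserved call frame lets the adjustments be folded away.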

// ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [RSP, EFLAGS, SSP], Uses = [RSP, SSP], SchedRW = [WriteALU] in {
def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs),
                           (ins i32imm:$amt1, i32imm:$amt2, i32imm:$amt3),
                           "#ADJCALLSTACKDOWN", []>, Requires<[IsLP64]>;
def ADJCALLSTACKUP64   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                           Requires<[IsLP64]>;
}
def : Pat<(X86callseq_start timm:$amt1, timm:$amt2),
          (ADJCALLSTACKDOWN64 i32imm:$amt1, i32imm:$amt2, 0)>, Requires<[IsLP64]>;

let SchedRW = [WriteSystem] in {

// x86-64 va_start lowering magic.
let hasSideEffects = 1, mayStore = 1, Defs = [EFLAGS] in {
def VASTART_SAVE_XMM_REGS : I<0, Pseudo,
                              (outs),
                              (ins GR8:$al, i8mem:$regsavefi, variable_ops),
                              "#VASTART_SAVE_XMM_REGS $al, $regsavefi",
                              [(X86vastart_save_xmm_regs GR8:$al, addr:$regsavefi),
                               (implicit EFLAGS)]>;
}

let usesCustomInserter = 1, Defs = [EFLAGS] in {
// The VAARG_64 and VAARG_X32 pseudo-instructions take the address of the
// va_list, and place the address of the next argument into a register.
let Defs = [EFLAGS] in {
def VAARG_64 : I<0, Pseudo,
                 (outs GR64:$dst),
                 (ins i8mem:$ap, i32imm:$size, i8imm:$mode, i32imm:$align),
                 "#VAARG_64 $dst, $ap, $size, $mode, $align",
                 [(set GR64:$dst,
                    (X86vaarg64 addr:$ap, timm:$size, timm:$mode, timm:$align)),
                  (implicit EFLAGS)]>, Requires<[In64BitMode, IsLP64]>;
def VAARG_X32 : I<0, Pseudo,
                 (outs GR32:$dst),
                 (ins i8mem:$ap, i32imm:$size, i8imm:$mode, i32imm:$align),
                 "#VAARG_X32 $dst, $ap, $size, $mode, $align",
                 [(set GR32:$dst,
                    (X86vaargx32 addr:$ap, timm:$size, timm:$mode, timm:$align)),
                  (implicit EFLAGS)]>, Requires<[In64BitMode, NotLP64]>;
}
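
// A sketch of the expansion: the custom inserter emits the usual System V
// va_arg control flow, checking gp_offset/fp_offset in the va_list and
// fetching the argument from the register-save area if one remains,
// otherwise from the overflow area.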

// When using segmented stacks these are lowered into instructions which first
// check if the current stacklet has enough free memory. If it does, memory is
// allocated by bumping the stack pointer. Otherwise memory is allocated from
// the heap.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
def SEG_ALLOCA_32 : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$size),
                      "# variable sized alloca for segmented stacks",
                      [(set GR32:$dst,
                         (X86SegAlloca GR32:$size))]>,
                    Requires<[NotLP64]>;

let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
def SEG_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
                      "# variable sized alloca for segmented stacks",
                      [(set GR64:$dst,
                         (X86SegAlloca GR64:$size))]>,
                    Requires<[In64BitMode]>;

// To protect against stack clash, dynamic allocation should perform a memory
// probe at each page.
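// The probe interval is one page (4 KiB by default; the "stack-probe-size"
// function attribute can override it).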

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
def PROBED_ALLOCA_32 : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$size),
                      "# variable sized alloca with probing",
                      [(set GR32:$dst,
                         (X86ProbedAlloca GR32:$size))]>,
                    Requires<[NotLP64]>;

let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
def PROBED_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
                      "# variable sized alloca with probing",
                      [(set GR64:$dst,
                         (X86ProbedAlloca GR64:$size))]>,
                    Requires<[In64BitMode]>;
} // usesCustomInserter

let hasNoSchedulingInfo = 1 in
def STACKALLOC_W_PROBING : I<0, Pseudo, (outs), (ins i64imm:$stacksize),
                             "# fixed size alloca with probing",
                             []>;

// Dynamic stack allocation yields a _chkstk or _alloca call for all Windows
// targets.  These calls are needed to probe the stack when allocating more
// than 4k bytes in one go.  Touching the stack at 4K increments is necessary
// to ensure that the guard pages used by the OS virtual memory manager are
// allocated in correct sequence.
// The main point of having a separate instruction is the extra unmodelled
// effects it has (compared to an ordinary call), like the stack pointer
// change.
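
// On Win64, for example, the allocation size is passed in %rax and the
// expansion emits "call __chkstk" followed by the actual stack-pointer
// adjustment; __chkstk itself only probes the pages.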

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
def DYN_ALLOCA_32 : I<0, Pseudo, (outs), (ins GR32:$size),
                     "# dynamic stack allocation",
                     [(X86DynAlloca GR32:$size)]>,
                     Requires<[NotLP64]>;

let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
def DYN_ALLOCA_64 : I<0, Pseudo, (outs), (ins GR64:$size),
                     "# dynamic stack allocation",
                     [(X86DynAlloca GR64:$size)]>,
                     Requires<[In64BitMode]>;
} // SchedRW

// These instructions XOR the frame pointer into a GPR. They are used in some
// stack protection schemes. These are post-RA pseudos because we only know the
// frame register after register allocation.
let Constraints = "$src = $dst", isMoveImm = 1, isPseudo = 1, Defs = [EFLAGS] in {
  def XOR32_FP : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src),
                   "xorl\t$$FP, $src", []>,
                 Requires<[NotLP64]>, Sched<[WriteALU]>;
  def XOR64_FP : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src),
                   "xorq\t$$FP, $src", []>,
                 Requires<[In64BitMode]>, Sched<[WriteALU]>;
}
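
// After register allocation the expansion substitutes the concrete frame
// register for $$FP, producing e.g. "xorl %ebp, %edx".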

//===----------------------------------------------------------------------===//
// EH Pseudo Instructions
//
let SchedRW = [WriteSystem] in {
let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN   : I<0xC3, RawFrm, (outs), (ins GR32:$addr),
                    "ret\t#eh_return, addr: $addr",
                    [(X86ehret GR32:$addr)]>, Sched<[WriteJumpLd]>;
}

let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN64 : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
                    "ret\t#eh_return, addr: $addr",
                    [(X86ehret GR64:$addr)]>, Sched<[WriteJumpLd]>;
}

let isTerminator = 1, hasSideEffects = 1, isBarrier = 1, hasCtrlDep = 1,
    isCodeGenOnly = 1, isReturn = 1, isEHScopeReturn = 1 in {
  def CLEANUPRET : I<0, Pseudo, (outs), (ins), "# CLEANUPRET", [(cleanupret)]>;

  // CATCHRET needs a custom inserter for SEH.
  let usesCustomInserter = 1 in
    def CATCHRET : I<0, Pseudo, (outs), (ins brtarget32:$dst, brtarget32:$from),
                     "# CATCHRET",
                     [(catchret bb:$dst, bb:$from)]>;
}

let hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1,
    usesCustomInserter = 1 in {
  def EH_SjLj_SetJmp32  : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$buf),
                            "#EH_SJLJ_SETJMP32",
                            [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
                          Requires<[Not64BitMode]>;
  def EH_SjLj_SetJmp64  : I<0, Pseudo, (outs GR32:$dst), (ins i64mem:$buf),
                            "#EH_SJLJ_SETJMP64",
                            [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
                          Requires<[In64BitMode]>;
  let isTerminator = 1 in {
  def EH_SjLj_LongJmp32 : I<0, Pseudo, (outs), (ins i32mem:$buf),
                            "#EH_SJLJ_LONGJMP32",
                            [(X86eh_sjlj_longjmp addr:$buf)]>,
                          Requires<[Not64BitMode]>;
  def EH_SjLj_LongJmp64 : I<0, Pseudo, (outs), (ins i64mem:$buf),
                            "#EH_SJLJ_LONGJMP64",
                            [(X86eh_sjlj_longjmp addr:$buf)]>,
                          Requires<[In64BitMode]>;
  }
}

let isBranch = 1, isTerminator = 1, isCodeGenOnly = 1 in {
  def EH_SjLj_Setup : I<0, Pseudo, (outs), (ins brtarget:$dst),
                        "#EH_SjLj_Setup\t$dst", []>;
}
} // SchedRW

//===----------------------------------------------------------------------===//
// Pseudo instructions used by unwind info.
//
let isPseudo = 1, SchedRW = [WriteSystem] in {
  def SEH_PushReg : I<0, Pseudo, (outs), (ins i32imm:$reg),
                      "#SEH_PushReg $reg", []>;
  def SEH_SaveReg : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$dst),
                      "#SEH_SaveReg $reg, $dst", []>;
  def SEH_SaveXMM : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$dst),
                      "#SEH_SaveXMM $reg, $dst", []>;
  def SEH_StackAlloc : I<0, Pseudo, (outs), (ins i32imm:$size),
                         "#SEH_StackAlloc $size", []>;
  def SEH_StackAlign : I<0, Pseudo, (outs), (ins i32imm:$align),
                         "#SEH_StackAlign $align", []>;
  def SEH_SetFrame : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$offset),
                       "#SEH_SetFrame $reg, $offset", []>;
  def SEH_PushFrame : I<0, Pseudo, (outs), (ins i1imm:$mode),
                        "#SEH_PushFrame $mode", []>;
  def SEH_EndPrologue : I<0, Pseudo, (outs), (ins),
                          "#SEH_EndPrologue", []>;
  def SEH_Epilogue : I<0, Pseudo, (outs), (ins),
                       "#SEH_Epilogue", []>;
}

//===----------------------------------------------------------------------===//
// Pseudo instructions used by address sanitizer.
//===----------------------------------------------------------------------===//
let
  Defs = [R10, R11, EFLAGS] in {
def ASAN_CHECK_MEMACCESS : PseudoI<
  (outs), (ins GR64PLTSafe:$addr, i32imm:$accessinfo),
  [(int_asan_check_memaccess GR64PLTSafe:$addr, (i32 timm:$accessinfo))]>,
  Sched<[]>;
}

//===----------------------------------------------------------------------===//
// Pseudo instructions used by segmented stacks.
//

// This is lowered into a RET instruction by MCInstLower.  We need
// this so that we don't have to have a MachineBasicBlock which ends
// with a RET and also has successors.
let isPseudo = 1, SchedRW = [WriteJumpLd] in {
def MORESTACK_RET: I<0, Pseudo, (outs), (ins), "", []>;

// This instruction is lowered to a RET followed by a MOV.  The two
// instructions are not generated on a higher level since then the
// verifier sees a MachineBasicBlock ending with a non-terminator.
def MORESTACK_RET_RESTORE_R10 : I<0, Pseudo, (outs), (ins), "", []>;
}

//===----------------------------------------------------------------------===//
// Alias Instructions
//===----------------------------------------------------------------------===//

// Alias instruction mapping movr0 to xor.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1,
    isPseudo = 1, isMoveImm = 1, AddedComplexity = 10 in
def MOV32r0 : I<0, Pseudo, (outs GR32:$dst), (ins), "",
                [(set GR32:$dst, 0)]>, Sched<[WriteZero]>;
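
// MOV32r0 is expanded after register allocation to "xor reg, reg", e.g.
// "xorl %eax, %eax", which is why it is marked as clobbering EFLAGS.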

// Other widths can also make use of the 32-bit xor, which may have a smaller
// encoding and avoid partial register updates.
let AddedComplexity = 10 in {
def : Pat<(i8 0), (EXTRACT_SUBREG (MOV32r0), sub_8bit)>;
def : Pat<(i16 0), (EXTRACT_SUBREG (MOV32r0), sub_16bit)>;
def : Pat<(i64 0), (SUBREG_TO_REG (i64 0), (MOV32r0), sub_32bit)>;
}

let Predicates = [OptForSize, Not64BitMode],
    AddedComplexity = 10 in {
  let SchedRW = [WriteALU] in {
  // Pseudo instructions for materializing 1 and -1 using XOR+INC/DEC,
  // which only require 3 bytes compared to MOV32ri which requires 5.
  let Defs = [EFLAGS], isReMaterializable = 1, isPseudo = 1 in {
    def MOV32r1 : I<0, Pseudo, (outs GR32:$dst), (ins), "",
                    [(set GR32:$dst, 1)]>;
    def MOV32r_1 : I<0, Pseudo, (outs GR32:$dst), (ins), "",
                     [(set GR32:$dst, -1)]>;
  }
  } // SchedRW

  // MOV16ri is 4 bytes, so the instructions above are smaller.
  def : Pat<(i16 1), (EXTRACT_SUBREG (MOV32r1), sub_16bit)>;
  def : Pat<(i16 -1), (EXTRACT_SUBREG (MOV32r_1), sub_16bit)>;
}
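
// The byte counts: "xorl %eax, %eax" (2 bytes) plus "incl %eax" or
// "decl %eax" (1 byte each outside 64-bit mode) gives 3 bytes total,
// versus 5 bytes for "movl $1, %eax".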

let isReMaterializable = 1, isPseudo = 1, AddedComplexity = 5,
    SchedRW = [WriteALU] in {
// AddedComplexity higher than MOV64ri but lower than MOV32r0 and MOV32r1.
def MOV32ImmSExti8 : I<0, Pseudo, (outs GR32:$dst), (ins i32i8imm:$src), "",
                       [(set GR32:$dst, i32immSExt8:$src)]>,
                     Requires<[OptForMinSize, NotWin64WithoutFP]>;
def MOV64ImmSExti8 : I<0, Pseudo, (outs GR64:$dst), (ins i64i8imm:$src), "",
                       [(set GR64:$dst, i64immSExt8:$src)]>,
                     Requires<[OptForMinSize, NotWin64WithoutFP]>;
}

// Materialize i64 constant where top 32-bits are zero. This could theoretically
// use MOV32ri with a SUBREG_TO_REG to represent the zero-extension, however
// that would make it more difficult to rematerialize.
let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1,
    isPseudo = 1, SchedRW = [WriteMove] in
def MOV32ri64 : I<0, Pseudo, (outs GR64:$dst), (ins i64i32imm:$src), "",
                  [(set GR64:$dst, i64immZExt32:$src)]>;

// This 64-bit pseudo-move can also be used for labels in the x86-64 small code
// model.
def mov64imm32 : ComplexPattern<i64, 1, "selectMOV64Imm32", [X86Wrapper]>;
def : Pat<(i64 mov64imm32:$src), (MOV32ri64 mov64imm32:$src)>;

// Use sbb to materialize carry bit.
let Uses = [EFLAGS], Defs = [EFLAGS], isPseudo = 1, SchedRW = [WriteADC],
    hasSideEffects = 0 in {
// FIXME: These are pseudo ops that should be replaced with Pat<> patterns.
// However, Pat<> can't replicate the destination reg into the inputs of the
// result.
def SETB_C32r : I<0, Pseudo, (outs GR32:$dst), (ins), "", []>;
def SETB_C64r : I<0, Pseudo, (outs GR64:$dst), (ins), "", []>;
}
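
// These expand to "sbb reg, reg", which yields 0 when the carry flag is
// clear and all-ones (-1) when it is set.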

//===----------------------------------------------------------------------===//
// String Pseudo Instructions
//
let SchedRW = [WriteMicrocoded] in {
let Defs = [ECX,EDI,ESI], Uses = [ECX,EDI,ESI], isCodeGenOnly = 1 in {
def REP_MOVSB_32 : I<0xA4, RawFrm, (outs), (ins),
                    "{rep;movsb (%esi), %es:(%edi)|rep movsb es:[edi], [esi]}",
                    [(X86rep_movs i8)]>, REP, AdSize32,
                   Requires<[NotLP64]>;
def REP_MOVSW_32 : I<0xA5, RawFrm, (outs), (ins),
                    "{rep;movsw (%esi), %es:(%edi)|rep movsw es:[edi], [esi]}",
                    [(X86rep_movs i16)]>, REP, AdSize32, OpSize16,
                   Requires<[NotLP64]>;
def REP_MOVSD_32 : I<0xA5, RawFrm, (outs), (ins),
                    "{rep;movsl (%esi), %es:(%edi)|rep movsd es:[edi], [esi]}",
                    [(X86rep_movs i32)]>, REP, AdSize32, OpSize32,
                   Requires<[NotLP64]>;
def REP_MOVSQ_32 : RI<0xA5, RawFrm, (outs), (ins),
                     "{rep;movsq (%esi), %es:(%edi)|rep movsq es:[edi], [esi]}",
                     [(X86rep_movs i64)]>, REP, AdSize32,
                    Requires<[NotLP64, In64BitMode]>;
}

let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI], isCodeGenOnly = 1 in {
def REP_MOVSB_64 : I<0xA4, RawFrm, (outs), (ins),
                    "{rep;movsb (%rsi), %es:(%rdi)|rep movsb es:[rdi], [rsi]}",
                    [(X86rep_movs i8)]>, REP, AdSize64,
                   Requires<[IsLP64]>;
def REP_MOVSW_64 : I<0xA5, RawFrm, (outs), (ins),
                    "{rep;movsw (%rsi), %es:(%rdi)|rep movsw es:[rdi], [rsi]}",
                    [(X86rep_movs i16)]>, REP, AdSize64, OpSize16,
                   Requires<[IsLP64]>;
def REP_MOVSD_64 : I<0xA5, RawFrm, (outs), (ins),
                    "{rep;movsl (%rsi), %es:(%rdi)|rep movsd es:[rdi], [rsi]}",
                    [(X86rep_movs i32)]>, REP, AdSize64, OpSize32,
                   Requires<[IsLP64]>;
def REP_MOVSQ_64 : RI<0xA5, RawFrm, (outs), (ins),
                     "{rep;movsq (%rsi), %es:(%rdi)|rep movsq es:[rdi], [rsi]}",
                     [(X86rep_movs i64)]>, REP, AdSize64,
                    Requires<[IsLP64]>;
}

// FIXME: Should use "(X86rep_stos AL)" as the pattern.
let Defs = [ECX,EDI], isCodeGenOnly = 1 in {
  let Uses = [AL,ECX,EDI] in
  def REP_STOSB_32 : I<0xAA, RawFrm, (outs), (ins),
                      "{rep;stosb %al, %es:(%edi)|rep stosb es:[edi], al}",
                      [(X86rep_stos i8)]>, REP, AdSize32,
                     Requires<[NotLP64]>;
  let Uses = [AX,ECX,EDI] in
  def REP_STOSW_32 : I<0xAB, RawFrm, (outs), (ins),
                      "{rep;stosw %ax, %es:(%edi)|rep stosw es:[edi], ax}",
                      [(X86rep_stos i16)]>, REP, AdSize32, OpSize16,
                     Requires<[NotLP64]>;
  let Uses = [EAX,ECX,EDI] in
  def REP_STOSD_32 : I<0xAB, RawFrm, (outs), (ins),
                      "{rep;stosl %eax, %es:(%edi)|rep stosd es:[edi], eax}",
                      [(X86rep_stos i32)]>, REP, AdSize32, OpSize32,
                     Requires<[NotLP64]>;
  let Uses = [RAX,ECX,EDI] in
  def REP_STOSQ_32 : RI<0xAB, RawFrm, (outs), (ins),
                       "{rep;stosq %rax, %es:(%edi)|rep stosq es:[edi], rax}",
                       [(X86rep_stos i64)]>, REP, AdSize32,
                      Requires<[NotLP64, In64BitMode]>;
}

let Defs = [RCX,RDI], isCodeGenOnly = 1 in {
  let Uses = [AL,RCX,RDI] in
  def REP_STOSB_64 : I<0xAA, RawFrm, (outs), (ins),
                      "{rep;stosb %al, %es:(%rdi)|rep stosb es:[rdi], al}",
                      [(X86rep_stos i8)]>, REP, AdSize64,
                     Requires<[IsLP64]>;
  let Uses = [AX,RCX,RDI] in
  def REP_STOSW_64 : I<0xAB, RawFrm, (outs), (ins),
                      "{rep;stosw %ax, %es:(%rdi)|rep stosw es:[rdi], ax}",
                      [(X86rep_stos i16)]>, REP, AdSize64, OpSize16,
                     Requires<[IsLP64]>;
  let Uses = [EAX,RCX,RDI] in
  def REP_STOSD_64 : I<0xAB, RawFrm, (outs), (ins),
                      "{rep;stosl %eax, %es:(%rdi)|rep stosd es:[rdi], eax}",
                      [(X86rep_stos i32)]>, REP, AdSize64, OpSize32,
                     Requires<[IsLP64]>;
  let Uses = [RAX,RCX,RDI] in
  def REP_STOSQ_64 : RI<0xAB, RawFrm, (outs), (ins),
                       "{rep;stosq %rax, %es:(%rdi)|rep stosq es:[rdi], rax}",
                       [(X86rep_stos i64)]>, REP, AdSize64,
                      Requires<[IsLP64]>;
}
} // SchedRW

//===----------------------------------------------------------------------===//
// Thread Local Storage Instructions
//===----------------------------------------------------------------------===//
let SchedRW = [WriteSystem] in {

// ELF TLS Support
// All calls clobber the non-callee saved registers. ESP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,
            ST0, ST1, ST2, ST3, ST4, ST5, ST6, ST7,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS, DF],
    usesCustomInserter = 1, Uses = [ESP, SSP] in {
def TLS_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                   "# TLS_addr32",
                   [(X86tlsaddr tls32addr:$sym)]>,
                 Requires<[Not64BitMode]>;
def TLS_base_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                        "# TLS_base_addr32",
                        [(X86tlsbaseaddr tls32baseaddr:$sym)]>,
                      Requires<[Not64BitMode]>;
}

// All calls clobber the non-callee saved registers. RSP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
            FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,
            ST0, ST1, ST2, ST3, ST4, ST5, ST6, ST7,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS, DF],
    usesCustomInserter = 1, Uses = [RSP, SSP] in {
def TLS_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLS_addr64",
                   [(X86tlsaddr tls64addr:$sym)]>,
                 Requires<[In64BitMode, IsLP64]>;
def TLS_base_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                        "# TLS_base_addr64",
                        [(X86tlsbaseaddr tls64baseaddr:$sym)]>,
                      Requires<[In64BitMode, IsLP64]>;
def TLS_addrX32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                    "# TLS_addrX32",
                    [(X86tlsaddr tls32addr:$sym)]>,
                  Requires<[In64BitMode, NotLP64]>;
def TLS_base_addrX32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                         "# TLS_base_addrX32",
                         [(X86tlsbaseaddr tls32baseaddr:$sym)]>,
                       Requires<[In64BitMode, NotLP64]>;
}

// Darwin TLS Support
// For i386, the address of the thunk is passed on the stack, on return the
// address of the variable is in %eax.  %ecx is trashed during the function
// call.  All other registers are preserved.
let Defs = [EAX, ECX, EFLAGS, DF],
    Uses = [ESP, SSP],
    usesCustomInserter = 1 in
def TLSCall_32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                   "# TLSCall_32",
                   [(X86TLSCall addr:$sym)]>,
                 Requires<[Not64BitMode]>;

// For x86_64, the address of the thunk is passed in %rdi, but the
// pseudo directly uses the symbol, so do not add an implicit use of
// %rdi.  The lowering will do the right thing with RDI.
// On return the address of the variable is in %rax.  All other
// registers are preserved.
let Defs = [RAX, EFLAGS, DF],
    Uses = [RSP, SSP],
    usesCustomInserter = 1 in
def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLSCall_64",
                   [(X86TLSCall addr:$sym)]>,
                 Requires<[In64BitMode]>;
} // SchedRW

//===----------------------------------------------------------------------===//
// Conditional Move Pseudo Instructions

// CMOV* - Used to implement the SELECT DAG operation.  Expanded after
// instruction selection into a branch sequence.
multiclass CMOVrr_PSEUDO<RegisterClass RC, ValueType VT> {
  def CMOV#NAME : I<0, Pseudo,
                    (outs RC:$dst), (ins RC:$t, RC:$f, i8imm:$cond),
                    "#CMOV_"#NAME#" PSEUDO!",
                    [(set RC:$dst, (VT (X86cmov RC:$t, RC:$f, timm:$cond,
                                                EFLAGS)))]>;
}
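
// The custom inserter turns each of these into a diamond: a conditional
// branch on $cond, two blocks selecting $t or $f, and a PHI merging the
// result into $dst.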

let usesCustomInserter = 1, hasNoSchedulingInfo = 1, Uses = [EFLAGS] in {
  // X86 doesn't have 8-bit conditional moves. Use a customInserter to
  // emit control flow. An alternative to this is to mark i8 SELECT as Promote,
  // however that requires promoting the operands, and can induce additional
  // i8 register pressure.
  defm _GR8 : CMOVrr_PSEUDO<GR8, i8>;

  let Predicates = [NoCMOV] in {
    defm _GR32 : CMOVrr_PSEUDO<GR32, i32>;
    defm _GR16 : CMOVrr_PSEUDO<GR16, i16>;
  } // Predicates = [NoCMOV]

  // fcmov doesn't handle all possible EFLAGS conditions; provide pseudo-CMOV
  // fallbacks for the FP stack register classes.
  let Predicates = [FPStackf32] in
    defm _RFP32 : CMOVrr_PSEUDO<RFP32, f32>;

  let Predicates = [FPStackf64] in
    defm _RFP64 : CMOVrr_PSEUDO<RFP64, f64>;

  defm _RFP80 : CMOVrr_PSEUDO<RFP80, f80>;

  let Predicates = [HasMMX] in
    defm _VR64 : CMOVrr_PSEUDO<VR64, x86mmx>;

  let Predicates = [HasSSE1,NoAVX512] in
    defm _FR32 : CMOVrr_PSEUDO<FR32, f32>;
  let Predicates = [HasSSE2,NoAVX512] in {
    defm _FR16 : CMOVrr_PSEUDO<FR16, f16>;
    defm _FR64 : CMOVrr_PSEUDO<FR64, f64>;
  }
  let Predicates = [HasAVX512] in {
    defm _FR16X : CMOVrr_PSEUDO<FR16X, f16>;
    defm _FR32X : CMOVrr_PSEUDO<FR32X, f32>;
    defm _FR64X : CMOVrr_PSEUDO<FR64X, f64>;
  }
  let Predicates = [NoVLX] in {
    defm _VR128 : CMOVrr_PSEUDO<VR128, v2i64>;
    defm _VR256 : CMOVrr_PSEUDO<VR256, v4i64>;
  }
  let Predicates = [HasVLX] in {
    defm _VR128X : CMOVrr_PSEUDO<VR128X, v2i64>;
    defm _VR256X : CMOVrr_PSEUDO<VR256X, v4i64>;
  }
  defm _VR512  : CMOVrr_PSEUDO<VR512, v8i64>;
  defm _VK1    : CMOVrr_PSEUDO<VK1, v1i1>;
  defm _VK2    : CMOVrr_PSEUDO<VK2, v2i1>;
  defm _VK4    : CMOVrr_PSEUDO<VK4, v4i1>;
  defm _VK8    : CMOVrr_PSEUDO<VK8, v8i1>;
  defm _VK16   : CMOVrr_PSEUDO<VK16, v16i1>;
  defm _VK32   : CMOVrr_PSEUDO<VK32, v32i1>;
  defm _VK64   : CMOVrr_PSEUDO<VK64, v64i1>;
} // usesCustomInserter = 1, hasNoSchedulingInfo = 1, Uses = [EFLAGS]

def : Pat<(f128 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
          (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;

let Predicates = [NoVLX] in {
  def : Pat<(v16i8 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
  def : Pat<(v8i16 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
  def : Pat<(v4i32 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
  def : Pat<(v4f32 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
  def : Pat<(v2f64 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;

  def : Pat<(v32i8 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
  def : Pat<(v16i16 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
  def : Pat<(v8i32 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
  def : Pat<(v8f32 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
  def : Pat<(v4f64 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
}

let Predicates = [HasVLX] in {
  def : Pat<(v16i8 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
  def : Pat<(v8i16 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
  def : Pat<(v8f16 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
  def : Pat<(v4i32 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
  def : Pat<(v4f32 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
  def : Pat<(v2f64 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;

  def : Pat<(v32i8 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
  def : Pat<(v16i16 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
  def : Pat<(v16f16 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
  def : Pat<(v8i32 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
  def : Pat<(v8f32 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
  def : Pat<(v4f64 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
}

def : Pat<(v64i8 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
          (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
def : Pat<(v32i16 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
          (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
def : Pat<(v32f16 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
          (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
def : Pat<(v16i32 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
          (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
def : Pat<(v16f32 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
          (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
def : Pat<(v8f64 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
          (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;

//===----------------------------------------------------------------------===//
// Normal-Instructions-With-Lock-Prefix Pseudo Instructions
//===----------------------------------------------------------------------===//

// FIXME: Use normal instructions and add lock prefix dynamically.

// Memory barriers

let isCodeGenOnly = 1, Defs = [EFLAGS] in
def OR32mi8Locked : Ii8<0x83, MRM1m, (outs), (ins i32mem:$dst, i32i8imm:$zero),
                        "or{l}\t{$zero, $dst|$dst, $zero}", []>,
                    Requires<[Not64BitMode]>, OpSize32, LOCK,
                    Sched<[WriteALURMW]>;

let hasSideEffects = 1, isMeta = 1 in
def Int_MemBarrier : I<0, Pseudo, (outs), (ins),
                       "#MEMBARRIER",
                       [(X86MemBarrier)]>, Sched<[WriteLoad]>;

// RegOpc corresponds to the mr version of the instruction
// ImmOpc corresponds to the mi version of the instruction
// ImmOpc8 corresponds to the mi8 version of the instruction
// ImmMod corresponds to the instruction format of the mi and mi8 versions
multiclass LOCK_ArithBinOp<bits<8> RegOpc, bits<8> ImmOpc, bits<8> ImmOpc8,
                           Format ImmMod, SDNode Op, string mnemonic> {
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
    SchedRW = [WriteALURMW] in {

def NAME#8mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                  RegOpc{3}, RegOpc{2}, RegOpc{1}, 0 },
                 MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
                 !strconcat(mnemonic, "{b}\t",
                            "{$src2, $dst|$dst, $src2}"),
                 [(set EFLAGS, (Op addr:$dst, GR8:$src2))]>, LOCK;

def NAME#16mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                   RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                  MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
                  !strconcat(mnemonic, "{w}\t",
                             "{$src2, $dst|$dst, $src2}"),
                  [(set EFLAGS, (Op addr:$dst, GR16:$src2))]>,
                  OpSize16, LOCK;

def NAME#32mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                   RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                  MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
                  !strconcat(mnemonic, "{l}\t",
                             "{$src2, $dst|$dst, $src2}"),
                  [(set EFLAGS, (Op addr:$dst, GR32:$src2))]>,
                  OpSize32, LOCK;

def NAME#64mr : RI<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                    RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                   MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                   !strconcat(mnemonic, "{q}\t",
                              "{$src2, $dst|$dst, $src2}"),
                   [(set EFLAGS, (Op addr:$dst, GR64:$src2))]>, LOCK;

// NOTE: These are order specific, we want the mi8 forms to be listed
// first so that they are slightly preferred to the mi forms.
def NAME#16mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                      ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                     ImmMod, (outs), (ins i16mem :$dst, i16i8imm :$src2),
                     !strconcat(mnemonic, "{w}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [(set EFLAGS, (Op addr:$dst, i16immSExt8:$src2))]>,
                     OpSize16, LOCK;

def NAME#32mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                      ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                     ImmMod, (outs), (ins i32mem :$dst, i32i8imm :$src2),
                     !strconcat(mnemonic, "{l}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [(set EFLAGS, (Op addr:$dst, i32immSExt8:$src2))]>,
                     OpSize32, LOCK;

def NAME#64mi8 : RIi8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                       ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                      ImmMod, (outs), (ins i64mem :$dst, i64i8imm :$src2),
                      !strconcat(mnemonic, "{q}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      [(set EFLAGS, (Op addr:$dst, i64immSExt8:$src2))]>,
                      LOCK;

def NAME#8mi : Ii8<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                    ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 0 },
                   ImmMod, (outs), (ins i8mem :$dst, i8imm :$src2),
                   !strconcat(mnemonic, "{b}\t",
                              "{$src2, $dst|$dst, $src2}"),
                   [(set EFLAGS, (Op addr:$dst, (i8 imm:$src2)))]>, LOCK;

def NAME#16mi : Ii16<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                      ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                     ImmMod, (outs), (ins i16mem :$dst, i16imm :$src2),
                     !strconcat(mnemonic, "{w}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [(set EFLAGS, (Op addr:$dst, (i16 imm:$src2)))]>,
                     OpSize16, LOCK;

def NAME#32mi : Ii32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                      ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                     ImmMod, (outs), (ins i32mem :$dst, i32imm :$src2),
                     !strconcat(mnemonic, "{l}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [(set EFLAGS, (Op addr:$dst, (i32 imm:$src2)))]>,
                     OpSize32, LOCK;

def NAME#64mi32 : RIi32S<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                          ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                         ImmMod, (outs), (ins i64mem :$dst, i64i32imm :$src2),
                         !strconcat(mnemonic, "{q}\t",
                                    "{$src2, $dst|$dst, $src2}"),
                         [(set EFLAGS, (Op addr:$dst, i64immSExt32:$src2))]>,
                         LOCK;
}
}

defm LOCK_ADD : LOCK_ArithBinOp<0x00, 0x80, 0x83, MRM0m, X86lock_add, "add">;
defm LOCK_SUB : LOCK_ArithBinOp<0x28, 0x80, 0x83, MRM5m, X86lock_sub, "sub">;
defm LOCK_OR  : LOCK_ArithBinOp<0x08, 0x80, 0x83, MRM1m, X86lock_or , "or">;
defm LOCK_AND : LOCK_ArithBinOp<0x20, 0x80, 0x83, MRM4m, X86lock_and, "and">;
defm LOCK_XOR : LOCK_ArithBinOp<0x30, 0x80, 0x83, MRM6m, X86lock_xor, "xor">;
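
// For example, LOCK_ADD32mi8 prints as "lock addl $imm, mem"; only EFLAGS is
// produced as a value, the data result stays in memory.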

def X86lock_add_nocf : PatFrag<(ops node:$lhs, node:$rhs),
                               (X86lock_add node:$lhs, node:$rhs), [{
  return hasNoCarryFlagUses(SDValue(N, 0));
}]>;

def X86lock_sub_nocf : PatFrag<(ops node:$lhs, node:$rhs),
                               (X86lock_sub node:$lhs, node:$rhs), [{
  return hasNoCarryFlagUses(SDValue(N, 0));
}]>;

let Predicates = [UseIncDec] in {
  let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
      SchedRW = [WriteALURMW] in {
    def LOCK_INC8m  : I<0xFE, MRM0m, (outs), (ins i8mem :$dst),
                        "inc{b}\t$dst",
                        [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i8 1)))]>,
                        LOCK;
    def LOCK_INC16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst),
                        "inc{w}\t$dst",
                        [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i16 1)))]>,
                        OpSize16, LOCK;
    def LOCK_INC32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst),
                        "inc{l}\t$dst",
                        [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i32 1)))]>,
                        OpSize32, LOCK;
    def LOCK_INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst),
                         "inc{q}\t$dst",
                         [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i64 1)))]>,
                         LOCK;

    def LOCK_DEC8m  : I<0xFE, MRM1m, (outs), (ins i8mem :$dst),
                        "dec{b}\t$dst",
                        [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i8 1)))]>,
                        LOCK;
    def LOCK_DEC16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst),
                        "dec{w}\t$dst",
                        [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i16 1)))]>,
                        OpSize16, LOCK;
    def LOCK_DEC32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst),
                        "dec{l}\t$dst",
                        [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i32 1)))]>,
                        OpSize32, LOCK;
    def LOCK_DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst),
                         "dec{q}\t$dst",
                         [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i64 1)))]>,
                         LOCK;
  }

  // Additional patterns for -1 constant.
  def : Pat<(X86lock_add addr:$dst, (i8 -1)), (LOCK_DEC8m addr:$dst)>;
  def : Pat<(X86lock_add addr:$dst, (i16 -1)), (LOCK_DEC16m addr:$dst)>;
  def : Pat<(X86lock_add addr:$dst, (i32 -1)), (LOCK_DEC32m addr:$dst)>;
  def : Pat<(X86lock_add addr:$dst, (i64 -1)), (LOCK_DEC64m addr:$dst)>;
  def : Pat<(X86lock_sub addr:$dst, (i8 -1)), (LOCK_INC8m addr:$dst)>;
  def : Pat<(X86lock_sub addr:$dst, (i16 -1)), (LOCK_INC16m addr:$dst)>;
  def : Pat<(X86lock_sub addr:$dst, (i32 -1)), (LOCK_INC32m addr:$dst)>;
  def : Pat<(X86lock_sub addr:$dst, (i64 -1)), (LOCK_INC64m addr:$dst)>;
}

def X86LBTest : SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisPtrTy<1>,
                                     SDTCisVT<2, i8>, SDTCisVT<3, i32>]>;
def x86bts : SDNode<"X86ISD::LBTS", X86LBTest,
                    [SDNPHasChain, SDNPMayLoad, SDNPMayStore, SDNPMemOperand]>;
def x86btc : SDNode<"X86ISD::LBTC", X86LBTest,
                    [SDNPHasChain, SDNPMayLoad, SDNPMayStore, SDNPMemOperand]>;
def x86btr : SDNode<"X86ISD::LBTR", X86LBTest,
                    [SDNPHasChain, SDNPMayLoad, SDNPMayStore, SDNPMemOperand]>;

multiclass ATOMIC_LOGIC_OP<Format Form, string s> {
  let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
      SchedRW = [WriteBitTestSetRegRMW] in {
    def 16m : Ii8<0xBA, Form, (outs), (ins i16mem:$src1, i8imm:$src2),
                  !strconcat(s, "{w}\t{$src2, $src1|$src1, $src2}"),
                  [(set EFLAGS, (!cast<SDNode>("x86" # s) addr:$src1, timm:$src2, (i32 16)))]>,
              OpSize16, TB, LOCK;
    def 32m : Ii8<0xBA, Form, (outs), (ins i32mem:$src1, i8imm:$src2),
                  !strconcat(s, "{l}\t{$src2, $src1|$src1, $src2}"),
                  [(set EFLAGS, (!cast<SDNode>("x86" # s) addr:$src1, timm:$src2, (i32 32)))]>,
              OpSize32, TB, LOCK;
    def 64m : RIi8<0xBA, Form, (outs), (ins i64mem:$src1, i8imm:$src2),
                   !strconcat(s, "{q}\t{$src2, $src1|$src1, $src2}"),
                   [(set EFLAGS, (!cast<SDNode>("x86" # s) addr:$src1, timm:$src2, (i32 64)))]>,
               TB, LOCK;
  }
}

defm LOCK_BTS : ATOMIC_LOGIC_OP<MRM5m, "bts">;
defm LOCK_BTC : ATOMIC_LOGIC_OP<MRM7m, "btc">;
defm LOCK_BTR : ATOMIC_LOGIC_OP<MRM6m, "btr">;
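
// These are selected for atomic read-modify-write operations that only set,
// complement, or clear a single bit; e.g. "lock btsl $3, mem" atomically sets
// bit 3 and leaves the old bit value in CF (part of EFLAGS).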

// Atomic compare and swap.
multiclass LCMPXCHG_BinOp<bits<8> Opc8, bits<8> Opc, Format Form,
                          string mnemonic, SDPatternOperator frag> {
let isCodeGenOnly = 1, SchedRW = [WriteCMPXCHGRMW] in {
  let Defs = [AL, EFLAGS], Uses = [AL] in
  def NAME#8  : I<Opc8, Form, (outs), (ins i8mem:$ptr, GR8:$swap),
                  !strconcat(mnemonic, "{b}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR8:$swap, 1)]>, TB, LOCK;
  let Defs = [AX, EFLAGS], Uses = [AX] in
  def NAME#16 : I<Opc, Form, (outs), (ins i16mem:$ptr, GR16:$swap),
                  !strconcat(mnemonic, "{w}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR16:$swap, 2)]>, TB, OpSize16, LOCK;
  let Defs = [EAX, EFLAGS], Uses = [EAX] in
  def NAME#32 : I<Opc, Form, (outs), (ins i32mem:$ptr, GR32:$swap),
                  !strconcat(mnemonic, "{l}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR32:$swap, 4)]>, TB, OpSize32, LOCK;
  let Defs = [RAX, EFLAGS], Uses = [RAX] in
  def NAME#64 : RI<Opc, Form, (outs), (ins i64mem:$ptr, GR64:$swap),
                   !strconcat(mnemonic, "{q}\t{$swap, $ptr|$ptr, $swap}"),
                   [(frag addr:$ptr, GR64:$swap, 8)]>, TB, LOCK;
}
}

let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX],
    Predicates = [HasCX8], SchedRW = [WriteCMPXCHGRMW],
    isCodeGenOnly = 1, usesCustomInserter = 1 in {
def LCMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i64mem:$ptr),
                   "cmpxchg8b\t$ptr",
                   [(X86cas8 addr:$ptr)]>, TB, LOCK;
}

let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX],
    Predicates = [HasCX16,In64BitMode], SchedRW = [WriteCMPXCHGRMW],
    isCodeGenOnly = 1, mayLoad = 1, mayStore = 1, hasSideEffects = 0 in {
def LCMPXCHG16B : RI<0xC7, MRM1m, (outs), (ins i128mem:$ptr),
                     "cmpxchg16b\t$ptr",
                     []>, TB, LOCK;
}

// This pseudo must be used when the frame uses RBX as
// the base pointer. Indeed, in such situation RBX is a reserved
// register and the register allocator will ignore any use/def of
// it. In other words, the register allocator will not fix the clobbering of
// RBX that will happen when setting the arguments for the instruction.
//
// Unlike the actual related instruction, we mark that this one
// defines RBX (instead of using RBX).
// The rationale is that we will define RBX during the expansion of
// the pseudo. The argument feeding RBX is rbx_input.
//
// The additional argument, $rbx_save, is a temporary register used to
// save the value of RBX across the actual instruction.
//
// To make sure the register assigned to $rbx_save does not interfere with
// the definition of the actual instruction, we use a definition $dst which
// is tied to $rbx_save. That way, the live-range of $rbx_save spans across
// the instruction and we are sure we will have a valid register to restore
// the value of RBX.
let Defs = [RAX, RDX, RBX, EFLAGS], Uses = [RAX, RCX, RDX],
    Predicates = [HasCX16,In64BitMode], SchedRW = [WriteCMPXCHGRMW],
    isCodeGenOnly = 1, isPseudo = 1,
    mayLoad = 1, mayStore = 1, hasSideEffects = 0,
    Constraints = "$rbx_save = $dst" in {
def LCMPXCHG16B_SAVE_RBX :
    I<0, Pseudo, (outs GR64:$dst),
      (ins i128mem:$ptr, GR64:$rbx_input, GR64:$rbx_save), "", []>;
}

// Pseudo instruction that doesn't read/write RBX. Will be turned into either
// LCMPXCHG16B_SAVE_RBX or LCMPXCHG16B via a custom inserter.
let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RCX, RDX],
    Predicates = [HasCX16,In64BitMode], SchedRW = [WriteCMPXCHGRMW],
    isCodeGenOnly = 1, isPseudo = 1,
    mayLoad = 1, mayStore = 1, hasSideEffects = 0,
    usesCustomInserter = 1 in {
def LCMPXCHG16B_NO_RBX :
    I<0, Pseudo, (outs), (ins i128mem:$ptr, GR64:$rbx_input), "",
      [(X86cas16 addr:$ptr, GR64:$rbx_input)]>;
}

// This pseudo must be used when the frame uses RBX/EBX as
// the base pointer;
// cf. comment for LCMPXCHG16B_SAVE_RBX.
let Defs = [EBX], Uses = [ECX, EAX],
    Predicates = [HasMWAITX], SchedRW = [WriteSystem],
    isCodeGenOnly = 1, isPseudo = 1, Constraints = "$rbx_save = $dst" in {
def MWAITX_SAVE_RBX :
    I<0, Pseudo, (outs GR64:$dst),
      (ins GR32:$ebx_input, GR64:$rbx_save),
      "mwaitx",
      []>;
}

// Pseudo mwaitx instruction to use for custom insertion.
let Predicates = [HasMWAITX], SchedRW = [WriteSystem],
    isCodeGenOnly = 1, isPseudo = 1,
    usesCustomInserter = 1 in {
def MWAITX :
    I<0, Pseudo, (outs), (ins GR32:$ecx, GR32:$eax, GR32:$ebx),
      "mwaitx",
      [(int_x86_mwaitx GR32:$ecx, GR32:$eax, GR32:$ebx)]>;
}

defm LCMPXCHG : LCMPXCHG_BinOp<0xB0, 0xB1, MRMDestMem, "cmpxchg", X86cas>;

// Atomic exchange and add
multiclass ATOMIC_RMW_BINOP<bits<8> opc8, bits<8> opc, string mnemonic,
                            string frag> {
  let Constraints = "$val = $dst", Defs = [EFLAGS], mayLoad = 1, mayStore = 1,
      isCodeGenOnly = 1, SchedRW = [WriteALURMW] in {
    def NAME#8  : I<opc8, MRMSrcMem, (outs GR8:$dst),
                    (ins GR8:$val, i8mem:$ptr),
                    !strconcat(mnemonic, "{b}\t{$val, $ptr|$ptr, $val}"),
                    [(set GR8:$dst,
                          (!cast<PatFrag>(frag # "_8") addr:$ptr, GR8:$val))]>;
    def NAME#16 : I<opc, MRMSrcMem, (outs GR16:$dst),
                    (ins GR16:$val, i16mem:$ptr),
                    !strconcat(mnemonic, "{w}\t{$val, $ptr|$ptr, $val}"),
                    [(set GR16:$dst,
                          (!cast<PatFrag>(frag # "_16") addr:$ptr, GR16:$val))]>,
                  OpSize16;
    def NAME#32 : I<opc, MRMSrcMem, (outs GR32:$dst),
                    (ins GR32:$val, i32mem:$ptr),
                    !strconcat(mnemonic, "{l}\t{$val, $ptr|$ptr, $val}"),
                    [(set GR32:$dst,
                          (!cast<PatFrag>(frag # "_32") addr:$ptr, GR32:$val))]>,
                  OpSize32;
    def NAME#64 : RI<opc, MRMSrcMem, (outs GR64:$dst),
                     (ins GR64:$val, i64mem:$ptr),
                     !strconcat(mnemonic, "{q}\t{$val, $ptr|$ptr, $val}"),
                     [(set GR64:$dst,
                           (!cast<PatFrag>(frag # "_64") addr:$ptr, GR64:$val))]>;
  }
}

defm LXADD : ATOMIC_RMW_BINOP<0xc0, 0xc1, "xadd", "atomic_load_add">, TB, LOCK;
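
// For example, an "atomicrmw add" that needs the old value becomes
// "lock xaddl %reg, mem": the addend register receives the previous
// memory contents.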

/* The following multiclass tries to make sure that in code like
 *    x.store (immediate op x.load(acquire), release)
 * and
 *    x.store (register op x.load(acquire), release)
 * an operation directly on memory is generated instead of wasting a register.
 * It is not automatic as atomic_store/load are only lowered to MOV instructions
 * extremely late to prevent them from being accidentally reordered in the backend
 * (see below the RELEASE_MOV* / ACQUIRE_MOV* pseudo-instructions)
 */
multiclass RELEASE_BINOP_MI<string Name, SDNode op> {
  def : Pat<(atomic_store_8 addr:$dst,
             (op (atomic_load_8 addr:$dst), (i8 imm:$src))),
            (!cast<Instruction>(Name#"8mi") addr:$dst, imm:$src)>;
  def : Pat<(atomic_store_16 addr:$dst,
             (op (atomic_load_16 addr:$dst), (i16 imm:$src))),
            (!cast<Instruction>(Name#"16mi") addr:$dst, imm:$src)>;
  def : Pat<(atomic_store_32 addr:$dst,
             (op (atomic_load_32 addr:$dst), (i32 imm:$src))),
            (!cast<Instruction>(Name#"32mi") addr:$dst, imm:$src)>;
  def : Pat<(atomic_store_64 addr:$dst,
             (op (atomic_load_64 addr:$dst), (i64immSExt32:$src))),
            (!cast<Instruction>(Name#"64mi32") addr:$dst, (i64immSExt32:$src))>;

  def : Pat<(atomic_store_8 addr:$dst,
             (op (atomic_load_8 addr:$dst), (i8 GR8:$src))),
            (!cast<Instruction>(Name#"8mr") addr:$dst, GR8:$src)>;
  def : Pat<(atomic_store_16 addr:$dst,
             (op (atomic_load_16 addr:$dst), (i16 GR16:$src))),
            (!cast<Instruction>(Name#"16mr") addr:$dst, GR16:$src)>;
  def : Pat<(atomic_store_32 addr:$dst,
             (op (atomic_load_32 addr:$dst), (i32 GR32:$src))),
            (!cast<Instruction>(Name#"32mr") addr:$dst, GR32:$src)>;
  def : Pat<(atomic_store_64 addr:$dst,
             (op (atomic_load_64 addr:$dst), (i64 GR64:$src))),
            (!cast<Instruction>(Name#"64mr") addr:$dst, GR64:$src)>;
}
defm : RELEASE_BINOP_MI<"ADD", add>;
defm : RELEASE_BINOP_MI<"AND", and>;
defm : RELEASE_BINOP_MI<"OR", or>;
defm : RELEASE_BINOP_MI<"XOR", xor>;
defm : RELEASE_BINOP_MI<"SUB", sub>;

// Atomic load + floating point patterns.
// FIXME: This could also handle SIMD operations with *ps and *pd instructions.
multiclass ATOMIC_LOAD_FP_BINOP_MI<string Name, SDNode op> {
  def : Pat<(op FR32:$src1, (bitconvert (i32 (atomic_load_32 addr:$src2)))),
            (!cast<Instruction>(Name#"SSrm") FR32:$src1, addr:$src2)>,
            Requires<[UseSSE1]>;
  def : Pat<(op FR32:$src1, (bitconvert (i32 (atomic_load_32 addr:$src2)))),
            (!cast<Instruction>("V"#Name#"SSrm") FR32:$src1, addr:$src2)>,
            Requires<[UseAVX]>;
  def : Pat<(op FR32X:$src1, (bitconvert (i32 (atomic_load_32 addr:$src2)))),
            (!cast<Instruction>("V"#Name#"SSZrm") FR32X:$src1, addr:$src2)>,
            Requires<[HasAVX512]>;

  def : Pat<(op FR64:$src1, (bitconvert (i64 (atomic_load_64 addr:$src2)))),
            (!cast<Instruction>(Name#"SDrm") FR64:$src1, addr:$src2)>,
            Requires<[UseSSE2]>;
  def : Pat<(op FR64:$src1, (bitconvert (i64 (atomic_load_64 addr:$src2)))),
            (!cast<Instruction>("V"#Name#"SDrm") FR64:$src1, addr:$src2)>,
            Requires<[UseAVX]>;
  def : Pat<(op FR64X:$src1, (bitconvert (i64 (atomic_load_64 addr:$src2)))),
            (!cast<Instruction>("V"#Name#"SDZrm") FR64X:$src1, addr:$src2)>,
            Requires<[HasAVX512]>;
}
defm : ATOMIC_LOAD_FP_BINOP_MI<"ADD", fadd>;
// FIXME: Add fsub, fmul, fdiv, ...

multiclass RELEASE_UNOP<string Name, dag dag8, dag dag16, dag dag32,
                        dag dag64> {
  def : Pat<(atomic_store_8 addr:$dst, dag8),
            (!cast<Instruction>(Name#8m) addr:$dst)>;
  def : Pat<(atomic_store_16 addr:$dst, dag16),
            (!cast<Instruction>(Name#16m) addr:$dst)>;
  def : Pat<(atomic_store_32 addr:$dst, dag32),
            (!cast<Instruction>(Name#32m) addr:$dst)>;
  def : Pat<(atomic_store_64 addr:$dst, dag64),
            (!cast<Instruction>(Name#64m) addr:$dst)>;
}

let Predicates = [UseIncDec] in {
  defm : RELEASE_UNOP<"INC",
      (add (atomic_load_8 addr:$dst), (i8 1)),
      (add (atomic_load_16 addr:$dst), (i16 1)),
      (add (atomic_load_32 addr:$dst), (i32 1)),
      (add (atomic_load_64 addr:$dst), (i64 1))>;
  defm : RELEASE_UNOP<"DEC",
      (add (atomic_load_8 addr:$dst), (i8 -1)),
      (add (atomic_load_16 addr:$dst), (i16 -1)),
      (add (atomic_load_32 addr:$dst), (i32 -1)),
      (add (atomic_load_64 addr:$dst), (i64 -1))>;
}

defm : RELEASE_UNOP<"NEG",
    (ineg (i8 (atomic_load_8 addr:$dst))),
    (ineg (i16 (atomic_load_16 addr:$dst))),
    (ineg (i32 (atomic_load_32 addr:$dst))),
    (ineg (i64 (atomic_load_64 addr:$dst)))>;
defm : RELEASE_UNOP<"NOT",
    (not (i8 (atomic_load_8 addr:$dst))),
    (not (i16 (atomic_load_16 addr:$dst))),
    (not (i32 (atomic_load_32 addr:$dst))),
    (not (i64 (atomic_load_64 addr:$dst)))>;

def : Pat<(atomic_store_8 addr:$dst, (i8 imm:$src)),
          (MOV8mi addr:$dst, imm:$src)>;
def : Pat<(atomic_store_16 addr:$dst, (i16 imm:$src)),
          (MOV16mi addr:$dst, imm:$src)>;
def : Pat<(atomic_store_32 addr:$dst, (i32 imm:$src)),
          (MOV32mi addr:$dst, imm:$src)>;
def : Pat<(atomic_store_64 addr:$dst, (i64immSExt32:$src)),
          (MOV64mi32 addr:$dst, i64immSExt32:$src)>;

def : Pat<(atomic_store_8 addr:$dst, GR8:$src),
          (MOV8mr addr:$dst, GR8:$src)>;
def : Pat<(atomic_store_16 addr:$dst, GR16:$src),
          (MOV16mr addr:$dst, GR16:$src)>;
def : Pat<(atomic_store_32 addr:$dst, GR32:$src),
          (MOV32mr addr:$dst, GR32:$src)>;
def : Pat<(atomic_store_64 addr:$dst, GR64:$src),
          (MOV64mr addr:$dst, GR64:$src)>;

def : Pat<(i8 (atomic_load_8 addr:$src)), (MOV8rm addr:$src)>;
def : Pat<(i16 (atomic_load_16 addr:$src)), (MOV16rm addr:$src)>;
def : Pat<(i32 (atomic_load_32 addr:$src)), (MOV32rm addr:$src)>;
def : Pat<(i64 (atomic_load_64 addr:$src)), (MOV64rm addr:$src)>;
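
// Plain MOVs suffice here because naturally aligned x86 loads and stores of
// up to 8 bytes are single-copy atomic, and x86-TSO already provides
// acquire/release ordering for them; stronger orderings are typically
// handled before these patterns apply.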

// Floating point loads/stores.
def : Pat<(atomic_store_32 addr:$dst, (i32 (bitconvert (f32 FR32:$src)))),
          (MOVSSmr addr:$dst, FR32:$src)>, Requires<[UseSSE1]>;
def : Pat<(atomic_store_32 addr:$dst, (i32 (bitconvert (f32 FR32:$src)))),
          (VMOVSSmr addr:$dst, FR32:$src)>, Requires<[UseAVX]>;
def : Pat<(atomic_store_32 addr:$dst, (i32 (bitconvert (f32 FR32:$src)))),
          (VMOVSSZmr addr:$dst, FR32:$src)>, Requires<[HasAVX512]>;

def : Pat<(atomic_store_64 addr:$dst, (i64 (bitconvert (f64 FR64:$src)))),
          (MOVSDmr addr:$dst, FR64:$src)>, Requires<[UseSSE2]>;
def : Pat<(atomic_store_64 addr:$dst, (i64 (bitconvert (f64 FR64:$src)))),
          (VMOVSDmr addr:$dst, FR64:$src)>, Requires<[UseAVX]>;
def : Pat<(atomic_store_64 addr:$dst, (i64 (bitconvert (f64 FR64:$src)))),
          (VMOVSDZmr addr:$dst, FR64:$src)>, Requires<[HasAVX512]>;

def : Pat<(f32 (bitconvert (i32 (atomic_load_32 addr:$src)))),
          (MOVSSrm_alt addr:$src)>, Requires<[UseSSE1]>;
def : Pat<(f32 (bitconvert (i32 (atomic_load_32 addr:$src)))),
          (VMOVSSrm_alt addr:$src)>, Requires<[UseAVX]>;
def : Pat<(f32 (bitconvert (i32 (atomic_load_32 addr:$src)))),
          (VMOVSSZrm_alt addr:$src)>, Requires<[HasAVX512]>;

def : Pat<(f64 (bitconvert (i64 (atomic_load_64 addr:$src)))),
          (MOVSDrm_alt addr:$src)>, Requires<[UseSSE2]>;
def : Pat<(f64 (bitconvert (i64 (atomic_load_64 addr:$src)))),
          (VMOVSDrm_alt addr:$src)>, Requires<[UseAVX]>;
def : Pat<(f64 (bitconvert (i64 (atomic_load_64 addr:$src)))),
          (VMOVSDZrm_alt addr:$src)>, Requires<[HasAVX512]>;

//===----------------------------------------------------------------------===//
// DAG Pattern Matching Rules
//===----------------------------------------------------------------------===//

// Use AND/OR to store 0/-1 in memory when optimizing for minsize. This saves
// binary size compared to a regular MOV, but it introduces an unnecessary
// load, so is not suitable for regular or optsize functions.
let Predicates = [OptForMinSize] in {
  def : Pat<(simple_store (i16 0), addr:$dst), (AND16mi8 addr:$dst, 0)>;
  def : Pat<(simple_store (i32 0), addr:$dst), (AND32mi8 addr:$dst, 0)>;
  def : Pat<(simple_store (i64 0), addr:$dst), (AND64mi8 addr:$dst, 0)>;
  def : Pat<(simple_store (i16 -1), addr:$dst), (OR16mi8 addr:$dst, -1)>;
  def : Pat<(simple_store (i32 -1), addr:$dst), (OR32mi8 addr:$dst, -1)>;
  def : Pat<(simple_store (i64 -1), addr:$dst), (OR64mi8 addr:$dst, -1)>;
}

// In kernel code model, we can get the address of a label
// into a register with 'movq'.  FIXME: This is a hack, the 'imm' predicate of
// the MOV64ri32 should accept these.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri32 tconstpool  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri32 tjumptable  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri32 tglobaladdr :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri32 texternalsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper mcsym:$dst)),
          (MOV64ri32 mcsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri32 tblockaddress:$dst)>, Requires<[KernelCode]>;

// If we have small model and -static mode, it is safe to store global addresses
// directly as immediates.  FIXME: This is really a hack, the 'imm' predicate
// for MOV64mi32 should handle this sort of thing.
def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tconstpool:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tjumptable:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, texternalsym:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper mcsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, mcsym:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tblockaddress:$src)>,
          Requires<[NearData, IsNotPIC]>;

def : Pat<(i32 (X86RecoverFrameAlloc mcsym:$dst)), (MOV32ri mcsym:$dst)>;
def : Pat<(i64 (X86RecoverFrameAlloc mcsym:$dst)), (MOV64ri mcsym:$dst)>;

// tls has some funny stuff here...
// This corresponds to movabs $foo@tpoff, %rax
def : Pat<(i64 (X86Wrapper tglobaltlsaddr :$dst)),
          (MOV64ri32 tglobaltlsaddr :$dst)>;
// This corresponds to add $foo@tpoff, %rax
def : Pat<(add GR64:$src1, (X86Wrapper tglobaltlsaddr :$dst)),
          (ADD64ri32 GR64:$src1, tglobaltlsaddr :$dst)>;

// Direct PC relative function call for small code model. 32-bit displacement
// sign extended to 64-bit.
def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>;

def : Pat<(X86call_rvmarker (i64 tglobaladdr:$rvfunc), (i64 texternalsym:$dst)),
          (CALL64pcrel32_RVMARKER tglobaladdr:$rvfunc, texternalsym:$dst)>;
def : Pat<(X86call_rvmarker (i64 tglobaladdr:$rvfunc), (i64 tglobaladdr:$dst)),
          (CALL64pcrel32_RVMARKER tglobaladdr:$rvfunc, tglobaladdr:$dst)>;

// Tailcall stuff. The TCRETURN instructions execute after the epilog, so they
// can never use callee-saved registers. That is the purpose of the GR64_TC
// register classes.
//
// The only volatile register that is never used by the calling convention is
// %r11. This happens when calling a vararg function with 6 arguments.
//
// Match an X86tcret that uses less than 7 volatile registers.
def X86tcret_6regs : PatFrag<(ops node:$ptr, node:$off),
                             (X86tcret node:$ptr, node:$off), [{
  // X86tcret args: (*chain, ptr, imm, regs..., glue)
  unsigned NumRegs = 0;
  for (unsigned i = 3, e = N->getNumOperands(); i != e; ++i)
    if (isa<RegisterSDNode>(N->getOperand(i)) && ++NumRegs > 6)
      return false;
  return true;
}]>;

def X86tcret_1reg : PatFrag<(ops node:$ptr, node:$off),
                            (X86tcret node:$ptr, node:$off), [{
  // X86tcret args: (*chain, ptr, imm, regs..., glue)
  unsigned NumRegs = 1;
  const SDValue& BasePtr = cast<LoadSDNode>(N->getOperand(1))->getBasePtr();
  if (isa<FrameIndexSDNode>(BasePtr))
    NumRegs = 3;
  else if (BasePtr->getNumOperands() && isa<GlobalAddressSDNode>(BasePtr->getOperand(0)))
    NumRegs = 3;
  for (unsigned i = 3, e = N->getNumOperands(); i != e; ++i)
    if (isa<RegisterSDNode>(N->getOperand(i)) && ( NumRegs-- == 0))
      return false;
  return true;
}]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, timm:$off),
          (TCRETURNri ptr_rc_tailcall:$dst, timm:$off)>,
          Requires<[Not64BitMode, NotUseIndirectThunkCalls]>;

// FIXME: This is disabled for 32-bit PIC mode because the global base
// register which is part of the address mode may be assigned a
// callee-saved register.
// Similar to X86tcret_6regs, here we only have 1 register left
def : Pat<(X86tcret_1reg (load addr:$dst), timm:$off),
          (TCRETURNmi addr:$dst, timm:$off)>,
          Requires<[Not64BitMode, IsNotPIC, NotUseIndirectThunkCalls]>;

def : Pat<(X86tcret (i32 tglobaladdr:$dst), timm:$off),
          (TCRETURNdi tglobaladdr:$dst, timm:$off)>,
          Requires<[NotLP64]>;

def : Pat<(X86tcret (i32 texternalsym:$dst), timm:$off),
          (TCRETURNdi texternalsym:$dst, timm:$off)>,
          Requires<[NotLP64]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, timm:$off),
          (TCRETURNri64 ptr_rc_tailcall:$dst, timm:$off)>,
          Requires<[In64BitMode, NotUseIndirectThunkCalls]>;

// Don't fold loads into X86tcret requiring more than 6 regs.
// There wouldn't be enough scratch registers for base+index.
def : Pat<(X86tcret_6regs (load addr:$dst), timm:$off),
          (TCRETURNmi64 addr:$dst, timm:$off)>,
          Requires<[In64BitMode, NotUseIndirectThunkCalls]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, timm:$off),
          (INDIRECT_THUNK_TCRETURN64 ptr_rc_tailcall:$dst, timm:$off)>,
          Requires<[In64BitMode, UseIndirectThunkCalls]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, timm:$off),
          (INDIRECT_THUNK_TCRETURN32 ptr_rc_tailcall:$dst, timm:$off)>,
          Requires<[Not64BitMode, UseIndirectThunkCalls]>;

def : Pat<(X86tcret (i64 tglobaladdr:$dst), timm:$off),
          (TCRETURNdi64 tglobaladdr:$dst, timm:$off)>,
          Requires<[In64BitMode]>;

def : Pat<(X86tcret (i64 texternalsym:$dst), timm:$off),
          (TCRETURNdi64 texternalsym:$dst, timm:$off)>,
          Requires<[In64BitMode]>;

// Normal calls, with various flavors of addresses.
def : Pat<(X86call (i32 tglobaladdr:$dst)),
          (CALLpcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i32 texternalsym:$dst)),
          (CALLpcrel32 texternalsym:$dst)>;
def : Pat<(X86call (i32 imm:$dst)),
          (CALLpcrel32 imm:$dst)>, Requires<[CallImmAddr]>;

// TEST R,R is smaller than CMP R,0
def : Pat<(X86cmp GR8:$src1, 0),
          (TEST8rr GR8:$src1, GR8:$src1)>;
def : Pat<(X86cmp GR16:$src1, 0),
          (TEST16rr GR16:$src1, GR16:$src1)>;
def : Pat<(X86cmp GR32:$src1, 0),
          (TEST32rr GR32:$src1, GR32:$src1)>;
def : Pat<(X86cmp GR64:$src1, 0),
          (TEST64rr GR64:$src1, GR64:$src1)>;
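
// E.g. "testl %eax, %eax" encodes in 2 bytes, while "cmpl $0, %eax" needs an
// extra immediate byte on top of the opcode and ModRM (3 bytes).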

// zextload bool -> zextload byte
// i1 is stored in one byte in zero-extended form.
// Upper-bit cleanup should be performed before the store.
def : Pat<(zextloadi8i1 addr:$src), (MOV8rm addr:$src)>;
def : Pat<(zextloadi16i1 addr:$src),
          (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
def : Pat<(zextloadi64i1 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
// extload bool -> extload byte
// When extloading from 16-bit and smaller memory locations into 64-bit
// registers, use zero-extending loads so that the entire 64-bit register is
// defined, avoiding partial-register updates.

def : Pat<(extloadi8i1 addr:$src), (MOV8rm addr:$src)>;
def : Pat<(extloadi16i1 addr:$src),
          (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
def : Pat<(extloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
def : Pat<(extloadi16i8 addr:$src),
          (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
def : Pat<(extloadi32i8 addr:$src), (MOVZX32rm8 addr:$src)>;
def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>;
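// Illustrative example (an added note, not from the original source): an i8
// load consumed as i32 becomes
//   movzbl (%rdi), %eax
// rather than "movb (%rdi), %al", which would merge into the stale value of
// %eax and create a partial-register stall.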
// For other extloads, use subregs, since the high contents of the register are
// defined after an extload.
// NOTE: The extloadi64i32 pattern needs to be first as it will try to form
// 32-bit loads for 4 byte aligned i8/i16 loads.
def : Pat<(extloadi64i32 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src), sub_32bit)>;
def : Pat<(extloadi64i1 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
def : Pat<(extloadi64i8 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
def : Pat<(extloadi64i16 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm16 addr:$src), sub_32bit)>;
// anyext. Define these to do an explicit zero-extend to
// avoid partial-register updates.
def : Pat<(i16 (anyext GR8 :$src)), (EXTRACT_SUBREG
                                     (MOVZX32rr8 GR8 :$src), sub_16bit)>;
def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8 GR8 :$src)>;

// Except for i16 -> i32 since isel expects i16 ops to be promoted to i32.
def : Pat<(i32 (anyext GR16:$src)),
          (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, sub_16bit)>;

def : Pat<(i64 (anyext GR8 :$src)),
          (SUBREG_TO_REG (i64 0), (MOVZX32rr8 GR8 :$src), sub_32bit)>;
def : Pat<(i64 (anyext GR16:$src)),
          (SUBREG_TO_REG (i64 0), (MOVZX32rr16 GR16 :$src), sub_32bit)>;
def : Pat<(i64 (anyext GR32:$src)),
          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src, sub_32bit)>;
// If this is an anyext of the remainder of an 8-bit sdivrem, use a MOVSX
// instead of a MOVZX. The sdivrem lowering will emit a MOVSX to move
// %ah to the lower byte of a register. By using a MOVSX here we allow a
// post-isel peephole to merge the two MOVSX instructions into one.
def anyext_sdiv : PatFrag<(ops node:$lhs), (anyext node:$lhs),[{
  return (N->getOperand(0).getOpcode() == ISD::SDIVREM &&
          N->getOperand(0).getResNo() == 1);
}]>;
def : Pat<(i32 (anyext_sdiv GR8:$src)), (MOVSX32rr8 GR8:$src)>;
// Any instruction that defines a 32-bit result implicitly zeroes the high
// half of the 64-bit register, with the following exceptions. Truncate can be
// lowered to EXTRACT_SUBREG. CopyFromReg may be copying from a truncate.
// AssertSext/AssertZext/AssertAlign aren't saying anything about the upper
// 32 bits, they're probably just qualifying a CopyFromReg. FREEZE may be
// coming from a truncate. Any other 32-bit operation will zero-extend
// up to 64 bits.
def def32 : PatLeaf<(i32 GR32:$src), [{
  return N->getOpcode() != ISD::TRUNCATE &&
         N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
         N->getOpcode() != ISD::CopyFromReg &&
         N->getOpcode() != ISD::AssertSext &&
         N->getOpcode() != ISD::AssertZext &&
         N->getOpcode() != ISD::AssertAlign &&
         N->getOpcode() != ISD::FREEZE;
}]>;
// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
def : Pat<(i64 (zext def32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
def : Pat<(i64 (and (anyext def32:$src), 0x00000000FFFFFFFF)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
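// Illustrative example (an added note, not from the original source): on
// x86-64,
//   movl %edi, %eax
// writes EAX and implicitly zeroes bits 63:32 of RAX, so a following
// (zext i32 -> i64) of a def32 value costs no extra instruction.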
//===----------------------------------------------------------------------===//
// Pattern match OR as ADD
//===----------------------------------------------------------------------===//

// If safe, we prefer to pattern match OR as ADD at isel time. ADD can be
// 3-addressified into an LEA instruction to avoid copies. However, we also
// want to eventually emit these instructions as an 'or' at the end of the
// code generator to make the generated code easier to read. To do this, we
// select into "disjoint bits" pseudo ops.
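// Illustrative example (an added note, not from the original source): in
//   int f(int x) { return (x << 3) | 1; }
// the low three bits of (x << 3) are known zero, so the 'or' can be selected
// as an add and folded into a single three-address LEA:
//   leal 1(,%rdi,8), %eax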
// Treat an 'or' node as an 'add' node if the or'ed bits are known to be zero.
def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
    return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());

  KnownBits Known0 = CurDAG->computeKnownBits(N->getOperand(0), 0);
  KnownBits Known1 = CurDAG->computeKnownBits(N->getOperand(1), 0);
  return (~Known0.Zero & ~Known1.Zero) == 0;
}]>;
// (or x1, x2) -> (add x1, x2) if two operands are known not to share bits.
let AddedComplexity = 5 in { // Try this before selecting as OR.
let SchedRW = [WriteALU] in {

let isConvertibleToThreeAddress = 1, isPseudo = 1,
    Constraints = "$src1 = $dst", Defs = [EFLAGS] in {
let isCommutable = 1 in {
def ADD8rr_DB  : I<0, Pseudo, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
                   "", // orb/addb REG, REG
                   [(set GR8:$dst, (or_is_add GR8:$src1, GR8:$src2))]>;
def ADD16rr_DB : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                   "", // orw/addw REG, REG
                   [(set GR16:$dst, (or_is_add GR16:$src1, GR16:$src2))]>;
def ADD32rr_DB : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                   "", // orl/addl REG, REG
                   [(set GR32:$dst, (or_is_add GR32:$src1, GR32:$src2))]>;
def ADD64rr_DB : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "", // orq/addq REG, REG
                   [(set GR64:$dst, (or_is_add GR64:$src1, GR64:$src2))]>;
} // isCommutable
// NOTE: These are order specific, we want the ri8 forms to be listed
// first so that they are slightly preferred to the ri forms.

def ADD8ri_DB : I<0, Pseudo,
                  (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
                  "", // orb/addb REG, imm8
                  [(set GR8:$dst, (or_is_add GR8:$src1, imm:$src2))]>;
def ADD16ri8_DB : I<0, Pseudo,
                    (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
                    "", // orw/addw REG, imm8
                    [(set GR16:$dst,(or_is_add GR16:$src1,i16immSExt8:$src2))]>;
def ADD16ri_DB : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
                   "", // orw/addw REG, imm
                   [(set GR16:$dst, (or_is_add GR16:$src1, imm:$src2))]>;

def ADD32ri8_DB : I<0, Pseudo,
                    (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
                    "", // orl/addl REG, imm8
                    [(set GR32:$dst,(or_is_add GR32:$src1,i32immSExt8:$src2))]>;
def ADD32ri_DB : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
                   "", // orl/addl REG, imm
                   [(set GR32:$dst, (or_is_add GR32:$src1, imm:$src2))]>;

def ADD64ri8_DB : I<0, Pseudo,
                    (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "", // orq/addq REG, imm8
                    [(set GR64:$dst, (or_is_add GR64:$src1,
                                                i64immSExt8:$src2))]>;
def ADD64ri32_DB : I<0, Pseudo,
                     (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                     "", // orq/addq REG, imm
                     [(set GR64:$dst, (or_is_add GR64:$src1,
                                                 i64immSExt32:$src2))]>;
} // isConvertibleToThreeAddress
} // SchedRW
} // AddedComplexity
//===----------------------------------------------------------------------===//
// Pattern match XOR as ADD
//===----------------------------------------------------------------------===//

// Prefer to pattern match XOR with min_signed_value as ADD at isel time.
// ADD can be 3-addressified into an LEA instruction to avoid copies.
// (Flipping the sign bit is the same as adding min_signed_value: the carry
// out of the top bit is simply discarded.)
let AddedComplexity = 5 in {
def : Pat<(xor GR8:$src1, -128),
          (ADD8ri GR8:$src1, -128)>;
def : Pat<(xor GR16:$src1, -32768),
          (ADD16ri GR16:$src1, -32768)>;
def : Pat<(xor GR32:$src1, -2147483648),
          (ADD32ri GR32:$src1, -2147483648)>;
}
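// Illustrative example (an added note, not from the original source):
// selecting the xor as an add lets it become a three-address LEA when the
// source register must be preserved:
//   leal -2147483648(%rdi), %eax    ; eax = edi ^ 0x80000000
// instead of a movl copy followed by xorl.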
//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//
// Odd encoding trick: -128 fits into an 8-bit immediate field while
// +128 doesn't, so in this special case use a sub instead of an add.
def : Pat<(add GR16:$src1, 128),
          (SUB16ri8 GR16:$src1, -128)>;
def : Pat<(store (add (loadi16 addr:$dst), 128), addr:$dst),
          (SUB16mi8 addr:$dst, -128)>;

def : Pat<(add GR32:$src1, 128),
          (SUB32ri8 GR32:$src1, -128)>;
def : Pat<(store (add (loadi32 addr:$dst), 128), addr:$dst),
          (SUB32mi8 addr:$dst, -128)>;

def : Pat<(add GR64:$src1, 128),
          (SUB64ri8 GR64:$src1, -128)>;
def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
          (SUB64mi8 addr:$dst, -128)>;

def : Pat<(X86add_flag_nocf GR16:$src1, 128),
          (SUB16ri8 GR16:$src1, -128)>;
def : Pat<(X86add_flag_nocf GR32:$src1, 128),
          (SUB32ri8 GR32:$src1, -128)>;
def : Pat<(X86add_flag_nocf GR64:$src1, 128),
          (SUB64ri8 GR64:$src1, -128)>;
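// Illustrative encodings (an added note, not from the original source):
//   addl $128, %eax    ; 05 80 00 00 00   (5 bytes, imm32)
//   subl $-128, %eax   ; 83 E8 80         (3 bytes, imm8)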
// The same trick applies for 32-bit immediate fields in 64-bit
// instructions.
def : Pat<(add GR64:$src1, 0x0000000080000000),
          (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
          (SUB64mi32 addr:$dst, 0xffffffff80000000)>;

def : Pat<(X86add_flag_nocf GR64:$src1, 0x0000000080000000),
          (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
// To avoid needing to materialize an immediate in a register, use a 32-bit and
// with implicit zero-extension instead of a 64-bit and if the immediate has at
// least 32 bits of leading zeros. If in addition the last 32 bits can be
// represented with a sign extension of an 8-bit constant, use that.
// This can also reduce instruction size by eliminating the need for the REX
// prefix.

// AddedComplexity is needed to give priority over i64immSExt8 and i64immSExt32.
let AddedComplexity = 1 in {
def : Pat<(and GR64:$src, i64immZExt32SExt8:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri8
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo32XForm imm:$imm))),
            sub_32bit)>;

def : Pat<(and GR64:$src, i64immZExt32:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo32XForm imm:$imm))),
            sub_32bit)>;
} // AddedComplexity = 1
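// Illustrative example (an added note, not from the original source): for the
// i64 mask 0x00000000ffffff00, a 64-bit 'and' would have to materialize the
// immediate with movabsq, but
//   andl $0xffffff00, %edi
// computes the same value because the 32-bit op implicitly zeroes RDI[63:32].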
// AddedComplexity is needed due to the increased complexity on the
// i64immZExt32SExt8 and i64immZExt32 patterns above. Applying this to all
// the MOVZX patterns keeps them together in the DAGIsel tables.
let AddedComplexity = 1 in {
// r & (2^16-1) ==> movz
def : Pat<(and GR32:$src1, 0xffff),
          (MOVZX32rr16 (EXTRACT_SUBREG GR32:$src1, sub_16bit))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, sub_8bit))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
          (EXTRACT_SUBREG (MOVZX32rr8 (EXTRACT_SUBREG GR16:$src1, sub_8bit)),
                          sub_16bit)>;

// r & (2^32-1) ==> movz
def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
          (SUBREG_TO_REG (i64 0),
                         (MOV32rr (EXTRACT_SUBREG GR64:$src, sub_32bit)),
                         sub_32bit)>;
// r & (2^16-1) ==> movz
def : Pat<(and GR64:$src, 0xffff),
          (SUBREG_TO_REG (i64 0),
                         (MOVZX32rr16 (i16 (EXTRACT_SUBREG GR64:$src, sub_16bit))),
                         sub_32bit)>;
// r & (2^8-1) ==> movz
def : Pat<(and GR64:$src, 0xff),
          (SUBREG_TO_REG (i64 0),
                         (MOVZX32rr8 (i8 (EXTRACT_SUBREG GR64:$src, sub_8bit))),
                         sub_32bit)>;
} // AddedComplexity = 1
// Try to use BTS/BTR/BTC for single bit operations on the upper 32-bits.

def BTRXForm : SDNodeXForm<imm, [{
  // Transformation function: Find the lowest 0.
  return getI64Imm((uint8_t)N->getAPIntValue().countTrailingOnes(), SDLoc(N));
}]>;

def BTCBTSXForm : SDNodeXForm<imm, [{
  // Transformation function: Find the lowest 1.
  return getI64Imm((uint8_t)N->getAPIntValue().countTrailingZeros(), SDLoc(N));
}]>;

def BTRMask64 : ImmLeaf<i64, [{
  return !isUInt<32>(Imm) && !isInt<32>(Imm) && isPowerOf2_64(~Imm);
}]>;

def BTCBTSMask64 : ImmLeaf<i64, [{
  return !isInt<32>(Imm) && isPowerOf2_64(Imm);
}]>;

// For now only do this for optsize.
let AddedComplexity = 1, Predicates=[OptForSize] in {
def : Pat<(and GR64:$src1, BTRMask64:$mask),
          (BTR64ri8 GR64:$src1, (BTRXForm imm:$mask))>;
def : Pat<(or GR64:$src1, BTCBTSMask64:$mask),
          (BTS64ri8 GR64:$src1, (BTCBTSXForm imm:$mask))>;
def : Pat<(xor GR64:$src1, BTCBTSMask64:$mask),
          (BTC64ri8 GR64:$src1, (BTCBTSXForm imm:$mask))>;
}
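// Illustrative example (an added note, not from the original source): at -Os,
//   x & ~(1ULL << 40)
// selects  btrq $40, %rax  instead of materializing the mask with a movabsq
// followed by an andq.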
// sext_inreg patterns
def : Pat<(sext_inreg GR32:$src, i16),
          (MOVSX32rr16 (EXTRACT_SUBREG GR32:$src, sub_16bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, sub_8bit))>;

def : Pat<(sext_inreg GR16:$src, i8),
          (EXTRACT_SUBREG (MOVSX32rr8 (EXTRACT_SUBREG GR16:$src, sub_8bit)),
                          sub_16bit)>;

def : Pat<(sext_inreg GR64:$src, i32),
          (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
def : Pat<(sext_inreg GR64:$src, i16),
          (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, sub_16bit))>;
def : Pat<(sext_inreg GR64:$src, i8),
          (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, sub_8bit))>;
// sext, sext_load, zext, zext_load
def: Pat<(i16 (sext GR8:$src)),
         (EXTRACT_SUBREG (MOVSX32rr8 GR8:$src), sub_16bit)>;
def: Pat<(sextloadi16i8 addr:$src),
         (EXTRACT_SUBREG (MOVSX32rm8 addr:$src), sub_16bit)>;
def: Pat<(i16 (zext GR8:$src)),
         (EXTRACT_SUBREG (MOVZX32rr8 GR8:$src), sub_16bit)>;
def: Pat<(zextloadi16i8 addr:$src),
         (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
// trunc patterns
def : Pat<(i16 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                          sub_8bit)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                          sub_8bit)>,
      Requires<[Not64BitMode]>;
def : Pat<(i32 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_32bit)>;
def : Pat<(i16 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_8bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_8bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG GR16:$src, sub_8bit)>,
      Requires<[In64BitMode]>;
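// A clarifying note (an added note, not from the original source): outside
// 64-bit mode only AL/BL/CL/DL are addressable byte registers, which is why
// the patterns above first copy into GR32_ABCD/GR16_ABCD; in 64-bit mode
// every GPR has a low-byte subregister, so no copy is needed.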
def immff00_ffff : ImmLeaf<i32, [{
  return Imm >= 0xff00 && Imm <= 0xffff;
}]>;
// h-register tricks
def : Pat<(i8 (trunc (srl_su GR16:$src, (i8 8)))),
          (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc (srl_su (i32 (anyext GR16:$src)), (i8 8)))),
          (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc (srl_su GR32:$src, (i8 8)))),
          (EXTRACT_SUBREG GR32:$src, sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)),
            sub_16bit)>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR16:$src, sub_8bit_hi))>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR16:$src, sub_8bit_hi))>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>;
def : Pat<(srl (and_su GR32:$src, immff00_ffff), (i8 8)),
          (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>;
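// Illustrative example (an added note, not from the original source):
// extracting the second byte,  (x >> 8) & 0xff,  becomes a single
//   movzbl %ah, %eax
// instead of a shift followed by a mask.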
// h-register tricks.
// For now, be conservative on x86-64 and use an h-register extract only if the
// value is immediately zero-extended or stored, which are somewhat common
// cases. This uses a bunch of code to prevent a register requiring a REX prefix
// from being allocated in the same instruction as the h register, as there's
// currently no way to describe this requirement to the register allocator.

// h-register extract and zero-extend.
def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32rr8_NOREX
              (EXTRACT_SUBREG GR64:$src, sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(i64 (zext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32rr8_NOREX
              (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(i64 (anyext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32rr8_NOREX
              (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)),
            sub_32bit)>;
// h-register extract and store.
def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG GR64:$src, sub_8bit_hi))>;
def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG GR16:$src, sub_8bit_hi))>,
      Requires<[In64BitMode]>;
// Special pattern to catch the last step of __builtin_parity handling. Our
// goal is to use an xor of an h-register with the corresponding l-register.
// The above patterns would handle this on non 64-bit targets, but for 64-bit
// we need to be more careful. We're using a NOREX instruction here in case
// register allocation fails to keep the two registers together. So we need to
// make sure we can't accidentally mix R8-R15 with an h-register.
def : Pat<(X86xor_flag (i8 (trunc GR32:$src)),
                       (i8 (trunc (srl_su GR32:$src, (i8 8))))),
          (XOR8rr_NOREX (EXTRACT_SUBREG GR32:$src, sub_8bit),
                        (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>;
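// Illustrative example (an added note, not from the original source): the
// tail of __builtin_parity(x) on the low 16 bits looks roughly like
//   xorb %ah, %al    ; PF now covers all 16 bits
//   setnp %al        ; 1 if an odd number of bits are set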
// (shl x, 1) ==> (add x, x)
// Note that if x is undef (immediate or otherwise), we could theoretically
// end up with the two uses of x getting different values, producing a result
// where the least significant bit is not 0. However, the probability of this
// happening is considered low enough that this is officially not a
// "real problem".
def : Pat<(shl GR8 :$src1, (i8 1)), (ADD8rr GR8 :$src1, GR8 :$src1)>;
def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr GR16:$src1, GR16:$src1)>;
def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>;
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
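// A brief rationale (an added note, not from the original source): the add
// form is the same length as "shll $1", but on many x86 CPUs adds can issue
// on more execution ports than shifts, and an add can be 3-addressified into
// an LEA when the source must be preserved.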
def shiftMask8 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
  return isUnneededShiftMask(N, 3);
}]>;

def shiftMask16 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
  return isUnneededShiftMask(N, 4);
}]>;

def shiftMask32 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
  return isUnneededShiftMask(N, 5);
}]>;

def shiftMask64 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
  return isUnneededShiftMask(N, 6);
}]>;
// Shift amount is implicitly masked.
multiclass MaskedShiftAmountPats<SDNode frag, string name> {
  // (shift x (and y, 31)) ==> (shift x, y)
  def : Pat<(frag GR8:$src1, (shiftMask32 CL)),
            (!cast<Instruction>(name # "8rCL") GR8:$src1)>;
  def : Pat<(frag GR16:$src1, (shiftMask32 CL)),
            (!cast<Instruction>(name # "16rCL") GR16:$src1)>;
  def : Pat<(frag GR32:$src1, (shiftMask32 CL)),
            (!cast<Instruction>(name # "32rCL") GR32:$src1)>;
  def : Pat<(store (frag (loadi8 addr:$dst), (shiftMask32 CL)), addr:$dst),
            (!cast<Instruction>(name # "8mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi16 addr:$dst), (shiftMask32 CL)), addr:$dst),
            (!cast<Instruction>(name # "16mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi32 addr:$dst), (shiftMask32 CL)), addr:$dst),
            (!cast<Instruction>(name # "32mCL") addr:$dst)>;

  // (shift x (and y, 63)) ==> (shift x, y)
  def : Pat<(frag GR64:$src1, (shiftMask64 CL)),
            (!cast<Instruction>(name # "64rCL") GR64:$src1)>;
  def : Pat<(store (frag (loadi64 addr:$dst), (shiftMask64 CL)), addr:$dst),
            (!cast<Instruction>(name # "64mCL") addr:$dst)>;
}
defm : MaskedShiftAmountPats<shl, "SHL">;
defm : MaskedShiftAmountPats<srl, "SHR">;
defm : MaskedShiftAmountPats<sra, "SAR">;
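// Illustrative example (an added note, not from the original source): for
//   x << (n & 31)    (32-bit x)
// the 'and' is dropped because SHL already uses only CL[4:0]:
//   movl %esi, %ecx
//   shll %cl, %edi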
// ROL/ROR instructions allow a stronger mask optimization than shift for 8- and
// 16-bit. We can remove a mask of any (bitwidth - 1) on the rotation amount
// because over-rotating produces the same result. This is noted in the Intel
// docs with: "tempCOUNT <- (COUNT & COUNTMASK) MOD SIZE". Masking the rotation
// amount could affect EFLAGS results, but that does not matter because we are
// not tracking flags for these nodes.
multiclass MaskedRotateAmountPats<SDNode frag, string name> {
  // (rot x (and y, BitWidth - 1)) ==> (rot x, y)
  def : Pat<(frag GR8:$src1, (shiftMask8 CL)),
            (!cast<Instruction>(name # "8rCL") GR8:$src1)>;
  def : Pat<(frag GR16:$src1, (shiftMask16 CL)),
            (!cast<Instruction>(name # "16rCL") GR16:$src1)>;
  def : Pat<(frag GR32:$src1, (shiftMask32 CL)),
            (!cast<Instruction>(name # "32rCL") GR32:$src1)>;
  def : Pat<(store (frag (loadi8 addr:$dst), (shiftMask8 CL)), addr:$dst),
            (!cast<Instruction>(name # "8mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi16 addr:$dst), (shiftMask16 CL)), addr:$dst),
            (!cast<Instruction>(name # "16mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi32 addr:$dst), (shiftMask32 CL)), addr:$dst),
            (!cast<Instruction>(name # "32mCL") addr:$dst)>;

  // (rot x (and y, 63)) ==> (rot x, y)
  def : Pat<(frag GR64:$src1, (shiftMask64 CL)),
            (!cast<Instruction>(name # "64rCL") GR64:$src1)>;
  def : Pat<(store (frag (loadi64 addr:$dst), (shiftMask64 CL)), addr:$dst),
            (!cast<Instruction>(name # "64mCL") addr:$dst)>;
}
defm : MaskedRotateAmountPats<rotl, "ROL">;
defm : MaskedRotateAmountPats<rotr, "ROR">;

// Double "funnel" shift amount is implicitly masked.
// (fshl/fshr x (and y, 31)) ==> (fshl/fshr x, y) (NOTE: modulo32)
def : Pat<(X86fshl GR16:$src1, GR16:$src2, (shiftMask32 CL)),
          (SHLD16rrCL GR16:$src1, GR16:$src2)>;
def : Pat<(X86fshr GR16:$src2, GR16:$src1, (shiftMask32 CL)),
          (SHRD16rrCL GR16:$src1, GR16:$src2)>;

// (fshl/fshr x (and y, 31)) ==> (fshl/fshr x, y)
def : Pat<(fshl GR32:$src1, GR32:$src2, (shiftMask32 CL)),
          (SHLD32rrCL GR32:$src1, GR32:$src2)>;
def : Pat<(fshr GR32:$src2, GR32:$src1, (shiftMask32 CL)),
          (SHRD32rrCL GR32:$src1, GR32:$src2)>;

// (fshl/fshr x (and y, 63)) ==> (fshl/fshr x, y)
def : Pat<(fshl GR64:$src1, GR64:$src2, (shiftMask64 CL)),
          (SHLD64rrCL GR64:$src1, GR64:$src2)>;
def : Pat<(fshr GR64:$src2, GR64:$src1, (shiftMask64 CL)),
          (SHRD64rrCL GR64:$src1, GR64:$src2)>;
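// Illustrative example (an added note, not from the original source): for
//   fshl(a, b, n & 31)    (i32)
// the mask on the count is dropped because SHLD likewise uses CL modulo 32:
//   shldl %cl, %esi, %edi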
let Predicates = [HasBMI2] in {
  let AddedComplexity = 1 in {
    def : Pat<(sra GR32:$src1, (shiftMask32 GR8:$src2)),
              (SARX32rr GR32:$src1,
                        (INSERT_SUBREG
                          (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
    def : Pat<(sra GR64:$src1, (shiftMask64 GR8:$src2)),
              (SARX64rr GR64:$src1,
                        (INSERT_SUBREG
                          (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

    def : Pat<(srl GR32:$src1, (shiftMask32 GR8:$src2)),
              (SHRX32rr GR32:$src1,
                        (INSERT_SUBREG
                          (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
    def : Pat<(srl GR64:$src1, (shiftMask64 GR8:$src2)),
              (SHRX64rr GR64:$src1,
                        (INSERT_SUBREG
                          (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

    def : Pat<(shl GR32:$src1, (shiftMask32 GR8:$src2)),
              (SHLX32rr GR32:$src1,
                        (INSERT_SUBREG
                          (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
    def : Pat<(shl GR64:$src1, (shiftMask64 GR8:$src2)),
              (SHLX64rr GR64:$src1,
                        (INSERT_SUBREG
                          (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  }
  def : Pat<(sra (loadi32 addr:$src1), (shiftMask32 GR8:$src2)),
            (SARX32rm addr:$src1,
                      (INSERT_SUBREG
                        (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(sra (loadi64 addr:$src1), (shiftMask64 GR8:$src2)),
            (SARX64rm addr:$src1,
                      (INSERT_SUBREG
                        (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

  def : Pat<(srl (loadi32 addr:$src1), (shiftMask32 GR8:$src2)),
            (SHRX32rm addr:$src1,
                      (INSERT_SUBREG
                        (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(srl (loadi64 addr:$src1), (shiftMask64 GR8:$src2)),
            (SHRX64rm addr:$src1,
                      (INSERT_SUBREG
                        (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

  def : Pat<(shl (loadi32 addr:$src1), (shiftMask32 GR8:$src2)),
            (SHLX32rm addr:$src1,
                      (INSERT_SUBREG
                        (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(shl (loadi64 addr:$src1), (shiftMask64 GR8:$src2)),
            (SHLX64rm addr:$src1,
                      (INSERT_SUBREG
                        (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
}
// Use BTR/BTS/BTC for clearing/setting/toggling a bit in a variable location.
multiclass one_bit_patterns<RegisterClass RC, ValueType VT, Instruction BTR,
                            Instruction BTS, Instruction BTC,
                            PatFrag ShiftMask> {
  def : Pat<(and RC:$src1, (rotl -2, GR8:$src2)),
            (BTR RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(or RC:$src1, (shl 1, GR8:$src2)),
            (BTS RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(xor RC:$src1, (shl 1, GR8:$src2)),
            (BTC RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

  // Similar to above, but removing unneeded masking of the shift amount.
  def : Pat<(and RC:$src1, (rotl -2, (ShiftMask GR8:$src2))),
            (BTR RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(or RC:$src1, (shl 1, (ShiftMask GR8:$src2))),
            (BTS RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(xor RC:$src1, (shl 1, (ShiftMask GR8:$src2))),
            (BTC RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
}

defm : one_bit_patterns<GR16, i16, BTR16rr, BTS16rr, BTC16rr, shiftMask16>;
defm : one_bit_patterns<GR32, i32, BTR32rr, BTS32rr, BTC32rr, shiftMask32>;
defm : one_bit_patterns<GR64, i64, BTR64rr, BTS64rr, BTC64rr, shiftMask64>;
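// Illustrative example (an added note, not from the original source): with a
// variable bit index n,
//   x & ~(1u << n)  ->  btrl %ecx, %eax
//   x |  (1u << n)  ->  btsl %ecx, %eax
//   x ^  (1u << n)  ->  btcl %ecx, %eax
// Note that ~(1 << n) is what the (rotl -2, n) fragment above matches, since
// rotating ...11111110 left by n places the single 0 at bit n.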
//===----------------------------------------------------------------------===//
// EFLAGS-defining Patterns
//===----------------------------------------------------------------------===//

// add reg, reg
def : Pat<(add GR8 :$src1, GR8 :$src2), (ADD8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(add GR16:$src1, GR16:$src2), (ADD16rr GR16:$src1, GR16:$src2)>;
def : Pat<(add GR32:$src1, GR32:$src2), (ADD32rr GR32:$src1, GR32:$src2)>;
def : Pat<(add GR64:$src1, GR64:$src2), (ADD64rr GR64:$src1, GR64:$src2)>;

// add reg, mem
def : Pat<(add GR8:$src1, (loadi8 addr:$src2)),
          (ADD8rm GR8:$src1, addr:$src2)>;
def : Pat<(add GR16:$src1, (loadi16 addr:$src2)),
          (ADD16rm GR16:$src1, addr:$src2)>;
def : Pat<(add GR32:$src1, (loadi32 addr:$src2)),
          (ADD32rm GR32:$src1, addr:$src2)>;
def : Pat<(add GR64:$src1, (loadi64 addr:$src2)),
          (ADD64rm GR64:$src1, addr:$src2)>;

// add reg, imm
def : Pat<(add GR8 :$src1, imm:$src2), (ADD8ri  GR8 :$src1, imm:$src2)>;
def : Pat<(add GR16:$src1, imm:$src2), (ADD16ri GR16:$src1, imm:$src2)>;
def : Pat<(add GR32:$src1, imm:$src2), (ADD32ri GR32:$src1, imm:$src2)>;
def : Pat<(add GR16:$src1, i16immSExt8:$src2),
          (ADD16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(add GR32:$src1, i32immSExt8:$src2),
          (ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
// sub reg, reg
def : Pat<(sub GR8 :$src1, GR8 :$src2), (SUB8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(sub GR16:$src1, GR16:$src2), (SUB16rr GR16:$src1, GR16:$src2)>;
def : Pat<(sub GR32:$src1, GR32:$src2), (SUB32rr GR32:$src1, GR32:$src2)>;
def : Pat<(sub GR64:$src1, GR64:$src2), (SUB64rr GR64:$src1, GR64:$src2)>;

// sub reg, mem
def : Pat<(sub GR8:$src1, (loadi8 addr:$src2)),
          (SUB8rm GR8:$src1, addr:$src2)>;
def : Pat<(sub GR16:$src1, (loadi16 addr:$src2)),
          (SUB16rm GR16:$src1, addr:$src2)>;
def : Pat<(sub GR32:$src1, (loadi32 addr:$src2)),
          (SUB32rm GR32:$src1, addr:$src2)>;
def : Pat<(sub GR64:$src1, (loadi64 addr:$src2)),
          (SUB64rm GR64:$src1, addr:$src2)>;

// sub reg, imm
def : Pat<(sub GR8:$src1, imm:$src2),
          (SUB8ri GR8:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, imm:$src2),
          (SUB16ri GR16:$src1, imm:$src2)>;
def : Pat<(sub GR32:$src1, imm:$src2),
          (SUB32ri GR32:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, i16immSExt8:$src2),
          (SUB16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(sub GR32:$src1, i32immSExt8:$src2),
          (SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt8:$src2),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt32:$src2),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;

// sub 0, reg
def : Pat<(X86sub_flag 0, GR8 :$src), (NEG8r  GR8 :$src)>;
def : Pat<(X86sub_flag 0, GR16:$src), (NEG16r GR16:$src)>;
def : Pat<(X86sub_flag 0, GR32:$src), (NEG32r GR32:$src)>;
def : Pat<(X86sub_flag 0, GR64:$src), (NEG64r GR64:$src)>;
// mul reg, reg
def : Pat<(mul GR16:$src1, GR16:$src2),
          (IMUL16rr GR16:$src1, GR16:$src2)>;
def : Pat<(mul GR32:$src1, GR32:$src2),
          (IMUL32rr GR32:$src1, GR32:$src2)>;
def : Pat<(mul GR64:$src1, GR64:$src2),
          (IMUL64rr GR64:$src1, GR64:$src2)>;

// mul reg, mem
def : Pat<(mul GR16:$src1, (loadi16 addr:$src2)),
          (IMUL16rm GR16:$src1, addr:$src2)>;
def : Pat<(mul GR32:$src1, (loadi32 addr:$src2)),
          (IMUL32rm GR32:$src1, addr:$src2)>;
def : Pat<(mul GR64:$src1, (loadi64 addr:$src2)),
          (IMUL64rm GR64:$src1, addr:$src2)>;

// mul reg, imm
def : Pat<(mul GR16:$src1, imm:$src2),
          (IMUL16rri GR16:$src1, imm:$src2)>;
def : Pat<(mul GR32:$src1, imm:$src2),
          (IMUL32rri GR32:$src1, imm:$src2)>;
def : Pat<(mul GR16:$src1, i16immSExt8:$src2),
          (IMUL16rri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(mul GR32:$src1, i32immSExt8:$src2),
          (IMUL32rri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt8:$src2),
          (IMUL64rri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt32:$src2),
          (IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;

// reg = mul mem, imm
def : Pat<(mul (loadi16 addr:$src1), imm:$src2),
          (IMUL16rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), imm:$src2),
          (IMUL32rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi16 addr:$src1), i16immSExt8:$src2),
          (IMUL16rmi8 addr:$src1, i16immSExt8:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), i32immSExt8:$src2),
          (IMUL32rmi8 addr:$src1, i32immSExt8:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt8:$src2),
          (IMUL64rmi8 addr:$src1, i64immSExt8:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt32:$src2),
          (IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;
// Increment/Decrement reg.
// Do not form INC/DEC if it is slow on the target.
let Predicates = [UseIncDec] in {
  def : Pat<(add GR8:$src, 1),   (INC8r GR8:$src)>;
  def : Pat<(add GR16:$src, 1),  (INC16r GR16:$src)>;
  def : Pat<(add GR32:$src, 1),  (INC32r GR32:$src)>;
  def : Pat<(add GR64:$src, 1),  (INC64r GR64:$src)>;
  def : Pat<(add GR8:$src, -1),  (DEC8r GR8:$src)>;
  def : Pat<(add GR16:$src, -1), (DEC16r GR16:$src)>;
  def : Pat<(add GR32:$src, -1), (DEC32r GR32:$src)>;
  def : Pat<(add GR64:$src, -1), (DEC64r GR64:$src)>;

  def : Pat<(X86add_flag_nocf GR8:$src, -1),  (DEC8r GR8:$src)>;
  def : Pat<(X86add_flag_nocf GR16:$src, -1), (DEC16r GR16:$src)>;
  def : Pat<(X86add_flag_nocf GR32:$src, -1), (DEC32r GR32:$src)>;
  def : Pat<(X86add_flag_nocf GR64:$src, -1), (DEC64r GR64:$src)>;
  def : Pat<(X86sub_flag_nocf GR8:$src, -1),  (INC8r GR8:$src)>;
  def : Pat<(X86sub_flag_nocf GR16:$src, -1), (INC16r GR16:$src)>;
  def : Pat<(X86sub_flag_nocf GR32:$src, -1), (INC32r GR32:$src)>;
  def : Pat<(X86sub_flag_nocf GR64:$src, -1), (INC64r GR64:$src)>;
}
// or reg/reg.
def : Pat<(or GR8 :$src1, GR8 :$src2), (OR8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(or GR16:$src1, GR16:$src2), (OR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(or GR32:$src1, GR32:$src2), (OR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(or GR64:$src1, GR64:$src2), (OR64rr GR64:$src1, GR64:$src2)>;

// or reg/mem
def : Pat<(or GR8:$src1, (loadi8 addr:$src2)),
          (OR8rm GR8:$src1, addr:$src2)>;
def : Pat<(or GR16:$src1, (loadi16 addr:$src2)),
          (OR16rm GR16:$src1, addr:$src2)>;
def : Pat<(or GR32:$src1, (loadi32 addr:$src2)),
          (OR32rm GR32:$src1, addr:$src2)>;
def : Pat<(or GR64:$src1, (loadi64 addr:$src2)),
          (OR64rm GR64:$src1, addr:$src2)>;

// or reg/imm
def : Pat<(or GR8 :$src1, imm:$src2), (OR8ri  GR8 :$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, imm:$src2), (OR16ri GR16:$src1, imm:$src2)>;
def : Pat<(or GR32:$src1, imm:$src2), (OR32ri GR32:$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, i16immSExt8:$src2),
          (OR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(or GR32:$src1, i32immSExt8:$src2),
          (OR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt8:$src2),
          (OR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt32:$src2),
          (OR64ri32 GR64:$src1, i64immSExt32:$src2)>;
// xor reg/reg
def : Pat<(xor GR8 :$src1, GR8 :$src2), (XOR8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(xor GR16:$src1, GR16:$src2), (XOR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(xor GR32:$src1, GR32:$src2), (XOR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(xor GR64:$src1, GR64:$src2), (XOR64rr GR64:$src1, GR64:$src2)>;

// xor reg/mem
def : Pat<(xor GR8:$src1, (loadi8 addr:$src2)),
          (XOR8rm GR8:$src1, addr:$src2)>;
def : Pat<(xor GR16:$src1, (loadi16 addr:$src2)),
          (XOR16rm GR16:$src1, addr:$src2)>;
def : Pat<(xor GR32:$src1, (loadi32 addr:$src2)),
          (XOR32rm GR32:$src1, addr:$src2)>;
def : Pat<(xor GR64:$src1, (loadi64 addr:$src2)),
          (XOR64rm GR64:$src1, addr:$src2)>;

// xor reg/imm
def : Pat<(xor GR8:$src1, imm:$src2),
          (XOR8ri GR8:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, imm:$src2),
          (XOR16ri GR16:$src1, imm:$src2)>;
def : Pat<(xor GR32:$src1, imm:$src2),
          (XOR32ri GR32:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, i16immSExt8:$src2),
          (XOR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(xor GR32:$src1, i32immSExt8:$src2),
          (XOR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt8:$src2),
          (XOR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt32:$src2),
          (XOR64ri32 GR64:$src1, i64immSExt32:$src2)>;
// and reg/reg
def : Pat<(and GR8 :$src1, GR8 :$src2), (AND8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(and GR16:$src1, GR16:$src2), (AND16rr GR16:$src1, GR16:$src2)>;
def : Pat<(and GR32:$src1, GR32:$src2), (AND32rr GR32:$src1, GR32:$src2)>;
def : Pat<(and GR64:$src1, GR64:$src2), (AND64rr GR64:$src1, GR64:$src2)>;

// and reg/mem
def : Pat<(and GR8:$src1, (loadi8 addr:$src2)),
          (AND8rm GR8:$src1, addr:$src2)>;
def : Pat<(and GR16:$src1, (loadi16 addr:$src2)),
          (AND16rm GR16:$src1, addr:$src2)>;
def : Pat<(and GR32:$src1, (loadi32 addr:$src2)),
          (AND32rm GR32:$src1, addr:$src2)>;
def : Pat<(and GR64:$src1, (loadi64 addr:$src2)),
          (AND64rm GR64:$src1, addr:$src2)>;

// and reg/imm
def : Pat<(and GR8:$src1, imm:$src2),
          (AND8ri GR8:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, imm:$src2),
          (AND16ri GR16:$src1, imm:$src2)>;
def : Pat<(and GR32:$src1, imm:$src2),
          (AND32ri GR32:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, i16immSExt8:$src2),
          (AND16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(and GR32:$src1, i32immSExt8:$src2),
          (AND32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt8:$src2),
          (AND64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt32:$src2),
          (AND64ri32 GR64:$src1, i64immSExt32:$src2)>;
// Bit scan instruction patterns to match explicit zero-undef behavior.
def : Pat<(cttz_zero_undef GR16:$src), (BSF16rr GR16:$src)>;
def : Pat<(cttz_zero_undef GR32:$src), (BSF32rr GR32:$src)>;
def : Pat<(cttz_zero_undef GR64:$src), (BSF64rr GR64:$src)>;
def : Pat<(cttz_zero_undef (loadi16 addr:$src)), (BSF16rm addr:$src)>;
def : Pat<(cttz_zero_undef (loadi32 addr:$src)), (BSF32rm addr:$src)>;
def : Pat<(cttz_zero_undef (loadi64 addr:$src)), (BSF64rm addr:$src)>;
// When HasMOVBE is enabled it is possible to get a non-legalized
// register-register 16 bit bswap. This maps it to a ROL instruction.
let Predicates = [HasMOVBE] in {
 def : Pat<(bswap GR16:$src), (ROL16ri GR16:$src, (i8 8))>;