//===- X86InstrCompiler.td - Compiler Pseudos and Patterns -*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the various pseudo instructions used by the compiler,
// as well as Pat patterns used during instruction selection.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Pattern Matching Support

def GetLo32XForm : SDNodeXForm<imm, [{
  // Transformation function: get the low 32 bits.
  return getI32Imm((uint32_t)N->getZExtValue(), SDLoc(N));
}]>;

//===----------------------------------------------------------------------===//
// Random Pseudo Instructions.

// PIC base construction.  This expands to code that looks like this:
//     call  $next_inst
//     popl  %destreg
let hasSideEffects = 0, isNotDuplicable = 1, Uses = [ESP, SSP],
    SchedRW = [WriteJump] in
  def MOVPC32r : Ii32<0xE8, Pseudo, (outs GR32:$reg), (ins i32imm:$label),
                      "", []>;
// ADJCALLSTACKDOWN/UP implicitly use/def ESP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [ESP, EFLAGS, SSP], Uses = [ESP, SSP], SchedRW = [WriteALU] in {
def ADJCALLSTACKDOWN32 : I<0, Pseudo, (outs),
                           (ins i32imm:$amt1, i32imm:$amt2, i32imm:$amt3),
                           "#ADJCALLSTACKDOWN", []>, Requires<[NotLP64]>;
def ADJCALLSTACKUP32   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                           Requires<[NotLP64]>;
}
def : Pat<(X86callseq_start timm:$amt1, timm:$amt2),
          (ADJCALLSTACKDOWN32 i32imm:$amt1, i32imm:$amt2, 0)>, Requires<[NotLP64]>;
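
// For illustration: after frame finalization, a matched call sequence such as
//   ADJCALLSTACKDOWN32 16, 0, 0 ... ADJCALLSTACKUP32 16, 0
// is typically rewritten to "subl $16, %esp" ... "addl $16, %esp" (or folded
// away entirely), which is why EFLAGS is pessimistically marked as clobbered.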
// ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [RSP, EFLAGS, SSP], Uses = [RSP, SSP], SchedRW = [WriteALU] in {
def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs),
                           (ins i32imm:$amt1, i32imm:$amt2, i32imm:$amt3),
                           "#ADJCALLSTACKDOWN", []>, Requires<[IsLP64]>;
def ADJCALLSTACKUP64   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                           Requires<[IsLP64]>;
}
def : Pat<(X86callseq_start timm:$amt1, timm:$amt2),
          (ADJCALLSTACKDOWN64 i32imm:$amt1, i32imm:$amt2, 0)>, Requires<[IsLP64]>;
let SchedRW = [WriteSystem] in {

// x86-64 va_start lowering magic.
let usesCustomInserter = 1, Defs = [EFLAGS] in {
def VASTART_SAVE_XMM_REGS : I<0, Pseudo,
                              (outs),
                              (ins GR8:$al,
                                   i32imm:$regsavefi, i32imm:$offset,
                                   variable_ops),
                              "#VASTART_SAVE_XMM_REGS $al, $regsavefi, $offset",
                              [(X86vastart_save_xmm_regs GR8:$al,
                                                         timm:$regsavefi,
                                                         timm:$offset),
                               (implicit EFLAGS)]>;

// The VAARG_64 and VAARG_X32 pseudo-instructions take the address of the
// va_list, and place the address of the next argument into a register.
let Defs = [EFLAGS] in {
def VAARG_64 : I<0, Pseudo,
                 (outs GR64:$dst),
                 (ins i8mem:$ap, i32imm:$size, i8imm:$mode, i32imm:$align),
                 "#VAARG_64 $dst, $ap, $size, $mode, $align",
                 [(set GR64:$dst,
                    (X86vaarg64 addr:$ap, timm:$size, timm:$mode, timm:$align)),
                  (implicit EFLAGS)]>, Requires<[In64BitMode, IsLP64]>;
def VAARG_X32 : I<0, Pseudo,
                 (outs GR32:$dst),
                 (ins i8mem:$ap, i32imm:$size, i8imm:$mode, i32imm:$align),
                 "#VAARG_X32 $dst, $ap, $size, $mode, $align",
                 [(set GR32:$dst,
                    (X86vaargx32 addr:$ap, timm:$size, timm:$mode, timm:$align)),
                  (implicit EFLAGS)]>, Requires<[In64BitMode, NotLP64]>;
}
}
// When using segmented stacks these are lowered into instructions which first
// check if the current stacklet has enough free memory. If it does, memory is
// allocated by bumping the stack pointer. Otherwise memory is allocated from
// the heap.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
def SEG_ALLOCA_32 : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$size),
                      "# variable sized alloca for segmented stacks",
                      [(set GR32:$dst,
                         (X86SegAlloca GR32:$size))]>,
                    Requires<[NotLP64]>;

let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
def SEG_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
                      "# variable sized alloca for segmented stacks",
                      [(set GR64:$dst,
                         (X86SegAlloca GR64:$size))]>,
                    Requires<[In64BitMode]>;
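
// For illustration: the expansion compares the would-be stack pointer against
// the stacklet limit kept in thread-local storage and, if there is not enough
// room, calls a runtime helper such as __morestack_allocate_stack_space to get
// heap memory (a sketch; register choices and the limit check vary by target).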
// To protect against stack clash, dynamic allocation should perform a memory
// probe at each page.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
def PROBED_ALLOCA_32 : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$size),
                         "# variable sized alloca with probing",
                         [(set GR32:$dst,
                            (X86ProbedAlloca GR32:$size))]>,
                       Requires<[NotLP64]>;

let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
def PROBED_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
                         "# variable sized alloca with probing",
                         [(set GR64:$dst,
                            (X86ProbedAlloca GR64:$size))]>,
                       Requires<[In64BitMode]>;

let hasNoSchedulingInfo = 1 in
def STACKALLOC_W_PROBING : I<0, Pseudo, (outs), (ins i64imm:$stacksize),
                             "# fixed size alloca with probing",
                             []>;
// Dynamic stack allocation yields a _chkstk or _alloca call for all Windows
// targets. These calls are needed to probe the stack when allocating more than
// 4k bytes in one go. Touching the stack at 4K increments is necessary to
// ensure that the guard pages used by the OS virtual memory manager are
// allocated in correct sequence.
// The main point of having a separate instruction is the extra unmodelled
// effects (compared to ordinary calls) like the stack pointer change.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
def WIN_ALLOCA_32 : I<0, Pseudo, (outs), (ins GR32:$size),
                      "# dynamic stack allocation",
                      [(X86WinAlloca GR32:$size)]>,
                      Requires<[NotLP64]>;

let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
def WIN_ALLOCA_64 : I<0, Pseudo, (outs), (ins GR64:$size),
                      "# dynamic stack allocation",
                      [(X86WinAlloca GR64:$size)]>,
                      Requires<[In64BitMode]>;
} // SchedRW
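
// For illustration, on Win64 WIN_ALLOCA_64 typically becomes roughly:
//   movq %reg, %rax       # allocation size
//   callq __chkstk        # probes each 4K page of the new area
//   subq %rax, %rsp
// (a sketch; the exact probing helper and sequence depend on the target.)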
// These instructions XOR the frame pointer into a GPR. They are used in some
// stack protection schemes. These are post-RA pseudos because we only know the
// frame register after register allocation.
let Constraints = "$src = $dst", isMoveImm = 1, isPseudo = 1, Defs = [EFLAGS] in {
def XOR32_FP : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src),
                 "xorl\t$$FP, $src", []>,
                 Requires<[NotLP64]>, Sched<[WriteALU]>;
def XOR64_FP : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src),
                 "xorq\t$$FP, $src", []>,
                 Requires<[In64BitMode]>, Sched<[WriteALU]>;
}
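
// For illustration: with %rbp as the frame register, XOR64_FP prints as
// "xorq %rbp, $src", mixing the frame address into (for example) a stack
// guard value so the guard becomes frame-dependent (a sketch of one use).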
//===----------------------------------------------------------------------===//
// EH Pseudo Instructions
//
let SchedRW = [WriteSystem] in {
let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN   : I<0xC3, RawFrm, (outs), (ins GR32:$addr),
                    "ret\t#eh_return, addr: $addr",
                    [(X86ehret GR32:$addr)]>, Sched<[WriteJumpLd]>;
}

let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN64 : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
                    "ret\t#eh_return, addr: $addr",
                    [(X86ehret GR64:$addr)]>, Sched<[WriteJumpLd]>;
}

let isTerminator = 1, hasSideEffects = 1, isBarrier = 1, hasCtrlDep = 1,
    isCodeGenOnly = 1, isReturn = 1, isEHScopeReturn = 1 in {
  def CLEANUPRET : I<0, Pseudo, (outs), (ins), "# CLEANUPRET", [(cleanupret)]>;

  // CATCHRET needs a custom inserter for SEH.
  let usesCustomInserter = 1 in
    def CATCHRET : I<0, Pseudo, (outs), (ins brtarget32:$dst, brtarget32:$from),
                     "# CATCHRET",
                     [(catchret bb:$dst, bb:$from)]>;
}
let hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1,
    usesCustomInserter = 1 in {
  def EH_SjLj_SetJmp32  : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$buf),
                            "#EH_SJLJ_SETJMP32",
                            [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
                          Requires<[Not64BitMode]>;
  def EH_SjLj_SetJmp64  : I<0, Pseudo, (outs GR32:$dst), (ins i64mem:$buf),
                            "#EH_SJLJ_SETJMP64",
                            [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
                          Requires<[In64BitMode]>;
  let isTerminator = 1 in {
  def EH_SjLj_LongJmp32 : I<0, Pseudo, (outs), (ins i32mem:$buf),
                            "#EH_SJLJ_LONGJMP32",
                            [(X86eh_sjlj_longjmp addr:$buf)]>,
                          Requires<[Not64BitMode]>;
  def EH_SjLj_LongJmp64 : I<0, Pseudo, (outs), (ins i64mem:$buf),
                            "#EH_SJLJ_LONGJMP64",
                            [(X86eh_sjlj_longjmp addr:$buf)]>,
                          Requires<[In64BitMode]>;
  }
}

let isBranch = 1, isTerminator = 1, isCodeGenOnly = 1 in {
  def EH_SjLj_Setup : I<0, Pseudo, (outs), (ins brtarget:$dst),
                        "#EH_SjLj_Setup\t$dst", []>;
}
} // SchedRW
//===----------------------------------------------------------------------===//
// Pseudo instructions used by unwind info.
//
let isPseudo = 1, SchedRW = [WriteSystem] in {
  def SEH_PushReg : I<0, Pseudo, (outs), (ins i32imm:$reg),
                      "#SEH_PushReg $reg", []>;
  def SEH_SaveReg : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$dst),
                      "#SEH_SaveReg $reg, $dst", []>;
  def SEH_SaveXMM : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$dst),
                      "#SEH_SaveXMM $reg, $dst", []>;
  def SEH_StackAlloc : I<0, Pseudo, (outs), (ins i32imm:$size),
                         "#SEH_StackAlloc $size", []>;
  def SEH_StackAlign : I<0, Pseudo, (outs), (ins i32imm:$align),
                         "#SEH_StackAlign $align", []>;
  def SEH_SetFrame : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$offset),
                       "#SEH_SetFrame $reg, $offset", []>;
  def SEH_PushFrame : I<0, Pseudo, (outs), (ins i1imm:$mode),
                        "#SEH_PushFrame $mode", []>;
  def SEH_EndPrologue : I<0, Pseudo, (outs), (ins),
                          "#SEH_EndPrologue", []>;
  def SEH_Epilogue : I<0, Pseudo, (outs), (ins),
                       "#SEH_Epilogue", []>;
}
//===----------------------------------------------------------------------===//
// Pseudo instructions used by segmented stacks.
//===----------------------------------------------------------------------===//

// This is lowered into a RET instruction by MCInstLower. We need
// this so that we don't have to have a MachineBasicBlock which ends
// with a RET and also has successors.
let isPseudo = 1, SchedRW = [WriteJumpLd] in {
def MORESTACK_RET: I<0, Pseudo, (outs), (ins), "", []>;

// This instruction is lowered to a RET followed by a MOV. The two
// instructions are not generated on a higher level since then the
// verifier sees a MachineBasicBlock ending with a non-terminator.
def MORESTACK_RET_RESTORE_R10 : I<0, Pseudo, (outs), (ins), "", []>;
}
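
// For illustration, MORESTACK_RET_RESTORE_R10 is emitted as roughly:
//   ret
//   movq %rax, %r10
// where the trailing mov is reached only when __morestack transfers control
// back past the ret (a sketch; MCInstLower has the authoritative expansion).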
//===----------------------------------------------------------------------===//
// Alias Instructions
//===----------------------------------------------------------------------===//

// Alias instruction mapping movr0 to xor.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1,
    isPseudo = 1, isMoveImm = 1, AddedComplexity = 10 in
def MOV32r0 : I<0, Pseudo, (outs GR32:$dst), (ins), "",
                [(set GR32:$dst, 0)]>, Sched<[WriteZero]>;

// Other widths can also make use of the 32-bit xor, which may have a smaller
// encoding and avoid partial register updates.
let AddedComplexity = 10 in {
def : Pat<(i8 0), (EXTRACT_SUBREG (MOV32r0), sub_8bit)>;
def : Pat<(i16 0), (EXTRACT_SUBREG (MOV32r0), sub_16bit)>;
def : Pat<(i64 0), (SUBREG_TO_REG (i64 0), (MOV32r0), sub_32bit)>;
}
let Predicates = [OptForSize, Not64BitMode],
    AddedComplexity = 10 in {
  let SchedRW = [WriteALU] in {
    // Pseudo instructions for materializing 1 and -1 using XOR+INC/DEC,
    // which only require 3 bytes compared to MOV32ri which requires 5.
    let Defs = [EFLAGS], isReMaterializable = 1, isPseudo = 1 in {
      def MOV32r1 : I<0, Pseudo, (outs GR32:$dst), (ins), "",
                      [(set GR32:$dst, 1)]>;
      def MOV32r_1 : I<0, Pseudo, (outs GR32:$dst), (ins), "",
                       [(set GR32:$dst, -1)]>;
    }
  } // SchedRW

  // MOV16ri is 4 bytes, so the instructions above are smaller.
  def : Pat<(i16 1), (EXTRACT_SUBREG (MOV32r1), sub_16bit)>;
  def : Pat<(i16 -1), (EXTRACT_SUBREG (MOV32r_1), sub_16bit)>;
}
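
// For example, materializing the constant 1 this way costs 3 bytes:
//   xorl %eax, %eax       # 31 C0
//   incl %eax             # 40 (one byte outside 64-bit mode)
// versus 5 bytes for "movl $1, %eax" (B8 01 00 00 00).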
let isReMaterializable = 1, isPseudo = 1, AddedComplexity = 5,
    SchedRW = [WriteALU] in {
// AddedComplexity higher than MOV64ri but lower than MOV32r0 and MOV32r1.
def MOV32ImmSExti8 : I<0, Pseudo, (outs GR32:$dst), (ins i32i8imm:$src), "",
                       [(set GR32:$dst, i32immSExt8:$src)]>,
                     Requires<[OptForMinSize, NotWin64WithoutFP]>;
def MOV64ImmSExti8 : I<0, Pseudo, (outs GR64:$dst), (ins i64i8imm:$src), "",
                       [(set GR64:$dst, i64immSExt8:$src)]>,
                     Requires<[OptForMinSize, NotWin64WithoutFP]>;
}

// Materialize i64 constant where top 32-bits are zero. This could theoretically
// use MOV32ri with a SUBREG_TO_REG to represent the zero-extension, however
// that would make it more difficult to rematerialize.
let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1,
    isPseudo = 1, SchedRW = [WriteMove] in
def MOV32ri64 : I<0, Pseudo, (outs GR64:$dst), (ins i64i32imm:$src), "",
                  [(set GR64:$dst, i64immZExt32:$src)]>;
// This 64-bit pseudo-move can also be used for labels in the x86-64 small code
// model.
def mov64imm32 : ComplexPattern<i64, 1, "selectMOV64Imm32", [X86Wrapper]>;
def : Pat<(i64 mov64imm32:$src), (MOV32ri64 mov64imm32:$src)>;

// Use sbb to materialize carry bit.
let Uses = [EFLAGS], Defs = [EFLAGS], isPseudo = 1, SchedRW = [WriteADC],
    hasSideEffects = 0 in {
// FIXME: These are pseudo ops that should be replaced with Pat<> patterns.
// However, Pat<> can't replicate the destination reg into the inputs of the
// pattern.
def SETB_C32r : I<0, Pseudo, (outs GR32:$dst), (ins), "", []>;
def SETB_C64r : I<0, Pseudo, (outs GR64:$dst), (ins), "", []>;
}
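
// For illustration, SETB_C32r expands to "sbbl %reg, %reg" with the
// destination replicated into both sources, yielding 0 when the carry flag
// is clear and -1 when it is set.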
//===----------------------------------------------------------------------===//
// String Pseudo Instructions
//
let SchedRW = [WriteMicrocoded] in {
let Defs = [ECX,EDI,ESI], Uses = [ECX,EDI,ESI], isCodeGenOnly = 1 in {
def REP_MOVSB_32 : I<0xA4, RawFrm, (outs), (ins),
                     "{rep;movsb (%esi), %es:(%edi)|rep movsb es:[edi], [esi]}",
                     [(X86rep_movs i8)]>, REP, AdSize32,
                   Requires<[NotLP64]>;
def REP_MOVSW_32 : I<0xA5, RawFrm, (outs), (ins),
                     "{rep;movsw (%esi), %es:(%edi)|rep movsw es:[edi], [esi]}",
                     [(X86rep_movs i16)]>, REP, AdSize32, OpSize16,
                   Requires<[NotLP64]>;
def REP_MOVSD_32 : I<0xA5, RawFrm, (outs), (ins),
                     "{rep;movsl (%esi), %es:(%edi)|rep movsd es:[edi], [esi]}",
                     [(X86rep_movs i32)]>, REP, AdSize32, OpSize32,
                   Requires<[NotLP64]>;
def REP_MOVSQ_32 : RI<0xA5, RawFrm, (outs), (ins),
                      "{rep;movsq (%esi), %es:(%edi)|rep movsq es:[edi], [esi]}",
                      [(X86rep_movs i64)]>, REP, AdSize32,
                    Requires<[NotLP64, In64BitMode]>;
}
let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI], isCodeGenOnly = 1 in {
def REP_MOVSB_64 : I<0xA4, RawFrm, (outs), (ins),
                     "{rep;movsb (%rsi), %es:(%rdi)|rep movsb es:[rdi], [rsi]}",
                     [(X86rep_movs i8)]>, REP, AdSize64,
                   Requires<[IsLP64]>;
def REP_MOVSW_64 : I<0xA5, RawFrm, (outs), (ins),
                     "{rep;movsw (%rsi), %es:(%rdi)|rep movsw es:[rdi], [rsi]}",
                     [(X86rep_movs i16)]>, REP, AdSize64, OpSize16,
                   Requires<[IsLP64]>;
def REP_MOVSD_64 : I<0xA5, RawFrm, (outs), (ins),
                     "{rep;movsl (%rsi), %es:(%rdi)|rep movsd es:[rdi], [rsi]}",
                     [(X86rep_movs i32)]>, REP, AdSize64, OpSize32,
                   Requires<[IsLP64]>;
def REP_MOVSQ_64 : RI<0xA5, RawFrm, (outs), (ins),
                      "{rep;movsq (%rsi), %es:(%rdi)|rep movsq es:[rdi], [rsi]}",
                      [(X86rep_movs i64)]>, REP, AdSize64,
                    Requires<[IsLP64]>;
}
// FIXME: Should use "(X86rep_stos AL)" as the pattern.
let Defs = [ECX,EDI], isCodeGenOnly = 1 in {
  let Uses = [AL,ECX,EDI] in
  def REP_STOSB_32 : I<0xAA, RawFrm, (outs), (ins),
                       "{rep;stosb %al, %es:(%edi)|rep stosb es:[edi], al}",
                       [(X86rep_stos i8)]>, REP, AdSize32,
                     Requires<[NotLP64]>;
  let Uses = [AX,ECX,EDI] in
  def REP_STOSW_32 : I<0xAB, RawFrm, (outs), (ins),
                       "{rep;stosw %ax, %es:(%edi)|rep stosw es:[edi], ax}",
                       [(X86rep_stos i16)]>, REP, AdSize32, OpSize16,
                     Requires<[NotLP64]>;
  let Uses = [EAX,ECX,EDI] in
  def REP_STOSD_32 : I<0xAB, RawFrm, (outs), (ins),
                       "{rep;stosl %eax, %es:(%edi)|rep stosd es:[edi], eax}",
                       [(X86rep_stos i32)]>, REP, AdSize32, OpSize32,
                     Requires<[NotLP64]>;
  let Uses = [RAX,ECX,EDI] in
  def REP_STOSQ_32 : RI<0xAB, RawFrm, (outs), (ins),
                        "{rep;stosq %rax, %es:(%edi)|rep stosq es:[edi], rax}",
                        [(X86rep_stos i64)]>, REP, AdSize32,
                      Requires<[NotLP64, In64BitMode]>;
}
let Defs = [RCX,RDI], isCodeGenOnly = 1 in {
  let Uses = [AL,RCX,RDI] in
  def REP_STOSB_64 : I<0xAA, RawFrm, (outs), (ins),
                       "{rep;stosb %al, %es:(%rdi)|rep stosb es:[rdi], al}",
                       [(X86rep_stos i8)]>, REP, AdSize64,
                     Requires<[IsLP64]>;
  let Uses = [AX,RCX,RDI] in
  def REP_STOSW_64 : I<0xAB, RawFrm, (outs), (ins),
                       "{rep;stosw %ax, %es:(%rdi)|rep stosw es:[rdi], ax}",
                       [(X86rep_stos i16)]>, REP, AdSize64, OpSize16,
                     Requires<[IsLP64]>;
  let Uses = [EAX,RCX,RDI] in
  def REP_STOSD_64 : I<0xAB, RawFrm, (outs), (ins),
                       "{rep;stosl %eax, %es:(%rdi)|rep stosd es:[rdi], eax}",
                       [(X86rep_stos i32)]>, REP, AdSize64, OpSize32,
                     Requires<[IsLP64]>;
  let Uses = [RAX,RCX,RDI] in
  def REP_STOSQ_64 : RI<0xAB, RawFrm, (outs), (ins),
                        "{rep;stosq %rax, %es:(%rdi)|rep stosq es:[rdi], rax}",
                        [(X86rep_stos i64)]>, REP, AdSize64,
                      Requires<[IsLP64]>;
}
} // SchedRW
//===----------------------------------------------------------------------===//
// Thread Local Storage Instructions
//
let SchedRW = [WriteSystem] in {

// ELF TLS Support
// All calls clobber the non-callee saved registers. ESP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,
            ST0, ST1, ST2, ST3, ST4, ST5, ST6, ST7,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS, DF],
    usesCustomInserter = 1, Uses = [ESP, SSP] in {
def TLS_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                   "# TLS_addr32",
                   [(X86tlsaddr tls32addr:$sym)]>,
                 Requires<[Not64BitMode]>;
def TLS_base_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                        "# TLS_base_addr32",
                        [(X86tlsbaseaddr tls32baseaddr:$sym)]>,
                      Requires<[Not64BitMode]>;
}
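
// For illustration, on 32-bit ELF the general-dynamic lowering of TLS_addr32
// is roughly:
//   leal sym@TLSGD(,%ebx), %eax
//   call ___tls_get_addr@PLT
// an ordinary call, which is why nearly all registers are listed as clobbered.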
// All calls clobber the non-callee saved registers. RSP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
            FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,
            ST0, ST1, ST2, ST3, ST4, ST5, ST6, ST7,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS, DF],
    usesCustomInserter = 1, Uses = [RSP, SSP] in {
def TLS_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLS_addr64",
                   [(X86tlsaddr tls64addr:$sym)]>,
                 Requires<[In64BitMode, IsLP64]>;
def TLS_base_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                        "# TLS_base_addr64",
                        [(X86tlsbaseaddr tls64baseaddr:$sym)]>,
                      Requires<[In64BitMode, IsLP64]>;
def TLS_addrX32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                    "# TLS_addrX32",
                    [(X86tlsaddr tls32addr:$sym)]>,
                  Requires<[In64BitMode, NotLP64]>;
def TLS_base_addrX32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                         "# TLS_base_addrX32",
                         [(X86tlsbaseaddr tls32baseaddr:$sym)]>,
                       Requires<[In64BitMode, NotLP64]>;
}
// Darwin TLS Support
// For i386, the address of the thunk is passed on the stack, on return the
// address of the variable is in %eax. %ecx is trashed during the function
// call. All other registers are preserved.
let Defs = [EAX, ECX, EFLAGS, DF],
    Uses = [ESP, SSP],
    usesCustomInserter = 1 in
def TLSCall_32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                   "# TLSCall_32",
                   [(X86TLSCall addr:$sym)]>,
                 Requires<[Not64BitMode]>;

// For x86_64, the address of the thunk is passed in %rdi, but the
// pseudo directly uses the symbol, so do not add an implicit use of
// %rdi. The lowering will do the right thing with RDI.
// On return the address of the variable is in %rax. All other
// registers are preserved.
let Defs = [RAX, EFLAGS, DF],
    Uses = [RSP, SSP],
    usesCustomInserter = 1 in
def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLSCall_64",
                   [(X86TLSCall addr:$sym)]>,
                 Requires<[In64BitMode]>;
} // SchedRW
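
// For illustration, the 64-bit Darwin lowering is roughly:
//   movq _var@TLVP(%rip), %rdi
//   callq *(%rdi)
// after which %rax holds the address of the thread-local variable.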
//===----------------------------------------------------------------------===//
// Conditional Move Pseudo Instructions

// CMOV* - Used to implement the SELECT DAG operation. Expanded after
// instruction selection into a branch sequence.
multiclass CMOVrr_PSEUDO<RegisterClass RC, ValueType VT> {
  def CMOV#NAME : I<0, Pseudo,
                    (outs RC:$dst), (ins RC:$t, RC:$f, i8imm:$cond),
                    "#CMOV_"#NAME#" PSEUDO!",
                    [(set RC:$dst, (VT (X86cmov RC:$t, RC:$f, timm:$cond,
                                                EFLAGS)))]>;
}
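
// For illustration, the custom inserter expands each CMOV_* pseudo into a
// branch diamond: a conditional jump on $cond selects between the block
// providing $t and the block providing $f, with a PHI merging the two values
// in the join block (a sketch; the real expansion also groups adjacent CMOVs).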
let usesCustomInserter = 1, hasNoSchedulingInfo = 1, Uses = [EFLAGS] in {
  // X86 doesn't have 8-bit conditional moves. Use a customInserter to
  // emit control flow. An alternative to this is to mark i8 SELECT as Promote,
  // however that requires promoting the operands, and can induce additional
  // i8 register pressure.
  defm _GR8 : CMOVrr_PSEUDO<GR8, i8>;

  let Predicates = [NoCMov] in {
    defm _GR32 : CMOVrr_PSEUDO<GR32, i32>;
    defm _GR16 : CMOVrr_PSEUDO<GR16, i16>;
  } // Predicates = [NoCMov]

  // fcmov doesn't handle all possible EFLAGS, provide a fallback if there is
  // no CMOV instruction available.
  let Predicates = [FPStackf32] in
    defm _RFP32 : CMOVrr_PSEUDO<RFP32, f32>;

  let Predicates = [FPStackf64] in
    defm _RFP64 : CMOVrr_PSEUDO<RFP64, f64>;

  defm _RFP80 : CMOVrr_PSEUDO<RFP80, f80>;

  let Predicates = [HasMMX] in
    defm _VR64 : CMOVrr_PSEUDO<VR64, x86mmx>;

  let Predicates = [HasSSE1,NoAVX512] in
    defm _FR32 : CMOVrr_PSEUDO<FR32, f32>;
  let Predicates = [HasSSE2,NoAVX512] in
    defm _FR64 : CMOVrr_PSEUDO<FR64, f64>;
  let Predicates = [HasAVX512] in {
    defm _FR32X : CMOVrr_PSEUDO<FR32X, f32>;
    defm _FR64X : CMOVrr_PSEUDO<FR64X, f64>;
  }
  let Predicates = [NoVLX] in {
    defm _VR128 : CMOVrr_PSEUDO<VR128, v2i64>;
    defm _VR256 : CMOVrr_PSEUDO<VR256, v4i64>;
  }
  let Predicates = [HasVLX] in {
    defm _VR128X : CMOVrr_PSEUDO<VR128X, v2i64>;
    defm _VR256X : CMOVrr_PSEUDO<VR256X, v4i64>;
  }
  defm _VR512 : CMOVrr_PSEUDO<VR512, v8i64>;
  defm _VK1 : CMOVrr_PSEUDO<VK1, v1i1>;
  defm _VK2 : CMOVrr_PSEUDO<VK2, v2i1>;
  defm _VK4 : CMOVrr_PSEUDO<VK4, v4i1>;
  defm _VK8 : CMOVrr_PSEUDO<VK8, v8i1>;
  defm _VK16 : CMOVrr_PSEUDO<VK16, v16i1>;
  defm _VK32 : CMOVrr_PSEUDO<VK32, v32i1>;
  defm _VK64 : CMOVrr_PSEUDO<VK64, v64i1>;
} // usesCustomInserter = 1, hasNoSchedulingInfo = 1, Uses = [EFLAGS]
def : Pat<(f128 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
          (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;

let Predicates = [NoVLX] in {
  def : Pat<(v16i8 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
  def : Pat<(v8i16 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
  def : Pat<(v4i32 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
  def : Pat<(v4f32 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
  def : Pat<(v2f64 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;

  def : Pat<(v32i8 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
  def : Pat<(v16i16 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
  def : Pat<(v8i32 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
  def : Pat<(v8f32 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
  def : Pat<(v4f64 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
}
let Predicates = [HasVLX] in {
  def : Pat<(v16i8 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
  def : Pat<(v8i16 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
  def : Pat<(v4i32 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
  def : Pat<(v4f32 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
  def : Pat<(v2f64 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;

  def : Pat<(v32i8 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
  def : Pat<(v16i16 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
  def : Pat<(v8i32 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
  def : Pat<(v8f32 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
  def : Pat<(v4f64 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
}

def : Pat<(v64i8 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
          (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
def : Pat<(v32i16 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
          (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
def : Pat<(v16i32 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
          (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
def : Pat<(v16f32 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
          (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
def : Pat<(v8f64 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
          (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
//===----------------------------------------------------------------------===//
// Normal-Instructions-With-Lock-Prefix Pseudo Instructions
//===----------------------------------------------------------------------===//

// FIXME: Use normal instructions and add lock prefix dynamically.

// Memory barriers

let isCodeGenOnly = 1, Defs = [EFLAGS] in
def OR32mi8Locked : Ii8<0x83, MRM1m, (outs), (ins i32mem:$dst, i32i8imm:$zero),
                        "or{l}\t{$zero, $dst|$dst, $zero}", []>,
                    Requires<[Not64BitMode]>, OpSize32, LOCK,
                    Sched<[WriteALURMW]>;

let hasSideEffects = 1 in
def Int_MemBarrier : I<0, Pseudo, (outs), (ins),
                       "#MEMBARRIER",
                       [(X86MemBarrier)]>, Sched<[WriteLoad]>;
// RegOpc corresponds to the mr version of the instruction
// ImmOpc corresponds to the mi version of the instruction
// ImmOpc8 corresponds to the mi8 version of the instruction
// ImmMod corresponds to the instruction format of the mi and mi8 versions
multiclass LOCK_ArithBinOp<bits<8> RegOpc, bits<8> ImmOpc, bits<8> ImmOpc8,
                           Format ImmMod, SDNode Op, string mnemonic> {
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
    SchedRW = [WriteALURMW] in {

def NAME#8mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                  RegOpc{3}, RegOpc{2}, RegOpc{1}, 0 },
                 MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
                 !strconcat(mnemonic, "{b}\t",
                            "{$src2, $dst|$dst, $src2}"),
                 [(set EFLAGS, (Op addr:$dst, GR8:$src2))]>, LOCK;

def NAME#16mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                   RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                  MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
                  !strconcat(mnemonic, "{w}\t",
                             "{$src2, $dst|$dst, $src2}"),
                  [(set EFLAGS, (Op addr:$dst, GR16:$src2))]>,
                  OpSize16, LOCK;

def NAME#32mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                   RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                  MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
                  !strconcat(mnemonic, "{l}\t",
                             "{$src2, $dst|$dst, $src2}"),
                  [(set EFLAGS, (Op addr:$dst, GR32:$src2))]>,
                  OpSize32, LOCK;

def NAME#64mr : RI<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                    RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                   MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                   !strconcat(mnemonic, "{q}\t",
                              "{$src2, $dst|$dst, $src2}"),
                   [(set EFLAGS, (Op addr:$dst, GR64:$src2))]>, LOCK;
// NOTE: These are order specific, we want the mi8 forms to be listed
// first so that they are slightly preferred to the mi forms.
def NAME#16mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                      ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                     ImmMod, (outs), (ins i16mem :$dst, i16i8imm :$src2),
                     !strconcat(mnemonic, "{w}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [(set EFLAGS, (Op addr:$dst, i16immSExt8:$src2))]>,
                     OpSize16, LOCK;

def NAME#32mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                      ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                     ImmMod, (outs), (ins i32mem :$dst, i32i8imm :$src2),
                     !strconcat(mnemonic, "{l}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [(set EFLAGS, (Op addr:$dst, i32immSExt8:$src2))]>,
                     OpSize32, LOCK;

def NAME#64mi8 : RIi8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                       ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                      ImmMod, (outs), (ins i64mem :$dst, i64i8imm :$src2),
                      !strconcat(mnemonic, "{q}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      [(set EFLAGS, (Op addr:$dst, i64immSExt8:$src2))]>,
                      LOCK;

def NAME#8mi : Ii8<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                    ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 0 },
                   ImmMod, (outs), (ins i8mem :$dst, i8imm :$src2),
                   !strconcat(mnemonic, "{b}\t",
                              "{$src2, $dst|$dst, $src2}"),
                   [(set EFLAGS, (Op addr:$dst, (i8 imm:$src2)))]>, LOCK;
def NAME#16mi : Ii16<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                      ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                     ImmMod, (outs), (ins i16mem :$dst, i16imm :$src2),
                     !strconcat(mnemonic, "{w}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [(set EFLAGS, (Op addr:$dst, (i16 imm:$src2)))]>,
                     OpSize16, LOCK;

def NAME#32mi : Ii32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                      ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                     ImmMod, (outs), (ins i32mem :$dst, i32imm :$src2),
                     !strconcat(mnemonic, "{l}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [(set EFLAGS, (Op addr:$dst, (i32 imm:$src2)))]>,
                     OpSize32, LOCK;

def NAME#64mi32 : RIi32S<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                          ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                         ImmMod, (outs), (ins i64mem :$dst, i64i32imm :$src2),
                         !strconcat(mnemonic, "{q}\t",
                                    "{$src2, $dst|$dst, $src2}"),
                         [(set EFLAGS, (Op addr:$dst, i64immSExt32:$src2))]>,
                         LOCK;
}
}
defm LOCK_ADD : LOCK_ArithBinOp<0x00, 0x80, 0x83, MRM0m, X86lock_add, "add">;
defm LOCK_SUB : LOCK_ArithBinOp<0x28, 0x80, 0x83, MRM5m, X86lock_sub, "sub">;
defm LOCK_OR  : LOCK_ArithBinOp<0x08, 0x80, 0x83, MRM1m, X86lock_or , "or">;
defm LOCK_AND : LOCK_ArithBinOp<0x20, 0x80, 0x83, MRM4m, X86lock_and, "and">;
defm LOCK_XOR : LOCK_ArithBinOp<0x30, 0x80, 0x83, MRM6m, X86lock_xor, "xor">;

def X86lock_add_nocf : PatFrag<(ops node:$lhs, node:$rhs),
                               (X86lock_add node:$lhs, node:$rhs), [{
  return hasNoCarryFlagUses(SDValue(N, 0));
}]>;

def X86lock_sub_nocf : PatFrag<(ops node:$lhs, node:$rhs),
                               (X86lock_sub node:$lhs, node:$rhs), [{
  return hasNoCarryFlagUses(SDValue(N, 0));
}]>;
let Predicates = [UseIncDec] in {
  let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
      SchedRW = [WriteALURMW] in {
    def LOCK_INC8m  : I<0xFE, MRM0m, (outs), (ins i8mem :$dst),
                        "lock\t\tinc{b}\t$dst",
                        [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i8 1)))]>,
                        LOCK;
    def LOCK_INC16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst),
                        "lock\t\tinc{w}\t$dst",
                        [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i16 1)))]>,
                        OpSize16, LOCK;
    def LOCK_INC32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst),
                        "lock\t\tinc{l}\t$dst",
                        [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i32 1)))]>,
                        OpSize32, LOCK;
    def LOCK_INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst),
                         "lock\t\tinc{q}\t$dst",
                         [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i64 1)))]>,
                         LOCK;

    def LOCK_DEC8m  : I<0xFE, MRM1m, (outs), (ins i8mem :$dst),
                        "lock\t\tdec{b}\t$dst",
                        [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i8 1)))]>,
                        LOCK;
    def LOCK_DEC16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst),
                        "lock\t\tdec{w}\t$dst",
                        [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i16 1)))]>,
                        OpSize16, LOCK;
    def LOCK_DEC32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst),
                        "lock\t\tdec{l}\t$dst",
                        [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i32 1)))]>,
                        OpSize32, LOCK;
    def LOCK_DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst),
                         "lock\t\tdec{q}\t$dst",
                         [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i64 1)))]>,
                         LOCK;
  }

  // Additional patterns for -1 constant.
  def : Pat<(X86lock_add addr:$dst, (i8  -1)), (LOCK_DEC8m  addr:$dst)>;
  def : Pat<(X86lock_add addr:$dst, (i16 -1)), (LOCK_DEC16m addr:$dst)>;
  def : Pat<(X86lock_add addr:$dst, (i32 -1)), (LOCK_DEC32m addr:$dst)>;
  def : Pat<(X86lock_add addr:$dst, (i64 -1)), (LOCK_DEC64m addr:$dst)>;
  def : Pat<(X86lock_sub addr:$dst, (i8  -1)), (LOCK_INC8m  addr:$dst)>;
  def : Pat<(X86lock_sub addr:$dst, (i16 -1)), (LOCK_INC16m addr:$dst)>;
  def : Pat<(X86lock_sub addr:$dst, (i32 -1)), (LOCK_INC32m addr:$dst)>;
  def : Pat<(X86lock_sub addr:$dst, (i64 -1)), (LOCK_INC64m addr:$dst)>;
}
// Atomic compare and swap.
multiclass LCMPXCHG_BinOp<bits<8> Opc8, bits<8> Opc, Format Form,
                          string mnemonic, SDPatternOperator frag> {
let isCodeGenOnly = 1, SchedRW = [WriteCMPXCHGRMW] in {
  let Defs = [AL, EFLAGS], Uses = [AL] in
  def NAME#8  : I<Opc8, Form, (outs), (ins i8mem:$ptr, GR8:$swap),
                  !strconcat(mnemonic, "{b}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR8:$swap, 1)]>, TB, LOCK;
  let Defs = [AX, EFLAGS], Uses = [AX] in
  def NAME#16 : I<Opc, Form, (outs), (ins i16mem:$ptr, GR16:$swap),
                  !strconcat(mnemonic, "{w}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR16:$swap, 2)]>, TB, OpSize16, LOCK;
  let Defs = [EAX, EFLAGS], Uses = [EAX] in
  def NAME#32 : I<Opc, Form, (outs), (ins i32mem:$ptr, GR32:$swap),
                  !strconcat(mnemonic, "{l}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR32:$swap, 4)]>, TB, OpSize32, LOCK;
  let Defs = [RAX, EFLAGS], Uses = [RAX] in
  def NAME#64 : RI<Opc, Form, (outs), (ins i64mem:$ptr, GR64:$swap),
                   !strconcat(mnemonic, "{q}\t{$swap, $ptr|$ptr, $swap}"),
                   [(frag addr:$ptr, GR64:$swap, 8)]>, TB, LOCK;
}
}

let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX],
    Predicates = [HasCmpxchg8b], SchedRW = [WriteCMPXCHGRMW],
    isCodeGenOnly = 1, usesCustomInserter = 1 in {
def LCMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i64mem:$ptr),
                   "cmpxchg8b\t$ptr",
                   [(X86cas8 addr:$ptr)]>, TB, LOCK;
}

let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX],
    Predicates = [HasCmpxchg16b,In64BitMode], SchedRW = [WriteCMPXCHGRMW],
    isCodeGenOnly = 1, mayLoad = 1, mayStore = 1, hasSideEffects = 0 in {
def LCMPXCHG16B : RI<0xC7, MRM1m, (outs), (ins i128mem:$ptr),
                     "cmpxchg16b\t$ptr",
                     []>, TB, LOCK;
}
// This pseudo must be used when the frame uses RBX as
// the base pointer. Indeed, in such a situation RBX is a reserved
// register and the register allocator will ignore any use/def of
// it. In other words, the register will not fix the clobbering of
// RBX that will happen when setting the arguments for the instruction.
//
// Unlike the actual related instruction, we mark that this one
// defines RBX (instead of using RBX).
// The rationale is that we will define RBX during the expansion of
// the pseudo. The argument feeding RBX is rbx_input.
//
// The additional argument, $rbx_save, is a temporary register used to
// save the value of RBX across the actual instruction.
//
// To make sure the register assigned to $rbx_save does not interfere with
// the definition of the actual instruction, we use a definition $dst which
// is tied to $rbx_save. That way, the live-range of $rbx_save spans across
// the instruction and we are sure we will have a valid register to restore
// the value of RBX.
let Defs = [RAX, RDX, RBX, EFLAGS], Uses = [RAX, RCX, RDX],
    Predicates = [HasCmpxchg16b,In64BitMode], SchedRW = [WriteCMPXCHGRMW],
    isCodeGenOnly = 1, isPseudo = 1,
    mayLoad = 1, mayStore = 1, hasSideEffects = 0,
    Constraints = "$rbx_save = $dst" in {
def LCMPXCHG16B_SAVE_RBX :
    I<0, Pseudo, (outs GR64:$dst),
      (ins i128mem:$ptr, GR64:$rbx_input, GR64:$rbx_save), "", []>;
}
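
// For illustration, the expansion of LCMPXCHG16B_SAVE_RBX is roughly:
//   movq $rbx_input, %rbx
//   cmpxchg16b ($ptr)
//   movq $rbx_save, %rbx     # put the reserved base pointer back
// (a sketch; the actual sequence is produced when the pseudo is expanded.)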
// Pseudo instruction that doesn't read/write RBX. Will be turned into either
// LCMPXCHG16B_SAVE_RBX or LCMPXCHG16B via a custom inserter.
let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RCX, RDX],
    Predicates = [HasCmpxchg16b,In64BitMode], SchedRW = [WriteCMPXCHGRMW],
    isCodeGenOnly = 1, isPseudo = 1,
    mayLoad = 1, mayStore = 1, hasSideEffects = 0,
    usesCustomInserter = 1 in {
def LCMPXCHG16B_NO_RBX :
    I<0, Pseudo, (outs), (ins i128mem:$ptr, GR64:$rbx_input), "",
      [(X86cas16 addr:$ptr, GR64:$rbx_input)]>;
}

// This pseudo must be used when the frame uses RBX/EBX as
// the base pointer;
// cf. comment for LCMPXCHG16B_SAVE_RBX.
let Defs = [EBX], Uses = [ECX, EAX],
    Predicates = [HasMWAITX], SchedRW = [WriteSystem],
    isCodeGenOnly = 1, isPseudo = 1, Constraints = "$rbx_save = $dst" in {
def MWAITX_SAVE_RBX :
    I<0, Pseudo, (outs GR64:$dst),
      (ins GR32:$ebx_input, GR64:$rbx_save),
      "mwaitx",
      []>;
}

// Pseudo mwaitx instruction to use for custom insertion.
let Predicates = [HasMWAITX], SchedRW = [WriteSystem],
    isCodeGenOnly = 1, isPseudo = 1,
    usesCustomInserter = 1 in {
def MWAITX :
    I<0, Pseudo, (outs), (ins GR32:$ecx, GR32:$eax, GR32:$ebx),
      "mwaitx",
      [(int_x86_mwaitx GR32:$ecx, GR32:$eax, GR32:$ebx)]>;
}
defm LCMPXCHG : LCMPXCHG_BinOp<0xB0, 0xB1, MRMDestMem, "cmpxchg", X86cas>;

// Atomic exchange and add
multiclass ATOMIC_LOAD_BINOP<bits<8> opc8, bits<8> opc, string mnemonic,
                             string frag> {
  let Constraints = "$val = $dst", Defs = [EFLAGS], isCodeGenOnly = 1,
      SchedRW = [WriteALURMW] in {
    def NAME#8  : I<opc8, MRMSrcMem, (outs GR8:$dst),
                    (ins GR8:$val, i8mem:$ptr),
                    !strconcat(mnemonic, "{b}\t{$val, $ptr|$ptr, $val}"),
                    [(set GR8:$dst,
                          (!cast<PatFrag>(frag # "_8") addr:$ptr, GR8:$val))]>;
    def NAME#16 : I<opc, MRMSrcMem, (outs GR16:$dst),
                    (ins GR16:$val, i16mem:$ptr),
                    !strconcat(mnemonic, "{w}\t{$val, $ptr|$ptr, $val}"),
                    [(set
                       GR16:$dst,
                       (!cast<PatFrag>(frag # "_16") addr:$ptr, GR16:$val))]>,
                    OpSize16;
    def NAME#32 : I<opc, MRMSrcMem, (outs GR32:$dst),
                    (ins GR32:$val, i32mem:$ptr),
                    !strconcat(mnemonic, "{l}\t{$val, $ptr|$ptr, $val}"),
                    [(set
                       GR32:$dst,
                       (!cast<PatFrag>(frag # "_32") addr:$ptr, GR32:$val))]>,
                    OpSize32;
    def NAME#64 : RI<opc, MRMSrcMem, (outs GR64:$dst),
                     (ins GR64:$val, i64mem:$ptr),
                     !strconcat(mnemonic, "{q}\t{$val, $ptr|$ptr, $val}"),
                     [(set
                        GR64:$dst,
                        (!cast<PatFrag>(frag # "_64") addr:$ptr, GR64:$val))]>;
  }
}

defm LXADD : ATOMIC_LOAD_BINOP<0xc0, 0xc1, "xadd", "atomic_load_add">, TB, LOCK;

/* The following multiclass tries to make sure that in code like
 *  x.store (immediate op x.load(acquire), release)
 * and
 *  x.store (register op x.load(acquire), release)
 * an operation directly on memory is generated instead of wasting a register.
 * It is not automatic as atomic_store/load are only lowered to MOV instructions
 * extremely late to prevent them from being accidentally reordered in the backend
 * (see below the RELEASE_MOV* / ACQUIRE_MOV* pseudo-instructions)
 */
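
// For example, with these patterns
//   x.store (42 + x.load(acquire), release)
// can be selected to a single "addl $42, (mem)" instead of a load, an add,
// and a store through a scratch register (an illustrative case; the two
// accesses are still separate atomic operations, not one atomic RMW).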
multiclass RELEASE_BINOP_MI<string Name, SDNode op> {
  def : Pat<(atomic_store_8 addr:$dst,
             (op (atomic_load_8 addr:$dst), (i8 imm:$src))),
            (!cast<Instruction>(Name#"8mi") addr:$dst, imm:$src)>;
  def : Pat<(atomic_store_16 addr:$dst,
             (op (atomic_load_16 addr:$dst), (i16 imm:$src))),
            (!cast<Instruction>(Name#"16mi") addr:$dst, imm:$src)>;
  def : Pat<(atomic_store_32 addr:$dst,
             (op (atomic_load_32 addr:$dst), (i32 imm:$src))),
            (!cast<Instruction>(Name#"32mi") addr:$dst, imm:$src)>;
  def : Pat<(atomic_store_64 addr:$dst,
             (op (atomic_load_64 addr:$dst), (i64immSExt32:$src))),
            (!cast<Instruction>(Name#"64mi32") addr:$dst, (i64immSExt32:$src))>;

  def : Pat<(atomic_store_8 addr:$dst,
             (op (atomic_load_8 addr:$dst), (i8 GR8:$src))),
            (!cast<Instruction>(Name#"8mr") addr:$dst, GR8:$src)>;
  def : Pat<(atomic_store_16 addr:$dst,
             (op (atomic_load_16 addr:$dst), (i16 GR16:$src))),
            (!cast<Instruction>(Name#"16mr") addr:$dst, GR16:$src)>;
  def : Pat<(atomic_store_32 addr:$dst,
             (op (atomic_load_32 addr:$dst), (i32 GR32:$src))),
            (!cast<Instruction>(Name#"32mr") addr:$dst, GR32:$src)>;
  def : Pat<(atomic_store_64 addr:$dst,
             (op (atomic_load_64 addr:$dst), (i64 GR64:$src))),
            (!cast<Instruction>(Name#"64mr") addr:$dst, GR64:$src)>;
}
defm : RELEASE_BINOP_MI<"ADD", add>;
defm : RELEASE_BINOP_MI<"AND", and>;
defm : RELEASE_BINOP_MI<"OR",  or>;
defm : RELEASE_BINOP_MI<"XOR", xor>;
defm : RELEASE_BINOP_MI<"SUB", sub>;
// Atomic load + floating point patterns.
// FIXME: This could also handle SIMD operations with *ps and *pd instructions.
multiclass ATOMIC_LOAD_FP_BINOP_MI<string Name, SDNode op> {
  def : Pat<(op FR32:$src1, (bitconvert (i32 (atomic_load_32 addr:$src2)))),
            (!cast<Instruction>(Name#"SSrm") FR32:$src1, addr:$src2)>,
            Requires<[UseSSE1]>;
  def : Pat<(op FR32:$src1, (bitconvert (i32 (atomic_load_32 addr:$src2)))),
            (!cast<Instruction>("V"#Name#"SSrm") FR32:$src1, addr:$src2)>,
            Requires<[UseAVX]>;
  def : Pat<(op FR32X:$src1, (bitconvert (i32 (atomic_load_32 addr:$src2)))),
            (!cast<Instruction>("V"#Name#"SSZrm") FR32X:$src1, addr:$src2)>,
            Requires<[HasAVX512]>;

  def : Pat<(op FR64:$src1, (bitconvert (i64 (atomic_load_64 addr:$src2)))),
            (!cast<Instruction>(Name#"SDrm") FR64:$src1, addr:$src2)>,
            Requires<[UseSSE2]>;
  def : Pat<(op FR64:$src1, (bitconvert (i64 (atomic_load_64 addr:$src2)))),
            (!cast<Instruction>("V"#Name#"SDrm") FR64:$src1, addr:$src2)>,
            Requires<[UseAVX]>;
  def : Pat<(op FR64X:$src1, (bitconvert (i64 (atomic_load_64 addr:$src2)))),
            (!cast<Instruction>("V"#Name#"SDZrm") FR64X:$src1, addr:$src2)>,
            Requires<[HasAVX512]>;
}
defm : ATOMIC_LOAD_FP_BINOP_MI<"ADD", fadd>;
// FIXME: Add fsub, fmul, fdiv, ...
multiclass RELEASE_UNOP<string Name, dag dag8, dag dag16, dag dag32,
                        dag dag64> {
  def : Pat<(atomic_store_8 addr:$dst, dag8),
            (!cast<Instruction>(Name#8m) addr:$dst)>;
  def : Pat<(atomic_store_16 addr:$dst, dag16),
            (!cast<Instruction>(Name#16m) addr:$dst)>;
  def : Pat<(atomic_store_32 addr:$dst, dag32),
            (!cast<Instruction>(Name#32m) addr:$dst)>;
  def : Pat<(atomic_store_64 addr:$dst, dag64),
            (!cast<Instruction>(Name#64m) addr:$dst)>;
}

let Predicates = [UseIncDec] in {
  defm : RELEASE_UNOP<"INC",
                      (add (atomic_load_8 addr:$dst), (i8 1)),
                      (add (atomic_load_16 addr:$dst), (i16 1)),
                      (add (atomic_load_32 addr:$dst), (i32 1)),
                      (add (atomic_load_64 addr:$dst), (i64 1))>;
  defm : RELEASE_UNOP<"DEC",
                      (add (atomic_load_8 addr:$dst), (i8 -1)),
                      (add (atomic_load_16 addr:$dst), (i16 -1)),
                      (add (atomic_load_32 addr:$dst), (i32 -1)),
                      (add (atomic_load_64 addr:$dst), (i64 -1))>;
}

defm : RELEASE_UNOP<"NEG",
                    (ineg (i8 (atomic_load_8 addr:$dst))),
                    (ineg (i16 (atomic_load_16 addr:$dst))),
                    (ineg (i32 (atomic_load_32 addr:$dst))),
                    (ineg (i64 (atomic_load_64 addr:$dst)))>;
defm : RELEASE_UNOP<"NOT",
                    (not (i8 (atomic_load_8 addr:$dst))),
                    (not (i16 (atomic_load_16 addr:$dst))),
                    (not (i32 (atomic_load_32 addr:$dst))),
                    (not (i64 (atomic_load_64 addr:$dst)))>;
def : Pat<(atomic_store_8 addr:$dst, (i8 imm:$src)),
          (MOV8mi addr:$dst, imm:$src)>;
def : Pat<(atomic_store_16 addr:$dst, (i16 imm:$src)),
          (MOV16mi addr:$dst, imm:$src)>;
def : Pat<(atomic_store_32 addr:$dst, (i32 imm:$src)),
          (MOV32mi addr:$dst, imm:$src)>;
def : Pat<(atomic_store_64 addr:$dst, (i64immSExt32:$src)),
          (MOV64mi32 addr:$dst, i64immSExt32:$src)>;

def : Pat<(atomic_store_8 addr:$dst, GR8:$src),
          (MOV8mr addr:$dst, GR8:$src)>;
def : Pat<(atomic_store_16 addr:$dst, GR16:$src),
          (MOV16mr addr:$dst, GR16:$src)>;
def : Pat<(atomic_store_32 addr:$dst, GR32:$src),
          (MOV32mr addr:$dst, GR32:$src)>;
def : Pat<(atomic_store_64 addr:$dst, GR64:$src),
          (MOV64mr addr:$dst, GR64:$src)>;

def : Pat<(i8  (atomic_load_8 addr:$src)),  (MOV8rm addr:$src)>;
def : Pat<(i16 (atomic_load_16 addr:$src)), (MOV16rm addr:$src)>;
def : Pat<(i32 (atomic_load_32 addr:$src)), (MOV32rm addr:$src)>;
def : Pat<(i64 (atomic_load_64 addr:$src)), (MOV64rm addr:$src)>;
// Floating point loads/stores.
def : Pat<(atomic_store_32 addr:$dst, (i32 (bitconvert (f32 FR32:$src)))),
          (MOVSSmr addr:$dst, FR32:$src)>, Requires<[UseSSE1]>;
def : Pat<(atomic_store_32 addr:$dst, (i32 (bitconvert (f32 FR32:$src)))),
          (VMOVSSmr addr:$dst, FR32:$src)>, Requires<[UseAVX]>;
def : Pat<(atomic_store_32 addr:$dst, (i32 (bitconvert (f32 FR32:$src)))),
          (VMOVSSZmr addr:$dst, FR32:$src)>, Requires<[HasAVX512]>;

def : Pat<(atomic_store_64 addr:$dst, (i64 (bitconvert (f64 FR64:$src)))),
          (MOVSDmr addr:$dst, FR64:$src)>, Requires<[UseSSE2]>;
def : Pat<(atomic_store_64 addr:$dst, (i64 (bitconvert (f64 FR64:$src)))),
          (VMOVSDmr addr:$dst, FR64:$src)>, Requires<[UseAVX]>;
def : Pat<(atomic_store_64 addr:$dst, (i64 (bitconvert (f64 FR64:$src)))),
          (VMOVSDZmr addr:$dst, FR64:$src)>, Requires<[HasAVX512]>;

def : Pat<(f32 (bitconvert (i32 (atomic_load_32 addr:$src)))),
          (MOVSSrm_alt addr:$src)>, Requires<[UseSSE1]>;
def : Pat<(f32 (bitconvert (i32 (atomic_load_32 addr:$src)))),
          (VMOVSSrm_alt addr:$src)>, Requires<[UseAVX]>;
def : Pat<(f32 (bitconvert (i32 (atomic_load_32 addr:$src)))),
          (VMOVSSZrm_alt addr:$src)>, Requires<[HasAVX512]>;

def : Pat<(f64 (bitconvert (i64 (atomic_load_64 addr:$src)))),
          (MOVSDrm_alt addr:$src)>, Requires<[UseSSE2]>;
def : Pat<(f64 (bitconvert (i64 (atomic_load_64 addr:$src)))),
          (VMOVSDrm_alt addr:$src)>, Requires<[UseAVX]>;
def : Pat<(f64 (bitconvert (i64 (atomic_load_64 addr:$src)))),
          (VMOVSDZrm_alt addr:$src)>, Requires<[HasAVX512]>;
//===----------------------------------------------------------------------===//
// DAG Pattern Matching Rules
//===----------------------------------------------------------------------===//

// Use AND/OR to store 0/-1 in memory when optimizing for minsize. This saves
// binary size compared to a regular MOV, but it introduces an unnecessary
// load, so is not suitable for regular or optsize functions.
let Predicates = [OptForMinSize] in {
  def : Pat<(simple_store (i16 0), addr:$dst), (AND16mi8 addr:$dst, 0)>;
  def : Pat<(simple_store (i32 0), addr:$dst), (AND32mi8 addr:$dst, 0)>;
  def : Pat<(simple_store (i64 0), addr:$dst), (AND64mi8 addr:$dst, 0)>;
  def : Pat<(simple_store (i16 -1), addr:$dst), (OR16mi8 addr:$dst, -1)>;
  def : Pat<(simple_store (i32 -1), addr:$dst), (OR32mi8 addr:$dst, -1)>;
  def : Pat<(simple_store (i64 -1), addr:$dst), (OR64mi8 addr:$dst, -1)>;
}
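
// For example (illustrative encodings): "andl $0, (%eax)" is 83 20 00, three
// bytes, while "movl $0, (%eax)" is C7 00 00 00 00 00, six bytes, because MOV
// has no sign-extended imm8 form.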
// In kernel code model, we can get the address of a label
// into a register with 'movq'. FIXME: This is a hack, the 'imm' predicate of
// the MOV64ri32 should accept these.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri32 tconstpool  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri32 tjumptable  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri32 tglobaladdr :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri32 texternalsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper mcsym:$dst)),
          (MOV64ri32 mcsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri32 tblockaddress:$dst)>, Requires<[KernelCode]>;
// If we have small model and -static mode, it is safe to store global addresses
// directly as immediates. FIXME: This is really a hack, the 'imm' predicate
// for MOV64mi32 should handle this sort of thing.
def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tconstpool:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tjumptable:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, texternalsym:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper mcsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, mcsym:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tblockaddress:$src)>,
          Requires<[NearData, IsNotPIC]>;

def : Pat<(i32 (X86RecoverFrameAlloc mcsym:$dst)), (MOV32ri mcsym:$dst)>;
def : Pat<(i64 (X86RecoverFrameAlloc mcsym:$dst)), (MOV64ri mcsym:$dst)>;

// Calls
// tls has some funny stuff here...
// This corresponds to movabs $foo@tpoff, %rax
def : Pat<(i64 (X86Wrapper tglobaltlsaddr :$dst)),
          (MOV64ri32 tglobaltlsaddr :$dst)>;
// This corresponds to add $foo@tpoff, %rax
def : Pat<(add GR64:$src1, (X86Wrapper tglobaltlsaddr :$dst)),
          (ADD64ri32 GR64:$src1, tglobaltlsaddr :$dst)>;

// Direct PC relative function call for small code model. 32-bit displacement
// sign extended to 64-bit.
def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>;

// Tailcall stuff. The TCRETURN instructions execute after the epilog, so they
// can never use callee-saved registers. That is the purpose of the GR64_TC
// register classes.
//
// The only volatile register that is never used by the calling convention is
// %r11. This happens when calling a vararg function with 6 arguments.
// Match an X86tcret that uses less than 7 volatile registers.
def X86tcret_6regs : PatFrag<(ops node:$ptr, node:$off),
                             (X86tcret node:$ptr, node:$off), [{
  // X86tcret args: (*chain, ptr, imm, regs..., glue)
  unsigned NumRegs = 0;
  for (unsigned i = 3, e = N->getNumOperands(); i != e; ++i)
    if (isa<RegisterSDNode>(N->getOperand(i)) && ++NumRegs > 6)
      return false;
  return true;
}]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, timm:$off),
          (TCRETURNri ptr_rc_tailcall:$dst, timm:$off)>,
          Requires<[Not64BitMode, NotUseIndirectThunkCalls]>;

// FIXME: This is disabled for 32-bit PIC mode because the global base
// register which is part of the address mode may be assigned a
// callee-saved register.
def : Pat<(X86tcret (load addr:$dst), timm:$off),
          (TCRETURNmi addr:$dst, timm:$off)>,
          Requires<[Not64BitMode, IsNotPIC, NotUseIndirectThunkCalls]>;

def : Pat<(X86tcret (i32 tglobaladdr:$dst), timm:$off),
          (TCRETURNdi tglobaladdr:$dst, timm:$off)>,
          Requires<[NotLP64]>;

def : Pat<(X86tcret (i32 texternalsym:$dst), timm:$off),
          (TCRETURNdi texternalsym:$dst, timm:$off)>,
          Requires<[NotLP64]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, timm:$off),
          (TCRETURNri64 ptr_rc_tailcall:$dst, timm:$off)>,
          Requires<[In64BitMode, NotUseIndirectThunkCalls]>;

// Don't fold loads into X86tcret requiring more than 6 regs.
// There wouldn't be enough scratch registers for base+index.
def : Pat<(X86tcret_6regs (load addr:$dst), timm:$off),
          (TCRETURNmi64 addr:$dst, timm:$off)>,
          Requires<[In64BitMode, NotUseIndirectThunkCalls]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, timm:$off),
          (INDIRECT_THUNK_TCRETURN64 ptr_rc_tailcall:$dst, timm:$off)>,
          Requires<[In64BitMode, UseIndirectThunkCalls]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, timm:$off),
          (INDIRECT_THUNK_TCRETURN32 ptr_rc_tailcall:$dst, timm:$off)>,
          Requires<[Not64BitMode, UseIndirectThunkCalls]>;
def : Pat<(X86tcret (i64 tglobaladdr:$dst), timm:$off),
          (TCRETURNdi64 tglobaladdr:$dst, timm:$off)>,
          Requires<[In64BitMode]>;

def : Pat<(X86tcret (i64 texternalsym:$dst), timm:$off),
          (TCRETURNdi64 texternalsym:$dst, timm:$off)>,
          Requires<[In64BitMode]>;

// Normal calls, with various flavors of addresses.
def : Pat<(X86call (i32 tglobaladdr:$dst)),
          (CALLpcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i32 texternalsym:$dst)),
          (CALLpcrel32 texternalsym:$dst)>;
def : Pat<(X86call (i32 imm:$dst)),
          (CALLpcrel32 imm:$dst)>, Requires<[CallImmAddr]>;

// Comparisons.
// TEST R,R is smaller than CMP R,0
def : Pat<(X86cmp GR8:$src1, 0),
          (TEST8rr GR8:$src1, GR8:$src1)>;
def : Pat<(X86cmp GR16:$src1, 0),
          (TEST16rr GR16:$src1, GR16:$src1)>;
def : Pat<(X86cmp GR32:$src1, 0),
          (TEST32rr GR32:$src1, GR32:$src1)>;
def : Pat<(X86cmp GR64:$src1, 0),
          (TEST64rr GR64:$src1, GR64:$src1)>;
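
// For example (illustrative encodings): "testl %eax, %eax" is two bytes
// (85 C0), while "cmpl $0, %eax" is three (83 F8 00), and both leave EFLAGS
// in an equivalent state for a compare-with-zero check.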
// zextload bool -> zextload byte
// i1 stored in one byte in zero-extended form.
// Upper bits cleanup should be executed before Store.
def : Pat<(zextloadi8i1  addr:$src), (MOV8rm addr:$src)>;
def : Pat<(zextloadi16i1 addr:$src),
          (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
def : Pat<(zextloadi64i1 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;

// extload bool -> extload byte
// When extloading from 16-bit and smaller memory locations into 64-bit
// registers, use zero-extending loads so that the entire 64-bit register is
// defined, avoiding partial-register updates.

def : Pat<(extloadi8i1   addr:$src), (MOV8rm addr:$src)>;
def : Pat<(extloadi16i1  addr:$src),
          (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
def : Pat<(extloadi32i1  addr:$src), (MOVZX32rm8 addr:$src)>;
def : Pat<(extloadi16i8  addr:$src),
          (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
def : Pat<(extloadi32i8  addr:$src), (MOVZX32rm8 addr:$src)>;
def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>;

// For other extloads, use subregs, since the high contents of the register are
// defined after an extload.
// NOTE: The extloadi64i32 pattern needs to be first as it will try to form
// 32-bit loads for 4 byte aligned i8/i16 loads.
def : Pat<(extloadi64i32 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src), sub_32bit)>;
def : Pat<(extloadi64i1  addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
def : Pat<(extloadi64i8  addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
def : Pat<(extloadi64i16 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm16 addr:$src), sub_32bit)>;
// anyext. Define these to do an explicit zero-extend to
// avoid partial-register updates.
def : Pat<(i16 (anyext GR8 :$src)), (EXTRACT_SUBREG
                                     (MOVZX32rr8 GR8 :$src), sub_16bit)>;
def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8 GR8 :$src)>;

// Except for i16 -> i32 since isel expects i16 ops to be promoted to i32.
def : Pat<(i32 (anyext GR16:$src)),
          (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, sub_16bit)>;

def : Pat<(i64 (anyext GR8 :$src)),
          (SUBREG_TO_REG (i64 0), (MOVZX32rr8 GR8 :$src), sub_32bit)>;
def : Pat<(i64 (anyext GR16:$src)),
          (SUBREG_TO_REG (i64 0), (MOVZX32rr16 GR16 :$src), sub_32bit)>;
def : Pat<(i64 (anyext GR32:$src)),
          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src, sub_32bit)>;

// If this is an anyext of the remainder of an 8-bit sdivrem, use a MOVSX
// instead of a MOVZX. The sdivrem lowering will emit a MOVSX to move
// %ah to the lower byte of a register. By using a MOVSX here we allow a
// post-isel peephole to merge the two MOVSX instructions into one.
def anyext_sdiv : PatFrag<(ops node:$lhs), (anyext node:$lhs),[{
  return (N->getOperand(0).getOpcode() == ISD::SDIVREM &&
          N->getOperand(0).getResNo() == 1);
}]>;
def : Pat<(i32 (anyext_sdiv GR8:$src)), (MOVSX32rr8 GR8:$src)>;
1345 // Any instruction that defines a 32-bit result leaves the high half of the
1346 // register. Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may
1347 // be copying from a truncate. AssertSext/AssertZext/AssertAlign aren't saying
1348 // anything about the upper 32 bits, they're probably just qualifying a
1349 // CopyFromReg. FREEZE may be coming from a a truncate. Any other 32-bit
1350 // operation will zero-extend up to 64 bits.
1351 def def32 : PatLeaf<(i32 GR32:$src), [{
1352 return N->getOpcode() != ISD::TRUNCATE &&
1353 N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
1354 N->getOpcode() != ISD::CopyFromReg &&
1355 N->getOpcode() != ISD::AssertSext &&
1356 N->getOpcode() != ISD::AssertZext &&
1357 N->getOpcode() != ISD::AssertAlign &&
1358 N->getOpcode() != ISD::FREEZE;
// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
def : Pat<(i64 (zext def32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
def : Pat<(i64 (and (anyext def32:$src), 0x00000000FFFFFFFF)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
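// For example, for (i64 (zext GR32:$x)) where $x is defined by an ADD32rr,
// no extra instruction is needed: SUBREG_TO_REG expands to no code, so the
// result is just (illustrative):
//   addl %esi, %edi        # bits 63:32 of %rdi are already zero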
//===----------------------------------------------------------------------===//
// Pattern match OR as ADD
//===----------------------------------------------------------------------===//

// If safe, we prefer to pattern match OR as ADD at isel time. ADD can be
// 3-addressified into an LEA instruction to avoid copies. However, we
// ultimately want to emit these instructions as an OR at the end of the code
// generator to make the generated code easier to read. To do this, we select
// into "disjoint bits" pseudo ops.
// Treat an 'or' node as an 'add' if the or'ed bits are known to be zero.
def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
    return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());

  KnownBits Known0 = CurDAG->computeKnownBits(N->getOperand(0), 0);
  KnownBits Known1 = CurDAG->computeKnownBits(N->getOperand(1), 0);
  return (~Known0.Zero & ~Known1.Zero) == 0;
}]>;
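// Illustrative example: if %x is known to have bit 2 clear, then
//   %r = or i32 %x, 4
// matches or_is_add, selects to ADD32ri8_DB, and can later be
// 3-addressified, e.g.:
//   leal 4(%rdi), %eax     # no separate copy + or sequence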
// (or x1, x2) -> (add x1, x2) if the two operands are known not to share bits.
// Try this before selecting to OR.
let SchedRW = [WriteALU] in {

let isConvertibleToThreeAddress = 1, isPseudo = 1,
    Constraints = "$src1 = $dst", Defs = [EFLAGS] in {
let isCommutable = 1 in {
def ADD8rr_DB   : I<0, Pseudo, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
                    "", // orb/addb REG, REG
                    [(set GR8:$dst, (or_is_add GR8:$src1, GR8:$src2))]>;
def ADD16rr_DB  : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                    "", // orw/addw REG, REG
                    [(set GR16:$dst, (or_is_add GR16:$src1, GR16:$src2))]>;
def ADD32rr_DB  : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                    "", // orl/addl REG, REG
                    [(set GR32:$dst, (or_is_add GR32:$src1, GR32:$src2))]>;
def ADD64rr_DB  : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                    "", // orq/addq REG, REG
                    [(set GR64:$dst, (or_is_add GR64:$src1, GR64:$src2))]>;
} // isCommutable
// NOTE: These are order specific, we want the ri8 forms to be listed
// first so that they are slightly preferred to the ri forms.

def ADD8ri_DB   : I<0, Pseudo,
                    (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
                    "", // orb/addb REG, imm8
                    [(set GR8:$dst, (or_is_add GR8:$src1, imm:$src2))]>;
def ADD16ri8_DB : I<0, Pseudo,
                    (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
                    "", // orw/addw REG, imm8
                    [(set GR16:$dst,(or_is_add GR16:$src1,i16immSExt8:$src2))]>;
def ADD16ri_DB  : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
                    "", // orw/addw REG, imm
                    [(set GR16:$dst, (or_is_add GR16:$src1, imm:$src2))]>;

def ADD32ri8_DB : I<0, Pseudo,
                    (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
                    "", // orl/addl REG, imm8
                    [(set GR32:$dst,(or_is_add GR32:$src1,i32immSExt8:$src2))]>;
def ADD32ri_DB  : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
                    "", // orl/addl REG, imm
                    [(set GR32:$dst, (or_is_add GR32:$src1, imm:$src2))]>;

def ADD64ri8_DB : I<0, Pseudo,
                    (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "", // orq/addq REG, imm8
                    [(set GR64:$dst, (or_is_add GR64:$src1,
                                                i64immSExt8:$src2))]>;
def ADD64ri32_DB : I<0, Pseudo,
                     (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                     "", // orq/addq REG, imm
                     [(set GR64:$dst, (or_is_add GR64:$src1,
                                                 i64immSExt32:$src2))]>;
} // isConvertibleToThreeAddress, isPseudo, Constraints, Defs
} // SchedRW
//===----------------------------------------------------------------------===//
// Pattern match SUB as XOR
//===----------------------------------------------------------------------===//

// An immediate in the LHS of a subtract can't be encoded in the instruction.
// If there is no possibility of a borrow we can use an XOR instead of a SUB
// to enable the immediate to be folded.
// TODO: Move this to a DAG combine?

def sub_is_xor : PatFrag<(ops node:$lhs, node:$rhs), (sub node:$lhs, node:$rhs),[{
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
    KnownBits Known = CurDAG->computeKnownBits(N->getOperand(1));

    // If all possible ones in the RHS are set in the LHS then there can't be
    // a borrow and we can use xor.
    return (~Known.Zero).isSubsetOf(CN->getAPIntValue());
  }

  return false;
}]>;
let AddedComplexity = 5 in {
def : Pat<(sub_is_xor imm:$src2, GR8:$src1),
          (XOR8ri GR8:$src1, imm:$src2)>;
def : Pat<(sub_is_xor i16immSExt8:$src2, GR16:$src1),
          (XOR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(sub_is_xor imm:$src2, GR16:$src1),
          (XOR16ri GR16:$src1, imm:$src2)>;
def : Pat<(sub_is_xor i32immSExt8:$src2, GR32:$src1),
          (XOR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(sub_is_xor imm:$src2, GR32:$src1),
          (XOR32ri GR32:$src1, imm:$src2)>;
def : Pat<(sub_is_xor i64immSExt8:$src2, GR64:$src1),
          (XOR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(sub_is_xor i64immSExt32:$src2, GR64:$src1),
          (XOR64ri32 GR64:$src1, i64immSExt32:$src2)>;
} // AddedComplexity = 5
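// For example, if %b is known to be 0 or 1 (illustrative):
//   %r = sub i32 1, %b     ; every possible RHS one-bit is set in the LHS
// selects to:
//   xorl $1, %edi          # no borrow possible, so xor == sub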
//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//
// Odd encoding trick: -128 fits into an 8-bit immediate field while
// +128 doesn't, so in this special case use a sub instead of an add.
def : Pat<(add GR16:$src1, 128),
          (SUB16ri8 GR16:$src1, -128)>;
def : Pat<(store (add (loadi16 addr:$dst), 128), addr:$dst),
          (SUB16mi8 addr:$dst, -128)>;

def : Pat<(add GR32:$src1, 128),
          (SUB32ri8 GR32:$src1, -128)>;
def : Pat<(store (add (loadi32 addr:$dst), 128), addr:$dst),
          (SUB32mi8 addr:$dst, -128)>;

def : Pat<(add GR64:$src1, 128),
          (SUB64ri8 GR64:$src1, -128)>;
def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
          (SUB64mi8 addr:$dst, -128)>;

def : Pat<(X86add_flag_nocf GR16:$src1, 128),
          (SUB16ri8 GR16:$src1, -128)>;
def : Pat<(X86add_flag_nocf GR32:$src1, 128),
          (SUB32ri8 GR32:$src1, -128)>;
def : Pat<(X86add_flag_nocf GR64:$src1, 128),
          (SUB64ri8 GR64:$src1, -128)>;
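// Illustrative size win (any register other than EAX):
//   addl $128, %ecx        # 81 C1 80 00 00 00   (imm32, 6 bytes)
//   subl $-128, %ecx       # 83 E9 80            (imm8,  3 bytes)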
// The same trick applies for 32-bit immediate fields in 64-bit
// operations.
def : Pat<(add GR64:$src1, 0x0000000080000000),
          (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
          (SUB64mi32 addr:$dst, 0xffffffff80000000)>;

def : Pat<(X86add_flag_nocf GR64:$src1, 0x0000000080000000),
          (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
// To avoid needing to materialize an immediate in a register, use a 32-bit and
// with implicit zero-extension instead of a 64-bit and if the immediate has at
// least 32 bits of leading zeros. If in addition the low 32 bits can be
// represented with a sign extension of an 8-bit constant, use that.
// This can also reduce instruction size by eliminating the need for the REX
// prefix.

// AddedComplexity is needed to give priority over i64immSExt8 and i64immSExt32.
let AddedComplexity = 1 in {
def : Pat<(and GR64:$src, i64immZExt32SExt8:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri8
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo32XForm imm:$imm))),
            sub_32bit)>;

def : Pat<(and GR64:$src, i64immZExt32:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo32XForm imm:$imm))),
            sub_32bit)>;
} // AddedComplexity = 1
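// Illustrative example for (and GR64:$src, 0x00000000FFFFFF00):
//   andl $0xffffff00, %edi # 32-bit and; bits 63:32 are implicitly zeroed,
//                          # avoiding a movabs + REX.W-encoded 64-bit and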
// AddedComplexity is needed due to the increased complexity on the
// i64immZExt32SExt8 and i64immZExt32 patterns above. Applying this to all
// the MOVZX patterns keeps them together in the DAGIsel tables.
let AddedComplexity = 1 in {
// r & (2^16-1) ==> movz
def : Pat<(and GR32:$src1, 0xffff),
          (MOVZX32rr16 (EXTRACT_SUBREG GR32:$src1, sub_16bit))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, sub_8bit))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
          (EXTRACT_SUBREG (MOVZX32rr8 (EXTRACT_SUBREG GR16:$src1, sub_8bit)),
                          sub_16bit)>;

// r & (2^32-1) ==> movz
def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
          (SUBREG_TO_REG (i64 0),
                         (MOV32rr (EXTRACT_SUBREG GR64:$src, sub_32bit)),
                         sub_32bit)>;
// r & (2^16-1) ==> movz
def : Pat<(and GR64:$src, 0xffff),
          (SUBREG_TO_REG (i64 0),
                         (MOVZX32rr16 (i16 (EXTRACT_SUBREG GR64:$src, sub_16bit))),
                         sub_32bit)>;
// r & (2^8-1) ==> movz
def : Pat<(and GR64:$src, 0xff),
          (SUBREG_TO_REG (i64 0),
                         (MOVZX32rr8 (i8 (EXTRACT_SUBREG GR64:$src, sub_8bit))),
                         sub_32bit)>;
} // AddedComplexity = 1
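// E.g. (and GR32:$src1, 0xff) becomes, illustratively:
//   movzbl %dil, %eax      # instead of a two-address andl $255, %edi,
//                          # which may need an extra copy if %edi is live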
// Try to use BTS/BTR/BTC for single-bit operations on the upper 32 bits.

def BTRXForm : SDNodeXForm<imm, [{
  // Transformation function: Find the lowest 0.
  return getI64Imm((uint8_t)N->getAPIntValue().countTrailingOnes(), SDLoc(N));
}]>;

def BTCBTSXForm : SDNodeXForm<imm, [{
  // Transformation function: Find the lowest 1.
  return getI64Imm((uint8_t)N->getAPIntValue().countTrailingZeros(), SDLoc(N));
}]>;

def BTRMask64 : ImmLeaf<i64, [{
  return !isUInt<32>(Imm) && !isInt<32>(Imm) && isPowerOf2_64(~Imm);
}]>;

def BTCBTSMask64 : ImmLeaf<i64, [{
  return !isInt<32>(Imm) && isPowerOf2_64(Imm);
}]>;

// For now only do this for optsize.
let AddedComplexity = 1, Predicates=[OptForSize] in {
def : Pat<(and GR64:$src1, BTRMask64:$mask),
          (BTR64ri8 GR64:$src1, (BTRXForm imm:$mask))>;
def : Pat<(or GR64:$src1, BTCBTSMask64:$mask),
          (BTS64ri8 GR64:$src1, (BTCBTSXForm imm:$mask))>;
def : Pat<(xor GR64:$src1, BTCBTSMask64:$mask),
          (BTC64ri8 GR64:$src1, (BTCBTSXForm imm:$mask))>;
} // AddedComplexity = 1, OptForSize
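// Illustrative size comparison for clearing bit 40 under optsize:
//   btrq $40, %rdi                      # 5 bytes
// versus materializing the mask:
//   movabsq $0xfffffeffffffffff, %rax   # 10 bytes
//   andq %rax, %rdi                     # 3 bytes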
// sext_inreg patterns
def : Pat<(sext_inreg GR32:$src, i16),
          (MOVSX32rr16 (EXTRACT_SUBREG GR32:$src, sub_16bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, sub_8bit))>;

def : Pat<(sext_inreg GR16:$src, i8),
          (EXTRACT_SUBREG (MOVSX32rr8 (EXTRACT_SUBREG GR16:$src, sub_8bit)),
                          sub_16bit)>;

def : Pat<(sext_inreg GR64:$src, i32),
          (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
def : Pat<(sext_inreg GR64:$src, i16),
          (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, sub_16bit))>;
def : Pat<(sext_inreg GR64:$src, i8),
          (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, sub_8bit))>;
// sext, sext_load, zext, zext_load
def : Pat<(i16 (sext GR8:$src)),
          (EXTRACT_SUBREG (MOVSX32rr8 GR8:$src), sub_16bit)>;
def : Pat<(sextloadi16i8 addr:$src),
          (EXTRACT_SUBREG (MOVSX32rm8 addr:$src), sub_16bit)>;
def : Pat<(i16 (zext GR8:$src)),
          (EXTRACT_SUBREG (MOVZX32rr8 GR8:$src), sub_16bit)>;
def : Pat<(zextloadi16i8 addr:$src),
          (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
// trunc patterns
def : Pat<(i16 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                          sub_8bit)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                          sub_8bit)>,
      Requires<[Not64BitMode]>;
def : Pat<(i32 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_32bit)>;
def : Pat<(i16 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_8bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_8bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG GR16:$src, sub_8bit)>,
      Requires<[In64BitMode]>;
def immff00_ffff : ImmLeaf<i32, [{
  return Imm >= 0xff00 && Imm <= 0xffff;
}]>;
// h-register tricks
def : Pat<(i8 (trunc (srl_su GR16:$src, (i8 8)))),
          (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc (srl_su (i32 (anyext GR16:$src)), (i8 8)))),
          (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc (srl_su GR32:$src, (i8 8)))),
          (EXTRACT_SUBREG GR32:$src, sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)),
            sub_16bit)>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR16:$src, sub_8bit_hi))>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR16:$src, sub_8bit_hi))>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>;
def : Pat<(srl (and_su GR32:$src, immff00_ffff), (i8 8)),
          (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>;
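// Illustrative example: with the input in %eax,
//   unsigned f(unsigned x) { return (x >> 8) & 0xff; }
// can select to a single h-register extract:
//   movzbl %ah, %eax       # no explicit shift or mask needed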
// h-register tricks.
// For now, be conservative on x86-64 and use an h-register extract only if the
// value is immediately zero-extended or stored, which are somewhat common
// cases. This uses a bunch of code to prevent a register requiring a REX prefix
// from being allocated in the same instruction as the h register, as there's
// currently no way to describe this requirement to the register allocator.

// h-register extract and zero-extend.
def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32rr8_NOREX
              (EXTRACT_SUBREG GR64:$src, sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(i64 (zext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32rr8_NOREX
              (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(i64 (anyext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32rr8_NOREX
              (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)),
            sub_32bit)>;

// h-register extract and store.
def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG GR64:$src, sub_8bit_hi))>;
def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG GR16:$src, sub_8bit_hi))>,
      Requires<[In64BitMode]>;
// Special pattern to catch the last step of __builtin_parity handling. Our
// goal is to use an xor of an h-register with the corresponding l-register.
// The above patterns would handle this on non-64-bit targets, but for 64-bit
// we need to be more careful. We're using a NOREX instruction here in case
// register allocation fails to keep the two registers together. So we need to
// make sure we can't accidentally mix R8-R15 with an h-register.
def : Pat<(X86xor_flag (i8 (trunc GR32:$src)),
                       (i8 (trunc (srl_su GR32:$src, (i8 8))))),
          (XOR8rr_NOREX (EXTRACT_SUBREG GR32:$src, sub_8bit),
                        (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>;
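// Illustrative tail of a __builtin_parity lowering, once the value has been
// folded down to 16 bits in %ax:
//   xorb %ah, %al          # XOR8rr_NOREX; PF reflects the combined byte
//   setnp %al              # 1 for odd parity of the original value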
// (shl x, 1) ==> (add x, x)
// Note that if x is undef (immediate or otherwise), we could theoretically
// end up with the two uses of x getting different values, producing a result
// where the least significant bit is not 0. However, the probability of this
// happening is considered low enough that this is officially not a
// "real problem".
def : Pat<(shl GR8 :$src1, (i8 1)), (ADD8rr  GR8 :$src1, GR8 :$src1)>;
def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr GR16:$src1, GR16:$src1)>;
def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>;
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
def shiftMask8 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
  return isUnneededShiftMask(N, 3);
}]>;

def shiftMask16 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
  return isUnneededShiftMask(N, 4);
}]>;

def shiftMask32 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
  return isUnneededShiftMask(N, 5);
}]>;

def shiftMask64 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
  return isUnneededShiftMask(N, 6);
}]>;
// Shift amount is implicitly masked.
multiclass MaskedShiftAmountPats<SDNode frag, string name> {
  // (shift x (and y, 31)) ==> (shift x, y)
  def : Pat<(frag GR8:$src1, (shiftMask32 CL)),
            (!cast<Instruction>(name # "8rCL") GR8:$src1)>;
  def : Pat<(frag GR16:$src1, (shiftMask32 CL)),
            (!cast<Instruction>(name # "16rCL") GR16:$src1)>;
  def : Pat<(frag GR32:$src1, (shiftMask32 CL)),
            (!cast<Instruction>(name # "32rCL") GR32:$src1)>;
  def : Pat<(store (frag (loadi8 addr:$dst), (shiftMask32 CL)), addr:$dst),
            (!cast<Instruction>(name # "8mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi16 addr:$dst), (shiftMask32 CL)), addr:$dst),
            (!cast<Instruction>(name # "16mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi32 addr:$dst), (shiftMask32 CL)), addr:$dst),
            (!cast<Instruction>(name # "32mCL") addr:$dst)>;

  // (shift x (and y, 63)) ==> (shift x, y)
  def : Pat<(frag GR64:$src1, (shiftMask64 CL)),
            (!cast<Instruction>(name # "64rCL") GR64:$src1)>;
  def : Pat<(store (frag (loadi64 addr:$dst), (shiftMask64 CL)), addr:$dst),
            (!cast<Instruction>(name # "64mCL") addr:$dst)>;
}
defm : MaskedShiftAmountPats<shl, "SHL">;
defm : MaskedShiftAmountPats<srl, "SHR">;
defm : MaskedShiftAmountPats<sra, "SAR">;
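// E.g. the C expression "x << (n & 31)" on a 32-bit x selects directly to,
// illustratively:
//   shll %cl, %eax         # hardware masks the count mod 32 already
// with no separate "andb $31, %cl" emitted.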
// ROL/ROR instructions allow a stronger mask optimization than shift for 8- and
// 16-bit. We can remove a mask of any (bitwidth - 1) on the rotation amount
// because over-rotating produces the same result. This is noted in the Intel
// docs with: "tempCOUNT <- (COUNT & COUNTMASK) MOD SIZE". Masking the rotation
// amount could affect EFLAGS results, but that does not matter because we are
// not tracking flags for these nodes.
multiclass MaskedRotateAmountPats<SDNode frag, string name> {
  // (rot x (and y, BitWidth - 1)) ==> (rot x, y)
  def : Pat<(frag GR8:$src1, (shiftMask8 CL)),
            (!cast<Instruction>(name # "8rCL") GR8:$src1)>;
  def : Pat<(frag GR16:$src1, (shiftMask16 CL)),
            (!cast<Instruction>(name # "16rCL") GR16:$src1)>;
  def : Pat<(frag GR32:$src1, (shiftMask32 CL)),
            (!cast<Instruction>(name # "32rCL") GR32:$src1)>;
  def : Pat<(store (frag (loadi8 addr:$dst), (shiftMask8 CL)), addr:$dst),
            (!cast<Instruction>(name # "8mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi16 addr:$dst), (shiftMask16 CL)), addr:$dst),
            (!cast<Instruction>(name # "16mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi32 addr:$dst), (shiftMask32 CL)), addr:$dst),
            (!cast<Instruction>(name # "32mCL") addr:$dst)>;

  // (rot x (and y, 63)) ==> (rot x, y)
  def : Pat<(frag GR64:$src1, (shiftMask64 CL)),
            (!cast<Instruction>(name # "64rCL") GR64:$src1)>;
  def : Pat<(store (frag (loadi64 addr:$dst), (shiftMask64 CL)), addr:$dst),
            (!cast<Instruction>(name # "64mCL") addr:$dst)>;
}
defm : MaskedRotateAmountPats<rotl, "ROL">;
defm : MaskedRotateAmountPats<rotr, "ROR">;
// Double "funnel" shift amount is implicitly masked.
// (fshl/fshr x (and y, 31)) ==> (fshl/fshr x, y) (NOTE: modulo 32)
def : Pat<(X86fshl GR16:$src1, GR16:$src2, (shiftMask32 CL)),
          (SHLD16rrCL GR16:$src1, GR16:$src2)>;
def : Pat<(X86fshr GR16:$src2, GR16:$src1, (shiftMask32 CL)),
          (SHRD16rrCL GR16:$src1, GR16:$src2)>;

// (fshl/fshr x (and y, 31)) ==> (fshl/fshr x, y)
def : Pat<(fshl GR32:$src1, GR32:$src2, (shiftMask32 CL)),
          (SHLD32rrCL GR32:$src1, GR32:$src2)>;
def : Pat<(fshr GR32:$src2, GR32:$src1, (shiftMask32 CL)),
          (SHRD32rrCL GR32:$src1, GR32:$src2)>;

// (fshl/fshr x (and y, 63)) ==> (fshl/fshr x, y)
def : Pat<(fshl GR64:$src1, GR64:$src2, (shiftMask64 CL)),
          (SHLD64rrCL GR64:$src1, GR64:$src2)>;
def : Pat<(fshr GR64:$src2, GR64:$src1, (shiftMask64 CL)),
          (SHRD64rrCL GR64:$src1, GR64:$src2)>;
let Predicates = [HasBMI2] in {
let AddedComplexity = 1 in {
def : Pat<(sra GR32:$src1, (shiftMask32 GR8:$src2)),
          (SARX32rr GR32:$src1,
                    (INSERT_SUBREG
                      (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
def : Pat<(sra GR64:$src1, (shiftMask64 GR8:$src2)),
          (SARX64rr GR64:$src1,
                    (INSERT_SUBREG
                      (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

def : Pat<(srl GR32:$src1, (shiftMask32 GR8:$src2)),
          (SHRX32rr GR32:$src1,
                    (INSERT_SUBREG
                      (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
def : Pat<(srl GR64:$src1, (shiftMask64 GR8:$src2)),
          (SHRX64rr GR64:$src1,
                    (INSERT_SUBREG
                      (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

def : Pat<(shl GR32:$src1, (shiftMask32 GR8:$src2)),
          (SHLX32rr GR32:$src1,
                    (INSERT_SUBREG
                      (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
def : Pat<(shl GR64:$src1, (shiftMask64 GR8:$src2)),
          (SHLX64rr GR64:$src1,
                    (INSERT_SUBREG
                      (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
} // AddedComplexity = 1

def : Pat<(sra (loadi32 addr:$src1), (shiftMask32 GR8:$src2)),
          (SARX32rm addr:$src1,
                    (INSERT_SUBREG
                      (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
def : Pat<(sra (loadi64 addr:$src1), (shiftMask64 GR8:$src2)),
          (SARX64rm addr:$src1,
                    (INSERT_SUBREG
                      (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

def : Pat<(srl (loadi32 addr:$src1), (shiftMask32 GR8:$src2)),
          (SHRX32rm addr:$src1,
                    (INSERT_SUBREG
                      (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
def : Pat<(srl (loadi64 addr:$src1), (shiftMask64 GR8:$src2)),
          (SHRX64rm addr:$src1,
                    (INSERT_SUBREG
                      (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

def : Pat<(shl (loadi32 addr:$src1), (shiftMask32 GR8:$src2)),
          (SHLX32rm addr:$src1,
                    (INSERT_SUBREG
                      (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
def : Pat<(shl (loadi64 addr:$src1), (shiftMask64 GR8:$src2)),
          (SHLX64rm addr:$src1,
                    (INSERT_SUBREG
                      (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
} // Predicates = [HasBMI2]
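// E.g. with BMI2, "x >> (n & 31)" can select to, illustratively:
//   shrxl %esi, %edi, %eax # count from any register, CL not needed,
//                          # EFLAGS untouched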
// Use BTR/BTS/BTC for clearing/setting/toggling a bit in a variable location.
multiclass one_bit_patterns<RegisterClass RC, ValueType VT, Instruction BTR,
                            Instruction BTS, Instruction BTC,
                            PatFrag ShiftMask> {
  def : Pat<(and RC:$src1, (rotl -2, GR8:$src2)),
            (BTR RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(or RC:$src1, (shl 1, GR8:$src2)),
            (BTS RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(xor RC:$src1, (shl 1, GR8:$src2)),
            (BTC RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

  // Similar to above, but removing unneeded masking of the shift amount.
  def : Pat<(and RC:$src1, (rotl -2, (ShiftMask GR8:$src2))),
            (BTR RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(or RC:$src1, (shl 1, (ShiftMask GR8:$src2))),
            (BTS RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(xor RC:$src1, (shl 1, (ShiftMask GR8:$src2))),
            (BTC RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
}

defm : one_bit_patterns<GR16, i16, BTR16rr, BTS16rr, BTC16rr, shiftMask16>;
defm : one_bit_patterns<GR32, i32, BTR32rr, BTS32rr, BTC32rr, shiftMask32>;
defm : one_bit_patterns<GR64, i64, BTR64rr, BTS64rr, BTC64rr, shiftMask64>;
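// E.g. clearing a variable bit, "x & ~(1u << n)", selects to, illustratively:
//   btrl %esi, %edi        # bit index is taken modulo 32 by the hardware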
//===----------------------------------------------------------------------===//
// EFLAGS-defining Patterns
//===----------------------------------------------------------------------===//

// add reg, reg
def : Pat<(add GR8 :$src1, GR8 :$src2), (ADD8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(add GR16:$src1, GR16:$src2), (ADD16rr GR16:$src1, GR16:$src2)>;
def : Pat<(add GR32:$src1, GR32:$src2), (ADD32rr GR32:$src1, GR32:$src2)>;
def : Pat<(add GR64:$src1, GR64:$src2), (ADD64rr GR64:$src1, GR64:$src2)>;

// add reg, mem
def : Pat<(add GR8:$src1, (loadi8 addr:$src2)),
          (ADD8rm GR8:$src1, addr:$src2)>;
def : Pat<(add GR16:$src1, (loadi16 addr:$src2)),
          (ADD16rm GR16:$src1, addr:$src2)>;
def : Pat<(add GR32:$src1, (loadi32 addr:$src2)),
          (ADD32rm GR32:$src1, addr:$src2)>;
def : Pat<(add GR64:$src1, (loadi64 addr:$src2)),
          (ADD64rm GR64:$src1, addr:$src2)>;

// add reg, imm
def : Pat<(add GR8 :$src1, imm:$src2), (ADD8ri  GR8:$src1,  imm:$src2)>;
def : Pat<(add GR16:$src1, imm:$src2), (ADD16ri GR16:$src1, imm:$src2)>;
def : Pat<(add GR32:$src1, imm:$src2), (ADD32ri GR32:$src1, imm:$src2)>;
def : Pat<(add GR16:$src1, i16immSExt8:$src2),
          (ADD16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(add GR32:$src1, i32immSExt8:$src2),
          (ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
// sub reg, reg
def : Pat<(sub GR8 :$src1, GR8 :$src2), (SUB8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(sub GR16:$src1, GR16:$src2), (SUB16rr GR16:$src1, GR16:$src2)>;
def : Pat<(sub GR32:$src1, GR32:$src2), (SUB32rr GR32:$src1, GR32:$src2)>;
def : Pat<(sub GR64:$src1, GR64:$src2), (SUB64rr GR64:$src1, GR64:$src2)>;

// sub reg, mem
def : Pat<(sub GR8:$src1, (loadi8 addr:$src2)),
          (SUB8rm GR8:$src1, addr:$src2)>;
def : Pat<(sub GR16:$src1, (loadi16 addr:$src2)),
          (SUB16rm GR16:$src1, addr:$src2)>;
def : Pat<(sub GR32:$src1, (loadi32 addr:$src2)),
          (SUB32rm GR32:$src1, addr:$src2)>;
def : Pat<(sub GR64:$src1, (loadi64 addr:$src2)),
          (SUB64rm GR64:$src1, addr:$src2)>;

// sub reg, imm
def : Pat<(sub GR8:$src1, imm:$src2),
          (SUB8ri GR8:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, imm:$src2),
          (SUB16ri GR16:$src1, imm:$src2)>;
def : Pat<(sub GR32:$src1, imm:$src2),
          (SUB32ri GR32:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, i16immSExt8:$src2),
          (SUB16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(sub GR32:$src1, i32immSExt8:$src2),
          (SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt8:$src2),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt32:$src2),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;
// sub 0, reg
def : Pat<(X86sub_flag 0, GR8 :$src), (NEG8r  GR8 :$src)>;
def : Pat<(X86sub_flag 0, GR16:$src), (NEG16r GR16:$src)>;
def : Pat<(X86sub_flag 0, GR32:$src), (NEG32r GR32:$src)>;
def : Pat<(X86sub_flag 0, GR64:$src), (NEG64r GR64:$src)>;
// mul reg, reg
def : Pat<(mul GR16:$src1, GR16:$src2),
          (IMUL16rr GR16:$src1, GR16:$src2)>;
def : Pat<(mul GR32:$src1, GR32:$src2),
          (IMUL32rr GR32:$src1, GR32:$src2)>;
def : Pat<(mul GR64:$src1, GR64:$src2),
          (IMUL64rr GR64:$src1, GR64:$src2)>;

// mul reg, mem
def : Pat<(mul GR16:$src1, (loadi16 addr:$src2)),
          (IMUL16rm GR16:$src1, addr:$src2)>;
def : Pat<(mul GR32:$src1, (loadi32 addr:$src2)),
          (IMUL32rm GR32:$src1, addr:$src2)>;
def : Pat<(mul GR64:$src1, (loadi64 addr:$src2)),
          (IMUL64rm GR64:$src1, addr:$src2)>;

// mul reg, imm
def : Pat<(mul GR16:$src1, imm:$src2),
          (IMUL16rri GR16:$src1, imm:$src2)>;
def : Pat<(mul GR32:$src1, imm:$src2),
          (IMUL32rri GR32:$src1, imm:$src2)>;
def : Pat<(mul GR16:$src1, i16immSExt8:$src2),
          (IMUL16rri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(mul GR32:$src1, i32immSExt8:$src2),
          (IMUL32rri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt8:$src2),
          (IMUL64rri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt32:$src2),
          (IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;

// reg = mul mem, imm
def : Pat<(mul (loadi16 addr:$src1), imm:$src2),
          (IMUL16rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), imm:$src2),
          (IMUL32rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi16 addr:$src1), i16immSExt8:$src2),
          (IMUL16rmi8 addr:$src1, i16immSExt8:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), i32immSExt8:$src2),
          (IMUL32rmi8 addr:$src1, i32immSExt8:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt8:$src2),
          (IMUL64rmi8 addr:$src1, i64immSExt8:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt32:$src2),
          (IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;
// Increment/Decrement reg.
// Do not use INC/DEC if they are slow on the target.
let Predicates = [UseIncDec] in {
  def : Pat<(add GR8:$src, 1),   (INC8r GR8:$src)>;
  def : Pat<(add GR16:$src, 1),  (INC16r GR16:$src)>;
  def : Pat<(add GR32:$src, 1),  (INC32r GR32:$src)>;
  def : Pat<(add GR64:$src, 1),  (INC64r GR64:$src)>;
  def : Pat<(add GR8:$src, -1),  (DEC8r GR8:$src)>;
  def : Pat<(add GR16:$src, -1), (DEC16r GR16:$src)>;
  def : Pat<(add GR32:$src, -1), (DEC32r GR32:$src)>;
  def : Pat<(add GR64:$src, -1), (DEC64r GR64:$src)>;

  def : Pat<(X86add_flag_nocf GR8:$src, -1),  (DEC8r GR8:$src)>;
  def : Pat<(X86add_flag_nocf GR16:$src, -1), (DEC16r GR16:$src)>;
  def : Pat<(X86add_flag_nocf GR32:$src, -1), (DEC32r GR32:$src)>;
  def : Pat<(X86add_flag_nocf GR64:$src, -1), (DEC64r GR64:$src)>;
  def : Pat<(X86sub_flag_nocf GR8:$src, -1),  (INC8r GR8:$src)>;
  def : Pat<(X86sub_flag_nocf GR16:$src, -1), (INC16r GR16:$src)>;
  def : Pat<(X86sub_flag_nocf GR32:$src, -1), (INC32r GR32:$src)>;
  def : Pat<(X86sub_flag_nocf GR64:$src, -1), (INC64r GR64:$src)>;
} // Predicates = [UseIncDec]
// or reg/reg
def : Pat<(or GR8 :$src1, GR8 :$src2), (OR8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(or GR16:$src1, GR16:$src2), (OR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(or GR32:$src1, GR32:$src2), (OR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(or GR64:$src1, GR64:$src2), (OR64rr GR64:$src1, GR64:$src2)>;

// or reg/mem
def : Pat<(or GR8:$src1, (loadi8 addr:$src2)),
          (OR8rm GR8:$src1, addr:$src2)>;
def : Pat<(or GR16:$src1, (loadi16 addr:$src2)),
          (OR16rm GR16:$src1, addr:$src2)>;
def : Pat<(or GR32:$src1, (loadi32 addr:$src2)),
          (OR32rm GR32:$src1, addr:$src2)>;
def : Pat<(or GR64:$src1, (loadi64 addr:$src2)),
          (OR64rm GR64:$src1, addr:$src2)>;

// or reg/imm
def : Pat<(or GR8 :$src1, imm:$src2), (OR8ri  GR8 :$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, imm:$src2), (OR16ri GR16:$src1, imm:$src2)>;
def : Pat<(or GR32:$src1, imm:$src2), (OR32ri GR32:$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, i16immSExt8:$src2),
          (OR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(or GR32:$src1, i32immSExt8:$src2),
          (OR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt8:$src2),
          (OR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt32:$src2),
          (OR64ri32 GR64:$src1, i64immSExt32:$src2)>;
// xor reg/reg
def : Pat<(xor GR8 :$src1, GR8 :$src2), (XOR8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(xor GR16:$src1, GR16:$src2), (XOR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(xor GR32:$src1, GR32:$src2), (XOR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(xor GR64:$src1, GR64:$src2), (XOR64rr GR64:$src1, GR64:$src2)>;

// xor reg/mem
def : Pat<(xor GR8:$src1, (loadi8 addr:$src2)),
          (XOR8rm GR8:$src1, addr:$src2)>;
def : Pat<(xor GR16:$src1, (loadi16 addr:$src2)),
          (XOR16rm GR16:$src1, addr:$src2)>;
def : Pat<(xor GR32:$src1, (loadi32 addr:$src2)),
          (XOR32rm GR32:$src1, addr:$src2)>;
def : Pat<(xor GR64:$src1, (loadi64 addr:$src2)),
          (XOR64rm GR64:$src1, addr:$src2)>;

// xor reg/imm
def : Pat<(xor GR8:$src1, imm:$src2),
          (XOR8ri GR8:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, imm:$src2),
          (XOR16ri GR16:$src1, imm:$src2)>;
def : Pat<(xor GR32:$src1, imm:$src2),
          (XOR32ri GR32:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, i16immSExt8:$src2),
          (XOR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(xor GR32:$src1, i32immSExt8:$src2),
          (XOR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt8:$src2),
          (XOR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt32:$src2),
          (XOR64ri32 GR64:$src1, i64immSExt32:$src2)>;
// and reg/reg
def : Pat<(and GR8 :$src1, GR8 :$src2), (AND8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(and GR16:$src1, GR16:$src2), (AND16rr GR16:$src1, GR16:$src2)>;
def : Pat<(and GR32:$src1, GR32:$src2), (AND32rr GR32:$src1, GR32:$src2)>;
def : Pat<(and GR64:$src1, GR64:$src2), (AND64rr GR64:$src1, GR64:$src2)>;

// and reg/mem
def : Pat<(and GR8:$src1, (loadi8 addr:$src2)),
          (AND8rm GR8:$src1, addr:$src2)>;
def : Pat<(and GR16:$src1, (loadi16 addr:$src2)),
          (AND16rm GR16:$src1, addr:$src2)>;
def : Pat<(and GR32:$src1, (loadi32 addr:$src2)),
          (AND32rm GR32:$src1, addr:$src2)>;
def : Pat<(and GR64:$src1, (loadi64 addr:$src2)),
          (AND64rm GR64:$src1, addr:$src2)>;

// and reg/imm
def : Pat<(and GR8:$src1, imm:$src2),
          (AND8ri GR8:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, imm:$src2),
          (AND16ri GR16:$src1, imm:$src2)>;
def : Pat<(and GR32:$src1, imm:$src2),
          (AND32ri GR32:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, i16immSExt8:$src2),
          (AND16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(and GR32:$src1, i32immSExt8:$src2),
          (AND32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt8:$src2),
          (AND64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt32:$src2),
          (AND64ri32 GR64:$src1, i64immSExt32:$src2)>;
// Bit scan instruction patterns to match explicit zero-undef behavior.
def : Pat<(cttz_zero_undef GR16:$src), (BSF16rr GR16:$src)>;
def : Pat<(cttz_zero_undef GR32:$src), (BSF32rr GR32:$src)>;
def : Pat<(cttz_zero_undef GR64:$src), (BSF64rr GR64:$src)>;
def : Pat<(cttz_zero_undef (loadi16 addr:$src)), (BSF16rm addr:$src)>;
def : Pat<(cttz_zero_undef (loadi32 addr:$src)), (BSF32rm addr:$src)>;
def : Pat<(cttz_zero_undef (loadi64 addr:$src)), (BSF64rm addr:$src)>;
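// E.g. __builtin_ctz(x), whose result is undefined for x == 0, maps directly
// to BSF (illustrative):
//   bsfl %edi, %eax        # destination undefined when the source is zero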
// When HasMOVBE is enabled it is possible to get a non-legalized
// register-register 16-bit bswap. This maps it to a ROL instruction.
let Predicates = [HasMOVBE] in {
  def : Pat<(bswap GR16:$src), (ROL16ri GR16:$src, (i8 8))>;