//===- X86InstrCompiler.td - Compiler Pseudos and Patterns -*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the various pseudo instructions used by the compiler,
// as well as Pat patterns used during instruction selection.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Pattern Matching Support

def GetLo32XForm : SDNodeXForm<imm, [{
  // Transformation function: get the low 32 bits.
  return getI32Imm((uint32_t)N->getZExtValue(), SDLoc(N));
}]>;

//===----------------------------------------------------------------------===//
// Random Pseudo Instructions.

// PIC base construction.  This expands to code that looks like this:
//     call  $next_inst
//     popl %destreg
let hasSideEffects = 0, isNotDuplicable = 1, Uses = [ESP, SSP],
    SchedRW = [WriteJump] in
  def MOVPC32r : Ii32<0xE8, Pseudo, (outs GR32:$reg), (ins i32imm:$label),
                      "", []>;

// ADJCALLSTACKDOWN/UP implicitly use/def ESP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
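//
// For example, a call needing 16 bytes of outgoing argument space may
// ultimately lower to something like (an illustrative sketch, not the exact
// output):
//     subl $16, %esp
//     ... set up arguments and make the call ...
//     addl $16, %esp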
let Defs = [ESP, EFLAGS, SSP], Uses = [ESP, SSP], SchedRW = [WriteALU] in {
def ADJCALLSTACKDOWN32 : I<0, Pseudo, (outs),
                           (ins i32imm:$amt1, i32imm:$amt2, i32imm:$amt3),
                           "#ADJCALLSTACKDOWN", []>, Requires<[NotLP64]>;
def ADJCALLSTACKUP32   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                           Requires<[NotLP64]>;
}
def : Pat<(X86callseq_start timm:$amt1, timm:$amt2),
          (ADJCALLSTACKDOWN32 i32imm:$amt1, i32imm:$amt2, 0)>, Requires<[NotLP64]>;

// ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [RSP, EFLAGS, SSP], Uses = [RSP, SSP], SchedRW = [WriteALU] in {
def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs),
                           (ins i32imm:$amt1, i32imm:$amt2, i32imm:$amt3),
                           "#ADJCALLSTACKDOWN", []>, Requires<[IsLP64]>;
def ADJCALLSTACKUP64   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                           Requires<[IsLP64]>;
}
def : Pat<(X86callseq_start timm:$amt1, timm:$amt2),
          (ADJCALLSTACKDOWN64 i32imm:$amt1, i32imm:$amt2, 0)>, Requires<[IsLP64]>;

let SchedRW = [WriteSystem] in {

// x86-64 va_start lowering magic.
let usesCustomInserter = 1, Defs = [EFLAGS] in {
def VASTART_SAVE_XMM_REGS : I<0, Pseudo,
                              (outs),
                              (ins GR8:$al,
                                   i64imm:$regsavefi, i64imm:$offset,
                                   variable_ops),
                              "#VASTART_SAVE_XMM_REGS $al, $regsavefi, $offset",
                              [(X86vastart_save_xmm_regs GR8:$al,
                                                         imm:$regsavefi,
                                                         imm:$offset),
                               (implicit EFLAGS)]>;

// The VAARG_64 pseudo-instruction takes the address of the va_list,
// and places the address of the next argument into a register.
let Defs = [EFLAGS] in
def VAARG_64 : I<0, Pseudo,
                 (outs GR64:$dst),
                 (ins i8mem:$ap, i32imm:$size, i8imm:$mode, i32imm:$align),
                 "#VAARG_64 $dst, $ap, $size, $mode, $align",
                 [(set GR64:$dst,
                    (X86vaarg64 addr:$ap, imm:$size, imm:$mode, imm:$align)),
                  (implicit EFLAGS)]>;
}

// When using segmented stacks these are lowered into instructions which first
// check if the current stacklet has enough free memory. If it does, memory is
// allocated by bumping the stack pointer. Otherwise memory is allocated from
// the heap.
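//
// Rough shape of the expansion (illustrative only; the custom inserter in
// X86ISelLowering.cpp emits the real code, and the stack-limit TCB slot is
// OS-specific):
//     cmp <stacklet limit>, %esp
//     jae .Lok                  # enough room: allocate by bumping ESP
//     <call the morestack allocation routine>
//   .Lok: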
let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
def SEG_ALLOCA_32 : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$size),
                      "# variable sized alloca for segmented stacks",
                      [(set GR32:$dst,
                         (X86SegAlloca GR32:$size))]>,
                    Requires<[NotLP64]>;

let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
def SEG_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
                      "# variable sized alloca for segmented stacks",
                      [(set GR64:$dst,
                         (X86SegAlloca GR64:$size))]>,
                    Requires<[In64BitMode]>;

// To protect against stack clash, dynamic allocation should perform a memory
// probe at each page.
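//
// Illustrative sketch only (the custom inserter emits the real loop): the
// allocation proceeds one page at a time, touching each newly allocated page
// so the OS guard page is always hit in order, e.g.
//     subq $4096, %rsp
//     movq $0, (%rsp)
//     ... repeat until the requested size is reached ...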
let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
def PROBED_ALLOCA_32 : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$size),
                      "# variable sized alloca with probing",
                      [(set GR32:$dst,
                         (X86ProbedAlloca GR32:$size))]>,
                    Requires<[NotLP64]>;

let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
def PROBED_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
                      "# variable sized alloca with probing",
                      [(set GR64:$dst,
                         (X86ProbedAlloca GR64:$size))]>,
                    Requires<[In64BitMode]>;

let hasNoSchedulingInfo = 1 in
def STACKALLOC_W_PROBING : I<0, Pseudo, (outs), (ins i64imm:$stacksize),
                             "# fixed size alloca with probing",
                             []>;

// Dynamic stack allocation yields a _chkstk or _alloca call for all Windows
// targets. These calls are needed to probe the stack when allocating more than
// 4K bytes in one go. Touching the stack at 4K increments is necessary to
// ensure that the guard pages used by the OS virtual memory manager are
// allocated in correct sequence.
// The main point of having a separate instruction is the extra unmodelled
// effects it has (compared to ordinary calls), like the stack pointer change.
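//
// Illustrative Win64 shape (a sketch; the real sequence is chosen during
// lowering): __chkstk takes the size in RAX and probes the pages, and the
// caller then adjusts the stack pointer itself:
//     movq <size>, %rax
//     callq __chkstk
//     subq %rax, %rsp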
let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
def WIN_ALLOCA_32 : I<0, Pseudo, (outs), (ins GR32:$size),
                      "# dynamic stack allocation",
                      [(X86WinAlloca GR32:$size)]>,
                    Requires<[NotLP64]>;

let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
def WIN_ALLOCA_64 : I<0, Pseudo, (outs), (ins GR64:$size),
                      "# dynamic stack allocation",
                      [(X86WinAlloca GR64:$size)]>,
                    Requires<[In64BitMode]>;
} // SchedRW

// These instructions XOR the frame pointer into a GPR. They are used in some
// stack protection schemes. These are post-RA pseudos because we only know the
// frame register after register allocation.
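//
// E.g. once the frame register is known to be RBP, "XOR64_FP $rax" is
// emitted as (illustrative):
//     xorq %rbp, %rax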
let Constraints = "$src = $dst", isMoveImm = 1, isPseudo = 1, Defs = [EFLAGS] in {
def XOR32_FP : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src),
                 "xorl\t$$FP, $src", []>,
               Requires<[NotLP64]>, Sched<[WriteALU]>;
def XOR64_FP : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src),
                 "xorq\t$$FP, $src", []>,
               Requires<[In64BitMode]>, Sched<[WriteALU]>;
}

//===----------------------------------------------------------------------===//
// EH Pseudo Instructions
//
let SchedRW = [WriteSystem] in {
let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN   : I<0xC3, RawFrm, (outs), (ins GR32:$addr),
                    "ret\t#eh_return, addr: $addr",
                    [(X86ehret GR32:$addr)]>, Sched<[WriteJumpLd]>;
}

let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN64 : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
                    "ret\t#eh_return, addr: $addr",
                    [(X86ehret GR64:$addr)]>, Sched<[WriteJumpLd]>;
}

let isTerminator = 1, hasSideEffects = 1, isBarrier = 1, hasCtrlDep = 1,
    isCodeGenOnly = 1, isReturn = 1, isEHScopeReturn = 1 in {
  def CLEANUPRET : I<0, Pseudo, (outs), (ins), "# CLEANUPRET", [(cleanupret)]>;

  // CATCHRET needs a custom inserter for SEH.
  let usesCustomInserter = 1 in
    def CATCHRET : I<0, Pseudo, (outs), (ins brtarget32:$dst, brtarget32:$from),
                     "# CATCHRET",
                     [(catchret bb:$dst, bb:$from)]>;
}

let hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1,
    usesCustomInserter = 1 in {
  def EH_SjLj_SetJmp32  : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$buf),
                            "#EH_SJLJ_SETJMP32",
                            [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
                          Requires<[Not64BitMode]>;
  def EH_SjLj_SetJmp64  : I<0, Pseudo, (outs GR32:$dst), (ins i64mem:$buf),
                            "#EH_SJLJ_SETJMP64",
                            [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
                          Requires<[In64BitMode]>;
  let isTerminator = 1 in {
  def EH_SjLj_LongJmp32 : I<0, Pseudo, (outs), (ins i32mem:$buf),
                            "#EH_SJLJ_LONGJMP32",
                            [(X86eh_sjlj_longjmp addr:$buf)]>,
                          Requires<[Not64BitMode]>;
  def EH_SjLj_LongJmp64 : I<0, Pseudo, (outs), (ins i64mem:$buf),
                            "#EH_SJLJ_LONGJMP64",
                            [(X86eh_sjlj_longjmp addr:$buf)]>,
                          Requires<[In64BitMode]>;
  }
}
} // SchedRW

let isBranch = 1, isTerminator = 1, isCodeGenOnly = 1 in {
  def EH_SjLj_Setup : I<0, Pseudo, (outs), (ins brtarget:$dst),
                        "#EH_SjLj_Setup\t$dst", []>;
}

//===----------------------------------------------------------------------===//
// Pseudo instructions used by unwind info.
//
let isPseudo = 1, SchedRW = [WriteSystem] in {
  def SEH_PushReg : I<0, Pseudo, (outs), (ins i32imm:$reg),
                            "#SEH_PushReg $reg", []>;
  def SEH_SaveReg : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$dst),
                            "#SEH_SaveReg $reg, $dst", []>;
  def SEH_SaveXMM : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$dst),
                            "#SEH_SaveXMM $reg, $dst", []>;
  def SEH_StackAlloc : I<0, Pseudo, (outs), (ins i32imm:$size),
                            "#SEH_StackAlloc $size", []>;
  def SEH_StackAlign : I<0, Pseudo, (outs), (ins i32imm:$align),
                            "#SEH_StackAlign $align", []>;
  def SEH_SetFrame : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$offset),
                            "#SEH_SetFrame $reg, $offset", []>;
  def SEH_PushFrame : I<0, Pseudo, (outs), (ins i1imm:$mode),
                            "#SEH_PushFrame $mode", []>;
  def SEH_EndPrologue : I<0, Pseudo, (outs), (ins),
                            "#SEH_EndPrologue", []>;
  def SEH_Epilogue : I<0, Pseudo, (outs), (ins),
                            "#SEH_Epilogue", []>;
}

//===----------------------------------------------------------------------===//
// Pseudo instructions used by segmented stacks.
//

// This is lowered into a RET instruction by MCInstLower. We need
// this so that we don't have to have a MachineBasicBlock which ends
// with a RET and also has successors.
let isPseudo = 1, SchedRW = [WriteJumpLd] in {
def MORESTACK_RET: I<0, Pseudo, (outs), (ins), "", []>;

// This instruction is lowered to a RET followed by a MOV. The two
// instructions are not generated on a higher level since then the
// verifier sees a MachineBasicBlock ending with a non-terminator.
def MORESTACK_RET_RESTORE_R10 : I<0, Pseudo, (outs), (ins), "", []>;
}

//===----------------------------------------------------------------------===//
// Alias Instructions
//===----------------------------------------------------------------------===//

// Alias instruction mapping movr0 to xor.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
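// MOV32r0 is later expanded to the idiomatic 2-byte "xorl %eax, %eax" rather
// than the 5-byte "movl $0, %eax"; the xor form clobbers EFLAGS, which is why
// EFLAGS appears in Defs below.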
let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1,
    isPseudo = 1, isMoveImm = 1, AddedComplexity = 10 in
def MOV32r0  : I<0, Pseudo, (outs GR32:$dst), (ins), "",
                 [(set GR32:$dst, 0)]>, Sched<[WriteZero]>;

// Other widths can also make use of the 32-bit xor, which may have a smaller
// encoding and avoid partial register updates.
let AddedComplexity = 10 in {
def : Pat<(i8 0), (EXTRACT_SUBREG (MOV32r0), sub_8bit)>;
def : Pat<(i16 0), (EXTRACT_SUBREG (MOV32r0), sub_16bit)>;
def : Pat<(i64 0), (SUBREG_TO_REG (i64 0), (MOV32r0), sub_32bit)>;
}

let Predicates = [OptForSize, Not64BitMode],
    AddedComplexity = 10 in {
  let SchedRW = [WriteALU] in {
  // Pseudo instructions for materializing 1 and -1 using XOR+INC/DEC,
  // which only require 3 bytes compared to MOV32ri which requires 5.
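  // E.g. MOV32r1 expands to roughly:
  //     xorl %eax, %eax          # 2 bytes
  //     incl %eax                # 1 byte
  // versus the 5-byte "movl $1, %eax".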
  let Defs = [EFLAGS], isReMaterializable = 1, isPseudo = 1 in {
    def MOV32r1 : I<0, Pseudo, (outs GR32:$dst), (ins), "",
                    [(set GR32:$dst, 1)]>;
    def MOV32r_1 : I<0, Pseudo, (outs GR32:$dst), (ins), "",
                     [(set GR32:$dst, -1)]>;
  }
  } // SchedRW

  // MOV16ri is 4 bytes, so the instructions above are smaller.
  def : Pat<(i16 1), (EXTRACT_SUBREG (MOV32r1), sub_16bit)>;
  def : Pat<(i16 -1), (EXTRACT_SUBREG (MOV32r_1), sub_16bit)>;
}

let isReMaterializable = 1, isPseudo = 1, AddedComplexity = 5,
    SchedRW = [WriteALU] in {
// AddedComplexity higher than MOV64ri but lower than MOV32r0 and MOV32r1.
def MOV32ImmSExti8 : I<0, Pseudo, (outs GR32:$dst), (ins i32i8imm:$src), "",
                       [(set GR32:$dst, i32immSExt8:$src)]>,
                       Requires<[OptForMinSize, NotWin64WithoutFP]>;
def MOV64ImmSExti8 : I<0, Pseudo, (outs GR64:$dst), (ins i64i8imm:$src), "",
                       [(set GR64:$dst, i64immSExt8:$src)]>,
                       Requires<[OptForMinSize, NotWin64WithoutFP]>;
}

// Materialize an i64 constant where the top 32 bits are zero. This could
// theoretically use MOV32ri with a SUBREG_TO_REG to represent the
// zero-extension, however that would make it more difficult to rematerialize.
let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1,
    isPseudo = 1, SchedRW = [WriteMove] in
def MOV32ri64 : I<0, Pseudo, (outs GR64:$dst), (ins i64i32imm:$src), "",
                  [(set GR64:$dst, i64immZExt32:$src)]>;

// This 64-bit pseudo-move can also be used for labels in the x86-64 small code
// model.
def mov64imm32 : ComplexPattern<i64, 1, "selectMOV64Imm32", [X86Wrapper]>;
def : Pat<(i64 mov64imm32:$src), (MOV32ri64 mov64imm32:$src)>;

// Use sbb to materialize carry bit.
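// E.g. "sbb %eax, %eax" computes EAX = EAX - (EAX + CF) = -CF, yielding an
// all-ones mask when the carry flag is set and zero otherwise.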
let Uses = [EFLAGS], Defs = [EFLAGS], isPseudo = 1, SchedRW = [WriteADC],
    hasSideEffects = 0 in {
// FIXME: These are pseudo ops that should be replaced with Pat<> patterns.
// However, Pat<> can't replicate the destination reg into the inputs of the
// result.
def SETB_C32r : I<0, Pseudo, (outs GR32:$dst), (ins), "", []>;
def SETB_C64r : I<0, Pseudo, (outs GR64:$dst), (ins), "", []>;
}

//===----------------------------------------------------------------------===//
// String Pseudo Instructions
//
let SchedRW = [WriteMicrocoded] in {
let Defs = [ECX,EDI,ESI], Uses = [ECX,EDI,ESI], isCodeGenOnly = 1 in {
def REP_MOVSB_32 : I<0xA4, RawFrm, (outs), (ins),
                    "{rep;movsb (%esi), %es:(%edi)|rep movsb es:[edi], [esi]}",
                    [(X86rep_movs i8)]>, REP, AdSize32,
                   Requires<[NotLP64]>;
def REP_MOVSW_32 : I<0xA5, RawFrm, (outs), (ins),
                    "{rep;movsw (%esi), %es:(%edi)|rep movsw es:[edi], [esi]}",
                    [(X86rep_movs i16)]>, REP, AdSize32, OpSize16,
                   Requires<[NotLP64]>;
def REP_MOVSD_32 : I<0xA5, RawFrm, (outs), (ins),
                    "{rep;movsl (%esi), %es:(%edi)|rep movsd es:[edi], [esi]}",
                    [(X86rep_movs i32)]>, REP, AdSize32, OpSize32,
                   Requires<[NotLP64]>;
def REP_MOVSQ_32 : RI<0xA5, RawFrm, (outs), (ins),
                     "{rep;movsq (%esi), %es:(%edi)|rep movsq es:[edi], [esi]}",
                     [(X86rep_movs i64)]>, REP, AdSize32,
                    Requires<[NotLP64, In64BitMode]>;
}

let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI], isCodeGenOnly = 1 in {
def REP_MOVSB_64 : I<0xA4, RawFrm, (outs), (ins),
                    "{rep;movsb (%rsi), %es:(%rdi)|rep movsb es:[rdi], [rsi]}",
                    [(X86rep_movs i8)]>, REP, AdSize64,
                   Requires<[IsLP64]>;
def REP_MOVSW_64 : I<0xA5, RawFrm, (outs), (ins),
                    "{rep;movsw (%rsi), %es:(%rdi)|rep movsw es:[rdi], [rsi]}",
                    [(X86rep_movs i16)]>, REP, AdSize64, OpSize16,
                   Requires<[IsLP64]>;
def REP_MOVSD_64 : I<0xA5, RawFrm, (outs), (ins),
                    "{rep;movsl (%rsi), %es:(%rdi)|rep movsd es:[rdi], [rsi]}",
                    [(X86rep_movs i32)]>, REP, AdSize64, OpSize32,
                   Requires<[IsLP64]>;
def REP_MOVSQ_64 : RI<0xA5, RawFrm, (outs), (ins),
                     "{rep;movsq (%rsi), %es:(%rdi)|rep movsq es:[rdi], [rsi]}",
                     [(X86rep_movs i64)]>, REP, AdSize64,
                    Requires<[IsLP64]>;
}

// FIXME: Should use "(X86rep_stos AL)" as the pattern.
let Defs = [ECX,EDI], isCodeGenOnly = 1 in {
  let Uses = [AL,ECX,EDI] in
  def REP_STOSB_32 : I<0xAA, RawFrm, (outs), (ins),
                      "{rep;stosb %al, %es:(%edi)|rep stosb es:[edi], al}",
                      [(X86rep_stos i8)]>, REP, AdSize32,
                     Requires<[NotLP64]>;
  let Uses = [AX,ECX,EDI] in
  def REP_STOSW_32 : I<0xAB, RawFrm, (outs), (ins),
                      "{rep;stosw %ax, %es:(%edi)|rep stosw es:[edi], ax}",
                      [(X86rep_stos i16)]>, REP, AdSize32, OpSize16,
                     Requires<[NotLP64]>;
  let Uses = [EAX,ECX,EDI] in
  def REP_STOSD_32 : I<0xAB, RawFrm, (outs), (ins),
                      "{rep;stosl %eax, %es:(%edi)|rep stosd es:[edi], eax}",
                      [(X86rep_stos i32)]>, REP, AdSize32, OpSize32,
                     Requires<[NotLP64]>;
  let Uses = [RAX,RCX,RDI] in
  def REP_STOSQ_32 : RI<0xAB, RawFrm, (outs), (ins),
                       "{rep;stosq %rax, %es:(%edi)|rep stosq es:[edi], rax}",
                       [(X86rep_stos i64)]>, REP, AdSize32,
                      Requires<[NotLP64, In64BitMode]>;
}

let Defs = [RCX,RDI], isCodeGenOnly = 1 in {
  let Uses = [AL,RCX,RDI] in
  def REP_STOSB_64 : I<0xAA, RawFrm, (outs), (ins),
                      "{rep;stosb %al, %es:(%rdi)|rep stosb es:[rdi], al}",
                      [(X86rep_stos i8)]>, REP, AdSize64,
                     Requires<[IsLP64]>;
  let Uses = [AX,RCX,RDI] in
  def REP_STOSW_64 : I<0xAB, RawFrm, (outs), (ins),
                      "{rep;stosw %ax, %es:(%rdi)|rep stosw es:[rdi], ax}",
                      [(X86rep_stos i16)]>, REP, AdSize64, OpSize16,
                     Requires<[IsLP64]>;
  let Uses = [RAX,RCX,RDI] in
  def REP_STOSD_64 : I<0xAB, RawFrm, (outs), (ins),
                      "{rep;stosl %eax, %es:(%rdi)|rep stosd es:[rdi], eax}",
                      [(X86rep_stos i32)]>, REP, AdSize64, OpSize32,
                     Requires<[IsLP64]>;
  let Uses = [RAX,RCX,RDI] in
  def REP_STOSQ_64 : RI<0xAB, RawFrm, (outs), (ins),
                       "{rep;stosq %rax, %es:(%rdi)|rep stosq es:[rdi], rax}",
                       [(X86rep_stos i64)]>, REP, AdSize64,
                      Requires<[IsLP64]>;
}
} // SchedRW

//===----------------------------------------------------------------------===//
// Thread Local Storage Instructions
//
let SchedRW = [WriteSystem] in {

// ELF TLS Support
// All calls clobber the non-callee saved registers. ESP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,
            ST0, ST1, ST2, ST3, ST4, ST5, ST6, ST7,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS, DF],
    usesCustomInserter = 1, Uses = [ESP, SSP] in {
def TLS_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                  "# TLS_addr32",
                  [(X86tlsaddr tls32addr:$sym)]>,
                  Requires<[Not64BitMode]>;
def TLS_base_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                  "# TLS_base_addr32",
                  [(X86tlsbaseaddr tls32baseaddr:$sym)]>,
                  Requires<[Not64BitMode]>;
}

// All calls clobber the non-callee saved registers. RSP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
            FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,
            ST0, ST1, ST2, ST3, ST4, ST5, ST6, ST7,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS, DF],
    usesCustomInserter = 1, Uses = [RSP, SSP] in {
def TLS_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLS_addr64",
                  [(X86tlsaddr tls64addr:$sym)]>,
                  Requires<[In64BitMode]>;
def TLS_base_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLS_base_addr64",
                  [(X86tlsbaseaddr tls64baseaddr:$sym)]>,
                  Requires<[In64BitMode]>;
}

// Darwin TLS Support
// For i386, the address of the thunk is passed on the stack, on return the
// address of the variable is in %eax. %ecx is trashed during the function
// call. All other registers are preserved.
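//
// Rough shape of the expansion (illustrative; the custom inserter emits the
// real code): the thunk is invoked indirectly through the TLV descriptor,
// e.g.
//     movl <descriptor>, %eax
//     calll *(%eax)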
let Defs = [EAX, ECX, EFLAGS, DF],
    Uses = [ESP, SSP],
    usesCustomInserter = 1 in
def TLSCall_32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                "# TLSCall_32",
                [(X86TLSCall addr:$sym)]>,
                Requires<[Not64BitMode]>;

// For x86_64, the address of the thunk is passed in %rdi, but the
// pseudo directly uses the symbol, so do not add an implicit use of
// %rdi. The lowering will do the right thing with RDI.
// On return the address of the variable is in %rax. All other
// registers are preserved.
let Defs = [RAX, EFLAGS, DF],
    Uses = [RSP, SSP],
    usesCustomInserter = 1 in
def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                  "# TLSCall_64",
                  [(X86TLSCall addr:$sym)]>,
                  Requires<[In64BitMode]>;
} // SchedRW

//===----------------------------------------------------------------------===//
// Conditional Move Pseudo Instructions

// CMOV* - Used to implement the SELECT DAG operation. Expanded after
// instruction selection into a branch sequence.
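//
// Illustrative shape of the expansion (the custom inserter builds the real
// CFG): a conditional branch over the two values, joined by a PHI:
//         jcc   .Luse_t         # branch on the condition in EFLAGS
//         <result = $f>
//         jmp   .Ldone
//   .Luse_t:
//         <result = $t>
//   .Ldone: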
multiclass CMOVrr_PSEUDO<RegisterClass RC, ValueType VT> {
  def CMOV#NAME : I<0, Pseudo,
                    (outs RC:$dst), (ins RC:$t, RC:$f, i8imm:$cond),
                    "#CMOV_"#NAME#" PSEUDO!",
                    [(set RC:$dst, (VT (X86cmov RC:$t, RC:$f, timm:$cond,
                                                EFLAGS)))]>;
}

let usesCustomInserter = 1, hasNoSchedulingInfo = 1, Uses = [EFLAGS] in {
  // X86 doesn't have 8-bit conditional moves. Use a customInserter to
  // emit control flow. An alternative to this is to mark i8 SELECT as Promote,
  // however that requires promoting the operands, and can induce additional
  // i8 register pressure.
  defm _GR8 : CMOVrr_PSEUDO<GR8, i8>;

  let Predicates = [NoCMov] in {
    defm _GR32 : CMOVrr_PSEUDO<GR32, i32>;
    defm _GR16 : CMOVrr_PSEUDO<GR16, i16>;
  } // Predicates = [NoCMov]

  // fcmov doesn't handle all possible EFLAGS, provide a fallback if there is
  // no SSE1/SSE2.
  let Predicates = [FPStackf32] in
    defm _RFP32 : CMOVrr_PSEUDO<RFP32, f32>;

  let Predicates = [FPStackf64] in
    defm _RFP64 : CMOVrr_PSEUDO<RFP64, f64>;

  defm _RFP80 : CMOVrr_PSEUDO<RFP80, f80>;

  let Predicates = [HasMMX] in
    defm _VR64 : CMOVrr_PSEUDO<VR64, x86mmx>;

  let Predicates = [HasSSE1,NoAVX512] in
    defm _FR32 : CMOVrr_PSEUDO<FR32, f32>;
  let Predicates = [HasSSE2,NoAVX512] in
    defm _FR64 : CMOVrr_PSEUDO<FR64, f64>;
  let Predicates = [HasAVX512] in {
    defm _FR32X : CMOVrr_PSEUDO<FR32X, f32>;
    defm _FR64X : CMOVrr_PSEUDO<FR64X, f64>;
  }
  let Predicates = [NoVLX] in {
    defm _VR128 : CMOVrr_PSEUDO<VR128, v2i64>;
    defm _VR256 : CMOVrr_PSEUDO<VR256, v4i64>;
  }
  let Predicates = [HasVLX] in {
    defm _VR128X : CMOVrr_PSEUDO<VR128X, v2i64>;
    defm _VR256X : CMOVrr_PSEUDO<VR256X, v4i64>;
  }
  defm _VR512 : CMOVrr_PSEUDO<VR512, v8i64>;
  defm _VK1 : CMOVrr_PSEUDO<VK1, v1i1>;
  defm _VK2 : CMOVrr_PSEUDO<VK2, v2i1>;
  defm _VK4 : CMOVrr_PSEUDO<VK4, v4i1>;
  defm _VK8 : CMOVrr_PSEUDO<VK8, v8i1>;
  defm _VK16 : CMOVrr_PSEUDO<VK16, v16i1>;
  defm _VK32 : CMOVrr_PSEUDO<VK32, v32i1>;
  defm _VK64 : CMOVrr_PSEUDO<VK64, v64i1>;
} // usesCustomInserter = 1, hasNoSchedulingInfo = 1, Uses = [EFLAGS]

def : Pat<(f128 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
          (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;

let Predicates = [NoVLX] in {
  def : Pat<(v16i8 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
  def : Pat<(v8i16 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
  def : Pat<(v4i32 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
  def : Pat<(v4f32 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
  def : Pat<(v2f64 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;

  def : Pat<(v32i8 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
  def : Pat<(v16i16 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
  def : Pat<(v8i32 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
  def : Pat<(v8f32 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
  def : Pat<(v4f64 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
}

let Predicates = [HasVLX] in {
  def : Pat<(v16i8 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
  def : Pat<(v8i16 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
  def : Pat<(v4i32 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
  def : Pat<(v4f32 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
  def : Pat<(v2f64 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;

  def : Pat<(v32i8 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
  def : Pat<(v16i16 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
  def : Pat<(v8i32 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
  def : Pat<(v8f32 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
  def : Pat<(v4f64 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
}

def : Pat<(v64i8 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
          (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
def : Pat<(v32i16 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
          (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
def : Pat<(v16i32 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
          (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
def : Pat<(v16f32 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
          (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
def : Pat<(v8f64 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
          (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;

//===----------------------------------------------------------------------===//
// Normal-Instructions-With-Lock-Prefix Pseudo Instructions
//===----------------------------------------------------------------------===//

// FIXME: Use normal instructions and add lock prefix dynamically.

// Memory barriers

let isCodeGenOnly = 1, Defs = [EFLAGS] in
def OR32mi8Locked : Ii8<0x83, MRM1m, (outs), (ins i32mem:$dst, i32i8imm:$zero),
                        "or{l}\t{$zero, $dst|$dst, $zero}", []>,
                        Requires<[Not64BitMode]>, OpSize32, LOCK,
                        Sched<[WriteALURMW]>;

let hasSideEffects = 1 in
def Int_MemBarrier : I<0, Pseudo, (outs), (ins),
                       "#MEMBARRIER",
                       [(X86MemBarrier)]>, Sched<[WriteLoad]>;

// RegOpc corresponds to the mr version of the instruction
// ImmOpc corresponds to the mi version of the instruction
// ImmOpc8 corresponds to the mi8 version of the instruction
// ImmMod corresponds to the instruction format of the mi and mi8 versions
multiclass LOCK_ArithBinOp<bits<8> RegOpc, bits<8> ImmOpc, bits<8> ImmOpc8,
                           Format ImmMod, SDNode Op, string mnemonic> {
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
    SchedRW = [WriteALURMW] in {

def NAME#8mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                  RegOpc{3}, RegOpc{2}, RegOpc{1}, 0 },
                  MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
                  !strconcat(mnemonic, "{b}\t",
                             "{$src2, $dst|$dst, $src2}"),
                  [(set EFLAGS, (Op addr:$dst, GR8:$src2))]>, LOCK;

def NAME#16mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                   RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                   MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
                   !strconcat(mnemonic, "{w}\t",
                              "{$src2, $dst|$dst, $src2}"),
                   [(set EFLAGS, (Op addr:$dst, GR16:$src2))]>,
                   OpSize16, LOCK;

def NAME#32mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                   RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                   MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
                   !strconcat(mnemonic, "{l}\t",
                              "{$src2, $dst|$dst, $src2}"),
                   [(set EFLAGS, (Op addr:$dst, GR32:$src2))]>,
                   OpSize32, LOCK;

def NAME#64mr : RI<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                    RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                    MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                    !strconcat(mnemonic, "{q}\t",
                               "{$src2, $dst|$dst, $src2}"),
                    [(set EFLAGS, (Op addr:$dst, GR64:$src2))]>, LOCK;

// NOTE: These are order specific, we want the mi8 forms to be listed
// first so that they are slightly preferred to the mi forms.
def NAME#16mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                      ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                      ImmMod, (outs), (ins i16mem :$dst, i16i8imm :$src2),
                      !strconcat(mnemonic, "{w}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      [(set EFLAGS, (Op addr:$dst, i16immSExt8:$src2))]>,
                      OpSize16, LOCK;

def NAME#32mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                      ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                      ImmMod, (outs), (ins i32mem :$dst, i32i8imm :$src2),
                      !strconcat(mnemonic, "{l}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      [(set EFLAGS, (Op addr:$dst, i32immSExt8:$src2))]>,
                      OpSize32, LOCK;

def NAME#64mi8 : RIi8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                       ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                       ImmMod, (outs), (ins i64mem :$dst, i64i8imm :$src2),
                       !strconcat(mnemonic, "{q}\t",
                                  "{$src2, $dst|$dst, $src2}"),
                       [(set EFLAGS, (Op addr:$dst, i64immSExt8:$src2))]>,
                       LOCK;

def NAME#8mi : Ii8<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                    ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 0 },
                    ImmMod, (outs), (ins i8mem :$dst, i8imm :$src2),
                    !strconcat(mnemonic, "{b}\t",
                               "{$src2, $dst|$dst, $src2}"),
                    [(set EFLAGS, (Op addr:$dst, (i8 imm:$src2)))]>, LOCK;

def NAME#16mi : Ii16<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                      ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                      ImmMod, (outs), (ins i16mem :$dst, i16imm :$src2),
                      !strconcat(mnemonic, "{w}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      [(set EFLAGS, (Op addr:$dst, (i16 imm:$src2)))]>,
                      OpSize16, LOCK;

def NAME#32mi : Ii32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                      ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                      ImmMod, (outs), (ins i32mem :$dst, i32imm :$src2),
                      !strconcat(mnemonic, "{l}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      [(set EFLAGS, (Op addr:$dst, (i32 imm:$src2)))]>,
                      OpSize32, LOCK;

def NAME#64mi32 : RIi32S<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                          ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                          ImmMod, (outs), (ins i64mem :$dst, i64i32imm :$src2),
                          !strconcat(mnemonic, "{q}\t",
                                     "{$src2, $dst|$dst, $src2}"),
                          [(set EFLAGS, (Op addr:$dst, i64immSExt32:$src2))]>,
                          LOCK;
}

}

defm LOCK_ADD : LOCK_ArithBinOp<0x00, 0x80, 0x83, MRM0m, X86lock_add, "add">;
defm LOCK_SUB : LOCK_ArithBinOp<0x28, 0x80, 0x83, MRM5m, X86lock_sub, "sub">;
defm LOCK_OR  : LOCK_ArithBinOp<0x08, 0x80, 0x83, MRM1m, X86lock_or , "or">;
defm LOCK_AND : LOCK_ArithBinOp<0x20, 0x80, 0x83, MRM4m, X86lock_and, "and">;
defm LOCK_XOR : LOCK_ArithBinOp<0x30, 0x80, 0x83, MRM6m, X86lock_xor, "xor">;

def X86lock_add_nocf : PatFrag<(ops node:$lhs, node:$rhs),
                               (X86lock_add node:$lhs, node:$rhs), [{
  return hasNoCarryFlagUses(SDValue(N, 0));
}]>;

def X86lock_sub_nocf : PatFrag<(ops node:$lhs, node:$rhs),
                               (X86lock_sub node:$lhs, node:$rhs), [{
  return hasNoCarryFlagUses(SDValue(N, 0));
}]>;

let Predicates = [UseIncDec] in {
  let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
      SchedRW = [WriteALURMW] in {
    def LOCK_INC8m  : I<0xFE, MRM0m, (outs), (ins i8mem :$dst),
                        "inc{b}\t$dst",
                        [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i8 1)))]>,
                        LOCK;
    def LOCK_INC16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst),
                        "inc{w}\t$dst",
                        [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i16 1)))]>,
                        OpSize16, LOCK;
    def LOCK_INC32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst),
                        "inc{l}\t$dst",
                        [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i32 1)))]>,
                        OpSize32, LOCK;
    def LOCK_INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst),
                         "inc{q}\t$dst",
                         [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i64 1)))]>,
                         LOCK;

    def LOCK_DEC8m  : I<0xFE, MRM1m, (outs), (ins i8mem :$dst),
                        "dec{b}\t$dst",
                        [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i8 1)))]>,
                        LOCK;
    def LOCK_DEC16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst),
                        "dec{w}\t$dst",
                        [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i16 1)))]>,
                        OpSize16, LOCK;
    def LOCK_DEC32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst),
                        "dec{l}\t$dst",
                        [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i32 1)))]>,
                        OpSize32, LOCK;
    def LOCK_DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst),
                         "dec{q}\t$dst",
                         [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i64 1)))]>,
                         LOCK;
  }

  // Additional patterns for -1 constant.
  def : Pat<(X86lock_add addr:$dst, (i8 -1)), (LOCK_DEC8m addr:$dst)>;
  def : Pat<(X86lock_add addr:$dst, (i16 -1)), (LOCK_DEC16m addr:$dst)>;
  def : Pat<(X86lock_add addr:$dst, (i32 -1)), (LOCK_DEC32m addr:$dst)>;
  def : Pat<(X86lock_add addr:$dst, (i64 -1)), (LOCK_DEC64m addr:$dst)>;
  def : Pat<(X86lock_sub addr:$dst, (i8 -1)), (LOCK_INC8m addr:$dst)>;
  def : Pat<(X86lock_sub addr:$dst, (i16 -1)), (LOCK_INC16m addr:$dst)>;
  def : Pat<(X86lock_sub addr:$dst, (i32 -1)), (LOCK_INC32m addr:$dst)>;
  def : Pat<(X86lock_sub addr:$dst, (i64 -1)), (LOCK_INC64m addr:$dst)>;
}

// Atomic compare and swap.
multiclass LCMPXCHG_UnOp<bits<8> Opc, Format Form, string mnemonic,
                         SDPatternOperator frag, X86MemOperand x86memop> {
let isCodeGenOnly = 1, usesCustomInserter = 1 in {
  def NAME : I<Opc, Form, (outs), (ins x86memop:$ptr),
               !strconcat(mnemonic, "\t$ptr"),
               [(frag addr:$ptr)]>, TB, LOCK;
}
}

multiclass LCMPXCHG_BinOp<bits<8> Opc8, bits<8> Opc, Format Form,
                          string mnemonic, SDPatternOperator frag> {
let isCodeGenOnly = 1, SchedRW = [WriteCMPXCHGRMW] in {
  let Defs = [AL, EFLAGS], Uses = [AL] in
  def NAME#8 : I<Opc8, Form, (outs), (ins i8mem:$ptr, GR8:$swap),
                 !strconcat(mnemonic, "{b}\t{$swap, $ptr|$ptr, $swap}"),
                 [(frag addr:$ptr, GR8:$swap, 1)]>, TB, LOCK;
  let Defs = [AX, EFLAGS], Uses = [AX] in
  def NAME#16 : I<Opc, Form, (outs), (ins i16mem:$ptr, GR16:$swap),
                  !strconcat(mnemonic, "{w}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR16:$swap, 2)]>, TB, OpSize16, LOCK;
  let Defs = [EAX, EFLAGS], Uses = [EAX] in
  def NAME#32 : I<Opc, Form, (outs), (ins i32mem:$ptr, GR32:$swap),
                  !strconcat(mnemonic, "{l}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR32:$swap, 4)]>, TB, OpSize32, LOCK;
  let Defs = [RAX, EFLAGS], Uses = [RAX] in
  def NAME#64 : RI<Opc, Form, (outs), (ins i64mem:$ptr, GR64:$swap),
                   !strconcat(mnemonic, "{q}\t{$swap, $ptr|$ptr, $swap}"),
                   [(frag addr:$ptr, GR64:$swap, 8)]>, TB, LOCK;
}
}

let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX],
    Predicates = [HasCmpxchg8b], SchedRW = [WriteCMPXCHGRMW] in {
defm LCMPXCHG8B : LCMPXCHG_UnOp<0xC7, MRM1m, "cmpxchg8b", X86cas8, i64mem>;
}

// This pseudo must be used when the frame uses RBX as
// the base pointer. Indeed, in such a situation RBX is a reserved
// register and the register allocator will ignore any use/def of
// it. In other words, the register will not fix the clobbering of
// RBX that will happen when setting the arguments for the instruction.
//
// Unlike the actual related instruction, we mark that this one
// defines EBX (instead of using EBX).
// The rationale is that we will define RBX during the expansion of
// the pseudo. The argument feeding EBX is ebx_input.
//
// The additional argument, $ebx_save, is a temporary register used to
// save the value of RBX across the actual instruction.
//
// To make sure the register assigned to $ebx_save does not interfere with
// the definition of the actual instruction, we use a definition $dst which
// is tied to $ebx_save. That way, the live-range of $ebx_save spans across
// the instruction and we are sure we will have a valid register to restore
// the value of RBX.
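//
// Sketch of the expansion (the custom inserter emits the real sequence):
//     movl %ebx, <$ebx_save>     # preserve the base pointer
//     movl <$ebx_input>, %ebx    # set up the implicit cmpxchg8b operand
//     lock cmpxchg8b ($ptr)
//     movl <$ebx_save>, %ebx     # restore the base pointer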
let Defs = [EAX, EDX, EBX, EFLAGS], Uses = [EAX, ECX, EDX],
    Predicates = [HasCmpxchg8b], SchedRW = [WriteCMPXCHGRMW],
    isCodeGenOnly = 1, isPseudo = 1, Constraints = "$ebx_save = $dst",
    usesCustomInserter = 1 in {
def LCMPXCHG8B_SAVE_EBX :
    I<0, Pseudo, (outs GR32:$dst),
      (ins i64mem:$ptr, GR32:$ebx_input, GR32:$ebx_save),
      !strconcat("cmpxchg8b", "\t$ptr"),
      [(set GR32:$dst, (X86cas8save_ebx addr:$ptr, GR32:$ebx_input,
                                        GR32:$ebx_save))]>;
}

let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX],
    Predicates = [HasCmpxchg16b,In64BitMode], SchedRW = [WriteCMPXCHGRMW] in {
defm LCMPXCHG16B : LCMPXCHG_UnOp<0xC7, MRM1m, "cmpxchg16b",
                                 X86cas16, i128mem>, REX_W;
}

// Same as LCMPXCHG8B_SAVE_EBX but for the 16-byte variant.
let Defs = [RAX, RDX, RBX, EFLAGS], Uses = [RAX, RCX, RDX],
    Predicates = [HasCmpxchg16b,In64BitMode], SchedRW = [WriteCMPXCHGRMW],
    isCodeGenOnly = 1, isPseudo = 1, Constraints = "$rbx_save = $dst",
    usesCustomInserter = 1 in {
def LCMPXCHG16B_SAVE_RBX :
    I<0, Pseudo, (outs GR64:$dst),
      (ins i128mem:$ptr, GR64:$rbx_input, GR64:$rbx_save),
      !strconcat("cmpxchg16b", "\t$ptr"),
      [(set GR64:$dst, (X86cas16save_rbx addr:$ptr, GR64:$rbx_input,
                                         GR64:$rbx_save))]>;
}

defm LCMPXCHG : LCMPXCHG_BinOp<0xB0, 0xB1, MRMDestMem, "cmpxchg", X86cas>;

// Atomic exchange and add
multiclass ATOMIC_LOAD_BINOP<bits<8> opc8, bits<8> opc, string mnemonic,
                             string frag> {
  let Constraints = "$val = $dst", Defs = [EFLAGS], isCodeGenOnly = 1,
      SchedRW = [WriteALURMW] in {
    def NAME#8  : I<opc8, MRMSrcMem, (outs GR8:$dst),
                    (ins GR8:$val, i8mem:$ptr),
                    !strconcat(mnemonic, "{b}\t{$val, $ptr|$ptr, $val}"),
                    [(set GR8:$dst,
                          (!cast<PatFrag>(frag # "_8") addr:$ptr, GR8:$val))]>;
    def NAME#16 : I<opc, MRMSrcMem, (outs GR16:$dst),
                    (ins GR16:$val, i16mem:$ptr),
                    !strconcat(mnemonic, "{w}\t{$val, $ptr|$ptr, $val}"),
                    [(set
                       GR16:$dst,
                       (!cast<PatFrag>(frag # "_16") addr:$ptr, GR16:$val))]>,
                    OpSize16;
    def NAME#32 : I<opc, MRMSrcMem, (outs GR32:$dst),
                    (ins GR32:$val, i32mem:$ptr),
                    !strconcat(mnemonic, "{l}\t{$val, $ptr|$ptr, $val}"),
                    [(set
                       GR32:$dst,
                       (!cast<PatFrag>(frag # "_32") addr:$ptr, GR32:$val))]>,
                    OpSize32;
    def NAME#64 : RI<opc, MRMSrcMem, (outs GR64:$dst),
                     (ins GR64:$val, i64mem:$ptr),
                     !strconcat(mnemonic, "{q}\t{$val, $ptr|$ptr, $val}"),
                     [(set
                        GR64:$dst,
                        (!cast<PatFrag>(frag # "_64") addr:$ptr, GR64:$val))]>;
  }
}

defm LXADD : ATOMIC_LOAD_BINOP<0xc0, 0xc1, "xadd", "atomic_load_add">, TB, LOCK;

/* The following multiclass tries to make sure that in code like
 *    x.store (immediate op x.load(acquire), release)
 * and
 *    x.store (register op x.load(acquire), release)
 * an operation directly on memory is generated instead of wasting a register.
 * It is not automatic as atomic_store/load are only lowered to MOV instructions
 * extremely late, to prevent them from being accidentally reordered in the
 * backend (see below the RELEASE_MOV* / ACQUIRE_MOV* pseudo-instructions).
 */
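// For instance, a release store of an incremented acquire load such as
//    x.store(x.load(memory_order_acquire) + 1, memory_order_release)
// can be selected to a single memory-destination instruction, roughly
//    addl $1, (mem)
// instead of a load into a register, an add, and a separate store.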
multiclass RELEASE_BINOP_MI<string Name, SDNode op> {
  def : Pat<(atomic_store_8 addr:$dst,
             (op (atomic_load_8 addr:$dst), (i8 imm:$src))),
            (!cast<Instruction>(Name#"8mi") addr:$dst, imm:$src)>;
  def : Pat<(atomic_store_16 addr:$dst,
             (op (atomic_load_16 addr:$dst), (i16 imm:$src))),
            (!cast<Instruction>(Name#"16mi") addr:$dst, imm:$src)>;
  def : Pat<(atomic_store_32 addr:$dst,
             (op (atomic_load_32 addr:$dst), (i32 imm:$src))),
            (!cast<Instruction>(Name#"32mi") addr:$dst, imm:$src)>;
  def : Pat<(atomic_store_64 addr:$dst,
             (op (atomic_load_64 addr:$dst), (i64immSExt32:$src))),
            (!cast<Instruction>(Name#"64mi32") addr:$dst, (i64immSExt32:$src))>;

  def : Pat<(atomic_store_8 addr:$dst,
             (op (atomic_load_8 addr:$dst), (i8 GR8:$src))),
            (!cast<Instruction>(Name#"8mr") addr:$dst, GR8:$src)>;
  def : Pat<(atomic_store_16 addr:$dst,
             (op (atomic_load_16 addr:$dst), (i16 GR16:$src))),
            (!cast<Instruction>(Name#"16mr") addr:$dst, GR16:$src)>;
  def : Pat<(atomic_store_32 addr:$dst,
             (op (atomic_load_32 addr:$dst), (i32 GR32:$src))),
            (!cast<Instruction>(Name#"32mr") addr:$dst, GR32:$src)>;
  def : Pat<(atomic_store_64 addr:$dst,
             (op (atomic_load_64 addr:$dst), (i64 GR64:$src))),
            (!cast<Instruction>(Name#"64mr") addr:$dst, GR64:$src)>;
}

defm : RELEASE_BINOP_MI<"ADD", add>;
defm : RELEASE_BINOP_MI<"AND", and>;
defm : RELEASE_BINOP_MI<"OR", or>;
defm : RELEASE_BINOP_MI<"XOR", xor>;
defm : RELEASE_BINOP_MI<"SUB", sub>;

// Atomic load + floating point patterns.
// FIXME: This could also handle SIMD operations with *ps and *pd instructions.
multiclass ATOMIC_LOAD_FP_BINOP_MI<string Name, SDNode op> {
  def : Pat<(op FR32:$src1, (bitconvert (i32 (atomic_load_32 addr:$src2)))),
            (!cast<Instruction>(Name#"SSrm") FR32:$src1, addr:$src2)>,
            Requires<[UseSSE1]>;
  def : Pat<(op FR32:$src1, (bitconvert (i32 (atomic_load_32 addr:$src2)))),
            (!cast<Instruction>("V"#Name#"SSrm") FR32:$src1, addr:$src2)>,
            Requires<[UseAVX]>;
  def : Pat<(op FR32X:$src1, (bitconvert (i32 (atomic_load_32 addr:$src2)))),
            (!cast<Instruction>("V"#Name#"SSZrm") FR32X:$src1, addr:$src2)>,
            Requires<[HasAVX512]>;

  def : Pat<(op FR64:$src1, (bitconvert (i64 (atomic_load_64 addr:$src2)))),
            (!cast<Instruction>(Name#"SDrm") FR64:$src1, addr:$src2)>,
            Requires<[UseSSE2]>;
  def : Pat<(op FR64:$src1, (bitconvert (i64 (atomic_load_64 addr:$src2)))),
            (!cast<Instruction>("V"#Name#"SDrm") FR64:$src1, addr:$src2)>,
            Requires<[UseAVX]>;
  def : Pat<(op FR64X:$src1, (bitconvert (i64 (atomic_load_64 addr:$src2)))),
            (!cast<Instruction>("V"#Name#"SDZrm") FR64X:$src1, addr:$src2)>,
            Requires<[HasAVX512]>;
}
defm : ATOMIC_LOAD_FP_BINOP_MI<"ADD", fadd>;
// FIXME: Add fsub, fmul, fdiv, ...

multiclass RELEASE_UNOP<string Name, dag dag8, dag dag16, dag dag32,
                        dag dag64> {
  def : Pat<(atomic_store_8 addr:$dst, dag8),
            (!cast<Instruction>(Name#8m) addr:$dst)>;
  def : Pat<(atomic_store_16 addr:$dst, dag16),
            (!cast<Instruction>(Name#16m) addr:$dst)>;
  def : Pat<(atomic_store_32 addr:$dst, dag32),
            (!cast<Instruction>(Name#32m) addr:$dst)>;
  def : Pat<(atomic_store_64 addr:$dst, dag64),
            (!cast<Instruction>(Name#64m) addr:$dst)>;
}

let Predicates = [UseIncDec] in {
  defm : RELEASE_UNOP<"INC",
      (add (atomic_load_8 addr:$dst), (i8 1)),
      (add (atomic_load_16 addr:$dst), (i16 1)),
      (add (atomic_load_32 addr:$dst), (i32 1)),
      (add (atomic_load_64 addr:$dst), (i64 1))>;
  defm : RELEASE_UNOP<"DEC",
      (add (atomic_load_8 addr:$dst), (i8 -1)),
      (add (atomic_load_16 addr:$dst), (i16 -1)),
      (add (atomic_load_32 addr:$dst), (i32 -1)),
      (add (atomic_load_64 addr:$dst), (i64 -1))>;
}

defm : RELEASE_UNOP<"NEG",
    (ineg (i8 (atomic_load_8 addr:$dst))),
    (ineg (i16 (atomic_load_16 addr:$dst))),
    (ineg (i32 (atomic_load_32 addr:$dst))),
    (ineg (i64 (atomic_load_64 addr:$dst)))>;
defm : RELEASE_UNOP<"NOT",
    (not (i8 (atomic_load_8 addr:$dst))),
    (not (i16 (atomic_load_16 addr:$dst))),
    (not (i32 (atomic_load_32 addr:$dst))),
    (not (i64 (atomic_load_64 addr:$dst)))>;

def : Pat<(atomic_store_8 addr:$dst, (i8 imm:$src)),
          (MOV8mi addr:$dst, imm:$src)>;
def : Pat<(atomic_store_16 addr:$dst, (i16 imm:$src)),
          (MOV16mi addr:$dst, imm:$src)>;
def : Pat<(atomic_store_32 addr:$dst, (i32 imm:$src)),
          (MOV32mi addr:$dst, imm:$src)>;
def : Pat<(atomic_store_64 addr:$dst, (i64immSExt32:$src)),
          (MOV64mi32 addr:$dst, i64immSExt32:$src)>;

def : Pat<(atomic_store_8 addr:$dst, GR8:$src),
          (MOV8mr addr:$dst, GR8:$src)>;
def : Pat<(atomic_store_16 addr:$dst, GR16:$src),
          (MOV16mr addr:$dst, GR16:$src)>;
def : Pat<(atomic_store_32 addr:$dst, GR32:$src),
          (MOV32mr addr:$dst, GR32:$src)>;
def : Pat<(atomic_store_64 addr:$dst, GR64:$src),
          (MOV64mr addr:$dst, GR64:$src)>;

def : Pat<(i8 (atomic_load_8 addr:$src)), (MOV8rm addr:$src)>;
def : Pat<(i16 (atomic_load_16 addr:$src)), (MOV16rm addr:$src)>;
def : Pat<(i32 (atomic_load_32 addr:$src)), (MOV32rm addr:$src)>;
def : Pat<(i64 (atomic_load_64 addr:$src)), (MOV64rm addr:$src)>;

// Floating point loads/stores.
def : Pat<(atomic_store_32 addr:$dst, (i32 (bitconvert (f32 FR32:$src)))),
          (MOVSSmr addr:$dst, FR32:$src)>, Requires<[UseSSE1]>;
def : Pat<(atomic_store_32 addr:$dst, (i32 (bitconvert (f32 FR32:$src)))),
          (VMOVSSmr addr:$dst, FR32:$src)>, Requires<[UseAVX]>;
def : Pat<(atomic_store_32 addr:$dst, (i32 (bitconvert (f32 FR32:$src)))),
          (VMOVSSZmr addr:$dst, FR32:$src)>, Requires<[HasAVX512]>;

def : Pat<(atomic_store_64 addr:$dst, (i64 (bitconvert (f64 FR64:$src)))),
          (MOVSDmr addr:$dst, FR64:$src)>, Requires<[UseSSE2]>;
def : Pat<(atomic_store_64 addr:$dst, (i64 (bitconvert (f64 FR64:$src)))),
          (VMOVSDmr addr:$dst, FR64:$src)>, Requires<[UseAVX]>;
def : Pat<(atomic_store_64 addr:$dst, (i64 (bitconvert (f64 FR64:$src)))),
          (VMOVSDZmr addr:$dst, FR64:$src)>, Requires<[HasAVX512]>;

def : Pat<(f32 (bitconvert (i32 (atomic_load_32 addr:$src)))),
          (MOVSSrm_alt addr:$src)>, Requires<[UseSSE1]>;
def : Pat<(f32 (bitconvert (i32 (atomic_load_32 addr:$src)))),
          (VMOVSSrm_alt addr:$src)>, Requires<[UseAVX]>;
def : Pat<(f32 (bitconvert (i32 (atomic_load_32 addr:$src)))),
          (VMOVSSZrm_alt addr:$src)>, Requires<[HasAVX512]>;

def : Pat<(f64 (bitconvert (i64 (atomic_load_64 addr:$src)))),
          (MOVSDrm_alt addr:$src)>, Requires<[UseSSE2]>;
def : Pat<(f64 (bitconvert (i64 (atomic_load_64 addr:$src)))),
          (VMOVSDrm_alt addr:$src)>, Requires<[UseAVX]>;
def : Pat<(f64 (bitconvert (i64 (atomic_load_64 addr:$src)))),
          (VMOVSDZrm_alt addr:$src)>, Requires<[HasAVX512]>;

//===----------------------------------------------------------------------===//
// DAG Pattern Matching Rules
//===----------------------------------------------------------------------===//

// Use AND/OR to store 0/-1 in memory when optimizing for minsize. This saves
// binary size compared to a regular MOV, but it introduces an unnecessary
// load, so is not suitable for regular or optsize functions.
let Predicates = [OptForMinSize] in {
def : Pat<(simple_store (i16 0), addr:$dst), (AND16mi8 addr:$dst, 0)>;
def : Pat<(simple_store (i32 0), addr:$dst), (AND32mi8 addr:$dst, 0)>;
def : Pat<(simple_store (i64 0), addr:$dst), (AND64mi8 addr:$dst, 0)>;
def : Pat<(simple_store (i16 -1), addr:$dst), (OR16mi8 addr:$dst, -1)>;
def : Pat<(simple_store (i32 -1), addr:$dst), (OR32mi8 addr:$dst, -1)>;
def : Pat<(simple_store (i64 -1), addr:$dst), (OR64mi8 addr:$dst, -1)>;
}

// In kernel code model, we can get the address of a label
// into a register with 'movq'. FIXME: This is a hack, the 'imm' predicate of
// the MOV64ri32 should accept these.
def : Pat<(i64 (X86Wrapper tconstpool :$dst)),
          (MOV64ri32 tconstpool :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tjumptable :$dst)),
          (MOV64ri32 tjumptable :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri32 tglobaladdr :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri32 texternalsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper mcsym:$dst)),
          (MOV64ri32 mcsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri32 tblockaddress:$dst)>, Requires<[KernelCode]>;

// If we have small model and -static mode, it is safe to store global addresses
// directly as immediates. FIXME: This is really a hack, the 'imm' predicate
// for MOV64mi32 should handle this sort of thing.
def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tconstpool:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tjumptable:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, texternalsym:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper mcsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, mcsym:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tblockaddress:$src)>,
          Requires<[NearData, IsNotPIC]>;

def : Pat<(i32 (X86RecoverFrameAlloc mcsym:$dst)), (MOV32ri mcsym:$dst)>;
def : Pat<(i64 (X86RecoverFrameAlloc mcsym:$dst)), (MOV64ri mcsym:$dst)>;

// Calls

// tls has some funny stuff here...
// This corresponds to movabs $foo@tpoff, %rax
def : Pat<(i64 (X86Wrapper tglobaltlsaddr :$dst)),
          (MOV64ri32 tglobaltlsaddr :$dst)>;
// This corresponds to add $foo@tpoff, %rax
def : Pat<(add GR64:$src1, (X86Wrapper tglobaltlsaddr :$dst)),
          (ADD64ri32 GR64:$src1, tglobaltlsaddr :$dst)>;

// Direct PC relative function call for small code model. 32-bit displacement
// sign extended to 64-bit.
def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>;

// Tailcall stuff. The TCRETURN instructions execute after the epilog, so they
// can never use callee-saved registers. That is the purpose of the GR64_TC
// register classes.
//
// The only volatile register that is never used by the calling convention is
// %r11. This happens when calling a vararg function with 6 arguments.
//
// Match an X86tcret that uses less than 7 volatile registers.
def X86tcret_6regs : PatFrag<(ops node:$ptr, node:$off),
                             (X86tcret node:$ptr, node:$off), [{
  // X86tcret args: (*chain, ptr, imm, regs..., glue)
  unsigned NumRegs = 0;
  for (unsigned i = 3, e = N->getNumOperands(); i != e; ++i)
    if (isa<RegisterSDNode>(N->getOperand(i)) && ++NumRegs > 6)
      return false;
  return true;
}]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
          (TCRETURNri ptr_rc_tailcall:$dst, imm:$off)>,
          Requires<[Not64BitMode, NotUseIndirectThunkCalls]>;

// FIXME: This is disabled for 32-bit PIC mode because the global base
// register which is part of the address mode may be assigned a
// callee-saved register.
def : Pat<(X86tcret (load addr:$dst), imm:$off),
          (TCRETURNmi addr:$dst, imm:$off)>,
          Requires<[Not64BitMode, IsNotPIC, NotUseIndirectThunkCalls]>;

def : Pat<(X86tcret (i32 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi tglobaladdr:$dst, imm:$off)>,
          Requires<[NotLP64]>;

def : Pat<(X86tcret (i32 texternalsym:$dst), imm:$off),
          (TCRETURNdi texternalsym:$dst, imm:$off)>,
          Requires<[NotLP64]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
          (TCRETURNri64 ptr_rc_tailcall:$dst, imm:$off)>,
          Requires<[In64BitMode, NotUseIndirectThunkCalls]>;

// Don't fold loads into X86tcret requiring more than 6 regs.
// There wouldn't be enough scratch registers for base+index.
def : Pat<(X86tcret_6regs (load addr:$dst), imm:$off),
          (TCRETURNmi64 addr:$dst, imm:$off)>,
          Requires<[In64BitMode, NotUseIndirectThunkCalls]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
          (INDIRECT_THUNK_TCRETURN64 ptr_rc_tailcall:$dst, imm:$off)>,
          Requires<[In64BitMode, UseIndirectThunkCalls]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
          (INDIRECT_THUNK_TCRETURN32 ptr_rc_tailcall:$dst, imm:$off)>,
          Requires<[Not64BitMode, UseIndirectThunkCalls]>;

def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
          (TCRETURNdi64 texternalsym:$dst, imm:$off)>,
          Requires<[In64BitMode]>;

// Normal calls, with various flavors of addresses.
def : Pat<(X86call (i32 tglobaladdr:$dst)),
          (CALLpcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i32 texternalsym:$dst)),
          (CALLpcrel32 texternalsym:$dst)>;
def : Pat<(X86call (i32 imm:$dst)),
          (CALLpcrel32 imm:$dst)>, Requires<[CallImmAddr]>;

// Comparisons.

// TEST R,R is smaller than CMP R,0
def : Pat<(X86cmp GR8:$src1, 0),
          (TEST8rr GR8:$src1, GR8:$src1)>;
def : Pat<(X86cmp GR16:$src1, 0),
          (TEST16rr GR16:$src1, GR16:$src1)>;
def : Pat<(X86cmp GR32:$src1, 0),
          (TEST32rr GR32:$src1, GR32:$src1)>;
def : Pat<(X86cmp GR64:$src1, 0),
          (TEST64rr GR64:$src1, GR64:$src1)>;

// zextload bool -> zextload byte
// i1 is stored in one byte in zero-extended form; the upper bits are cleaned
// up before the store.
def : Pat<(zextloadi8i1 addr:$src), (MOV8rm addr:$src)>;
def : Pat<(zextloadi16i1 addr:$src),
          (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
def : Pat<(zextloadi64i1 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;

// extload bool -> extload byte
// When extloading from 16-bit and smaller memory locations into 64-bit
// registers, use zero-extending loads so that the entire 64-bit register is
// defined, avoiding partial-register updates.

def : Pat<(extloadi8i1 addr:$src), (MOV8rm addr:$src)>;
def : Pat<(extloadi16i1 addr:$src),
          (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
def : Pat<(extloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
def : Pat<(extloadi16i8 addr:$src),
          (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
def : Pat<(extloadi32i8 addr:$src), (MOVZX32rm8 addr:$src)>;
def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>;

// For other extloads, use subregs, since the high contents of the register are
// defined after an extload.
// NOTE: The extloadi64i32 pattern needs to be first as it will try to form
// 32-bit loads for 4 byte aligned i8/i16 loads.
def : Pat<(extloadi64i32 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src), sub_32bit)>;
def : Pat<(extloadi64i1 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
def : Pat<(extloadi64i8 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
def : Pat<(extloadi64i16 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm16 addr:$src), sub_32bit)>;

// anyext. Define these to do an explicit zero-extend to
// avoid partial-register updates.
def : Pat<(i16 (anyext GR8 :$src)), (EXTRACT_SUBREG
                                     (MOVZX32rr8 GR8 :$src), sub_16bit)>;
def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8 GR8 :$src)>;

// Except for i16 -> i32, since isel expects i16 ops to be promoted to i32.
def : Pat<(i32 (anyext GR16:$src)),
          (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, sub_16bit)>;

def : Pat<(i64 (anyext GR8 :$src)),
          (SUBREG_TO_REG (i64 0), (MOVZX32rr8 GR8 :$src), sub_32bit)>;
def : Pat<(i64 (anyext GR16:$src)),
          (SUBREG_TO_REG (i64 0), (MOVZX32rr16 GR16 :$src), sub_32bit)>;
def : Pat<(i64 (anyext GR32:$src)),
          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src, sub_32bit)>;

// If this is an anyext of the remainder of an 8-bit sdivrem, use a MOVSX
// instead of a MOVZX. The sdivrem lowering will emit a MOVSX to move
// %ah to the lower byte of a register. By using a MOVSX here we allow a
// post-isel peephole to merge the two MOVSX instructions into one.
def anyext_sdiv : PatFrag<(ops node:$lhs), (anyext node:$lhs),[{
  return (N->getOperand(0).getOpcode() == ISD::SDIVREM &&
          N->getOperand(0).getResNo() == 1);
}]>;
def : Pat<(i32 (anyext_sdiv GR8:$src)), (MOVSX32rr8 GR8:$src)>;

// Any instruction that defines a 32-bit result zeroes the high half of the
// register, with a few exceptions: Truncate can be lowered to EXTRACT_SUBREG,
// and CopyFromReg may be copying from a truncate. Any other 32-bit operation
// will zero-extend up to 64 bits. AssertSext/AssertZext aren't saying anything
// about the upper 32 bits, they're probably just qualifying a CopyFromReg.
def def32 : PatLeaf<(i32 GR32:$src), [{
  return N->getOpcode() != ISD::TRUNCATE &&
         N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
         N->getOpcode() != ISD::CopyFromReg &&
         N->getOpcode() != ISD::AssertSext &&
         N->getOpcode() != ISD::AssertZext;
}]>;

// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
def : Pat<(i64 (zext def32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
def : Pat<(i64 (and (anyext def32:$src), 0x00000000FFFFFFFF)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;

//===----------------------------------------------------------------------===//
// Pattern match OR as ADD
//===----------------------------------------------------------------------===//

// If safe, we prefer to pattern match OR as ADD at isel time. ADD can be
// 3-addressified into an LEA instruction to avoid copies. However, we also
// want to finally emit these instructions as an or at the end of the code
// generator to make the generated code easier to read. To do this, we select
// into "disjoint bits" pseudo ops.
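//
// For example, when the low bit of %x is known to be zero, "or $1, %x" can be
// selected as ADD32ri8_DB and later 3-addressified into an LEA (illustrative):
//     leal 1(%x), %y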
// Treat an 'or' node as an 'add' if the or'ed bits are known to be zero.
def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
    return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());

  KnownBits Known0 = CurDAG->computeKnownBits(N->getOperand(0), 0);
  KnownBits Known1 = CurDAG->computeKnownBits(N->getOperand(1), 0);
  return (~Known0.Zero & ~Known1.Zero) == 0;
}]>;


// (or x1, x2) -> (add x1, x2) if the two operands are known not to share bits.
// Try this before selecting to OR.
let SchedRW = [WriteALU] in {

let isConvertibleToThreeAddress = 1, isPseudo = 1,
    Constraints = "$src1 = $dst", Defs = [EFLAGS] in {
let isCommutable = 1 in {
def ADD8rr_DB   : I<0, Pseudo, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
                    "", // orb/addb REG, REG
                    [(set GR8:$dst, (or_is_add GR8:$src1, GR8:$src2))]>;
def ADD16rr_DB  : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                    "", // orw/addw REG, REG
                    [(set GR16:$dst, (or_is_add GR16:$src1, GR16:$src2))]>;
def ADD32rr_DB  : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                    "", // orl/addl REG, REG
                    [(set GR32:$dst, (or_is_add GR32:$src1, GR32:$src2))]>;
def ADD64rr_DB  : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                    "", // orq/addq REG, REG
                    [(set GR64:$dst, (or_is_add GR64:$src1, GR64:$src2))]>;
} // isCommutable

// NOTE: These are order specific, we want the ri8 forms to be listed
// first so that they are slightly preferred to the ri forms.

def ADD8ri_DB   : I<0, Pseudo,
                    (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
                    "", // orb/addb REG, imm8
                    [(set GR8:$dst, (or_is_add GR8:$src1, imm:$src2))]>;
def ADD16ri8_DB : I<0, Pseudo,
                    (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
                    "", // orw/addw REG, imm8
                    [(set GR16:$dst,(or_is_add GR16:$src1,i16immSExt8:$src2))]>;
def ADD16ri_DB  : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
                    "", // orw/addw REG, imm
                    [(set GR16:$dst, (or_is_add GR16:$src1, imm:$src2))]>;

def ADD32ri8_DB : I<0, Pseudo,
                    (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
                    "", // orl/addl REG, imm8
                    [(set GR32:$dst,(or_is_add GR32:$src1,i32immSExt8:$src2))]>;
def ADD32ri_DB  : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
                    "", // orl/addl REG, imm
                    [(set GR32:$dst, (or_is_add GR32:$src1, imm:$src2))]>;

def ADD64ri8_DB : I<0, Pseudo,
                    (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "", // orq/addq REG, imm8
                    [(set GR64:$dst, (or_is_add GR64:$src1,
                                                i64immSExt8:$src2))]>;
def ADD64ri32_DB : I<0, Pseudo,
                     (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                     "", // orq/addq REG, imm
                     [(set GR64:$dst, (or_is_add GR64:$src1,
                                                 i64immSExt32:$src2))]>;
}
} // AddedComplexity, SchedRW

//===----------------------------------------------------------------------===//
// Pattern match SUB as XOR
//===----------------------------------------------------------------------===//

// An immediate in the LHS of a subtract can't be encoded in the instruction.
// If there is no possibility of a borrow we can use an XOR instead of a SUB
// to enable the immediate to be folded.
// TODO: Move this to a DAG combine?

def sub_is_xor : PatFrag<(ops node:$lhs, node:$rhs), (sub node:$lhs, node:$rhs),[{
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
    KnownBits Known = CurDAG->computeKnownBits(N->getOperand(1));

    // If all possible ones in the RHS are set in the LHS then there can't be
    // a borrow and we can use xor.
    return (~Known.Zero).isSubsetOf(CN->getAPIntValue());
  }

  return false;
}]>;
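
// Hedged example (assumed value range): for "31 - x" where x is known to lie
// in [0, 31], every bit that can be one in x is already set in 31, so no
// borrow can occur and "xorl $31, %reg" computes the same result while
// keeping the immediate folded into the instruction.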
let AddedComplexity = 5 in {
def : Pat<(sub_is_xor imm:$src2, GR8:$src1),
          (XOR8ri GR8:$src1, imm:$src2)>;
def : Pat<(sub_is_xor i16immSExt8:$src2, GR16:$src1),
          (XOR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(sub_is_xor imm:$src2, GR16:$src1),
          (XOR16ri GR16:$src1, imm:$src2)>;
def : Pat<(sub_is_xor i32immSExt8:$src2, GR32:$src1),
          (XOR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(sub_is_xor imm:$src2, GR32:$src1),
          (XOR32ri GR32:$src1, imm:$src2)>;
def : Pat<(sub_is_xor i64immSExt8:$src2, GR64:$src1),
          (XOR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(sub_is_xor i64immSExt32:$src2, GR64:$src1),
          (XOR64ri32 GR64:$src1, i64immSExt32:$src2)>;
} // AddedComplexity = 5

//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//

// Odd encoding trick: -128 fits into an 8-bit immediate field while
// +128 doesn't, so in this special case use a sub instead of an add.
def : Pat<(add GR16:$src1, 128),
          (SUB16ri8 GR16:$src1, -128)>;
def : Pat<(store (add (loadi16 addr:$dst), 128), addr:$dst),
          (SUB16mi8 addr:$dst, -128)>;

def : Pat<(add GR32:$src1, 128),
          (SUB32ri8 GR32:$src1, -128)>;
def : Pat<(store (add (loadi32 addr:$dst), 128), addr:$dst),
          (SUB32mi8 addr:$dst, -128)>;

def : Pat<(add GR64:$src1, 128),
          (SUB64ri8 GR64:$src1, -128)>;
def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
          (SUB64mi8 addr:$dst, -128)>;

def : Pat<(X86add_flag_nocf GR16:$src1, 128),
          (SUB16ri8 GR16:$src1, -128)>;
def : Pat<(X86add_flag_nocf GR32:$src1, 128),
          (SUB32ri8 GR32:$src1, -128)>;
def : Pat<(X86add_flag_nocf GR64:$src1, 128),
          (SUB64ri8 GR64:$src1, -128)>;
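
// Hedged size sketch (exact encodings depend on the registers chosen):
// because imm8 fields are sign-extended, +128 needs a full-width immediate,
// e.g. "addl $128, %ecx" carries a 4-byte immediate, while the equivalent
// "subl $-128, %ecx" encodes -128 in a single sign-extended byte.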

// The same trick applies for 32-bit immediate fields in 64-bit
// instructions.
def : Pat<(add GR64:$src1, 0x0000000080000000),
          (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
          (SUB64mi32 addr:$dst, 0xffffffff80000000)>;

def : Pat<(X86add_flag_nocf GR64:$src1, 0x0000000080000000),
          (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;

// To avoid needing to materialize an immediate in a register, use a 32-bit and
// with implicit zero-extension instead of a 64-bit and if the immediate has at
// least 32 bits of leading zeros. If in addition the last 32 bits can be
// represented with a sign extension of an 8-bit constant, use that.
// This can also reduce instruction size by eliminating the need for the REX
// prefix.

// AddedComplexity is needed to give priority over i64immSExt8 and i64immSExt32.
let AddedComplexity = 1 in {
def : Pat<(and GR64:$src, i64immZExt32SExt8:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri8
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo32XForm imm:$imm))),
            sub_32bit)>;

def : Pat<(and GR64:$src, i64immZExt32:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo32XForm imm:$imm))),
            sub_32bit)>;
} // AddedComplexity = 1
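
// Hedged illustration (constant assumed): for a mask like
// x & 0x00000000ffffff00, a straight 64-bit AND would need the constant in a
// register (movabs), since the value is neither sext8 nor sext32. Doing
// "andl $0xffffff00, %eax" instead clears bits 63:32 for free, keeps the
// immediate folded, and can drop the REX.W prefix as well.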

// AddedComplexity is needed due to the increased complexity on the
// i64immZExt32SExt8 and i64immZExt32 patterns above. Applying this to all
// the MOVZX patterns keeps them together in the DAGIsel tables.
let AddedComplexity = 1 in {
// r & (2^16-1) ==> movz
def : Pat<(and GR32:$src1, 0xffff),
          (MOVZX32rr16 (EXTRACT_SUBREG GR32:$src1, sub_16bit))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, sub_8bit))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
          (EXTRACT_SUBREG (MOVZX32rr8 (EXTRACT_SUBREG GR16:$src1, sub_8bit)),
                          sub_16bit)>;

// r & (2^32-1) ==> movz
def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
          (SUBREG_TO_REG (i64 0),
                         (MOV32rr (EXTRACT_SUBREG GR64:$src, sub_32bit)),
                         sub_32bit)>;
// r & (2^16-1) ==> movz
def : Pat<(and GR64:$src, 0xffff),
          (SUBREG_TO_REG (i64 0),
                         (MOVZX32rr16 (i16 (EXTRACT_SUBREG GR64:$src, sub_16bit))),
                         sub_32bit)>;
// r & (2^8-1) ==> movz
def : Pat<(and GR64:$src, 0xff),
          (SUBREG_TO_REG (i64 0),
                         (MOVZX32rr8 (i8 (EXTRACT_SUBREG GR64:$src, sub_8bit))),
                         sub_32bit)>;
} // AddedComplexity = 1

// Try to use BTS/BTR/BTC for single bit operations on the upper 32-bits.

def BTRXForm : SDNodeXForm<imm, [{
  // Transformation function: Find the lowest 0.
  return getI64Imm((uint8_t)N->getAPIntValue().countTrailingOnes(), SDLoc(N));
}]>;

def BTCBTSXForm : SDNodeXForm<imm, [{
  // Transformation function: Find the lowest 1.
  return getI64Imm((uint8_t)N->getAPIntValue().countTrailingZeros(), SDLoc(N));
}]>;

def BTRMask64 : ImmLeaf<i64, [{
  return !isUInt<32>(Imm) && !isInt<32>(Imm) && isPowerOf2_64(~Imm);
}]>;

def BTCBTSMask64 : ImmLeaf<i64, [{
  return !isInt<32>(Imm) && isPowerOf2_64(Imm);
}]>;

// For now only do this for optsize.
let AddedComplexity = 1, Predicates=[OptForSize] in {
def : Pat<(and GR64:$src1, BTRMask64:$mask),
          (BTR64ri8 GR64:$src1, (BTRXForm imm:$mask))>;
def : Pat<(or GR64:$src1, BTCBTSMask64:$mask),
          (BTS64ri8 GR64:$src1, (BTCBTSXForm imm:$mask))>;
def : Pat<(xor GR64:$src1, BTCBTSMask64:$mask),
          (BTC64ri8 GR64:$src1, (BTCBTSXForm imm:$mask))>;
}
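
// Hedged example (bit position assumed): at -Os, "x |= (1ULL << 40)" would
// otherwise need the constant materialized with a movabs; "btsq $40, %rax"
// sets the bit directly with an imm8, at the cost of BT-family latency.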

// sext_inreg patterns
def : Pat<(sext_inreg GR32:$src, i16),
          (MOVSX32rr16 (EXTRACT_SUBREG GR32:$src, sub_16bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, sub_8bit))>;

def : Pat<(sext_inreg GR16:$src, i8),
          (EXTRACT_SUBREG (MOVSX32rr8 (EXTRACT_SUBREG GR16:$src, sub_8bit)),
                          sub_16bit)>;

def : Pat<(sext_inreg GR64:$src, i32),
          (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
def : Pat<(sext_inreg GR64:$src, i16),
          (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, sub_16bit))>;
def : Pat<(sext_inreg GR64:$src, i8),
          (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, sub_8bit))>;

// sext, sext_load, zext, zext_load
def: Pat<(i16 (sext GR8:$src)),
         (EXTRACT_SUBREG (MOVSX32rr8 GR8:$src), sub_16bit)>;
def: Pat<(sextloadi16i8 addr:$src),
         (EXTRACT_SUBREG (MOVSX32rm8 addr:$src), sub_16bit)>;
def: Pat<(i16 (zext GR8:$src)),
         (EXTRACT_SUBREG (MOVZX32rr8 GR8:$src), sub_16bit)>;
def: Pat<(zextloadi16i8 addr:$src),
         (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;

// trunc patterns
def : Pat<(i16 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                          sub_8bit)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                          sub_8bit)>,
      Requires<[Not64BitMode]>;
def : Pat<(i32 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_32bit)>;
def : Pat<(i16 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_8bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_8bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG GR16:$src, sub_8bit)>,
      Requires<[In64BitMode]>;

def immff00_ffff : ImmLeaf<i32, [{
  return Imm >= 0xff00 && Imm <= 0xffff;
}]>;

// h-register tricks
def : Pat<(i8 (trunc (srl_su GR16:$src, (i8 8)))),
          (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc (srl_su (i32 (anyext GR16:$src)), (i8 8)))),
          (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc (srl_su GR32:$src, (i8 8)))),
          (EXTRACT_SUBREG GR32:$src, sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)),
            sub_16bit)>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR16:$src, sub_8bit_hi))>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR16:$src, sub_8bit_hi))>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>;
def : Pat<(srl (and_su GR32:$src, immff00_ffff), (i8 8)),
          (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>;

// h-register tricks.
// For now, be conservative on x86-64 and use an h-register extract only if the
// value is immediately zero-extended or stored, which are somewhat common
// cases. This uses a bunch of code to prevent a register requiring a REX prefix
// from being allocated in the same instruction as the h register, as there's
// currently no way to describe this requirement to the register allocator.
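
// Hedged illustration (register choice assumed): extracting bits 15:8 of a
// value in %rax as "(x >> 8) & 0xff" can be a single "movzbl %ah, %ecx",
// but only because neither operand needs a REX prefix; pairing %ah with,
// say, %r8b in one instruction is unencodable, which is why these patterns
// funnel through the _NOREX instruction forms.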

// h-register extract and zero-extend.
def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32rr8_NOREX
              (EXTRACT_SUBREG GR64:$src, sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(i64 (zext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32rr8_NOREX
              (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(i64 (anyext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32rr8_NOREX
              (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)),
            sub_32bit)>;

// h-register extract and store.
def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG GR64:$src, sub_8bit_hi))>;
def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG GR16:$src, sub_8bit_hi))>,
      Requires<[In64BitMode]>;

// (shl x, 1) ==> (add x, x)
// Note that if x is undef (immediate or otherwise), we could theoretically
// end up with the two uses of x getting different values, producing a result
// where the least significant bit is not 0. However, the probability of this
// happening is considered low enough that this is officially not a
// "real problem".
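
// Hedged example: "y = x << 1" becomes "addl %eax, %eax", the same size as
// "shll $1, %eax" but, unlike the shift, commutable and convertible to an
// LEA 3-address form when the source must be preserved.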
def : Pat<(shl GR8 :$src1, (i8 1)), (ADD8rr  GR8 :$src1, GR8 :$src1)>;
def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr GR16:$src1, GR16:$src1)>;
def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>;
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;

def shiftMask8 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
  return isUnneededShiftMask(N, 3);
}]>;

def shiftMask16 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
  return isUnneededShiftMask(N, 4);
}]>;

def shiftMask32 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
  return isUnneededShiftMask(N, 5);
}]>;

def shiftMask64 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
  return isUnneededShiftMask(N, 6);
}]>;

// Shift amount is implicitly masked.
multiclass MaskedShiftAmountPats<SDNode frag, string name> {
  // (shift x (and y, 31)) ==> (shift x, y)
  def : Pat<(frag GR8:$src1, (shiftMask32 CL)),
            (!cast<Instruction>(name # "8rCL") GR8:$src1)>;
  def : Pat<(frag GR16:$src1, (shiftMask32 CL)),
            (!cast<Instruction>(name # "16rCL") GR16:$src1)>;
  def : Pat<(frag GR32:$src1, (shiftMask32 CL)),
            (!cast<Instruction>(name # "32rCL") GR32:$src1)>;
  def : Pat<(store (frag (loadi8 addr:$dst), (shiftMask32 CL)), addr:$dst),
            (!cast<Instruction>(name # "8mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi16 addr:$dst), (shiftMask32 CL)), addr:$dst),
            (!cast<Instruction>(name # "16mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi32 addr:$dst), (shiftMask32 CL)), addr:$dst),
            (!cast<Instruction>(name # "32mCL") addr:$dst)>;

  // (shift x (and y, 63)) ==> (shift x, y)
  def : Pat<(frag GR64:$src1, (shiftMask64 CL)),
            (!cast<Instruction>(name # "64rCL") GR64:$src1)>;
  def : Pat<(store (frag (loadi64 addr:$dst), (shiftMask64 CL)), addr:$dst),
            (!cast<Instruction>(name # "64mCL") addr:$dst)>;
}

defm : MaskedShiftAmountPats<shl, "SHL">;
defm : MaskedShiftAmountPats<srl, "SHR">;
defm : MaskedShiftAmountPats<sra, "SAR">;
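
// Hedged example (assumed C source): "x >> (n & 31)" on a uint32_t selects
// straight to "shrl %cl, %eax" with no explicit AND, because the hardware
// already masks the count in CL to 5 bits, which is exactly what shiftMask32
// asserts is redundant.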

// ROL/ROR instructions allow a stronger mask optimization than shift for 8- and
// 16-bit. We can remove a mask of any (bitwidth - 1) on the rotation amount
// because over-rotating produces the same result. This is noted in the Intel
// docs with: "tempCOUNT <- (COUNT & COUNTMASK) MOD SIZE". Masking the rotation
// amount could affect EFLAGS results, but that does not matter because we are
// not tracking flags for these nodes.
multiclass MaskedRotateAmountPats<SDNode frag, string name> {
  // (rot x (and y, BitWidth - 1)) ==> (rot x, y)
  def : Pat<(frag GR8:$src1, (shiftMask8 CL)),
            (!cast<Instruction>(name # "8rCL") GR8:$src1)>;
  def : Pat<(frag GR16:$src1, (shiftMask16 CL)),
            (!cast<Instruction>(name # "16rCL") GR16:$src1)>;
  def : Pat<(frag GR32:$src1, (shiftMask32 CL)),
            (!cast<Instruction>(name # "32rCL") GR32:$src1)>;
  def : Pat<(store (frag (loadi8 addr:$dst), (shiftMask8 CL)), addr:$dst),
            (!cast<Instruction>(name # "8mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi16 addr:$dst), (shiftMask16 CL)), addr:$dst),
            (!cast<Instruction>(name # "16mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi32 addr:$dst), (shiftMask32 CL)), addr:$dst),
            (!cast<Instruction>(name # "32mCL") addr:$dst)>;

  // (rot x (and y, 63)) ==> (rot x, y)
  def : Pat<(frag GR64:$src1, (shiftMask64 CL)),
            (!cast<Instruction>(name # "64rCL") GR64:$src1)>;
  def : Pat<(store (frag (loadi64 addr:$dst), (shiftMask64 CL)), addr:$dst),
            (!cast<Instruction>(name # "64mCL") addr:$dst)>;
}

defm : MaskedRotateAmountPats<rotl, "ROL">;
defm : MaskedRotateAmountPats<rotr, "ROR">;
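
// Hedged example (assumed C idiom): an 8-bit rotate written as
// "(x << (n & 7)) | (x >> (-n & 7))" is recognized as rotl, and shiftMask8
// lets the (and n, 7) on the amount disappear, selecting "rolb %cl, %al";
// over-rotating is harmless since the hardware reduces the count MOD 8.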

// Double "funnel" shift amount is implicitly masked.
// (fshl/fshr x (and y, 31)) ==> (fshl/fshr x, y) (NOTE: modulo32)
def : Pat<(X86fshl GR16:$src1, GR16:$src2, (shiftMask32 CL)),
          (SHLD16rrCL GR16:$src1, GR16:$src2)>;
def : Pat<(X86fshr GR16:$src2, GR16:$src1, (shiftMask32 CL)),
          (SHRD16rrCL GR16:$src1, GR16:$src2)>;

// (fshl/fshr x (and y, 31)) ==> (fshl/fshr x, y)
def : Pat<(fshl GR32:$src1, GR32:$src2, (shiftMask32 CL)),
          (SHLD32rrCL GR32:$src1, GR32:$src2)>;
def : Pat<(fshr GR32:$src2, GR32:$src1, (shiftMask32 CL)),
          (SHRD32rrCL GR32:$src1, GR32:$src2)>;

// (fshl/fshr x (and y, 63)) ==> (fshl/fshr x, y)
def : Pat<(fshl GR64:$src1, GR64:$src2, (shiftMask64 CL)),
          (SHLD64rrCL GR64:$src1, GR64:$src2)>;
def : Pat<(fshr GR64:$src2, GR64:$src1, (shiftMask64 CL)),
          (SHRD64rrCL GR64:$src1, GR64:$src2)>;
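
// Hedged example (assumed source): the high word of a variable 128-bit left
// shift is an fshl of the two 64-bit halves; with the amount already masked
// by 63, this selects to "shldq %cl, %rsi, %rdi" and the mask is dropped.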

let Predicates = [HasBMI2] in {
  let AddedComplexity = 1 in {
    def : Pat<(sra GR32:$src1, (shiftMask32 GR8:$src2)),
              (SARX32rr GR32:$src1,
                        (INSERT_SUBREG
                          (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
    def : Pat<(sra GR64:$src1, (shiftMask64 GR8:$src2)),
              (SARX64rr GR64:$src1,
                        (INSERT_SUBREG
                          (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

    def : Pat<(srl GR32:$src1, (shiftMask32 GR8:$src2)),
              (SHRX32rr GR32:$src1,
                        (INSERT_SUBREG
                          (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
    def : Pat<(srl GR64:$src1, (shiftMask64 GR8:$src2)),
              (SHRX64rr GR64:$src1,
                        (INSERT_SUBREG
                          (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

    def : Pat<(shl GR32:$src1, (shiftMask32 GR8:$src2)),
              (SHLX32rr GR32:$src1,
                        (INSERT_SUBREG
                          (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
    def : Pat<(shl GR64:$src1, (shiftMask64 GR8:$src2)),
              (SHLX64rr GR64:$src1,
                        (INSERT_SUBREG
                          (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  }

  def : Pat<(sra (loadi32 addr:$src1), (shiftMask32 GR8:$src2)),
            (SARX32rm addr:$src1,
                      (INSERT_SUBREG
                        (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(sra (loadi64 addr:$src1), (shiftMask64 GR8:$src2)),
            (SARX64rm addr:$src1,
                      (INSERT_SUBREG
                        (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

  def : Pat<(srl (loadi32 addr:$src1), (shiftMask32 GR8:$src2)),
            (SHRX32rm addr:$src1,
                      (INSERT_SUBREG
                        (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(srl (loadi64 addr:$src1), (shiftMask64 GR8:$src2)),
            (SHRX64rm addr:$src1,
                      (INSERT_SUBREG
                        (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

  def : Pat<(shl (loadi32 addr:$src1), (shiftMask32 GR8:$src2)),
            (SHLX32rm addr:$src1,
                      (INSERT_SUBREG
                        (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(shl (loadi64 addr:$src1), (shiftMask64 GR8:$src2)),
            (SHLX64rm addr:$src1,
                      (INSERT_SUBREG
                        (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
}
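
// Hedged example (registers assumed): with BMI2, "x >> (n & 31)" can select
// to "shrx %ecx, %esi, %eax", taking the count from any register rather than
// forcing it into CL, and leaving EFLAGS untouched.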

// Use BTR/BTS/BTC for clearing/setting/toggling a bit in a variable location.
multiclass one_bit_patterns<RegisterClass RC, ValueType VT, Instruction BTR,
                            Instruction BTS, Instruction BTC,
                            PatFrag ShiftMask> {
  def : Pat<(and RC:$src1, (rotl -2, GR8:$src2)),
            (BTR RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(or RC:$src1, (shl 1, GR8:$src2)),
            (BTS RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(xor RC:$src1, (shl 1, GR8:$src2)),
            (BTC RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

  // Similar to above, but removing unneeded masking of the shift amount.
  def : Pat<(and RC:$src1, (rotl -2, (ShiftMask GR8:$src2))),
            (BTR RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(or RC:$src1, (shl 1, (ShiftMask GR8:$src2))),
            (BTS RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(xor RC:$src1, (shl 1, (ShiftMask GR8:$src2))),
            (BTC RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
}

defm : one_bit_patterns<GR16, i16, BTR16rr, BTS16rr, BTC16rr, shiftMask16>;
defm : one_bit_patterns<GR32, i32, BTR32rr, BTS32rr, BTC32rr, shiftMask32>;
defm : one_bit_patterns<GR64, i64, BTR64rr, BTS64rr, BTC64rr, shiftMask64>;
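
// Hedged example: clearing a variable bit, "x &= ~(1u << n)", appears in the
// DAG as (and x, (rotl -2, n)) -- -2 is the all-ones mask with bit 0 clear,
// rotated into place -- and selects to a single "btrl %esi, %eax" instead of
// materializing the mask and ANDing.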

//===----------------------------------------------------------------------===//
// EFLAGS-defining Patterns
//===----------------------------------------------------------------------===//

// add reg, reg
def : Pat<(add GR8 :$src1, GR8 :$src2), (ADD8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(add GR16:$src1, GR16:$src2), (ADD16rr GR16:$src1, GR16:$src2)>;
def : Pat<(add GR32:$src1, GR32:$src2), (ADD32rr GR32:$src1, GR32:$src2)>;
def : Pat<(add GR64:$src1, GR64:$src2), (ADD64rr GR64:$src1, GR64:$src2)>;

// add reg, mem
def : Pat<(add GR8:$src1, (loadi8 addr:$src2)),
          (ADD8rm GR8:$src1, addr:$src2)>;
def : Pat<(add GR16:$src1, (loadi16 addr:$src2)),
          (ADD16rm GR16:$src1, addr:$src2)>;
def : Pat<(add GR32:$src1, (loadi32 addr:$src2)),
          (ADD32rm GR32:$src1, addr:$src2)>;
def : Pat<(add GR64:$src1, (loadi64 addr:$src2)),
          (ADD64rm GR64:$src1, addr:$src2)>;

// add reg, imm
def : Pat<(add GR8 :$src1, imm:$src2), (ADD8ri  GR8:$src1 , imm:$src2)>;
def : Pat<(add GR16:$src1, imm:$src2), (ADD16ri GR16:$src1, imm:$src2)>;
def : Pat<(add GR32:$src1, imm:$src2), (ADD32ri GR32:$src1, imm:$src2)>;
def : Pat<(add GR16:$src1, i16immSExt8:$src2),
          (ADD16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(add GR32:$src1, i32immSExt8:$src2),
          (ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;

// sub reg, reg
def : Pat<(sub GR8 :$src1, GR8 :$src2), (SUB8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(sub GR16:$src1, GR16:$src2), (SUB16rr GR16:$src1, GR16:$src2)>;
def : Pat<(sub GR32:$src1, GR32:$src2), (SUB32rr GR32:$src1, GR32:$src2)>;
def : Pat<(sub GR64:$src1, GR64:$src2), (SUB64rr GR64:$src1, GR64:$src2)>;

// sub reg, mem
def : Pat<(sub GR8:$src1, (loadi8 addr:$src2)),
          (SUB8rm GR8:$src1, addr:$src2)>;
def : Pat<(sub GR16:$src1, (loadi16 addr:$src2)),
          (SUB16rm GR16:$src1, addr:$src2)>;
def : Pat<(sub GR32:$src1, (loadi32 addr:$src2)),
          (SUB32rm GR32:$src1, addr:$src2)>;
def : Pat<(sub GR64:$src1, (loadi64 addr:$src2)),
          (SUB64rm GR64:$src1, addr:$src2)>;

// sub reg, imm
def : Pat<(sub GR8:$src1, imm:$src2),
          (SUB8ri GR8:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, imm:$src2),
          (SUB16ri GR16:$src1, imm:$src2)>;
def : Pat<(sub GR32:$src1, imm:$src2),
          (SUB32ri GR32:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, i16immSExt8:$src2),
          (SUB16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(sub GR32:$src1, i32immSExt8:$src2),
          (SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt8:$src2),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt32:$src2),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;

// sub 0, reg
def : Pat<(X86sub_flag 0, GR8 :$src), (NEG8r  GR8 :$src)>;
def : Pat<(X86sub_flag 0, GR16:$src), (NEG16r GR16:$src)>;
def : Pat<(X86sub_flag 0, GR32:$src), (NEG32r GR32:$src)>;
def : Pat<(X86sub_flag 0, GR64:$src), (NEG64r GR64:$src)>;

// mul reg, reg
def : Pat<(mul GR16:$src1, GR16:$src2),
          (IMUL16rr GR16:$src1, GR16:$src2)>;
def : Pat<(mul GR32:$src1, GR32:$src2),
          (IMUL32rr GR32:$src1, GR32:$src2)>;
def : Pat<(mul GR64:$src1, GR64:$src2),
          (IMUL64rr GR64:$src1, GR64:$src2)>;

// mul reg, mem
def : Pat<(mul GR16:$src1, (loadi16 addr:$src2)),
          (IMUL16rm GR16:$src1, addr:$src2)>;
def : Pat<(mul GR32:$src1, (loadi32 addr:$src2)),
          (IMUL32rm GR32:$src1, addr:$src2)>;
def : Pat<(mul GR64:$src1, (loadi64 addr:$src2)),
          (IMUL64rm GR64:$src1, addr:$src2)>;

// mul reg, imm
def : Pat<(mul GR16:$src1, imm:$src2),
          (IMUL16rri GR16:$src1, imm:$src2)>;
def : Pat<(mul GR32:$src1, imm:$src2),
          (IMUL32rri GR32:$src1, imm:$src2)>;
def : Pat<(mul GR16:$src1, i16immSExt8:$src2),
          (IMUL16rri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(mul GR32:$src1, i32immSExt8:$src2),
          (IMUL32rri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt8:$src2),
          (IMUL64rri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt32:$src2),
          (IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;

// reg = mul mem, imm
def : Pat<(mul (loadi16 addr:$src1), imm:$src2),
          (IMUL16rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), imm:$src2),
          (IMUL32rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi16 addr:$src1), i16immSExt8:$src2),
          (IMUL16rmi8 addr:$src1, i16immSExt8:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), i32immSExt8:$src2),
          (IMUL32rmi8 addr:$src1, i32immSExt8:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt8:$src2),
          (IMUL64rmi8 addr:$src1, i64immSExt8:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt32:$src2),
          (IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;

// Increment/Decrement reg.
// Do not select INC/DEC if it is slow on the target.
let Predicates = [UseIncDec] in {
  def : Pat<(add GR8:$src, 1),   (INC8r GR8:$src)>;
  def : Pat<(add GR16:$src, 1),  (INC16r GR16:$src)>;
  def : Pat<(add GR32:$src, 1),  (INC32r GR32:$src)>;
  def : Pat<(add GR64:$src, 1),  (INC64r GR64:$src)>;
  def : Pat<(add GR8:$src, -1),  (DEC8r GR8:$src)>;
  def : Pat<(add GR16:$src, -1), (DEC16r GR16:$src)>;
  def : Pat<(add GR32:$src, -1), (DEC32r GR32:$src)>;
  def : Pat<(add GR64:$src, -1), (DEC64r GR64:$src)>;

  def : Pat<(X86add_flag_nocf GR8:$src, -1),  (DEC8r GR8:$src)>;
  def : Pat<(X86add_flag_nocf GR16:$src, -1), (DEC16r GR16:$src)>;
  def : Pat<(X86add_flag_nocf GR32:$src, -1), (DEC32r GR32:$src)>;
  def : Pat<(X86add_flag_nocf GR64:$src, -1), (DEC64r GR64:$src)>;
  def : Pat<(X86sub_flag_nocf GR8:$src, -1),  (INC8r GR8:$src)>;
  def : Pat<(X86sub_flag_nocf GR16:$src, -1), (INC16r GR16:$src)>;
  def : Pat<(X86sub_flag_nocf GR32:$src, -1), (INC32r GR32:$src)>;
  def : Pat<(X86sub_flag_nocf GR64:$src, -1), (INC64r GR64:$src)>;
} // UseIncDec
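
// Hedged note: on targets where INC/DEC are fast, this turns "x + 1" into
// "incl %eax" (a one-byte encoding in 32-bit mode versus three bytes for
// "addl $1, %eax"); where INC/DEC are slow because of their partial EFLAGS
// update, the UseIncDec predicate is false and plain ADD/SUB are used.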

// or reg/reg.
def : Pat<(or GR8 :$src1, GR8 :$src2), (OR8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(or GR16:$src1, GR16:$src2), (OR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(or GR32:$src1, GR32:$src2), (OR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(or GR64:$src1, GR64:$src2), (OR64rr GR64:$src1, GR64:$src2)>;

// or reg/mem
def : Pat<(or GR8:$src1, (loadi8 addr:$src2)),
          (OR8rm GR8:$src1, addr:$src2)>;
def : Pat<(or GR16:$src1, (loadi16 addr:$src2)),
          (OR16rm GR16:$src1, addr:$src2)>;
def : Pat<(or GR32:$src1, (loadi32 addr:$src2)),
          (OR32rm GR32:$src1, addr:$src2)>;
def : Pat<(or GR64:$src1, (loadi64 addr:$src2)),
          (OR64rm GR64:$src1, addr:$src2)>;

// or reg/imm
def : Pat<(or GR8:$src1 , imm:$src2), (OR8ri  GR8 :$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, imm:$src2), (OR16ri GR16:$src1, imm:$src2)>;
def : Pat<(or GR32:$src1, imm:$src2), (OR32ri GR32:$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, i16immSExt8:$src2),
          (OR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(or GR32:$src1, i32immSExt8:$src2),
          (OR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt8:$src2),
          (OR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt32:$src2),
          (OR64ri32 GR64:$src1, i64immSExt32:$src2)>;

// xor reg/reg
def : Pat<(xor GR8 :$src1, GR8 :$src2), (XOR8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(xor GR16:$src1, GR16:$src2), (XOR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(xor GR32:$src1, GR32:$src2), (XOR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(xor GR64:$src1, GR64:$src2), (XOR64rr GR64:$src1, GR64:$src2)>;

// xor reg/mem
def : Pat<(xor GR8:$src1, (loadi8 addr:$src2)),
          (XOR8rm GR8:$src1, addr:$src2)>;
def : Pat<(xor GR16:$src1, (loadi16 addr:$src2)),
          (XOR16rm GR16:$src1, addr:$src2)>;
def : Pat<(xor GR32:$src1, (loadi32 addr:$src2)),
          (XOR32rm GR32:$src1, addr:$src2)>;
def : Pat<(xor GR64:$src1, (loadi64 addr:$src2)),
          (XOR64rm GR64:$src1, addr:$src2)>;

// xor reg/imm
def : Pat<(xor GR8:$src1, imm:$src2),
          (XOR8ri GR8:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, imm:$src2),
          (XOR16ri GR16:$src1, imm:$src2)>;
def : Pat<(xor GR32:$src1, imm:$src2),
          (XOR32ri GR32:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, i16immSExt8:$src2),
          (XOR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(xor GR32:$src1, i32immSExt8:$src2),
          (XOR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt8:$src2),
          (XOR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt32:$src2),
          (XOR64ri32 GR64:$src1, i64immSExt32:$src2)>;

// and reg/reg
def : Pat<(and GR8 :$src1, GR8 :$src2), (AND8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(and GR16:$src1, GR16:$src2), (AND16rr GR16:$src1, GR16:$src2)>;
def : Pat<(and GR32:$src1, GR32:$src2), (AND32rr GR32:$src1, GR32:$src2)>;
def : Pat<(and GR64:$src1, GR64:$src2), (AND64rr GR64:$src1, GR64:$src2)>;

// and reg/mem
def : Pat<(and GR8:$src1, (loadi8 addr:$src2)),
          (AND8rm GR8:$src1, addr:$src2)>;
def : Pat<(and GR16:$src1, (loadi16 addr:$src2)),
          (AND16rm GR16:$src1, addr:$src2)>;
def : Pat<(and GR32:$src1, (loadi32 addr:$src2)),
          (AND32rm GR32:$src1, addr:$src2)>;
def : Pat<(and GR64:$src1, (loadi64 addr:$src2)),
          (AND64rm GR64:$src1, addr:$src2)>;

// and reg/imm
def : Pat<(and GR8:$src1, imm:$src2),
          (AND8ri GR8:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, imm:$src2),
          (AND16ri GR16:$src1, imm:$src2)>;
def : Pat<(and GR32:$src1, imm:$src2),
          (AND32ri GR32:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, i16immSExt8:$src2),
          (AND16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(and GR32:$src1, i32immSExt8:$src2),
          (AND32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt8:$src2),
          (AND64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt32:$src2),
          (AND64ri32 GR64:$src1, i64immSExt32:$src2)>;

// Bit scan instruction patterns to match explicit zero-undef behavior.
def : Pat<(cttz_zero_undef GR16:$src), (BSF16rr GR16:$src)>;
def : Pat<(cttz_zero_undef GR32:$src), (BSF32rr GR32:$src)>;
def : Pat<(cttz_zero_undef GR64:$src), (BSF64rr GR64:$src)>;
def : Pat<(cttz_zero_undef (loadi16 addr:$src)), (BSF16rm addr:$src)>;
def : Pat<(cttz_zero_undef (loadi32 addr:$src)), (BSF32rm addr:$src)>;
def : Pat<(cttz_zero_undef (loadi64 addr:$src)), (BSF64rm addr:$src)>;

// When HasMOVBE is enabled it is possible to get a non-legalized
// register-register 16-bit bswap. This maps it to a ROL instruction.
let Predicates = [HasMOVBE] in {
  def : Pat<(bswap GR16:$src), (ROL16ri GR16:$src, (i8 8))>;
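  // Hedged rationale: swapping the two bytes of a 16-bit value is exactly an
  // 8-bit rotate, so "rolw $8, %ax" implements bswap16 without needing a
  // register-register MOVBE form, which does not exist (MOVBE always has a
  // memory operand).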