//===- X86InstrCompiler.td - Compiler Pseudos and Patterns -*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the various pseudo instructions used by the compiler,
// as well as Pat patterns used during instruction selection.
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// Pattern Matching Support

def GetLo32XForm : SDNodeXForm<imm, [{
  // Transformation function: get the low 32 bits.
  return getI32Imm((unsigned)N->getZExtValue(), SDLoc(N));
}]>;

def GetLo8XForm : SDNodeXForm<imm, [{
  // Transformation function: get the low 8 bits.
  return getI8Imm((uint8_t)N->getZExtValue(), SDLoc(N));
}]>;
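
// Note: these two transforms are consumed by the 64-bit AND patterns near the
// end of this file, which shrink "and GR64, imm" to a 32-bit AND when the
// immediate's upper bits are known zero.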

//===----------------------------------------------------------------------===//
// Random Pseudo Instructions.

// PIC base construction.  This expands to code that looks like this:
//     call  $next_inst
//     popl %destreg
let hasSideEffects = 0, isNotDuplicable = 1, Uses = [ESP, SSP],
    SchedRW = [WriteJump] in
  def MOVPC32r : Ii32<0xE8, Pseudo, (outs GR32:$reg), (ins i32imm:$label),
                      "", []>;

// ADJCALLSTACKDOWN/UP implicitly use/def ESP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [ESP, EFLAGS, SSP], Uses = [ESP, SSP], SchedRW = [WriteALU] in {
def ADJCALLSTACKDOWN32 : I<0, Pseudo, (outs),
                           (ins i32imm:$amt1, i32imm:$amt2, i32imm:$amt3),
                           "#ADJCALLSTACKDOWN", [], IIC_ALU_NONMEM>,
                          Requires<[NotLP64]>;
def ADJCALLSTACKUP32   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)],
                           IIC_ALU_NONMEM>, Requires<[NotLP64]>;
}
def : Pat<(X86callseq_start timm:$amt1, timm:$amt2),
          (ADJCALLSTACKDOWN32 i32imm:$amt1, i32imm:$amt2, 0)>, Requires<[NotLP64]>;
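
// Illustrative expansion (performed in frame lowering, not here): a call that
// needs 16 bytes of outgoing argument space is bracketed roughly as
//   ADJCALLSTACKDOWN32 16, 0, 0   ->   subl $16, %esp
//   ADJCALLSTACKUP32   16, 0      ->   addl $16, %esp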

// ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [RSP, EFLAGS, SSP], Uses = [RSP, SSP], SchedRW = [WriteALU] in {
def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs),
                           (ins i32imm:$amt1, i32imm:$amt2, i32imm:$amt3),
                           "#ADJCALLSTACKDOWN",
                           [], IIC_ALU_NONMEM>, Requires<[IsLP64]>;
def ADJCALLSTACKUP64   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)],
                           IIC_ALU_NONMEM>, Requires<[IsLP64]>;
}
def : Pat<(X86callseq_start timm:$amt1, timm:$amt2),
          (ADJCALLSTACKDOWN64 i32imm:$amt1, i32imm:$amt2, 0)>, Requires<[IsLP64]>;

let SchedRW = [WriteSystem] in {

// x86-64 va_start lowering magic.
let usesCustomInserter = 1, Defs = [EFLAGS] in {
def VASTART_SAVE_XMM_REGS : I<0, Pseudo,
                              (outs),
                              (ins GR8:$al,
                                   i64imm:$regsavefi, i64imm:$offset,
                                   variable_ops),
                              "#VASTART_SAVE_XMM_REGS $al, $regsavefi, $offset",
                              [(X86vastart_save_xmm_regs GR8:$al,
                                                         imm:$regsavefi,
                                                         imm:$offset)]>;

// The VAARG_64 pseudo-instruction takes the address of the va_list,
// and places the address of the next argument into a register.
let Defs = [EFLAGS] in
def VAARG_64 : I<0, Pseudo,
                 (outs GR64:$dst),
                 (ins i8mem:$ap, i32imm:$size, i8imm:$mode, i32imm:$align),
                 "#VAARG_64 $dst, $ap, $size, $mode, $align",
                 [(set GR64:$dst,
                    (X86vaarg64 addr:$ap, imm:$size, imm:$mode, imm:$align)),
                  (implicit EFLAGS)]>,
               Requires<[In64BitMode]>;
}

// When using segmented stacks these are lowered into instructions which first
// check if the current stacklet has enough free memory. If it does, memory is
// allocated by bumping the stack pointer. Otherwise memory is allocated from
// the heap.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
def SEG_ALLOCA_32 : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$size),
                      "# variable sized alloca for segmented stacks",
                      [(set GR32:$dst,
                         (X86SegAlloca GR32:$size))]>,
                    Requires<[NotLP64]>;

let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
def SEG_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
                      "# variable sized alloca for segmented stacks",
                      [(set GR64:$dst,
                         (X86SegAlloca GR64:$size))]>,
                    Requires<[In64BitMode]>;

// Dynamic stack allocation yields a _chkstk or _alloca call for all Windows
// targets. These calls are needed to probe the stack when allocating more than
// 4k bytes in one go. Touching the stack at 4K increments is necessary to
// ensure that the guard pages used by the OS virtual memory manager are
// allocated in correct sequence.
// The main point of having a separate instruction is the extra unmodelled
// effects (compared to an ordinary call) such as the stack pointer change.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
def WIN_ALLOCA_32 : I<0, Pseudo, (outs), (ins GR32:$size),
                      "# dynamic stack allocation",
                      [(X86WinAlloca GR32:$size)]>,
                    Requires<[NotLP64]>;

let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
def WIN_ALLOCA_64 : I<0, Pseudo, (outs), (ins GR64:$size),
                      "# dynamic stack allocation",
                      [(X86WinAlloca GR64:$size)]>,
                    Requires<[In64BitMode]>;
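
// Illustrative lowering (assuming the X86WinAllocaExpander pass): a large
// allocation becomes, roughly, a move of the size into EAX/RAX, a call to the
// stack probe function (e.g. __chkstk), and the stack-pointer adjustment.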

// These instructions XOR the frame pointer into a GPR. They are used in some
// stack protection schemes. These are post-RA pseudos because we only know the
// frame register after register allocation.
let Constraints = "$src = $dst", isPseudo = 1, Defs = [EFLAGS] in {
def XOR32_FP : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src),
                 "xorl\t$$FP, $src", [], IIC_BIN_NONMEM>,
               Requires<[NotLP64]>, Sched<[WriteALU]>;
def XOR64_FP : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src),
                 "xorq\t$$FP, $src", [], IIC_BIN_NONMEM>,
               Requires<[In64BitMode]>, Sched<[WriteALU]>;
}
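
// Illustrative expansion once the frame register is known after RA:
//   XOR64_FP %rcx   ->   xorq %rbp, %rcx
// (the actual frame register depends on the function).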

//===----------------------------------------------------------------------===//
// EH Pseudo Instructions
//
let SchedRW = [WriteSystem] in {
let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN   : I<0xC3, RawFrm, (outs), (ins GR32:$addr),
                    "ret\t#eh_return, addr: $addr",
                    [(X86ehret GR32:$addr)], IIC_RET>, Sched<[WriteJumpLd]>;
}

let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN64 : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
                    "ret\t#eh_return, addr: $addr",
                    [(X86ehret GR64:$addr)], IIC_RET>, Sched<[WriteJumpLd]>;
}

let isTerminator = 1, hasSideEffects = 1, isBarrier = 1, hasCtrlDep = 1,
    isCodeGenOnly = 1, isReturn = 1 in {
  def CLEANUPRET : I<0, Pseudo, (outs), (ins), "# CLEANUPRET", [(cleanupret)]>;

  // CATCHRET needs a custom inserter for SEH.
  let usesCustomInserter = 1 in
    def CATCHRET : I<0, Pseudo, (outs), (ins brtarget32:$dst, brtarget32:$from),
                     "# CATCHRET",
                     [(catchret bb:$dst, bb:$from)]>;
}

let hasSideEffects = 1, hasCtrlDep = 1, isCodeGenOnly = 1,
    usesCustomInserter = 1 in
def CATCHPAD : I<0, Pseudo, (outs), (ins), "# CATCHPAD", [(catchpad)]>;

// This instruction is responsible for re-establishing stack pointers after an
// exception has been caught and we are rejoining normal control flow in the
// parent function or funclet. It generally sets ESP and EBP, and optionally
// ESI. It is only needed for 32-bit WinEH, as the runtime restores CSRs for us
// otherwise.
let hasSideEffects = 1, hasCtrlDep = 1, isCodeGenOnly = 1 in
def EH_RESTORE : I<0, Pseudo, (outs), (ins), "# EH_RESTORE", []>;

let hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1,
    usesCustomInserter = 1 in {
  def EH_SjLj_SetJmp32  : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$buf),
                            "#EH_SJLJ_SETJMP32",
                            [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
                          Requires<[Not64BitMode]>;
  def EH_SjLj_SetJmp64  : I<0, Pseudo, (outs GR32:$dst), (ins i64mem:$buf),
                            "#EH_SJLJ_SETJMP64",
                            [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
                          Requires<[In64BitMode]>;
  let isTerminator = 1 in {
  def EH_SjLj_LongJmp32 : I<0, Pseudo, (outs), (ins i32mem:$buf),
                            "#EH_SJLJ_LONGJMP32",
                            [(X86eh_sjlj_longjmp addr:$buf)]>,
                          Requires<[Not64BitMode]>;
  def EH_SjLj_LongJmp64 : I<0, Pseudo, (outs), (ins i64mem:$buf),
                            "#EH_SJLJ_LONGJMP64",
                            [(X86eh_sjlj_longjmp addr:$buf)]>,
                          Requires<[In64BitMode]>;
  }
}

let isBranch = 1, isTerminator = 1, isCodeGenOnly = 1 in {
  def EH_SjLj_Setup : I<0, Pseudo, (outs), (ins brtarget:$dst),
                        "#EH_SjLj_Setup\t$dst", []>;
}
} // SchedRW

//===----------------------------------------------------------------------===//
// Pseudo instructions used by unwind info.
//
let isPseudo = 1, SchedRW = [WriteSystem] in {
  def SEH_PushReg : I<0, Pseudo, (outs), (ins i32imm:$reg),
                      "#SEH_PushReg $reg", []>;
  def SEH_SaveReg : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$dst),
                      "#SEH_SaveReg $reg, $dst", []>;
  def SEH_SaveXMM : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$dst),
                      "#SEH_SaveXMM $reg, $dst", []>;
  def SEH_StackAlloc : I<0, Pseudo, (outs), (ins i32imm:$size),
                         "#SEH_StackAlloc $size", []>;
  def SEH_SetFrame : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$offset),
                       "#SEH_SetFrame $reg, $offset", []>;
  def SEH_PushFrame : I<0, Pseudo, (outs), (ins i1imm:$mode),
                        "#SEH_PushFrame $mode", []>;
  def SEH_EndPrologue : I<0, Pseudo, (outs), (ins),
                          "#SEH_EndPrologue", []>;
  def SEH_Epilogue : I<0, Pseudo, (outs), (ins),
                       "#SEH_Epilogue", []>;
}

//===----------------------------------------------------------------------===//
// Pseudo instructions used by segmented stacks.
//

// This is lowered into a RET instruction by MCInstLower. We need
// this so that we don't have to have a MachineBasicBlock which ends
// with a RET and also has successors.
let isPseudo = 1, SchedRW = [WriteJumpLd] in {
def MORESTACK_RET: I<0, Pseudo, (outs), (ins), "", []>;

// This instruction is lowered to a RET followed by a MOV. The two
// instructions are not generated on a higher level since then the
// verifier sees a MachineBasicBlock ending with a non-terminator.
def MORESTACK_RET_RESTORE_R10 : I<0, Pseudo, (outs), (ins), "", []>;
}

//===----------------------------------------------------------------------===//
// Alias Instructions
//===----------------------------------------------------------------------===//

// Alias instruction mapping movr0 to xor.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1,
    isPseudo = 1, AddedComplexity = 10 in
def MOV32r0  : I<0, Pseudo, (outs GR32:$dst), (ins), "",
                 [(set GR32:$dst, 0)], IIC_ALU_NONMEM>, Sched<[WriteZero]>;
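
// After register allocation MOV32r0 is expanded to "xorl %reg, %reg", which in
// 64-bit mode also clears the upper 32 bits of the full register.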

// Other widths can also make use of the 32-bit xor, which may have a smaller
// encoding and avoid partial register updates.
let AddedComplexity = 10 in {
def : Pat<(i8 0), (EXTRACT_SUBREG (MOV32r0), sub_8bit)>;
def : Pat<(i16 0), (EXTRACT_SUBREG (MOV32r0), sub_16bit)>;
def : Pat<(i64 0), (SUBREG_TO_REG (i64 0), (MOV32r0), sub_32bit)>;
}

let Predicates = [OptForSize, Not64BitMode],
    AddedComplexity = 10 in {
  let SchedRW = [WriteALU] in {
  // Pseudo instructions for materializing 1 and -1 using XOR+INC/DEC,
  // which only require 3 bytes compared to MOV32ri which requires 5.
  let Defs = [EFLAGS], isReMaterializable = 1, isPseudo = 1 in {
    def MOV32r1 : I<0, Pseudo, (outs GR32:$dst), (ins), "",
                    [(set GR32:$dst, 1)], IIC_ALU_NONMEM>;
    def MOV32r_1 : I<0, Pseudo, (outs GR32:$dst), (ins), "",
                     [(set GR32:$dst, -1)], IIC_ALU_NONMEM>;
  }
  } // SchedRW

  // MOV16ri is 4 bytes, so the instructions above are smaller.
  def : Pat<(i16 1), (EXTRACT_SUBREG (MOV32r1), sub_16bit)>;
  def : Pat<(i16 -1), (EXTRACT_SUBREG (MOV32r_1), sub_16bit)>;
}
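
// Illustrative expansions: MOV32r1 becomes "xorl %reg, %reg; incl %reg" and
// MOV32r_1 becomes "xorl %reg, %reg; decl %reg", 3 bytes each as noted above.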

let isReMaterializable = 1, isPseudo = 1, AddedComplexity = 5,
    SchedRW = [WriteALU] in {
// AddedComplexity higher than MOV64ri but lower than MOV32r0 and MOV32r1.
def MOV32ImmSExti8 : I<0, Pseudo, (outs GR32:$dst), (ins i32i8imm:$src), "",
                       [(set GR32:$dst, i32immSExt8:$src)], IIC_ALU_NONMEM>,
                     Requires<[OptForMinSize, NotWin64WithoutFP]>;
def MOV64ImmSExti8 : I<0, Pseudo, (outs GR64:$dst), (ins i64i8imm:$src), "",
                       [(set GR64:$dst, i64immSExt8:$src)], IIC_ALU_NONMEM>,
                     Requires<[OptForMinSize, NotWin64WithoutFP]>;
}

// Materialize i64 constant where top 32-bits are zero. This could theoretically
// use MOV32ri with a SUBREG_TO_REG to represent the zero-extension, however
// that would make it more difficult to rematerialize.
let isReMaterializable = 1, isAsCheapAsAMove = 1,
    isPseudo = 1, hasSideEffects = 0, SchedRW = [WriteALU] in
def MOV32ri64 : I<0, Pseudo, (outs GR32:$dst), (ins i64i32imm:$src), "", [],
                  IIC_ALU_NONMEM>;

// This 64-bit pseudo-move can be used for both a 64-bit constant that is
// actually the zero-extension of a 32-bit constant and for labels in the
// x86-64 small code model.
def mov64imm32 : ComplexPattern<i64, 1, "selectMOV64Imm32", [imm, X86Wrapper]>;

let AddedComplexity = 1 in
def : Pat<(i64 mov64imm32:$src),
          (SUBREG_TO_REG (i64 0), (MOV32ri64 mov64imm32:$src), sub_32bit)>;

// Use sbb to materialize carry bit.
let Uses = [EFLAGS], Defs = [EFLAGS], isPseudo = 1, SchedRW = [WriteALU] in {
// FIXME: These are pseudo ops that should be replaced with Pat<> patterns.
// However, Pat<> can't replicate the destination reg into the inputs of the
// result.
def SETB_C8r : I<0, Pseudo, (outs GR8:$dst), (ins), "",
                 [(set GR8:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
def SETB_C16r : I<0, Pseudo, (outs GR16:$dst), (ins), "",
                  [(set GR16:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
def SETB_C32r : I<0, Pseudo, (outs GR32:$dst), (ins), "",
                  [(set GR32:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
def SETB_C64r : I<0, Pseudo, (outs GR64:$dst), (ins), "",
                  [(set GR64:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
} // Uses = [EFLAGS], Defs = [EFLAGS], isPseudo = 1, SchedRW = [WriteALU]

def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i64 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C64r)>;

def : Pat<(i16 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i64 (sext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C64r)>;

// We canonicalize 'setb' to "(and (sbb reg,reg), 1)" on the hope that the and
// will be eliminated and that the sbb can be extended up to a wider type. When
// this happens, it is great. However, if we are left with an 8-bit sbb and an
// and, we might as well just match it as a setb.
def : Pat<(and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1),
          (SETBr)>;

// (add OP, SETB) -> (adc OP, 0)
def : Pat<(add (and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR8:$op),
          (ADC8ri GR8:$op, 0)>;
def : Pat<(add (and (i32 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR32:$op),
          (ADC32ri8 GR32:$op, 0)>;
def : Pat<(add (and (i64 (X86setcc_c X86_COND_B, EFLAGS)), 1), GR64:$op),
          (ADC64ri8 GR64:$op, 0)>;

// (sub OP, SETB) -> (sbb OP, 0)
def : Pat<(sub GR8:$op, (and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
          (SBB8ri GR8:$op, 0)>;
def : Pat<(sub GR32:$op, (and (i32 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
          (SBB32ri8 GR32:$op, 0)>;
def : Pat<(sub GR64:$op, (and (i64 (X86setcc_c X86_COND_B, EFLAGS)), 1)),
          (SBB64ri8 GR64:$op, 0)>;

// (sub OP, SETCC_CARRY) -> (adc OP, 0)
def : Pat<(sub GR8:$op, (i8 (X86setcc_c X86_COND_B, EFLAGS))),
          (ADC8ri GR8:$op, 0)>;
def : Pat<(sub GR32:$op, (i32 (X86setcc_c X86_COND_B, EFLAGS))),
          (ADC32ri8 GR32:$op, 0)>;
def : Pat<(sub GR64:$op, (i64 (X86setcc_c X86_COND_B, EFLAGS))),
          (ADC64ri8 GR64:$op, 0)>;

//===----------------------------------------------------------------------===//
// String Pseudo Instructions
//
let SchedRW = [WriteMicrocoded] in {
let Defs = [ECX,EDI,ESI], Uses = [ECX,EDI,ESI], isCodeGenOnly = 1 in {
def REP_MOVSB_32 : I<0xA4, RawFrm, (outs), (ins), "{rep;movsb|rep movsb}",
                     [(X86rep_movs i8)], IIC_REP_MOVS>, REP,
                   Requires<[Not64BitMode]>;
def REP_MOVSW_32 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsw|rep movsw}",
                     [(X86rep_movs i16)], IIC_REP_MOVS>, REP, OpSize16,
                   Requires<[Not64BitMode]>;
def REP_MOVSD_32 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsl|rep movsd}",
                     [(X86rep_movs i32)], IIC_REP_MOVS>, REP, OpSize32,
                   Requires<[Not64BitMode]>;
}

let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI], isCodeGenOnly = 1 in {
def REP_MOVSB_64 : I<0xA4, RawFrm, (outs), (ins), "{rep;movsb|rep movsb}",
                     [(X86rep_movs i8)], IIC_REP_MOVS>, REP,
                   Requires<[In64BitMode]>;
def REP_MOVSW_64 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsw|rep movsw}",
                     [(X86rep_movs i16)], IIC_REP_MOVS>, REP, OpSize16,
                   Requires<[In64BitMode]>;
def REP_MOVSD_64 : I<0xA5, RawFrm, (outs), (ins), "{rep;movsl|rep movsd}",
                     [(X86rep_movs i32)], IIC_REP_MOVS>, REP, OpSize32,
                   Requires<[In64BitMode]>;
def REP_MOVSQ_64 : RI<0xA5, RawFrm, (outs), (ins), "{rep;movsq|rep movsq}",
                      [(X86rep_movs i64)], IIC_REP_MOVS>, REP,
                    Requires<[In64BitMode]>;
}

// FIXME: Should use "(X86rep_stos AL)" as the pattern.
let Defs = [ECX,EDI], isCodeGenOnly = 1 in {
  let Uses = [AL,ECX,EDI] in
  def REP_STOSB_32 : I<0xAA, RawFrm, (outs), (ins), "{rep;stosb|rep stosb}",
                       [(X86rep_stos i8)], IIC_REP_STOS>, REP,
                     Requires<[Not64BitMode]>;
  let Uses = [AX,ECX,EDI] in
  def REP_STOSW_32 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosw|rep stosw}",
                       [(X86rep_stos i16)], IIC_REP_STOS>, REP, OpSize16,
                     Requires<[Not64BitMode]>;
  let Uses = [EAX,ECX,EDI] in
  def REP_STOSD_32 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosl|rep stosd}",
                       [(X86rep_stos i32)], IIC_REP_STOS>, REP, OpSize32,
                     Requires<[Not64BitMode]>;
}

let Defs = [RCX,RDI], isCodeGenOnly = 1 in {
  let Uses = [AL,RCX,RDI] in
  def REP_STOSB_64 : I<0xAA, RawFrm, (outs), (ins), "{rep;stosb|rep stosb}",
                       [(X86rep_stos i8)], IIC_REP_STOS>, REP,
                     Requires<[In64BitMode]>;
  let Uses = [AX,RCX,RDI] in
  def REP_STOSW_64 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosw|rep stosw}",
                       [(X86rep_stos i16)], IIC_REP_STOS>, REP, OpSize16,
                     Requires<[In64BitMode]>;
  let Uses = [RAX,RCX,RDI] in
  def REP_STOSD_64 : I<0xAB, RawFrm, (outs), (ins), "{rep;stosl|rep stosd}",
                       [(X86rep_stos i32)], IIC_REP_STOS>, REP, OpSize32,
                     Requires<[In64BitMode]>;

  let Uses = [RAX,RCX,RDI] in
  def REP_STOSQ_64 : RI<0xAB, RawFrm, (outs), (ins), "{rep;stosq|rep stosq}",
                        [(X86rep_stos i64)], IIC_REP_STOS>, REP,
                      Requires<[In64BitMode]>;
}
} // SchedRW
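
// These pseudos are typically selected when lowering memcpy/memset to
// "rep movs"/"rep stos": the count lives in (E/R)CX, the destination in
// (E/R)DI, and the source (for movs) in (E/R)SI or the store value (for stos)
// in AL/AX/EAX/RAX.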

//===----------------------------------------------------------------------===//
// Thread Local Storage Instructions
//
let SchedRW = [WriteSystem] in {

// ELF TLS Support
// All calls clobber the non-callee saved registers. ESP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,
            ST0, ST1, ST2, ST3, ST4, ST5, ST6, ST7,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS, DF],
    usesCustomInserter = 1, Uses = [ESP, SSP] in {
def TLS_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                   "# TLS_addr32",
                   [(X86tlsaddr tls32addr:$sym)]>,
                 Requires<[Not64BitMode]>;
def TLS_base_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                        "# TLS_base_addr32",
                        [(X86tlsbaseaddr tls32baseaddr:$sym)]>,
                      Requires<[Not64BitMode]>;
}

// All calls clobber the non-callee saved registers. RSP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
            FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,
            ST0, ST1, ST2, ST3, ST4, ST5, ST6, ST7,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS, DF],
    usesCustomInserter = 1, Uses = [RSP, SSP] in {
def TLS_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLS_addr64",
                   [(X86tlsaddr tls64addr:$sym)]>,
                 Requires<[In64BitMode]>;
def TLS_base_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                        "# TLS_base_addr64",
                        [(X86tlsbaseaddr tls64baseaddr:$sym)]>,
                      Requires<[In64BitMode]>;
}
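
// Illustrative lowering: TLS_addr64 is emitted as the canonical
// general-dynamic TLS sequence the linker expects, roughly
//   leaq sym@TLSGD(%rip), %rdi
//   call __tls_get_addr@PLT
// which is why the pseudo must clobber essentially all volatile state.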

// Darwin TLS Support
// For i386, the address of the thunk is passed on the stack, on return the
// address of the variable is in %eax. %ecx is trashed during the function
// call. All other registers are preserved.
let Defs = [EAX, ECX, EFLAGS, DF],
    Uses = [ESP, SSP],
    usesCustomInserter = 1 in
def TLSCall_32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                   "# TLSCall_32",
                   [(X86TLSCall addr:$sym)]>,
                 Requires<[Not64BitMode]>;

// For x86_64, the address of the thunk is passed in %rdi, but the
// pseudo directly uses the symbol, so do not add an implicit use of
// %rdi. The lowering will do the right thing with RDI.
// On return the address of the variable is in %rax. All other
// registers are preserved.
let Defs = [RAX, EFLAGS, DF],
    Uses = [RSP, SSP],
    usesCustomInserter = 1 in
def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLSCall_64",
                   [(X86TLSCall addr:$sym)]>,
                 Requires<[In64BitMode]>;
} // SchedRW

//===----------------------------------------------------------------------===//
// Conditional Move Pseudo Instructions

// CMOV* - Used to implement the SELECT DAG operation. Expanded after
// instruction selection into a branch sequence.
multiclass CMOVrr_PSEUDO<RegisterClass RC, ValueType VT> {
  def CMOV#NAME  : I<0, Pseudo,
                    (outs RC:$dst), (ins RC:$t, RC:$f, i8imm:$cond),
                    "#CMOV_"#NAME#" PSEUDO!",
                    [(set RC:$dst, (VT (X86cmov RC:$t, RC:$f, imm:$cond,
                                                EFLAGS)))]>;
}

let usesCustomInserter = 1, hasNoSchedulingInfo = 1, Uses = [EFLAGS] in {
  // X86 doesn't have 8-bit conditional moves. Use a customInserter to
  // emit control flow. An alternative to this is to mark i8 SELECT as Promote,
  // however that requires promoting the operands, and can induce additional
  // i8 register pressure.
  defm _GR8 : CMOVrr_PSEUDO<GR8, i8>;

  let Predicates = [NoCMov] in {
    defm _GR32 : CMOVrr_PSEUDO<GR32, i32>;
    defm _GR16 : CMOVrr_PSEUDO<GR16, i16>;
  } // Predicates = [NoCMov]

  // fcmov doesn't handle all possible EFLAGS, provide a fallback if there is no
  // SSE1/SSE2.
  let Predicates = [FPStackf32] in
    defm _RFP32 : CMOVrr_PSEUDO<RFP32, f32>;

  let Predicates = [FPStackf64] in
    defm _RFP64 : CMOVrr_PSEUDO<RFP64, f64>;

  defm _RFP80 : CMOVrr_PSEUDO<RFP80, f80>;

  defm _FR32   : CMOVrr_PSEUDO<FR32, f32>;
  defm _FR64   : CMOVrr_PSEUDO<FR64, f64>;
  defm _FR128  : CMOVrr_PSEUDO<FR128, f128>;
  defm _V4F32  : CMOVrr_PSEUDO<VR128, v4f32>;
  defm _V2F64  : CMOVrr_PSEUDO<VR128, v2f64>;
  defm _V2I64  : CMOVrr_PSEUDO<VR128, v2i64>;
  defm _V8F32  : CMOVrr_PSEUDO<VR256, v8f32>;
  defm _V4F64  : CMOVrr_PSEUDO<VR256, v4f64>;
  defm _V4I64  : CMOVrr_PSEUDO<VR256, v4i64>;
  defm _V8I64  : CMOVrr_PSEUDO<VR512, v8i64>;
  defm _V8F64  : CMOVrr_PSEUDO<VR512, v8f64>;
  defm _V16F32 : CMOVrr_PSEUDO<VR512, v16f32>;
  defm _V8I1   : CMOVrr_PSEUDO<VK8,  v8i1>;
  defm _V16I1  : CMOVrr_PSEUDO<VK16, v16i1>;
  defm _V32I1  : CMOVrr_PSEUDO<VK32, v32i1>;
  defm _V64I1  : CMOVrr_PSEUDO<VK64, v64i1>;
} // usesCustomInserter = 1, hasNoSchedulingInfo = 1, Uses = [EFLAGS]
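
// The custom inserter (X86TargetLowering::EmitLoweredSelect) expands each
// CMOV_* pseudo into a branch diamond: a conditional jump on the EFLAGS
// condition, one block per value, and a PHI in the join block (a sketch of
// the shape, not the exact emitted MI sequence).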

//===----------------------------------------------------------------------===//
// Normal-Instructions-With-Lock-Prefix Pseudo Instructions
//===----------------------------------------------------------------------===//

// FIXME: Use normal instructions and add lock prefix dynamically.

// Memory barriers

// TODO: Get this to fold the constant into the instruction.
let isCodeGenOnly = 1, Defs = [EFLAGS] in
def OR32mrLocked  : I<0x09, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$zero),
                      "or{l}\t{$zero, $dst|$dst, $zero}", [],
                      IIC_ALU_MEM>, Requires<[Not64BitMode]>, OpSize32, LOCK,
                    Sched<[WriteALULd, WriteRMW]>;

let hasSideEffects = 1 in
def Int_MemBarrier : I<0, Pseudo, (outs), (ins),
                       "#MEMBARRIER",
                       [(X86MemBarrier)]>, Sched<[WriteLoad]>;

// RegOpc corresponds to the mr version of the instruction
// ImmOpc corresponds to the mi version of the instruction
// ImmOpc8 corresponds to the mi8 version of the instruction
// ImmMod corresponds to the instruction format of the mi and mi8 versions
multiclass LOCK_ArithBinOp<bits<8> RegOpc, bits<8> ImmOpc, bits<8> ImmOpc8,
                           Format ImmMod, SDNode Op, string mnemonic> {
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
    SchedRW = [WriteALULd, WriteRMW] in {

def NAME#8mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                  RegOpc{3}, RegOpc{2}, RegOpc{1}, 0 },
                 MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
                 !strconcat(mnemonic, "{b}\t",
                            "{$src2, $dst|$dst, $src2}"),
                 [(set EFLAGS, (Op addr:$dst, GR8:$src2))],
                 IIC_ALU_NONMEM>, LOCK;

def NAME#16mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                   RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                  MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
                  !strconcat(mnemonic, "{w}\t",
                             "{$src2, $dst|$dst, $src2}"),
                  [(set EFLAGS, (Op addr:$dst, GR16:$src2))],
                  IIC_ALU_NONMEM>, OpSize16, LOCK;

def NAME#32mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                   RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                  MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
                  !strconcat(mnemonic, "{l}\t",
                             "{$src2, $dst|$dst, $src2}"),
                  [(set EFLAGS, (Op addr:$dst, GR32:$src2))],
                  IIC_ALU_NONMEM>, OpSize32, LOCK;

def NAME#64mr : RI<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                    RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                   MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                   !strconcat(mnemonic, "{q}\t",
                              "{$src2, $dst|$dst, $src2}"),
                   [(set EFLAGS, (Op addr:$dst, GR64:$src2))],
                   IIC_ALU_NONMEM>, LOCK;

def NAME#8mi : Ii8<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                    ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 0 },
                   ImmMod, (outs), (ins i8mem :$dst, i8imm :$src2),
                   !strconcat(mnemonic, "{b}\t",
                              "{$src2, $dst|$dst, $src2}"),
                   [(set EFLAGS, (Op addr:$dst, (i8 imm:$src2)))],
                   IIC_ALU_MEM>, LOCK;

def NAME#16mi : Ii16<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                      ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                     ImmMod, (outs), (ins i16mem :$dst, i16imm :$src2),
                     !strconcat(mnemonic, "{w}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [(set EFLAGS, (Op addr:$dst, (i16 imm:$src2)))],
                     IIC_ALU_MEM>, OpSize16, LOCK;

def NAME#32mi : Ii32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                      ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                     ImmMod, (outs), (ins i32mem :$dst, i32imm :$src2),
                     !strconcat(mnemonic, "{l}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [(set EFLAGS, (Op addr:$dst, (i32 imm:$src2)))],
                     IIC_ALU_MEM>, OpSize32, LOCK;

def NAME#64mi32 : RIi32S<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                          ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                         ImmMod, (outs), (ins i64mem :$dst, i64i32imm :$src2),
                         !strconcat(mnemonic, "{q}\t",
                                    "{$src2, $dst|$dst, $src2}"),
                         [(set EFLAGS, (Op addr:$dst, i64immSExt32:$src2))],
                         IIC_ALU_MEM>, LOCK;

def NAME#16mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                      ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                     ImmMod, (outs), (ins i16mem :$dst, i16i8imm :$src2),
                     !strconcat(mnemonic, "{w}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [(set EFLAGS, (Op addr:$dst, i16immSExt8:$src2))],
                     IIC_ALU_MEM>, OpSize16, LOCK;

def NAME#32mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                      ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                     ImmMod, (outs), (ins i32mem :$dst, i32i8imm :$src2),
                     !strconcat(mnemonic, "{l}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [(set EFLAGS, (Op addr:$dst, i32immSExt8:$src2))],
                     IIC_ALU_MEM>, OpSize32, LOCK;

def NAME#64mi8 : RIi8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                       ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                      ImmMod, (outs), (ins i64mem :$dst, i64i8imm :$src2),
                      !strconcat(mnemonic, "{q}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      [(set EFLAGS, (Op addr:$dst, i64immSExt8:$src2))],
                      IIC_ALU_MEM>, LOCK;
}

}

defm LOCK_ADD : LOCK_ArithBinOp<0x00, 0x80, 0x83, MRM0m, X86lock_add, "add">;
defm LOCK_SUB : LOCK_ArithBinOp<0x28, 0x80, 0x83, MRM5m, X86lock_sub, "sub">;
defm LOCK_OR  : LOCK_ArithBinOp<0x08, 0x80, 0x83, MRM1m, X86lock_or , "or">;
defm LOCK_AND : LOCK_ArithBinOp<0x20, 0x80, 0x83, MRM4m, X86lock_and, "and">;
defm LOCK_XOR : LOCK_ArithBinOp<0x30, 0x80, 0x83, MRM6m, X86lock_xor, "xor">;
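
// These are selected for atomic read-modify-write operations whose only used
// result is EFLAGS (or nothing), e.g. an "atomicrmw add" with an unused return
// value can become "lock addl $5, (%rdi)" instead of a cmpxchg loop
// (illustrative operands).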

multiclass LOCK_ArithUnOp<bits<8> Opc8, bits<8> Opc, Format Form,
                          string frag, string mnemonic> {
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
    SchedRW = [WriteALULd, WriteRMW] in {
def NAME#8m  : I<Opc8, Form, (outs), (ins i8mem :$dst),
                 !strconcat(mnemonic, "{b}\t$dst"),
                 [(set EFLAGS, (!cast<PatFrag>(frag # "_8") addr:$dst))],
                 IIC_UNARY_MEM>, LOCK;
def NAME#16m : I<Opc, Form, (outs), (ins i16mem:$dst),
                 !strconcat(mnemonic, "{w}\t$dst"),
                 [(set EFLAGS, (!cast<PatFrag>(frag # "_16") addr:$dst))],
                 IIC_UNARY_MEM>, OpSize16, LOCK;
def NAME#32m : I<Opc, Form, (outs), (ins i32mem:$dst),
                 !strconcat(mnemonic, "{l}\t$dst"),
                 [(set EFLAGS, (!cast<PatFrag>(frag # "_32") addr:$dst))],
                 IIC_UNARY_MEM>, OpSize32, LOCK;
def NAME#64m : RI<Opc, Form, (outs), (ins i64mem:$dst),
                  !strconcat(mnemonic, "{q}\t$dst"),
                  [(set EFLAGS, (!cast<PatFrag>(frag # "_64") addr:$dst))],
                  IIC_UNARY_MEM>, LOCK;
}
}

multiclass unary_atomic_intrin<SDNode atomic_op> {
  def _8 : PatFrag<(ops node:$ptr),
                   (atomic_op  node:$ptr), [{
    return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i8;
  }]>;
  def _16 : PatFrag<(ops node:$ptr),
                    (atomic_op node:$ptr), [{
    return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
  }]>;
  def _32 : PatFrag<(ops node:$ptr),
                    (atomic_op node:$ptr), [{
    return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
  }]>;
  def _64 : PatFrag<(ops node:$ptr),
                    (atomic_op node:$ptr), [{
    return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
  }]>;
}

defm X86lock_inc : unary_atomic_intrin<X86lock_inc>;
defm X86lock_dec : unary_atomic_intrin<X86lock_dec>;

defm LOCK_INC : LOCK_ArithUnOp<0xFE, 0xFF, MRM0m, "X86lock_inc", "inc">;
defm LOCK_DEC : LOCK_ArithUnOp<0xFE, 0xFF, MRM1m, "X86lock_dec", "dec">;

// Atomic compare and swap.
multiclass LCMPXCHG_UnOp<bits<8> Opc, Format Form, string mnemonic,
                         SDPatternOperator frag, X86MemOperand x86memop,
                         InstrItinClass itin> {
let isCodeGenOnly = 1, usesCustomInserter = 1 in {
  def NAME : I<Opc, Form, (outs), (ins x86memop:$ptr),
               !strconcat(mnemonic, "\t$ptr"),
               [(frag addr:$ptr)], itin>, TB, LOCK;
}
}

multiclass LCMPXCHG_BinOp<bits<8> Opc8, bits<8> Opc, Format Form,
                          string mnemonic, SDPatternOperator frag,
                          InstrItinClass itin8, InstrItinClass itin> {
let isCodeGenOnly = 1, SchedRW = [WriteALULd, WriteRMW] in {
  let Defs = [AL, EFLAGS], Uses = [AL] in
  def NAME#8  : I<Opc8, Form, (outs), (ins i8mem:$ptr, GR8:$swap),
                  !strconcat(mnemonic, "{b}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR8:$swap, 1)], itin8>, TB, LOCK;
  let Defs = [AX, EFLAGS], Uses = [AX] in
  def NAME#16 : I<Opc, Form, (outs), (ins i16mem:$ptr, GR16:$swap),
                  !strconcat(mnemonic, "{w}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR16:$swap, 2)], itin>, TB, OpSize16, LOCK;
  let Defs = [EAX, EFLAGS], Uses = [EAX] in
  def NAME#32 : I<Opc, Form, (outs), (ins i32mem:$ptr, GR32:$swap),
                  !strconcat(mnemonic, "{l}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR32:$swap, 4)], itin>, TB, OpSize32, LOCK;
  let Defs = [RAX, EFLAGS], Uses = [RAX] in
  def NAME#64 : RI<Opc, Form, (outs), (ins i64mem:$ptr, GR64:$swap),
                   !strconcat(mnemonic, "{q}\t{$swap, $ptr|$ptr, $swap}"),
                   [(frag addr:$ptr, GR64:$swap, 8)], itin>, TB, LOCK;
}
}

let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX],
    SchedRW = [WriteALULd, WriteRMW] in {
defm LCMPXCHG8B : LCMPXCHG_UnOp<0xC7, MRM1m, "cmpxchg8b",
                                X86cas8, i64mem,
                                IIC_CMPX_LOCK_8B>;
}

// This pseudo must be used when the frame uses RBX as
// the base pointer. Indeed, in such situation RBX is a reserved
// register and the register allocator will ignore any use/def of
// it. In other words, the register will not fix the clobbering of
// RBX that will happen when setting the arguments for the instruction.
//
// Unlike the actual related instruction, we mark that this one
// defines EBX (instead of using EBX).
// The rationale is that we will define RBX during the expansion of
// the pseudo. The argument feeding EBX is ebx_input.
//
// The additional argument, $ebx_save, is a temporary register used to
// save the value of RBX across the actual instruction.
//
// To make sure the register assigned to $ebx_save does not interfere with
// the definition of the actual instruction, we use a definition $dst which
// is tied to $ebx_save. That way, the live-range of $ebx_save spans across
// the instruction and we are sure we will have a valid register to restore
// the value of RBX.
let Defs = [EAX, EDX, EBX, EFLAGS], Uses = [EAX, ECX, EDX],
    SchedRW = [WriteALULd, WriteRMW], isCodeGenOnly = 1, isPseudo = 1,
    Constraints = "$ebx_save = $dst", usesCustomInserter = 1 in {
def LCMPXCHG8B_SAVE_EBX :
    I<0, Pseudo, (outs GR32:$dst),
      (ins i64mem:$ptr, GR32:$ebx_input, GR32:$ebx_save),
      !strconcat("cmpxchg8b", "\t$ptr"),
      [(set GR32:$dst, (X86cas8save_ebx addr:$ptr, GR32:$ebx_input,
                                        GR32:$ebx_save))],
      IIC_CMPX_LOCK_8B>;
}

let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX],
    Predicates = [HasCmpxchg16b], SchedRW = [WriteALULd, WriteRMW] in {
defm LCMPXCHG16B : LCMPXCHG_UnOp<0xC7, MRM1m, "cmpxchg16b",
                                 X86cas16, i128mem,
                                 IIC_CMPX_LOCK_16B>, REX_W;
}

// Same as LCMPXCHG8B_SAVE_EBX but for the 16 Bytes variant.
let Defs = [RAX, RDX, RBX, EFLAGS], Uses = [RAX, RCX, RDX],
    Predicates = [HasCmpxchg16b], SchedRW = [WriteALULd, WriteRMW],
    isCodeGenOnly = 1, isPseudo = 1, Constraints = "$rbx_save = $dst",
    usesCustomInserter = 1 in {
def LCMPXCHG16B_SAVE_RBX :
    I<0, Pseudo, (outs GR64:$dst),
      (ins i128mem:$ptr, GR64:$rbx_input, GR64:$rbx_save),
      !strconcat("cmpxchg16b", "\t$ptr"),
      [(set GR64:$dst, (X86cas16save_rbx addr:$ptr, GR64:$rbx_input,
                                         GR64:$rbx_save))],
      IIC_CMPX_LOCK_16B>;
}

defm LCMPXCHG : LCMPXCHG_BinOp<0xB0, 0xB1, MRMDestMem, "cmpxchg",
                               X86cas, IIC_CMPX_LOCK_8, IIC_CMPX_LOCK>;

// Atomic exchange and add
multiclass ATOMIC_LOAD_BINOP<bits<8> opc8, bits<8> opc, string mnemonic,
                             string frag,
                             InstrItinClass itin8, InstrItinClass itin> {
  let Constraints = "$val = $dst", Defs = [EFLAGS], isCodeGenOnly = 1,
      SchedRW = [WriteALULd, WriteRMW] in {
    def NAME#8  : I<opc8, MRMSrcMem, (outs GR8:$dst),
                    (ins GR8:$val, i8mem:$ptr),
                    !strconcat(mnemonic, "{b}\t{$val, $ptr|$ptr, $val}"),
                    [(set GR8:$dst,
                          (!cast<PatFrag>(frag # "_8") addr:$ptr, GR8:$val))],
                    itin8>;
    def NAME#16 : I<opc, MRMSrcMem, (outs GR16:$dst),
                    (ins GR16:$val, i16mem:$ptr),
                    !strconcat(mnemonic, "{w}\t{$val, $ptr|$ptr, $val}"),
                    [(set
                       GR16:$dst,
                       (!cast<PatFrag>(frag # "_16") addr:$ptr, GR16:$val))],
                    itin>, OpSize16;
    def NAME#32 : I<opc, MRMSrcMem, (outs GR32:$dst),
                    (ins GR32:$val, i32mem:$ptr),
                    !strconcat(mnemonic, "{l}\t{$val, $ptr|$ptr, $val}"),
                    [(set
                       GR32:$dst,
                       (!cast<PatFrag>(frag # "_32") addr:$ptr, GR32:$val))],
                    itin>, OpSize32;
    def NAME#64 : RI<opc, MRMSrcMem, (outs GR64:$dst),
                     (ins GR64:$val, i64mem:$ptr),
                     !strconcat(mnemonic, "{q}\t{$val, $ptr|$ptr, $val}"),
                     [(set
                        GR64:$dst,
                        (!cast<PatFrag>(frag # "_64") addr:$ptr, GR64:$val))],
                     itin>;
  }
}

defm LXADD : ATOMIC_LOAD_BINOP<0xc0, 0xc1, "xadd", "atomic_load_add",
                               IIC_XADD_LOCK_MEM8, IIC_XADD_LOCK_MEM>,
             TB, LOCK;

/* The following multiclass tries to make sure that in code like
 *  x.store (immediate op x.load(acquire), release)
 * and
 *  x.store (register op x.load(acquire), release)
 * an operation directly on memory is generated instead of wasting a register.
 * It is not automatic as atomic_store/load are only lowered to MOV instructions
 * extremely late to prevent them from being accidentally reordered in the backend
 * (see below the RELEASE_MOV* / ACQUIRE_MOV* pseudo-instructions)
 */
multiclass RELEASE_BINOP_MI<SDNode op> {
    def NAME#8mi : I<0, Pseudo, (outs), (ins i8mem:$dst, i8imm:$src),
        "#BINOP "#NAME#"8mi PSEUDO!",
        [(atomic_store_8 addr:$dst, (op
            (atomic_load_8 addr:$dst), (i8 imm:$src)))]>;
    def NAME#8mr : I<0, Pseudo, (outs), (ins i8mem:$dst, GR8:$src),
        "#BINOP "#NAME#"8mr PSEUDO!",
        [(atomic_store_8 addr:$dst, (op
            (atomic_load_8 addr:$dst), GR8:$src))]>;
    // NAME#16 is not generated as 16-bit arithmetic instructions are considered
    // costly and avoided as far as possible by this backend anyway
    def NAME#32mi : I<0, Pseudo, (outs), (ins i32mem:$dst, i32imm:$src),
        "#BINOP "#NAME#"32mi PSEUDO!",
        [(atomic_store_32 addr:$dst, (op
            (atomic_load_32 addr:$dst), (i32 imm:$src)))]>;
    def NAME#32mr : I<0, Pseudo, (outs), (ins i32mem:$dst, GR32:$src),
        "#BINOP "#NAME#"32mr PSEUDO!",
        [(atomic_store_32 addr:$dst, (op
            (atomic_load_32 addr:$dst), GR32:$src))]>;
    def NAME#64mi32 : I<0, Pseudo, (outs), (ins i64mem:$dst, i64i32imm:$src),
        "#BINOP "#NAME#"64mi32 PSEUDO!",
        [(atomic_store_64 addr:$dst, (op
            (atomic_load_64 addr:$dst), (i64immSExt32:$src)))]>;
    def NAME#64mr : I<0, Pseudo, (outs), (ins i64mem:$dst, GR64:$src),
        "#BINOP "#NAME#"64mr PSEUDO!",
        [(atomic_store_64 addr:$dst, (op
            (atomic_load_64 addr:$dst), GR64:$src))]>;
}
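
// Illustrative selection: IR of the shape
//   %v = load atomic i32, i32* %p acquire, align 4
//   %a = add i32 %v, 5
//   store atomic i32 %a, i32* %p release, align 4
// goes through RELEASE_ADD32mi and is ultimately emitted as a single
// memory-form "addl $5, (%p)".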

let Defs = [EFLAGS], SchedRW = [WriteMicrocoded] in {
  defm RELEASE_ADD : RELEASE_BINOP_MI<add>;
  defm RELEASE_AND : RELEASE_BINOP_MI<and>;
  defm RELEASE_OR  : RELEASE_BINOP_MI<or>;
  defm RELEASE_XOR : RELEASE_BINOP_MI<xor>;
  // Note: we don't deal with sub, because subtractions of constants are
  // optimized into additions before this code can run.
}

// Same as above, but for floating-point.
// FIXME: imm version.
// FIXME: Version that doesn't clobber $src, using AVX's VADDSS.
// FIXME: This could also handle SIMD operations with *ps and *pd instructions.
let usesCustomInserter = 1, SchedRW = [WriteMicrocoded] in {
multiclass RELEASE_FP_BINOP_MI<SDNode op> {
    def NAME#32mr : I<0, Pseudo, (outs), (ins i32mem:$dst, FR32:$src),
        "#BINOP "#NAME#"32mr PSEUDO!",
        [(atomic_store_32 addr:$dst,
           (i32 (bitconvert (op
              (f32 (bitconvert (i32 (atomic_load_32 addr:$dst)))),
              FR32:$src))))]>, Requires<[HasSSE1]>;
    def NAME#64mr : I<0, Pseudo, (outs), (ins i64mem:$dst, FR64:$src),
        "#BINOP "#NAME#"64mr PSEUDO!",
        [(atomic_store_64 addr:$dst,
           (i64 (bitconvert (op
              (f64 (bitconvert (i64 (atomic_load_64 addr:$dst)))),
              FR64:$src))))]>, Requires<[HasSSE2]>;
}
defm RELEASE_FADD : RELEASE_FP_BINOP_MI<fadd>;
// FIXME: Add fsub, fmul, fdiv, ...
}

multiclass RELEASE_UNOP<dag dag8, dag dag16, dag dag32, dag dag64> {
    def NAME#8m  : I<0, Pseudo, (outs), (ins i8mem:$dst),
        "#UNOP "#NAME#"8m PSEUDO!",
        [(atomic_store_8 addr:$dst, dag8)]>;
    def NAME#16m : I<0, Pseudo, (outs), (ins i16mem:$dst),
        "#UNOP "#NAME#"16m PSEUDO!",
        [(atomic_store_16 addr:$dst, dag16)]>;
    def NAME#32m : I<0, Pseudo, (outs), (ins i32mem:$dst),
        "#UNOP "#NAME#"32m PSEUDO!",
        [(atomic_store_32 addr:$dst, dag32)]>;
    def NAME#64m : I<0, Pseudo, (outs), (ins i64mem:$dst),
        "#UNOP "#NAME#"64m PSEUDO!",
        [(atomic_store_64 addr:$dst, dag64)]>;
}

let Defs = [EFLAGS], Predicates = [UseIncDec], SchedRW = [WriteMicrocoded] in {
  defm RELEASE_INC : RELEASE_UNOP<
      (add (atomic_load_8  addr:$dst), (i8 1)),
      (add (atomic_load_16 addr:$dst), (i16 1)),
      (add (atomic_load_32 addr:$dst), (i32 1)),
      (add (atomic_load_64 addr:$dst), (i64 1))>;
  defm RELEASE_DEC : RELEASE_UNOP<
      (add (atomic_load_8  addr:$dst), (i8 -1)),
      (add (atomic_load_16 addr:$dst), (i16 -1)),
      (add (atomic_load_32 addr:$dst), (i32 -1)),
      (add (atomic_load_64 addr:$dst), (i64 -1))>;
}

/*
TODO: These don't work because the type inference of TableGen fails.
TODO: find a way to fix it.
let Defs = [EFLAGS] in {
  defm RELEASE_NEG : RELEASE_UNOP<
      (ineg (atomic_load_8  addr:$dst)),
      (ineg (atomic_load_16 addr:$dst)),
      (ineg (atomic_load_32 addr:$dst)),
      (ineg (atomic_load_64 addr:$dst))>;
}
// NOT doesn't set flags.
defm RELEASE_NOT : RELEASE_UNOP<
    (not (atomic_load_8  addr:$dst)),
    (not (atomic_load_16 addr:$dst)),
    (not (atomic_load_32 addr:$dst)),
    (not (atomic_load_64 addr:$dst))>;
*/

let SchedRW = [WriteMicrocoded] in {
def RELEASE_MOV8mi : I<0, Pseudo, (outs), (ins i8mem:$dst, i8imm:$src),
                       "#RELEASE_MOV8mi PSEUDO!",
                       [(atomic_store_8 addr:$dst, (i8 imm:$src))]>;
def RELEASE_MOV16mi : I<0, Pseudo, (outs), (ins i16mem:$dst, i16imm:$src),
                        "#RELEASE_MOV16mi PSEUDO!",
                        [(atomic_store_16 addr:$dst, (i16 imm:$src))]>;
def RELEASE_MOV32mi : I<0, Pseudo, (outs), (ins i32mem:$dst, i32imm:$src),
                        "#RELEASE_MOV32mi PSEUDO!",
                        [(atomic_store_32 addr:$dst, (i32 imm:$src))]>;
def RELEASE_MOV64mi32 : I<0, Pseudo, (outs), (ins i64mem:$dst, i64i32imm:$src),
                          "#RELEASE_MOV64mi32 PSEUDO!",
                          [(atomic_store_64 addr:$dst, i64immSExt32:$src)]>;

def RELEASE_MOV8mr  : I<0, Pseudo, (outs), (ins i8mem :$dst, GR8 :$src),
                        "#RELEASE_MOV8mr PSEUDO!",
                        [(atomic_store_8 addr:$dst, GR8 :$src)]>;
def RELEASE_MOV16mr : I<0, Pseudo, (outs), (ins i16mem:$dst, GR16:$src),
                        "#RELEASE_MOV16mr PSEUDO!",
                        [(atomic_store_16 addr:$dst, GR16:$src)]>;
def RELEASE_MOV32mr : I<0, Pseudo, (outs), (ins i32mem:$dst, GR32:$src),
                        "#RELEASE_MOV32mr PSEUDO!",
                        [(atomic_store_32 addr:$dst, GR32:$src)]>;
def RELEASE_MOV64mr : I<0, Pseudo, (outs), (ins i64mem:$dst, GR64:$src),
                        "#RELEASE_MOV64mr PSEUDO!",
                        [(atomic_store_64 addr:$dst, GR64:$src)]>;

def ACQUIRE_MOV8rm  : I<0, Pseudo, (outs GR8 :$dst), (ins i8mem :$src),
                        "#ACQUIRE_MOV8rm PSEUDO!",
                        [(set GR8:$dst, (atomic_load_8 addr:$src))]>;
def ACQUIRE_MOV16rm : I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$src),
                        "#ACQUIRE_MOV16rm PSEUDO!",
                        [(set GR16:$dst, (atomic_load_16 addr:$src))]>;
def ACQUIRE_MOV32rm : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$src),
                        "#ACQUIRE_MOV32rm PSEUDO!",
                        [(set GR32:$dst, (atomic_load_32 addr:$src))]>;
def ACQUIRE_MOV64rm : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$src),
                        "#ACQUIRE_MOV64rm PSEUDO!",
                        [(set GR64:$dst, (atomic_load_64 addr:$src))]>;
}

//===----------------------------------------------------------------------===//
// DAG Pattern Matching Rules
//===----------------------------------------------------------------------===//

// Use AND/OR to store 0/-1 in memory when optimizing for minsize. This saves
// binary size compared to a regular MOV, but it introduces an unnecessary
// load, so is not suitable for regular or optsize functions.
let Predicates = [OptForMinSize] in {
def : Pat<(store (i16 0), addr:$dst), (AND16mi8 addr:$dst, 0)>;
def : Pat<(store (i32 0), addr:$dst), (AND32mi8 addr:$dst, 0)>;
def : Pat<(store (i64 0), addr:$dst), (AND64mi8 addr:$dst, 0)>;
def : Pat<(store (i16 -1), addr:$dst), (OR16mi8 addr:$dst, -1)>;
def : Pat<(store (i32 -1), addr:$dst), (OR32mi8 addr:$dst, -1)>;
def : Pat<(store (i64 -1), addr:$dst), (OR64mi8 addr:$dst, -1)>;
}

// In kernel code model, we can get the address of a label
// into a register with 'movq'. FIXME: This is a hack, the 'imm' predicate of
// the MOV64ri32 should accept these.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri32 tconstpool  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri32 tjumptable  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri32 tglobaladdr :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri32 texternalsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper mcsym:$dst)),
          (MOV64ri32 mcsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri32 tblockaddress:$dst)>, Requires<[KernelCode]>;

// If we have small model and -static mode, it is safe to store global addresses
// directly as immediates. FIXME: This is really a hack, the 'imm' predicate
// for MOV64mi32 should handle this sort of thing.
def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tconstpool:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tjumptable:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, texternalsym:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper mcsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, mcsym:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tblockaddress:$src)>,
          Requires<[NearData, IsNotPIC]>;

def : Pat<(i32 (X86RecoverFrameAlloc mcsym:$dst)), (MOV32ri mcsym:$dst)>;
def : Pat<(i64 (X86RecoverFrameAlloc mcsym:$dst)), (MOV64ri mcsym:$dst)>;

// tls has some funny stuff here...
// This corresponds to movabs $foo@tpoff, %rax
def : Pat<(i64 (X86Wrapper tglobaltlsaddr :$dst)),
          (MOV64ri32 tglobaltlsaddr :$dst)>;
// This corresponds to add $foo@tpoff, %rax
def : Pat<(add GR64:$src1, (X86Wrapper tglobaltlsaddr :$dst)),
          (ADD64ri32 GR64:$src1, tglobaltlsaddr :$dst)>;

// Direct PC relative function call for small code model. 32-bit displacement
// sign extended to 64-bit.
def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>;

// Tailcall stuff. The TCRETURN instructions execute after the epilog, so they
// can never use callee-saved registers. That is the purpose of the GR64_TC
// register classes.
//
// The only volatile register that is never used by the calling convention is
// %r11. This happens when calling a vararg function with 6 arguments.
//
// Match an X86tcret that uses less than 7 volatile registers.
def X86tcret_6regs : PatFrag<(ops node:$ptr, node:$off),
                             (X86tcret node:$ptr, node:$off), [{
  // X86tcret args: (*chain, ptr, imm, regs..., glue)
  unsigned NumRegs = 0;
  for (unsigned i = 3, e = N->getNumOperands(); i != e; ++i)
    if (isa<RegisterSDNode>(N->getOperand(i)) && ++NumRegs > 6)
      return false;
  return true;
}]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
          (TCRETURNri ptr_rc_tailcall:$dst, imm:$off)>,
          Requires<[Not64BitMode, NotUseRetpoline]>;

// FIXME: This is disabled for 32-bit PIC mode because the global base
// register which is part of the address mode may be assigned a
// callee-saved register.
def : Pat<(X86tcret (load addr:$dst), imm:$off),
          (TCRETURNmi addr:$dst, imm:$off)>,
          Requires<[Not64BitMode, IsNotPIC, NotUseRetpoline]>;

def : Pat<(X86tcret (i32 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi tglobaladdr:$dst, imm:$off)>,
          Requires<[NotLP64]>;

def : Pat<(X86tcret (i32 texternalsym:$dst), imm:$off),
          (TCRETURNdi texternalsym:$dst, imm:$off)>,
          Requires<[NotLP64]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
          (TCRETURNri64 ptr_rc_tailcall:$dst, imm:$off)>,
          Requires<[In64BitMode, NotUseRetpoline]>;

// Don't fold loads into X86tcret requiring more than 6 regs.
// There wouldn't be enough scratch registers for base+index.
def : Pat<(X86tcret_6regs (load addr:$dst), imm:$off),
          (TCRETURNmi64 addr:$dst, imm:$off)>,
          Requires<[In64BitMode, NotUseRetpoline]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
          (RETPOLINE_TCRETURN64 ptr_rc_tailcall:$dst, imm:$off)>,
          Requires<[In64BitMode, UseRetpoline]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
          (RETPOLINE_TCRETURN32 ptr_rc_tailcall:$dst, imm:$off)>,
          Requires<[Not64BitMode, UseRetpoline]>;

def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>,
          Requires<[IsLP64]>;

def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
          (TCRETURNdi64 texternalsym:$dst, imm:$off)>,
          Requires<[IsLP64]>;

// Normal calls, with various flavors of addresses.
def : Pat<(X86call (i32 tglobaladdr:$dst)),
          (CALLpcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i32 texternalsym:$dst)),
          (CALLpcrel32 texternalsym:$dst)>;
def : Pat<(X86call (i32 imm:$dst)),
          (CALLpcrel32 imm:$dst)>, Requires<[CallImmAddr]>;

// TEST R,R is smaller than CMP R,0
def : Pat<(X86cmp GR8:$src1, 0),
          (TEST8rr GR8:$src1, GR8:$src1)>;
def : Pat<(X86cmp GR16:$src1, 0),
          (TEST16rr GR16:$src1, GR16:$src1)>;
def : Pat<(X86cmp GR32:$src1, 0),
          (TEST32rr GR32:$src1, GR32:$src1)>;
def : Pat<(X86cmp GR64:$src1, 0),
          (TEST64rr GR64:$src1, GR64:$src1)>;
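
// Example encodings: "testl %eax, %eax" is 2 bytes (85 C0), while
// "cmpl $0, %eax" needs an immediate byte (83 F8 00, 3 bytes), so TEST is
// preferred when only the flags matter.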

// Conditional moves with folded loads with operands swapped and conditions
// inverted.
multiclass CMOVmr<PatLeaf InvertedCond, Instruction Inst16, Instruction Inst32,
                  Instruction Inst64> {
  let Predicates = [HasCMov] in {
    def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, InvertedCond, EFLAGS),
              (Inst16 GR16:$src2, addr:$src1)>;
    def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, InvertedCond, EFLAGS),
              (Inst32 GR32:$src2, addr:$src1)>;
    def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, InvertedCond, EFLAGS),
              (Inst64 GR64:$src2, addr:$src1)>;
  }
}

defm : CMOVmr<X86_COND_B , CMOVAE16rm, CMOVAE32rm, CMOVAE64rm>;
defm : CMOVmr<X86_COND_AE, CMOVB16rm , CMOVB32rm , CMOVB64rm>;
defm : CMOVmr<X86_COND_E , CMOVNE16rm, CMOVNE32rm, CMOVNE64rm>;
defm : CMOVmr<X86_COND_NE, CMOVE16rm , CMOVE32rm , CMOVE64rm>;
defm : CMOVmr<X86_COND_BE, CMOVA16rm , CMOVA32rm , CMOVA64rm>;
defm : CMOVmr<X86_COND_A , CMOVBE16rm, CMOVBE32rm, CMOVBE64rm>;
defm : CMOVmr<X86_COND_L , CMOVGE16rm, CMOVGE32rm, CMOVGE64rm>;
defm : CMOVmr<X86_COND_GE, CMOVL16rm , CMOVL32rm , CMOVL64rm>;
defm : CMOVmr<X86_COND_LE, CMOVG16rm , CMOVG32rm , CMOVG64rm>;
defm : CMOVmr<X86_COND_G , CMOVLE16rm, CMOVLE32rm, CMOVLE64rm>;
defm : CMOVmr<X86_COND_P , CMOVNP16rm, CMOVNP32rm, CMOVNP64rm>;
defm : CMOVmr<X86_COND_NP, CMOVP16rm , CMOVP32rm , CMOVP64rm>;
defm : CMOVmr<X86_COND_S , CMOVNS16rm, CMOVNS32rm, CMOVNS64rm>;
defm : CMOVmr<X86_COND_NS, CMOVS16rm , CMOVS32rm , CMOVS64rm>;
defm : CMOVmr<X86_COND_O , CMOVNO16rm, CMOVNO32rm, CMOVNO64rm>;
defm : CMOVmr<X86_COND_NO, CMOVO16rm , CMOVO32rm , CMOVO64rm>;

// zextload bool -> zextload byte
// i1 stored in one byte in zero-extended form.
// Upper bits cleanup should be executed before Store.
def : Pat<(zextloadi8i1  addr:$src), (MOV8rm addr:$src)>;
def : Pat<(zextloadi16i1 addr:$src), (MOVZX16rm8 addr:$src)>;
def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
def : Pat<(zextloadi64i1 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;

// extload bool -> extload byte
// When extloading from 16-bit and smaller memory locations into 64-bit
// registers, use zero-extending loads so that the entire 64-bit register is
// defined, avoiding partial-register updates.

def : Pat<(extloadi8i1 addr:$src),   (MOV8rm      addr:$src)>;
def : Pat<(extloadi16i1 addr:$src),  (MOVZX16rm8  addr:$src)>;
def : Pat<(extloadi32i1 addr:$src),  (MOVZX32rm8  addr:$src)>;
def : Pat<(extloadi16i8 addr:$src),  (MOVZX16rm8  addr:$src)>;
def : Pat<(extloadi32i8 addr:$src),  (MOVZX32rm8  addr:$src)>;
def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>;

// For other extloads, use subregs, since the high contents of the register are
// defined after an extload.
def : Pat<(extloadi64i1 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
def : Pat<(extloadi64i8 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
def : Pat<(extloadi64i16 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm16 addr:$src), sub_32bit)>;
def : Pat<(extloadi64i32 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src), sub_32bit)>;

// anyext. Define these to do an explicit zero-extend to
// avoid partial-register updates.
def : Pat<(i16 (anyext GR8 :$src)), (EXTRACT_SUBREG
                                     (MOVZX32rr8 GR8 :$src), sub_16bit)>;
def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8  GR8 :$src)>;

// Except for i16 -> i32 since isel expects i16 ops to be promoted to i32.
def : Pat<(i32 (anyext GR16:$src)),
          (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, sub_16bit)>;

def : Pat<(i64 (anyext GR8 :$src)),
          (SUBREG_TO_REG (i64 0), (MOVZX32rr8  GR8  :$src), sub_32bit)>;
def : Pat<(i64 (anyext GR16:$src)),
          (SUBREG_TO_REG (i64 0), (MOVZX32rr16 GR16 :$src), sub_32bit)>;
def : Pat<(i64 (anyext GR32:$src)),
          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src, sub_32bit)>;


// Any instruction that defines a 32-bit result leaves the high half of the
// register. Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may
// be copying from a truncate. Any other 32-bit operation will zero-extend
// up to 64 bits. AssertSext/AssertZext aren't saying anything about the upper
// 32 bits, they're probably just qualifying a CopyFromReg.
def def32 : PatLeaf<(i32 GR32:$src), [{
  return N->getOpcode() != ISD::TRUNCATE &&
         N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
         N->getOpcode() != ISD::CopyFromReg &&
         N->getOpcode() != ISD::AssertSext &&
         N->getOpcode() != ISD::AssertZext;
}]>;

// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
def : Pat<(i64 (zext def32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
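
// E.g. "movl (%rdi), %eax" already zeroes the upper half of RAX, so a
// (zext i32 -> i64) of that load needs no extra instruction, only the
// SUBREG_TO_REG annotation above.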

//===----------------------------------------------------------------------===//
// Pattern match OR as ADD
//===----------------------------------------------------------------------===//

// If safe, we prefer to pattern match OR as ADD at isel time. ADD can be
// 3-addressified into an LEA instruction to avoid copies. However, we also
// want to finally emit these instructions as an or at the end of the code
// generator to make the generated code easier to read. To do this, we select
// into "disjoint bits" pseudo ops.

// Treat an 'or' node as an 'add' node if the or'ed bits are known to be zero.
def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
    return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());

  KnownBits Known0;
  CurDAG->computeKnownBits(N->getOperand(0), Known0, 0);
  KnownBits Known1;
  CurDAG->computeKnownBits(N->getOperand(1), Known1, 0);
  return (~Known0.Zero & ~Known1.Zero) == 0;
}]>;


// (or x1, x2) -> (add x1, x2) if two operands are known not to share bits.
// Try this before selecting to OR.
let AddedComplexity = 5, SchedRW = [WriteALU] in {

let isConvertibleToThreeAddress = 1,
    Constraints = "$src1 = $dst", Defs = [EFLAGS] in {
let isCommutable = 1 in {
def ADD16rr_DB  : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                    "", // orw/addw REG, REG
                    [(set GR16:$dst, (or_is_add GR16:$src1, GR16:$src2))]>;
def ADD32rr_DB  : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                    "", // orl/addl REG, REG
                    [(set GR32:$dst, (or_is_add GR32:$src1, GR32:$src2))]>;
def ADD64rr_DB  : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                    "", // orq/addq REG, REG
                    [(set GR64:$dst, (or_is_add GR64:$src1, GR64:$src2))]>;
} // isCommutable
// NOTE: These are order-specific: we want the ri8 forms to be listed
// first so that they are slightly preferred to the ri forms.

def ADD16ri8_DB : I<0, Pseudo,
                    (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
                    "", // orw/addw REG, imm8
                    [(set GR16:$dst,(or_is_add GR16:$src1,i16immSExt8:$src2))]>;
def ADD16ri_DB  : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
                    "", // orw/addw REG, imm
                    [(set GR16:$dst, (or_is_add GR16:$src1, imm:$src2))]>;

def ADD32ri8_DB : I<0, Pseudo,
                    (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
                    "", // orl/addl REG, imm8
                    [(set GR32:$dst,(or_is_add GR32:$src1,i32immSExt8:$src2))]>;
def ADD32ri_DB  : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
                    "", // orl/addl REG, imm
                    [(set GR32:$dst, (or_is_add GR32:$src1, imm:$src2))]>;

def ADD64ri8_DB : I<0, Pseudo,
                    (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "", // orq/addq REG, imm8
                    [(set GR64:$dst, (or_is_add GR64:$src1,
                                                i64immSExt8:$src2))]>;
def ADD64ri32_DB : I<0, Pseudo,
                     (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                     "", // orq/addq REG, imm
                     [(set GR64:$dst, (or_is_add GR64:$src1,
                                                 i64immSExt32:$src2))]>;
} // isConvertibleToThreeAddress
} // AddedComplexity, SchedRW
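// Sketch of the payoff (illustrative): for "a = b | 3" where the low two bits
// of b are known zero, ADD32ri8_DB lets the two-address pass produce
// "leal 3(%rbx), %eax" instead of a movl+orl pair; when no LEA conversion
// happens, the pseudo is still emitted as a plain "or" late in codegen, as
// described above.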
//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//
// Odd encoding trick: -128 fits into an 8-bit immediate field while
// +128 doesn't, so in this special case use a sub instead of an add.
def : Pat<(add GR16:$src1, 128),
          (SUB16ri8 GR16:$src1, -128)>;
def : Pat<(store (add (loadi16 addr:$dst), 128), addr:$dst),
          (SUB16mi8 addr:$dst, -128)>;

def : Pat<(add GR32:$src1, 128),
          (SUB32ri8 GR32:$src1, -128)>;
def : Pat<(store (add (loadi32 addr:$dst), 128), addr:$dst),
          (SUB32mi8 addr:$dst, -128)>;

def : Pat<(add GR64:$src1, 128),
          (SUB64ri8 GR64:$src1, -128)>;
def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
          (SUB64mi8 addr:$dst, -128)>;

// The same trick applies for 32-bit immediate fields in 64-bit
// operations.
def : Pat<(add GR64:$src1, 0x0000000080000000),
          (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
          (SUB64mi32 addr:$dst, 0xffffffff80000000)>;
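// Size math behind the trick (illustrative): "add $128, reg" cannot use the
// sign-extended imm8 form because +128 > 127, while the equivalent
// "sub $-128, reg" can, giving a shorter encoding. Likewise, adding 2^31 in
// 64-bit code would otherwise need the immediate materialized in a register,
// since an imm32 of 0x80000000 would sign-extend to the wrong value, but
// subtracting -2^31 fits the sign-extended imm32 field.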
// To avoid needing to materialize an immediate in a register, use a 32-bit and
// with implicit zero-extension instead of a 64-bit and if the immediate has at
// least 32 bits of leading zeros. If in addition the last 32 bits can be
// represented with a sign extension of an 8-bit constant, use that.
// This can also reduce instruction size by eliminating the need for the REX
// prefix.

// AddedComplexity is needed to give priority over i64immSExt8 and i64immSExt32.
let AddedComplexity = 1 in {
def : Pat<(and GR64:$src, i64immZExt32SExt8:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri8
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo8XForm imm:$imm))),
            sub_32bit)>;

def : Pat<(and GR64:$src, i64immZExt32:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo32XForm imm:$imm))),
            sub_32bit)>;
} // AddedComplexity = 1
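// Illustrative effect (assumed register assignment): "andq $0x7fffffff, %rax"
// needs a REX.W prefix, but "andl $0x7fffffff, %eax" computes the same 64-bit
// value because the 32-bit write implicitly clears bits 63:32.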
// AddedComplexity is needed to match the increased complexity of the
// i64immZExt32SExt8 and i64immZExt32 patterns above. Applying this to all
// the MOVZX patterns keeps them together in the DAGIsel tables.
let AddedComplexity = 1 in {
// r & (2^16-1) ==> movz
def : Pat<(and GR32:$src1, 0xffff),
          (MOVZX32rr16 (EXTRACT_SUBREG GR32:$src1, sub_16bit))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, sub_8bit))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
          (EXTRACT_SUBREG (MOVZX32rr8 (EXTRACT_SUBREG GR16:$src1, sub_8bit)),
                          sub_16bit)>;

// r & (2^32-1) ==> movz
def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
          (SUBREG_TO_REG (i64 0),
                         (MOV32rr (EXTRACT_SUBREG GR64:$src, sub_32bit)),
                         sub_32bit)>;
// r & (2^16-1) ==> movz
def : Pat<(and GR64:$src, 0xffff),
          (SUBREG_TO_REG (i64 0),
                         (MOVZX32rr16 (i16 (EXTRACT_SUBREG GR64:$src, sub_16bit))),
                         sub_32bit)>;
// r & (2^8-1) ==> movz
def : Pat<(and GR64:$src, 0xff),
          (SUBREG_TO_REG (i64 0),
                         (MOVZX32rr8 (i8 (EXTRACT_SUBREG GR64:$src, sub_8bit))),
                         sub_32bit)>;
} // AddedComplexity = 1
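// For instance (illustrative): "x & 0xffff" on a 32-bit register becomes
// "movzwl %ax, %eax" rather than "andl $0xffff, %eax", avoiding the immediate
// encoding altogether.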
// sext_inreg patterns
def : Pat<(sext_inreg GR32:$src, i16),
          (MOVSX32rr16 (EXTRACT_SUBREG GR32:$src, sub_16bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, sub_8bit))>;

def : Pat<(sext_inreg GR16:$src, i8),
          (EXTRACT_SUBREG (MOVSX32rr8 (EXTRACT_SUBREG GR16:$src, sub_8bit)),
                          sub_16bit)>;

def : Pat<(sext_inreg GR64:$src, i32),
          (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
def : Pat<(sext_inreg GR64:$src, i16),
          (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, sub_16bit))>;
def : Pat<(sext_inreg GR64:$src, i8),
          (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, sub_8bit))>;
// sext, sext_load, zext, zext_load
def : Pat<(i16 (sext GR8:$src)),
          (EXTRACT_SUBREG (MOVSX32rr8 GR8:$src), sub_16bit)>;
def : Pat<(sextloadi16i8 addr:$src),
          (EXTRACT_SUBREG (MOVSX32rm8 addr:$src), sub_16bit)>;
def : Pat<(i16 (zext GR8:$src)),
          (EXTRACT_SUBREG (MOVZX32rr8 GR8:$src), sub_16bit)>;
def : Pat<(zextloadi16i8 addr:$src),
          (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
// trunc patterns
def : Pat<(i16 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                          sub_8bit)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                          sub_8bit)>,
      Requires<[Not64BitMode]>;
def : Pat<(i32 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_32bit)>;
def : Pat<(i16 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_8bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_8bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG GR16:$src, sub_8bit)>,
      Requires<[In64BitMode]>;
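// Note on the COPY_TO_REGCLASS above: outside 64-bit mode only
// EAX/EBX/ECX/EDX have addressable low-byte subregisters, so the value is
// first constrained to GR32_ABCD/GR16_ABCD before taking sub_8bit; in 64-bit
// mode every GPR has a low byte and the plain EXTRACT_SUBREG forms suffice.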
// h-register tricks
def : Pat<(i8 (trunc (srl_su GR16:$src, (i8 8)))),
          (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc (srl_su (i32 (anyext GR16:$src)), (i8 8)))),
          (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc (srl_su GR32:$src, (i8 8)))),
          (EXTRACT_SUBREG GR32:$src, sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32_NOREXrr8 (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)),
            sub_16bit)>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32_NOREXrr8 (EXTRACT_SUBREG GR16:$src, sub_8bit_hi))>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32_NOREXrr8 (EXTRACT_SUBREG GR16:$src, sub_8bit_hi))>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32_NOREXrr8 (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>;
def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
          (MOVZX32_NOREXrr8 (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>;
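// Concrete reading (illustrative): "(x >> 8) & 0xff" on a 32-bit value
// selects to a single "movzbl %ah, %eax"-style extract of the high-byte
// subregister instead of a shift followed by a mask.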
// h-register tricks.
// For now, be conservative on x86-64 and use an h-register extract only if the
// value is immediately zero-extended or stored, which are somewhat common
// cases. This uses a bunch of code to prevent a register requiring a REX prefix
// from being allocated in the same instruction as the h-register, as there's
// currently no way to describe this requirement to the register allocator.

// h-register extract and zero-extend.
def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG GR64:$src, sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(i64 (zext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(i64 (anyext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32_NOREXrr8
              (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)),
            sub_32bit)>;

// h-register extract and store.
def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG GR64:$src, sub_8bit_hi))>;
def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG GR16:$src, sub_8bit_hi))>,
      Requires<[In64BitMode]>;
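// The _NOREX opcodes matter because an instruction carrying a REX prefix
// cannot encode AH/BH/CH/DH; the restricted register classes keep the
// allocator from pairing an h-register with an operand that would force a
// REX prefix.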
// (shl x, 1) ==> (add x, x)
// Note that if x is undef (immediate or otherwise), we could theoretically
// end up with the two uses of x getting different values, producing a result
// where the least significant bit is not 0. However, the probability of this
// happening is considered low enough that this is officially not a
// "real problem".
def : Pat<(shl GR8 :$src1, (i8 1)), (ADD8rr  GR8 :$src1, GR8 :$src1)>;
def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr GR16:$src1, GR16:$src1)>;
def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>;
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
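// Illustrative undef case: if x is undef, the two operands of (add x, x)
// could in principle be materialized as different values, say 1 and 2,
// yielding 3 with bit 0 set, whereas any (shl x, 1) result has bit 0 clear.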
// Helper imms that check if a mask doesn't change significant shift/rotate
// amount bits.
def immShift8 : ImmLeaf<i8, [{
  return countTrailingOnes<uint64_t>(Imm) >= 3;
}]>;
def immShift16 : ImmLeaf<i8, [{
  return countTrailingOnes<uint64_t>(Imm) >= 4;
}]>;
def immShift32 : ImmLeaf<i8, [{
  return countTrailingOnes<uint64_t>(Imm) >= 5;
}]>;
def immShift64 : ImmLeaf<i8, [{
  return countTrailingOnes<uint64_t>(Imm) >= 6;
}]>;
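// Worked check (illustrative): 32-bit shifts only read the low 5 bits of the
// count, so any mask with at least 5 trailing ones (0x1f, 0x3f, 0xff, ...)
// preserves them all; countTrailingOnes(0x1f) == 5 satisfies immShift32,
// while 0x0f, with only 4 trailing ones, does not.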
// Shift amount is implicitly masked.
multiclass MaskedShiftAmountPats<SDNode frag, string name> {
  // (shift x (and y, 31)) ==> (shift x, y)
  def : Pat<(frag GR8:$src1, (and CL, immShift32)),
            (!cast<Instruction>(name # "8rCL") GR8:$src1)>;
  def : Pat<(frag GR16:$src1, (and CL, immShift32)),
            (!cast<Instruction>(name # "16rCL") GR16:$src1)>;
  def : Pat<(frag GR32:$src1, (and CL, immShift32)),
            (!cast<Instruction>(name # "32rCL") GR32:$src1)>;
  def : Pat<(store (frag (loadi8 addr:$dst), (and CL, immShift32)), addr:$dst),
            (!cast<Instruction>(name # "8mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi16 addr:$dst), (and CL, immShift32)), addr:$dst),
            (!cast<Instruction>(name # "16mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi32 addr:$dst), (and CL, immShift32)), addr:$dst),
            (!cast<Instruction>(name # "32mCL") addr:$dst)>;

  // (shift x (and y, 63)) ==> (shift x, y)
  def : Pat<(frag GR64:$src1, (and CL, immShift64)),
            (!cast<Instruction>(name # "64rCL") GR64:$src1)>;
  def : Pat<(store (frag (loadi64 addr:$dst), (and CL, immShift64)), addr:$dst),
            (!cast<Instruction>(name # "64mCL") addr:$dst)>;
}
defm : MaskedShiftAmountPats<shl, "SHL">;
defm : MaskedShiftAmountPats<srl, "SHR">;
defm : MaskedShiftAmountPats<sra, "SAR">;
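// In source terms (illustrative): C code like "x << (n & 31)" for 32-bit x
// selects directly to "shll %cl, ..." with no separate "andl $31, %ecx",
// because the hardware masks the count identically.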
// ROL/ROR instructions allow a stronger mask optimization than shift for 8- and
// 16-bit. We can remove a mask of any (bitwidth - 1) on the rotation amount
// because over-rotating produces the same result. This is noted in the Intel
// docs with: "tempCOUNT <- (COUNT & COUNTMASK) MOD SIZE". Masking the rotation
// amount could affect EFLAGS results, but that does not matter because we are
// not tracking flags for these nodes.
multiclass MaskedRotateAmountPats<SDNode frag, string name> {
  // (rot x (and y, BitWidth - 1)) ==> (rot x, y)
  def : Pat<(frag GR8:$src1, (and CL, immShift8)),
            (!cast<Instruction>(name # "8rCL") GR8:$src1)>;
  def : Pat<(frag GR16:$src1, (and CL, immShift16)),
            (!cast<Instruction>(name # "16rCL") GR16:$src1)>;
  def : Pat<(frag GR32:$src1, (and CL, immShift32)),
            (!cast<Instruction>(name # "32rCL") GR32:$src1)>;
  def : Pat<(store (frag (loadi8 addr:$dst), (and CL, immShift8)), addr:$dst),
            (!cast<Instruction>(name # "8mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi16 addr:$dst), (and CL, immShift16)), addr:$dst),
            (!cast<Instruction>(name # "16mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi32 addr:$dst), (and CL, immShift32)), addr:$dst),
            (!cast<Instruction>(name # "32mCL") addr:$dst)>;

  // (rot x (and y, 63)) ==> (rot x, y)
  def : Pat<(frag GR64:$src1, (and CL, immShift64)),
            (!cast<Instruction>(name # "64rCL") GR64:$src1)>;
  def : Pat<(store (frag (loadi64 addr:$dst), (and CL, immShift64)), addr:$dst),
            (!cast<Instruction>(name # "64mCL") addr:$dst)>;
}
defm : MaskedRotateAmountPats<rotl, "ROL">;
defm : MaskedRotateAmountPats<rotr, "ROR">;
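// Why the stronger 8/16-bit mask is safe (illustrative): rotating a byte by 9
// and by 9 & 7 == 1 produces the same result, since rotation is modular in
// the operand width; an 8-bit shift could not drop such a mask, because
// shifting by 9 and by 1 differ.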
// Double shift amount is implicitly masked.
multiclass MaskedDoubleShiftAmountPats<SDNode frag, string name> {
  // (shift x (and y, 31)) ==> (shift x, y)
  def : Pat<(frag GR16:$src1, GR16:$src2, (and CL, immShift32)),
            (!cast<Instruction>(name # "16rrCL") GR16:$src1, GR16:$src2)>;
  def : Pat<(frag GR32:$src1, GR32:$src2, (and CL, immShift32)),
            (!cast<Instruction>(name # "32rrCL") GR32:$src1, GR32:$src2)>;

  // (shift x (and y, 63)) ==> (shift x, y)
  def : Pat<(frag GR64:$src1, GR64:$src2, (and CL, immShift64)),
            (!cast<Instruction>(name # "64rrCL") GR64:$src1, GR64:$src2)>;
}
defm : MaskedDoubleShiftAmountPats<X86shld, "SHLD">;
defm : MaskedDoubleShiftAmountPats<X86shrd, "SHRD">;
let Predicates = [HasBMI2] in {
  let AddedComplexity = 1 in {
    def : Pat<(sra GR32:$src1, (and GR8:$src2, immShift32)),
              (SARX32rr GR32:$src1,
                        (INSERT_SUBREG
                          (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
    def : Pat<(sra GR64:$src1, (and GR8:$src2, immShift64)),
              (SARX64rr GR64:$src1,
                        (INSERT_SUBREG
                          (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

    def : Pat<(srl GR32:$src1, (and GR8:$src2, immShift32)),
              (SHRX32rr GR32:$src1,
                        (INSERT_SUBREG
                          (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
    def : Pat<(srl GR64:$src1, (and GR8:$src2, immShift64)),
              (SHRX64rr GR64:$src1,
                        (INSERT_SUBREG
                          (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

    def : Pat<(shl GR32:$src1, (and GR8:$src2, immShift32)),
              (SHLX32rr GR32:$src1,
                        (INSERT_SUBREG
                          (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
    def : Pat<(shl GR64:$src1, (and GR8:$src2, immShift64)),
              (SHLX64rr GR64:$src1,
                        (INSERT_SUBREG
                          (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  } // AddedComplexity = 1

  let AddedComplexity = -20 in {
    def : Pat<(sra (loadi32 addr:$src1), (and GR8:$src2, immShift32)),
              (SARX32rm addr:$src1,
                        (INSERT_SUBREG
                          (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
    def : Pat<(sra (loadi64 addr:$src1), (and GR8:$src2, immShift64)),
              (SARX64rm addr:$src1,
                        (INSERT_SUBREG
                          (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

    def : Pat<(srl (loadi32 addr:$src1), (and GR8:$src2, immShift32)),
              (SHRX32rm addr:$src1,
                        (INSERT_SUBREG
                          (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
    def : Pat<(srl (loadi64 addr:$src1), (and GR8:$src2, immShift64)),
              (SHRX64rm addr:$src1,
                        (INSERT_SUBREG
                          (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

    def : Pat<(shl (loadi32 addr:$src1), (and GR8:$src2, immShift32)),
              (SHLX32rm addr:$src1,
                        (INSERT_SUBREG
                          (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
    def : Pat<(shl (loadi64 addr:$src1), (and GR8:$src2, immShift64)),
              (SHLX64rm addr:$src1,
                        (INSERT_SUBREG
                          (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  } // AddedComplexity = -20
} // Predicates = [HasBMI2]
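// Note (illustrative): the BMI2 SARX/SHRX/SHLX instructions take the count in
// a general register and mask it to the operand width themselves, so the
// (and GR8:$src2, immShift32/64) is folded away just as with the CL-based
// forms above.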
// (anyext (setcc_carry)) -> (setcc_carry)
def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C16r)>;
def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
def : Pat<(i32 (anyext (i16 (X86setcc_c X86_COND_B, EFLAGS)))),
          (SETB_C32r)>;
//===----------------------------------------------------------------------===//
// EFLAGS-defining Patterns
//===----------------------------------------------------------------------===//

// add reg, reg
def : Pat<(add GR8 :$src1, GR8 :$src2), (ADD8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(add GR16:$src1, GR16:$src2), (ADD16rr GR16:$src1, GR16:$src2)>;
def : Pat<(add GR32:$src1, GR32:$src2), (ADD32rr GR32:$src1, GR32:$src2)>;

// add reg, mem
def : Pat<(add GR8:$src1, (loadi8 addr:$src2)),
          (ADD8rm GR8:$src1, addr:$src2)>;
def : Pat<(add GR16:$src1, (loadi16 addr:$src2)),
          (ADD16rm GR16:$src1, addr:$src2)>;
def : Pat<(add GR32:$src1, (loadi32 addr:$src2)),
          (ADD32rm GR32:$src1, addr:$src2)>;

// add reg, imm
def : Pat<(add GR8 :$src1, imm:$src2), (ADD8ri  GR8 :$src1, imm:$src2)>;
def : Pat<(add GR16:$src1, imm:$src2), (ADD16ri GR16:$src1, imm:$src2)>;
def : Pat<(add GR32:$src1, imm:$src2), (ADD32ri GR32:$src1, imm:$src2)>;
def : Pat<(add GR16:$src1, i16immSExt8:$src2),
          (ADD16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(add GR32:$src1, i32immSExt8:$src2),
          (ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;
// sub reg, reg
def : Pat<(sub GR8 :$src1, GR8 :$src2), (SUB8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(sub GR16:$src1, GR16:$src2), (SUB16rr GR16:$src1, GR16:$src2)>;
def : Pat<(sub GR32:$src1, GR32:$src2), (SUB32rr GR32:$src1, GR32:$src2)>;

// sub reg, mem
def : Pat<(sub GR8:$src1, (loadi8 addr:$src2)),
          (SUB8rm GR8:$src1, addr:$src2)>;
def : Pat<(sub GR16:$src1, (loadi16 addr:$src2)),
          (SUB16rm GR16:$src1, addr:$src2)>;
def : Pat<(sub GR32:$src1, (loadi32 addr:$src2)),
          (SUB32rm GR32:$src1, addr:$src2)>;

// sub reg, imm
def : Pat<(sub GR8:$src1, imm:$src2),
          (SUB8ri GR8:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, imm:$src2),
          (SUB16ri GR16:$src1, imm:$src2)>;
def : Pat<(sub GR32:$src1, imm:$src2),
          (SUB32ri GR32:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, i16immSExt8:$src2),
          (SUB16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(sub GR32:$src1, i32immSExt8:$src2),
          (SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;

// sub 0, reg
def : Pat<(X86sub_flag 0, GR8 :$src), (NEG8r  GR8 :$src)>;
def : Pat<(X86sub_flag 0, GR16:$src), (NEG16r GR16:$src)>;
def : Pat<(X86sub_flag 0, GR32:$src), (NEG32r GR32:$src)>;
def : Pat<(X86sub_flag 0, GR64:$src), (NEG64r GR64:$src)>;

// sub reg, relocImm
def : Pat<(X86sub_flag GR64:$src1, i64relocImmSExt8_su:$src2),
          (SUB64ri8 GR64:$src1, i64relocImmSExt8_su:$src2)>;
def : Pat<(X86sub_flag GR64:$src1, i64relocImmSExt32_su:$src2),
          (SUB64ri32 GR64:$src1, i64relocImmSExt32_su:$src2)>;
// mul reg, reg
def : Pat<(mul GR16:$src1, GR16:$src2),
          (IMUL16rr GR16:$src1, GR16:$src2)>;
def : Pat<(mul GR32:$src1, GR32:$src2),
          (IMUL32rr GR32:$src1, GR32:$src2)>;

// mul reg, mem
def : Pat<(mul GR16:$src1, (loadi16 addr:$src2)),
          (IMUL16rm GR16:$src1, addr:$src2)>;
def : Pat<(mul GR32:$src1, (loadi32 addr:$src2)),
          (IMUL32rm GR32:$src1, addr:$src2)>;

// mul reg, imm
def : Pat<(mul GR16:$src1, imm:$src2),
          (IMUL16rri GR16:$src1, imm:$src2)>;
def : Pat<(mul GR32:$src1, imm:$src2),
          (IMUL32rri GR32:$src1, imm:$src2)>;
def : Pat<(mul GR16:$src1, i16immSExt8:$src2),
          (IMUL16rri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(mul GR32:$src1, i32immSExt8:$src2),
          (IMUL32rri8 GR32:$src1, i32immSExt8:$src2)>;

// reg = mul mem, imm
def : Pat<(mul (loadi16 addr:$src1), imm:$src2),
          (IMUL16rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), imm:$src2),
          (IMUL32rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi16 addr:$src1), i16immSExt8:$src2),
          (IMUL16rmi8 addr:$src1, i16immSExt8:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), i32immSExt8:$src2),
          (IMUL32rmi8 addr:$src1, i32immSExt8:$src2)>;
// Patterns for nodes that do not produce flags, for instructions that do.

// add
def : Pat<(add GR64:$src1, GR64:$src2),
          (ADD64rr GR64:$src1, GR64:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(add GR64:$src1, (loadi64 addr:$src2)),
          (ADD64rm GR64:$src1, addr:$src2)>;

// sub
def : Pat<(sub GR64:$src1, GR64:$src2),
          (SUB64rr GR64:$src1, GR64:$src2)>;
def : Pat<(sub GR64:$src1, (loadi64 addr:$src2)),
          (SUB64rm GR64:$src1, addr:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt8:$src2),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt32:$src2),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;

// mul
def : Pat<(mul GR64:$src1, GR64:$src2),
          (IMUL64rr GR64:$src1, GR64:$src2)>;
def : Pat<(mul GR64:$src1, (loadi64 addr:$src2)),
          (IMUL64rm GR64:$src1, addr:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt8:$src2),
          (IMUL64rri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt32:$src2),
          (IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt8:$src2),
          (IMUL64rmi8 addr:$src1, i64immSExt8:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt32:$src2),
          (IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;
// Increment/Decrement reg.
// Do not select INC/DEC if they are slow on the target.
let Predicates = [UseIncDec] in {
  def : Pat<(add GR8:$src, 1),   (INC8r GR8:$src)>;
  def : Pat<(add GR16:$src, 1),  (INC16r GR16:$src)>;
  def : Pat<(add GR32:$src, 1),  (INC32r GR32:$src)>;
  def : Pat<(add GR64:$src, 1),  (INC64r GR64:$src)>;
  def : Pat<(add GR8:$src, -1),  (DEC8r GR8:$src)>;
  def : Pat<(add GR16:$src, -1), (DEC16r GR16:$src)>;
  def : Pat<(add GR32:$src, -1), (DEC32r GR32:$src)>;
  def : Pat<(add GR64:$src, -1), (DEC64r GR64:$src)>;
} // Predicates = [UseIncDec]
// or reg/reg.
def : Pat<(or GR8 :$src1, GR8 :$src2), (OR8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(or GR16:$src1, GR16:$src2), (OR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(or GR32:$src1, GR32:$src2), (OR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(or GR64:$src1, GR64:$src2), (OR64rr GR64:$src1, GR64:$src2)>;

// or reg/mem
def : Pat<(or GR8:$src1, (loadi8 addr:$src2)),
          (OR8rm GR8:$src1, addr:$src2)>;
def : Pat<(or GR16:$src1, (loadi16 addr:$src2)),
          (OR16rm GR16:$src1, addr:$src2)>;
def : Pat<(or GR32:$src1, (loadi32 addr:$src2)),
          (OR32rm GR32:$src1, addr:$src2)>;
def : Pat<(or GR64:$src1, (loadi64 addr:$src2)),
          (OR64rm GR64:$src1, addr:$src2)>;

// or reg/imm
def : Pat<(or GR8 :$src1, imm:$src2), (OR8ri  GR8 :$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, imm:$src2), (OR16ri GR16:$src1, imm:$src2)>;
def : Pat<(or GR32:$src1, imm:$src2), (OR32ri GR32:$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, i16immSExt8:$src2),
          (OR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(or GR32:$src1, i32immSExt8:$src2),
          (OR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt8:$src2),
          (OR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt32:$src2),
          (OR64ri32 GR64:$src1, i64immSExt32:$src2)>;
// xor reg/reg
def : Pat<(xor GR8 :$src1, GR8 :$src2), (XOR8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(xor GR16:$src1, GR16:$src2), (XOR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(xor GR32:$src1, GR32:$src2), (XOR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(xor GR64:$src1, GR64:$src2), (XOR64rr GR64:$src1, GR64:$src2)>;

// xor reg/mem
def : Pat<(xor GR8:$src1, (loadi8 addr:$src2)),
          (XOR8rm GR8:$src1, addr:$src2)>;
def : Pat<(xor GR16:$src1, (loadi16 addr:$src2)),
          (XOR16rm GR16:$src1, addr:$src2)>;
def : Pat<(xor GR32:$src1, (loadi32 addr:$src2)),
          (XOR32rm GR32:$src1, addr:$src2)>;
def : Pat<(xor GR64:$src1, (loadi64 addr:$src2)),
          (XOR64rm GR64:$src1, addr:$src2)>;

// xor reg/imm
def : Pat<(xor GR8:$src1, imm:$src2),
          (XOR8ri GR8:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, imm:$src2),
          (XOR16ri GR16:$src1, imm:$src2)>;
def : Pat<(xor GR32:$src1, imm:$src2),
          (XOR32ri GR32:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, i16immSExt8:$src2),
          (XOR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(xor GR32:$src1, i32immSExt8:$src2),
          (XOR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt8:$src2),
          (XOR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt32:$src2),
          (XOR64ri32 GR64:$src1, i64immSExt32:$src2)>;
// and reg/reg
def : Pat<(and GR8 :$src1, GR8 :$src2), (AND8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(and GR16:$src1, GR16:$src2), (AND16rr GR16:$src1, GR16:$src2)>;
def : Pat<(and GR32:$src1, GR32:$src2), (AND32rr GR32:$src1, GR32:$src2)>;
def : Pat<(and GR64:$src1, GR64:$src2), (AND64rr GR64:$src1, GR64:$src2)>;

// and reg/mem
def : Pat<(and GR8:$src1, (loadi8 addr:$src2)),
          (AND8rm GR8:$src1, addr:$src2)>;
def : Pat<(and GR16:$src1, (loadi16 addr:$src2)),
          (AND16rm GR16:$src1, addr:$src2)>;
def : Pat<(and GR32:$src1, (loadi32 addr:$src2)),
          (AND32rm GR32:$src1, addr:$src2)>;
def : Pat<(and GR64:$src1, (loadi64 addr:$src2)),
          (AND64rm GR64:$src1, addr:$src2)>;

// and reg/imm
def : Pat<(and GR8:$src1, imm:$src2),
          (AND8ri GR8:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, imm:$src2),
          (AND16ri GR16:$src1, imm:$src2)>;
def : Pat<(and GR32:$src1, imm:$src2),
          (AND32ri GR32:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, i16immSExt8:$src2),
          (AND16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(and GR32:$src1, i32immSExt8:$src2),
          (AND32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt8:$src2),
          (AND64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt32:$src2),
          (AND64ri32 GR64:$src1, i64immSExt32:$src2)>;
// Bit scan instruction patterns to match explicit zero-undef behavior.
def : Pat<(cttz_zero_undef GR16:$src), (BSF16rr GR16:$src)>;
def : Pat<(cttz_zero_undef GR32:$src), (BSF32rr GR32:$src)>;
def : Pat<(cttz_zero_undef GR64:$src), (BSF64rr GR64:$src)>;
def : Pat<(cttz_zero_undef (loadi16 addr:$src)), (BSF16rm addr:$src)>;
def : Pat<(cttz_zero_undef (loadi32 addr:$src)), (BSF32rm addr:$src)>;
def : Pat<(cttz_zero_undef (loadi64 addr:$src)), (BSF64rm addr:$src)>;
// When HasMOVBE is enabled it is possible to get a non-legalized
// register-register 16-bit bswap. This maps it to a ROL instruction.
let Predicates = [HasMOVBE] in {
  def : Pat<(bswap GR16:$src), (ROL16ri GR16:$src, (i8 8))>;
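  // (Rotating a 16-bit value by 8 swaps its two bytes, which is exactly what
  // a 16-bit bswap requires.)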