1 //===-- X86InstrShiftRotate.td - Shift and Rotate Instrs ---*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the shift and rotate instructions.
12 //===----------------------------------------------------------------------===//
14 // FIXME: Someone needs to smear multipattern goodness all over this file.
16 let Defs = [EFLAGS] in {
18 let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in {
// Shift left (SHL) register forms, count taken from CL (0xD2/0xD3 /4).
20 def SHL8rCL : I<0xD2, MRM4r, (outs GR8 :$dst), (ins GR8 :$src1),
21 "shl{b}\t{%cl, $dst|$dst, cl}",
22 [(set GR8:$dst, (shl GR8:$src1, CL))], IIC_SR>;
23 def SHL16rCL : I<0xD3, MRM4r, (outs GR16:$dst), (ins GR16:$src1),
24 "shl{w}\t{%cl, $dst|$dst, cl}",
25 [(set GR16:$dst, (shl GR16:$src1, CL))], IIC_SR>, OpSize16;
26 def SHL32rCL : I<0xD3, MRM4r, (outs GR32:$dst), (ins GR32:$src1),
27 "shl{l}\t{%cl, $dst|$dst, cl}",
28 [(set GR32:$dst, (shl GR32:$src1, CL))], IIC_SR>, OpSize32;
29 def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
30 "shl{q}\t{%cl, $dst|$dst, cl}",
31 [(set GR64:$dst, (shl GR64:$src1, CL))], IIC_SR>;
// SHL by an 8-bit immediate (0xC0/0xC1 /4).
34 def SHL8ri : Ii8<0xC0, MRM4r, (outs GR8 :$dst), (ins GR8 :$src1, u8imm:$src2),
35 "shl{b}\t{$src2, $dst|$dst, $src2}",
36 [(set GR8:$dst, (shl GR8:$src1, (i8 imm:$src2)))], IIC_SR>;
38 let isConvertibleToThreeAddress = 1 in { // Can transform into LEA.
// NOTE(review): the trailing OpSize16/OpSize32/IIC_SR continuation lines of
// the next three defs appear to be missing from this chunk; verify against
// the original file before editing.
39 def SHL16ri : Ii8<0xC1, MRM4r, (outs GR16:$dst), (ins GR16:$src1, u8imm:$src2),
40 "shl{w}\t{$src2, $dst|$dst, $src2}",
41 [(set GR16:$dst, (shl GR16:$src1, (i8 imm:$src2)))], IIC_SR>,
43 def SHL32ri : Ii8<0xC1, MRM4r, (outs GR32:$dst), (ins GR32:$src1, u8imm:$src2),
44 "shl{l}\t{$src2, $dst|$dst, $src2}",
45 [(set GR32:$dst, (shl GR32:$src1, (i8 imm:$src2)))], IIC_SR>,
47 def SHL64ri : RIi8<0xC1, MRM4r, (outs GR64:$dst),
48 (ins GR64:$src1, u8imm:$src2),
49 "shl{q}\t{$src2, $dst|$dst, $src2}",
50 [(set GR64:$dst, (shl GR64:$src1, (i8 imm:$src2)))],
52 } // isConvertibleToThreeAddress = 1
54 // NOTE: We don't include patterns for shifts of a register by one, because
55 // 'add reg,reg' is cheaper (and we have a Pat pattern for shift-by-one).
// Short shift-by-1 encodings (0xD0/0xD1), asm/disasm only — no patterns.
56 let hasSideEffects = 0 in {
57 def SHL8r1 : I<0xD0, MRM4r, (outs GR8:$dst), (ins GR8:$src1),
58 "shl{b}\t$dst", [], IIC_SR>;
59 def SHL16r1 : I<0xD1, MRM4r, (outs GR16:$dst), (ins GR16:$src1),
60 "shl{w}\t$dst", [], IIC_SR>, OpSize16;
61 def SHL32r1 : I<0xD1, MRM4r, (outs GR32:$dst), (ins GR32:$src1),
62 "shl{l}\t$dst", [], IIC_SR>, OpSize32;
63 def SHL64r1 : RI<0xD1, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
64 "shl{q}\t$dst", [], IIC_SR>;
65 } // hasSideEffects = 0
66 } // Constraints = "$src1 = $dst", SchedRW
69 let SchedRW = [WriteShiftLd, WriteRMW] in {
70 // FIXME: Why do we need an explicit "Uses = [CL]" when the instr has a pattern
// Memory-destination SHL by CL: each pattern loads, shifts, and stores back.
73 def SHL8mCL : I<0xD2, MRM4m, (outs), (ins i8mem :$dst),
74 "shl{b}\t{%cl, $dst|$dst, cl}",
75 [(store (shl (loadi8 addr:$dst), CL), addr:$dst)], IIC_SR>;
76 def SHL16mCL : I<0xD3, MRM4m, (outs), (ins i16mem:$dst),
77 "shl{w}\t{%cl, $dst|$dst, cl}",
78 [(store (shl (loadi16 addr:$dst), CL), addr:$dst)], IIC_SR>,
80 def SHL32mCL : I<0xD3, MRM4m, (outs), (ins i32mem:$dst),
81 "shl{l}\t{%cl, $dst|$dst, cl}",
82 [(store (shl (loadi32 addr:$dst), CL), addr:$dst)], IIC_SR>,
84 def SHL64mCL : RI<0xD3, MRM4m, (outs), (ins i64mem:$dst),
85 "shl{q}\t{%cl, $dst|$dst, cl}",
86 [(store (shl (loadi64 addr:$dst), CL), addr:$dst)], IIC_SR>,
87 Requires<[In64BitMode]>;
// Memory-destination SHL by an 8-bit immediate.
// NOTE(review): some IIC_SR/OpSize continuation lines below appear to be
// missing from this chunk; verify against the original file.
89 def SHL8mi : Ii8<0xC0, MRM4m, (outs), (ins i8mem :$dst, u8imm:$src),
90 "shl{b}\t{$src, $dst|$dst, $src}",
91 [(store (shl (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)],
93 def SHL16mi : Ii8<0xC1, MRM4m, (outs), (ins i16mem:$dst, u8imm:$src),
94 "shl{w}\t{$src, $dst|$dst, $src}",
95 [(store (shl (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)],
97 def SHL32mi : Ii8<0xC1, MRM4m, (outs), (ins i32mem:$dst, u8imm:$src),
98 "shl{l}\t{$src, $dst|$dst, $src}",
99 [(store (shl (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)],
101 def SHL64mi : RIi8<0xC1, MRM4m, (outs), (ins i64mem:$dst, u8imm:$src),
102 "shl{q}\t{$src, $dst|$dst, $src}",
103 [(store (shl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)],
104 IIC_SR>, Requires<[In64BitMode]>;
// Memory-destination SHL by 1 (short 0xD0/0xD1 encodings).
// NOTE(review): the assembly-string lines of the next four defs appear to be
// missing from this chunk; verify against the original file.
107 def SHL8m1 : I<0xD0, MRM4m, (outs), (ins i8mem :$dst),
109 [(store (shl (loadi8 addr:$dst), (i8 1)), addr:$dst)],
111 def SHL16m1 : I<0xD1, MRM4m, (outs), (ins i16mem:$dst),
113 [(store (shl (loadi16 addr:$dst), (i8 1)), addr:$dst)],
115 def SHL32m1 : I<0xD1, MRM4m, (outs), (ins i32mem:$dst),
117 [(store (shl (loadi32 addr:$dst), (i8 1)), addr:$dst)],
119 def SHL64m1 : RI<0xD1, MRM4m, (outs), (ins i64mem:$dst),
121 [(store (shl (loadi64 addr:$dst), (i8 1)), addr:$dst)],
122 IIC_SR>, Requires<[In64BitMode]>;
125 let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in {
// Logical shift right (SHR) by CL (0xD2/0xD3 /5).
127 def SHR8rCL : I<0xD2, MRM5r, (outs GR8 :$dst), (ins GR8 :$src1),
128 "shr{b}\t{%cl, $dst|$dst, cl}",
129 [(set GR8:$dst, (srl GR8:$src1, CL))], IIC_SR>;
130 def SHR16rCL : I<0xD3, MRM5r, (outs GR16:$dst), (ins GR16:$src1),
131 "shr{w}\t{%cl, $dst|$dst, cl}",
132 [(set GR16:$dst, (srl GR16:$src1, CL))], IIC_SR>, OpSize16;
133 def SHR32rCL : I<0xD3, MRM5r, (outs GR32:$dst), (ins GR32:$src1),
134 "shr{l}\t{%cl, $dst|$dst, cl}",
135 [(set GR32:$dst, (srl GR32:$src1, CL))], IIC_SR>, OpSize32;
136 def SHR64rCL : RI<0xD3, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
137 "shr{q}\t{%cl, $dst|$dst, cl}",
138 [(set GR64:$dst, (srl GR64:$src1, CL))], IIC_SR>;
// SHR by an 8-bit immediate.
// NOTE(review): OpSize continuation lines of the 16/32-bit ri defs appear to
// be missing from this chunk; verify against the original file.
141 def SHR8ri : Ii8<0xC0, MRM5r, (outs GR8:$dst), (ins GR8:$src1, u8imm:$src2),
142 "shr{b}\t{$src2, $dst|$dst, $src2}",
143 [(set GR8:$dst, (srl GR8:$src1, (i8 imm:$src2)))], IIC_SR>;
144 def SHR16ri : Ii8<0xC1, MRM5r, (outs GR16:$dst), (ins GR16:$src1, u8imm:$src2),
145 "shr{w}\t{$src2, $dst|$dst, $src2}",
146 [(set GR16:$dst, (srl GR16:$src1, (i8 imm:$src2)))],
148 def SHR32ri : Ii8<0xC1, MRM5r, (outs GR32:$dst), (ins GR32:$src1, u8imm:$src2),
149 "shr{l}\t{$src2, $dst|$dst, $src2}",
150 [(set GR32:$dst, (srl GR32:$src1, (i8 imm:$src2)))],
152 def SHR64ri : RIi8<0xC1, MRM5r, (outs GR64:$dst), (ins GR64:$src1, u8imm:$src2),
153 "shr{q}\t{$src2, $dst|$dst, $src2}",
154 [(set GR64:$dst, (srl GR64:$src1, (i8 imm:$src2)))], IIC_SR>;
// SHR by 1: unlike SHL, srl-by-1 has no cheaper 'add' equivalent, so these
// short forms carry patterns.
// NOTE(review): the assembly-string lines of the next four defs appear to be
// missing from this chunk; verify against the original file.
157 def SHR8r1 : I<0xD0, MRM5r, (outs GR8:$dst), (ins GR8:$src1),
159 [(set GR8:$dst, (srl GR8:$src1, (i8 1)))], IIC_SR>;
160 def SHR16r1 : I<0xD1, MRM5r, (outs GR16:$dst), (ins GR16:$src1),
162 [(set GR16:$dst, (srl GR16:$src1, (i8 1)))], IIC_SR>, OpSize16;
163 def SHR32r1 : I<0xD1, MRM5r, (outs GR32:$dst), (ins GR32:$src1),
165 [(set GR32:$dst, (srl GR32:$src1, (i8 1)))], IIC_SR>, OpSize32;
166 def SHR64r1 : RI<0xD1, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
168 [(set GR64:$dst, (srl GR64:$src1, (i8 1)))], IIC_SR>;
169 } // Constraints = "$src1 = $dst", SchedRW
172 let SchedRW = [WriteShiftLd, WriteRMW] in {
// Memory-destination SHR by CL.
174 def SHR8mCL : I<0xD2, MRM5m, (outs), (ins i8mem :$dst),
175 "shr{b}\t{%cl, $dst|$dst, cl}",
176 [(store (srl (loadi8 addr:$dst), CL), addr:$dst)], IIC_SR>;
177 def SHR16mCL : I<0xD3, MRM5m, (outs), (ins i16mem:$dst),
178 "shr{w}\t{%cl, $dst|$dst, cl}",
179 [(store (srl (loadi16 addr:$dst), CL), addr:$dst)], IIC_SR>,
181 def SHR32mCL : I<0xD3, MRM5m, (outs), (ins i32mem:$dst),
182 "shr{l}\t{%cl, $dst|$dst, cl}",
183 [(store (srl (loadi32 addr:$dst), CL), addr:$dst)], IIC_SR>,
185 def SHR64mCL : RI<0xD3, MRM5m, (outs), (ins i64mem:$dst),
186 "shr{q}\t{%cl, $dst|$dst, cl}",
187 [(store (srl (loadi64 addr:$dst), CL), addr:$dst)], IIC_SR>,
188 Requires<[In64BitMode]>;
// Memory-destination SHR by an 8-bit immediate.
// NOTE(review): some IIC_SR/OpSize continuation lines below appear to be
// missing from this chunk; verify against the original file.
190 def SHR8mi : Ii8<0xC0, MRM5m, (outs), (ins i8mem :$dst, u8imm:$src),
191 "shr{b}\t{$src, $dst|$dst, $src}",
192 [(store (srl (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)],
194 def SHR16mi : Ii8<0xC1, MRM5m, (outs), (ins i16mem:$dst, u8imm:$src),
195 "shr{w}\t{$src, $dst|$dst, $src}",
196 [(store (srl (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)],
198 def SHR32mi : Ii8<0xC1, MRM5m, (outs), (ins i32mem:$dst, u8imm:$src),
199 "shr{l}\t{$src, $dst|$dst, $src}",
200 [(store (srl (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)],
202 def SHR64mi : RIi8<0xC1, MRM5m, (outs), (ins i64mem:$dst, u8imm:$src),
203 "shr{q}\t{$src, $dst|$dst, $src}",
204 [(store (srl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)],
205 IIC_SR>, Requires<[In64BitMode]>;
// Memory-destination SHR by 1.
// NOTE(review): the assembly-string lines of the next four defs appear to be
// missing from this chunk; verify against the original file.
208 def SHR8m1 : I<0xD0, MRM5m, (outs), (ins i8mem :$dst),
210 [(store (srl (loadi8 addr:$dst), (i8 1)), addr:$dst)],
212 def SHR16m1 : I<0xD1, MRM5m, (outs), (ins i16mem:$dst),
214 [(store (srl (loadi16 addr:$dst), (i8 1)), addr:$dst)],
216 def SHR32m1 : I<0xD1, MRM5m, (outs), (ins i32mem:$dst),
218 [(store (srl (loadi32 addr:$dst), (i8 1)), addr:$dst)],
220 def SHR64m1 : RI<0xD1, MRM5m, (outs), (ins i64mem:$dst),
222 [(store (srl (loadi64 addr:$dst), (i8 1)), addr:$dst)],
223 IIC_SR>, Requires<[In64BitMode]>;
226 let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in {
// Arithmetic shift right (SAR) by CL (0xD2/0xD3 /7).
// NOTE(review): many IIC_SR/OpSize continuation lines in this section appear
// to be missing from this chunk; verify against the original file.
228 def SAR8rCL : I<0xD2, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1),
229 "sar{b}\t{%cl, $dst|$dst, cl}",
230 [(set GR8:$dst, (sra GR8:$src1, CL))],
232 def SAR16rCL : I<0xD3, MRM7r, (outs GR16:$dst), (ins GR16:$src1),
233 "sar{w}\t{%cl, $dst|$dst, cl}",
234 [(set GR16:$dst, (sra GR16:$src1, CL))],
236 def SAR32rCL : I<0xD3, MRM7r, (outs GR32:$dst), (ins GR32:$src1),
237 "sar{l}\t{%cl, $dst|$dst, cl}",
238 [(set GR32:$dst, (sra GR32:$src1, CL))],
240 def SAR64rCL : RI<0xD3, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
241 "sar{q}\t{%cl, $dst|$dst, cl}",
242 [(set GR64:$dst, (sra GR64:$src1, CL))],
// SAR by an 8-bit immediate.
246 def SAR8ri : Ii8<0xC0, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1, u8imm:$src2),
247 "sar{b}\t{$src2, $dst|$dst, $src2}",
248 [(set GR8:$dst, (sra GR8:$src1, (i8 imm:$src2)))],
250 def SAR16ri : Ii8<0xC1, MRM7r, (outs GR16:$dst), (ins GR16:$src1, u8imm:$src2),
251 "sar{w}\t{$src2, $dst|$dst, $src2}",
252 [(set GR16:$dst, (sra GR16:$src1, (i8 imm:$src2)))],
254 def SAR32ri : Ii8<0xC1, MRM7r, (outs GR32:$dst), (ins GR32:$src1, u8imm:$src2),
255 "sar{l}\t{$src2, $dst|$dst, $src2}",
256 [(set GR32:$dst, (sra GR32:$src1, (i8 imm:$src2)))],
258 def SAR64ri : RIi8<0xC1, MRM7r, (outs GR64:$dst),
259 (ins GR64:$src1, u8imm:$src2),
260 "sar{q}\t{$src2, $dst|$dst, $src2}",
261 [(set GR64:$dst, (sra GR64:$src1, (i8 imm:$src2)))],
// SAR by 1 (short 0xD0/0xD1 encodings).
// NOTE(review): the assembly-string lines of the next four defs appear to be
// missing from this chunk; verify against the original file.
265 def SAR8r1 : I<0xD0, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1),
267 [(set GR8:$dst, (sra GR8:$src1, (i8 1)))],
269 def SAR16r1 : I<0xD1, MRM7r, (outs GR16:$dst), (ins GR16:$src1),
271 [(set GR16:$dst, (sra GR16:$src1, (i8 1)))],
273 def SAR32r1 : I<0xD1, MRM7r, (outs GR32:$dst), (ins GR32:$src1),
275 [(set GR32:$dst, (sra GR32:$src1, (i8 1)))],
277 def SAR64r1 : RI<0xD1, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
279 [(set GR64:$dst, (sra GR64:$src1, (i8 1)))],
281 } // Constraints = "$src1 = $dst", SchedRW
284 let SchedRW = [WriteShiftLd, WriteRMW] in {
// Memory-destination SAR by CL.
// NOTE(review): some IIC_SR/OpSize continuation lines below appear to be
// missing from this chunk; verify against the original file.
286 def SAR8mCL : I<0xD2, MRM7m, (outs), (ins i8mem :$dst),
287 "sar{b}\t{%cl, $dst|$dst, cl}",
288 [(store (sra (loadi8 addr:$dst), CL), addr:$dst)],
290 def SAR16mCL : I<0xD3, MRM7m, (outs), (ins i16mem:$dst),
291 "sar{w}\t{%cl, $dst|$dst, cl}",
292 [(store (sra (loadi16 addr:$dst), CL), addr:$dst)],
294 def SAR32mCL : I<0xD3, MRM7m, (outs), (ins i32mem:$dst),
295 "sar{l}\t{%cl, $dst|$dst, cl}",
296 [(store (sra (loadi32 addr:$dst), CL), addr:$dst)],
298 def SAR64mCL : RI<0xD3, MRM7m, (outs), (ins i64mem:$dst),
299 "sar{q}\t{%cl, $dst|$dst, cl}",
300 [(store (sra (loadi64 addr:$dst), CL), addr:$dst)],
301 IIC_SR>, Requires<[In64BitMode]>;
// Memory-destination SAR by an 8-bit immediate.
303 def SAR8mi : Ii8<0xC0, MRM7m, (outs), (ins i8mem :$dst, u8imm:$src),
304 "sar{b}\t{$src, $dst|$dst, $src}",
305 [(store (sra (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)],
307 def SAR16mi : Ii8<0xC1, MRM7m, (outs), (ins i16mem:$dst, u8imm:$src),
308 "sar{w}\t{$src, $dst|$dst, $src}",
309 [(store (sra (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)],
311 def SAR32mi : Ii8<0xC1, MRM7m, (outs), (ins i32mem:$dst, u8imm:$src),
312 "sar{l}\t{$src, $dst|$dst, $src}",
313 [(store (sra (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)],
315 def SAR64mi : RIi8<0xC1, MRM7m, (outs), (ins i64mem:$dst, u8imm:$src),
316 "sar{q}\t{$src, $dst|$dst, $src}",
317 [(store (sra (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)],
318 IIC_SR>, Requires<[In64BitMode]>;
// Memory-destination SAR by 1.
// NOTE(review): the assembly-string lines of the next four defs appear to be
// missing from this chunk; verify against the original file.
321 def SAR8m1 : I<0xD0, MRM7m, (outs), (ins i8mem :$dst),
323 [(store (sra (loadi8 addr:$dst), (i8 1)), addr:$dst)],
325 def SAR16m1 : I<0xD1, MRM7m, (outs), (ins i16mem:$dst),
327 [(store (sra (loadi16 addr:$dst), (i8 1)), addr:$dst)],
329 def SAR32m1 : I<0xD1, MRM7m, (outs), (ins i32mem:$dst),
331 [(store (sra (loadi32 addr:$dst), (i8 1)), addr:$dst)],
333 def SAR64m1 : RI<0xD1, MRM7m, (outs), (ins i64mem:$dst),
335 [(store (sra (loadi64 addr:$dst), (i8 1)), addr:$dst)],
336 IIC_SR>, Requires<[In64BitMode]>;
339 //===----------------------------------------------------------------------===//
340 // Rotate instructions
341 //===----------------------------------------------------------------------===//
// Rotate-through-carry (RCL/RCR). These have no selection patterns (empty
// pattern lists); they read the carry flag, hence the EFLAGS in Uses below.
// NOTE(review): several closing-brace lines in this section appear to be
// missing from this chunk; verify brace balance against the original file.
343 let hasSideEffects = 0 in {
344 let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in {
346 let Uses = [CL, EFLAGS] in {
347 def RCL8rCL : I<0xD2, MRM2r, (outs GR8:$dst), (ins GR8:$src1),
348 "rcl{b}\t{%cl, $dst|$dst, cl}", [], IIC_SR>;
349 def RCL16rCL : I<0xD3, MRM2r, (outs GR16:$dst), (ins GR16:$src1),
350 "rcl{w}\t{%cl, $dst|$dst, cl}", [], IIC_SR>, OpSize16;
351 def RCL32rCL : I<0xD3, MRM2r, (outs GR32:$dst), (ins GR32:$src1),
352 "rcl{l}\t{%cl, $dst|$dst, cl}", [], IIC_SR>, OpSize32;
353 def RCL64rCL : RI<0xD3, MRM2r, (outs GR64:$dst), (ins GR64:$src1),
354 "rcl{q}\t{%cl, $dst|$dst, cl}", [], IIC_SR>;
355 } // Uses = [CL, EFLAGS]
// RCL by 1 and by an 8-bit immediate (count encoded, no CL use).
357 let Uses = [EFLAGS] in {
358 def RCL8r1 : I<0xD0, MRM2r, (outs GR8:$dst), (ins GR8:$src1),
359 "rcl{b}\t$dst", [], IIC_SR>;
360 def RCL8ri : Ii8<0xC0, MRM2r, (outs GR8:$dst), (ins GR8:$src1, u8imm:$cnt),
361 "rcl{b}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>;
362 def RCL16r1 : I<0xD1, MRM2r, (outs GR16:$dst), (ins GR16:$src1),
363 "rcl{w}\t$dst", [], IIC_SR>, OpSize16;
364 def RCL16ri : Ii8<0xC1, MRM2r, (outs GR16:$dst), (ins GR16:$src1, u8imm:$cnt),
365 "rcl{w}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>, OpSize16;
366 def RCL32r1 : I<0xD1, MRM2r, (outs GR32:$dst), (ins GR32:$src1),
367 "rcl{l}\t$dst", [], IIC_SR>, OpSize32;
368 def RCL32ri : Ii8<0xC1, MRM2r, (outs GR32:$dst), (ins GR32:$src1, u8imm:$cnt),
369 "rcl{l}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>, OpSize32;
370 def RCL64r1 : RI<0xD1, MRM2r, (outs GR64:$dst), (ins GR64:$src1),
371 "rcl{q}\t$dst", [], IIC_SR>;
372 def RCL64ri : RIi8<0xC1, MRM2r, (outs GR64:$dst), (ins GR64:$src1, u8imm:$cnt),
373 "rcl{q}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>;
// RCR register forms, by CL.
376 let Uses = [CL, EFLAGS] in {
377 def RCR8rCL : I<0xD2, MRM3r, (outs GR8:$dst), (ins GR8:$src1),
378 "rcr{b}\t{%cl, $dst|$dst, cl}", [], IIC_SR>;
379 def RCR16rCL : I<0xD3, MRM3r, (outs GR16:$dst), (ins GR16:$src1),
380 "rcr{w}\t{%cl, $dst|$dst, cl}", [], IIC_SR>, OpSize16;
381 def RCR32rCL : I<0xD3, MRM3r, (outs GR32:$dst), (ins GR32:$src1),
382 "rcr{l}\t{%cl, $dst|$dst, cl}", [], IIC_SR>, OpSize32;
383 def RCR64rCL : RI<0xD3, MRM3r, (outs GR64:$dst), (ins GR64:$src1),
384 "rcr{q}\t{%cl, $dst|$dst, cl}", [], IIC_SR>;
385 } // Uses = [CL, EFLAGS]
// RCR by 1 and by an 8-bit immediate.
387 let Uses = [EFLAGS] in {
388 def RCR8r1 : I<0xD0, MRM3r, (outs GR8:$dst), (ins GR8:$src1),
389 "rcr{b}\t$dst", [], IIC_SR>;
390 def RCR8ri : Ii8<0xC0, MRM3r, (outs GR8:$dst), (ins GR8:$src1, u8imm:$cnt),
391 "rcr{b}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>;
392 def RCR16r1 : I<0xD1, MRM3r, (outs GR16:$dst), (ins GR16:$src1),
393 "rcr{w}\t$dst", [], IIC_SR>, OpSize16;
394 def RCR16ri : Ii8<0xC1, MRM3r, (outs GR16:$dst), (ins GR16:$src1, u8imm:$cnt),
395 "rcr{w}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>, OpSize16;
396 def RCR32r1 : I<0xD1, MRM3r, (outs GR32:$dst), (ins GR32:$src1),
397 "rcr{l}\t$dst", [], IIC_SR>, OpSize32;
398 def RCR32ri : Ii8<0xC1, MRM3r, (outs GR32:$dst), (ins GR32:$src1, u8imm:$cnt),
399 "rcr{l}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>, OpSize32;
400 def RCR64r1 : RI<0xD1, MRM3r, (outs GR64:$dst), (ins GR64:$src1),
401 "rcr{q}\t$dst", [], IIC_SR>;
402 def RCR64ri : RIi8<0xC1, MRM3r, (outs GR64:$dst), (ins GR64:$src1, u8imm:$cnt),
403 "rcr{q}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>;
406 } // Constraints = "$src1 = $dst"
// Memory-destination RCL/RCR. mayStore is set explicitly because the empty
// pattern lists give the tablegen backend nothing to infer it from.
408 let SchedRW = [WriteShiftLd, WriteRMW], mayStore = 1 in {
409 let Uses = [EFLAGS] in {
410 def RCL8m1 : I<0xD0, MRM2m, (outs), (ins i8mem:$dst),
411 "rcl{b}\t$dst", [], IIC_SR>;
412 def RCL8mi : Ii8<0xC0, MRM2m, (outs), (ins i8mem:$dst, u8imm:$cnt),
413 "rcl{b}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>;
414 def RCL16m1 : I<0xD1, MRM2m, (outs), (ins i16mem:$dst),
415 "rcl{w}\t$dst", [], IIC_SR>, OpSize16;
416 def RCL16mi : Ii8<0xC1, MRM2m, (outs), (ins i16mem:$dst, u8imm:$cnt),
417 "rcl{w}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>, OpSize16;
418 def RCL32m1 : I<0xD1, MRM2m, (outs), (ins i32mem:$dst),
419 "rcl{l}\t$dst", [], IIC_SR>, OpSize32;
420 def RCL32mi : Ii8<0xC1, MRM2m, (outs), (ins i32mem:$dst, u8imm:$cnt),
421 "rcl{l}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>, OpSize32;
422 def RCL64m1 : RI<0xD1, MRM2m, (outs), (ins i64mem:$dst),
423 "rcl{q}\t$dst", [], IIC_SR>, Requires<[In64BitMode]>;
424 def RCL64mi : RIi8<0xC1, MRM2m, (outs), (ins i64mem:$dst, u8imm:$cnt),
425 "rcl{q}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>,
426 Requires<[In64BitMode]>;
428 def RCR8m1 : I<0xD0, MRM3m, (outs), (ins i8mem:$dst),
429 "rcr{b}\t$dst", [], IIC_SR>;
430 def RCR8mi : Ii8<0xC0, MRM3m, (outs), (ins i8mem:$dst, u8imm:$cnt),
431 "rcr{b}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>;
432 def RCR16m1 : I<0xD1, MRM3m, (outs), (ins i16mem:$dst),
433 "rcr{w}\t$dst", [], IIC_SR>, OpSize16;
434 def RCR16mi : Ii8<0xC1, MRM3m, (outs), (ins i16mem:$dst, u8imm:$cnt),
435 "rcr{w}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>, OpSize16;
436 def RCR32m1 : I<0xD1, MRM3m, (outs), (ins i32mem:$dst),
437 "rcr{l}\t$dst", [], IIC_SR>, OpSize32;
438 def RCR32mi : Ii8<0xC1, MRM3m, (outs), (ins i32mem:$dst, u8imm:$cnt),
439 "rcr{l}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>, OpSize32;
440 def RCR64m1 : RI<0xD1, MRM3m, (outs), (ins i64mem:$dst),
441 "rcr{q}\t$dst", [], IIC_SR>, Requires<[In64BitMode]>;
442 def RCR64mi : RIi8<0xC1, MRM3m, (outs), (ins i64mem:$dst, u8imm:$cnt),
443 "rcr{q}\t{$cnt, $dst|$dst, $cnt}", [], IIC_SR>,
444 Requires<[In64BitMode]>;
// Memory-destination RCL/RCR by CL.
447 let Uses = [CL, EFLAGS] in {
448 def RCL8mCL : I<0xD2, MRM2m, (outs), (ins i8mem:$dst),
449 "rcl{b}\t{%cl, $dst|$dst, cl}", [], IIC_SR>;
450 def RCL16mCL : I<0xD3, MRM2m, (outs), (ins i16mem:$dst),
451 "rcl{w}\t{%cl, $dst|$dst, cl}", [], IIC_SR>, OpSize16;
452 def RCL32mCL : I<0xD3, MRM2m, (outs), (ins i32mem:$dst),
453 "rcl{l}\t{%cl, $dst|$dst, cl}", [], IIC_SR>, OpSize32;
454 def RCL64mCL : RI<0xD3, MRM2m, (outs), (ins i64mem:$dst),
455 "rcl{q}\t{%cl, $dst|$dst, cl}", [], IIC_SR>,
456 Requires<[In64BitMode]>;
458 def RCR8mCL : I<0xD2, MRM3m, (outs), (ins i8mem:$dst),
459 "rcr{b}\t{%cl, $dst|$dst, cl}", [], IIC_SR>;
460 def RCR16mCL : I<0xD3, MRM3m, (outs), (ins i16mem:$dst),
461 "rcr{w}\t{%cl, $dst|$dst, cl}", [], IIC_SR>, OpSize16;
462 def RCR32mCL : I<0xD3, MRM3m, (outs), (ins i32mem:$dst),
463 "rcr{l}\t{%cl, $dst|$dst, cl}", [], IIC_SR>, OpSize32;
464 def RCR64mCL : RI<0xD3, MRM3m, (outs), (ins i64mem:$dst),
465 "rcr{q}\t{%cl, $dst|$dst, cl}", [], IIC_SR>,
466 Requires<[In64BitMode]>;
467 } // Uses = [CL, EFLAGS]
469 } // hasSideEffects = 0
471 let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in {
472 // FIXME: provide shorter instructions when imm8 == 1
// Rotate left (ROL) by CL (0xD2/0xD3 /0).
474 def ROL8rCL : I<0xD2, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1),
475 "rol{b}\t{%cl, $dst|$dst, cl}",
476 [(set GR8:$dst, (rotl GR8:$src1, CL))], IIC_SR>;
477 def ROL16rCL : I<0xD3, MRM0r, (outs GR16:$dst), (ins GR16:$src1),
478 "rol{w}\t{%cl, $dst|$dst, cl}",
479 [(set GR16:$dst, (rotl GR16:$src1, CL))], IIC_SR>, OpSize16;
480 def ROL32rCL : I<0xD3, MRM0r, (outs GR32:$dst), (ins GR32:$src1),
481 "rol{l}\t{%cl, $dst|$dst, cl}",
482 [(set GR32:$dst, (rotl GR32:$src1, CL))], IIC_SR>, OpSize32;
483 def ROL64rCL : RI<0xD3, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
484 "rol{q}\t{%cl, $dst|$dst, cl}",
485 [(set GR64:$dst, (rotl GR64:$src1, CL))], IIC_SR>;
// ROL by an 8-bit immediate.
// NOTE(review): some IIC_SR/OpSize continuation lines below appear to be
// missing from this chunk; verify against the original file.
488 def ROL8ri : Ii8<0xC0, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1, u8imm:$src2),
489 "rol{b}\t{$src2, $dst|$dst, $src2}",
490 [(set GR8:$dst, (rotl GR8:$src1, (i8 imm:$src2)))], IIC_SR>;
491 def ROL16ri : Ii8<0xC1, MRM0r, (outs GR16:$dst), (ins GR16:$src1, u8imm:$src2),
492 "rol{w}\t{$src2, $dst|$dst, $src2}",
493 [(set GR16:$dst, (rotl GR16:$src1, (i8 imm:$src2)))],
495 def ROL32ri : Ii8<0xC1, MRM0r, (outs GR32:$dst), (ins GR32:$src1, u8imm:$src2),
496 "rol{l}\t{$src2, $dst|$dst, $src2}",
497 [(set GR32:$dst, (rotl GR32:$src1, (i8 imm:$src2)))],
499 def ROL64ri : RIi8<0xC1, MRM0r, (outs GR64:$dst),
500 (ins GR64:$src1, u8imm:$src2),
501 "rol{q}\t{$src2, $dst|$dst, $src2}",
502 [(set GR64:$dst, (rotl GR64:$src1, (i8 imm:$src2)))],
// ROL by 1 (short 0xD0/0xD1 encodings).
// NOTE(review): the assembly-string lines of the next four defs appear to be
// missing from this chunk; verify against the original file.
506 def ROL8r1 : I<0xD0, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1),
508 [(set GR8:$dst, (rotl GR8:$src1, (i8 1)))],
510 def ROL16r1 : I<0xD1, MRM0r, (outs GR16:$dst), (ins GR16:$src1),
512 [(set GR16:$dst, (rotl GR16:$src1, (i8 1)))],
514 def ROL32r1 : I<0xD1, MRM0r, (outs GR32:$dst), (ins GR32:$src1),
516 [(set GR32:$dst, (rotl GR32:$src1, (i8 1)))],
518 def ROL64r1 : RI<0xD1, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
520 [(set GR64:$dst, (rotl GR64:$src1, (i8 1)))],
522 } // Constraints = "$src1 = $dst", SchedRW
524 let SchedRW = [WriteShiftLd, WriteRMW] in {
// Memory-destination ROL by CL.
// NOTE(review): some IIC_SR/OpSize continuation lines below appear to be
// missing from this chunk; verify against the original file.
526 def ROL8mCL : I<0xD2, MRM0m, (outs), (ins i8mem :$dst),
527 "rol{b}\t{%cl, $dst|$dst, cl}",
528 [(store (rotl (loadi8 addr:$dst), CL), addr:$dst)],
530 def ROL16mCL : I<0xD3, MRM0m, (outs), (ins i16mem:$dst),
531 "rol{w}\t{%cl, $dst|$dst, cl}",
532 [(store (rotl (loadi16 addr:$dst), CL), addr:$dst)],
534 def ROL32mCL : I<0xD3, MRM0m, (outs), (ins i32mem:$dst),
535 "rol{l}\t{%cl, $dst|$dst, cl}",
536 [(store (rotl (loadi32 addr:$dst), CL), addr:$dst)],
538 def ROL64mCL : RI<0xD3, MRM0m, (outs), (ins i64mem:$dst),
539 "rol{q}\t{%cl, $dst|$dst, cl}",
540 [(store (rotl (loadi64 addr:$dst), CL), addr:$dst)],
541 IIC_SR>, Requires<[In64BitMode]>;
// Memory-destination ROL by an 8-bit immediate. Note the count operand is
// named $src1 here, unlike the $src used by the SHL/SHR/SAR memory forms.
543 def ROL8mi : Ii8<0xC0, MRM0m, (outs), (ins i8mem :$dst, u8imm:$src1),
544 "rol{b}\t{$src1, $dst|$dst, $src1}",
545 [(store (rotl (loadi8 addr:$dst), (i8 imm:$src1)), addr:$dst)],
547 def ROL16mi : Ii8<0xC1, MRM0m, (outs), (ins i16mem:$dst, u8imm:$src1),
548 "rol{w}\t{$src1, $dst|$dst, $src1}",
549 [(store (rotl (loadi16 addr:$dst), (i8 imm:$src1)), addr:$dst)],
551 def ROL32mi : Ii8<0xC1, MRM0m, (outs), (ins i32mem:$dst, u8imm:$src1),
552 "rol{l}\t{$src1, $dst|$dst, $src1}",
553 [(store (rotl (loadi32 addr:$dst), (i8 imm:$src1)), addr:$dst)],
555 def ROL64mi : RIi8<0xC1, MRM0m, (outs), (ins i64mem:$dst, u8imm:$src1),
556 "rol{q}\t{$src1, $dst|$dst, $src1}",
557 [(store (rotl (loadi64 addr:$dst), (i8 imm:$src1)), addr:$dst)],
558 IIC_SR>, Requires<[In64BitMode]>;
// Memory-destination ROL by 1.
// NOTE(review): the assembly-string lines of the next four defs appear to be
// missing from this chunk; verify against the original file.
561 def ROL8m1 : I<0xD0, MRM0m, (outs), (ins i8mem :$dst),
563 [(store (rotl (loadi8 addr:$dst), (i8 1)), addr:$dst)],
565 def ROL16m1 : I<0xD1, MRM0m, (outs), (ins i16mem:$dst),
567 [(store (rotl (loadi16 addr:$dst), (i8 1)), addr:$dst)],
569 def ROL32m1 : I<0xD1, MRM0m, (outs), (ins i32mem:$dst),
571 [(store (rotl (loadi32 addr:$dst), (i8 1)), addr:$dst)],
573 def ROL64m1 : RI<0xD1, MRM0m, (outs), (ins i64mem:$dst),
575 [(store (rotl (loadi64 addr:$dst), (i8 1)), addr:$dst)],
576 IIC_SR>, Requires<[In64BitMode]>;
579 let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in {
// Rotate right (ROR) by CL (0xD2/0xD3 /1).
581 def ROR8rCL : I<0xD2, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1),
582 "ror{b}\t{%cl, $dst|$dst, cl}",
583 [(set GR8:$dst, (rotr GR8:$src1, CL))], IIC_SR>;
584 def ROR16rCL : I<0xD3, MRM1r, (outs GR16:$dst), (ins GR16:$src1),
585 "ror{w}\t{%cl, $dst|$dst, cl}",
586 [(set GR16:$dst, (rotr GR16:$src1, CL))], IIC_SR>, OpSize16;
587 def ROR32rCL : I<0xD3, MRM1r, (outs GR32:$dst), (ins GR32:$src1),
588 "ror{l}\t{%cl, $dst|$dst, cl}",
589 [(set GR32:$dst, (rotr GR32:$src1, CL))], IIC_SR>, OpSize32;
590 def ROR64rCL : RI<0xD3, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
591 "ror{q}\t{%cl, $dst|$dst, cl}",
592 [(set GR64:$dst, (rotr GR64:$src1, CL))], IIC_SR>;
// ROR by an 8-bit immediate. These use relocImm rather than the plain imm
// used by the other shift/rotate immediate forms in this file.
// NOTE(review): some IIC_SR/OpSize continuation lines below appear to be
// missing from this chunk; verify against the original file.
595 def ROR8ri : Ii8<0xC0, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1, u8imm:$src2),
596 "ror{b}\t{$src2, $dst|$dst, $src2}",
597 [(set GR8:$dst, (rotr GR8:$src1, (i8 relocImm:$src2)))],
599 def ROR16ri : Ii8<0xC1, MRM1r, (outs GR16:$dst), (ins GR16:$src1, u8imm:$src2),
600 "ror{w}\t{$src2, $dst|$dst, $src2}",
601 [(set GR16:$dst, (rotr GR16:$src1, (i8 relocImm:$src2)))],
603 def ROR32ri : Ii8<0xC1, MRM1r, (outs GR32:$dst), (ins GR32:$src1, u8imm:$src2),
604 "ror{l}\t{$src2, $dst|$dst, $src2}",
605 [(set GR32:$dst, (rotr GR32:$src1, (i8 relocImm:$src2)))],
607 def ROR64ri : RIi8<0xC1, MRM1r, (outs GR64:$dst),
608 (ins GR64:$src1, u8imm:$src2),
609 "ror{q}\t{$src2, $dst|$dst, $src2}",
610 [(set GR64:$dst, (rotr GR64:$src1, (i8 relocImm:$src2)))],
// ROR by 1: selected from rotl by (bitwidth - 1), which is equivalent.
// NOTE(review): the assembly-string lines of the next four defs appear to be
// missing from this chunk; verify against the original file.
614 def ROR8r1 : I<0xD0, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1),
616 [(set GR8:$dst, (rotl GR8:$src1, (i8 7)))],
618 def ROR16r1 : I<0xD1, MRM1r, (outs GR16:$dst), (ins GR16:$src1),
620 [(set GR16:$dst, (rotl GR16:$src1, (i8 15)))],
622 def ROR32r1 : I<0xD1, MRM1r, (outs GR32:$dst), (ins GR32:$src1),
624 [(set GR32:$dst, (rotl GR32:$src1, (i8 31)))],
626 def ROR64r1 : RI<0xD1, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
628 [(set GR64:$dst, (rotl GR64:$src1, (i8 63)))],
630 } // Constraints = "$src1 = $dst", SchedRW
632 let SchedRW = [WriteShiftLd, WriteRMW] in {
// Memory-destination ROR by CL.
// NOTE(review): some IIC_SR/OpSize continuation lines below appear to be
// missing from this chunk; verify against the original file.
634 def ROR8mCL : I<0xD2, MRM1m, (outs), (ins i8mem :$dst),
635 "ror{b}\t{%cl, $dst|$dst, cl}",
636 [(store (rotr (loadi8 addr:$dst), CL), addr:$dst)],
638 def ROR16mCL : I<0xD3, MRM1m, (outs), (ins i16mem:$dst),
639 "ror{w}\t{%cl, $dst|$dst, cl}",
640 [(store (rotr (loadi16 addr:$dst), CL), addr:$dst)],
642 def ROR32mCL : I<0xD3, MRM1m, (outs), (ins i32mem:$dst),
643 "ror{l}\t{%cl, $dst|$dst, cl}",
644 [(store (rotr (loadi32 addr:$dst), CL), addr:$dst)],
646 def ROR64mCL : RI<0xD3, MRM1m, (outs), (ins i64mem:$dst),
647 "ror{q}\t{%cl, $dst|$dst, cl}",
648 [(store (rotr (loadi64 addr:$dst), CL), addr:$dst)],
649 IIC_SR>, Requires<[In64BitMode]>;
// Memory-destination ROR by an 8-bit immediate.
651 def ROR8mi : Ii8<0xC0, MRM1m, (outs), (ins i8mem :$dst, u8imm:$src),
652 "ror{b}\t{$src, $dst|$dst, $src}",
653 [(store (rotr (loadi8 addr:$dst), (i8 imm:$src)), addr:$dst)],
655 def ROR16mi : Ii8<0xC1, MRM1m, (outs), (ins i16mem:$dst, u8imm:$src),
656 "ror{w}\t{$src, $dst|$dst, $src}",
657 [(store (rotr (loadi16 addr:$dst), (i8 imm:$src)), addr:$dst)],
659 def ROR32mi : Ii8<0xC1, MRM1m, (outs), (ins i32mem:$dst, u8imm:$src),
660 "ror{l}\t{$src, $dst|$dst, $src}",
661 [(store (rotr (loadi32 addr:$dst), (i8 imm:$src)), addr:$dst)],
663 def ROR64mi : RIi8<0xC1, MRM1m, (outs), (ins i64mem:$dst, u8imm:$src),
664 "ror{q}\t{$src, $dst|$dst, $src}",
665 [(store (rotr (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)],
666 IIC_SR>, Requires<[In64BitMode]>;
// Memory-destination ROR by 1, selected from rotl by (bitwidth - 1).
// NOTE(review): the assembly-string lines of the next four defs appear to be
// missing from this chunk; verify against the original file.
669 def ROR8m1 : I<0xD0, MRM1m, (outs), (ins i8mem :$dst),
671 [(store (rotl (loadi8 addr:$dst), (i8 7)), addr:$dst)],
673 def ROR16m1 : I<0xD1, MRM1m, (outs), (ins i16mem:$dst),
675 [(store (rotl (loadi16 addr:$dst), (i8 15)), addr:$dst)],
677 def ROR32m1 : I<0xD1, MRM1m, (outs), (ins i32mem:$dst),
679 [(store (rotl (loadi32 addr:$dst), (i8 31)), addr:$dst)],
681 def ROR64m1 : RI<0xD1, MRM1m, (outs), (ins i64mem:$dst),
683 [(store (rotl (loadi64 addr:$dst), (i8 63)), addr:$dst)],
684 IIC_SR>, Requires<[In64BitMode]>;
688 //===----------------------------------------------------------------------===//
689 // Double shift instructions (generalizations of rotate)
690 //===----------------------------------------------------------------------===//
692 let Constraints = "$src1 = $dst", SchedRW = [WriteShift] in {
// Double-precision shifts (SHLD/SHRD) by CL: $dst is shifted with bits
// filled in from $src2. All forms are in the two-byte (TB) opcode map.
// NOTE(review): some IIC/TB continuation lines below appear to be missing
// from this chunk; verify against the original file.
695 def SHLD16rrCL : I<0xA5, MRMDestReg, (outs GR16:$dst),
696 (ins GR16:$src1, GR16:$src2),
697 "shld{w}\t{%cl, $src2, $dst|$dst, $src2, cl}",
698 [(set GR16:$dst, (X86shld GR16:$src1, GR16:$src2, CL))],
701 def SHRD16rrCL : I<0xAD, MRMDestReg, (outs GR16:$dst),
702 (ins GR16:$src1, GR16:$src2),
703 "shrd{w}\t{%cl, $src2, $dst|$dst, $src2, cl}",
704 [(set GR16:$dst, (X86shrd GR16:$src1, GR16:$src2, CL))],
707 def SHLD32rrCL : I<0xA5, MRMDestReg, (outs GR32:$dst),
708 (ins GR32:$src1, GR32:$src2),
709 "shld{l}\t{%cl, $src2, $dst|$dst, $src2, cl}",
710 [(set GR32:$dst, (X86shld GR32:$src1, GR32:$src2, CL))],
711 IIC_SHD32_REG_CL>, TB, OpSize32;
712 def SHRD32rrCL : I<0xAD, MRMDestReg, (outs GR32:$dst),
713 (ins GR32:$src1, GR32:$src2),
714 "shrd{l}\t{%cl, $src2, $dst|$dst, $src2, cl}",
715 [(set GR32:$dst, (X86shrd GR32:$src1, GR32:$src2, CL))],
716 IIC_SHD32_REG_CL>, TB, OpSize32;
717 def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst),
718 (ins GR64:$src1, GR64:$src2),
719 "shld{q}\t{%cl, $src2, $dst|$dst, $src2, cl}",
720 [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2, CL))],
723 def SHRD64rrCL : RI<0xAD, MRMDestReg, (outs GR64:$dst),
724 (ins GR64:$src1, GR64:$src2),
725 "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, cl}",
726 [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2, CL))],
// Immediate-count double shifts.
// NOTE(review): the (outs ...) lines of the following defs appear to be
// missing from this chunk; verify against the original file.
731 let isCommutable = 1 in { // These instructions commute to each other.
732 def SHLD16rri8 : Ii8<0xA4, MRMDestReg,
734 (ins GR16:$src1, GR16:$src2, u8imm:$src3),
735 "shld{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
736 [(set GR16:$dst, (X86shld GR16:$src1, GR16:$src2,
737 (i8 imm:$src3)))], IIC_SHD16_REG_IM>,
739 def SHRD16rri8 : Ii8<0xAC, MRMDestReg,
741 (ins GR16:$src1, GR16:$src2, u8imm:$src3),
742 "shrd{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
743 [(set GR16:$dst, (X86shrd GR16:$src1, GR16:$src2,
744 (i8 imm:$src3)))], IIC_SHD16_REG_IM>,
746 def SHLD32rri8 : Ii8<0xA4, MRMDestReg,
748 (ins GR32:$src1, GR32:$src2, u8imm:$src3),
749 "shld{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
750 [(set GR32:$dst, (X86shld GR32:$src1, GR32:$src2,
751 (i8 imm:$src3)))], IIC_SHD32_REG_IM>,
753 def SHRD32rri8 : Ii8<0xAC, MRMDestReg,
755 (ins GR32:$src1, GR32:$src2, u8imm:$src3),
756 "shrd{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
757 [(set GR32:$dst, (X86shrd GR32:$src1, GR32:$src2,
758 (i8 imm:$src3)))], IIC_SHD32_REG_IM>,
760 def SHLD64rri8 : RIi8<0xA4, MRMDestReg,
762 (ins GR64:$src1, GR64:$src2, u8imm:$src3),
763 "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
764 [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2,
765 (i8 imm:$src3)))], IIC_SHD64_REG_IM>,
767 def SHRD64rri8 : RIi8<0xAC, MRMDestReg,
769 (ins GR64:$src1, GR64:$src2, u8imm:$src3),
770 "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
771 [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2,
772 (i8 imm:$src3)))], IIC_SHD64_REG_IM>,
775 } // Constraints = "$src1 = $dst", SchedRW
// Memory-destination double-precision shifts by a variable count in CL.
// Each instruction is a read-modify-write: load from addr:$dst, apply
// X86shld/X86shrd with GR16/GR32/GR64:$src2 as the bit source and CL as the
// count, then store back to the same address -- hence the
// [WriteShiftLd, WriteRMW] scheduling classes. Opcode 0xA5 is SHLD-by-CL,
// 0xAD is SHRD-by-CL; all use the TB (0x0F two-byte) opcode map, with
// OpSize16/OpSize32 selecting the operand-size prefix and RI the REX.W form.
777 let SchedRW = [WriteShiftLd, WriteRMW] in {
779 def SHLD16mrCL : I<0xA5, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
780 "shld{w}\t{%cl, $src2, $dst|$dst, $src2, cl}",
781 [(store (X86shld (loadi16 addr:$dst), GR16:$src2, CL),
782 addr:$dst)], IIC_SHD16_MEM_CL>, TB, OpSize16;
783 def SHRD16mrCL : I<0xAD, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
784 "shrd{w}\t{%cl, $src2, $dst|$dst, $src2, cl}",
785 [(store (X86shrd (loadi16 addr:$dst), GR16:$src2, CL),
786 addr:$dst)], IIC_SHD16_MEM_CL>, TB, OpSize16;
788 def SHLD32mrCL : I<0xA5, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
789 "shld{l}\t{%cl, $src2, $dst|$dst, $src2, cl}",
790 [(store (X86shld (loadi32 addr:$dst), GR32:$src2, CL),
791 addr:$dst)], IIC_SHD32_MEM_CL>, TB, OpSize32;
792 def SHRD32mrCL : I<0xAD, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
793 "shrd{l}\t{%cl, $src2, $dst|$dst, $src2, cl}",
794 [(store (X86shrd (loadi32 addr:$dst), GR32:$src2, CL),
795 addr:$dst)], IIC_SHD32_MEM_CL>, TB, OpSize32;
797 def SHLD64mrCL : RI<0xA5, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
798 "shld{q}\t{%cl, $src2, $dst|$dst, $src2, cl}",
799 [(store (X86shld (loadi64 addr:$dst), GR64:$src2, CL),
800 addr:$dst)], IIC_SHD64_MEM_CL>, TB;
801 def SHRD64mrCL : RI<0xAD, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
802 "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, cl}",
803 [(store (X86shrd (loadi64 addr:$dst), GR64:$src2, CL),
804 addr:$dst)], IIC_SHD64_MEM_CL>, TB;
// Memory-destination double-precision shifts by an 8-bit immediate count.
// Same read-modify-write structure as the CL forms above, but the count is
// an immediate operand $src3 (opcode 0xA4 = SHLD, 0xAC = SHRD).
// NOTE(review): the embedded original numbering skips lines
// (812-813, 819-821, 827-828, 834-836, 842-843, 849-850) -- the itinerary
// arguments plus the closing "TB, OpSize16;/OpSize32;/TB;" suffixes of each
// record were dropped by this extract; verify against the complete file.
807 def SHLD16mri8 : Ii8<0xA4, MRMDestMem,
808                      (outs), (ins i16mem:$dst, GR16:$src2, u8imm:$src3),
809                      "shld{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
810                      [(store (X86shld (loadi16 addr:$dst), GR16:$src2,
811                                       (i8 imm:$src3)), addr:$dst)],
814 def SHRD16mri8 : Ii8<0xAC, MRMDestMem,
815                      (outs), (ins i16mem:$dst, GR16:$src2, u8imm:$src3),
816                      "shrd{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
817                      [(store (X86shrd (loadi16 addr:$dst), GR16:$src2,
818                                       (i8 imm:$src3)), addr:$dst)],
822 def SHLD32mri8 : Ii8<0xA4, MRMDestMem,
823                      (outs), (ins i32mem:$dst, GR32:$src2, u8imm:$src3),
824                      "shld{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
825                      [(store (X86shld (loadi32 addr:$dst), GR32:$src2,
826                                       (i8 imm:$src3)), addr:$dst)],
829 def SHRD32mri8 : Ii8<0xAC, MRMDestMem,
830                      (outs), (ins i32mem:$dst, GR32:$src2, u8imm:$src3),
831                      "shrd{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
832                      [(store (X86shrd (loadi32 addr:$dst), GR32:$src2,
833                                       (i8 imm:$src3)), addr:$dst)],
837 def SHLD64mri8 : RIi8<0xA4, MRMDestMem,
838                       (outs), (ins i64mem:$dst, GR64:$src2, u8imm:$src3),
839                       "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
840                       [(store (X86shld (loadi64 addr:$dst), GR64:$src2,
841                                        (i8 imm:$src3)), addr:$dst)],
844 def SHRD64mri8 : RIi8<0xAC, MRMDestMem,
845                       (outs), (ins i64mem:$dst, GR64:$src2, u8imm:$src3),
846                       "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}",
847                       [(store (X86shrd (loadi64 addr:$dst), GR64:$src2,
848                                        (i8 imm:$src3)), addr:$dst)],
855 // Sandy Bridge and newer Intel processors support faster rotates using
856 // SHLD to avoid a partial flag update on the normal rotate instructions.
// On targets with HasFastSHLDRotate, select a rotate-left-by-immediate as
// SHLD with both register operands equal to the rotated value; shifting in
// the value's own high bits is exactly a left rotate. AddedComplexity = 5
// makes these patterns win over the plain ROL selection.
// NOTE(review): the closing "}" of this let block (original line ~862) was
// dropped by this extract -- confirm against the complete file.
857 let Predicates = [HasFastSHLDRotate], AddedComplexity = 5 in {
858   def : Pat<(rotl GR32:$src, (i8 imm:$shamt)),
859             (SHLD32rri8 GR32:$src, GR32:$src, imm:$shamt)>;
861   def : Pat<(rotl GR64:$src, (i8 imm:$shamt)),
862             (SHLD64rri8 GR64:$src, GR64:$src, imm:$shamt)>;
// Immediate transforms used by the RORX patterns below: a left-rotate
// amount k is rewritten as the equivalent right-rotate amount (width - k),
// since rotl(x, k) == rotr(x, width - k) for the register width.
// NOTE(review): the closing "}]>;" of each SDNodeXForm (original lines
// ~867-868 and ~872-873) was dropped by this extract -- confirm against
// the complete file.
864 def ROT32L2R_imm8 : SDNodeXForm<imm, [{
865   // Convert a ROTL shamt to a ROTR shamt on 32-bit integer.
866   return getI8Imm(32 - N->getZExtValue(), SDLoc(N));
869 def ROT64L2R_imm8 : SDNodeXForm<imm, [{
870   // Convert a ROTL shamt to a ROTR shamt on 64-bit integer.
871   return getI8Imm(64 - N->getZExtValue(), SDLoc(N));
// Multiclass stamping out the two forms of a BMI2 VEX-encoded rotate
// (instantiated below as RORX32/RORX64): "ri" rotates a register by an
// 8-bit immediate, "mi" rotates a loaded memory operand. Both are
// pattern-less ([]) and hasSideEffects = 0; selection happens via the
// explicit Pats further down. Opcode 0xF0 in the TA map with XD prefix
// (TAXD), VEX-encoded, three-operand (non-destructive destination).
874 multiclass bmi_rotate<string asm, RegisterClass RC, X86MemOperand x86memop> {
875 let hasSideEffects = 0 in {
876   def ri : Ii8<0xF0, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, u8imm:$src2),
877                !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
878                []>, TAXD, VEX, Sched<[WriteShift]>;
880   def mi : Ii8<0xF0, MRMSrcMem, (outs RC:$dst),
881                (ins x86memop:$src1, u8imm:$src2),
882                !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
883                []>, TAXD, VEX, Sched<[WriteShiftLd]>;
// Multiclass for the BMI2 variable-count shifts (SARX/SHRX/SHLX below):
// "rr" shifts a register by a register count, "rm" shifts a loaded memory
// operand. Opcode 0xF7, VEX 3-operand encoding with the count register in
// VEX.vvvv (MRMSrcReg4VOp3/MRMSrcMem4VOp3); the mandatory-prefix suffix
// (T8XS/T8XD/T8PD) applied at the defm site picks SARX vs SHRX vs SHLX.
// Pattern-less and hasSideEffects = 0: these do not touch EFLAGS, and
// selection is done by the explicit Pats further down.
// NOTE(review): the tail of the "rm" Sched list and the multiclass's
// closing braces (original lines ~899-903) were dropped by this extract.
887 multiclass bmi_shift<string asm, RegisterClass RC, X86MemOperand x86memop> {
888 let hasSideEffects = 0 in {
889   def rr : I<0xF7, MRMSrcReg4VOp3, (outs RC:$dst), (ins RC:$src1, RC:$src2),
890              !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
891              VEX, Sched<[WriteShift]>;
893   def rm : I<0xF7, MRMSrcMem4VOp3,
894              (outs RC:$dst), (ins x86memop:$src1, RC:$src2),
895              !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
896              VEX, Sched<[WriteShiftLd,
898                          ReadDefault, ReadDefault, ReadDefault, ReadDefault,
// Instantiate the BMI2 rotate/shift instructions, gated on HasBMI2.
// The prefix suffix distinguishes the shifts sharing opcode 0xF7:
// T8XS (F3 prefix) = SARX, T8XD (F2) = SHRX, T8PD (66) = SHLX; VEX_W
// selects the 64-bit forms.
905 let Predicates = [HasBMI2] in {
906   defm RORX32 : bmi_rotate<"rorx{l}", GR32, i32mem>;
907   defm RORX64 : bmi_rotate<"rorx{q}", GR64, i64mem>, VEX_W;
908   defm SARX32 : bmi_shift<"sarx{l}", GR32, i32mem>, T8XS;
909   defm SARX64 : bmi_shift<"sarx{q}", GR64, i64mem>, T8XS, VEX_W;
910   defm SHRX32 : bmi_shift<"shrx{l}", GR32, i32mem>, T8XD;
911   defm SHRX64 : bmi_shift<"shrx{q}", GR64, i64mem>, T8XD, VEX_W;
912   defm SHLX32 : bmi_shift<"shlx{l}", GR32, i32mem>, T8PD;
913   defm SHLX64 : bmi_shift<"shlx{q}", GR64, i64mem>, T8PD, VEX_W;
915   // Prefer RORX which is non-destructive and doesn't update EFLAGS.
// A rotate-left by immediate is selected as RORX with the complemented
// count (via the ROT32L2R_imm8/ROT64L2R_imm8 transforms defined above),
// both for register and for loaded-memory operands. AddedComplexity = 10
// outranks the HasFastSHLDRotate SHLD patterns (complexity 5) and the
// plain ROL forms when BMI2 is available.
916   let AddedComplexity = 10 in {
917     def : Pat<(rotl GR32:$src, (i8 imm:$shamt)),
918               (RORX32ri GR32:$src, (ROT32L2R_imm8 imm:$shamt))>;
919     def : Pat<(rotl GR64:$src, (i8 imm:$shamt)),
920               (RORX64ri GR64:$src, (ROT64L2R_imm8 imm:$shamt))>;
923     def : Pat<(rotl (loadi32 addr:$src), (i8 imm:$shamt)),
924               (RORX32mi addr:$src, (ROT32L2R_imm8 imm:$shamt))>;
925     def : Pat<(rotl (loadi64 addr:$src), (i8 imm:$shamt)),
926               (RORX64mi addr:$src, (ROT64L2R_imm8 imm:$shamt))>;
928   // Prefer SARX/SHRX/SHLX over SAR/SHR/SHL with variable shift BUT not
929   // immedidate shift, i.e. the following code is considered better
// Map variable shifts of a register by a GR8 count onto the BMI2
// three-operand shifts. The i8 count is widened to the full register class
// by placing it into the low 8 bits of an IMPLICIT_DEF via sub_8bit;
// only bits needed for the shift count are meaningful, matching the
// hardware's count masking. AddedComplexity = 1 nudges these above the
// default CL-based selection.
// NOTE(review): the "(INSERT_SUBREG" lines of each result pattern
// (original lines 944, 948, 953, 957, 962, 966) were dropped by this
// extract -- each SARX/SHRX/SHLX operand below should read
// "(INSERT_SUBREG (iN (IMPLICIT_DEF)), GR8:$src2, sub_8bit)"; verify
// against the complete file.
938   //       shlx %sil, %edi, %esi
941   let AddedComplexity = 1 in {
942     def : Pat<(sra GR32:$src1, GR8:$src2),
943               (SARX32rr GR32:$src1,
945                           (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
946     def : Pat<(sra GR64:$src1, GR8:$src2),
947               (SARX64rr GR64:$src1,
949                           (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
951     def : Pat<(srl GR32:$src1, GR8:$src2),
952               (SHRX32rr GR32:$src1,
954                           (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
955     def : Pat<(srl GR64:$src1, GR8:$src2),
956               (SHRX64rr GR64:$src1,
958                           (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
960     def : Pat<(shl GR32:$src1, GR8:$src2),
961               (SHLX32rr GR32:$src1,
963                           (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
964     def : Pat<(shl GR64:$src1, GR8:$src2),
965               (SHLX64rr GR64:$src1,
967                           (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
970   // Artificially lower the complexity so that we'll favor
// Memory-source variants of the patterns above: a variable shift of a
// loaded value becomes the "rm" form of SARX/SHRX/SHLX, again widening the
// GR8 count through sub_8bit. The negative AddedComplexity deliberately
// de-prioritizes these so that other selections are favored (see the
// original comment at line 970 and the example at 977-978).
// NOTE(review): as with the register patterns, the "(INSERT_SUBREG" lines
// (original lines 981, 985, 990, 994, 999, 1003) and the block's closing
// braces were dropped by this extract -- verify against the complete file.
977   //       shlx %al, (%ecx), %esi
978   let AddedComplexity = -20 in {
979     def : Pat<(sra (loadi32 addr:$src1), GR8:$src2),
980               (SARX32rm addr:$src1,
982                           (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
983     def : Pat<(sra (loadi64 addr:$src1), GR8:$src2),
984               (SARX64rm addr:$src1,
986                           (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
988     def : Pat<(srl (loadi32 addr:$src1), GR8:$src2),
989               (SHRX32rm addr:$src1,
991                           (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
992     def : Pat<(srl (loadi64 addr:$src1), GR8:$src2),
993               (SHRX64rm addr:$src1,
995                           (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
997     def : Pat<(shl (loadi32 addr:$src1), GR8:$src2),
998               (SHLX32rm addr:$src1,
1000                          (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
1001    def : Pat<(shl (loadi64 addr:$src1), GR8:$src2),
1002              (SHLX64rm addr:$src1,
1004                          (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;