1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512F
3 ; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
; Splat an i32 GPR argument into all 16 lanes of a zmm; expects a direct vpbroadcastd from %edi.
5 define <16 x i32> @_inreg16xi32(i32 %a) {
6 ; ALL-LABEL: _inreg16xi32:
8 ; ALL-NEXT: vpbroadcastd %edi, %zmm0
10 %b = insertelement <16 x i32> undef, i32 %a, i32 0
11 %c = shufflevector <16 x i32> %b, <16 x i32> undef, <16 x i32> zeroinitializer
; Splat an i64 GPR argument into all 8 lanes of a zmm; expects vpbroadcastq from %rdi.
15 define <8 x i64> @_inreg8xi64(i64 %a) {
16 ; ALL-LABEL: _inreg8xi64:
18 ; ALL-NEXT: vpbroadcastq %rdi, %zmm0
20 %b = insertelement <8 x i64> undef, i64 %a, i32 0
21 %c = shufflevector <8 x i64> %b, <8 x i64> undef, <8 x i32> zeroinitializer
; Broadcast lane 0 of a <4 x float> xmm argument to all 16 zmm lanes via vbroadcastss.
25 define <16 x float> @_ss16xfloat_v4(<4 x float> %a) {
26 ; ALL-LABEL: _ss16xfloat_v4:
28 ; ALL-NEXT: vbroadcastss %xmm0, %zmm0
30 %b = shufflevector <4 x float> %a, <4 x float> undef, <16 x i32> zeroinitializer
; Splat a scalar float argument (passed in %xmm0) into all 16 zmm lanes.
34 define <16 x float> @_inreg16xfloat(float %a) {
35 ; ALL-LABEL: _inreg16xfloat:
37 ; ALL-NEXT: vbroadcastss %xmm0, %zmm0
39 %b = insertelement <16 x float> undef, float %a, i32 0
40 %c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
; Merge-masked broadcast: %mask1 != 0 is materialized into %k1 (vpcmpneqd against a
; zeroed zmm), then vbroadcastss writes only the active lanes of %zmm1 (the passthru %i),
; which is moved to the return register.
44 define <16 x float> @_ss16xfloat_mask(float %a, <16 x float> %i, <16 x i32> %mask1) {
45 ; ALL-LABEL: _ss16xfloat_mask:
47 ; ALL-NEXT: vpxord %zmm3, %zmm3, %zmm3
48 ; ALL-NEXT: vpcmpneqd %zmm3, %zmm2, %k1
49 ; ALL-NEXT: vbroadcastss %xmm0, %zmm1 {%k1}
50 ; ALL-NEXT: vmovaps %zmm1, %zmm0
52 %mask = icmp ne <16 x i32> %mask1, zeroinitializer
53 %b = insertelement <16 x float> undef, float %a, i32 0
54 %c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
55 %r = select <16 x i1> %mask, <16 x float> %c, <16 x float> %i
; Zero-masked broadcast: same compare-to-%k1 pattern, but the select against
; zeroinitializer becomes the {z} (zeroing) form of vbroadcastss.
59 define <16 x float> @_ss16xfloat_maskz(float %a, <16 x i32> %mask1) {
60 ; ALL-LABEL: _ss16xfloat_maskz:
62 ; ALL-NEXT: vpxord %zmm2, %zmm2, %zmm2
63 ; ALL-NEXT: vpcmpneqd %zmm2, %zmm1, %k1
64 ; ALL-NEXT: vbroadcastss %xmm0, %zmm0 {%k1} {z}
66 %mask = icmp ne <16 x i32> %mask1, zeroinitializer
67 %b = insertelement <16 x float> undef, float %a, i32 0
68 %c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
69 %r = select <16 x i1> %mask, <16 x float> %c, <16 x float> zeroinitializer
; The scalar load feeding the splat is folded into a memory-operand vbroadcastss (%rdi).
73 define <16 x float> @_ss16xfloat_load(float* %a.ptr) {
74 ; ALL-LABEL: _ss16xfloat_load:
76 ; ALL-NEXT: vbroadcastss (%rdi), %zmm0
78 %a = load float, float* %a.ptr
79 %b = insertelement <16 x float> undef, float %a, i32 0
80 %c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
; Merge-masked broadcast-from-memory: load is folded into vbroadcastss (%rdi) with
; %k1 masking; inactive lanes keep the passthru %i already in %zmm0.
84 define <16 x float> @_ss16xfloat_mask_load(float* %a.ptr, <16 x float> %i, <16 x i32> %mask1) {
85 ; ALL-LABEL: _ss16xfloat_mask_load:
87 ; ALL-NEXT: vpxord %zmm2, %zmm2, %zmm2
88 ; ALL-NEXT: vpcmpneqd %zmm2, %zmm1, %k1
89 ; ALL-NEXT: vbroadcastss (%rdi), %zmm0 {%k1}
91 %a = load float, float* %a.ptr
92 %mask = icmp ne <16 x i32> %mask1, zeroinitializer
93 %b = insertelement <16 x float> undef, float %a, i32 0
94 %c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
95 %r = select <16 x i1> %mask, <16 x float> %c, <16 x float> %i
; Zero-masked broadcast-from-memory: folded load plus {z} zeroing of inactive lanes.
99 define <16 x float> @_ss16xfloat_maskz_load(float* %a.ptr, <16 x i32> %mask1) {
100 ; ALL-LABEL: _ss16xfloat_maskz_load:
102 ; ALL-NEXT: vpxord %zmm1, %zmm1, %zmm1
103 ; ALL-NEXT: vpcmpneqd %zmm1, %zmm0, %k1
104 ; ALL-NEXT: vbroadcastss (%rdi), %zmm0 {%k1} {z}
106 %a = load float, float* %a.ptr
107 %mask = icmp ne <16 x i32> %mask1, zeroinitializer
108 %b = insertelement <16 x float> undef, float %a, i32 0
109 %c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
110 %r = select <16 x i1> %mask, <16 x float> %c, <16 x float> zeroinitializer
; Splat a scalar double argument (in %xmm0) into all 8 zmm lanes via vbroadcastsd.
114 define <8 x double> @_inreg8xdouble(double %a) {
115 ; ALL-LABEL: _inreg8xdouble:
117 ; ALL-NEXT: vbroadcastsd %xmm0, %zmm0
119 %b = insertelement <8 x double> undef, double %a, i32 0
120 %c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
; Merge-masked f64 broadcast. The <8 x i32> mask arrives in a ymm (hence the
; kill note and the ymm-sized vpxor), but the compare is done at zmm width to
; produce %k1 before the masked vbroadcastsd into the passthru register.
124 define <8 x double> @_sd8xdouble_mask(double %a, <8 x double> %i, <8 x i32> %mask1) {
125 ; ALL-LABEL: _sd8xdouble_mask:
127 ; ALL-NEXT: # kill: %YMM2<def> %YMM2<kill> %ZMM2<def>
128 ; ALL-NEXT: vpxor %ymm3, %ymm3, %ymm3
129 ; ALL-NEXT: vpcmpneqd %zmm3, %zmm2, %k1
130 ; ALL-NEXT: vbroadcastsd %xmm0, %zmm1 {%k1}
131 ; ALL-NEXT: vmovaps %zmm1, %zmm0
133 %mask = icmp ne <8 x i32> %mask1, zeroinitializer
134 %b = insertelement <8 x double> undef, double %a, i32 0
135 %c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
136 %r = select <8 x i1> %mask, <8 x double> %c, <8 x double> %i
; Zero-masked f64 broadcast: ymm-sized mask widened for the zmm compare, then
; vbroadcastsd with {z} zeroes the inactive lanes.
140 define <8 x double> @_sd8xdouble_maskz(double %a, <8 x i32> %mask1) {
141 ; ALL-LABEL: _sd8xdouble_maskz:
143 ; ALL-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
144 ; ALL-NEXT: vpxor %ymm2, %ymm2, %ymm2
145 ; ALL-NEXT: vpcmpneqd %zmm2, %zmm1, %k1
146 ; ALL-NEXT: vbroadcastsd %xmm0, %zmm0 {%k1} {z}
148 %mask = icmp ne <8 x i32> %mask1, zeroinitializer
149 %b = insertelement <8 x double> undef, double %a, i32 0
150 %c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
151 %r = select <8 x i1> %mask, <8 x double> %c, <8 x double> zeroinitializer
; The scalar double load is folded into a memory-operand vbroadcastsd (%rdi).
155 define <8 x double> @_sd8xdouble_load(double* %a.ptr) {
156 ; ALL-LABEL: _sd8xdouble_load:
158 ; ALL-NEXT: vbroadcastsd (%rdi), %zmm0
160 %a = load double, double* %a.ptr
161 %b = insertelement <8 x double> undef, double %a, i32 0
162 %c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
; Merge-masked f64 broadcast-from-memory: folded load plus %k1 merge into the
; passthru %i already in %zmm0.
166 define <8 x double> @_sd8xdouble_mask_load(double* %a.ptr, <8 x double> %i, <8 x i32> %mask1) {
167 ; ALL-LABEL: _sd8xdouble_mask_load:
169 ; ALL-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
170 ; ALL-NEXT: vpxor %ymm2, %ymm2, %ymm2
171 ; ALL-NEXT: vpcmpneqd %zmm2, %zmm1, %k1
172 ; ALL-NEXT: vbroadcastsd (%rdi), %zmm0 {%k1}
174 %a = load double, double* %a.ptr
175 %mask = icmp ne <8 x i32> %mask1, zeroinitializer
176 %b = insertelement <8 x double> undef, double %a, i32 0
177 %c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
178 %r = select <8 x i1> %mask, <8 x double> %c, <8 x double> %i
; Zero-masked f64 broadcast-from-memory: folded load with {z} zeroing.
182 define <8 x double> @_sd8xdouble_maskz_load(double* %a.ptr, <8 x i32> %mask1) {
183 ; ALL-LABEL: _sd8xdouble_maskz_load:
185 ; ALL-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
186 ; ALL-NEXT: vpxor %ymm1, %ymm1, %ymm1
187 ; ALL-NEXT: vpcmpneqd %zmm1, %zmm0, %k1
188 ; ALL-NEXT: vbroadcastsd (%rdi), %zmm0 {%k1} {z}
190 %a = load double, double* %a.ptr
191 %mask = icmp ne <8 x i32> %mask1, zeroinitializer
192 %b = insertelement <8 x double> undef, double %a, i32 0
193 %c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
194 %r = select <8 x i1> %mask, <8 x double> %c, <8 x double> zeroinitializer
; Lane-0 splat of a v16i32; lowered to the FP-domain vbroadcastss per the CHECK line.
198 define <16 x i32> @_xmm16xi32(<16 x i32> %a) {
199 ; ALL-LABEL: _xmm16xi32:
201 ; ALL-NEXT: vbroadcastss %xmm0, %zmm0
203 %b = shufflevector <16 x i32> %a, <16 x i32> undef, <16 x i32> zeroinitializer
; Lane-0 splat of a v16f32 collapses to a single vbroadcastss from the low xmm.
207 define <16 x float> @_xmm16xfloat(<16 x float> %a) {
208 ; ALL-LABEL: _xmm16xfloat:
210 ; ALL-NEXT: vbroadcastss %xmm0, %zmm0
212 %b = shufflevector <16 x float> %a, <16 x float> undef, <16 x i32> zeroinitializer
; Degenerate select of sext'd i1 vectors, with an unordered compare of undef input.
; NOTE(review): the expected asm (double masked vmovdqa32 with knotw between) reflects
; how the select of the two sext values is currently lowered — regenerate with
; update_llc_test_checks.py rather than hand-editing if lowering changes.
216 define <16 x i32> @test_vbroadcast() {
217 ; ALL-LABEL: test_vbroadcast:
218 ; ALL: # BB#0: # %entry
219 ; ALL-NEXT: vpxord %zmm0, %zmm0, %zmm0
220 ; ALL-NEXT: vcmpunordps %zmm0, %zmm0, %k1
221 ; ALL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
222 ; ALL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
223 ; ALL-NEXT: knotw %k1, %k1
224 ; ALL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
227 %0 = sext <16 x i1> zeroinitializer to <16 x i32>
228 %1 = fcmp uno <16 x float> undef, zeroinitializer
229 %2 = sext <16 x i1> %1 to <16 x i32>
230 %3 = select <16 x i1> %1, <16 x i32> %0, <16 x i32> %2
234 ; We implement the set1 intrinsics with vector initializers. Verify that the
235 ; IR generated will produce broadcasts at the end.
; A chain of 8 insertelements of the same double must be recognized as a splat
; and collapse to one vbroadcastsd.
236 define <8 x double> @test_set1_pd(double %d) #2 {
237 ; ALL-LABEL: test_set1_pd:
238 ; ALL: # BB#0: # %entry
239 ; ALL-NEXT: vbroadcastsd %xmm0, %zmm0
242 %vecinit.i = insertelement <8 x double> undef, double %d, i32 0
243 %vecinit1.i = insertelement <8 x double> %vecinit.i, double %d, i32 1
244 %vecinit2.i = insertelement <8 x double> %vecinit1.i, double %d, i32 2
245 %vecinit3.i = insertelement <8 x double> %vecinit2.i, double %d, i32 3
246 %vecinit4.i = insertelement <8 x double> %vecinit3.i, double %d, i32 4
247 %vecinit5.i = insertelement <8 x double> %vecinit4.i, double %d, i32 5
248 %vecinit6.i = insertelement <8 x double> %vecinit5.i, double %d, i32 6
249 %vecinit7.i = insertelement <8 x double> %vecinit6.i, double %d, i32 7
250 ret <8 x double> %vecinit7.i
; set1-style insertelement chain of the same i64 must collapse to vpbroadcastq from %rdi.
253 define <8 x i64> @test_set1_epi64(i64 %d) #2 {
254 ; ALL-LABEL: test_set1_epi64:
255 ; ALL: # BB#0: # %entry
256 ; ALL-NEXT: vpbroadcastq %rdi, %zmm0
259 %vecinit.i = insertelement <8 x i64> undef, i64 %d, i32 0
260 %vecinit1.i = insertelement <8 x i64> %vecinit.i, i64 %d, i32 1
261 %vecinit2.i = insertelement <8 x i64> %vecinit1.i, i64 %d, i32 2
262 %vecinit3.i = insertelement <8 x i64> %vecinit2.i, i64 %d, i32 3
263 %vecinit4.i = insertelement <8 x i64> %vecinit3.i, i64 %d, i32 4
264 %vecinit5.i = insertelement <8 x i64> %vecinit4.i, i64 %d, i32 5
265 %vecinit6.i = insertelement <8 x i64> %vecinit5.i, i64 %d, i32 6
266 %vecinit7.i = insertelement <8 x i64> %vecinit6.i, i64 %d, i32 7
267 ret <8 x i64> %vecinit7.i
; set1-style chain of 16 insertelements of the same float must collapse to vbroadcastss.
270 define <16 x float> @test_set1_ps(float %f) #2 {
271 ; ALL-LABEL: test_set1_ps:
272 ; ALL: # BB#0: # %entry
273 ; ALL-NEXT: vbroadcastss %xmm0, %zmm0
276 %vecinit.i = insertelement <16 x float> undef, float %f, i32 0
277 %vecinit1.i = insertelement <16 x float> %vecinit.i, float %f, i32 1
278 %vecinit2.i = insertelement <16 x float> %vecinit1.i, float %f, i32 2
279 %vecinit3.i = insertelement <16 x float> %vecinit2.i, float %f, i32 3
280 %vecinit4.i = insertelement <16 x float> %vecinit3.i, float %f, i32 4
281 %vecinit5.i = insertelement <16 x float> %vecinit4.i, float %f, i32 5
282 %vecinit6.i = insertelement <16 x float> %vecinit5.i, float %f, i32 6
283 %vecinit7.i = insertelement <16 x float> %vecinit6.i, float %f, i32 7
284 %vecinit8.i = insertelement <16 x float> %vecinit7.i, float %f, i32 8
285 %vecinit9.i = insertelement <16 x float> %vecinit8.i, float %f, i32 9
286 %vecinit10.i = insertelement <16 x float> %vecinit9.i, float %f, i32 10
287 %vecinit11.i = insertelement <16 x float> %vecinit10.i, float %f, i32 11
288 %vecinit12.i = insertelement <16 x float> %vecinit11.i, float %f, i32 12
289 %vecinit13.i = insertelement <16 x float> %vecinit12.i, float %f, i32 13
290 %vecinit14.i = insertelement <16 x float> %vecinit13.i, float %f, i32 14
291 %vecinit15.i = insertelement <16 x float> %vecinit14.i, float %f, i32 15
292 ret <16 x float> %vecinit15.i
; set1-style chain of 16 insertelements of the same i32 must collapse to vpbroadcastd from %edi.
295 define <16 x i32> @test_set1_epi32(i32 %f) #2 {
296 ; ALL-LABEL: test_set1_epi32:
297 ; ALL: # BB#0: # %entry
298 ; ALL-NEXT: vpbroadcastd %edi, %zmm0
301 %vecinit.i = insertelement <16 x i32> undef, i32 %f, i32 0
302 %vecinit1.i = insertelement <16 x i32> %vecinit.i, i32 %f, i32 1
303 %vecinit2.i = insertelement <16 x i32> %vecinit1.i, i32 %f, i32 2
304 %vecinit3.i = insertelement <16 x i32> %vecinit2.i, i32 %f, i32 3
305 %vecinit4.i = insertelement <16 x i32> %vecinit3.i, i32 %f, i32 4
306 %vecinit5.i = insertelement <16 x i32> %vecinit4.i, i32 %f, i32 5
307 %vecinit6.i = insertelement <16 x i32> %vecinit5.i, i32 %f, i32 6
308 %vecinit7.i = insertelement <16 x i32> %vecinit6.i, i32 %f, i32 7
309 %vecinit8.i = insertelement <16 x i32> %vecinit7.i, i32 %f, i32 8
310 %vecinit9.i = insertelement <16 x i32> %vecinit8.i, i32 %f, i32 9
311 %vecinit10.i = insertelement <16 x i32> %vecinit9.i, i32 %f, i32 10
312 %vecinit11.i = insertelement <16 x i32> %vecinit10.i, i32 %f, i32 11
313 %vecinit12.i = insertelement <16 x i32> %vecinit11.i, i32 %f, i32 12
314 %vecinit13.i = insertelement <16 x i32> %vecinit12.i, i32 %f, i32 13
315 %vecinit14.i = insertelement <16 x i32> %vecinit13.i, i32 %f, i32 14
316 %vecinit15.i = insertelement <16 x i32> %vecinit14.i, i32 %f, i32 15
317 ret <16 x i32> %vecinit15.i
320 ; We implement the scalar broadcast intrinsics with vector initializers.
321 ; Verify that the IR generated will produce the broadcast at the end.
; extractelement of lane 0 feeding an insertelement chain must fold to one vbroadcastsd.
322 define <8 x double> @test_mm512_broadcastsd_pd(<2 x double> %a) {
323 ; ALL-LABEL: test_mm512_broadcastsd_pd:
324 ; ALL: # BB#0: # %entry
325 ; ALL-NEXT: vbroadcastsd %xmm0, %zmm0
328 %0 = extractelement <2 x double> %a, i32 0
329 %vecinit.i = insertelement <8 x double> undef, double %0, i32 0
330 %vecinit1.i = insertelement <8 x double> %vecinit.i, double %0, i32 1
331 %vecinit2.i = insertelement <8 x double> %vecinit1.i, double %0, i32 2
332 %vecinit3.i = insertelement <8 x double> %vecinit2.i, double %0, i32 3
333 %vecinit4.i = insertelement <8 x double> %vecinit3.i, double %0, i32 4
334 %vecinit5.i = insertelement <8 x double> %vecinit4.i, double %0, i32 5
335 %vecinit6.i = insertelement <8 x double> %vecinit5.i, double %0, i32 6
336 %vecinit7.i = insertelement <8 x double> %vecinit6.i, double %0, i32 7
337 ret <8 x double> %vecinit7.i
; Splat lane 0 of a v8f32 source into a v16f32 result: single vbroadcastss.
340 define <16 x float> @test1(<8 x float>%a) {
343 ; ALL-NEXT: vbroadcastss %xmm0, %zmm0
345 %res = shufflevector <8 x float> %a, <8 x float> undef, <16 x i32> zeroinitializer
; Splat lane 0 of a v4f64 source into a v8f64 result: single vbroadcastsd.
349 define <8 x double> @test2(<4 x double>%a) {
352 ; ALL-NEXT: vbroadcastsd %xmm0, %zmm0
354 %res = shufflevector <4 x double> %a, <4 x double> undef, <8 x i32> zeroinitializer
; Byte splat to 64 lanes. Without AVX512BW there is no zmm vpbroadcastb, so
; AVX512F broadcasts to a ymm and copies it into the second ymm half; AVX512BW
; emits a single zmm vpbroadcastb.
358 define <64 x i8> @_invec32xi8(<32 x i8>%a) {
359 ; AVX512F-LABEL: _invec32xi8:
361 ; AVX512F-NEXT: vpbroadcastb %xmm0, %ymm0
362 ; AVX512F-NEXT: vmovdqa %ymm0, %ymm1
365 ; AVX512BW-LABEL: _invec32xi8:
367 ; AVX512BW-NEXT: vpbroadcastb %xmm0, %zmm0
368 ; AVX512BW-NEXT: retq
369 %res = shufflevector <32 x i8> %a, <32 x i8> undef, <64 x i32> zeroinitializer
; Word splat to 32 lanes: same AVX512F ymm-pair fallback vs. AVX512BW zmm vpbroadcastw.
373 define <32 x i16> @_invec16xi16(<16 x i16>%a) {
374 ; AVX512F-LABEL: _invec16xi16:
376 ; AVX512F-NEXT: vpbroadcastw %xmm0, %ymm0
377 ; AVX512F-NEXT: vmovdqa %ymm0, %ymm1
380 ; AVX512BW-LABEL: _invec16xi16:
382 ; AVX512BW-NEXT: vpbroadcastw %xmm0, %zmm0
383 ; AVX512BW-NEXT: retq
384 %res = shufflevector <16 x i16> %a, <16 x i16> undef, <32 x i32> zeroinitializer
; i32 splat from a v8i32 source to 16 lanes; lowered with FP-domain vbroadcastss.
388 define <16 x i32> @_invec8xi32(<8 x i32>%a) {
389 ; ALL-LABEL: _invec8xi32:
391 ; ALL-NEXT: vbroadcastss %xmm0, %zmm0
393 %res = shufflevector <8 x i32> %a, <8 x i32> undef, <16 x i32> zeroinitializer
; i64 splat from a v4i64 source to 8 lanes; lowered with FP-domain vbroadcastsd.
397 define <8 x i64> @_invec4xi64(<4 x i64>%a) {
398 ; ALL-LABEL: _invec4xi64:
400 ; ALL-NEXT: vbroadcastsd %xmm0, %zmm0
402 %res = shufflevector <4 x i64> %a, <4 x i64> undef, <8 x i32> zeroinitializer
406 declare void @func_f32(float)
; The fadd result is spilled to the stack across the call to @func_f32, and the
; 4-byte reload is folded directly into a memory-operand vbroadcastss.
407 define <16 x float> @broadcast_ss_spill(float %x) {
408 ; ALL-LABEL: broadcast_ss_spill:
410 ; ALL-NEXT: pushq %rax
412 ; ALL-NEXT: .cfi_def_cfa_offset 16
413 ; ALL-NEXT: vaddss %xmm0, %xmm0, %xmm0
414 ; ALL-NEXT: vmovss %xmm0, {{[0-9]+}}(%rsp) # 4-byte Spill
415 ; ALL-NEXT: callq func_f32
416 ; ALL-NEXT: vbroadcastss {{[0-9]+}}(%rsp), %zmm0 # 4-byte Folded Reload
417 ; ALL-NEXT: popq %rax
419 %a = fadd float %x, %x
420 call void @func_f32(float %a)
421 %b = insertelement <16 x float> undef, float %a, i32 0
422 %c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
426 declare void @func_f64(double)
; Double version of the spill test: the 8-byte stack reload is folded into vbroadcastsd.
428 define <8 x double> @broadcast_sd_spill(double %x) {
429 ; ALL-LABEL: broadcast_sd_spill:
430 ; ALL-NEXT: pushq %rax
432 ; ALL-NEXT: .cfi_def_cfa_offset 16
433 ; ALL-NEXT: vaddsd %xmm0, %xmm0, %xmm0
434 ; ALL-NEXT: vmovsd %xmm0, (%rsp) # 8-byte Spill
435 ; ALL-NEXT: callq func_f64
436 ; ALL-NEXT: vbroadcastsd (%rsp), %zmm0 # 8-byte Folded Reload
437 ; ALL-NEXT: popq %rax
439 %a = fadd double %x, %x
440 call void @func_f64(double %a)
441 %b = insertelement <8 x double> undef, double %a, i32 0
442 %c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer