; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X32-SSE
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X32-AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64-SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X64-AVX
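
; Check that stores tagged with !nontemporal metadata are selected as streaming
; stores (movntps/movntpd/movntdq for vectors, movnti for i32/i64) rather than
; ordinary stores, using the VEX-encoded forms for the AVX runs, on both the
; 32-bit and 64-bit targets above.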
define void @f(<4 x float> %A, i8* %B, <2 x double> %C, i32 %D, <2 x i64> %E, <4 x i32> %F, <8 x i16> %G, <16 x i8> %H, i64 %I) nounwind {
; X32-SSE-LABEL: f:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    pushl %ebp
; X32-SSE-NEXT:    movl %esp, %ebp
; X32-SSE-NEXT:    pushl %esi
; X32-SSE-NEXT:    andl $-16, %esp
; X32-SSE-NEXT:    subl $16, %esp
; X32-SSE-NEXT:    movl 72(%ebp), %eax
; X32-SSE-NEXT:    movl 76(%ebp), %ecx
; X32-SSE-NEXT:    movl 12(%ebp), %edx
; X32-SSE-NEXT:    movdqa 56(%ebp), %xmm3
; X32-SSE-NEXT:    movdqa 40(%ebp), %xmm4
; X32-SSE-NEXT:    movdqa 24(%ebp), %xmm5
; X32-SSE-NEXT:    movl 8(%ebp), %esi
; X32-SSE-NEXT:    addps .LCPI0_0, %xmm0
; X32-SSE-NEXT:    movntps %xmm0, (%esi)
; X32-SSE-NEXT:    paddq .LCPI0_1, %xmm2
; X32-SSE-NEXT:    movntdq %xmm2, (%esi)
; X32-SSE-NEXT:    addpd .LCPI0_2, %xmm1
; X32-SSE-NEXT:    movntpd %xmm1, (%esi)
; X32-SSE-NEXT:    paddd .LCPI0_3, %xmm5
; X32-SSE-NEXT:    movntdq %xmm5, (%esi)
; X32-SSE-NEXT:    paddw .LCPI0_4, %xmm4
; X32-SSE-NEXT:    movntdq %xmm4, (%esi)
; X32-SSE-NEXT:    paddb .LCPI0_5, %xmm3
; X32-SSE-NEXT:    movntdq %xmm3, (%esi)
; X32-SSE-NEXT:    movntil %edx, (%esi)
; X32-SSE-NEXT:    movntil %ecx, 4(%esi)
; X32-SSE-NEXT:    movntil %eax, (%esi)
; X32-SSE-NEXT:    leal -4(%ebp), %esp
; X32-SSE-NEXT:    popl %esi
; X32-SSE-NEXT:    popl %ebp
; X32-SSE-NEXT:    retl
;
; X32-AVX-LABEL: f:
; X32-AVX:       # %bb.0:
; X32-AVX-NEXT:    pushl %ebp
; X32-AVX-NEXT:    movl %esp, %ebp
; X32-AVX-NEXT:    pushl %esi
; X32-AVX-NEXT:    andl $-16, %esp
; X32-AVX-NEXT:    subl $16, %esp
; X32-AVX-NEXT:    movl 72(%ebp), %eax
; X32-AVX-NEXT:    movl 76(%ebp), %ecx
; X32-AVX-NEXT:    movl 12(%ebp), %edx
; X32-AVX-NEXT:    vmovdqa 56(%ebp), %xmm3
; X32-AVX-NEXT:    vmovdqa 40(%ebp), %xmm4
; X32-AVX-NEXT:    vmovdqa 24(%ebp), %xmm5
; X32-AVX-NEXT:    movl 8(%ebp), %esi
; X32-AVX-NEXT:    vaddps .LCPI0_0, %xmm0, %xmm0
; X32-AVX-NEXT:    vmovntps %xmm0, (%esi)
; X32-AVX-NEXT:    vpaddq .LCPI0_1, %xmm2, %xmm0
; X32-AVX-NEXT:    vmovntdq %xmm0, (%esi)
; X32-AVX-NEXT:    vaddpd .LCPI0_2, %xmm1, %xmm0
; X32-AVX-NEXT:    vmovntpd %xmm0, (%esi)
; X32-AVX-NEXT:    vpaddd .LCPI0_3, %xmm5, %xmm0
; X32-AVX-NEXT:    vmovntdq %xmm0, (%esi)
; X32-AVX-NEXT:    vpaddw .LCPI0_4, %xmm4, %xmm0
; X32-AVX-NEXT:    vmovntdq %xmm0, (%esi)
; X32-AVX-NEXT:    vpaddb .LCPI0_5, %xmm3, %xmm0
; X32-AVX-NEXT:    vmovntdq %xmm0, (%esi)
; X32-AVX-NEXT:    movntil %edx, (%esi)
; X32-AVX-NEXT:    movntil %ecx, 4(%esi)
; X32-AVX-NEXT:    movntil %eax, (%esi)
; X32-AVX-NEXT:    leal -4(%ebp), %esp
; X32-AVX-NEXT:    popl %esi
; X32-AVX-NEXT:    popl %ebp
; X32-AVX-NEXT:    retl
;
; X64-SSE-LABEL: f:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    addps {{.*}}(%rip), %xmm0
; X64-SSE-NEXT:    movntps %xmm0, (%rdi)
; X64-SSE-NEXT:    paddq {{.*}}(%rip), %xmm2
; X64-SSE-NEXT:    movntdq %xmm2, (%rdi)
; X64-SSE-NEXT:    addpd {{.*}}(%rip), %xmm1
; X64-SSE-NEXT:    movntpd %xmm1, (%rdi)
; X64-SSE-NEXT:    paddd {{.*}}(%rip), %xmm3
; X64-SSE-NEXT:    movntdq %xmm3, (%rdi)
; X64-SSE-NEXT:    paddw {{.*}}(%rip), %xmm4
; X64-SSE-NEXT:    movntdq %xmm4, (%rdi)
; X64-SSE-NEXT:    paddb {{.*}}(%rip), %xmm5
; X64-SSE-NEXT:    movntdq %xmm5, (%rdi)
; X64-SSE-NEXT:    movntil %esi, (%rdi)
; X64-SSE-NEXT:    movntiq %rdx, (%rdi)
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: f:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vaddps {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT:    vmovntps %xmm0, (%rdi)
; X64-AVX-NEXT:    vpaddq {{.*}}(%rip), %xmm2, %xmm0
; X64-AVX-NEXT:    vmovntdq %xmm0, (%rdi)
; X64-AVX-NEXT:    vaddpd {{.*}}(%rip), %xmm1, %xmm0
; X64-AVX-NEXT:    vmovntpd %xmm0, (%rdi)
; X64-AVX-NEXT:    vpaddd {{.*}}(%rip), %xmm3, %xmm0
; X64-AVX-NEXT:    vmovntdq %xmm0, (%rdi)
; X64-AVX-NEXT:    vpaddw {{.*}}(%rip), %xmm4, %xmm0
; X64-AVX-NEXT:    vmovntdq %xmm0, (%rdi)
; X64-AVX-NEXT:    vpaddb {{.*}}(%rip), %xmm5, %xmm0
; X64-AVX-NEXT:    vmovntdq %xmm0, (%rdi)
; X64-AVX-NEXT:    movntil %esi, (%rdi)
; X64-AVX-NEXT:    movntiq %rdx, (%rdi)
; X64-AVX-NEXT:    retq
  %cast = bitcast i8* %B to <4 x float>*
  %A2 = fadd <4 x float> %A, <float 1.0, float 2.0, float 3.0, float 4.0>
  store <4 x float> %A2, <4 x float>* %cast, align 16, !nontemporal !0
  %cast1 = bitcast i8* %B to <2 x i64>*
  %E2 = add <2 x i64> %E, <i64 1, i64 2>
  store <2 x i64> %E2, <2 x i64>* %cast1, align 16, !nontemporal !0
  %cast2 = bitcast i8* %B to <2 x double>*
  %C2 = fadd <2 x double> %C, <double 1.0, double 2.0>
  store <2 x double> %C2, <2 x double>* %cast2, align 16, !nontemporal !0
  %cast3 = bitcast i8* %B to <4 x i32>*
  %F2 = add <4 x i32> %F, <i32 1, i32 2, i32 3, i32 4>
  store <4 x i32> %F2, <4 x i32>* %cast3, align 16, !nontemporal !0
  %cast4 = bitcast i8* %B to <8 x i16>*
  %G2 = add <8 x i16> %G, <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>
  store <8 x i16> %G2, <8 x i16>* %cast4, align 16, !nontemporal !0
  %cast5 = bitcast i8* %B to <16 x i8>*
  %H2 = add <16 x i8> %H, <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>
  store <16 x i8> %H2, <16 x i8>* %cast5, align 16, !nontemporal !0
  %cast6 = bitcast i8* %B to i32*
  store i32 %D, i32* %cast6, align 1, !nontemporal !0
  %cast7 = bitcast i8* %B to i64*
  store i64 %I, i64* %cast7, align 1, !nontemporal !0
  ret void
}

!0 = !{i32 1}