/*===---- avxintrin.h - AVX intrinsics -------------------------------------===
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef __IMMINTRIN_H
#error "Never use <avxintrin.h> directly; include <immintrin.h> instead."
#endif

#ifndef __AVXINTRIN_H
#define __AVXINTRIN_H

typedef double __v4df __attribute__ ((__vector_size__ (32)));
typedef float __v8sf __attribute__ ((__vector_size__ (32)));
typedef long long __v4di __attribute__ ((__vector_size__ (32)));
typedef int __v8si __attribute__ ((__vector_size__ (32)));
typedef short __v16hi __attribute__ ((__vector_size__ (32)));
typedef char __v32qi __attribute__ ((__vector_size__ (32)));

typedef float __m256 __attribute__ ((__vector_size__ (32)));
typedef double __m256d __attribute__((__vector_size__(32)));
typedef long long __m256i __attribute__((__vector_size__(32)));

/* Arithmetic */
static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_add_pd(__m256d __a, __m256d __b)
{
  return __a+__b;
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_add_ps(__m256 __a, __m256 __b)
{
  return __a+__b;
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_sub_pd(__m256d __a, __m256d __b)
{
  return __a-__b;
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_sub_ps(__m256 __a, __m256 __b)
{
  return __a-__b;
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_addsub_pd(__m256d __a, __m256d __b)
{
  return (__m256d)__builtin_ia32_addsubpd256((__v4df)__a, (__v4df)__b);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_addsub_ps(__m256 __a, __m256 __b)
{
  return (__m256)__builtin_ia32_addsubps256((__v8sf)__a, (__v8sf)__b);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_div_pd(__m256d __a, __m256d __b)
{
  return __a / __b;
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_div_ps(__m256 __a, __m256 __b)
{
  return __a / __b;
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_max_pd(__m256d __a, __m256d __b)
{
  return (__m256d)__builtin_ia32_maxpd256((__v4df)__a, (__v4df)__b);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_max_ps(__m256 __a, __m256 __b)
{
  return (__m256)__builtin_ia32_maxps256((__v8sf)__a, (__v8sf)__b);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_min_pd(__m256d __a, __m256d __b)
{
  return (__m256d)__builtin_ia32_minpd256((__v4df)__a, (__v4df)__b);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_min_ps(__m256 __a, __m256 __b)
{
  return (__m256)__builtin_ia32_minps256((__v8sf)__a, (__v8sf)__b);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_mul_pd(__m256d __a, __m256d __b)
{
  return __a * __b;
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_mul_ps(__m256 __a, __m256 __b)
{
  return __a * __b;
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_sqrt_pd(__m256d __a)
{
  return (__m256d)__builtin_ia32_sqrtpd256((__v4df)__a);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_sqrt_ps(__m256 __a)
{
  return (__m256)__builtin_ia32_sqrtps256((__v8sf)__a);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_rsqrt_ps(__m256 __a)
{
  return (__m256)__builtin_ia32_rsqrtps256((__v8sf)__a);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_rcp_ps(__m256 __a)
{
  return (__m256)__builtin_ia32_rcpps256((__v8sf)__a);
}

#define _mm256_round_pd(V, M) __extension__ ({ \
    __m256d __V = (V); \
    (__m256d)__builtin_ia32_roundpd256((__v4df)__V, (M)); })

#define _mm256_round_ps(V, M) __extension__ ({ \
    __m256 __V = (V); \
    (__m256)__builtin_ia32_roundps256((__v8sf)__V, (M)); })

#define _mm256_ceil_pd(V)  _mm256_round_pd((V), _MM_FROUND_CEIL)
#define _mm256_floor_pd(V) _mm256_round_pd((V), _MM_FROUND_FLOOR)
#define _mm256_ceil_ps(V)  _mm256_round_ps((V), _MM_FROUND_CEIL)
#define _mm256_floor_ps(V) _mm256_round_ps((V), _MM_FROUND_FLOOR)
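
/* Usage sketch (illustrative; not part of the original header): the rounding
 * control M is built from the _MM_FROUND_* macros defined in <smmintrin.h>,
 * e.g. rounding to nearest-even without raising a precision exception:
 *
 *   __m256d __v = _mm256_set1_pd(2.5);
 *   __m256d __r = _mm256_round_pd(__v, _MM_FROUND_TO_NEAREST_INT |
 *                                      _MM_FROUND_NO_EXC);
 */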

/* Logical */
static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_and_pd(__m256d __a, __m256d __b)
{
  return (__m256d)((__v4di)__a & (__v4di)__b);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_and_ps(__m256 __a, __m256 __b)
{
  return (__m256)((__v8si)__a & (__v8si)__b);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_andnot_pd(__m256d __a, __m256d __b)
{
  return (__m256d)(~(__v4di)__a & (__v4di)__b);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_andnot_ps(__m256 __a, __m256 __b)
{
  return (__m256)(~(__v8si)__a & (__v8si)__b);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_or_pd(__m256d __a, __m256d __b)
{
  return (__m256d)((__v4di)__a | (__v4di)__b);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_or_ps(__m256 __a, __m256 __b)
{
  return (__m256)((__v8si)__a | (__v8si)__b);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_xor_pd(__m256d __a, __m256d __b)
{
  return (__m256d)((__v4di)__a ^ (__v4di)__b);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_xor_ps(__m256 __a, __m256 __b)
{
  return (__m256)((__v8si)__a ^ (__v8si)__b);
}
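
/* Note: the andnot forms complement their *first* operand, computing
 * ~__a & __b. A common idiom (illustrative sketch) is clearing sign bits to
 * get a vector fabs():
 *
 *   __m256d __abs = _mm256_andnot_pd(_mm256_set1_pd(-0.0), __x);
 */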

/* Horizontal arithmetic */
static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_hadd_pd(__m256d __a, __m256d __b)
{
  return (__m256d)__builtin_ia32_haddpd256((__v4df)__a, (__v4df)__b);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_hadd_ps(__m256 __a, __m256 __b)
{
  return (__m256)__builtin_ia32_haddps256((__v8sf)__a, (__v8sf)__b);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_hsub_pd(__m256d __a, __m256d __b)
{
  return (__m256d)__builtin_ia32_hsubpd256((__v4df)__a, (__v4df)__b);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_hsub_ps(__m256 __a, __m256 __b)
{
  return (__m256)__builtin_ia32_hsubps256((__v8sf)__a, (__v8sf)__b);
}

/* Vector permutations */
static __inline __m128d __attribute__((__always_inline__, __nodebug__))
_mm_permutevar_pd(__m128d __a, __m128i __c)
{
  return (__m128d)__builtin_ia32_vpermilvarpd((__v2df)__a, (__v2di)__c);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_permutevar_pd(__m256d __a, __m256i __c)
{
  return (__m256d)__builtin_ia32_vpermilvarpd256((__v4df)__a, (__v4di)__c);
}

static __inline __m128 __attribute__((__always_inline__, __nodebug__))
_mm_permutevar_ps(__m128 __a, __m128i __c)
{
  return (__m128)__builtin_ia32_vpermilvarps((__v4sf)__a, (__v4si)__c);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_permutevar_ps(__m256 __a, __m256i __c)
{
  return (__m256)__builtin_ia32_vpermilvarps256((__v8sf)__a, (__v8si)__c);
}

#define _mm_permute_pd(A, C) __extension__ ({ \
  __m128d __A = (A); \
  (__m128d)__builtin_shufflevector((__v2df)__A, (__v2df) _mm_setzero_pd(), \
                                   (C) & 0x1, ((C) & 0x2) >> 1); })

#define _mm256_permute_pd(A, C) __extension__ ({ \
  __m256d __A = (A); \
  (__m256d)__builtin_shufflevector((__v4df)__A, (__v4df) _mm256_setzero_pd(), \
                                   (C) & 0x1, ((C) & 0x2) >> 1, \
                                   2 + (((C) & 0x4) >> 2), \
                                   2 + (((C) & 0x8) >> 3)); })

#define _mm_permute_ps(A, C) __extension__ ({ \
  __m128 __A = (A); \
  (__m128)__builtin_shufflevector((__v4sf)__A, (__v4sf) _mm_setzero_ps(), \
                                  (C) & 0x3, ((C) & 0xc) >> 2, \
                                  ((C) & 0x30) >> 4, ((C) & 0xc0) >> 6); })

#define _mm256_permute_ps(A, C) __extension__ ({ \
  __m256 __A = (A); \
  (__m256)__builtin_shufflevector((__v8sf)__A, (__v8sf) _mm256_setzero_ps(), \
                                  (C) & 0x3, ((C) & 0xc) >> 2, \
                                  ((C) & 0x30) >> 4, ((C) & 0xc0) >> 6, \
                                  4 + (((C) & 0x03) >> 0), \
                                  4 + (((C) & 0x0c) >> 2), \
                                  4 + (((C) & 0x30) >> 4), \
                                  4 + (((C) & 0xc0) >> 6)); })

#define _mm256_permute2f128_pd(V1, V2, M) __extension__ ({ \
  __m256d __V1 = (V1); \
  __m256d __V2 = (V2); \
  (__m256d)__builtin_ia32_vperm2f128_pd256((__v4df)__V1, (__v4df)__V2, (M)); })

#define _mm256_permute2f128_ps(V1, V2, M) __extension__ ({ \
  __m256 __V1 = (V1); \
  __m256 __V2 = (V2); \
  (__m256)__builtin_ia32_vperm2f128_ps256((__v8sf)__V1, (__v8sf)__V2, (M)); })

#define _mm256_permute2f128_si256(V1, V2, M) __extension__ ({ \
  __m256i __V1 = (V1); \
  __m256i __V2 = (V2); \
  (__m256i)__builtin_ia32_vperm2f128_si256((__v8si)__V1, (__v8si)__V2, (M)); })
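
/* Usage sketch (illustrative; not part of the original header): each nibble
 * of the permute2f128 immediate selects a 128-bit half for the corresponding
 * destination half (0/1 = low/high half of V1, 2/3 = low/high half of V2;
 * bit 3 of a nibble zeroes that half). Swapping the halves of a vector:
 *
 *   __m256d __swapped = _mm256_permute2f128_pd(__v, __v, 0x01);
 */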

/* Vector Blend */
#define _mm256_blend_pd(V1, V2, M) __extension__ ({ \
  __m256d __V1 = (V1); \
  __m256d __V2 = (V2); \
  (__m256d)__builtin_ia32_blendpd256((__v4df)__V1, (__v4df)__V2, (M)); })

#define _mm256_blend_ps(V1, V2, M) __extension__ ({ \
  __m256 __V1 = (V1); \
  __m256 __V2 = (V2); \
  (__m256)__builtin_ia32_blendps256((__v8sf)__V1, (__v8sf)__V2, (M)); })

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_blendv_pd(__m256d __a, __m256d __b, __m256d __c)
{
  return (__m256d)__builtin_ia32_blendvpd256(
    (__v4df)__a, (__v4df)__b, (__v4df)__c);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
{
  return (__m256)__builtin_ia32_blendvps256(
    (__v8sf)__a, (__v8sf)__b, (__v8sf)__c);
}

/* Vector Dot Product */
#define _mm256_dp_ps(V1, V2, M) __extension__ ({ \
  __m256 __V1 = (V1); \
  __m256 __V2 = (V2); \
  (__m256)__builtin_ia32_dpps256((__v8sf)__V1, (__v8sf)__V2, (M)); })
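
/* Usage sketch (illustrative; not part of the original header): in the
 * dot-product immediate, the high nibble selects which elements are
 * multiplied and summed and the low nibble selects which result elements
 * receive the sum (the others are zeroed); the operation is applied to each
 * 128-bit lane independently:
 *
 *   __m256 __dots = _mm256_dp_ps(__u, __v, 0xf1); // sum in element 0 of each lane
 */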

/* Vector shuffle */
#define _mm256_shuffle_ps(a, b, mask) __extension__ ({ \
        __m256 __a = (a); \
        __m256 __b = (b); \
        (__m256)__builtin_shufflevector((__v8sf)__a, (__v8sf)__b, \
        (mask) & 0x3, ((mask) & 0xc) >> 2, \
        (((mask) & 0x30) >> 4) + 8, (((mask) & 0xc0) >> 6) + 8, \
        ((mask) & 0x3) + 4, (((mask) & 0xc) >> 2) + 4, \
        (((mask) & 0x30) >> 4) + 12, (((mask) & 0xc0) >> 6) + 12); })

#define _mm256_shuffle_pd(a, b, mask) __extension__ ({ \
        __m256d __a = (a); \
        __m256d __b = (b); \
        (__m256d)__builtin_shufflevector((__v4df)__a, (__v4df)__b, \
        (mask) & 0x1, \
        (((mask) & 0x2) >> 1) + 4, \
        (((mask) & 0x4) >> 2) + 2, \
        (((mask) & 0x8) >> 3) + 6); })

/* Compare */
#define _CMP_EQ_OQ    0x00 /* Equal (ordered, non-signaling) */
#define _CMP_LT_OS    0x01 /* Less-than (ordered, signaling) */
#define _CMP_LE_OS    0x02 /* Less-than-or-equal (ordered, signaling) */
#define _CMP_UNORD_Q  0x03 /* Unordered (non-signaling) */
#define _CMP_NEQ_UQ   0x04 /* Not-equal (unordered, non-signaling) */
#define _CMP_NLT_US   0x05 /* Not-less-than (unordered, signaling) */
#define _CMP_NLE_US   0x06 /* Not-less-than-or-equal (unordered, signaling) */
#define _CMP_ORD_Q    0x07 /* Ordered (non-signaling) */
#define _CMP_EQ_UQ    0x08 /* Equal (unordered, non-signaling) */
#define _CMP_NGE_US   0x09 /* Not-greater-than-or-equal (unordered, signaling) */
#define _CMP_NGT_US   0x0a /* Not-greater-than (unordered, signaling) */
#define _CMP_FALSE_OQ 0x0b /* False (ordered, non-signaling) */
#define _CMP_NEQ_OQ   0x0c /* Not-equal (ordered, non-signaling) */
#define _CMP_GE_OS    0x0d /* Greater-than-or-equal (ordered, signaling) */
#define _CMP_GT_OS    0x0e /* Greater-than (ordered, signaling) */
#define _CMP_TRUE_UQ  0x0f /* True (unordered, non-signaling) */
#define _CMP_EQ_OS    0x10 /* Equal (ordered, signaling) */
#define _CMP_LT_OQ    0x11 /* Less-than (ordered, non-signaling) */
#define _CMP_LE_OQ    0x12 /* Less-than-or-equal (ordered, non-signaling) */
#define _CMP_UNORD_S  0x13 /* Unordered (signaling) */
#define _CMP_NEQ_US   0x14 /* Not-equal (unordered, signaling) */
#define _CMP_NLT_UQ   0x15 /* Not-less-than (unordered, non-signaling) */
#define _CMP_NLE_UQ   0x16 /* Not-less-than-or-equal (unordered, non-signaling) */
#define _CMP_ORD_S    0x17 /* Ordered (signaling) */
#define _CMP_EQ_US    0x18 /* Equal (unordered, signaling) */
#define _CMP_NGE_UQ   0x19 /* Not-greater-than-or-equal (unordered, non-signaling) */
#define _CMP_NGT_UQ   0x1a /* Not-greater-than (unordered, non-signaling) */
#define _CMP_FALSE_OS 0x1b /* False (ordered, signaling) */
#define _CMP_NEQ_OS   0x1c /* Not-equal (ordered, signaling) */
#define _CMP_GE_OQ    0x1d /* Greater-than-or-equal (ordered, non-signaling) */
#define _CMP_GT_OQ    0x1e /* Greater-than (ordered, non-signaling) */
#define _CMP_TRUE_US  0x1f /* True (unordered, signaling) */

#define _mm_cmp_pd(a, b, c) __extension__ ({ \
  __m128d __a = (a); \
  __m128d __b = (b); \
  (__m128d)__builtin_ia32_cmppd((__v2df)__a, (__v2df)__b, (c)); })

#define _mm_cmp_ps(a, b, c) __extension__ ({ \
  __m128 __a = (a); \
  __m128 __b = (b); \
  (__m128)__builtin_ia32_cmpps((__v4sf)__a, (__v4sf)__b, (c)); })

#define _mm256_cmp_pd(a, b, c) __extension__ ({ \
  __m256d __a = (a); \
  __m256d __b = (b); \
  (__m256d)__builtin_ia32_cmppd256((__v4df)__a, (__v4df)__b, (c)); })

#define _mm256_cmp_ps(a, b, c) __extension__ ({ \
  __m256 __a = (a); \
  __m256 __b = (b); \
  (__m256)__builtin_ia32_cmpps256((__v8sf)__a, (__v8sf)__b, (c)); })

#define _mm_cmp_sd(a, b, c) __extension__ ({ \
  __m128d __a = (a); \
  __m128d __b = (b); \
  (__m128d)__builtin_ia32_cmpsd((__v2df)__a, (__v2df)__b, (c)); })

#define _mm_cmp_ss(a, b, c) __extension__ ({ \
  __m128 __a = (a); \
  __m128 __b = (b); \
  (__m128)__builtin_ia32_cmpss((__v4sf)__a, (__v4sf)__b, (c)); })
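
/* Usage sketch (illustrative; not part of the original header): the compare
 * intrinsics produce all-ones/all-zeros element masks, which pair naturally
 * with movemask to obtain one bit per element:
 *
 *   __m256 __lt = _mm256_cmp_ps(__x, __y, _CMP_LT_OQ);
 *   int __bits  = _mm256_movemask_ps(__lt); // bit i set iff __x[i] < __y[i]
 */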

/* Vector extract */
#define _mm256_extractf128_pd(A, O) __extension__ ({ \
  __m256d __A = (A); \
  (__m128d)__builtin_ia32_vextractf128_pd256((__v4df)__A, (O)); })

#define _mm256_extractf128_ps(A, O) __extension__ ({ \
  __m256 __A = (A); \
  (__m128)__builtin_ia32_vextractf128_ps256((__v8sf)__A, (O)); })

#define _mm256_extractf128_si256(A, O) __extension__ ({ \
  __m256i __A = (A); \
  (__m128i)__builtin_ia32_vextractf128_si256((__v8si)__A, (O)); })

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_extract_epi32(__m256i __a, int const __imm)
{
  __v8si __b = (__v8si)__a;
  return __b[__imm & 7];
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_extract_epi16(__m256i __a, int const __imm)
{
  __v16hi __b = (__v16hi)__a;
  return __b[__imm & 15];
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_extract_epi8(__m256i __a, int const __imm)
{
  __v32qi __b = (__v32qi)__a;
  return __b[__imm & 31];
}

#ifdef __x86_64__
static __inline long long __attribute__((__always_inline__, __nodebug__))
_mm256_extract_epi64(__m256i __a, const int __imm)
{
  __v4di __b = (__v4di)__a;
  return __b[__imm & 3];
}
#endif

/* Vector insert */
#define _mm256_insertf128_pd(V1, V2, O) __extension__ ({ \
  __m256d __V1 = (V1); \
  __m128d __V2 = (V2); \
  (__m256d)__builtin_ia32_vinsertf128_pd256((__v4df)__V1, (__v2df)__V2, (O)); })

#define _mm256_insertf128_ps(V1, V2, O) __extension__ ({ \
  __m256 __V1 = (V1); \
  __m128 __V2 = (V2); \
  (__m256)__builtin_ia32_vinsertf128_ps256((__v8sf)__V1, (__v4sf)__V2, (O)); })

#define _mm256_insertf128_si256(V1, V2, O) __extension__ ({ \
  __m256i __V1 = (V1); \
  __m128i __V2 = (V2); \
  (__m256i)__builtin_ia32_vinsertf128_si256((__v8si)__V1, (__v4si)__V2, (O)); })

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_insert_epi32(__m256i __a, int __b, int const __imm)
{
  __v8si __c = (__v8si)__a;
  __c[__imm & 7] = __b;
  return (__m256i)__c;
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_insert_epi16(__m256i __a, int __b, int const __imm)
{
  __v16hi __c = (__v16hi)__a;
  __c[__imm & 15] = __b;
  return (__m256i)__c;
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_insert_epi8(__m256i __a, int __b, int const __imm)
{
  __v32qi __c = (__v32qi)__a;
  __c[__imm & 31] = __b;
  return (__m256i)__c;
}

#ifdef __x86_64__
static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_insert_epi64(__m256i __a, long long __b, int const __imm)
{
  __v4di __c = (__v4di)__a;
  __c[__imm & 3] = __b;
  return (__m256i)__c;
}
#endif

/* Conversion */
static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_cvtepi32_pd(__m128i __a)
{
  return (__m256d)__builtin_ia32_cvtdq2pd256((__v4si) __a);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_cvtepi32_ps(__m256i __a)
{
  return (__m256)__builtin_ia32_cvtdq2ps256((__v8si) __a);
}

static __inline __m128 __attribute__((__always_inline__, __nodebug__))
_mm256_cvtpd_ps(__m256d __a)
{
  return (__m128)__builtin_ia32_cvtpd2ps256((__v4df) __a);
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_cvtps_epi32(__m256 __a)
{
  return (__m256i)__builtin_ia32_cvtps2dq256((__v8sf) __a);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_cvtps_pd(__m128 __a)
{
  return (__m256d)__builtin_ia32_cvtps2pd256((__v4sf) __a);
}

static __inline __m128i __attribute__((__always_inline__, __nodebug__))
_mm256_cvttpd_epi32(__m256d __a)
{
  return (__m128i)__builtin_ia32_cvttpd2dq256((__v4df) __a);
}

static __inline __m128i __attribute__((__always_inline__, __nodebug__))
_mm256_cvtpd_epi32(__m256d __a)
{
  return (__m128i)__builtin_ia32_cvtpd2dq256((__v4df) __a);
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_cvttps_epi32(__m256 __a)
{
  return (__m256i)__builtin_ia32_cvttps2dq256((__v8sf) __a);
}
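
/* Note: the cvtt forms truncate toward zero, while the cvt forms round
 * according to the current MXCSR rounding mode (round-to-nearest-even by
 * default); e.g. 1.7 converts to 1 with _mm256_cvttpd_epi32 but to 2 with
 * _mm256_cvtpd_epi32 under the default mode.
 */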

/* Vector replicate */
static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_movehdup_ps(__m256 __a)
{
  return __builtin_shufflevector(__a, __a, 1, 1, 3, 3, 5, 5, 7, 7);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_moveldup_ps(__m256 __a)
{
  return __builtin_shufflevector(__a, __a, 0, 0, 2, 2, 4, 4, 6, 6);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_movedup_pd(__m256d __a)
{
  return __builtin_shufflevector(__a, __a, 0, 0, 2, 2);
}

/* Unpack and Interleave */
static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_unpackhi_pd(__m256d __a, __m256d __b)
{
  return __builtin_shufflevector(__a, __b, 1, 5, 1+2, 5+2);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_unpacklo_pd(__m256d __a, __m256d __b)
{
  return __builtin_shufflevector(__a, __b, 0, 4, 0+2, 4+2);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_unpackhi_ps(__m256 __a, __m256 __b)
{
  return __builtin_shufflevector(__a, __b, 2, 10, 2+1, 10+1, 6, 14, 6+1, 14+1);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_unpacklo_ps(__m256 __a, __m256 __b)
{
  return __builtin_shufflevector(__a, __b, 0, 8, 0+1, 8+1, 4, 12, 4+1, 12+1);
}

/* Bit Test */
static __inline int __attribute__((__always_inline__, __nodebug__))
_mm_testz_pd(__m128d __a, __m128d __b)
{
  return __builtin_ia32_vtestzpd((__v2df)__a, (__v2df)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm_testc_pd(__m128d __a, __m128d __b)
{
  return __builtin_ia32_vtestcpd((__v2df)__a, (__v2df)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm_testnzc_pd(__m128d __a, __m128d __b)
{
  return __builtin_ia32_vtestnzcpd((__v2df)__a, (__v2df)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm_testz_ps(__m128 __a, __m128 __b)
{
  return __builtin_ia32_vtestzps((__v4sf)__a, (__v4sf)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm_testc_ps(__m128 __a, __m128 __b)
{
  return __builtin_ia32_vtestcps((__v4sf)__a, (__v4sf)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm_testnzc_ps(__m128 __a, __m128 __b)
{
  return __builtin_ia32_vtestnzcps((__v4sf)__a, (__v4sf)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_testz_pd(__m256d __a, __m256d __b)
{
  return __builtin_ia32_vtestzpd256((__v4df)__a, (__v4df)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_testc_pd(__m256d __a, __m256d __b)
{
  return __builtin_ia32_vtestcpd256((__v4df)__a, (__v4df)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_testnzc_pd(__m256d __a, __m256d __b)
{
  return __builtin_ia32_vtestnzcpd256((__v4df)__a, (__v4df)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_testz_ps(__m256 __a, __m256 __b)
{
  return __builtin_ia32_vtestzps256((__v8sf)__a, (__v8sf)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_testc_ps(__m256 __a, __m256 __b)
{
  return __builtin_ia32_vtestcps256((__v8sf)__a, (__v8sf)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_testnzc_ps(__m256 __a, __m256 __b)
{
  return __builtin_ia32_vtestnzcps256((__v8sf)__a, (__v8sf)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_testz_si256(__m256i __a, __m256i __b)
{
  return __builtin_ia32_ptestz256((__v4di)__a, (__v4di)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_testc_si256(__m256i __a, __m256i __b)
{
  return __builtin_ia32_ptestc256((__v4di)__a, (__v4di)__b);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_testnzc_si256(__m256i __a, __m256i __b)
{
  return __builtin_ia32_ptestnzc256((__v4di)__a, (__v4di)__b);
}
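
/* Usage sketch (illustrative; not part of the original header): testz
 * returns 1 exactly when the bitwise AND of its operands is all zero, so
 * testing a vector against itself checks for an all-zero vector:
 *
 *   int __is_zero = _mm256_testz_si256(__v, __v);
 */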

/* Vector extract sign mask */
static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_movemask_pd(__m256d __a)
{
  return __builtin_ia32_movmskpd256((__v4df)__a);
}

static __inline int __attribute__((__always_inline__, __nodebug__))
_mm256_movemask_ps(__m256 __a)
{
  return __builtin_ia32_movmskps256((__v8sf)__a);
}

/* Vector zero */
static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_zeroall(void)
{
  __builtin_ia32_vzeroall();
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_zeroupper(void)
{
  __builtin_ia32_vzeroupper();
}
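
/* Note: mixing 256-bit AVX code with legacy (non-VEX-encoded) SSE code can
 * incur state-transition penalties on some microarchitectures; calling
 * _mm256_zeroupper() before executing such code avoids them. Compilers
 * targeting AVX typically insert vzeroupper automatically.
 */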

/* Vector load with broadcast */
static __inline __m128 __attribute__((__always_inline__, __nodebug__))
_mm_broadcast_ss(float const *__a)
{
  return (__m128)__builtin_ia32_vbroadcastss(__a);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_broadcast_sd(double const *__a)
{
  return (__m256d)__builtin_ia32_vbroadcastsd256(__a);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_broadcast_ss(float const *__a)
{
  return (__m256)__builtin_ia32_vbroadcastss256(__a);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_broadcast_pd(__m128d const *__a)
{
  return (__m256d)__builtin_ia32_vbroadcastf128_pd256(__a);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_broadcast_ps(__m128 const *__a)
{
  return (__m256)__builtin_ia32_vbroadcastf128_ps256(__a);
}

/* SIMD load ops */
static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_load_pd(double const *__p)
{
  return *(__m256d *)__p;
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_load_ps(float const *__p)
{
  return *(__m256 *)__p;
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_loadu_pd(double const *__p)
{
  struct __loadu_pd {
    __m256d __v;
  } __attribute__((packed, may_alias));
  return ((struct __loadu_pd*)__p)->__v;
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_loadu_ps(float const *__p)
{
  struct __loadu_ps {
    __m256 __v;
  } __attribute__((packed, may_alias));
  return ((struct __loadu_ps*)__p)->__v;
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_load_si256(__m256i const *__p)
{
  return *__p;
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_loadu_si256(__m256i const *__p)
{
  struct __loadu_si256 {
    __m256i __v;
  } __attribute__((packed, may_alias));
  return ((struct __loadu_si256*)__p)->__v;
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_lddqu_si256(__m256i const *__p)
{
  return (__m256i)__builtin_ia32_lddqu256((char const *)__p);
}

/* SIMD store ops */
static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_store_pd(double *__p, __m256d __a)
{
  *(__m256d *)__p = __a;
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_store_ps(float *__p, __m256 __a)
{
  *(__m256 *)__p = __a;
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_storeu_pd(double *__p, __m256d __a)
{
  __builtin_ia32_storeupd256(__p, (__v4df)__a);
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_storeu_ps(float *__p, __m256 __a)
{
  __builtin_ia32_storeups256(__p, (__v8sf)__a);
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_store_si256(__m256i *__p, __m256i __a)
{
  *__p = __a;
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_storeu_si256(__m256i *__p, __m256i __a)
{
  __builtin_ia32_storedqu256((char *)__p, (__v32qi)__a);
}
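
/* Note: the load/store forms above require 32-byte-aligned addresses, while
 * the loadu/storeu forms accept any alignment. A sketch:
 *
 *   double __buf[4] __attribute__((aligned(32)));
 *   _mm256_store_pd(__buf, _mm256_set1_pd(1.0));  // alignment guaranteed
 *   _mm256_storeu_pd(__buf, _mm256_set1_pd(2.0)); // no alignment assumed
 */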

/* Conditional load ops */
static __inline __m128d __attribute__((__always_inline__, __nodebug__))
_mm_maskload_pd(double const *__p, __m128d __m)
{
  return (__m128d)__builtin_ia32_maskloadpd((const __v2df *)__p, (__v2df)__m);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_maskload_pd(double const *__p, __m256d __m)
{
  return (__m256d)__builtin_ia32_maskloadpd256((const __v4df *)__p,
                                               (__v4df)__m);
}

static __inline __m128 __attribute__((__always_inline__, __nodebug__))
_mm_maskload_ps(float const *__p, __m128 __m)
{
  return (__m128)__builtin_ia32_maskloadps((const __v4sf *)__p, (__v4sf)__m);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_maskload_ps(float const *__p, __m256 __m)
{
  return (__m256)__builtin_ia32_maskloadps256((const __v8sf *)__p, (__v8sf)__m);
}

/* Conditional store ops */
static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_maskstore_ps(float *__p, __m256 __m, __m256 __a)
{
  __builtin_ia32_maskstoreps256((__v8sf *)__p, (__v8sf)__m, (__v8sf)__a);
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm_maskstore_pd(double *__p, __m128d __m, __m128d __a)
{
  __builtin_ia32_maskstorepd((__v2df *)__p, (__v2df)__m, (__v2df)__a);
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_maskstore_pd(double *__p, __m256d __m, __m256d __a)
{
  __builtin_ia32_maskstorepd256((__v4df *)__p, (__v4df)__m, (__v4df)__a);
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm_maskstore_ps(float *__p, __m128 __m, __m128 __a)
{
  __builtin_ia32_maskstoreps((__v4sf *)__p, (__v4sf)__m, (__v4sf)__a);
}
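
/* Usage sketch (illustrative; not part of the original header): the sign
 * (most significant) bit of each mask element selects its lane; masked-off
 * lanes read as zero on load and are left untouched on store. In this header
 * the mask is passed as a vector of the same floating-point type. Loading
 * only the first two doubles of a buffer:
 *
 *   __m256d __m = _mm256_castsi256_pd(_mm256_setr_epi64x(-1LL, -1LL, 0, 0));
 *   __m256d __v = _mm256_maskload_pd(__p, __m);
 */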

/* Cacheability support ops */
static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_stream_si256(__m256i *__a, __m256i __b)
{
  __builtin_ia32_movntdq256((__v4di *)__a, (__v4di)__b);
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_stream_pd(double *__a, __m256d __b)
{
  __builtin_ia32_movntpd256(__a, (__v4df)__b);
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_stream_ps(float *__p, __m256 __a)
{
  __builtin_ia32_movntps256(__p, (__v8sf)__a);
}

/* Create vectors */
static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_set_pd(double __a, double __b, double __c, double __d)
{
  return (__m256d){ __d, __c, __b, __a };
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_set_ps(float __a, float __b, float __c, float __d,
              float __e, float __f, float __g, float __h)
{
  return (__m256){ __h, __g, __f, __e, __d, __c, __b, __a };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_set_epi32(int __i0, int __i1, int __i2, int __i3,
                 int __i4, int __i5, int __i6, int __i7)
{
  return (__m256i)(__v8si){ __i7, __i6, __i5, __i4, __i3, __i2, __i1, __i0 };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_set_epi16(short __w15, short __w14, short __w13, short __w12,
                 short __w11, short __w10, short __w09, short __w08,
                 short __w07, short __w06, short __w05, short __w04,
                 short __w03, short __w02, short __w01, short __w00)
{
  return (__m256i)(__v16hi){ __w00, __w01, __w02, __w03, __w04, __w05, __w06,
    __w07, __w08, __w09, __w10, __w11, __w12, __w13, __w14, __w15 };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_set_epi8(char __b31, char __b30, char __b29, char __b28,
                char __b27, char __b26, char __b25, char __b24,
                char __b23, char __b22, char __b21, char __b20,
                char __b19, char __b18, char __b17, char __b16,
                char __b15, char __b14, char __b13, char __b12,
                char __b11, char __b10, char __b09, char __b08,
                char __b07, char __b06, char __b05, char __b04,
                char __b03, char __b02, char __b01, char __b00)
{
  return (__m256i)(__v32qi){
    __b00, __b01, __b02, __b03, __b04, __b05, __b06, __b07,
    __b08, __b09, __b10, __b11, __b12, __b13, __b14, __b15,
    __b16, __b17, __b18, __b19, __b20, __b21, __b22, __b23,
    __b24, __b25, __b26, __b27, __b28, __b29, __b30, __b31
  };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_set_epi64x(long long __a, long long __b, long long __c, long long __d)
{
  return (__m256i)(__v4di){ __d, __c, __b, __a };
}

/* Create vectors with elements in reverse order */
static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_setr_pd(double __a, double __b, double __c, double __d)
{
  return (__m256d){ __a, __b, __c, __d };
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_setr_ps(float __a, float __b, float __c, float __d,
               float __e, float __f, float __g, float __h)
{
  return (__m256){ __a, __b, __c, __d, __e, __f, __g, __h };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_setr_epi32(int __i0, int __i1, int __i2, int __i3,
                  int __i4, int __i5, int __i6, int __i7)
{
  return (__m256i)(__v8si){ __i0, __i1, __i2, __i3, __i4, __i5, __i6, __i7 };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_setr_epi16(short __w15, short __w14, short __w13, short __w12,
                  short __w11, short __w10, short __w09, short __w08,
                  short __w07, short __w06, short __w05, short __w04,
                  short __w03, short __w02, short __w01, short __w00)
{
  return (__m256i)(__v16hi){ __w15, __w14, __w13, __w12, __w11, __w10, __w09,
    __w08, __w07, __w06, __w05, __w04, __w03, __w02, __w01, __w00 };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_setr_epi8(char __b31, char __b30, char __b29, char __b28,
                 char __b27, char __b26, char __b25, char __b24,
                 char __b23, char __b22, char __b21, char __b20,
                 char __b19, char __b18, char __b17, char __b16,
                 char __b15, char __b14, char __b13, char __b12,
                 char __b11, char __b10, char __b09, char __b08,
                 char __b07, char __b06, char __b05, char __b04,
                 char __b03, char __b02, char __b01, char __b00)
{
  return (__m256i)(__v32qi){
    __b31, __b30, __b29, __b28, __b27, __b26, __b25, __b24,
    __b23, __b22, __b21, __b20, __b19, __b18, __b17, __b16,
    __b15, __b14, __b13, __b12, __b11, __b10, __b09, __b08,
    __b07, __b06, __b05, __b04, __b03, __b02, __b01, __b00 };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_setr_epi64x(long long __a, long long __b, long long __c, long long __d)
{
  return (__m256i)(__v4di){ __a, __b, __c, __d };
}
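
/* Note: the set forms take arguments from the highest element down to the
 * lowest, while the setr ("reverse") forms take them in memory order, so the
 * following two calls build the same vector:
 *
 *   _mm256_set_pd(3.0, 2.0, 1.0, 0.0);
 *   _mm256_setr_pd(0.0, 1.0, 2.0, 3.0);
 */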

/* Create vectors with repeated elements */
static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_set1_pd(double __w)
{
  return (__m256d){ __w, __w, __w, __w };
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_set1_ps(float __w)
{
  return (__m256){ __w, __w, __w, __w, __w, __w, __w, __w };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_set1_epi32(int __i)
{
  return (__m256i)(__v8si){ __i, __i, __i, __i, __i, __i, __i, __i };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_set1_epi16(short __w)
{
  return (__m256i)(__v16hi){ __w, __w, __w, __w, __w, __w, __w, __w, __w, __w,
    __w, __w, __w, __w, __w, __w };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_set1_epi8(char __b)
{
  return (__m256i)(__v32qi){ __b, __b, __b, __b, __b, __b, __b, __b, __b, __b,
    __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b,
    __b, __b, __b, __b, __b, __b, __b };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_set1_epi64x(long long __q)
{
  return (__m256i)(__v4di){ __q, __q, __q, __q };
}

/* Create zeroed vectors */
static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_setzero_pd(void)
{
  return (__m256d){ 0, 0, 0, 0 };
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_setzero_ps(void)
{
  return (__m256){ 0, 0, 0, 0, 0, 0, 0, 0 };
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_setzero_si256(void)
{
  return (__m256i){ 0LL, 0LL, 0LL, 0LL };
}

/* Cast between vector types */
static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_castpd_ps(__m256d __a)
{
  return (__m256)__a;
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_castpd_si256(__m256d __a)
{
  return (__m256i)__a;
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_castps_pd(__m256 __a)
{
  return (__m256d)__a;
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_castps_si256(__m256 __a)
{
  return (__m256i)__a;
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_castsi256_ps(__m256i __a)
{
  return (__m256)__a;
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_castsi256_pd(__m256i __a)
{
  return (__m256d)__a;
}

static __inline __m128d __attribute__((__always_inline__, __nodebug__))
_mm256_castpd256_pd128(__m256d __a)
{
  return __builtin_shufflevector(__a, __a, 0, 1);
}

static __inline __m128 __attribute__((__always_inline__, __nodebug__))
_mm256_castps256_ps128(__m256 __a)
{
  return __builtin_shufflevector(__a, __a, 0, 1, 2, 3);
}

static __inline __m128i __attribute__((__always_inline__, __nodebug__))
_mm256_castsi256_si128(__m256i __a)
{
  return __builtin_shufflevector(__a, __a, 0, 1);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_castpd128_pd256(__m128d __a)
{
  return __builtin_shufflevector(__a, __a, 0, 1, -1, -1);
}

static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_castps128_ps256(__m128 __a)
{
  return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, -1, -1, -1, -1);
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_castsi128_si256(__m128i __a)
{
  return __builtin_shufflevector(__a, __a, 0, 1, -1, -1);
}
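
/* Note: the 128-to-256-bit casts above leave the upper 128 bits of the
 * result undefined. When defined contents are needed, populate the upper
 * half explicitly, e.g. (illustrative sketch):
 *
 *   __m256 __wide =
 *     _mm256_insertf128_ps(_mm256_castps128_ps256(__lo), __hi, 1);
 */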

/* SIMD load ops (unaligned) */
static __inline __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_loadu2_m128(float const *__addr_hi, float const *__addr_lo)
{
  struct __loadu_ps {
    __m128 __v;
  } __attribute__((__packed__, __may_alias__));

  __m256 __v256 = _mm256_castps128_ps256(((struct __loadu_ps*)__addr_lo)->__v);
  return _mm256_insertf128_ps(__v256, ((struct __loadu_ps*)__addr_hi)->__v, 1);
}

static __inline __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_loadu2_m128d(double const *__addr_hi, double const *__addr_lo)
{
  struct __loadu_pd {
    __m128d __v;
  } __attribute__((__packed__, __may_alias__));

  __m256d __v256 = _mm256_castpd128_pd256(((struct __loadu_pd*)__addr_lo)->__v);
  return _mm256_insertf128_pd(__v256, ((struct __loadu_pd*)__addr_hi)->__v, 1);
}

static __inline __m256i __attribute__((__always_inline__, __nodebug__))
_mm256_loadu2_m128i(__m128i const *__addr_hi, __m128i const *__addr_lo)
{
  struct __loadu_si128 {
    __m128i __v;
  } __attribute__((packed, may_alias));
  __m256i __v256 = _mm256_castsi128_si256(
    ((struct __loadu_si128*)__addr_lo)->__v);
  return _mm256_insertf128_si256(__v256,
    ((struct __loadu_si128*)__addr_hi)->__v, 1);
}

/* SIMD store ops (unaligned) */
static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_storeu2_m128(float *__addr_hi, float *__addr_lo, __m256 __a)
{
  __m128 __v128;

  __v128 = _mm256_castps256_ps128(__a);
  __builtin_ia32_storeups(__addr_lo, __v128);
  __v128 = _mm256_extractf128_ps(__a, 1);
  __builtin_ia32_storeups(__addr_hi, __v128);
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_storeu2_m128d(double *__addr_hi, double *__addr_lo, __m256d __a)
{
  __m128d __v128;

  __v128 = _mm256_castpd256_pd128(__a);
  __builtin_ia32_storeupd(__addr_lo, __v128);
  __v128 = _mm256_extractf128_pd(__a, 1);
  __builtin_ia32_storeupd(__addr_hi, __v128);
}

static __inline void __attribute__((__always_inline__, __nodebug__))
_mm256_storeu2_m128i(__m128i *__addr_hi, __m128i *__addr_lo, __m256i __a)
{
  __m128i __v128;

  __v128 = _mm256_castsi256_si128(__a);
  __builtin_ia32_storedqu((char *)__addr_lo, (__v16qi)__v128);
  __v128 = _mm256_extractf128_si256(__a, 1);
  __builtin_ia32_storedqu((char *)__addr_hi, (__v16qi)__v128);
}

#endif /* __AVXINTRIN_H */