/*===---- smmintrin.h - SSE4 intrinsics ------------------------------------===
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef _SMMINTRIN_H
#define _SMMINTRIN_H

#ifndef __SSE4_1__
#error "SSE4.1 instruction set not enabled"
#else

#include <tmmintrin.h>
/* SSE4 Rounding macros. */
#define _MM_FROUND_TO_NEAREST_INT    0x00
#define _MM_FROUND_TO_NEG_INF        0x01
#define _MM_FROUND_TO_POS_INF        0x02
#define _MM_FROUND_TO_ZERO           0x03
#define _MM_FROUND_CUR_DIRECTION     0x04

#define _MM_FROUND_RAISE_EXC         0x00
#define _MM_FROUND_NO_EXC            0x08

#define _MM_FROUND_NINT      (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEAREST_INT)
#define _MM_FROUND_FLOOR     (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEG_INF)
#define _MM_FROUND_CEIL      (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_POS_INF)
#define _MM_FROUND_TRUNC     (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_ZERO)
#define _MM_FROUND_RINT      (_MM_FROUND_RAISE_EXC | _MM_FROUND_CUR_DIRECTION)
#define _MM_FROUND_NEARBYINT (_MM_FROUND_NO_EXC | _MM_FROUND_CUR_DIRECTION)
#define _mm_ceil_ps(X)     _mm_round_ps((X), _MM_FROUND_CEIL)
#define _mm_ceil_pd(X)     _mm_round_pd((X), _MM_FROUND_CEIL)
#define _mm_ceil_ss(X, Y)  _mm_round_ss((X), (Y), _MM_FROUND_CEIL)
#define _mm_ceil_sd(X, Y)  _mm_round_sd((X), (Y), _MM_FROUND_CEIL)

#define _mm_floor_ps(X)    _mm_round_ps((X), _MM_FROUND_FLOOR)
#define _mm_floor_pd(X)    _mm_round_pd((X), _MM_FROUND_FLOOR)
#define _mm_floor_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_FLOOR)
#define _mm_floor_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_FLOOR)
#define _mm_round_ps(X, M) __extension__ ({ \
  __m128 __X = (X); \
  (__m128) __builtin_ia32_roundps((__v4sf)__X, (M)); })

#define _mm_round_ss(X, Y, M) __extension__ ({ \
  __m128 __X = (X); \
  __m128 __Y = (Y); \
  (__m128) __builtin_ia32_roundss((__v4sf)__X, (__v4sf)__Y, (M)); })

#define _mm_round_pd(X, M) __extension__ ({ \
  __m128d __X = (X); \
  (__m128d) __builtin_ia32_roundpd((__v2df)__X, (M)); })

#define _mm_round_sd(X, Y, M) __extension__ ({ \
  __m128d __X = (X); \
  __m128d __Y = (Y); \
  (__m128d) __builtin_ia32_roundsd((__v2df)__X, (__v2df)__Y, (M)); })
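/* Usage sketch (illustrative addition, not part of the original header):
 * truncate four floats toward zero without raising the inexact exception.
 *
 *   __m128 v = _mm_set_ps(2.5f, -0.5f, 1.1f, -1.9f); // elements {-1.9, 1.1, -0.5, 2.5}
 *   __m128 t = _mm_round_ps(v, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
 *   // t holds {-1.0f, 1.0f, -0.0f, 2.0f} in element order 0..3
 */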
/* SSE4 Packed Blending Intrinsics. */
#define _mm_blend_pd(V1, V2, M) __extension__ ({ \
  __m128d __V1 = (V1); \
  __m128d __V2 = (V2); \
  (__m128d) __builtin_ia32_blendpd ((__v2df)__V1, (__v2df)__V2, (M)); })

#define _mm_blend_ps(V1, V2, M) __extension__ ({ \
  __m128 __V1 = (V1); \
  __m128 __V2 = (V2); \
  (__m128) __builtin_ia32_blendps ((__v4sf)__V1, (__v4sf)__V2, (M)); })
static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_blendv_pd (__m128d __V1, __m128d __V2, __m128d __M)
{
  return (__m128d) __builtin_ia32_blendvpd ((__v2df)__V1, (__v2df)__V2,
                                            (__v2df)__M);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_blendv_ps (__m128 __V1, __m128 __V2, __m128 __M)
{
  return (__m128) __builtin_ia32_blendvps ((__v4sf)__V1, (__v4sf)__V2,
                                           (__v4sf)__M);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_blendv_epi8 (__m128i __V1, __m128i __V2, __m128i __M)
{
  return (__m128i) __builtin_ia32_pblendvb128 ((__v16qi)__V1, (__v16qi)__V2,
                                               (__v16qi)__M);
}
#define _mm_blend_epi16(V1, V2, M) __extension__ ({ \
  __m128i __V1 = (V1); \
  __m128i __V2 = (V2); \
  (__m128i) __builtin_ia32_pblendw128 ((__v8hi)__V1, (__v8hi)__V2, (M)); })
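/* Usage sketch (illustrative addition): each immediate bit picks between the
 * two sources per 16-bit element, bit i = 1 selecting element i of the second.
 *
 *   __m128i r = _mm_blend_epi16(a, b, 0xF0); // words 0-3 from a, words 4-7 from b
 */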
/* SSE4 Dword Multiply Instructions. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_mullo_epi32 (__m128i __V1, __m128i __V2)
{
  return (__m128i) ((__v4si)__V1 * (__v4si)__V2);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_mul_epi32 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pmuldq128 ((__v4si)__V1, (__v4si)__V2);
}
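/* Usage sketch (illustrative addition): _mm_mullo_epi32 keeps the low 32 bits
 * of all four products, while _mm_mul_epi32 forms full signed 64-bit products
 * from elements 0 and 2 only.
 *
 *   __m128i x  = _mm_set_epi32(4, 3, 2, 1);  // elements {1, 2, 3, 4}
 *   __m128i lo = _mm_mullo_epi32(x, x);      // {1, 4, 9, 16}
 *   __m128i wq = _mm_mul_epi32(x, x);        // 64-bit elements {1, 9}
 */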
/* SSE4 Floating Point Dot Product Instructions. */
#define _mm_dp_ps(X, Y, M) __extension__ ({ \
  __m128 __X = (X); \
  __m128 __Y = (Y); \
  (__m128) __builtin_ia32_dpps((__v4sf)__X, (__v4sf)__Y, (M)); })

#define _mm_dp_pd(X, Y, M) __extension__ ({ \
  __m128d __X = (X); \
  __m128d __Y = (Y); \
  (__m128d) __builtin_ia32_dppd((__v2df)__X, (__v2df)__Y, (M)); })
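/* Usage sketch (illustrative addition): for _mm_dp_ps, the high nibble of M
 * selects which element products enter the sum and the low nibble selects
 * which result lanes receive it; unselected lanes are zeroed.
 *
 *   __m128 d = _mm_dp_ps(a, b, 0xF1); // full 4-element dot product in lane 0
 */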
/* SSE4 Streaming Load Hint Instruction. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_stream_load_si128 (__m128i *__V)
{
  return (__m128i) __builtin_ia32_movntdqa ((__v2di *) __V);
}
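/* Note (added): MOVNTDQA acts as a non-temporal hint only on write-combining
 * memory and requires __V to be 16-byte aligned; on ordinary write-back
 * memory it behaves like a regular aligned load. */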
/* SSE4 Packed Integer Min/Max Instructions. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_min_epi8 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pminsb128 ((__v16qi) __V1, (__v16qi) __V2);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_max_epi8 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pmaxsb128 ((__v16qi) __V1, (__v16qi) __V2);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_min_epu16 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pminuw128 ((__v8hi) __V1, (__v8hi) __V2);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_max_epu16 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pmaxuw128 ((__v8hi) __V1, (__v8hi) __V2);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_min_epi32 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pminsd128 ((__v4si) __V1, (__v4si) __V2);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_max_epi32 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pmaxsd128 ((__v4si) __V1, (__v4si) __V2);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_min_epu32 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pminud128((__v4si) __V1, (__v4si) __V2);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_max_epu32 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pmaxud128((__v4si) __V1, (__v4si) __V2);
}
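/* Usage sketch (illustrative addition): clamp each signed dword of v to the
 * inclusive range [lo, hi].
 *
 *   __m128i clamped = _mm_min_epi32(_mm_max_epi32(v, _mm_set1_epi32(lo)),
 *                                   _mm_set1_epi32(hi));
 */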
/* SSE4 Insertion and Extraction from XMM Register Instructions. */
#define _mm_insert_ps(X, Y, N) __builtin_ia32_insertps128((X), (Y), (N))
#define _mm_extract_ps(X, N) (__extension__                      \
                              ({ union { int __i; float __f; } __t;  \
                                 __v4sf __a = (__v4sf)(X);       \
                                 __t.__f = __a[(N) & 3];         \
                                 __t.__i;}))
/* Miscellaneous insert and extract macros. */
/* Extract a single-precision float from X at index N into D. */
#define _MM_EXTRACT_FLOAT(D, X, N) (__extension__ ({ __v4sf __a = (__v4sf)(X); \
                                                     (D) = __a[N]; }))
/* Or together 2 sets of indexes (X and Y) with the zeroing bits (Z) to create
   an index suitable for _mm_insert_ps. */
#define _MM_MK_INSERTPS_NDX(X, Y, Z) (((X) << 6) | ((Y) << 4) | (Z))

/* Extract a float from X at index N into the first index of the return. */
#define _MM_PICK_OUT_PS(X, N) _mm_insert_ps (_mm_setzero_ps(), (X),   \
                                             _MM_MK_INSERTPS_NDX((N), 0, 0x0e))
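/* Usage sketch (illustrative addition): the insertps immediate packs a source
 * index (bits 7:6), a destination index (bits 5:4), and a zero mask (bits 3:0).
 *
 *   // Copy element 2 of b into element 0 of a, zeroing element 3 of the result:
 *   __m128 r = _mm_insert_ps(a, b, _MM_MK_INSERTPS_NDX(2, 0, 0x8));
 */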
/* Insert int into packed integer array at index. */
#define _mm_insert_epi8(X, I, N) (__extension__ ({ __v16qi __a = (__v16qi)(X); \
                                                   __a[(N) & 15] = (I);        \
                                                   __a;}))
#define _mm_insert_epi32(X, I, N) (__extension__ ({ __v4si __a = (__v4si)(X); \
                                                    __a[(N) & 3] = (I);       \
                                                    __a;}))
#ifdef __x86_64__
#define _mm_insert_epi64(X, I, N) (__extension__ ({ __v2di __a = (__v2di)(X); \
                                                    __a[(N) & 1] = (I);       \
                                                    __a;}))
#endif /* __x86_64__ */
/* Extract int from packed integer array at index. This returns the element
 * as a zero extended value, so it is unsigned.
 */
#define _mm_extract_epi8(X, N) (__extension__ ({ __v16qi __a = (__v16qi)(X); \
                                                 (int)(unsigned char)        \
                                                     __a[(N) & 15];}))
#define _mm_extract_epi32(X, N) (__extension__ ({ __v4si __a = (__v4si)(X); \
                                                  (int)__a[(N) & 3];}))
#ifdef __x86_64__
#define _mm_extract_epi64(X, N) (__extension__ ({ __v2di __a = (__v2di)(X); \
                                                  (long long)__a[(N) & 1];}))
#endif /* __x86_64__ */
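/* Usage sketch (illustrative addition): extracted bytes are zero extended,
 * so the result is always non-negative.
 *
 *   int b = _mm_extract_epi8(v, 3);   // 0..255 even if byte 3 is "negative"
 */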
/* SSE4 128-bit Packed Integer Comparisons. */
static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_testz_si128(__m128i __M, __m128i __V)
{
  return __builtin_ia32_ptestz128((__v2di)__M, (__v2di)__V);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_testc_si128(__m128i __M, __m128i __V)
{
  return __builtin_ia32_ptestc128((__v2di)__M, (__v2di)__V);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_testnzc_si128(__m128i __M, __m128i __V)
{
  return __builtin_ia32_ptestnzc128((__v2di)__M, (__v2di)__V);
}

#define _mm_test_all_ones(V) _mm_testc_si128((V), _mm_cmpeq_epi32((V), (V)))
#define _mm_test_mix_ones_zeros(M, V) _mm_testnzc_si128((M), (V))
#define _mm_test_all_zeros(M, V) _mm_testz_si128 ((M), (V))
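/* Usage sketch (illustrative addition): test two vectors for bitwise
 * equality without a scalar reduction.
 *
 *   __m128i diff = _mm_xor_si128(a, b);
 *   if (_mm_test_all_zeros(diff, diff)) { ...vectors are equal... }
 */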
/* SSE4 64-bit Packed Integer Comparisons. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmpeq_epi64(__m128i __V1, __m128i __V2)
{
  return (__m128i)((__v2di)__V1 == (__v2di)__V2);
}
/* SSE4 Packed Integer Sign-Extension. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi8_epi16(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovsxbw128((__v16qi) __V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi8_epi32(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovsxbd128((__v16qi) __V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi8_epi64(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovsxbq128((__v16qi) __V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi16_epi32(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovsxwd128((__v8hi) __V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi16_epi64(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovsxwq128((__v8hi)__V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi32_epi64(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovsxdq128((__v4si)__V);
}
/* SSE4 Packed Integer Zero-Extension. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu8_epi16(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovzxbw128((__v16qi) __V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu8_epi32(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovzxbd128((__v16qi)__V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu8_epi64(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovzxbq128((__v16qi)__V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu16_epi32(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovzxwd128((__v8hi)__V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu16_epi64(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovzxwq128((__v8hi)__V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu32_epi64(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovzxdq128((__v4si)__V);
}
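/* Usage sketch (illustrative addition): both families widen from the low
 * elements of the source; only the treatment of the sign bit differs.
 *
 *   __m128i v = _mm_set1_epi8((char)0xFF);
 *   __m128i s = _mm_cvtepi8_epi16(v);  // eight lanes of -1 (sign-extended)
 *   __m128i u = _mm_cvtepu8_epi16(v);  // eight lanes of 255 (zero-extended)
 */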
/* SSE4 Pack with Unsigned Saturation. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_packus_epi32(__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_packusdw128((__v4si)__V1, (__v4si)__V2);
}
/* SSE4 Multiple Packed Sums of Absolute Difference. */
#define _mm_mpsadbw_epu8(X, Y, M) __extension__ ({ \
  __m128i __X = (X); \
  __m128i __Y = (Y); \
  (__m128i) __builtin_ia32_mpsadbw128((__v16qi)__X, (__v16qi)__Y, (M)); })

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_minpos_epu16(__m128i __V)
{
  return (__m128i) __builtin_ia32_phminposuw128((__v8hi)__V);
}
/* These definitions are normally in nmmintrin.h, but gcc puts them in here
   so we'll do the same. */
#ifdef __SSE4_2__

/* These specify the type of data that we're comparing. */
#define _SIDD_UBYTE_OPS                 0x00
#define _SIDD_UWORD_OPS                 0x01
#define _SIDD_SBYTE_OPS                 0x02
#define _SIDD_SWORD_OPS                 0x03
/* These specify the type of comparison operation. */
#define _SIDD_CMP_EQUAL_ANY             0x00
#define _SIDD_CMP_RANGES                0x04
#define _SIDD_CMP_EQUAL_EACH            0x08
#define _SIDD_CMP_EQUAL_ORDERED         0x0c

/* These macros specify the polarity of the operation. */
#define _SIDD_POSITIVE_POLARITY         0x00
#define _SIDD_NEGATIVE_POLARITY         0x10
#define _SIDD_MASKED_POSITIVE_POLARITY  0x20
#define _SIDD_MASKED_NEGATIVE_POLARITY  0x30
/* These macros are used in _mm_cmpXstri() to specify the return. */
#define _SIDD_LEAST_SIGNIFICANT         0x00
#define _SIDD_MOST_SIGNIFICANT          0x40

/* These macros are used in _mm_cmpXstrm() to specify the return. */
#define _SIDD_BIT_MASK                  0x00
#define _SIDD_UNIT_MASK                 0x40
/* SSE4.2 Packed Comparison Intrinsics. */
#define _mm_cmpistrm(A, B, M) __builtin_ia32_pcmpistrm128((A), (B), (M))
#define _mm_cmpistri(A, B, M) __builtin_ia32_pcmpistri128((A), (B), (M))

#define _mm_cmpestrm(A, LA, B, LB, M) \
     __builtin_ia32_pcmpestrm128((A), (LA), (B), (LB), (M))
#define _mm_cmpestri(A, LA, B, LB, M) \
     __builtin_ia32_pcmpestri128((A), (LA), (B), (LB), (M))
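/* Usage sketch (illustrative addition): find the first byte of `block` that
 * matches any byte in `set` (both __m128i); pcmpistri returns 16 when no
 * byte matches.
 *
 *   int idx = _mm_cmpistri(set, block,
 *                          _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY |
 *                          _SIDD_LEAST_SIGNIFICANT);
 */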
/* SSE4.2 Packed Comparison Intrinsics and EFlag Reading. */
#define _mm_cmpistra(A, B, M) \
     __builtin_ia32_pcmpistria128((A), (B), (M))
#define _mm_cmpistrc(A, B, M) \
     __builtin_ia32_pcmpistric128((A), (B), (M))
#define _mm_cmpistro(A, B, M) \
     __builtin_ia32_pcmpistrio128((A), (B), (M))
#define _mm_cmpistrs(A, B, M) \
     __builtin_ia32_pcmpistris128((A), (B), (M))
#define _mm_cmpistrz(A, B, M) \
     __builtin_ia32_pcmpistriz128((A), (B), (M))

#define _mm_cmpestra(A, LA, B, LB, M) \
     __builtin_ia32_pcmpestria128((A), (LA), (B), (LB), (M))
#define _mm_cmpestrc(A, LA, B, LB, M) \
     __builtin_ia32_pcmpestric128((A), (LA), (B), (LB), (M))
#define _mm_cmpestro(A, LA, B, LB, M) \
     __builtin_ia32_pcmpestrio128((A), (LA), (B), (LB), (M))
#define _mm_cmpestrs(A, LA, B, LB, M) \
     __builtin_ia32_pcmpestris128((A), (LA), (B), (LB), (M))
#define _mm_cmpestrz(A, LA, B, LB, M) \
     __builtin_ia32_pcmpestriz128((A), (LA), (B), (LB), (M))
/* SSE4.2 Compare Packed Data -- Greater Than. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmpgt_epi64(__m128i __V1, __m128i __V2)
{
  return (__m128i)((__v2di)__V1 > (__v2di)__V2);
}
/* SSE4.2 Accumulate CRC32. */
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
_mm_crc32_u8(unsigned int __C, unsigned char __D)
{
  return __builtin_ia32_crc32qi(__C, __D);
}

static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
_mm_crc32_u16(unsigned int __C, unsigned short __D)
{
  return __builtin_ia32_crc32hi(__C, __D);
}

static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
_mm_crc32_u32(unsigned int __C, unsigned int __D)
{
  return __builtin_ia32_crc32si(__C, __D);
}

#ifdef __x86_64__
static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
_mm_crc32_u64(unsigned long long __C, unsigned long long __D)
{
  return __builtin_ia32_crc32di(__C, __D);
}
#endif /* __x86_64__ */
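/* Usage sketch (illustrative addition): these instructions compute CRC-32C
 * (the Castagnoli polynomial), not the zlib CRC-32. A byte-at-a-time fold:
 *
 *   unsigned int crc = 0xFFFFFFFFu;
 *   for (size_t i = 0; i < n; ++i)
 *     crc = _mm_crc32_u8(crc, buf[i]);
 *   crc ^= 0xFFFFFFFFu;   // final inversion per the CRC-32C convention
 */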
#ifdef __POPCNT__
#include <popcntintrin.h>
#endif

#endif /* __SSE4_2__ */
#endif /* __SSE4_1__ */

#endif /* _SMMINTRIN_H */