/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor,
   Boston, MA 02110-1301, USA.  */

/* As a special exception, if you include this header file into source
   files compiled by GCC, this header file does not by itself cause
   the resulting executable to be covered by the GNU General Public
   License.  This exception does not however invalidate any other
   reasons why the executable file might be covered by the GNU General
   Public License.  */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 9.0.  */
#ifndef _XMMINTRIN_H_INCLUDED
#define _XMMINTRIN_H_INCLUDED

#ifndef __SSE__
# error "SSE instruction set not enabled"
#else

/* We need type definitions from the MMX header file.  */
#include <mmintrin.h>

/* Get _mm_malloc () and _mm_free ().  */
#include <mm_malloc.h>
/* The Intel API is flexible enough that we must allow aliasing with other
   vector types, and their scalar components.  */
typedef float __m128 __attribute__ ((__vector_size__ (16), __may_alias__));

/* Internal data types for implementing the intrinsics.  */
typedef float __v4sf __attribute__ ((__vector_size__ (16)));

/* Create a selector for use with the SHUFPS instruction.  */
#define _MM_SHUFFLE(fp3,fp2,fp1,fp0) \
 (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | (fp0))
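
/* For example, _MM_SHUFFLE (3, 2, 1, 0) evaluates to 0xE4, the identity
   selector: _mm_shuffle_ps (A, A, _MM_SHUFFLE (3, 2, 1, 0)) returns A
   unchanged.  */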

/* Constants for use with _mm_prefetch.  */
enum _mm_hint
{
  _MM_HINT_T0 = 3,
  _MM_HINT_T1 = 2,
  _MM_HINT_T2 = 1,
  _MM_HINT_NTA = 0
};
/* Bits in the MXCSR.  */
#define _MM_EXCEPT_MASK       0x003f
#define _MM_EXCEPT_INVALID    0x0001
#define _MM_EXCEPT_DENORM     0x0002
#define _MM_EXCEPT_DIV_ZERO   0x0004
#define _MM_EXCEPT_OVERFLOW   0x0008
#define _MM_EXCEPT_UNDERFLOW  0x0010
#define _MM_EXCEPT_INEXACT    0x0020

#define _MM_MASK_MASK         0x1f80
#define _MM_MASK_INVALID      0x0080
#define _MM_MASK_DENORM       0x0100
#define _MM_MASK_DIV_ZERO     0x0200
#define _MM_MASK_OVERFLOW     0x0400
#define _MM_MASK_UNDERFLOW    0x0800
#define _MM_MASK_INEXACT      0x1000

#define _MM_ROUND_MASK        0x6000
#define _MM_ROUND_NEAREST     0x0000
#define _MM_ROUND_DOWN        0x2000
#define _MM_ROUND_UP          0x4000
#define _MM_ROUND_TOWARD_ZERO 0x6000

#define _MM_FLUSH_ZERO_MASK   0x8000
#define _MM_FLUSH_ZERO_ON     0x8000
#define _MM_FLUSH_ZERO_OFF    0x0000
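
/* These fields tile the low 16 bits of the MXCSR: bits 0-5 are the
   sticky exception flags, bits 7-12 the corresponding exception masks,
   bits 13-14 the rounding mode, and bit 15 flush-to-zero.  */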

/* Create a vector of zeros.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_setzero_ps (void)
{
  return __extension__ (__m128){ 0.0f, 0.0f, 0.0f, 0.0f };
}

/* Perform the respective operation on the lower SPFP (single-precision
   floating-point) values of A and B; the upper three SPFP values are
   passed through from A.  */

static __inline __m128 __attribute__((__always_inline__))
_mm_add_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_addss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_sub_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_subss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_mul_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_mulss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_div_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_divss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_sqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtss ((__v4sf)__A);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_rcp_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpss ((__v4sf)__A);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_rsqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtss ((__v4sf)__A);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_min_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_max_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxss ((__v4sf)__A, (__v4sf)__B);
}

/* Perform the respective operation on the four SPFP values in A and B.  */

static __inline __m128 __attribute__((__always_inline__))
_mm_add_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_addps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_sub_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_subps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_mul_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_mulps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_div_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_divps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_sqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtps ((__v4sf)__A);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_rcp_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpps ((__v4sf)__A);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_rsqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtps ((__v4sf)__A);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_min_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_max_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxps ((__v4sf)__A, (__v4sf)__B);
}

/* Perform logical bit-wise operations on 128-bit values.  */

static __inline __m128 __attribute__((__always_inline__))
_mm_and_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andps (__A, __B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_andnot_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andnps (__A, __B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_or_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_orps (__A, __B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_xor_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_xorps (__A, __B);
}

/* Perform a comparison on the lower SPFP values of A and B.  If the
   comparison is true, place a mask of all ones in the result, otherwise a
   mask of zeros.  The upper three SPFP values are passed through from A.  */

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpeq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmplt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmple_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpless ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpgt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
                                        (__v4sf)
                                        __builtin_ia32_cmpltss ((__v4sf) __B,
                                                                (__v4sf) __A));
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
                                        (__v4sf)
                                        __builtin_ia32_cmpless ((__v4sf) __B,
                                                                (__v4sf) __A));
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpneq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpnlt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpnle_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnless ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpngt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
                                        (__v4sf)
                                        __builtin_ia32_cmpnltss ((__v4sf) __B,
                                                                 (__v4sf) __A));
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpnge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
                                        (__v4sf)
                                        __builtin_ia32_cmpnless ((__v4sf) __B,
                                                                 (__v4sf) __A));
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpunord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordss ((__v4sf)__A, (__v4sf)__B);
}
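
/* Note for the intrinsics above: there are no CMPGTSS/CMPGESS
   encodings, so the gt/ge (and ngt/nge) forms swap the operands of
   CMPLTSS/CMPLESS and use MOVSS to keep the upper three elements of A.
   _mm_cmpord_ss yields the all-ones mask when neither operand is a
   NaN; _mm_cmpunord_ss when at least one is.  */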

/* Perform a comparison on the four SPFP values of A and B.  For each
   element, if the comparison is true, place a mask of all ones in the
   result, otherwise a mask of zeros.  */

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpeq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmplt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmple_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpleps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpgt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgtps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgeps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpneq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpnlt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpnle_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnleps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpngt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngtps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpnge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngeps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpunord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordps ((__v4sf)__A, (__v4sf)__B);
}

/* Compare the lower SPFP values of A and B and return 1 if true
   otherwise 0.  */

static __inline int __attribute__((__always_inline__))
_mm_comieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comieq ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comilt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comile ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comigt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comige ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comineq ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomieq ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomilt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomile ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomigt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomige ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomineq ((__v4sf)__A, (__v4sf)__B);
}
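
/* Note: the _mm_comi* intrinsics compile to COMISS, which raises the
   invalid-operation exception on QNaN as well as SNaN operands; the
   _mm_ucomi* forms compile to UCOMISS, which signals only on SNaN.  */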

/* Convert the lower SPFP value to a 32-bit integer according to the
   current rounding mode.  */
static __inline int __attribute__((__always_inline__))
_mm_cvtss_si32 (__m128 __A)
{
  return __builtin_ia32_cvtss2si ((__v4sf) __A);
}

static __inline int __attribute__((__always_inline__))
_mm_cvt_ss2si (__m128 __A)
{
  return _mm_cvtss_si32 (__A);
}

#ifdef __x86_64__
/* Convert the lower SPFP value to a 64-bit integer according to the
   current rounding mode.  */

/* Intel intrinsic.  */
static __inline long long __attribute__((__always_inline__))
_mm_cvtss_si64 (__m128 __A)
{
  return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
}

/* Microsoft intrinsic.  */
static __inline long long __attribute__((__always_inline__))
_mm_cvtss_si64x (__m128 __A)
{
  return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
}
#endif

/* Convert the two lower SPFP values to 32-bit integers according to the
   current rounding mode.  Return the integers in packed form.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_cvtps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvtps2pi ((__v4sf) __A);
}

static __inline __m64 __attribute__((__always_inline__))
_mm_cvt_ps2pi (__m128 __A)
{
  return _mm_cvtps_pi32 (__A);
}

/* Truncate the lower SPFP value to a 32-bit integer.  */
static __inline int __attribute__((__always_inline__))
_mm_cvttss_si32 (__m128 __A)
{
  return __builtin_ia32_cvttss2si ((__v4sf) __A);
}

static __inline int __attribute__((__always_inline__))
_mm_cvtt_ss2si (__m128 __A)
{
  return _mm_cvttss_si32 (__A);
}

#ifdef __x86_64__
/* Truncate the lower SPFP value to a 64-bit integer.  */

/* Intel intrinsic.  */
static __inline long long __attribute__((__always_inline__))
_mm_cvttss_si64 (__m128 __A)
{
  return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
}

/* Microsoft intrinsic.  */
static __inline long long __attribute__((__always_inline__))
_mm_cvttss_si64x (__m128 __A)
{
  return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
}
#endif

/* Truncate the two lower SPFP values to 32-bit integers.  Return the
   integers in packed form.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_cvttps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvttps2pi ((__v4sf) __A);
}

static __inline __m64 __attribute__((__always_inline__))
_mm_cvtt_ps2pi (__m128 __A)
{
  return _mm_cvttps_pi32 (__A);
}

/* Convert B to a SPFP value and insert it as element zero in A.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtsi32_ss (__m128 __A, int __B)
{
  return (__m128) __builtin_ia32_cvtsi2ss ((__v4sf) __A, __B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cvt_si2ss (__m128 __A, int __B)
{
  return _mm_cvtsi32_ss (__A, __B);
}

#ifdef __x86_64__
/* Convert B to a SPFP value and insert it as element zero in A.  */

/* Intel intrinsic.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtsi64_ss (__m128 __A, long long __B)
{
  return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
}

/* Microsoft intrinsic.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtsi64x_ss (__m128 __A, long long __B)
{
  return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
}
#endif

/* Convert the two 32-bit values in B to SPFP form and insert them
   as the two lower elements in A.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtpi32_ps (__m128 __A, __m64 __B)
{
  return (__m128) __builtin_ia32_cvtpi2ps ((__v4sf) __A, (__v2si)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cvt_pi2ps (__m128 __A, __m64 __B)
{
  return _mm_cvtpi32_ps (__A, __B);
}

/* Convert the four signed 16-bit values in A to SPFP form.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtpi16_ps (__m64 __A)
{
  __v4hi __sign;
  __v2si __hisi, __losi;
  __v4sf __r;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = __builtin_ia32_pcmpgtw ((__v4hi)0LL, (__v4hi)__A);

  /* Convert the four words to doublewords.  */
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, __sign);
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, __sign);

  /* Convert the doublewords to floating point two at a time.  */
  __r = (__v4sf) _mm_setzero_ps ();
  __r = __builtin_ia32_cvtpi2ps (__r, __hisi);
  __r = __builtin_ia32_movlhps (__r, __r);
  __r = __builtin_ia32_cvtpi2ps (__r, __losi);

  return (__m128) __r;
}

/* Convert the four unsigned 16-bit values in A to SPFP form.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtpu16_ps (__m64 __A)
{
  __v2si __hisi, __losi;
  __v4sf __r;

  /* Convert the four words to doublewords.  */
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, (__v4hi)0LL);
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, (__v4hi)0LL);

  /* Convert the doublewords to floating point two at a time.  */
  __r = (__v4sf) _mm_setzero_ps ();
  __r = __builtin_ia32_cvtpi2ps (__r, __hisi);
  __r = __builtin_ia32_movlhps (__r, __r);
  __r = __builtin_ia32_cvtpi2ps (__r, __losi);

  return (__m128) __r;
}

/* Convert the low four signed 8-bit values in A to SPFP form.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtpi8_ps (__m64 __A)
{
  __v8qi __sign;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = __builtin_ia32_pcmpgtb ((__v8qi)0LL, (__v8qi)__A);

  /* Convert the four low bytes to words.  */
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, __sign);

  return _mm_cvtpi16_ps(__A);
}

/* Convert the low four unsigned 8-bit values in A to SPFP form.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtpu8_ps(__m64 __A)
{
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, (__v8qi)0LL);
  return _mm_cvtpu16_ps(__A);
}

/* Convert the four signed 32-bit values in A and B to SPFP form.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtpi32x2_ps(__m64 __A, __m64 __B)
{
  __v4sf __zero = (__v4sf) _mm_setzero_ps ();
  __v4sf __sfa = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__A);
  __v4sf __sfb = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__B);
  return (__m128) __builtin_ia32_movlhps (__sfa, __sfb);
}

/* Convert the four SPFP values in A to four signed 16-bit integers.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_cvtps_pi16(__m128 __A)
{
  __v4sf __hisf = (__v4sf)__A;
  __v4sf __losf = __builtin_ia32_movhlps (__hisf, __hisf);
  __v2si __hisi = __builtin_ia32_cvtps2pi (__hisf);
  __v2si __losi = __builtin_ia32_cvtps2pi (__losf);
  return (__m64) __builtin_ia32_packssdw (__hisi, __losi);
}

/* Convert the four SPFP values in A to four signed 8-bit integers.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_cvtps_pi8(__m128 __A)
{
  __v4hi __tmp = (__v4hi) _mm_cvtps_pi16 (__A);
  return (__m64) __builtin_ia32_packsswb (__tmp, (__v4hi)0LL);
}

/* Selects four specific SPFP values from A and B based on MASK.  */
#if 0
static __inline __m128 __attribute__((__always_inline__))
_mm_shuffle_ps (__m128 __A, __m128 __B, int __mask)
{
  return (__m128) __builtin_ia32_shufps ((__v4sf)__A, (__v4sf)__B, __mask);
}
#else
#define _mm_shuffle_ps(A, B, MASK) \
 ((__m128) __builtin_ia32_shufps ((__v4sf)(A), (__v4sf)(B), (MASK)))
#endif
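
/* For example, _mm_shuffle_ps (A, B, _MM_SHUFFLE (1, 0, 3, 2)) returns
   { A[2], A[3], B[0], B[1] }: the low two result elements are selected
   from A and the high two from B.  */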

/* Selects and interleaves the upper two SPFP values from A and B.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_unpackhi_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpckhps ((__v4sf)__A, (__v4sf)__B);
}

/* Selects and interleaves the lower two SPFP values from A and B.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_unpacklo_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpcklps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the upper two SPFP values with 64-bits of data loaded from P;
   the lower two values are passed through from A.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_loadh_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadhps ((__v4sf)__A, (__v2si *)__P);
}

/* Stores the upper two SPFP values of A into P.  */
static __inline void __attribute__((__always_inline__))
_mm_storeh_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storehps ((__v2si *)__P, (__v4sf)__A);
}

/* Moves the upper two values of B into the lower two values of A.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_movehl_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movhlps ((__v4sf)__A, (__v4sf)__B);
}

/* Moves the lower two values of B into the upper two values of A.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_movelh_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movlhps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the lower two SPFP values with 64-bits of data loaded from P;
   the upper two values are passed through from A.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_loadl_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadlps ((__v4sf)__A, (__v2si *)__P);
}

/* Stores the lower two SPFP values of A into P.  */
static __inline void __attribute__((__always_inline__))
_mm_storel_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storelps ((__v2si *)__P, (__v4sf)__A);
}

/* Creates a 4-bit mask from the most significant bits of the SPFP values.  */
static __inline int __attribute__((__always_inline__))
_mm_movemask_ps (__m128 __A)
{
  return __builtin_ia32_movmskps ((__v4sf)__A);
}

/* Return the contents of the control register.  */
static __inline unsigned int __attribute__((__always_inline__))
_mm_getcsr (void)
{
  return __builtin_ia32_stmxcsr ();
}

/* Read exception bits from the control register.  */
static __inline unsigned int __attribute__((__always_inline__))
_MM_GET_EXCEPTION_STATE (void)
{
  return _mm_getcsr() & _MM_EXCEPT_MASK;
}

static __inline unsigned int __attribute__((__always_inline__))
_MM_GET_EXCEPTION_MASK (void)
{
  return _mm_getcsr() & _MM_MASK_MASK;
}

static __inline unsigned int __attribute__((__always_inline__))
_MM_GET_ROUNDING_MODE (void)
{
  return _mm_getcsr() & _MM_ROUND_MASK;
}

static __inline unsigned int __attribute__((__always_inline__))
_MM_GET_FLUSH_ZERO_MODE (void)
{
  return _mm_getcsr() & _MM_FLUSH_ZERO_MASK;
}

/* Set the control register to I.  */
static __inline void __attribute__((__always_inline__))
_mm_setcsr (unsigned int __I)
{
  __builtin_ia32_ldmxcsr (__I);
}

/* Set exception bits in the control register.  */
static __inline void __attribute__((__always_inline__))
_MM_SET_EXCEPTION_STATE(unsigned int __mask)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | __mask);
}

static __inline void __attribute__((__always_inline__))
_MM_SET_EXCEPTION_MASK (unsigned int __mask)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | __mask);
}

static __inline void __attribute__((__always_inline__))
_MM_SET_ROUNDING_MODE (unsigned int __mode)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | __mode);
}

static __inline void __attribute__((__always_inline__))
_MM_SET_FLUSH_ZERO_MODE (unsigned int __mode)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | __mode);
}
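
/* For example, _MM_SET_ROUNDING_MODE (_MM_ROUND_TOWARD_ZERO) switches
   SSE arithmetic to truncating rounding, and
   _MM_SET_FLUSH_ZERO_MODE (_MM_FLUSH_ZERO_ON) makes denormal results
   flush to zero.  */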

/* Create a vector with element 0 as F and the rest zero.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_set_ss (float __F)
{
  return __extension__ (__m128)(__v4sf){ __F, 0, 0, 0 };
}

/* Create a vector with all four elements equal to F.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_set1_ps (float __F)
{
  return __extension__ (__m128)(__v4sf){ __F, __F, __F, __F };
}

static __inline __m128 __attribute__((__always_inline__))
_mm_set_ps1 (float __F)
{
  return _mm_set1_ps (__F);
}

/* Create a vector with element 0 as *P and the rest zero.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_load_ss (float const *__P)
{
  return _mm_set_ss (*__P);
}

/* Create a vector with all four elements equal to *P.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_load1_ps (float const *__P)
{
  return _mm_set1_ps (*__P);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_load_ps1 (float const *__P)
{
  return _mm_load1_ps (__P);
}

/* Load four SPFP values from P.  The address must be 16-byte aligned.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_load_ps (float const *__P)
{
  return (__m128) *(__v4sf *)__P;
}

/* Load four SPFP values from P.  The address need not be 16-byte aligned.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_loadu_ps (float const *__P)
{
  return (__m128) __builtin_ia32_loadups (__P);
}

/* Load four SPFP values in reverse order.  The address must be aligned.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_loadr_ps (float const *__P)
{
  __v4sf __tmp = *(__v4sf *)__P;
  return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,1,2,3));
}

/* Create the vector [Z Y X W].  */
static __inline __m128 __attribute__((__always_inline__))
_mm_set_ps (const float __Z, const float __Y, const float __X, const float __W)
{
  return __extension__ (__m128)(__v4sf){ __W, __X, __Y, __Z };
}

/* Create the vector [W X Y Z].  */
static __inline __m128 __attribute__((__always_inline__))
_mm_setr_ps (float __Z, float __Y, float __X, float __W)
{
  return __extension__ (__m128)(__v4sf){ __Z, __Y, __X, __W };
}

/* Stores the lower SPFP value.  */
static __inline void __attribute__((__always_inline__))
_mm_store_ss (float *__P, __m128 __A)
{
  *__P = __builtin_ia32_vec_ext_v4sf ((__v4sf)__A, 0);
}

static __inline float __attribute__((__always_inline__))
_mm_cvtss_f32 (__m128 __A)
{
  return __builtin_ia32_vec_ext_v4sf ((__v4sf)__A, 0);
}

/* Store four SPFP values.  The address must be 16-byte aligned.  */
static __inline void __attribute__((__always_inline__))
_mm_store_ps (float *__P, __m128 __A)
{
  *(__v4sf *)__P = (__v4sf)__A;
}

/* Store four SPFP values.  The address need not be 16-byte aligned.  */
static __inline void __attribute__((__always_inline__))
_mm_storeu_ps (float *__P, __m128 __A)
{
  __builtin_ia32_storeups (__P, (__v4sf)__A);
}

/* Store the lower SPFP value across four words.  */
static __inline void __attribute__((__always_inline__))
_mm_store1_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,0,0,0));
  _mm_storeu_ps (__P, __tmp);
}

static __inline void __attribute__((__always_inline__))
_mm_store_ps1 (float *__P, __m128 __A)
{
  _mm_store1_ps (__P, __A);
}

/* Store four SPFP values in reverse order.  The address must be aligned.  */
static __inline void __attribute__((__always_inline__))
_mm_storer_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,1,2,3));
  _mm_store_ps (__P, __tmp);
}

/* Sets the low SPFP value of A from the low value of B.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_move_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf)__A, (__v4sf)__B);
}

/* Extracts one of the four words of A.  The selector N must be immediate.  */
#if 0
static __inline int __attribute__((__always_inline__))
_mm_extract_pi16 (__m64 const __A, int const __N)
{
  return __builtin_ia32_vec_ext_v4hi ((__v4hi)__A, __N);
}

static __inline int __attribute__((__always_inline__))
_m_pextrw (__m64 const __A, int const __N)
{
  return _mm_extract_pi16 (__A, __N);
}
#else
#define _mm_extract_pi16(A, N)  __builtin_ia32_vec_ext_v4hi ((__v4hi)(A), (N))
#define _m_pextrw(A, N)         _mm_extract_pi16((A), (N))
#endif

/* Inserts word D into one of four words of A.  The selector N must be
   an immediate.  */
#if 0
static __inline __m64 __attribute__((__always_inline__))
_mm_insert_pi16 (__m64 const __A, int const __D, int const __N)
{
  return (__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)__A, __D, __N);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pinsrw (__m64 const __A, int const __D, int const __N)
{
  return _mm_insert_pi16 (__A, __D, __N);
}
#else
#define _mm_insert_pi16(A, D, N) \
  ((__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)(A), (D), (N)))
#define _m_pinsrw(A, D, N) _mm_insert_pi16((A), (D), (N))
#endif

/* Compute the element-wise maximum of signed 16-bit values.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_max_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxsw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pmaxsw (__m64 __A, __m64 __B)
{
  return _mm_max_pi16 (__A, __B);
}

/* Compute the element-wise maximum of unsigned 8-bit values.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_max_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxub ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pmaxub (__m64 __A, __m64 __B)
{
  return _mm_max_pu8 (__A, __B);
}

/* Compute the element-wise minimum of signed 16-bit values.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_min_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminsw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pminsw (__m64 __A, __m64 __B)
{
  return _mm_min_pi16 (__A, __B);
}

/* Compute the element-wise minimum of unsigned 8-bit values.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_min_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminub ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pminub (__m64 __A, __m64 __B)
{
  return _mm_min_pu8 (__A, __B);
}

/* Create an 8-bit mask of the signs of 8-bit values.  */
static __inline int __attribute__((__always_inline__))
_mm_movemask_pi8 (__m64 __A)
{
  return __builtin_ia32_pmovmskb ((__v8qi)__A);
}

static __inline int __attribute__((__always_inline__))
_m_pmovmskb (__m64 __A)
{
  return _mm_movemask_pi8 (__A);
}

/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values
   in B and produce the high 16 bits of the 32-bit results.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_mulhi_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmulhuw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pmulhuw (__m64 __A, __m64 __B)
{
  return _mm_mulhi_pu16 (__A, __B);
}

/* Return a combination of the four 16-bit values in A.  The selector
   must be an immediate.  */
#if 0
static __inline __m64 __attribute__((__always_inline__))
_mm_shuffle_pi16 (__m64 __A, int __N)
{
  return (__m64) __builtin_ia32_pshufw ((__v4hi)__A, __N);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pshufw (__m64 __A, int __N)
{
  return _mm_shuffle_pi16 (__A, __N);
}
#else
#define _mm_shuffle_pi16(A, N) \
  ((__m64) __builtin_ia32_pshufw ((__v4hi)(A), (N)))
#define _m_pshufw(A, N) _mm_shuffle_pi16 ((A), (N))
#endif
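
/* For example, _mm_shuffle_pi16 (A, _MM_SHUFFLE (0, 1, 2, 3)) reverses
   the order of the four 16-bit values in A.  */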

/* Conditionally store byte elements of A into P.  The high bit of each
   byte in the selector N determines whether the corresponding byte from
   A is stored.  */
static __inline void __attribute__((__always_inline__))
_mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P)
{
  __builtin_ia32_maskmovq ((__v8qi)__A, (__v8qi)__N, __P);
}

static __inline void __attribute__((__always_inline__))
_m_maskmovq (__m64 __A, __m64 __N, char *__P)
{
  _mm_maskmove_si64 (__A, __N, __P);
}

/* Compute the rounded averages of the unsigned 8-bit values in A and B.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_avg_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgb ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pavgb (__m64 __A, __m64 __B)
{
  return _mm_avg_pu8 (__A, __B);
}

/* Compute the rounded averages of the unsigned 16-bit values in A and B.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_avg_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pavgw (__m64 __A, __m64 __B)
{
  return _mm_avg_pu16 (__A, __B);
}

/* Compute the sum of the absolute differences of the unsigned 8-bit
   values in A and B.  Return the value in the lower 16-bit word; the
   upper words are cleared.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_sad_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_psadbw ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_psadbw (__m64 __A, __m64 __B)
{
  return _mm_sad_pu8 (__A, __B);
}

/* Loads one cache line from address P to a location "closer" to the
   processor.  The selector I specifies the type of prefetch operation.  */
#if 0
static __inline void __attribute__((__always_inline__))
_mm_prefetch (void *__P, enum _mm_hint __I)
{
  __builtin_prefetch (__P, 0, __I);
}
#else
#define _mm_prefetch(P, I) \
  __builtin_prefetch ((P), 0, (I))
#endif
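
/* For example, _mm_prefetch (p, _MM_HINT_NTA) requests a non-temporal
   prefetch of the cache line containing p, minimizing cache pollution.  */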

/* Stores the data in A to the address P without polluting the caches.  */
static __inline void __attribute__((__always_inline__))
_mm_stream_pi (__m64 *__P, __m64 __A)
{
  __builtin_ia32_movntq ((unsigned long long *)__P, (unsigned long long)__A);
}

/* Likewise.  The address must be 16-byte aligned.  */
static __inline void __attribute__((__always_inline__))
_mm_stream_ps (float *__P, __m128 __A)
{
  __builtin_ia32_movntps (__P, (__v4sf)__A);
}
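
/* Note: these non-temporal stores are weakly ordered with respect to
   other stores; use _mm_sfence below when subsequent stores must not
   become visible before the streamed data.  */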

/* Guarantees that every preceding store is globally visible before
   any subsequent store.  */
static __inline void __attribute__((__always_inline__))
_mm_sfence (void)
{
  __builtin_ia32_sfence ();
}

/* The execution of the next instruction is delayed by an implementation
   specific amount of time.  The instruction does not modify the
   architectural state.  */
static __inline void __attribute__((__always_inline__))
_mm_pause (void)
{
  __asm__ __volatile__ ("rep; nop" : : );
}

/* Transpose the 4x4 matrix composed of row[0-3].  */
#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3)                       \
do {                                                                    \
  __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3);    \
  __v4sf __t0 = __builtin_ia32_unpcklps (__r0, __r1);                   \
  __v4sf __t1 = __builtin_ia32_unpcklps (__r2, __r3);                   \
  __v4sf __t2 = __builtin_ia32_unpckhps (__r0, __r1);                   \
  __v4sf __t3 = __builtin_ia32_unpckhps (__r2, __r3);                   \
  (row0) = __builtin_ia32_movlhps (__t0, __t1);                         \
  (row1) = __builtin_ia32_movhlps (__t1, __t0);                         \
  (row2) = __builtin_ia32_movlhps (__t2, __t3);                         \
  (row3) = __builtin_ia32_movhlps (__t3, __t2);                         \
} while (0)
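
/* For example, if __m128 row0..row3 hold a 4x4 matrix in row-major
   order, _MM_TRANSPOSE4_PS (row0, row1, row2, row3) rewrites the four
   rows in place with the transposed matrix.  */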

/* For backward source compatibility.  */
#include <emmintrin.h>

#endif /* __SSE__ */
#endif /* _XMMINTRIN_H_INCLUDED */