/*===---- arm_acle.h - ARM Non-Neon intrinsics -----------------------------===
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef __ARM_ACLE_H
#define __ARM_ACLE_H

#ifndef __ARM_ACLE
#error "ACLE intrinsics support not enabled."
#endif

#include <stdint.h>

#if defined(__cplusplus)
extern "C" {
#endif

/* 8 SYNCHRONIZATION, BARRIER AND HINT INTRINSICS */
/* 8.3 Memory barriers */
#if !defined(_MSC_VER)
#define __dmb(i) __builtin_arm_dmb(i)
#define __dsb(i) __builtin_arm_dsb(i)
#define __isb(i) __builtin_arm_isb(i)
#endif
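
/* Usage sketch (editorial comment, not part of the original header): the
 * argument selects the barrier domain/type; 0xF ("SY") is the full-system
 * variant, e.g.
 *   __dmb(0xF);  // data memory barrier, full system
 *   __isb(0xF);  // flush the pipeline before fetching newer instructions
 */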

/* 8.4 Hints */

#if !defined(_MSC_VER)
static __inline__ void __attribute__((__always_inline__, __nodebug__)) __wfi(void) {
  __builtin_arm_wfi();
}

static __inline__ void __attribute__((__always_inline__, __nodebug__)) __wfe(void) {
  __builtin_arm_wfe();
}

static __inline__ void __attribute__((__always_inline__, __nodebug__)) __sev(void) {
  __builtin_arm_sev();
}

static __inline__ void __attribute__((__always_inline__, __nodebug__)) __sevl(void) {
  __builtin_arm_sevl();
}

static __inline__ void __attribute__((__always_inline__, __nodebug__)) __yield(void) {
  __builtin_arm_yield();
}
#endif

#if __ARM_32BIT_STATE
#define __dbg(t) __builtin_arm_dbg(t)
#endif

/* 8.5 Swap */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__swp(uint32_t __x, volatile uint32_t *__p) {
  uint32_t v;
  do
    v = __builtin_arm_ldrex(__p);
  while (__builtin_arm_strex(__x, __p));
  return v;
}
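
/* The loop above is the usual load-exclusive/store-exclusive pattern:
 * __builtin_arm_strex returns 0 only when the store succeeds, so the swap
 * retries until *__p has been atomically replaced by __x, and the previously
 * loaded value is returned. */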

/* 8.6 Memory prefetch intrinsics */
/* 8.6.1 Data prefetch */
#define __pld(addr) __pldx(0, 0, 0, addr)

#if __ARM_32BIT_STATE
#define __pldx(access_kind, cache_level, retention_policy, addr) \
  __builtin_arm_prefetch(addr, access_kind, 1)
#else
#define __pldx(access_kind, cache_level, retention_policy, addr) \
  __builtin_arm_prefetch(addr, access_kind, cache_level, retention_policy, 1)
#endif

/* 8.6.2 Instruction prefetch */
#define __pli(addr) __plix(0, 0, addr)

#if __ARM_32BIT_STATE
#define __plix(cache_level, retention_policy, addr) \
  __builtin_arm_prefetch(addr, 0, 0)
#else
#define __plix(cache_level, retention_policy, addr) \
  __builtin_arm_prefetch(addr, 0, cache_level, retention_policy, 0)
#endif
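
/* Illustrative use (p and fn are placeholders, not defined in this header):
 *   __pld(p);            // data prefetch for read
 *   __pldx(1, 0, 0, p);  // data prefetch for write (access_kind == 1)
 *   __pli(fn);           // instruction prefetch of a code address
 */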

/* 8.7 NOP */
static __inline__ void __attribute__((__always_inline__, __nodebug__)) __nop(void) {
  __builtin_arm_nop();
}

/* 9 DATA-PROCESSING INTRINSICS */
/* 9.2 Miscellaneous data-processing intrinsics */
/* ROR */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__ror(uint32_t __x, uint32_t __y) {
  __y %= 32;
  if (__y == 0)
    return __x;
  return (__x >> __y) | (__x << (32 - __y));
}

static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
__rorll(uint64_t __x, uint32_t __y) {
  __y %= 64;
  if (__y == 0)
    return __x;
  return (__x >> __y) | (__x << (64 - __y));
}
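
/* Example: rotating right by 8 moves the low byte to the top,
 *   __ror(0x12345678, 8) == 0x78123456.
 * The explicit modulo and zero check above avoid shifting a 32-bit value by
 * 32 bits, which would be undefined behavior in C, when the rotate count is
 * a multiple of the width. */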

static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
__rorl(unsigned long __x, uint32_t __y) {
#if __SIZEOF_LONG__ == 4
  return __ror(__x, __y);
#else
  return __rorll(__x, __y);
#endif
}

/* CLZ */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__clz(uint32_t __t) {
  return __builtin_clz(__t);
}

static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
__clzl(unsigned long __t) {
  return __builtin_clzl(__t);
}

static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
__clzll(uint64_t __t) {
  return __builtin_clzll(__t);
}
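
/* Editorial note: at the C level __builtin_clz(0) is undefined, even though
 * the ARM CLZ instruction returns 32 for a zero input, so callers should not
 * pass 0. E.g. __clz(1) == 31 and __clz(0x80000000) == 0. */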

/* REV */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__rev(uint32_t __t) {
  return __builtin_bswap32(__t);
}

static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
__revl(unsigned long __t) {
#if __SIZEOF_LONG__ == 4
  return __builtin_bswap32(__t);
#else
  return __builtin_bswap64(__t);
#endif
}

static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
__revll(uint64_t __t) {
  return __builtin_bswap64(__t);
}

/* REV16 */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__rev16(uint32_t __t) {
  return __ror(__rev(__t), 16);
}
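
/* REV16 swaps the two bytes inside each 16-bit halfword; a full 32-bit byte
 * swap followed by a 16-bit rotate, as above, is equivalent:
 *   __rev16(0xAABBCCDD) == 0xBBAADDCC
 */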

static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
__rev16ll(uint64_t __t) {
  return (((uint64_t)__rev16(__t >> 32)) << 32) | __rev16(__t);
}

static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
__rev16l(unsigned long __t) {
#if __SIZEOF_LONG__ == 4
  return __rev16(__t);
#else
  return __rev16ll(__t);
#endif
}

/* REVSH */
static __inline__ int16_t __attribute__((__always_inline__, __nodebug__))
__revsh(int16_t __t) {
  return __builtin_bswap16(__t);
}

/* RBIT */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__rbit(uint32_t __t) {
  return __builtin_arm_rbit(__t);
}

static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
__rbitll(uint64_t __t) {
#if __ARM_32BIT_STATE
  return (((uint64_t)__builtin_arm_rbit(__t)) << 32) |
         __builtin_arm_rbit(__t >> 32);
#else
  return __builtin_arm_rbit64(__t);
#endif
}
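
/* AArch32 has no 64-bit RBIT, so the 64-bit reversal above is composed from
 * two 32-bit reversals with the halves exchanged: the reversed low word
 * becomes the high word of the result, and vice versa. */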

static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
__rbitl(unsigned long __t) {
#if __SIZEOF_LONG__ == 4
  return __rbit(__t);
#else
  return __rbitll(__t);
#endif
}

/*
 * 9.3 16-bit multiplications
 */
#if __ARM_FEATURE_DSP
static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
__smulbb(int32_t __a, int32_t __b) {
  return __builtin_arm_smulbb(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
__smulbt(int32_t __a, int32_t __b) {
  return __builtin_arm_smulbt(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
__smultb(int32_t __a, int32_t __b) {
  return __builtin_arm_smultb(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
__smultt(int32_t __a, int32_t __b) {
  return __builtin_arm_smultt(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
__smulwb(int32_t __a, int32_t __b) {
  return __builtin_arm_smulwb(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
__smulwt(int32_t __a, int32_t __b) {
  return __builtin_arm_smulwt(__a, __b);
}
#endif
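
/* Naming key (editorial): the b/t suffixes pick the bottom or top 16-bit
 * half of each operand (__smulbt multiplies the bottom half of __a by the
 * top half of __b), while the w forms multiply all 32 bits of __a by one
 * half of __b and return the top 32 bits of the 48-bit product. */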

/*
 * 9.4 Saturating intrinsics
 *
 * FIXME: Change guard to their corresponding __ARM_FEATURE flag when Q flag
 * intrinsics are implemented and the flag is enabled.
 */
/* 9.4.1 Width-specified saturation intrinsics */
#if __ARM_FEATURE_SAT
#define __ssat(x, y) __builtin_arm_ssat(x, y)
#define __usat(x, y) __builtin_arm_usat(x, y)
#endif
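
/* Illustrative semantics: __ssat(x, n) clamps x to the signed n-bit range
 * [-2^(n-1), 2^(n-1) - 1] and __usat(x, n) to [0, 2^n - 1]; for example,
 * __ssat(200, 8) == 127. The width must be a compile-time constant. */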

/* 9.4.2 Saturating addition and subtraction intrinsics */
#if __ARM_FEATURE_DSP
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__qadd(int32_t __t, int32_t __v) {
  return __builtin_arm_qadd(__t, __v);
}

static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__qsub(int32_t __t, int32_t __v) {
  return __builtin_arm_qsub(__t, __v);
}

static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__qdbl(int32_t __t) {
  return __builtin_arm_qadd(__t, __t);
}
#endif
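
/* __qdbl is saturating doubling; there is no separate instruction, it is
 * simply a saturating add of a value to itself, so __qdbl(0x7FFFFFFF) stays
 * at 0x7FFFFFFF rather than wrapping. */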

/* 9.4.3 Accumulating multiplications */
#if __ARM_FEATURE_DSP
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlabb(int32_t __a, int32_t __b, int32_t __c) {
  return __builtin_arm_smlabb(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlabt(int32_t __a, int32_t __b, int32_t __c) {
  return __builtin_arm_smlabt(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlatb(int32_t __a, int32_t __b, int32_t __c) {
  return __builtin_arm_smlatb(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlatt(int32_t __a, int32_t __b, int32_t __c) {
  return __builtin_arm_smlatt(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlawb(int32_t __a, int32_t __b, int32_t __c) {
  return __builtin_arm_smlawb(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlawt(int32_t __a, int32_t __b, int32_t __c) {
  return __builtin_arm_smlawt(__a, __b, __c);
}
#endif

/* 9.5.4 Parallel 16-bit saturation */
#if __ARM_FEATURE_SIMD32
#define __ssat16(x, y) __builtin_arm_ssat16(x, y)
#define __usat16(x, y) __builtin_arm_usat16(x, y)
#endif

/* 9.5.5 Packing and unpacking */
#if __ARM_FEATURE_SIMD32
typedef int32_t int8x4_t;
typedef int32_t int16x2_t;
typedef uint32_t uint8x4_t;
typedef uint32_t uint16x2_t;
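
/* Editorial note: these lane types are ordinary 32-bit integers. The SIMD32
 * intrinsics reinterpret one word as four 8-bit or two 16-bit lanes, so
 * packing and unpacking is done with plain shifts and masks, e.g.
 *   int16x2_t v = (int16x2_t)((hi << 16) | (lo & 0xFFFF));  // hypothetical hi/lo
 */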

static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__sxtab16(int16x2_t __a, int8x4_t __b) {
  return __builtin_arm_sxtab16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__sxtb16(int8x4_t __a) {
  return __builtin_arm_sxtb16(__a);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__uxtab16(int16x2_t __a, int8x4_t __b) {
  return __builtin_arm_uxtab16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__uxtb16(int8x4_t __a) {
  return __builtin_arm_uxtb16(__a);
}
#endif

/* 9.5.6 Parallel selection */
#if __ARM_FEATURE_SIMD32
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__sel(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_sel(__a, __b);
}
#endif
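
/* __sel has no mask operand: it selects each byte lane from __a or __b based
 * on the APSR.GE flags left behind by a preceding parallel add or subtract
 * such as __sadd8 or __uadd16. */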

/* 9.5.7 Parallel 8-bit addition and subtraction */
#if __ARM_FEATURE_SIMD32
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__qadd8(int8x4_t __a, int8x4_t __b) {
  return __builtin_arm_qadd8(__a, __b);
}
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__qsub8(int8x4_t __a, int8x4_t __b) {
  return __builtin_arm_qsub8(__a, __b);
}
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__sadd8(int8x4_t __a, int8x4_t __b) {
  return __builtin_arm_sadd8(__a, __b);
}
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__shadd8(int8x4_t __a, int8x4_t __b) {
  return __builtin_arm_shadd8(__a, __b);
}
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__shsub8(int8x4_t __a, int8x4_t __b) {
  return __builtin_arm_shsub8(__a, __b);
}
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__ssub8(int8x4_t __a, int8x4_t __b) {
  return __builtin_arm_ssub8(__a, __b);
}
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__uadd8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_uadd8(__a, __b);
}
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__uhadd8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_uhadd8(__a, __b);
}
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__uhsub8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_uhsub8(__a, __b);
}
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__uqadd8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_uqadd8(__a, __b);
}
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__uqsub8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_uqsub8(__a, __b);
}
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__usub8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_usub8(__a, __b);
}
#endif

/* 9.5.8 Sum of 8-bit absolute differences */
#if __ARM_FEATURE_SIMD32
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__usad8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_usad8(__a, __b);
}
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__usada8(uint8x4_t __a, uint8x4_t __b, uint32_t __c) {
  return __builtin_arm_usada8(__a, __b, __c);
}
#endif
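
/* Classic sum-of-absolute-differences kernel: each intrinsic adds
 * |lane_i(__a) - lane_i(__b)| over the four byte lanes, and the accumulating
 * form satisfies __usada8(a, b, acc) == acc + __usad8(a, b). */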

/* 9.5.9 Parallel 16-bit addition and subtraction */
#if __ARM_FEATURE_SIMD32
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__qadd16(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_qadd16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__qasx(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_qasx(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__qsax(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_qsax(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__qsub16(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_qsub16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__sadd16(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_sadd16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__sasx(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_sasx(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__shadd16(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_shadd16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__shasx(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_shasx(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__shsax(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_shsax(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__shsub16(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_shsub16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__ssax(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_ssax(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__ssub16(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_ssub16(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uadd16(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uadd16(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uasx(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uasx(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uhadd16(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uhadd16(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uhasx(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uhasx(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uhsax(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uhsax(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uhsub16(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uhsub16(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uqadd16(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uqadd16(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uqasx(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uqasx(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uqsax(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uqsax(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uqsub16(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uqsub16(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__usax(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_usax(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__usub16(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_usub16(__a, __b);
}
#endif
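
/* Prefix key (editorial): the q forms saturate, the sh and uh forms halve
 * the result, and the asx/sax forms exchange the halfwords of the second
 * operand before performing an add/subtract or subtract/add pair. */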

/* 9.5.10 Parallel 16-bit multiplications */
#if __ARM_FEATURE_SIMD32
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlad(int16x2_t __a, int16x2_t __b, int32_t __c) {
  return __builtin_arm_smlad(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smladx(int16x2_t __a, int16x2_t __b, int32_t __c) {
  return __builtin_arm_smladx(__a, __b, __c);
}
static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
__smlald(int16x2_t __a, int16x2_t __b, int64_t __c) {
  return __builtin_arm_smlald(__a, __b, __c);
}
static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
__smlaldx(int16x2_t __a, int16x2_t __b, int64_t __c) {
  return __builtin_arm_smlaldx(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlsd(int16x2_t __a, int16x2_t __b, int32_t __c) {
  return __builtin_arm_smlsd(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlsdx(int16x2_t __a, int16x2_t __b, int32_t __c) {
  return __builtin_arm_smlsdx(__a, __b, __c);
}
static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
__smlsld(int16x2_t __a, int16x2_t __b, int64_t __c) {
  return __builtin_arm_smlsld(__a, __b, __c);
}
static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
__smlsldx(int16x2_t __a, int16x2_t __b, int64_t __c) {
  return __builtin_arm_smlsldx(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smuad(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_smuad(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smuadx(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_smuadx(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smusd(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_smusd(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smusdx(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_smusdx(__a, __b);
}
#endif
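
/* __smuad is a dual 16 x 16 multiply with the two products summed:
 * (a.bottom * b.bottom) + (a.top * b.top). The x variants exchange the
 * halfwords of __b first, and the smlald/smlsld family widens the
 * accumulator to 64 bits, which suits fixed-point dot products. */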

/* 9.7 CRC32 intrinsics */
#if __ARM_FEATURE_CRC32
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__crc32b(uint32_t __a, uint8_t __b) {
  return __builtin_arm_crc32b(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__crc32h(uint32_t __a, uint16_t __b) {
  return __builtin_arm_crc32h(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__crc32w(uint32_t __a, uint32_t __b) {
  return __builtin_arm_crc32w(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__crc32d(uint32_t __a, uint64_t __b) {
  return __builtin_arm_crc32d(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__crc32cb(uint32_t __a, uint8_t __b) {
  return __builtin_arm_crc32cb(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__crc32ch(uint32_t __a, uint16_t __b) {
  return __builtin_arm_crc32ch(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__crc32cw(uint32_t __a, uint32_t __b) {
  return __builtin_arm_crc32cw(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__crc32cd(uint32_t __a, uint64_t __b) {
  return __builtin_arm_crc32cd(__a, __b);
}
#endif
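
/* Sketch of a byte-stream checksum built on these intrinsics (editorial
 * example; buf and len are placeholders, and the init/final-invert
 * convention shown is the common zlib-style CRC-32 usage):
 *   uint32_t crc = 0xFFFFFFFF;
 *   for (size_t i = 0; i < len; ++i)
 *     crc = __crc32b(crc, buf[i]);
 *   crc = ~crc;
 * The __crc32c* forms use the Castagnoli polynomial instead. */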

/* 10.1 Special register intrinsics */
#define __arm_rsr(sysreg) __builtin_arm_rsr(sysreg)
#define __arm_rsr64(sysreg) __builtin_arm_rsr64(sysreg)
#define __arm_rsrp(sysreg) __builtin_arm_rsrp(sysreg)
#define __arm_wsr(sysreg, v) __builtin_arm_wsr(sysreg, v)
#define __arm_wsr64(sysreg, v) __builtin_arm_wsr64(sysreg, v)
#define __arm_wsrp(sysreg, v) __builtin_arm_wsrp(sysreg, v)
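
/* Illustrative read of a named system register (register availability and
 * the exact name string depend on the target; "cntvct_el0" here is an
 * AArch64 example, not guaranteed by this header):
 *   uint64_t ticks = __arm_rsr64("cntvct_el0");
 */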

#if defined(__cplusplus)
} /* extern "C" */
#endif

#endif /* __ARM_ACLE_H */