/*
 * Copyright 2009-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#ifndef CK_PR_X86_64_H
#define CK_PR_X86_64_H

#ifndef CK_PR_H
#error Do not include this file directly, use ck_pr.h
#endif

#include <ck_cc.h>
#include <ck_md.h>
#include <ck_stdint.h>
#include <ck_stdbool.h>
/*
 * The following represent supported atomic operations.
 * These operations may be emulated.
 */
#include "ck_f_pr.h"

/*
 * Support for TSX extensions.
 */
#ifdef CK_MD_RTM_ENABLE
#include "ck_pr_rtm.h"
#endif

/* Minimum requirements for the CK_PR interface are met. */
#define CK_F_PR
#ifdef CK_MD_UMP
#define CK_PR_LOCK_PREFIX
#else
#define CK_PR_LOCK_PREFIX "lock "
#endif
/*
 * Prevent speculative execution in busy-wait loops (P4 <=) or "predefined
 * delay".
 */
CK_CC_INLINE static void
ck_pr_stall(void)
{

	__asm__ __volatile__("pause" ::: "memory");
	return;
}
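
/*
 * Illustrative sketch (not part of the original header): a typical busy-wait
 * loop pairs ck_pr_stall() with an atomic load so the pipeline is not
 * flooded with speculative reads. ck_pr_load_int is assumed to be the public
 * wrapper that ck_pr.h derives from ck_pr_md_load_int below.
 *
 *	static void
 *	wait_for_flag(int *flag)
 *	{
 *
 *		while (ck_pr_load_int(flag) == 0)
 *			ck_pr_stall();
 *	}
 */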
#define CK_PR_FENCE(T, I)				\
	CK_CC_INLINE static void			\
	ck_pr_fence_strict_##T(void)			\
	{						\
		__asm__ __volatile__(I ::: "memory");	\
	}
/* Atomic operations are always serializing. */
CK_PR_FENCE(atomic, "")
CK_PR_FENCE(atomic_store, "")
CK_PR_FENCE(atomic_load, "")
CK_PR_FENCE(store_atomic, "")
CK_PR_FENCE(load_atomic, "")

/* Traditional fence interface. */
CK_PR_FENCE(load, "lfence")
CK_PR_FENCE(load_store, "mfence")
CK_PR_FENCE(store, "sfence")
CK_PR_FENCE(store_load, "mfence")
CK_PR_FENCE(memory, "mfence")

/* Below are stdatomic-style fences. */
/*
 * Provides load-store and store-store ordering. However, Intel specifies that
 * the WC memory model is relaxed. It is likely that an sfence *is* sufficient
 * (in particular, stores are not re-ordered with respect to prior loads and
 * it is really just the stores that are subject to re-ordering). However, we
 * take the conservative route as the manuals are too ambiguous for my taste.
 */
CK_PR_FENCE(release, "mfence")

/*
 * Provides load-load and load-store ordering. The lfence instruction ensures
 * that all prior load operations are complete before any subsequent
 * instructions actually begin execution. However, the manual also goes on to
 * describe WC memory as a relaxed model.
 */
CK_PR_FENCE(acquire, "mfence")

CK_PR_FENCE(acqrel, "mfence")
CK_PR_FENCE(lock, "mfence")
CK_PR_FENCE(unlock, "mfence")

#undef CK_PR_FENCE
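
/*
 * Illustrative sketch (not part of the original header): a release store
 * paired with an acquire load is the usual message-passing idiom. The
 * ck_pr_fence_release/ck_pr_fence_acquire and ck_pr_store_int/ck_pr_load_int
 * names are assumed to be the public wrappers ck_pr.h builds on top of the
 * strict variants above.
 *
 *	static int payload;
 *	static int ready;
 *
 *	static void
 *	producer(void)
 *	{
 *
 *		ck_pr_store_int(&payload, 42);
 *		ck_pr_fence_release();
 *		ck_pr_store_int(&ready, 1);
 *	}
 *
 *	static int
 *	consumer(void)
 *	{
 *
 *		while (ck_pr_load_int(&ready) == 0)
 *			ck_pr_stall();
 *		ck_pr_fence_acquire();
 *		return ck_pr_load_int(&payload);
 *	}
 */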
/*
 * Read for ownership. Older compilers will generate the 32-bit 3DNow!
 * variant, which is binary compatible with the x86-64 variant of prefetchw.
 */
#ifndef CK_F_PR_RFO
#define CK_F_PR_RFO
CK_CC_INLINE static void
ck_pr_rfo(const void *m)
{

	__asm__ __volatile__("prefetchw (%0)"
	    :
	    : "r" (m)
	    : "memory");

	return;
}
#endif /* CK_F_PR_RFO */
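
/*
 * Illustrative sketch (not part of the original header): prefetching a cache
 * line in exclusive state before writing to it can save a coherence
 * round-trip. The node layout here is hypothetical.
 *
 *	struct node {
 *		struct node *next;
 *		int value;
 *	};
 *
 *	static void
 *	node_update(struct node *n, int v)
 *	{
 *
 *		ck_pr_rfo(n);
 *		n->value = v;
 *	}
 */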
/*
 * Atomic fetch-and-store operations.
 */
#define CK_PR_FAS(S, M, T, C, I)				\
	CK_CC_INLINE static T					\
	ck_pr_fas_##S(M *target, T v)				\
	{							\
		__asm__ __volatile__(I " %0, %1"		\
					: "+m" (*(C *)target),	\
					  "+q" (v)		\
					:			\
					: "memory");		\
		return v;					\
	}

CK_PR_FAS(ptr, void, void *, char, "xchgq")

#define CK_PR_FAS_S(S, T, I) CK_PR_FAS(S, T, T, T, I)
#ifndef CK_PR_DISABLE_DOUBLE
CK_PR_FAS_S(double, double, "xchgq")
#endif
CK_PR_FAS_S(char, char, "xchgb")
CK_PR_FAS_S(uint, unsigned int, "xchgl")
CK_PR_FAS_S(int, int, "xchgl")
CK_PR_FAS_S(64, uint64_t, "xchgq")
CK_PR_FAS_S(32, uint32_t, "xchgl")
CK_PR_FAS_S(16, uint16_t, "xchgw")
CK_PR_FAS_S(8, uint8_t, "xchgb")

#undef CK_PR_FAS_S
#undef CK_PR_FAS
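
/*
 * Illustrative sketch (not part of the original header): xchg carries an
 * implicit lock prefix on x86, so fetch-and-store alone suffices for a toy
 * test-and-set lock. ck_pr_fence_unlock and ck_pr_store_uint are assumed to
 * be the public wrappers provided through ck_pr.h.
 *
 *	static void
 *	toy_lock(unsigned int *lock)
 *	{
 *
 *		while (ck_pr_fas_uint(lock, 1) != 0)
 *			ck_pr_stall();
 *	}
 *
 *	static void
 *	toy_unlock(unsigned int *lock)
 *	{
 *
 *		ck_pr_fence_unlock();
 *		ck_pr_store_uint(lock, 0);
 *	}
 */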
/*
 * Atomic load-from-memory operations.
 */
#define CK_PR_LOAD(S, M, T, C, I)					\
	CK_CC_INLINE static T						\
	ck_pr_md_load_##S(const M *target)				\
	{								\
		T r;							\
		__asm__ __volatile__(I " %1, %0"			\
					: "=q" (r)			\
					: "m" (*(const C *)target)	\
					: "memory");			\
		return (r);						\
	}

CK_PR_LOAD(ptr, void, void *, char, "movq")

#define CK_PR_LOAD_S(S, T, I) CK_PR_LOAD(S, T, T, T, I)
CK_PR_LOAD_S(char, char, "movb")
CK_PR_LOAD_S(uint, unsigned int, "movl")
CK_PR_LOAD_S(int, int, "movl")
#ifndef CK_PR_DISABLE_DOUBLE
CK_PR_LOAD_S(double, double, "movq")
#endif
CK_PR_LOAD_S(64, uint64_t, "movq")
CK_PR_LOAD_S(32, uint32_t, "movl")
CK_PR_LOAD_S(16, uint16_t, "movw")
CK_PR_LOAD_S(8, uint8_t, "movb")

#undef CK_PR_LOAD_S
#undef CK_PR_LOAD
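
/*
 * Illustrative sketch (not part of the original header): the md_ prefix
 * marks machine-dependent implementations; ck_pr.h is assumed to expose them
 * as ck_pr_load_*. A 64-bit load is a single movq and is atomic for
 * naturally aligned addresses.
 *
 *	static uint64_t
 *	read_seq(const uint64_t *seq)
 *	{
 *
 *		return ck_pr_load_64(seq);
 *	}
 */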
CK_CC_INLINE static void
ck_pr_load_64_2(const uint64_t target[2], uint64_t v[2])
{
	__asm__ __volatile__("movq %%rdx, %%rcx;"
			     "movq %%rax, %%rbx;"
			     CK_PR_LOCK_PREFIX "cmpxchg16b %2;"
				: "=a" (v[0]),
				  "=d" (v[1])
				: "m" (*(const uint64_t *)target)
				: "rbx", "rcx", "memory", "cc");
	return;
}

CK_CC_INLINE static void
ck_pr_load_ptr_2(const void *t, void *v)
{
	ck_pr_load_64_2(CK_CPP_CAST(const uint64_t *, t),
			CK_CPP_CAST(uint64_t *, v));
	return;
}
#define CK_PR_LOAD_2(S, W, T)						\
	CK_CC_INLINE static void					\
	ck_pr_md_load_##S##_##W(const T t[2], T v[2])			\
	{								\
		ck_pr_load_64_2((const uint64_t *)(const void *)t,	\
				(uint64_t *)(void *)v);			\
	}

CK_PR_LOAD_2(char, 16, char)
CK_PR_LOAD_2(int, 4, int)
CK_PR_LOAD_2(uint, 4, unsigned int)
CK_PR_LOAD_2(32, 4, uint32_t)
CK_PR_LOAD_2(16, 8, uint16_t)
CK_PR_LOAD_2(8, 16, uint8_t)

#undef CK_PR_LOAD_2
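
/*
 * Illustrative sketch (not part of the original header): cmpxchg16b is the
 * only way to read 16 bytes atomically here, so even a pure load is encoded
 * as a compare-and-swap. A tagged-pointer snapshot might look as follows;
 * the struct layout is hypothetical and must be 16-byte aligned.
 *
 *	struct tagged {
 *		void *ptr;
 *		uint64_t tag;
 *	} CK_CC_ALIGN(16);
 *
 *	static void
 *	tagged_snapshot(const struct tagged *src, struct tagged *dst)
 *	{
 *
 *		ck_pr_load_ptr_2(src, dst);
 *	}
 */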
/*
 * Atomic store-to-memory operations.
 */
#define CK_PR_STORE_IMM(S, M, T, C, I, K)				\
	CK_CC_INLINE static void					\
	ck_pr_md_store_##S(M *target, T v)				\
	{								\
		__asm__ __volatile__(I " %1, %0"			\
					: "=m" (*(C *)target)		\
					: K "q" (v)			\
					: "memory");			\
		return;							\
	}

#define CK_PR_STORE(S, M, T, C, I)				\
	CK_CC_INLINE static void				\
	ck_pr_md_store_##S(M *target, T v)			\
	{							\
		__asm__ __volatile__(I " %1, %0"		\
					: "=m" (*(C *)target)	\
					: "q" (v)		\
					: "memory");		\
		return;						\
	}
CK_PR_STORE_IMM(ptr, void, const void *, char, "movq", CK_CC_IMM_U32)
#ifndef CK_PR_DISABLE_DOUBLE
CK_PR_STORE(double, double, double, double, "movq")
#endif

#define CK_PR_STORE_S(S, T, I, K) CK_PR_STORE_IMM(S, T, T, T, I, K)

CK_PR_STORE_S(char, char, "movb", CK_CC_IMM_S32)
CK_PR_STORE_S(int, int, "movl", CK_CC_IMM_S32)
CK_PR_STORE_S(uint, unsigned int, "movl", CK_CC_IMM_U32)
CK_PR_STORE_S(64, uint64_t, "movq", CK_CC_IMM_U32)
CK_PR_STORE_S(32, uint32_t, "movl", CK_CC_IMM_U32)
CK_PR_STORE_S(16, uint16_t, "movw", CK_CC_IMM_U32)
CK_PR_STORE_S(8, uint8_t, "movb", CK_CC_IMM_U32)

#undef CK_PR_STORE_S
#undef CK_PR_STORE_IMM
#undef CK_PR_STORE
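
/*
 * Illustrative sketch (not part of the original header): the _IMM variants
 * allow an immediate operand, so storing a constant compiles to a single
 * mov-immediate rather than a register move. ck_pr_store_64 is assumed to
 * be the public wrapper for ck_pr_md_store_64.
 *
 *	static void
 *	reset_counter(uint64_t *c)
 *	{
 *
 *		ck_pr_store_64(c, 0);
 *	}
 */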
/*
 * Atomic fetch-and-add operations.
 */
#define CK_PR_FAA(S, M, T, C, I)					\
	CK_CC_INLINE static T						\
	ck_pr_faa_##S(M *target, T d)					\
	{								\
		__asm__ __volatile__(CK_PR_LOCK_PREFIX I " %1, %0"	\
					: "+m" (*(C *)target),		\
					  "+q" (d)			\
					:				\
					: "memory", "cc");		\
		return (d);						\
	}

CK_PR_FAA(ptr, void, uintptr_t, char, "xaddq")

#define CK_PR_FAA_S(S, T, I) CK_PR_FAA(S, T, T, T, I)
CK_PR_FAA_S(char, char, "xaddb")
CK_PR_FAA_S(uint, unsigned int, "xaddl")
CK_PR_FAA_S(int, int, "xaddl")
CK_PR_FAA_S(64, uint64_t, "xaddq")
CK_PR_FAA_S(32, uint32_t, "xaddl")
CK_PR_FAA_S(16, uint16_t, "xaddw")
CK_PR_FAA_S(8, uint8_t, "xaddb")

#undef CK_PR_FAA_S
#undef CK_PR_FAA
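
/*
 * Illustrative sketch (not part of the original header): fetch-and-add
 * returns the previous value, which is exactly what a ticket dispenser
 * needs.
 *
 *	static uint64_t
 *	next_ticket(uint64_t *counter)
 *	{
 *
 *		return ck_pr_faa_64(counter, 1);
 *	}
 */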
/*
 * Atomic store-only unary operations.
 */
#define CK_PR_UNARY(K, S, T, C, I)	\
	CK_PR_UNARY_R(K, S, T, C, I)	\
	CK_PR_UNARY_V(K, S, T, C, I)

#define CK_PR_UNARY_R(K, S, T, C, I)				\
	CK_CC_INLINE static void				\
	ck_pr_##K##_##S(T *target)				\
	{							\
		__asm__ __volatile__(CK_PR_LOCK_PREFIX I " %0"	\
					: "+m" (*(C *)target)	\
					:			\
					: "memory", "cc");	\
		return;						\
	}

#define CK_PR_UNARY_V(K, S, T, C, I)					\
	CK_CC_INLINE static void					\
	ck_pr_##K##_##S##_zero(T *target, bool *r)			\
	{								\
		__asm__ __volatile__(CK_PR_LOCK_PREFIX I " %0; setz %1"	\
					: "+m" (*(C *)target),		\
					  "=m" (*r)			\
					:				\
					: "memory", "cc");		\
		return;							\
	}

#define CK_PR_UNARY_S(K, S, T, I) CK_PR_UNARY(K, S, T, T, I)
#define CK_PR_GENERATE(K)				\
	CK_PR_UNARY(K, ptr, void, char, #K "q")		\
	CK_PR_UNARY_S(K, char, char, #K "b")		\
	CK_PR_UNARY_S(K, int, int, #K "l")		\
	CK_PR_UNARY_S(K, uint, unsigned int, #K "l")	\
	CK_PR_UNARY_S(K, 64, uint64_t, #K "q")		\
	CK_PR_UNARY_S(K, 32, uint32_t, #K "l")		\
	CK_PR_UNARY_S(K, 16, uint16_t, #K "w")		\
	CK_PR_UNARY_S(K, 8, uint8_t, #K "b")

CK_PR_GENERATE(inc)
CK_PR_GENERATE(dec)
CK_PR_GENERATE(neg)

/* not does not affect condition flags. */
#undef CK_PR_UNARY_V
#define CK_PR_UNARY_V(a, b, c, d, e)
CK_PR_GENERATE(not)

#undef CK_PR_GENERATE
#undef CK_PR_UNARY_S
#undef CK_PR_UNARY_V
#undef CK_PR_UNARY_R
#undef CK_PR_UNARY
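
/*
 * Illustrative sketch (not part of the original header): the _zero variants
 * report whether the operation drove the value to zero, which is the usual
 * reference-count release idiom.
 *
 *	static void
 *	unref(unsigned int *refcnt, void (*dtor)(void))
 *	{
 *		bool zero;
 *
 *		ck_pr_dec_uint_zero(refcnt, &zero);
 *		if (zero == true)
 *			dtor();
 *	}
 */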
/*
 * Atomic store-only binary operations.
 */
#define CK_PR_BINARY(K, S, M, T, C, I, O)				\
	CK_CC_INLINE static void					\
	ck_pr_##K##_##S(M *target, T d)					\
	{								\
		__asm__ __volatile__(CK_PR_LOCK_PREFIX I " %1, %0"	\
					: "+m" (*(C *)target)		\
					: O "q" (d)			\
					: "memory", "cc");		\
		return;							\
	}

#define CK_PR_BINARY_S(K, S, T, I, O) CK_PR_BINARY(K, S, T, T, T, I, O)

#define CK_PR_GENERATE(K)							\
	CK_PR_BINARY(K, ptr, void, uintptr_t, char, #K "q", CK_CC_IMM_U32)	\
	CK_PR_BINARY_S(K, char, char, #K "b", CK_CC_IMM_S32)			\
	CK_PR_BINARY_S(K, int, int, #K "l", CK_CC_IMM_S32)			\
	CK_PR_BINARY_S(K, uint, unsigned int, #K "l", CK_CC_IMM_U32)		\
	CK_PR_BINARY_S(K, 64, uint64_t, #K "q", CK_CC_IMM_U32)			\
	CK_PR_BINARY_S(K, 32, uint32_t, #K "l", CK_CC_IMM_U32)			\
	CK_PR_BINARY_S(K, 16, uint16_t, #K "w", CK_CC_IMM_U32)			\
	CK_PR_BINARY_S(K, 8, uint8_t, #K "b", CK_CC_IMM_U32)

CK_PR_GENERATE(add)
CK_PR_GENERATE(sub)
CK_PR_GENERATE(and)
CK_PR_GENERATE(or)
CK_PR_GENERATE(xor)

#undef CK_PR_GENERATE
#undef CK_PR_BINARY_S
#undef CK_PR_BINARY
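
/*
 * Illustrative sketch (not part of the original header): these operations
 * update memory atomically but return nothing, so they suit flag words where
 * the previous value is not needed.
 *
 *	#define FLAG_DIRTY (1U << 3)
 *
 *	static void
 *	mark_dirty(uint32_t *flags)
 *	{
 *
 *		ck_pr_or_32(flags, FLAG_DIRTY);
 *	}
 */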
/*
 * Atomic compare and swap.
 */
#define CK_PR_CAS(S, M, T, C, I)						\
	CK_CC_INLINE static bool						\
	ck_pr_cas_##S(M *target, T compare, T set)				\
	{									\
		bool z;								\
		__asm__ __volatile__(CK_PR_LOCK_PREFIX I " %2, %0; setz %1"	\
					: "+m" (*(C *)target),			\
					  "=a" (z)				\
					: "q"  (set),				\
					  "a"  (compare)			\
					: "memory", "cc");			\
		return z;							\
	}

CK_PR_CAS(ptr, void, void *, char, "cmpxchgq")

#define CK_PR_CAS_S(S, T, I) CK_PR_CAS(S, T, T, T, I)
CK_PR_CAS_S(char, char, "cmpxchgb")
CK_PR_CAS_S(int, int, "cmpxchgl")
CK_PR_CAS_S(uint, unsigned int, "cmpxchgl")
#ifndef CK_PR_DISABLE_DOUBLE
CK_PR_CAS_S(double, double, "cmpxchgq")
#endif
CK_PR_CAS_S(64, uint64_t, "cmpxchgq")
CK_PR_CAS_S(32, uint32_t, "cmpxchgl")
CK_PR_CAS_S(16, uint16_t, "cmpxchgw")
CK_PR_CAS_S(8, uint8_t, "cmpxchgb")

#undef CK_PR_CAS_S
#undef CK_PR_CAS
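
/*
 * Illustrative sketch (not part of the original header): the boolean return
 * value makes the canonical CAS retry loop straightforward, e.g. a capped
 * increment.
 *
 *	static bool
 *	inc_capped(uint32_t *v, uint32_t cap)
 *	{
 *		uint32_t snap;
 *
 *		do {
 *			snap = ck_pr_load_32(v);
 *			if (snap == cap)
 *				return false;
 *		} while (ck_pr_cas_32(v, snap, snap + 1) == false);
 *
 *		return true;
 *	}
 */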
/*
 * Compare and swap, set *v to old value of target.
 */
#define CK_PR_CAS_O(S, M, T, C, I, R)						\
	CK_CC_INLINE static bool						\
	ck_pr_cas_##S##_value(M *target, T compare, T set, M *v)		\
	{									\
		bool z;								\
		__asm__ __volatile__(CK_PR_LOCK_PREFIX "cmpxchg" I " %3, %0;"	\
				     "mov %%" R ", %2;"				\
				     "setz %1;"					\
					: "+m" (*(C *)target),			\
					  "=a" (z),				\
					  "=m" (*(C *)v)			\
					: "q"  (set),				\
					  "a"  (compare)			\
					: "memory", "cc");			\
		return z;							\
	}

CK_PR_CAS_O(ptr, void, void *, char, "q", "rax")

#define CK_PR_CAS_O_S(S, T, I, R)	\
	CK_PR_CAS_O(S, T, T, T, I, R)
CK_PR_CAS_O_S(char, char, "b", "al")
CK_PR_CAS_O_S(int, int, "l", "eax")
CK_PR_CAS_O_S(uint, unsigned int, "l", "eax")
#ifndef CK_PR_DISABLE_DOUBLE
CK_PR_CAS_O_S(double, double, "q", "rax")
#endif
CK_PR_CAS_O_S(64, uint64_t, "q", "rax")
CK_PR_CAS_O_S(32, uint32_t, "l", "eax")
CK_PR_CAS_O_S(16, uint16_t, "w", "ax")
CK_PR_CAS_O_S(8, uint8_t, "b", "al")

#undef CK_PR_CAS_O_S
#undef CK_PR_CAS_O
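
/*
 * Illustrative sketch (not part of the original header): the _value form
 * returns the witnessed value on failure, so a retry loop needs only one
 * load up front instead of one per iteration.
 *
 *	static void
 *	add_saturating(uint32_t *v, uint32_t d, uint32_t cap)
 *	{
 *		uint32_t snap = ck_pr_load_32(v);
 *		uint32_t goal;
 *
 *		do {
 *			goal = snap + d;
 *			if (goal > cap || goal < snap)
 *				goal = cap;
 *		} while (ck_pr_cas_32_value(v, snap, goal, &snap) == false);
 *	}
 */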
/*
 * Contrary to the C interface, alignment requirements are those of
 * uint64_t[2].
 */
CK_CC_INLINE static bool
ck_pr_cas_64_2(uint64_t target[2], uint64_t compare[2], uint64_t set[2])
{
	bool z;
	__asm__ __volatile__("movq 0(%4), %%rax;"
			     "movq 8(%4), %%rdx;"
			     CK_PR_LOCK_PREFIX "cmpxchg16b %0; setz %1"
				: "+m" (*target),
				  "=q" (z)
				: "b"  (set[0]),
				  "c"  (set[1]),
				  "q"  (compare)
				: "memory", "cc", "%rax", "%rdx");
	return z;
}

CK_CC_INLINE static bool
ck_pr_cas_ptr_2(void *t, void *c, void *s)
{
	return ck_pr_cas_64_2(CK_CPP_CAST(uint64_t *, t),
			      CK_CPP_CAST(uint64_t *, c),
			      CK_CPP_CAST(uint64_t *, s));
}
CK_CC_INLINE static bool
ck_pr_cas_64_2_value(uint64_t target[2],
		     uint64_t compare[2],
		     uint64_t set[2],
		     uint64_t v[2])
{
	bool z;
	__asm__ __volatile__(CK_PR_LOCK_PREFIX "cmpxchg16b %0;"
			     "setz %3"
				: "+m" (*target),
				  "=a" (v[0]),
				  "=d" (v[1]),
				  "=q" (z)
				: "a" (compare[0]),
				  "d" (compare[1]),
				  "b" (set[0]),
				  "c" (set[1])
				: "memory", "cc");
	return z;
}

CK_CC_INLINE static bool
ck_pr_cas_ptr_2_value(void *t, void *c, void *s, void *v)
{
	return ck_pr_cas_64_2_value(CK_CPP_CAST(uint64_t *, t),
				    CK_CPP_CAST(uint64_t *, c),
				    CK_CPP_CAST(uint64_t *, s),
				    CK_CPP_CAST(uint64_t *, v));
}
#define CK_PR_CAS_V(S, W, T)					\
CK_CC_INLINE static bool					\
ck_pr_cas_##S##_##W(T t[W], T c[W], T s[W])			\
{								\
	return ck_pr_cas_64_2((uint64_t *)(void *)t,		\
			      (uint64_t *)(void *)c,		\
			      (uint64_t *)(void *)s);		\
}								\
CK_CC_INLINE static bool					\
ck_pr_cas_##S##_##W##_value(T *t, T c[W], T s[W], T *v)		\
{								\
	return ck_pr_cas_64_2_value((uint64_t *)(void *)t,	\
				    (uint64_t *)(void *)c,	\
				    (uint64_t *)(void *)s,	\
				    (uint64_t *)(void *)v);	\
}

#ifndef CK_PR_DISABLE_DOUBLE
CK_PR_CAS_V(double, 2, double)
#endif
CK_PR_CAS_V(char, 16, char)
CK_PR_CAS_V(int, 4, int)
CK_PR_CAS_V(uint, 4, unsigned int)
CK_PR_CAS_V(32, 4, uint32_t)
CK_PR_CAS_V(16, 8, uint16_t)
CK_PR_CAS_V(8, 16, uint8_t)

#undef CK_PR_CAS_V
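
/*
 * Illustrative sketch (not part of the original header): the 128-bit CAS is
 * the usual ABA countermeasure, pairing a pointer with a generation count
 * bumped on every update. The layout is hypothetical and must be 16-byte
 * aligned, per the alignment note above.
 *
 *	struct stamped {
 *		void *ptr;
 *		uint64_t gen;
 *	} CK_CC_ALIGN(16);
 *
 *	static bool
 *	stamped_update(struct stamped *s, struct stamped snap, void *p)
 *	{
 *		struct stamped goal = { p, snap.gen + 1 };
 *
 *		return ck_pr_cas_ptr_2(s, &snap, &goal);
 *	}
 */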
/*
 * Atomic bit test operations.
 */
#define CK_PR_BT(K, S, T, P, C, I)					\
	CK_CC_INLINE static bool					\
	ck_pr_##K##_##S(T *target, unsigned int b)			\
	{								\
		bool c;							\
		__asm__ __volatile__(CK_PR_LOCK_PREFIX I "; setc %1"	\
					: "+m" (*(C *)target),		\
					  "=q" (c)			\
					: "q"  ((P)b)			\
					: "memory", "cc");		\
		return c;						\
	}

#define CK_PR_BT_S(K, S, T, I) CK_PR_BT(K, S, T, T, T, I)

#define CK_PR_GENERATE(K)					\
	CK_PR_BT(K, ptr, void, uint64_t, char, #K "q %2, %0")	\
	CK_PR_BT_S(K, uint, unsigned int, #K "l %2, %0")	\
	CK_PR_BT_S(K, int, int, #K "l %2, %0")			\
	CK_PR_BT_S(K, 64, uint64_t, #K "q %2, %0")		\
	CK_PR_BT_S(K, 32, uint32_t, #K "l %2, %0")		\
	CK_PR_BT_S(K, 16, uint16_t, #K "w %w2, %0")

CK_PR_GENERATE(btc)
CK_PR_GENERATE(bts)
CK_PR_GENERATE(btr)

#undef CK_PR_GENERATE
#undef CK_PR_BT_S
#undef CK_PR_BT
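
/*
 * Illustrative sketch (not part of the original header): bts returns the
 * previous state of the bit, which doubles as a one-shot claim on a slot in
 * a bitmap.
 *
 *	static bool
 *	claim_slot(uint64_t *bitmap, unsigned int slot)
 *	{
 *
 *		return ck_pr_bts_64(bitmap, slot) == false;
 *	}
 */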
#endif /* CK_PR_X86_64_H */