/*-
 * Copyright (c) 2013 Andrew Turner <andrew@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_ATOMIC_H_
#define _MACHINE_ATOMIC_H_

#define isb()           __asm __volatile("isb" : : : "memory")

/*
 * Options for DMB and DSB:
 *      oshld   Outer Shareable, load
 *      oshst   Outer Shareable, store
 *      osh     Outer Shareable, all
 *      nshld   Non-shareable, load
 *      nshst   Non-shareable, store
 *      nsh     Non-shareable, all
 *      ishld   Inner Shareable, load
 *      ishst   Inner Shareable, store
 *      ish     Inner Shareable, all
 *      ld      Full system, load
 *      st      Full system, store
 *      sy      Full system, all
 */
#define dsb(opt)        __asm __volatile("dsb " __STRING(opt) : : : "memory")
#define dmb(opt)        __asm __volatile("dmb " __STRING(opt) : : : "memory")

#define mb()    dmb(sy) /* Full system memory barrier all */
#define wmb()   dmb(st) /* Full system memory barrier store */
#define rmb()   dmb(ld) /* Full system memory barrier load */

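/*
 * Illustrative sketch only, not part of the original header: wmb()
 * can order payload stores ahead of the store that publishes them,
 * e.g. when handing a DMA descriptor held in normal memory to another
 * observer.  The "desc" ring entry and OWNER_HW flag are hypothetical.
 *
 *      desc->addr = paddr;
 *      desc->len = len;
 *      wmb();
 *      desc->owner = OWNER_HW;
 */
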
#define ATOMIC_OP(op, asm_op, bar, a, l)                                \
static __inline void                                                    \
atomic_##op##_##bar##32(volatile uint32_t *p, uint32_t val)             \
{                                                                       \
        uint32_t tmp;                                                   \
        int res;                                                        \
                                                                        \
        __asm __volatile(                                               \
            "1: ld"#a"xr   %w0, [%2]      \n"                           \
            "   "#asm_op"  %w0, %w0, %w3  \n"                           \
            "   st"#l"xr   %w1, %w0, [%2] \n"                           \
            "   cbnz       %w1, 1b        \n"                           \
            : "=&r"(tmp), "=&r"(res)                                    \
            : "r" (p), "r" (val)                                        \
            : "memory"                                                  \
        );                                                              \
}                                                                       \
                                                                        \
static __inline void                                                    \
atomic_##op##_##bar##64(volatile uint64_t *p, uint64_t val)             \
{                                                                       \
        uint64_t tmp;                                                   \
        int res;                                                        \
                                                                        \
        __asm __volatile(                                               \
            "1: ld"#a"xr   %0, [%2]      \n"                            \
            "   "#asm_op"  %0, %0, %3    \n"                            \
            "   st"#l"xr   %w1, %0, [%2] \n"                            \
            "   cbnz       %w1, 1b       \n"                            \
            : "=&r"(tmp), "=&r"(res)                                    \
            : "r" (p), "r" (val)                                        \
            : "memory"                                                  \
        );                                                              \
}

#define ATOMIC(op, asm_op)                                              \
    ATOMIC_OP(op, asm_op,     ,  ,  )                                   \
    ATOMIC_OP(op, asm_op, acq_, a,  )                                   \
    ATOMIC_OP(op, asm_op, rel_,  , l)

ATOMIC(add,      add)
ATOMIC(clear,    bic)
ATOMIC(set,      orr)
ATOMIC(subtract, sub)

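/*
 * Each ATOMIC() expansion above emits plain, acquire (acq_), and
 * release (rel_) variants for 32- and 64-bit operands, e.g.
 * atomic_set_32(), atomic_set_acq_32() and atomic_set_rel_32().
 * Illustrative use (the "flags" variable is hypothetical):
 *
 *      static volatile uint32_t flags;
 *
 *      atomic_set_32(&flags, 0x1);
 *      atomic_clear_rel_32(&flags, 0x1);
 */
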
#define ATOMIC_FCMPSET(bar, a, l)                                       \
static __inline int                                                     \
atomic_fcmpset_##bar##32(volatile uint32_t *p, uint32_t *cmpval,        \
    uint32_t newval)                                                    \
{                                                                       \
        uint32_t tmp;                                                   \
        uint32_t _cmpval = *cmpval;                                     \
        int res;                                                        \
                                                                        \
        __asm __volatile(                                               \
            "1: mov      %w1, #1        \n"                             \
            "   ld"#a"xr %w0, [%2]      \n"                             \
            "   cmp      %w0, %w3       \n"                             \
            "   b.ne     2f             \n"                             \
            "   st"#l"xr %w1, %w4, [%2] \n"                             \
            "2:"                                                        \
            : "=&r"(tmp), "=&r"(res)                                    \
            : "r" (p), "r" (_cmpval), "r" (newval)                      \
            : "cc", "memory"                                            \
        );                                                              \
        *cmpval = tmp;                                                  \
                                                                        \
        return (!res);                                                  \
}                                                                       \
                                                                        \
static __inline int                                                     \
atomic_fcmpset_##bar##64(volatile uint64_t *p, uint64_t *cmpval,        \
    uint64_t newval)                                                    \
{                                                                       \
        uint64_t tmp;                                                   \
        uint64_t _cmpval = *cmpval;                                     \
        int res;                                                        \
                                                                        \
        __asm __volatile(                                               \
            "1: mov      %w1, #1       \n"                              \
            "   ld"#a"xr %0, [%2]      \n"                              \
            "   cmp      %0, %3        \n"                              \
            "   b.ne     2f            \n"                              \
            "   st"#l"xr %w1, %4, [%2] \n"                              \
            "2:"                                                        \
            : "=&r"(tmp), "=&r"(res)                                    \
            : "r" (p), "r" (_cmpval), "r" (newval)                      \
            : "cc", "memory"                                            \
        );                                                              \
        *cmpval = tmp;                                                  \
                                                                        \
        return (!res);                                                  \
}

ATOMIC_FCMPSET(    ,  ,  )
ATOMIC_FCMPSET(acq_, a,  )
ATOMIC_FCMPSET(rel_,  , l)

#undef ATOMIC_FCMPSET

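/*
 * atomic_fcmpset_*() returns nonzero on success; on failure it writes
 * the value it observed back through cmpval, so a retry loop need not
 * reload the word itself.  Note it may also fail spuriously when the
 * store-exclusive loses its reservation.  Illustrative sketch (the
 * "counter" variable is hypothetical):
 *
 *      static volatile uint32_t counter;
 *      uint32_t old, new;
 *
 *      old = counter;
 *      do {
 *              new = old + 1;
 *      } while (!atomic_fcmpset_32(&counter, &old, new));
 */
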
#define ATOMIC_CMPSET(bar, a, l)                                        \
static __inline int                                                     \
atomic_cmpset_##bar##32(volatile uint32_t *p, uint32_t cmpval,          \
    uint32_t newval)                                                    \
{                                                                       \
        uint32_t tmp;                                                   \
        int res;                                                        \
                                                                        \
        __asm __volatile(                                               \
            "1: mov      %w1, #1        \n"                             \
            "   ld"#a"xr %w0, [%2]      \n"                             \
            "   cmp      %w0, %w3       \n"                             \
            "   b.ne     2f             \n"                             \
            "   st"#l"xr %w1, %w4, [%2] \n"                             \
            "   cbnz     %w1, 1b        \n"                             \
            "2:"                                                        \
            : "=&r"(tmp), "=&r"(res)                                    \
            : "r" (p), "r" (cmpval), "r" (newval)                       \
            : "cc", "memory"                                            \
        );                                                              \
                                                                        \
        return (!res);                                                  \
}                                                                       \
                                                                        \
static __inline int                                                     \
atomic_cmpset_##bar##64(volatile uint64_t *p, uint64_t cmpval,          \
    uint64_t newval)                                                    \
{                                                                       \
        uint64_t tmp;                                                   \
        int res;                                                        \
                                                                        \
        __asm __volatile(                                               \
            "1: mov      %w1, #1       \n"                              \
            "   ld"#a"xr %0, [%2]      \n"                              \
            "   cmp      %0, %3        \n"                              \
            "   b.ne     2f            \n"                              \
            "   st"#l"xr %w1, %4, [%2] \n"                              \
            "   cbnz     %w1, 1b       \n"                              \
            "2:"                                                        \
            : "=&r"(tmp), "=&r"(res)                                    \
            : "r" (p), "r" (cmpval), "r" (newval)                       \
            : "cc", "memory"                                            \
        );                                                              \
                                                                        \
        return (!res);                                                  \
}

ATOMIC_CMPSET(    ,  ,  )
ATOMIC_CMPSET(acq_, a,  )
ATOMIC_CMPSET(rel_,  , l)

#undef ATOMIC_CMPSET
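
/*
 * Unlike fcmpset, atomic_cmpset_*() retries internally when the
 * store-exclusive fails spuriously (the cbnz back to 1b), so it
 * returns 0 only when the compare itself does not match.  Illustrative
 * sketch of a minimal spin-acquire (the "lk" variable is hypothetical):
 *
 *      static volatile uint32_t lk;
 *
 *      while (atomic_cmpset_acq_32(&lk, 0, 1) == 0)
 *              ;
 *      ... critical section ...
 *      atomic_store_rel_32(&lk, 0);
 */
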
static __inline uint32_t
atomic_fetchadd_32(volatile uint32_t *p, uint32_t val)
{
        uint32_t tmp, ret;
        int res;

        __asm __volatile(
            "1: ldxr    %w2, [%3]      \n"
            "   add     %w0, %w2, %w4  \n"
            "   stxr    %w1, %w0, [%3] \n"
            "   cbnz    %w1, 1b        \n"
            : "=&r"(tmp), "=&r"(res), "=&r"(ret)
            : "r" (p), "r" (val)
            : "memory"
        );

        return (ret);
}

static __inline uint64_t
atomic_fetchadd_64(volatile uint64_t *p, uint64_t val)
{
        uint64_t tmp, ret;
        int res;

        __asm __volatile(
            "1: ldxr    %2, [%3]      \n"
            "   add     %0, %2, %4    \n"
            "   stxr    %w1, %0, [%3] \n"
            "   cbnz    %w1, 1b       \n"
            : "=&r"(tmp), "=&r"(res), "=&r"(ret)
            : "r" (p), "r" (val)
            : "memory"
        );

        return (ret);
}

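/*
 * atomic_fetchadd_*() returns the value the word held before the add,
 * which makes it a natural ticket dispenser.  Illustrative sketch
 * (the "next_ticket" variable is hypothetical):
 *
 *      static volatile uint32_t next_ticket;
 *      uint32_t my_ticket;
 *
 *      my_ticket = atomic_fetchadd_32(&next_ticket, 1);
 */
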
static __inline uint32_t
atomic_readandclear_32(volatile uint32_t *p)
{
        uint32_t ret;
        int res;

        __asm __volatile(
            "1: ldxr    %w1, [%2]      \n"
            "   stxr    %w0, wzr, [%2] \n"
            "   cbnz    %w0, 1b        \n"
            : "=&r"(res), "=&r"(ret)
            : "r" (p)
            : "memory"
        );

        return (ret);
}

static __inline uint64_t
atomic_readandclear_64(volatile uint64_t *p)
{
        uint64_t ret;
        int res;

        __asm __volatile(
            "1: ldxr    %1, [%2]       \n"
            "   stxr    %w0, xzr, [%2] \n"
            "   cbnz    %w0, 1b        \n"
            : "=&r"(res), "=&r"(ret)
            : "r" (p)
            : "memory"
        );

        return (ret);
}

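/*
 * atomic_readandclear_*() swaps in zero and returns the previous
 * contents, so each recorded event is consumed exactly once.
 * Illustrative sketch; "pending" and handle_events() are hypothetical:
 *
 *      static volatile uint32_t pending;
 *      uint32_t events;
 *
 *      events = atomic_readandclear_32(&pending);
 *      if (events != 0)
 *              handle_events(events);
 */
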
static __inline uint32_t
atomic_swap_32(volatile uint32_t *p, uint32_t val)
{
        uint32_t ret;
        int res;

        __asm __volatile(
            "1: ldxr    %w0, [%2]      \n"
            "   stxr    %w1, %w3, [%2] \n"
            "   cbnz    %w1, 1b        \n"
            : "=&r"(ret), "=&r"(res)
            : "r" (p), "r" (val)
            : "memory"
        );

        return (ret);
}

static __inline uint64_t
atomic_swap_64(volatile uint64_t *p, uint64_t val)
{
        uint64_t ret;
        int res;

        __asm __volatile(
            "1: ldxr    %0, [%2]      \n"
            "   stxr    %w1, %3, [%2] \n"
            "   cbnz    %w1, 1b       \n"
            : "=&r"(ret), "=&r"(res)
            : "r" (p), "r" (val)
            : "memory"
        );

        return (ret);
}

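/*
 * atomic_swap_*() unconditionally stores the new value and returns the
 * old one; readandclear above is the special case of swapping in zero.
 * Illustrative sketch ("state" and STATE_BUSY are hypothetical):
 *
 *      if (atomic_swap_32(&state, STATE_BUSY) == STATE_BUSY)
 *              return;
 */
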
static __inline uint32_t
atomic_load_acq_32(volatile uint32_t *p)
{
        uint32_t ret;

        __asm __volatile(
            "ldar       %w0, [%1] \n"
            : "=&r" (ret)
            : "r" (p)
            : "memory");

        return (ret);
}

static __inline uint64_t
atomic_load_acq_64(volatile uint64_t *p)
{
        uint64_t ret;

        __asm __volatile(
            "ldar       %0, [%1] \n"
            : "=&r" (ret)
            : "r" (p)
            : "memory");

        return (ret);
}

static __inline void
atomic_store_rel_32(volatile uint32_t *p, uint32_t val)
{

        __asm __volatile(
            "stlr       %w0, [%1] \n"
            :
            : "r" (val), "r" (p)
            : "memory");
}

static __inline void
atomic_store_rel_64(volatile uint64_t *p, uint64_t val)
{

        __asm __volatile(
            "stlr       %0, [%1] \n"
            :
            : "r" (val), "r" (p)
            : "memory");
}

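/*
 * atomic_store_rel_*() (stlr) and atomic_load_acq_*() (ldar) pair up
 * as a publication protocol: every store program-ordered before the
 * releasing store is visible to a thread whose acquiring load observes
 * that store.  Illustrative sketch; "data", "ready", compute() and
 * use() are hypothetical:
 *
 *      producer:
 *              data = compute();
 *              atomic_store_rel_32(&ready, 1);
 *
 *      consumer:
 *              if (atomic_load_acq_32(&ready) != 0)
 *                      use(data);
 */
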
#define atomic_add_int                  atomic_add_32
#define atomic_fcmpset_int              atomic_fcmpset_32
#define atomic_clear_int                atomic_clear_32
#define atomic_cmpset_int               atomic_cmpset_32
#define atomic_fetchadd_int             atomic_fetchadd_32
#define atomic_readandclear_int         atomic_readandclear_32
#define atomic_set_int                  atomic_set_32
#define atomic_swap_int                 atomic_swap_32
#define atomic_subtract_int             atomic_subtract_32

#define atomic_add_acq_int              atomic_add_acq_32
#define atomic_fcmpset_acq_int          atomic_fcmpset_acq_32
#define atomic_clear_acq_int            atomic_clear_acq_32
#define atomic_cmpset_acq_int           atomic_cmpset_acq_32
#define atomic_load_acq_int             atomic_load_acq_32
#define atomic_set_acq_int              atomic_set_acq_32
#define atomic_subtract_acq_int         atomic_subtract_acq_32

#define atomic_add_rel_int              atomic_add_rel_32
#define atomic_fcmpset_rel_int          atomic_fcmpset_rel_32
#define atomic_clear_rel_int            atomic_clear_rel_32
#define atomic_cmpset_rel_int           atomic_cmpset_rel_32
#define atomic_set_rel_int              atomic_set_rel_32
#define atomic_subtract_rel_int         atomic_subtract_rel_32
#define atomic_store_rel_int            atomic_store_rel_32

#define atomic_add_long                 atomic_add_64
#define atomic_fcmpset_long             atomic_fcmpset_64
#define atomic_clear_long               atomic_clear_64
#define atomic_cmpset_long              atomic_cmpset_64
#define atomic_fetchadd_long            atomic_fetchadd_64
#define atomic_readandclear_long        atomic_readandclear_64
#define atomic_set_long                 atomic_set_64
#define atomic_swap_long                atomic_swap_64
#define atomic_subtract_long            atomic_subtract_64

#define atomic_add_ptr                  atomic_add_64
#define atomic_fcmpset_ptr              atomic_fcmpset_64
#define atomic_clear_ptr                atomic_clear_64
#define atomic_cmpset_ptr               atomic_cmpset_64
#define atomic_fetchadd_ptr             atomic_fetchadd_64
#define atomic_readandclear_ptr         atomic_readandclear_64
#define atomic_set_ptr                  atomic_set_64
#define atomic_swap_ptr                 atomic_swap_64
#define atomic_subtract_ptr             atomic_subtract_64

#define atomic_add_acq_long             atomic_add_acq_64
#define atomic_fcmpset_acq_long         atomic_fcmpset_acq_64
#define atomic_clear_acq_long           atomic_clear_acq_64
#define atomic_cmpset_acq_long          atomic_cmpset_acq_64
#define atomic_load_acq_long            atomic_load_acq_64
#define atomic_set_acq_long             atomic_set_acq_64
#define atomic_subtract_acq_long        atomic_subtract_acq_64

#define atomic_add_acq_ptr              atomic_add_acq_64
#define atomic_fcmpset_acq_ptr          atomic_fcmpset_acq_64
#define atomic_clear_acq_ptr            atomic_clear_acq_64
#define atomic_cmpset_acq_ptr           atomic_cmpset_acq_64
#define atomic_load_acq_ptr             atomic_load_acq_64
#define atomic_set_acq_ptr              atomic_set_acq_64
#define atomic_subtract_acq_ptr         atomic_subtract_acq_64

#define atomic_add_rel_long             atomic_add_rel_64
#define atomic_fcmpset_rel_long         atomic_fcmpset_rel_64
#define atomic_clear_rel_long           atomic_clear_rel_64
#define atomic_cmpset_rel_long          atomic_cmpset_rel_64
#define atomic_set_rel_long             atomic_set_rel_64
#define atomic_subtract_rel_long        atomic_subtract_rel_64
#define atomic_store_rel_long           atomic_store_rel_64

#define atomic_add_rel_ptr              atomic_add_rel_64
#define atomic_fcmpset_rel_ptr          atomic_fcmpset_rel_64
#define atomic_clear_rel_ptr            atomic_clear_rel_64
#define atomic_cmpset_rel_ptr           atomic_cmpset_rel_64
#define atomic_set_rel_ptr              atomic_set_rel_64
#define atomic_subtract_rel_ptr         atomic_subtract_rel_64
#define atomic_store_rel_ptr            atomic_store_rel_64

static __inline void
atomic_thread_fence_acq(void)
{

        dmb(ld);
}

static __inline void
atomic_thread_fence_rel(void)
{

        dmb(sy);
}

static __inline void
atomic_thread_fence_acq_rel(void)
{

        dmb(sy);
}

static __inline void
atomic_thread_fence_seq_cst(void)
{

        dmb(sy);
}

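/*
 * The fences order memory accesses without naming a location.  Note
 * that atomic_thread_fence_rel() deliberately uses dmb(sy) rather than
 * dmb(st): a release fence must order prior loads as well as prior
 * stores against later stores, and a store-store barrier alone gives
 * only the latter.  Illustrative pairing ("data" and "flag" are
 * hypothetical):
 *
 *      data = value;
 *      atomic_thread_fence_rel();
 *      flag = 1;
 */
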
#endif /* _MACHINE_ATOMIC_H_ */