/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2019 Justin Hibbits
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

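/*
 * Emulation of 64-bit atomic operations for platforms that lack native
 * 64-bit atomics.  Accesses are serialized with a small pool of mutexes
 * on SMP kernels, or by disabling interrupts on uniprocessor kernels.
 */
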
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/systm.h>

#include <machine/atomic.h>
#include <machine/param.h>

#include <vm/vm.h>
#include <vm/pmap.h>

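/* Identifiers for the emulated 64-bit atomic operations. */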
enum {
    ATOMIC64_ADD,
    ATOMIC64_CLEAR,
    ATOMIC64_CMPSET,
    ATOMIC64_FCMPSET,
    ATOMIC64_FETCHADD,
    ATOMIC64_LOAD,
    ATOMIC64_SET,
    ATOMIC64_SUBTRACT,
    ATOMIC64_STORE,
    ATOMIC64_SWAP
};

#ifdef _KERNEL
#ifdef SMP

#define A64_POOL_SIZE   MAXCPU
/* Estimated size of a cacheline */
#define CACHE_ALIGN     CACHE_LINE_SIZE
static struct mtx a64_mtx_pool[A64_POOL_SIZE];

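/*
 * Map a variable's physical address to a pool mutex: addresses within
 * one cache line share a lock, and consecutive lines spread across the
 * pool.
 */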
#define GET_MUTEX(p) \
    (&a64_mtx_pool[(pmap_kextract((vm_offset_t)p) / CACHE_ALIGN) % (A64_POOL_SIZE)])

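/*
 * Until SMP startup completes only one CPU is running, so the lock can
 * be skipped; this also keeps these routines usable before the mutex
 * pool has been initialized.
 */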
#define LOCK_A64()                      \
    struct mtx *_amtx = GET_MUTEX(p);   \
    if (smp_started) mtx_lock(_amtx)

#define UNLOCK_A64()    if (smp_started) mtx_unlock(_amtx)

#else   /* !SMP */

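/*
 * With a single CPU, blocking interrupts suffices for atomicity.
 * LOCK_A64() opens a block and saves the interrupt state; UNLOCK_A64()
 * restores it and closes the block, so the two must always be paired.
 */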
#define LOCK_A64()      { register_t s = intr_disable()
#define UNLOCK_A64()    intr_restore(s); }

#endif  /* SMP */

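/*
 * Generate an emulated atomic op as a function definition, in unary
 * (pointer-only) and binary (pointer and value) flavors.  "block" runs
 * with the lock held; the trailing "struct hack" declaration absorbs
 * the semicolon at the expansion site.
 */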
#define ATOMIC64_EMU_UN(op, rt, block, ret) \
    rt \
    atomic_##op##_64(volatile u_int64_t *p) {                   \
        u_int64_t tmp __unused;                                 \
        LOCK_A64();                                             \
        block;                                                  \
        UNLOCK_A64();                                           \
        ret; } struct hack

#define ATOMIC64_EMU_BIN(op, rt, block, ret) \
    rt \
    atomic_##op##_64(volatile u_int64_t *p, u_int64_t v) {      \
        u_int64_t tmp __unused;                                 \
        LOCK_A64();                                             \
        block;                                                  \
        UNLOCK_A64();                                           \
        ret; } struct hack

ATOMIC64_EMU_BIN(add, void, (*p = *p + v), return);
ATOMIC64_EMU_BIN(clear, void, *p &= ~v, return);
ATOMIC64_EMU_BIN(fetchadd, u_int64_t, (*p = *p + v, v = *p - v), return (v));
ATOMIC64_EMU_UN(load, u_int64_t, (tmp = *p), return (tmp));
ATOMIC64_EMU_BIN(set, void, *p |= v, return);
ATOMIC64_EMU_BIN(subtract, void, (*p = *p - v), return);
ATOMIC64_EMU_BIN(store, void, *p = v, return);
ATOMIC64_EMU_BIN(swap, u_int64_t, tmp = *p; *p = v; v = tmp, return (v));

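/*
 * Illustrative semantics: atomic_fetchadd_64(&cnt, 1) adds 1 to cnt and
 * returns the value cnt held before the addition.
 */

/*
 * atomic_cmpset_64() stores "new" only if *p still equals "old"; it
 * returns non-zero on success and zero on failure.
 */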
int
atomic_cmpset_64(volatile u_int64_t *p, u_int64_t old, u_int64_t new)
{
        u_int64_t tmp;

        LOCK_A64();
        tmp = *p;
        if (tmp == old)
                *p = new;
        UNLOCK_A64();

        return (tmp == old);
}

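/*
 * atomic_fcmpset_64() behaves like cmpset but, on failure, writes the
 * value it observed back through "old", which suits retry loops.  A
 * sketch of typical use (illustrative, not part of this file):
 *
 *	u_int64_t old;
 *
 *	old = atomic_load_64(&v);
 *	while (!atomic_fcmpset_64(&v, &old, old + 1))
 *		;	/* "old" was refreshed; retry with it. */
 */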
int
atomic_fcmpset_64(volatile u_int64_t *p, u_int64_t *old, u_int64_t new)
{
        u_int64_t tmp, tmp_old;

        LOCK_A64();
        tmp = *p;
        tmp_old = *old;
        if (tmp == tmp_old)
                *p = new;
        else
                *old = tmp;
        UNLOCK_A64();

        return (tmp == tmp_old);
}

#ifdef SMP
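/*
 * Initialize the mutex pool early in boot (SI_SUB_LOCK); LOCK_A64()
 * does not take these mutexes until smp_started is set, later still.
 */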
static void
atomic64_mtxinit(void *x __unused)
{
        int i;

        for (i = 0; i < A64_POOL_SIZE; i++)
                mtx_init(&a64_mtx_pool[i], "atomic64 mutex", NULL, MTX_DEF);
}

SYSINIT(atomic64_mtxinit, SI_SUB_LOCK, SI_ORDER_MIDDLE, atomic64_mtxinit, NULL);
#endif  /* SMP */

#endif  /* _KERNEL */