/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2005 John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef __SYS_REFCOUNT_H__
#define __SYS_REFCOUNT_H__

#include <machine/atomic.h>

#if defined(_KERNEL) || defined(_STANDALONE)
#include <sys/systm.h>
#else
#include <stdbool.h>
#define KASSERT(exp, msg)       /* */
#endif

#define REFCOUNT_SATURATED(val)         (((val) & (1U << 31)) != 0)
#define REFCOUNT_SATURATION_VALUE       (3U << 30)
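
/*
 * A counter is "saturated" once its most significant bit is set.
 * REFCOUNT_SATURATION_VALUE (0xc0000000) sits at the midpoint of the
 * saturated range [0x80000000, 0xffffffff], so roughly 2^30 further
 * increments or decrements would be needed before a counter parked
 * there could escape that range.
 */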

/*
 * Attempt to handle reference count overflow and underflow.  Force the counter
 * to stay at the saturation value so that a counter overflow cannot trigger
 * destruction of the containing object and instead leads to a less harmful
 * memory leak.
 */
static __inline void
_refcount_update_saturated(volatile u_int *count)
{
#ifdef INVARIANTS
        panic("refcount %p wraparound", count);
#else
        atomic_store_int(count, REFCOUNT_SATURATION_VALUE);
#endif
}

static __inline void
refcount_init(volatile u_int *count, u_int value)
{
        KASSERT(!REFCOUNT_SATURATED(value),
            ("invalid initial refcount value %u", value));
        atomic_store_int(count, value);
}

static __inline u_int
refcount_load(volatile u_int *count)
{
        return (atomic_load_int(count));
}

static __inline u_int
refcount_acquire(volatile u_int *count)
{
        u_int old;

        old = atomic_fetchadd_int(count, 1);
        if (__predict_false(REFCOUNT_SATURATED(old)))
                _refcount_update_saturated(count);

        return (old);
}
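
/*
 * Example (a sketch, not part of this header; "struct foo" and foo_hold()
 * are hypothetical): refcount_acquire() is unconditional, so the caller
 * must already hold a valid reference, e.g. when handing an object it
 * owns to an additional consumer.
 *
 *      struct foo {
 *              u_int   foo_refs;
 *      };
 *
 *      static void
 *      foo_hold(struct foo *fp)
 *      {
 *              refcount_acquire(&fp->foo_refs);
 *      }
 */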

static __inline u_int
refcount_acquiren(volatile u_int *count, u_int n)
{
        u_int old;

        KASSERT(n < REFCOUNT_SATURATION_VALUE / 2,
            ("refcount_acquiren: n=%u too large", n));
        old = atomic_fetchadd_int(count, n);
        if (__predict_false(REFCOUNT_SATURATED(old)))
                _refcount_update_saturated(count);

        return (old);
}
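
/*
 * Example (a sketch; foo_hold_many() is hypothetical): refcount_acquiren()
 * takes n references in a single atomic operation, which is cheaper than
 * n separate refcount_acquire() calls, e.g. when an object is about to be
 * placed on several queues at once.
 *
 *      static void
 *      foo_hold_many(struct foo *fp, u_int nqueues)
 *      {
 *              refcount_acquiren(&fp->foo_refs, nqueues);
 *      }
 */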

static __inline __result_use_check bool
refcount_acquire_checked(volatile u_int *count)
{
        u_int old;

        old = atomic_load_int(count);
        for (;;) {
                if (__predict_false(REFCOUNT_SATURATED(old + 1)))
                        return (false);
                if (__predict_true(atomic_fcmpset_int(count, &old,
                    old + 1) == 1))
                        return (true);
        }
}
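
/*
 * Example (a sketch; foo_hold_checked() is hypothetical): unlike
 * refcount_acquire(), this variant refuses to push the counter into the
 * saturated range, so a caller can fail the request instead of leaking
 * the object.
 *
 *      static int
 *      foo_hold_checked(struct foo *fp)
 *      {
 *              if (!refcount_acquire_checked(&fp->foo_refs))
 *                      return (EOVERFLOW);
 *              return (0);
 *      }
 */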

/*
 * This function returns true if the refcount was incremented, and false
 * otherwise.
 */
static __inline __result_use_check bool
refcount_acquire_if_gt(volatile u_int *count, u_int n)
{
        u_int old;

        old = atomic_load_int(count);
        for (;;) {
                if (old <= n)
                        return (false);
                if (__predict_false(REFCOUNT_SATURATED(old)))
                        return (true);
                if (atomic_fcmpset_int(count, &old, old + 1))
                        return (true);
        }
}

static __inline __result_use_check bool
refcount_acquire_if_not_zero(volatile u_int *count)
{

        return (refcount_acquire_if_gt(count, 0));
}
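
/*
 * Example (a sketch; the table, its lock and foo_find() are hypothetical):
 * refcount_acquire_if_not_zero() supports the common lookup pattern where
 * an object found in a table may already be on its way to destruction;
 * a zero count means no new reference may be taken.
 *
 *      static struct foo *
 *      foo_find(int key)
 *      {
 *              struct foo *fp;
 *
 *              mtx_lock(&foo_table_lock);
 *              fp = foo_table_lookup(key);
 *              if (fp != NULL &&
 *                  !refcount_acquire_if_not_zero(&fp->foo_refs))
 *                      fp = NULL;
 *              mtx_unlock(&foo_table_lock);
 *              return (fp);
 *      }
 */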

static __inline bool
refcount_releasen(volatile u_int *count, u_int n)
{
        u_int old;

        KASSERT(n < REFCOUNT_SATURATION_VALUE / 2,
            ("refcount_releasen: n=%u too large", n));

        atomic_thread_fence_rel();
        old = atomic_fetchadd_int(count, -n);
        if (__predict_false(old < n || REFCOUNT_SATURATED(old))) {
                _refcount_update_saturated(count);
                return (false);
        }
        if (old > n)
                return (false);

        /*
         * Last reference.  Signal the user to call the destructor.
         *
         * Ensure that the destructor sees all updates. This synchronizes with
         * release fences from all routines which drop the count.
         */
        atomic_thread_fence_acq();
        return (true);
}

static __inline bool
refcount_release(volatile u_int *count)
{

        return (refcount_releasen(count, 1));
}
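
/*
 * Example (a sketch; foo_drop() and foo_destroy() are hypothetical): a
 * true return from refcount_release() means the caller dropped the last
 * reference and must destroy the object; the acquire fence above
 * guarantees the destructor sees all prior updates to it.
 *
 *      static void
 *      foo_drop(struct foo *fp)
 *      {
 *              if (refcount_release(&fp->foo_refs))
 *                      foo_destroy(fp);
 *      }
 */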

#define _refcount_release_if_cond(cond, name)                           \
static __inline __result_use_check bool                                 \
_refcount_release_if_##name(volatile u_int *count, u_int n)             \
{                                                                       \
        u_int old;                                                      \
                                                                        \
        KASSERT(n > 0, ("%s: zero increment", __func__));               \
        old = atomic_load_int(count);                                   \
        for (;;) {                                                      \
                if (!(cond))                                            \
                        return (false);                                 \
                if (__predict_false(REFCOUNT_SATURATED(old)))           \
                        return (false);                                 \
                if (atomic_fcmpset_rel_int(count, &old, old - 1))       \
                        return (true);                                  \
        }                                                               \
}
_refcount_release_if_cond(old > n, gt)
_refcount_release_if_cond(old == n, eq)

static __inline __result_use_check bool
refcount_release_if_gt(volatile u_int *count, u_int n)
{

        return (_refcount_release_if_gt(count, n));
}

static __inline __result_use_check bool
refcount_release_if_last(volatile u_int *count)
{

        if (_refcount_release_if_eq(count, 1)) {
                /* See the comment in refcount_releasen(). */
                atomic_thread_fence_acq();
                return (true);
        }
        return (false);
}
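
/*
 * Example (a sketch; foo_drop_locked() and the table names are
 * hypothetical): refcount_release_if_last() succeeds only when dropping
 * the final reference, which lets a caller already holding the lock that
 * protects the object's table unlink it before destruction.  On a false
 * return the reference was not dropped and must be released later.
 *
 *      static bool
 *      foo_drop_locked(struct foo *fp)
 *      {
 *              mtx_assert(&foo_table_lock, MA_OWNED);
 *              if (refcount_release_if_last(&fp->foo_refs)) {
 *                      foo_table_remove(fp);
 *                      foo_destroy(fp);
 *                      return (true);
 *              }
 *              return (false);
 *      }
 */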

static __inline __result_use_check bool
refcount_release_if_not_last(volatile u_int *count)
{

        return (_refcount_release_if_gt(count, 1));
}
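
/*
 * Example (a sketch; foo_drop_fast() is hypothetical):
 * refcount_release_if_not_last() drops a reference only when doing so
 * cannot trigger destruction, which is useful in contexts that must not
 * run the destructor, e.g. while holding a lock the destructor needs.
 * On a false return the caller still owns its reference and must drop
 * it by other means once it is safe to do so.
 *
 *      static bool
 *      foo_drop_fast(struct foo *fp)
 *      {
 *              return (refcount_release_if_not_last(&fp->foo_refs));
 *      }
 */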

#endif /* !__SYS_REFCOUNT_H__ */