/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005 John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef __SYS_REFCOUNT_H__
#define __SYS_REFCOUNT_H__

#include <machine/atomic.h>

#if defined(_KERNEL) || defined(_STANDALONE)
#include <sys/systm.h>
#else
#include <stdbool.h>
#define KASSERT(exp, msg)       /* */
#endif

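/*
 * A count with the high bit set is treated as saturated.  The saturation
 * value (3U << 30, i.e. 0xc0000000) sits in the middle of the saturated
 * range, so racing increments and decrements cannot clear the high bit
 * once the counter has saturated.
 */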
#define REFCOUNT_SATURATED(val)         (((val) & (1U << 31)) != 0)
#define REFCOUNT_SATURATION_VALUE       (3U << 30)

/*
 * Attempt to handle reference count overflow and underflow.  Force the counter
 * to stay at the saturation value so that a counter overflow cannot trigger
 * destruction of the containing object and instead leads to a less harmful
 * memory leak.
 */
static __inline void
_refcount_update_saturated(volatile u_int *count)
{
#ifdef INVARIANTS
        panic("refcount %p wraparound", count);
#else
        atomic_store_int(count, REFCOUNT_SATURATION_VALUE);
#endif
}

static __inline void
refcount_init(volatile u_int *count, u_int value)
{
        KASSERT(!REFCOUNT_SATURATED(value),
            ("invalid initial refcount value %u", value));
        atomic_store_int(count, value);
}

static __inline u_int
refcount_load(volatile u_int *count)
{
        return (atomic_load_int(count));
}

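/*
 * Acquire an additional reference.  The caller must already hold a
 * reference, or must otherwise guarantee that the object cannot be
 * destroyed concurrently; the refcount_acquire_if_*() variants below are
 * for callers without such a guarantee.
 */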
static __inline u_int
refcount_acquire(volatile u_int *count)
{
        u_int old;

        old = atomic_fetchadd_int(count, 1);
        if (__predict_false(REFCOUNT_SATURATED(old)))
                _refcount_update_saturated(count);

        return (old);
}

static __inline u_int
refcount_acquiren(volatile u_int *count, u_int n)
{
        u_int old;

        KASSERT(n < REFCOUNT_SATURATION_VALUE / 2,
            ("refcount_acquiren: n=%u too large", n));
        old = atomic_fetchadd_int(count, n);
        if (__predict_false(REFCOUNT_SATURATED(old)))
                _refcount_update_saturated(count);

        return (old);
}

static __inline __result_use_check bool
refcount_acquire_checked(volatile u_int *count)
{
        u_int old;

        old = atomic_load_int(count);
        for (;;) {
                if (__predict_false(REFCOUNT_SATURATED(old + 1)))
                        return (false);
                if (__predict_true(atomic_fcmpset_int(count, &old,
                    old + 1) == 1))
                        return (true);
        }
}
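
/*
 * A minimal usage sketch (hypothetical structure and function names, not
 * part of this header): refcount_acquire_checked() lets a caller refuse
 * to hand out a new reference rather than saturate the counter.
 *
 *	static struct obj *
 *	obj_ref(struct obj *o)
 *	{
 *
 *		if (!refcount_acquire_checked(&o->ref))
 *			return (NULL);
 *		return (o);
 *	}
 */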

/*
 * This function returns true if the reference count was incremented and
 * false otherwise.  A saturated count is reported as acquired without
 * being modified, since a saturated object is never destroyed.
 */
static __inline __result_use_check bool
refcount_acquire_if_gt(volatile u_int *count, u_int n)
{
        u_int old;

        old = atomic_load_int(count);
        for (;;) {
                if (old <= n)
                        return (false);
                if (__predict_false(REFCOUNT_SATURATED(old)))
                        return (true);
                if (atomic_fcmpset_int(count, &old, old + 1))
                        return (true);
        }
}

static __inline __result_use_check bool
refcount_acquire_if_not_zero(volatile u_int *count)
{

        return (refcount_acquire_if_gt(count, 0));
}
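
/*
 * A sketch of a typical lookup path (hypothetical names): an object found
 * in a shared lookup structure may be losing its last reference
 * concurrently, so a new reference is taken only while the count is still
 * non-zero.
 *
 *	obj = obj_lookup(key);
 *	if (obj != NULL && !refcount_acquire_if_not_zero(&obj->ref))
 *		obj = NULL;
 */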

static __inline bool
refcount_releasen(volatile u_int *count, u_int n)
{
        u_int old;

        KASSERT(n < REFCOUNT_SATURATION_VALUE / 2,
            ("refcount_releasen: n=%u too large", n));

        atomic_thread_fence_rel();
        old = atomic_fetchadd_int(count, -n);
        if (__predict_false(old < n || REFCOUNT_SATURATED(old))) {
                _refcount_update_saturated(count);
                return (false);
        }
        if (old > n)
                return (false);

        /*
         * Last reference.  Signal the user to call the destructor.
         *
         * Ensure that the destructor sees all updates.  This synchronizes
         * with release fences from all routines which drop the count.
         */
        atomic_thread_fence_acq();
        return (true);
}

static __inline bool
refcount_release(volatile u_int *count)
{

        return (refcount_releasen(count, 1));
}
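
/*
 * The canonical life cycle, as a hedged sketch (the structure, field, and
 * destructor names are hypothetical): the count starts at 1 for the
 * creating reference, each additional reference pairs an acquire with a
 * later release, and whichever holder sees refcount_release() return true
 * runs the destructor.
 *
 *	struct obj {
 *		volatile u_int	ref;
 *	};
 *
 *	refcount_init(&obj->ref, 1);
 *	...
 *	refcount_acquire(&obj->ref);
 *	...
 *	if (refcount_release(&obj->ref))
 *		obj_destroy(obj);
 */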
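/*
 * Generate the conditional-release helpers below.  Each expansion yields
 * a function that atomically decrements the count only while "cond",
 * evaluated on the freshly loaded value "old", remains true; a saturated
 * count is never decremented.
 */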
#define _refcount_release_if_cond(cond, name)                           \
static __inline __result_use_check bool                                 \
_refcount_release_if_##name(volatile u_int *count, u_int n)             \
{                                                                       \
        u_int old;                                                      \
                                                                        \
        KASSERT(n > 0, ("%s: zero decrement", __func__));               \
        old = atomic_load_int(count);                                   \
        for (;;) {                                                      \
                if (!(cond))                                            \
                        return (false);                                 \
                if (__predict_false(REFCOUNT_SATURATED(old)))           \
                        return (false);                                 \
                if (atomic_fcmpset_rel_int(count, &old, old - 1))       \
                        return (true);                                  \
        }                                                               \
}
_refcount_release_if_cond(old > n, gt)
_refcount_release_if_cond(old == n, eq)

static __inline __result_use_check bool
refcount_release_if_gt(volatile u_int *count, u_int n)
{

        return (_refcount_release_if_gt(count, n));
}

static __inline __result_use_check bool
refcount_release_if_last(volatile u_int *count)
{

        if (_refcount_release_if_eq(count, 1)) {
                /* See the comment in refcount_releasen(). */
                atomic_thread_fence_acq();
                return (true);
        }
        return (false);
}

static __inline __result_use_check bool
refcount_release_if_not_last(volatile u_int *count)
{

        return (_refcount_release_if_gt(count, 1));
}
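
/*
 * A hedged usage sketch (hypothetical names): refcount_release_if_not_last()
 * lets a caller that must not run the destructor in its current context
 * drop its reference only while another holder remains; when it fails,
 * the caller still holds its reference and can defer the final release to
 * a safe context.
 *
 *	if (!refcount_release_if_not_last(&obj->ref))
 *		obj_defer_release(obj);
 */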

#endif /* !__SYS_REFCOUNT_H__ */