/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005 John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef __SYS_REFCOUNT_H__
#define __SYS_REFCOUNT_H__

#include <machine/atomic.h>

#ifdef _KERNEL
#include <sys/systm.h>
#else
#include <stdbool.h>
#define KASSERT(exp, msg)       /* */
#endif

#define REFCOUNT_WAITER                 (1U << 31) /* Refcount has waiter. */
#define REFCOUNT_SATURATION_VALUE       (3U << 29)

#define REFCOUNT_SATURATED(val)         (((val) & (1U << 30)) != 0)
#define REFCOUNT_COUNT(x)               ((x) & ~REFCOUNT_WAITER)

bool refcount_release_last(volatile u_int *count, u_int n, u_int old);

/*
 * Attempt to handle reference count overflow and underflow.  Force the counter
 * to stay at the saturation value so that a counter overflow cannot trigger
 * destruction of the containing object and instead leads to a less harmful
 * memory leak.
 */
static __inline void
_refcount_update_saturated(volatile u_int *count)
{
#ifdef INVARIANTS
        panic("refcount %p wraparound", count);
#else
        atomic_store_int(count, REFCOUNT_SATURATION_VALUE);
#endif
}

static __inline void
refcount_init(volatile u_int *count, u_int value)
{
        KASSERT(!REFCOUNT_SATURATED(value),
            ("invalid initial refcount value %u", value));
        *count = value;
}

static __inline u_int
refcount_acquire(volatile u_int *count)
{
        u_int old;

        old = atomic_fetchadd_int(count, 1);
        if (__predict_false(REFCOUNT_SATURATED(old)))
                _refcount_update_saturated(count);

        return (old);
}

static __inline u_int
refcount_acquiren(volatile u_int *count, u_int n)
{
        u_int old;

        KASSERT(n < REFCOUNT_SATURATION_VALUE / 2,
            ("refcount_acquiren: n=%u too large", n));
        old = atomic_fetchadd_int(count, n);
        if (__predict_false(REFCOUNT_SATURATED(old)))
                _refcount_update_saturated(count);

        return (old);
}

static __inline __result_use_check bool
refcount_acquire_checked(volatile u_int *count)
{
        u_int lcount;

        for (lcount = *count;;) {
                if (__predict_false(REFCOUNT_SATURATED(lcount + 1)))
                        return (false);
                if (__predict_true(atomic_fcmpset_int(count, &lcount,
                    lcount + 1) == 1))
                        return (true);
        }
}
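
/*
 * Example (illustrative sketch only; "obj", "ref_count" and the error
 * handling are hypothetical caller-side names, not part of this interface):
 * paths that must not let the counter saturate silently can use the checked
 * variant and fail the operation instead of leaking the object.
 *
 *      if (!refcount_acquire_checked(&obj->ref_count))
 *              return (EOVERFLOW);
 */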

static __inline bool
refcount_releasen(volatile u_int *count, u_int n)
{
        u_int old;

        KASSERT(n < REFCOUNT_SATURATION_VALUE / 2,
            ("refcount_releasen: n=%u too large", n));

        /*
         * Paired with the acquire fence in refcount_release_last: the release
         * fence ensures that all stores to the object made while holding a
         * reference are visible to the thread that observes the final release
         * and frees the object.
         */
        atomic_thread_fence_rel();
        old = atomic_fetchadd_int(count, -n);
        if (__predict_false(n >= REFCOUNT_COUNT(old) ||
            REFCOUNT_SATURATED(old)))
                return (refcount_release_last(count, n, old));
        return (false);
}

static __inline bool
refcount_release(volatile u_int *count)
{

        return (refcount_releasen(count, 1));
}
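
/*
 * Typical lifecycle (illustrative sketch; "struct foo", "foo_alloc" and
 * "foo_free" are hypothetical caller-side names, not part of this API):
 *
 *      struct foo {
 *              volatile u_int  foo_refs;
 *              ...
 *      };
 *
 *      fp = foo_alloc();
 *      refcount_init(&fp->foo_refs, 1);        creator holds the first ref
 *      ...
 *      refcount_acquire(&fp->foo_refs);        take an additional ref
 *      ...
 *      if (refcount_release(&fp->foo_refs))    true only for the last ref
 *              foo_free(fp);
 */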

#ifdef _KERNEL
struct lock_object;
void _refcount_sleep(volatile u_int *count, struct lock_object *,
    const char *wmesg, int prio);

static __inline void
refcount_sleep(volatile u_int *count, const char *wmesg, int prio)
{

        _refcount_sleep(count, NULL, wmesg, prio);
}

#define refcount_sleep_interlock(count, lock, wmesg, prio)              \
        _refcount_sleep((count), (struct lock_object *)(lock), (wmesg), (prio))

static __inline void
refcount_wait(volatile u_int *count, const char *wmesg, int prio)
{

        while (*count != 0)
                refcount_sleep(count, wmesg, prio);
}
#endif
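
/*
 * Sketch of a kernel-only drain pattern using refcount_wait(); "sc",
 * "sc_refs" and "sc_free" are hypothetical names.  A teardown path drops
 * its own reference and then sleeps until every outstanding reference has
 * been released before freeing the object's resources:
 *
 *      refcount_release(&sc->sc_refs);
 *      refcount_wait(&sc->sc_refs, "scdrain", PWAIT);
 *      sc_free(sc);
 */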

/*
 * This function returns true if the refcount was incremented (or is
 * saturated), otherwise false.
 */
static __inline __result_use_check bool
refcount_acquire_if_gt(volatile u_int *count, u_int n)
{
        u_int old;

        old = *count;
        for (;;) {
                if (REFCOUNT_COUNT(old) <= n)
                        return (false);
                if (__predict_false(REFCOUNT_SATURATED(old)))
                        return (true);
                if (atomic_fcmpset_int(count, &old, old + 1))
                        return (true);
        }
}

static __inline __result_use_check bool
refcount_acquire_if_not_zero(volatile u_int *count)
{

        return (refcount_acquire_if_gt(count, 0));
}
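
/*
 * Sketch of a lookup pattern (hypothetical names): when an object is found
 * via a lockless or lightly locked table, refcount_acquire_if_not_zero()
 * refuses to take a reference once the last reference has been dropped, so
 * a concurrently torn-down object is never resurrected:
 *
 *      fp = foo_lookup(key);
 *      if (fp != NULL && !refcount_acquire_if_not_zero(&fp->foo_refs))
 *              fp = NULL;      already being destroyed; treat as not found
 */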

static __inline __result_use_check bool
refcount_release_if_gt(volatile u_int *count, u_int n)
{
        u_int old;

        KASSERT(n > 0,
            ("refcount_release_if_gt: Use refcount_release for final ref"));
        old = *count;
        for (;;) {
                if (REFCOUNT_COUNT(old) <= n)
                        return (false);
                if (__predict_false(REFCOUNT_SATURATED(old)))
                        return (true);
                /*
                 * Paired with acquire fence in refcount_release_last.
                 */
                if (atomic_fcmpset_rel_int(count, &old, old - 1))
                        return (true);
        }
}

static __inline __result_use_check bool
refcount_release_if_not_last(volatile u_int *count)
{

        return (refcount_release_if_gt(count, 1));
}
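
/*
 * Sketch of a fast-path release (hypothetical names): a hot path drops its
 * reference without any teardown work when it is not the last one, and only
 * falls back to the locked path when the final release may be needed:
 *
 *      if (!refcount_release_if_not_last(&fp->foo_refs)) {
 *              FOO_LIST_LOCK();
 *              if (refcount_release(&fp->foo_refs)) {
 *                      LIST_REMOVE(fp, foo_link);
 *                      FOO_LIST_UNLOCK();
 *                      foo_free(fp);
 *              } else
 *                      FOO_LIST_UNLOCK();
 *      }
 */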
#endif  /* ! __SYS_REFCOUNT_H__ */