/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005 John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef __SYS_REFCOUNT_H__
#define __SYS_REFCOUNT_H__

#include <machine/atomic.h>

#ifdef _KERNEL
#include <sys/systm.h>
#else
#include <stdbool.h>
#define KASSERT(exp, msg)       /* */
#endif

#define REFCOUNT_WAITER                 (1U << 31) /* Refcount has waiter. */
#define REFCOUNT_SATURATION_VALUE       (3U << 29)

#define REFCOUNT_SATURATED(val)         (((val) & (1U << 30)) != 0)
#define REFCOUNT_COUNT(x)               ((x) & ~REFCOUNT_WAITER)
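/*
 * Illustrative worked note on the counter layout implied by the macros
 * above: bit 31 is the waiter flag, bit 30 marks a saturated counter, and
 * the low-order bits hold the count itself.  For example:
 *
 *      REFCOUNT_COUNT(REFCOUNT_WAITER | 5)            == 5
 *      REFCOUNT_SATURATED(REFCOUNT_SATURATION_VALUE)  != 0
 */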

bool refcount_release_last(volatile u_int *count, u_int n, u_int old);

/*
 * Attempt to handle reference count overflow and underflow.  Force the counter
 * to stay at the saturation value so that a counter overflow cannot trigger
 * destruction of the containing object and instead leads to a less harmful
 * memory leak.
 */
static __inline void
_refcount_update_saturated(volatile u_int *count)
{
#ifdef INVARIANTS
        panic("refcount %p wraparound", count);
#else
        atomic_store_int(count, REFCOUNT_SATURATION_VALUE);
#endif
}

static __inline void
refcount_init(volatile u_int *count, u_int value)
{
        KASSERT(!REFCOUNT_SATURATED(value),
            ("invalid initial refcount value %u", value));
        *count = value;
}

static __inline u_int
refcount_acquire(volatile u_int *count)
{
        u_int old;

        old = atomic_fetchadd_int(count, 1);
        if (__predict_false(REFCOUNT_SATURATED(old)))
                _refcount_update_saturated(count);

        return (old);
}

static __inline u_int
refcount_acquiren(volatile u_int *count, u_int n)
{
        u_int old;

        KASSERT(n < REFCOUNT_SATURATION_VALUE / 2,
            ("refcount_acquiren: n=%u too large", n));
        old = atomic_fetchadd_int(count, n);
        if (__predict_false(REFCOUNT_SATURATED(old)))
                _refcount_update_saturated(count);

        return (old);
}

static __inline __result_use_check bool
refcount_acquire_checked(volatile u_int *count)
{
        u_int lcount;

        for (lcount = *count;;) {
                if (__predict_false(REFCOUNT_SATURATED(lcount + 1)))
                        return (false);
                if (__predict_true(atomic_fcmpset_int(count, &lcount,
                    lcount + 1) == 1))
                        return (true);
        }
}
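/*
 * A minimal sketch of overflow-checked acquisition, assuming a hypothetical
 * object "obj" with an embedded count; the field name and error value are
 * illustrative only.  refcount_acquire_checked() refuses a reference that
 * would saturate the counter, so a caller can turn overflow into an error:
 *
 *      if (!refcount_acquire_checked(&obj->ref_count))
 *              return (EOVERFLOW);
 */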

static __inline bool
refcount_releasen(volatile u_int *count, u_int n)
{
        u_int old;

        KASSERT(n < REFCOUNT_SATURATION_VALUE / 2,
            ("refcount_releasen: n=%u too large", n));

        atomic_thread_fence_rel();
        old = atomic_fetchadd_int(count, -n);
        if (__predict_false(n >= REFCOUNT_COUNT(old) ||
            REFCOUNT_SATURATED(old)))
                return (refcount_release_last(count, n, old));
        return (false);
}

static __inline bool
refcount_release(volatile u_int *count)
{

        return (refcount_releasen(count, 1));
}
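/*
 * A minimal sketch of the usual acquire/release pattern, assuming a
 * hypothetical "struct foo" with an embedded count; foo_hold(), foo_rele()
 * and foo_destroy() are illustrative names only.  refcount_release()
 * returns true only for the final reference, so the releaser that sees
 * true may safely destroy the object:
 *
 *      struct foo {
 *              volatile u_int  f_count;
 *              ...
 *      };
 *
 *      void
 *      foo_hold(struct foo *fp)
 *      {
 *              refcount_acquire(&fp->f_count);
 *      }
 *
 *      void
 *      foo_rele(struct foo *fp)
 *      {
 *              if (refcount_release(&fp->f_count))
 *                      foo_destroy(fp);
 *      }
 */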

#ifdef _KERNEL
struct lock_object;
void _refcount_sleep(volatile u_int *count, struct lock_object *,
    const char *wmesg, int prio);

static __inline void
refcount_sleep(volatile u_int *count, const char *wmesg, int prio)
{

        _refcount_sleep(count, NULL, wmesg, prio);
}

#define refcount_sleep_interlock(count, lock, wmesg, prio)              \
        _refcount_sleep((count), (struct lock_object *)(lock), (wmesg), (prio))

static __inline void
refcount_wait(volatile u_int *count, const char *wmesg, int prio)
{

        while (*count != 0)
                refcount_sleep(count, wmesg, prio);
}
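/*
 * A minimal teardown sketch using refcount_wait(), assuming a hypothetical
 * softc with an embedded count; "sc", "sc_refs" and the wait message are
 * illustrative only.  Once new references can no longer be granted, the
 * detach path drops its own reference and then sleeps until the count
 * reaches zero:
 *
 *      refcount_release(&sc->sc_refs);
 *      refcount_wait(&sc->sc_refs, "refdrn", PWAIT);
 */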
#endif

/*
 * These functions return true if the refcount was incremented, and false
 * otherwise.
 */
static __inline __result_use_check bool
refcount_acquire_if_gt(volatile u_int *count, u_int n)
{
        u_int old;

        old = *count;
        for (;;) {
                if (REFCOUNT_COUNT(old) <= n)
                        return (false);
                if (__predict_false(REFCOUNT_SATURATED(old)))
                        return (true);
                if (atomic_fcmpset_int(count, &old, old + 1))
                        return (true);
        }
}

static __inline __result_use_check bool
refcount_acquire_if_not_zero(volatile u_int *count)
{

        return refcount_acquire_if_gt(count, 0);
}
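/*
 * A minimal lookup-path sketch using the conditional acquire, assuming a
 * hypothetical object "obj" returned by a hypothetical obj_lookup(); the
 * names are illustrative only.  refcount_acquire_if_not_zero() fails once
 * the last reference has been dropped, so an object already being torn
 * down is skipped instead of being resurrected:
 *
 *      obj = obj_lookup(key);
 *      if (obj != NULL && !refcount_acquire_if_not_zero(&obj->ref_count))
 *              obj = NULL;
 *      return (obj);
 */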

static __inline __result_use_check bool
refcount_release_if_gt(volatile u_int *count, u_int n)
{
        u_int old;

        KASSERT(n > 0,
            ("refcount_release_if_gt: Use refcount_release for final ref"));
        old = *count;
        for (;;) {
                if (REFCOUNT_COUNT(old) <= n)
                        return (false);
                if (__predict_false(REFCOUNT_SATURATED(old)))
                        return (true);
                if (atomic_fcmpset_int(count, &old, old - 1))
                        return (true);
        }
}

static __inline __result_use_check bool
refcount_release_if_not_last(volatile u_int *count)
{

        return refcount_release_if_gt(count, 1);
}
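/*
 * A minimal sketch of the lock-avoidance pattern these helpers enable,
 * assuming a hypothetical object kept on a list protected by a hypothetical
 * mutex; all names are illustrative only.  The common case drops a
 * reference without taking the list lock; only a possible final release
 * falls back to the locked path:
 *
 *      if (!refcount_release_if_not_last(&obj->ref_count)) {
 *              mtx_lock(&obj_list_mtx);
 *              if (refcount_release(&obj->ref_count)) {
 *                      LIST_REMOVE(obj, obj_link);
 *                      mtx_unlock(&obj_list_mtx);
 *                      obj_destroy(obj);
 *              } else
 *                      mtx_unlock(&obj_list_mtx);
 *      }
 */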
#endif  /* ! __SYS_REFCOUNT_H__ */