/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005 John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef __SYS_REFCOUNT_H__
#define __SYS_REFCOUNT_H__

#include <machine/atomic.h>

#ifdef _KERNEL
#include <sys/systm.h>
#else
#include <stdbool.h>
#define KASSERT(exp, msg)       /* */
#endif

#define REFCOUNT_WAITER                 (1U << 31) /* Refcount has waiter. */
#define REFCOUNT_SATURATION_VALUE       (3U << 29)

#define REFCOUNT_SATURATED(val)         (((val) & (1U << 30)) != 0)
#define REFCOUNT_COUNT(x)               ((x) & ~REFCOUNT_WAITER)
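
/*
 * Worked example of the saturation arithmetic (editorial note, not from the
 * original header): REFCOUNT_SATURATION_VALUE is 3 * 2^29 = 0x60000000, the
 * midpoint of the range [2^30, 2^31) in which bit 30 is set and
 * REFCOUNT_SATURATED() reports true.  Parking a wrapped counter there means
 * roughly 2^29 further increments are needed before the waiter bit (1 << 31)
 * is disturbed, and roughly 2^29 decrements before bit 30 clears, so
 * concurrent updates cannot quickly move a saturated counter back into the
 * normal range.
 */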

bool refcount_release_last(volatile u_int *count, u_int n, u_int old);
void refcount_sleep(volatile u_int *count, const char *wmesg, int prio);

/*
 * Attempt to handle reference count overflow and underflow.  Force the counter
 * to stay at the saturation value so that a counter overflow cannot trigger
 * destruction of the containing object and instead leads to a less harmful
 * memory leak.
 */
static __inline void
_refcount_update_saturated(volatile u_int *count)
{
#ifdef INVARIANTS
        panic("refcount %p wraparound", count);
#else
        atomic_store_int(count, REFCOUNT_SATURATION_VALUE);
#endif
}

static __inline void
refcount_init(volatile u_int *count, u_int value)
{
        KASSERT(!REFCOUNT_SATURATED(value),
            ("invalid initial refcount value %u", value));
        *count = value;
}

static __inline u_int
refcount_acquire(volatile u_int *count)
{
        u_int old;

        old = atomic_fetchadd_int(count, 1);
        if (__predict_false(REFCOUNT_SATURATED(old)))
                _refcount_update_saturated(count);

        return (old);
}

static __inline u_int
refcount_acquiren(volatile u_int *count, u_int n)
{
        u_int old;

        KASSERT(n < REFCOUNT_SATURATION_VALUE / 2,
            ("refcount_acquiren: n=%u too large", n));
        old = atomic_fetchadd_int(count, n);
        if (__predict_false(REFCOUNT_SATURATED(old)))
                _refcount_update_saturated(count);

        return (old);
}

/*
 * Acquire a reference, returning false instead of wrapping the counter when
 * the count is about to enter the saturated range.
 */
static __inline __result_use_check bool
refcount_acquire_checked(volatile u_int *count)
{
        u_int lcount;

        for (lcount = *count;;) {
                if (__predict_false(REFCOUNT_SATURATED(lcount + 1)))
                        return (false);
                if (__predict_true(atomic_fcmpset_int(count, &lcount,
                    lcount + 1) == 1))
                        return (true);
        }
}
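
/*
 * Usage sketch (editorial addition, compiled only if the sketch-only guard
 * REFCOUNT_USAGE_EXAMPLE is defined): "struct obj" and the obj_*() helpers
 * here and below are hypothetical examples, not part of this interface.
 * refcount_acquire_checked() lets a caller refuse new references once the
 * counter nears saturation instead of leaking the object.
 */
#ifdef REFCOUNT_USAGE_EXAMPLE
struct obj {
        u_int   obj_refs;       /* managed with the refcount(9) functions */
};

static bool
obj_hold_checked(struct obj *o)
{

        /* Fails (returns false) rather than saturate the counter. */
        return (refcount_acquire_checked(&o->obj_refs));
}
#endif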

static __inline bool
refcount_releasen(volatile u_int *count, u_int n)
{
        u_int old;

        KASSERT(n < REFCOUNT_SATURATION_VALUE / 2,
            ("refcount_releasen: n=%u too large", n));

        /*
         * The release fence orders this thread's prior accesses to the
         * object before the reference is dropped.
         */
        atomic_thread_fence_rel();
        old = atomic_fetchadd_int(count, -n);
        if (__predict_false(n >= REFCOUNT_COUNT(old) ||
            REFCOUNT_SATURATED(old)))
                return (refcount_release_last(count, n, old));
        return (false);
}

static __inline bool
refcount_release(volatile u_int *count)
{

        return (refcount_releasen(count, 1));
}
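
/*
 * Lifecycle sketch (editorial addition, illustrative only): the creator
 * holds the first reference, obj_hold() takes additional ones, and the
 * caller that sees refcount_release() return true owns the teardown.
 * obj_fini() is a hypothetical stand-in for a real destructor.
 */
#ifdef REFCOUNT_USAGE_EXAMPLE
static void
obj_fini(struct obj *o)
{

        /* A real object would free its resources here. */
        (void)o;
}

static void
obj_init(struct obj *o)
{

        refcount_init(&o->obj_refs, 1); /* creator's reference */
}

static void
obj_hold(struct obj *o)
{

        refcount_acquire(&o->obj_refs);
}

static void
obj_drop(struct obj *o)
{

        /* True is returned exactly once, for the final release. */
        if (refcount_release(&o->obj_refs))
                obj_fini(o);
}
#endif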

static __inline void
refcount_wait(volatile u_int *count, const char *wmesg, int prio)
{

        while (*count != 0)
                refcount_sleep(count, wmesg, prio);
}
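
/*
 * Drain sketch (editorial addition, illustrative only): a teardown path
 * that has already dropped its own reference can sleep until every
 * remaining holder is gone; refcount_release_last() wakes the sleeper when
 * the count reaches zero.  PWAIT (from <sys/priority.h>, assumed visible in
 * this kernel context) and the wait message "objdrn" are arbitrary choices
 * for the sketch.
 */
#ifdef REFCOUNT_USAGE_EXAMPLE
static void
obj_drain(struct obj *o)
{

        refcount_wait(&o->obj_refs, "objdrn", PWAIT);
}
#endif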

/*
 * This function returns true if the refcount was incremented.  Otherwise
 * false is returned.
 */
static __inline __result_use_check bool
refcount_acquire_if_gt(volatile u_int *count, u_int n)
{
        u_int old;

        old = *count;
        for (;;) {
                if (REFCOUNT_COUNT(old) <= n)
                        return (false);
                if (__predict_false(REFCOUNT_SATURATED(old)))
                        return (true);
                if (atomic_fcmpset_int(count, &old, old + 1))
                        return (true);
        }
}

static __inline __result_use_check bool
refcount_acquire_if_not_zero(volatile u_int *count)
{

        return (refcount_acquire_if_gt(count, 0));
}
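
/*
 * Lookup sketch (editorial addition, illustrative only): an object found
 * through a shared lookup structure may already be on its way to
 * destruction.  Taking the reference conditionally lets the lookup fail
 * cleanly instead of resurrecting a dying object.  obj_table_find() and
 * its backing table are hypothetical.
 */
#ifdef REFCOUNT_USAGE_EXAMPLE
static struct obj obj_example_table[1];

static struct obj *
obj_table_find(void)
{

        return (&obj_example_table[0]);
}

static struct obj *
obj_lookup(void)
{
        struct obj *o;

        o = obj_table_find();
        if (o != NULL && !refcount_acquire_if_not_zero(&o->obj_refs))
                o = NULL;       /* lost the race with the final release */
        return (o);
}
#endif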

static __inline __result_use_check bool
refcount_release_if_gt(volatile u_int *count, u_int n)
{
        u_int old;

        KASSERT(n > 0,
            ("refcount_release_if_gt: Use refcount_release for final ref"));
        old = *count;
        for (;;) {
                if (REFCOUNT_COUNT(old) <= n)
                        return (false);
                if (__predict_false(REFCOUNT_SATURATED(old)))
                        return (true);
                if (atomic_fcmpset_int(count, &old, old - 1))
                        return (true);
        }
}

static __inline __result_use_check bool
refcount_release_if_not_last(volatile u_int *count)
{

        return (refcount_release_if_gt(count, 1));
}
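
/*
 * Fast-path sketch (editorial addition, illustrative only):
 * refcount_release_if_not_last() drops a reference only when it is
 * certainly not the final one, letting hot paths skip teardown work; when
 * it returns false, the caller falls back to the full release path
 * (obj_drop() above).
 */
#ifdef REFCOUNT_USAGE_EXAMPLE
static void
obj_drop_fast(struct obj *o)
{

        if (refcount_release_if_not_last(&o->obj_refs))
                return;         /* reference dropped; object still live */
        obj_drop(o);            /* might be the last reference */
}
#endif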
#endif  /* ! __SYS_REFCOUNT_H__ */