/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005 John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef __SYS_REFCOUNT_H__
#define __SYS_REFCOUNT_H__

#include <machine/atomic.h>

#ifdef _KERNEL
#include <sys/systm.h>
#else
#include <stdbool.h>
#define KASSERT(exp, msg)       /* */
#endif

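/*
 * Layout, as read off the macros below: the count proper lives in the low
 * 31 bits and bit 31 flags the presence of a sleeping waiter.  Saturation
 * is detected via bit 30, which a well-behaved count never reaches; the
 * saturation value sets bits 29 and 30, leaving headroom in both
 * directions before the waiter bit or zero is hit.
 */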
#define REFCOUNT_WAITER                 (1U << 31) /* Refcount has waiter. */
#define REFCOUNT_SATURATION_VALUE       (3U << 29)

#define REFCOUNT_SATURATED(val)         (((val) & (1U << 30)) != 0)
#define REFCOUNT_COUNT(x)               ((x) & ~REFCOUNT_WAITER)

bool refcount_release_last(volatile u_int *count, u_int n, u_int old);
void refcount_sleep(volatile u_int *count, const char *wmesg, int prio);

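/*
 * Typical usage, as an illustrative sketch only: "struct obj" and its
 * "refcnt" member are hypothetical stand-ins, not part of this interface
 * (M_TEMP is the stock malloc(9) type).  The object embeds a u_int counter,
 * starts life with one reference, and is freed on the final release:
 *
 *	struct obj {
 *		u_int	refcnt;
 *	};
 *
 *	struct obj *obj;
 *
 *	obj = malloc(sizeof(*obj), M_TEMP, M_WAITOK | M_ZERO);
 *	refcount_init(&obj->refcnt, 1);		(creation reference)
 *	refcount_acquire(&obj->refcnt);		(one per new consumer)
 *	if (refcount_release(&obj->refcnt))	(true on final release)
 *		free(obj, M_TEMP);
 */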
/*
 * Attempt to handle reference count overflow and underflow.  Force the counter
 * to stay at the saturation value so that a counter overflow cannot trigger
 * destruction of the containing object and instead leads to a less harmful
 * memory leak.
 */
static __inline void
_refcount_update_saturated(volatile u_int *count)
{
#ifdef INVARIANTS
        panic("refcount %p wraparound", count);
#else
        atomic_store_int(count, REFCOUNT_SATURATION_VALUE);
#endif
}

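/*
 * Initialize the counter.  The initial value must lie below the saturation
 * range.
 */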
static __inline void
refcount_init(volatile u_int *count, u_int value)
{
        KASSERT(!REFCOUNT_SATURATED(value),
            ("invalid initial refcount value %u", value));
        *count = value;
}

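/*
 * Acquire a reference.  On overflow the counter is pinned at the saturation
 * value (or panics under INVARIANTS) instead of wrapping around.
 */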
static __inline void
refcount_acquire(volatile u_int *count)
{
        u_int old;

        old = atomic_fetchadd_int(count, 1);
        if (__predict_false(REFCOUNT_SATURATED(old)))
                _refcount_update_saturated(count);
}

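/*
 * Acquire n references at once, with the same saturation behaviour as
 * refcount_acquire().
 */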
static __inline void
refcount_acquiren(volatile u_int *count, u_int n)
{
        u_int old;

        KASSERT(n < REFCOUNT_SATURATION_VALUE / 2,
            ("refcount_acquiren: n=%u too large", n));
        old = atomic_fetchadd_int(count, n);
        if (__predict_false(REFCOUNT_SATURATED(old)))
                _refcount_update_saturated(count);
}

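/*
 * Acquire a reference only if doing so would not saturate the counter;
 * returns false instead of saturating.
 */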
static __inline __result_use_check bool
refcount_acquire_checked(volatile u_int *count)
{
        u_int lcount;

        for (lcount = *count;;) {
                if (__predict_false(REFCOUNT_SATURATED(lcount + 1)))
                        return (false);
                if (__predict_true(atomic_fcmpset_int(count, &lcount,
                    lcount + 1) == 1))
                        return (true);
        }
}

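/*
 * Release n references; returns true when the last reference was released.
 * The uncommon cases (final release, waiters, saturation) are handed off to
 * refcount_release_last().
 */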
static __inline bool
refcount_releasen(volatile u_int *count, u_int n)
{
        u_int old;

        KASSERT(n < REFCOUNT_SATURATION_VALUE / 2,
            ("refcount_releasen: n=%u too large", n));

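        /*
         * The release fence orders all prior accesses to the protected
         * object before the decrement, so a thread performing the final
         * release cannot observe stale state when it frees the object.
         */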
        atomic_thread_fence_rel();
        old = atomic_fetchadd_int(count, -n);
        if (__predict_false(n >= REFCOUNT_COUNT(old) ||
            REFCOUNT_SATURATED(old)))
                return (refcount_release_last(count, n, old));
        return (false);
}

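/*
 * Release a single reference; returns true when the last reference was
 * released.
 */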
static __inline bool
refcount_release(volatile u_int *count)
{

        return (refcount_releasen(count, 1));
}

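/*
 * Sleep until the reference count drains to zero.
 */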
static __inline void
refcount_wait(volatile u_int *count, const char *wmesg, int prio)
{

        while (*count != 0)
                refcount_sleep(count, wmesg, prio);
}

/*
 * This function returns true if the refcount was incremented.  Otherwise
 * false is returned.
 */
static __inline __result_use_check bool
refcount_acquire_if_not_zero(volatile u_int *count)
{
        u_int old;

        old = *count;
        for (;;) {
                if (REFCOUNT_COUNT(old) == 0)
                        return (false);
                if (__predict_false(REFCOUNT_SATURATED(old)))
                        return (true);
                if (atomic_fcmpset_int(count, &old, old + 1))
                        return (true);
        }
}

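/*
 * Release a reference only if it is not the last one; returns false when the
 * caller holds the final reference, leaving the count untouched.
 */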
static __inline __result_use_check bool
refcount_release_if_not_last(volatile u_int *count)
{
        u_int old;

        old = *count;
        for (;;) {
                if (REFCOUNT_COUNT(old) == 1)
                        return (false);
                if (__predict_false(REFCOUNT_SATURATED(old)))
                        return (true);
                if (atomic_fcmpset_int(count, &old, old - 1))
                        return (true);
        }
}

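/*
 * Release a reference only while more than n references remain; returns false
 * without modifying the count once it has dropped to n or below.
 */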
static __inline __result_use_check bool
refcount_release_if_gt(volatile u_int *count, u_int n)
{
        u_int old;

        KASSERT(n > 0,
            ("refcount_release_if_gt: Use refcount_release for final ref"));
        old = *count;
        for (;;) {
                if (REFCOUNT_COUNT(old) <= n)
                        return (false);
                if (__predict_false(REFCOUNT_SATURATED(old)))
                        return (true);
                if (atomic_fcmpset_int(count, &old, old - 1))
                        return (true);
        }
}

#endif  /* ! __SYS_REFCOUNT_H__ */