/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2019,2020 Jeffrey Roberson <jeff@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/smr.h>

#include <vm/uma.h>
/*
 * This is a novel safe memory reclamation technique inspired by
 * epoch-based reclamation from Samy Al Bahra's Concurrency Kit which
 * in turn was based on work described in:
 *	Fraser, K. 2004. Practical Lock-Freedom. PhD Thesis, University
 *	of Cambridge Computing Laboratory.
 * And shares some similarities with:
 *	Wang, Stamler, Parmer. 2016. Parallel Sections: Scaling System-Level
 *	Data-Structures.
 *
 * This is not an implementation of hazard pointers or related
 * techniques.  The term safe memory reclamation is used as a
 * generic descriptor for algorithms that defer frees to avoid
 * use-after-free errors with lockless data structures.
 *
 * The basic approach is to maintain a monotonic write sequence
 * number that is updated on some application-defined granularity.
 * Readers record the most recent write sequence number they have
 * observed.  A shared read sequence number records the lowest
 * sequence number observed by any reader as of the last poll.  Any
 * write older than this value has been observed by all readers
 * and memory can be reclaimed.  Like Epoch we also detect idle
 * readers by storing an invalid sequence number in the per-cpu
 * state when the read section exits.  Like Parsec we establish
 * a global write clock that is used to mark memory on free.
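 *
 * As a rough illustration only (a simplified sketch, not the exact
 * smr_enter()/smr_exit() code in sys/smr.h), a read section amounts to:
 *
 *	critical_enter();
 *	atomic_store_int(&c->c_seq, atomic_load_int(&s->s_wr_seq));
 *	atomic_thread_fence_seq_cst();
 *	... lockless reads of SMR-protected data ...
 *	atomic_store_rel_int(&c->c_seq, SMR_SEQ_INVALID);
 *	critical_exit();
 *
 * where c is the per-cpu state and s is the shared state.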
 *
 * The write and read sequence numbers can be thought of as a two-handed
 * clock with readers always advancing towards writers.  SMR
 * maintains the invariant that all readers can safely access memory
 * that was visible at the time they loaded their copy of the sequence
 * number.  Periodically the read sequence or hand is polled and
 * advanced as far towards the write sequence as active readers allow.
 * Memory which was freed between the old and new global read sequence
 * number can now be reclaimed.  When the system is idle the two hands
 * meet and no deferred memory is outstanding.  Readers never advance
 * any sequence number; they only observe them.  The shared read
 * sequence number is consequently never higher than the write sequence.
 * A stored sequence number that falls outside of this range has expired
 * and needs no scan to reclaim.
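 *
 * For example, with rd seq at 101 and wr seq at 109, memory tagged with
 * 103 through 109 may still be visible to some reader and must wait,
 * while memory tagged 101 or earlier, or carrying a value outside the
 * valid window entirely, has been observed by every reader and can be
 * reclaimed without a scan.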
 *
 * A notable distinction between this SMR and Epoch, QSBR, RCU, etc. is
 * that advancing the sequence number is decoupled from detecting its
 * observation.  This results in a more granular assignment of sequence
 * numbers even as read latencies prohibit all or some expiration.
 * It also allows writers to advance the sequence number and save the
 * poll for expiration until a later time when it is likely to
 * complete without waiting.  The batch granularity and free-to-use
 * latency are dynamic and can be significantly smaller than in more
 * strict schemes.
 *
 * This mechanism is primarily intended to be used in coordination with
 * UMA.  By integrating with the allocator we avoid all of the callout
 * queue machinery and are provided with an efficient way to batch
 * sequence advancement and waiting.  The allocator accumulates a full
 * per-cpu cache of memory before advancing the sequence.  It then
 * delays waiting for this sequence to expire until the memory is
 * selected for reuse.  In this way we only increment the sequence
 * value once for n=cache-size frees, and the waits are done long
 * after the sequence has expired so they need only be verified
 * to account for pathological conditions and to advance the read
 * sequence.  Tying the sequence number to the bucket size has the
 * nice property that as the zone gets busier the buckets get larger
 * and the sequence writes become fewer.  If the coherency cost of
 * advancing the write sequence number becomes too high we can advance
 * it for every N buckets in exchange for higher free-to-use
 * latency and consequently higher memory consumption.
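 *
 * Concretely, with a per-cpu bucket of, say, 128 items, the write
 * sequence is advanced once per 128 frees, and the matching poll
 * normally happens a full bucket cycle later, by which time the goal
 * has almost always expired on its own.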
 *
 * If the read overhead of accessing the shared cacheline becomes
 * especially burdensome an invariant TSC could be used in place of the
 * sequence.  The algorithm would then only need to maintain the minimum
 * observed TSC.  This would trade potential cache synchronization
 * overhead for local serialization and CPU timestamp overhead.
 *
 * A simplified diagram:
 *
 * | -------------------- sequence number space -------------------- |
 *              ^ rd seq                             ^ wr seq
 *              | ----- valid sequence numbers ---- |
 *                 ^ cpuA              ^ cpuC
 * | -- free -- | --------- deferred frees -------- | ---- free ---- |
 *
 * In this example cpuA has the lowest sequence number and poll can
 * advance rd seq.  cpuB is not running and is considered to observe
 * wr seq.
 *
 * Freed memory that is tagged with a sequence number between rd seq and
 * wr seq can not be safely reclaimed because cpuA may hold a reference to
 * it.  Any other memory is guaranteed to be unreferenced.
 *
 * Any writer is free to advance wr seq at any time; however, it may
 * busy-poll in pathological cases.
 */
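
/*
 * A minimal usage sketch of this interface (illustrative only; the item
 * structure, its seq member and free_item() are hypothetical, and most
 * consumers go through UMA rather than calling these routines directly):
 *
 *	smr_t smr = smr_create("example");
 *
 *	// Reader:
 *	smr_enter(smr);
 *	item = atomic_load_ptr(&head);		// lockless lookup
 *	... use item ...
 *	smr_exit(smr);
 *
 *	// Writer, after unlinking item from the visible structure:
 *	item->seq = smr_advance(smr);		// tag with the new wr_seq
 *	... later, when the memory is about to be reused ...
 *	if (!smr_poll(smr, item->seq, false))
 *		smr_wait(smr, item->seq);
 *	free_item(item);
 */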

static uma_zone_t smr_shared_zone;
static uma_zone_t smr_zone;

#ifndef INVARIANTS
#define	SMR_SEQ_INIT		1	/* All valid sequence numbers are odd. */
#define	SMR_SEQ_INCR		2

/*
 * SMR_SEQ_MAX_DELTA is the maximum distance allowed between rd_seq and
 * wr_seq.  For the modular arithmetic to work a value of UINT_MAX / 2
 * would be possible but it is checked after we increment the wr_seq so
 * a safety margin is left to prevent overflow.
 *
 * We will block until SMR_SEQ_MAX_ADVANCE sequence numbers have progressed
 * to prevent integer wrapping.  See smr_advance() for more details.
 */
#define	SMR_SEQ_MAX_DELTA	(UINT_MAX / 4)
#define	SMR_SEQ_MAX_ADVANCE	(SMR_SEQ_MAX_DELTA - 1024)
#else
/* We want to test the wrapping feature in INVARIANTS kernels. */
#define	SMR_SEQ_INCR		(UINT_MAX / 10000)
#define	SMR_SEQ_INIT		(UINT_MAX - 100000)
/* Force extra polls to test the integer overflow detection. */
#define	SMR_SEQ_MAX_DELTA	(1000)
#define	SMR_SEQ_MAX_ADVANCE	(SMR_SEQ_MAX_DELTA / 2)
#endif
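
/*
 * The wrap-safe comparisons used below (SMR_SEQ_LT/LEQ/GT/GEQ) live in
 * sys/smr.h and boil down to a signed difference, along the lines of:
 *
 *	#define SMR_SEQ_GEQ(a, b)	((int32_t)((a) - (b)) >= 0)
 *
 * Two sequence numbers therefore compare correctly as long as they are
 * less than half the sequence space apart, which the delta limits above
 * are chosen to guarantee.
 */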

/*
 * Advance the write sequence and return the new value for use as the
 * wait goal.  This guarantees that any changes made by the calling
 * thread prior to this call will be visible to all threads after
 * rd_seq meets or exceeds the return value.
 *
 * This function may busy loop if the readers are roughly 1 billion
 * sequence numbers behind the writers.
 */
smr_seq_t
smr_advance(smr_t smr)
{
	smr_shared_t s;
	smr_seq_t goal;

	/*
	 * It is illegal to enter while in an smr section.
	 */
	KASSERT(curthread->td_critnest == 0,
	    ("smr_advance: Not allowed in a critical section."));

	/*
	 * Modifications not done in an smr section need to be visible
	 * before advancing the seq.
	 */
	atomic_thread_fence_rel();

	/*
	 * Increment the shared write sequence by 2.  Since it is
	 * initialized to 1 this means the only valid values are
	 * odd and an observed value of 0 in a particular CPU means
	 * it is not currently in a read section.
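	 *
	 * Note that atomic_fetchadd_int() returns the pre-increment value,
	 * so SMR_SEQ_INCR is added once more below to yield the new write
	 * sequence that is handed back to the caller as the wait goal.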
	 */
	s = zpcpu_get(smr)->c_shared;
	goal = atomic_fetchadd_int(&s->s_wr_seq, SMR_SEQ_INCR) + SMR_SEQ_INCR;

	/*
	 * Force a synchronization here if the goal is getting too
	 * far ahead of the read sequence number.  This keeps the
	 * wrap-detecting arithmetic working in pathological cases.
	 */
	if (goal - atomic_load_int(&s->s_rd_seq) >= SMR_SEQ_MAX_DELTA)
		smr_wait(smr, goal - SMR_SEQ_MAX_ADVANCE);

	return (goal);
}

/*
 * Poll to determine whether all readers have observed the 'goal' write
 * sequence number.
 *
 * If wait is true this will spin until the goal is met.
 *
 * This routine will update the minimum observed read sequence number in
 * s_rd_seq if it does a scan.  It may not do a scan if another call has
 * advanced s_rd_seq beyond the caller's goal already.
 *
 * Returns true if the goal is met and false if not.
 */
bool
smr_poll(smr_t smr, smr_seq_t goal, bool wait)
{
	smr_shared_t s;
	smr_t c;
	smr_seq_t s_wr_seq, s_rd_seq, rd_seq, c_seq;
	int i;
	bool success;

	/*
	 * It is illegal to enter while in an smr section.
	 */
	KASSERT(!wait || curthread->td_critnest == 0,
	    ("smr_poll: Blocking not allowed in a critical section."));

	/*
	 * Use a critical section so that we can avoid ABA races
	 * caused by long preemption sleeps.
	 */
	success = true;
	critical_enter();
	s = zpcpu_get(smr)->c_shared;

	/*
	 * Acquire barrier loads s_wr_seq after s_rd_seq so that we can not
	 * observe an updated read sequence that is larger than the write
	 * sequence.
	 */
	s_rd_seq = atomic_load_acq_int(&s->s_rd_seq);

	/*
	 * wr_seq must be loaded prior to any c_seq value so that a stale
	 * c_seq can only reference time after this wr_seq.
	 */
	s_wr_seq = atomic_load_acq_int(&s->s_wr_seq);

	/*
	 * Detect whether the goal is valid and has already been observed.
	 *
	 * The goal must be in the range of s_wr_seq >= goal >= s_rd_seq for
	 * it to be valid.  If it is not then the caller held on to it and
	 * the integer wrapped.  If we wrapped back within range the caller
	 * will harmlessly scan.
	 *
	 * A valid goal must be greater than s_rd_seq or we have not verified
	 * that it has been observed and must fall through to polling.
	 */
	if (SMR_SEQ_GEQ(s_rd_seq, goal) || SMR_SEQ_LT(s_wr_seq, goal))
		goto out;

	/*
	 * Loop until all cores have observed the goal sequence or have
	 * gone inactive.  Keep track of the oldest sequence currently
	 * active as rd_seq.
	 */
	rd_seq = s_wr_seq;
	for (i = 0; i <= mp_maxid; i++) {
		c = zpcpu_get_cpu(smr, i);
		c_seq = SMR_SEQ_INVALID;
		for (;;) {
			c_seq = atomic_load_int(&c->c_seq);
			if (c_seq == SMR_SEQ_INVALID)
				break;

			/*
			 * There is a race described in smr.h:smr_enter that
			 * can lead to a stale seq value but not stale data
			 * access.  If we find a value out of range here we
			 * pin it to the current min to prevent it from
			 * advancing until that stale section has expired.
			 *
			 * The race is created when a cpu loads the s_wr_seq
			 * value in a local register and then another thread
			 * advances s_wr_seq and calls smr_poll() which will
			 * observe no value yet in c_seq and advance s_rd_seq
			 * up to s_wr_seq which is beyond the register
			 * cached value.  This is only likely to happen on
			 * a hypervisor or with a system management interrupt.
			 */
			if (SMR_SEQ_LT(c_seq, s_rd_seq))
				c_seq = s_rd_seq;

			/*
			 * If the sequence number meets the goal we are
			 * done with this cpu.
			 */
			if (SMR_SEQ_GEQ(c_seq, goal))
				break;

			/*
			 * If we're not waiting we will still scan the rest
			 * of the cpus and update s_rd_seq before returning
			 * an error.
			 */
			if (!wait) {
				success = false;
				break;
			}
			cpu_spinwait();
		}

		/*
		 * Limit the minimum observed rd_seq whether we met the goal
		 * or not.
		 */
		if (c_seq != SMR_SEQ_INVALID && SMR_SEQ_GT(rd_seq, c_seq))
			rd_seq = c_seq;
	}

	/*
	 * Advance the rd_seq as long as we observed the most recent one.
	 */
	s_rd_seq = atomic_load_int(&s->s_rd_seq);
	do {
		if (SMR_SEQ_LEQ(rd_seq, s_rd_seq))
			break;
	} while (atomic_fcmpset_int(&s->s_rd_seq, &s_rd_seq, rd_seq) == 0);

out:
	critical_exit();

	/*
	 * Serialize with smr_advance()/smr_exit().  The caller is now free
	 * to modify memory as expected.
	 */
	atomic_thread_fence_acq();

	return (success);
}
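
/*
 * smr_wait() and smr_synchronize() in sys/smr.h are thin wrappers around
 * the primitives above: smr_wait(smr, goal) is roughly
 * smr_poll(smr, goal, true), and smr_synchronize(smr) is roughly
 * smr_wait(smr, smr_advance(smr)).
 */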

smr_t
smr_create(const char *name)
{
	smr_t smr, c;
	smr_shared_t s;
	int i;

	s = uma_zalloc(smr_shared_zone, M_WAITOK);
	smr = uma_zalloc(smr_zone, M_WAITOK);

	s->s_name = name;
	s->s_rd_seq = s->s_wr_seq = SMR_SEQ_INIT;

	/* Initialize all CPUs, not just those running. */
	for (i = 0; i <= mp_maxid; i++) {
		c = zpcpu_get_cpu(smr, i);
		c->c_seq = SMR_SEQ_INVALID;
		c->c_shared = s;
	}
	atomic_thread_fence_seq_cst();

	return (smr);
}

void
smr_destroy(smr_t smr)
{

	smr_synchronize(smr);
	uma_zfree(smr_shared_zone, smr->c_shared);
	uma_zfree(smr_zone, smr);
}

/*
 * Initialize the UMA slab zones.
 */
void
smr_init(void)
{

	smr_shared_zone = uma_zcreate("SMR SHARED", sizeof(struct smr_shared),
	    NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, 0);
	smr_zone = uma_zcreate("SMR CPU", sizeof(struct smr),
	    NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, UMA_ZONE_PCPU);
}
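
/*
 * For the UMA integration described at the top of this file, a consumer
 * zone can opt in roughly along these lines (a sketch only; struct obj is
 * hypothetical, and uma.h has the exact interface and flags):
 *
 *	zone = uma_zcreate("obj", sizeof(struct obj), NULL, NULL, NULL,
 *	    NULL, UMA_ALIGN_PTR, UMA_ZONE_SMR);
 *	...
 *	obj = uma_zalloc_smr(zone, M_NOWAIT);
 *	...
 *	uma_zfree_smr(zone, obj);	// reclamation is deferred via SMR
 *
 * Readers bracket their lockless accesses with
 * smr_enter(uma_zone_get_smr(zone)) and the matching smr_exit().
 */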