 * SPDX-License-Identifier: BSD-2-Clause
 * Copyright (c) 2019,2020 Jeffrey Roberson <jeff@FreeBSD.org>
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/sysctl.h>
 * Global Unbounded Sequences (GUS)
 * This is a novel safe memory reclamation technique inspired by
 * epoch based reclamation from Samy Al Bahra's concurrency kit which
 * in turn was based on work described in:
 *	Fraser, K. 2004. Practical Lock-Freedom. PhD Thesis, University
 *	of Cambridge Computing Laboratory.
 * And shares some similarities with:
 *	Wang, Stamler, Parmer. 2016 Parallel Sections: Scaling System-Level
 * This is not an implementation of hazard pointers or related
 * techniques. The term safe memory reclamation is used as a
 * generic descriptor for algorithms that defer frees to avoid
 * use-after-free errors with lockless data structures or as
 * a mechanism to detect quiescence for writer synchronization.
 * The basic approach is to maintain a monotonic write sequence
 * number that is updated on some application defined granularity.
 * Readers record the most recent write sequence number they have
 * observed. A shared read sequence number records the lowest
 * sequence number observed by any reader as of the last poll. Any
 * write older than this value has been observed by all readers
 * and memory can be reclaimed. Like Epoch we also detect idle
 * readers by storing an invalid sequence number in the per-cpu
 * state when the read section exits. Like Parsec we establish
 * a global write clock that is used to mark memory on free.
 * The write and read sequence numbers can be thought of as a two
 * handed clock with readers always advancing towards writers. GUS
 * maintains the invariant that all readers can safely access memory
 * that was visible at the time they loaded their copy of the sequence
 * number. Periodically the read sequence or hand is polled and
 * advanced as far towards the write sequence as active readers allow.
 * Memory which was freed between the old and new global read sequence
 * number can now be reclaimed. When the system is idle the two hands
 * meet and no deferred memory is outstanding. Readers never advance
 * any sequence number, they only observe them. The shared read
 * sequence number is consequently never higher than the write sequence.
 * A stored sequence number that falls outside of this range has expired
 * and needs no scan to reclaim.
 * A notable distinction between GUS and Epoch, qsbr, rcu, etc. is
 * that advancing the sequence number is decoupled from detecting its
 * observation. That is to say, the delta between read and write
 * sequence numbers is not bounded. This can be thought of as a more
 * generalized form of epoch which requires them to be at most one step
 * apart. This results in a more granular assignment of sequence
 * numbers even as read latencies prohibit all or some expiration.
 * It also allows writers to advance the sequence number and save the
 * poll for expiration until a later time when it is likely to
 * complete without waiting. The batch granularity and free-to-use
 * latency are dynamic and can be significantly smaller than in more
 * This mechanism is primarily intended to be used in coordination with
 * UMA. By integrating with the allocator we avoid all of the callout
 * queue machinery and are provided with an efficient way to batch
 * sequence advancement and waiting. The allocator accumulates a full
 * per-cpu cache of memory before advancing the sequence. It then
 * delays waiting for this sequence to expire until the memory is
 * selected for reuse. In this way we only increment the sequence
 * value once for n=cache-size frees and the waits are done long
 * after the sequence has been expired so they need only be verified
 * to account for pathological conditions and to advance the read
 * sequence. Tying the sequence number to the bucket size has the
 * nice property that as the zone gets busier the buckets get larger
 * and the sequence writes become fewer. If the cache coherency traffic
 * from advancing the write sequence number becomes too costly we can
 * advance it for every N buckets in exchange for higher free-to-use
 * latency and consequently higher memory consumption.
 * If the read overhead of accessing the shared cacheline becomes
 * especially burdensome an invariant TSC could be used in place of the
 * sequence. The algorithm would then only need to maintain the minimum
 * observed tsc. This would trade potential cache synchronization
 * overhead for local serialization and cpu timestamp overhead.
 * A simplified diagram:
 * | -------------------- sequence number space -------------------- |
 *               | ----- valid sequence numbers ---- |
 * | -- free -- | --------- deferred frees -------- | ---- free ---- |
 * In this example cpuA has the lowest sequence number and poll can
 * advance rd seq. cpuB is not running and is considered to observe
 * Freed memory that is tagged with a sequence number between rd seq and
 * wr seq cannot be safely reclaimed because cpuA may hold a reference to
 * it. Any other memory is guaranteed to be unreferenced.
 * Any writer is free to advance wr seq at any time; however, it may busy
 * poll in pathological cases.
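/*
 * Illustrative sketch only (compiled out): a minimal reader/writer pairing
 * built on the public smr_enter()/smr_exit()/smr_advance()/smr_wait() API.
 * The 'example_obj' structure and the example_* names are hypothetical and
 * not part of this file; writers are assumed to be serialized externally.
 */
#if 0
struct example_obj {
	int	eo_data;
};

static smr_t example_smr;			/* From smr_create(). */
static struct example_obj *example_ptr;		/* Lockless-readable pointer. */

static int
example_read(void)
{
	struct example_obj *obj;
	int data;

	smr_enter(example_smr);		/* Publish our observed sequence. */
	obj = (void *)atomic_load_acq_ptr((uintptr_t *)&example_ptr);
	data = (obj != NULL) ? obj->eo_data : 0;
	smr_exit(example_smr);		/* Mark this cpu idle again. */

	return (data);
}

static struct example_obj *
example_replace(struct example_obj *new)
{
	struct example_obj *old;
	smr_seq_t goal;

	old = example_ptr;
	atomic_store_rel_ptr((uintptr_t *)&example_ptr, (uintptr_t)new);

	/*
	 * Advance the write clock and wait for every reader that could
	 * still see 'old' to exit before it is returned for reclamation.
	 */
	goal = smr_advance(example_smr);
	smr_wait(example_smr, goal);

	return (old);
}
#endif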
static uma_zone_t smr_shared_zone;
static uma_zone_t smr_zone;
#define SMR_SEQ_INIT 1		/* All valid sequence numbers are odd. */
#define SMR_SEQ_INCR 2
 * SMR_SEQ_MAX_DELTA is the maximum distance allowed between rd_seq and
 * wr_seq. For the modular arithmetic to work a value of UINT_MAX / 2
 * would be possible but it is checked after we increment the wr_seq so
 * a safety margin is left to prevent overflow.
 * We will block until SMR_SEQ_MAX_ADVANCE sequence numbers have progressed
 * to prevent integer wrapping. See smr_advance() for more details.
#define SMR_SEQ_MAX_DELTA (UINT_MAX / 4)
#define SMR_SEQ_MAX_ADVANCE (SMR_SEQ_MAX_DELTA - 1024)
/* We want to test the wrapping feature in invariants kernels. */
#define SMR_SEQ_INCR (UINT_MAX / 10000)
#define SMR_SEQ_INIT (UINT_MAX - 100000)
/* Force extra polls to test the integer overflow detection. */
#define SMR_SEQ_MAX_DELTA (SMR_SEQ_INCR * 32)
#define SMR_SEQ_MAX_ADVANCE (SMR_SEQ_MAX_DELTA / 2)
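/*
 * A compiled-out sketch of why sequence comparisons tolerate wraparound.
 * It assumes the SMR_SEQ_LT/GT/DELTA macros used throughout this file are
 * the signed modular-difference comparisons from sys/smr.h (roughly
 * (smr_delta_t)((a) - (b)) relative to zero); that definition is an
 * assumption here, not something established in this file.
 */
#if 0
static void
smr_seq_wrap_example(void)
{
	smr_seq_t a, b;

	a = UINT_MAX - 1;	/* An old value just below the wrap point. */
	b = a + 4;		/* A newer value that has wrapped to 2. */

	/*
	 * A plain unsigned compare would order b before a, but the signed
	 * delta (4) orders them correctly as long as the two values stay
	 * within SMR_SEQ_MAX_DELTA of each other, which the advance and
	 * poll paths below enforce.
	 */
	KASSERT(SMR_SEQ_GT(b, a) && SMR_SEQ_DELTA(b, a) == 4,
	    ("sequence comparison must be wrap-safe"));
}
#endif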
 * The grace period for lazy (tick based) SMR.
 * Hardclock is responsible for advancing ticks on a single CPU while every
 * CPU receives a regular clock interrupt. The clock interrupts flush the
 * store buffers and any speculative loads that may violate our invariants.
 * Because these interrupts are not synchronized we must wait one additional
 * tick in the future to be certain that all processors have had their state
 * synchronized by an interrupt.
 * This assumes that the clock interrupt will only be delayed by other causes
 * that will flush the store buffer or prevent access to the section protected
 * data. For example, an idle processor, or a system management interrupt,
#define SMR_LAZY_GRACE 2
#define SMR_LAZY_INCR (SMR_LAZY_GRACE * SMR_SEQ_INCR)
 * The maximum sequence number ahead of wr_seq that may still be valid. The
 * sequence may not be advanced on write for lazy or deferred SMRs. In this
 * case poll needs to attempt to forward the sequence number if the goal is
 * within wr_seq + SMR_SEQ_ADVANCE.
#define SMR_SEQ_ADVANCE SMR_LAZY_INCR
static SYSCTL_NODE(_debug, OID_AUTO, smr, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
static COUNTER_U64_DEFINE_EARLY(advance);
SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, advance, CTLFLAG_RW, &advance, "");
static COUNTER_U64_DEFINE_EARLY(advance_wait);
SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, advance_wait, CTLFLAG_RW, &advance_wait, "");
static COUNTER_U64_DEFINE_EARLY(poll);
SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, poll, CTLFLAG_RW, &poll, "");
static COUNTER_U64_DEFINE_EARLY(poll_scan);
SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, poll_scan, CTLFLAG_RW, &poll_scan, "");
static COUNTER_U64_DEFINE_EARLY(poll_fail);
SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, poll_fail, CTLFLAG_RW, &poll_fail, "");
 * Advance a lazy write sequence number. These move forward at the rate of
 * ticks. Grace is SMR_LAZY_INCR (2 ticks) in the future.
 * This returns the goal write sequence number.
smr_lazy_advance(smr_t smr, smr_shared_t s)
	union s_wr s_wr, old;
	CRITICAL_ASSERT(curthread);
	 * Load the stored ticks value before the current one. This way the
	 * current value can only be the same or larger.
	old._pair = s_wr._pair = atomic_load_acq_64(&s->s_wr._pair);
	 * The most probable condition is that the update already took place.
	if (__predict_true(d == 0))
	/* Cap the rate of advancement and handle long idle periods. */
	if (d > SMR_LAZY_GRACE || d < 0)
	s_wr.seq += d * SMR_SEQ_INCR;
	 * This can only fail if another thread races to call advance().
	 * Strong cmpset semantics mean we are guaranteed that the update
	atomic_cmpset_64(&s->s_wr._pair, old._pair, s_wr._pair);
	return (s_wr.seq + SMR_LAZY_INCR);
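/*
 * Compiled-out restatement of the lazy clock arithmetic above, with the
 * elided clamping step filled in as an assumption: the tick delta is
 * assumed to be capped at SMR_LAZY_GRACE so that a long idle period does
 * not advance the sequence by an unbounded amount.
 */
#if 0
static smr_seq_t
smr_lazy_advance_sketch(smr_seq_t seq, int last_ticks, int now_ticks)
{
	int d;

	d = now_ticks - last_ticks;
	if (d > SMR_LAZY_GRACE || d < 0)
		d = SMR_LAZY_GRACE;	/* Assumed clamp. */
	seq += d * SMR_SEQ_INCR;	/* One step per elapsed tick. */

	return (seq + SMR_LAZY_INCR);	/* Goal is a grace period ahead. */
}
#endif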
 * Increment the shared write sequence by 2. Since it is initialized
 * to 1 this means the only valid values are odd and an observed value
 * of 0 in a particular CPU means it is not currently in a read section.
smr_shared_advance(smr_shared_t s)
	return (atomic_fetchadd_int(&s->s_wr.seq, SMR_SEQ_INCR) + SMR_SEQ_INCR);
 * Advance the write sequence number for a normal smr section. If the
 * write sequence is too far ahead of the read sequence we have to poll
 * to advance rd_seq and prevent undetectable wraps.
smr_default_advance(smr_t smr, smr_shared_t s)
	smr_seq_t goal, s_rd_seq;
	CRITICAL_ASSERT(curthread);
	KASSERT((zpcpu_get(smr)->c_flags & SMR_LAZY) == 0,
	    ("smr_default_advance: called with lazy smr."));
	 * Load the current read seq before incrementing the goal so
	 * we are guaranteed it is always < goal.
	s_rd_seq = atomic_load_acq_int(&s->s_rd_seq);
	goal = smr_shared_advance(s);
	 * Force a synchronization here if the goal is getting too
	 * far ahead of the read sequence number. This keeps the
	 * wrap detecting arithmetic working in pathological cases.
	if (SMR_SEQ_DELTA(goal, s_rd_seq) >= SMR_SEQ_MAX_DELTA) {
		counter_u64_add(advance_wait, 1);
		smr_wait(smr, goal - SMR_SEQ_MAX_ADVANCE);
	counter_u64_add(advance, 1);
 * Deferred SMRs conditionally update s_wr_seq based on a
 * cpu-local interval count.
smr_deferred_advance(smr_t smr, smr_shared_t s, smr_t self)
	if (++self->c_deferred < self->c_limit)
		return (smr_shared_current(s) + SMR_SEQ_INCR);
	self->c_deferred = 0;
	return (smr_default_advance(smr, s));
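/*
 * Illustrative sketch only (compiled out): with a deferred SMR the shared
 * write clock is bumped roughly once per 'limit' local advance calls; the
 * intermediate calls hand back current + SMR_SEQ_INCR as a goal that a
 * later poll can forward. The name and limit below are arbitrary.
 */
#if 0
static void
smr_deferred_example(void)
{
	smr_t smr;
	smr_seq_t goal;
	int i;

	smr = smr_create("example deferred", 8, SMR_DEFERRED);
	for (i = 0; i < 8; i++)
		goal = smr_advance(smr);	/* Shared clock written once. */
	smr_wait(smr, goal);			/* Poll forwards and expires the goal. */
	smr_destroy(smr);
}
#endif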
 * Advance the write sequence and return the value for use as the
 * wait goal. This guarantees that any changes made by the calling
 * thread prior to this call will be visible to all threads after
 * rd_seq meets or exceeds the return value.
 * This function may busy loop if the readers are roughly 1 billion
 * sequence numbers behind the writers.
 * Lazy SMRs will not busy loop and the wrap happens every 25 days
 * at 1khz and 60 hours at 10khz. Readers can block for no longer
 * than half of this for SMR_SEQ_ macros to continue working.
smr_advance(smr_t smr)
	 * It is illegal to enter while in an smr section.
	SMR_ASSERT_NOT_ENTERED(smr);
	 * Modifications not done in an smr section need to be visible
	 * before advancing the seq.
	atomic_thread_fence_rel();
	/* Try to touch the line once. */
	self = zpcpu_get(smr);
	flags = self->c_flags;
	goal = SMR_SEQ_INVALID;
	if ((flags & (SMR_LAZY | SMR_DEFERRED)) == 0)
		goal = smr_default_advance(smr, s);
	else if ((flags & SMR_LAZY) != 0)
		goal = smr_lazy_advance(smr, s);
	else if ((flags & SMR_DEFERRED) != 0)
		goal = smr_deferred_advance(smr, s, self);
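/*
 * Illustrative sketch only (compiled out) of the batching pattern from the
 * comment at the top of this file: tag an entire batch of frees with a
 * single smr_advance() goal and defer the poll until the batch is about to
 * be reused. The batch structure and function names are hypothetical.
 */
#if 0
struct example_batch {
	smr_seq_t	eb_seq;		/* Goal recorded when the batch was retired. */
	void		*eb_items[32];
	int		eb_count;
};

static void
example_batch_retire(smr_t smr, struct example_batch *eb)
{
	/* One write sequence advance covers every item in the batch. */
	eb->eb_seq = smr_advance(smr);
}

static bool
example_batch_reusable(smr_t smr, struct example_batch *eb, bool wait)
{
	/* By reuse time the goal has usually expired, so this rarely scans. */
	return (smr_poll(smr, eb->eb_seq, wait));
}
#endif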
 * Poll to determine the currently observed sequence number on a cpu
 * and spinwait if the 'wait' argument is true.
smr_poll_cpu(smr_t c, smr_seq_t s_rd_seq, smr_seq_t goal, bool wait)
	c_seq = SMR_SEQ_INVALID;
	c_seq = atomic_load_int(&c->c_seq);
	if (c_seq == SMR_SEQ_INVALID)
	 * There is a race described in smr.h:smr_enter that
	 * can lead to a stale seq value but not stale data
	 * access. If we find a value out of range here we
	 * pin it to the current min to prevent it from
	 * advancing until that stale section has expired.
	 * The race is created when a cpu loads the s_wr_seq
	 * value in a local register and then another thread
	 * advances s_wr_seq and calls smr_poll() which will
	 * observe no value yet in c_seq and advance s_rd_seq
	 * up to s_wr_seq which is beyond the register
	 * cached value. This is only likely to happen on
	 * a hypervisor or with a system management interrupt.
	if (SMR_SEQ_LT(c_seq, s_rd_seq))
	 * If the sequence number meets the goal we are done
	if (SMR_SEQ_LEQ(goal, c_seq))
 * Loop until all cores have observed the goal sequence or have
 * gone inactive. Returns the oldest sequence currently active;
 * This function assumes a snapshot of sequence values has
 * been obtained and validated by smr_poll().
smr_poll_scan(smr_t smr, smr_shared_t s, smr_seq_t s_rd_seq,
    smr_seq_t s_wr_seq, smr_seq_t goal, bool wait)
	smr_seq_t rd_seq, c_seq;
	CRITICAL_ASSERT(curthread);
	counter_u64_add_protected(poll_scan, 1);
	 * The read sequence can be no larger than the write sequence at
	 * the start of the poll.
	 * Query the active sequence on this cpu. If we're not
	 * waiting and we don't meet the goal we will still scan
	 * the rest of the cpus to update s_rd_seq before returning
	c_seq = smr_poll_cpu(zpcpu_get_cpu(smr, i), s_rd_seq, goal,
	 * Limit the minimum observed rd_seq whether we met the goal
	if (c_seq != SMR_SEQ_INVALID)
		rd_seq = SMR_SEQ_MIN(rd_seq, c_seq);
	 * Advance the rd_seq as long as we observed a more recent value.
	s_rd_seq = atomic_load_int(&s->s_rd_seq);
	if (SMR_SEQ_GT(rd_seq, s_rd_seq)) {
		atomic_cmpset_int(&s->s_rd_seq, s_rd_seq, rd_seq);
 * Poll to determine whether all readers have observed the 'goal' write
 * If wait is true this will spin until the goal is met.
 * This routine will update the minimum observed read sequence number in
 * s_rd_seq if it does a scan. It may not do a scan if another call has
 * advanced s_rd_seq beyond the caller's goal already.
 * Returns true if the goal is met and false if not.
smr_poll(smr_t smr, smr_seq_t goal, bool wait)
	smr_seq_t s_wr_seq, s_rd_seq;
	 * It is illegal to enter while in an smr section.
	KASSERT(!wait || !SMR_ENTERED(smr),
	    ("smr_poll: Blocking not allowed in a SMR section."));
	KASSERT(!wait || (zpcpu_get(smr)->c_flags & SMR_LAZY) == 0,
	    ("smr_poll: Blocking not allowed on lazy smrs."));
	 * Use a critical section so that we can avoid ABA races
	 * caused by long preemption sleeps.
	/* Attempt to load from self only once. */
	self = zpcpu_get(smr);
	flags = self->c_flags;
	counter_u64_add_protected(poll, 1);
	 * Conditionally advance the lazy write clock on any writer
	if ((flags & SMR_LAZY) != 0)
		smr_lazy_advance(smr, s);
	 * Acquire barrier loads s_wr_seq after s_rd_seq so that we cannot
	 * observe an updated read sequence that is larger than write.
	s_rd_seq = atomic_load_acq_int(&s->s_rd_seq);
	 * If we have already observed the sequence number we can immediately
	 * return success. Most polls should meet this criterion.
	if (SMR_SEQ_LEQ(goal, s_rd_seq))
	 * wr_seq must be loaded prior to any c_seq value so that a
	 * stale c_seq can only reference time after this wr_seq.
	s_wr_seq = atomic_load_acq_int(&s->s_wr.seq);
	 * This is the distance from s_wr_seq to goal. Positive values
	delta = SMR_SEQ_DELTA(goal, s_wr_seq);
	 * Detect a stale wr_seq.
	 * This goal may have come from a deferred advance or a lazy
	 * smr. If we are not blocking we cannot succeed but the
	 * sequence number is valid.
	if (delta > 0 && delta <= SMR_SEQ_ADVANCE &&
	    (flags & (SMR_LAZY | SMR_DEFERRED)) != 0) {
		/* LAZY is always !wait. */
		s_wr_seq = smr_shared_advance(s);
	 * Detect an invalid goal.
	 * The goal must be in the range of s_wr_seq >= goal >= s_rd_seq for
	 * it to be valid. If it is not then the caller held on to it and
	 * the integer wrapped. If we wrapped back within range the caller
	 * will harmlessly scan.
	/* Determine the lowest visible sequence number. */
	s_rd_seq = smr_poll_scan(smr, s, s_rd_seq, s_wr_seq, goal, wait);
	success = SMR_SEQ_LEQ(goal, s_rd_seq);
	counter_u64_add_protected(poll_fail, 1);
	 * Serialize with smr_advance()/smr_exit(). The caller is now free
	 * to modify memory as expected.
	atomic_thread_fence_acq();
	KASSERT(success || !wait, ("%s: blocking poll failed", __func__));
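/*
 * For reference, the blocking helpers used above (smr_wait() and
 * smr_synchronize()) are assumed to be thin wrappers over smr_poll() and
 * smr_advance() as declared in sys/smr.h; a compiled-out sketch of that
 * layering:
 */
#if 0
static void
smr_wait_sketch(smr_t smr, smr_seq_t goal)
{
	/* Spin until every reader has observed 'goal'. */
	(void)smr_poll(smr, goal, true);
}

static void
smr_synchronize_sketch(smr_t smr)
{
	/* Advance the write clock, then wait for all readers to catch up. */
	smr_wait_sketch(smr, smr_advance(smr));
}
#endif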
smr_create(const char *name, int limit, int flags)
	s = uma_zalloc(smr_shared_zone, M_WAITOK);
	smr = uma_zalloc_pcpu(smr_zone, M_WAITOK);
	s->s_rd_seq = s->s_wr.seq = SMR_SEQ_INIT;
	s->s_wr.ticks = ticks;
	/* Initialize all CPUs, not just those running. */
	for (i = 0; i <= mp_maxid; i++) {
		c = zpcpu_get_cpu(smr, i);
		c->c_seq = SMR_SEQ_INVALID;
	atomic_thread_fence_seq_cst();
smr_destroy(smr_t smr)
	smr_synchronize(smr);
	uma_zfree(smr_shared_zone, smr->c_shared);
	uma_zfree_pcpu(smr_zone, smr);
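/*
 * Illustrative lifecycle sketch (compiled out): a consumer creates its SMR
 * once, uses it for read sections and deferred frees, and tears it down
 * with smr_destroy(), which synchronizes with outstanding readers before
 * freeing the state. The example_* names are hypothetical.
 */
#if 0
static smr_t example_lifecycle_smr;

static void
example_lifecycle_init(void)
{
	/* A default SMR: neither lazy nor deferred. */
	example_lifecycle_smr = smr_create("example", 0, 0);
}

static void
example_lifecycle_fini(void)
{
	/* smr_destroy() synchronizes before releasing the shared state. */
	smr_destroy(example_lifecycle_smr);
	example_lifecycle_smr = NULL;
}
#endif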
 * Initialize the UMA slab zone.
	smr_shared_zone = uma_zcreate("SMR SHARED", sizeof(struct smr_shared),
	    NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, 0);
	smr_zone = uma_zcreate("SMR CPU", sizeof(struct smr),
	    NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, UMA_ZONE_PCPU);