.\" Copyright (C) 2018 Matthew Macy <mmacy@FreeBSD.org>.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\"    notice(s), this list of conditions and the following disclaimer as
.\"    the first lines of this file unmodified other than the possible
.\"    addition of one or more copyright notices.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\"    notice(s), this list of conditions and the following disclaimer in the
.\"    documentation and/or other materials provided with the distribution.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
.\" EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
.\" WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
.\" DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
.\" DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
.\" (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
.\" SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
.\" CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
.\" DAMAGE.
.Nm epoch_enter_preempt ,
.Nm epoch_exit_preempt ,
.Nm epoch_wait_preempt ,
.Nm epoch_call ,
.Nm epoch_drain_callbacks ,
.Nm in_epoch ,
.Nm in_epoch_verbose
.Nd kernel epoch based reclamation
.Sh SYNOPSIS
.In sys/epoch.h
.Bd -literal
struct epoch;		/* Opaque */
.Ed
.Vt typedef "struct epoch *epoch_t" ;
.Bd -literal
struct epoch_context {
	void	*data[2];
};
.Ed
.Vt typedef "struct epoch_context *epoch_context_t" ;
.Bd -literal
struct epoch_tracker;	/* Opaque */
.Ed
.Vt typedef "struct epoch_tracker *epoch_tracker_t" ;
.Ft epoch_t
.Fn epoch_alloc "const char *name" "int flags"
.Ft void
.Fn epoch_free "epoch_t epoch"
.Ft void
.Fn epoch_enter "epoch_t epoch"
.Ft void
.Fn epoch_exit "epoch_t epoch"
.Ft void
.Fn epoch_wait "epoch_t epoch"
.Ft void
.Fn epoch_enter_preempt "epoch_t epoch" "epoch_tracker_t et"
.Ft void
.Fn epoch_exit_preempt "epoch_t epoch" "epoch_tracker_t et"
.Ft void
.Fn epoch_wait_preempt "epoch_t epoch"
.Ft void
.Fn epoch_call "epoch_t epoch" "epoch_context_t ctx" "void (*callback)(epoch_context_t)"
.Ft void
.Fn epoch_drain_callbacks "epoch_t epoch"
.Ft int
.Fn in_epoch "epoch_t epoch"
.Ft int
.Fn in_epoch_verbose "epoch_t epoch" "int dump_onfail"
.Sh DESCRIPTION
Epochs are used to guarantee liveness and immutability of data by
deferring reclamation and mutation until a grace period has elapsed.
Epochs do not have any lock ordering issues.
Entering and leaving an epoch section will never block.
.Pp
Epochs are allocated with
.Fn epoch_alloc .
The
.Fa name
argument is used for debugging convenience when the
.Dv EPOCH_TRACE
kernel option is configured.
By default, epochs do not allow preemption during sections.
By default, mutexes cannot be held across
.Fn epoch_wait_preempt .
The
.Fa flags
specified are formed by
.Em OR Ns 'ing
the following values:
.Bl -tag -offset indent -width Ds
.It Dv EPOCH_LOCKED
Permit holding mutexes across
.Fn epoch_wait_preempt
(requires
.Dv EPOCH_PREEMPT ) .
When doing this, one must be cautious of creating a situation where a deadlock
is possible.
.It Dv EPOCH_PREEMPT
The
.Vt epoch
will allow preemption during sections.
Only non-sleepable locks may be acquired during a preemptible epoch.
The functions
.Fn epoch_enter_preempt ,
.Fn epoch_exit_preempt ,
and
.Fn epoch_wait_preempt
must be used in place of
.Fn epoch_enter ,
.Fn epoch_exit ,
and
.Fn epoch_wait ,
respectively.
.El
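.Pp
As an illustrative sketch (the
.Va foo_epoch
variable and the foo subsystem functions are hypothetical; only
.Fn epoch_alloc ,
.Fn epoch_free ,
and the
.Dv EPOCH_PREEMPT
flag are part of the interface described here), a preemptible epoch could be
allocated and freed as follows:
.Bd -literal
static epoch_t foo_epoch;

static void
foo_init(void)
{
	/* The name appears in debugging output when EPOCH_TRACE is set. */
	foo_epoch = epoch_alloc("foo", EPOCH_PREEMPT);
}

static void
foo_uninit(void)
{
	epoch_free(foo_epoch);
}
.Ed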
.Pp
Threads indicate the start of an epoch critical section by calling
.Fn epoch_enter
(or
.Fn epoch_enter_preempt
for preemptible epochs).
Threads call
.Fn epoch_exit
(or
.Fn epoch_exit_preempt
for preemptible epochs)
to indicate the end of a critical section.
.Vt struct epoch_tracker Ns s
are stack objects whose pointers are passed to
.Fn epoch_enter_preempt
and
.Fn epoch_exit_preempt
(much like
.Vt struct rm_priotracker ) .
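.Pp
A read-side section in a preemptible epoch might then look like the following
sketch, where
.Vt struct foo ,
.Fn foo_lookup ,
and
.Va foo_epoch
are hypothetical:
.Bd -literal
static uint64_t
foo_get_counter(uint32_t key)
{
	struct epoch_tracker et;
	struct foo *p;
	uint64_t val;

	epoch_enter_preempt(foo_epoch, &et);
	p = foo_lookup(key);		/* epoch-protected, read-only lookup */
	val = (p != NULL) ? p->counter : 0;
	epoch_exit_preempt(foo_epoch, &et);
	return (val);
}
.Ed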
.Pp
Threads can defer work until a grace period has expired since any thread has
entered the epoch, either synchronously or asynchronously.
.Fn epoch_call
defers work asynchronously by invoking the provided
.Fa callback
at a later time, once the grace period has expired.
.Fn epoch_wait
(or
.Fn epoch_wait_preempt )
blocks the current thread until the grace period has expired and the work can be
done safely.
.Pp
Default, non-preemptible epoch wait
.Fn ( epoch_wait )
is guaranteed to have much shorter completion times relative to
preemptible epoch wait
.Fn ( epoch_wait_preempt ) .
(In the default type, no thread in an epoch section will be preempted
before completing its section.)
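.Pp
For instance, a writer could unlink an object and wait for a grace period
before freeing it synchronously; in this sketch
.Va foo_epoch ,
.Va foo_lock ,
the object's list linkage, and
.Dv M_FOO
are hypothetical:
.Bd -literal
mtx_lock(&foo_lock);
CK_LIST_REMOVE(obj, link);	/* No new readers can find obj. */
mtx_unlock(&foo_lock);

/* Wait for readers already in their epoch sections to exit. */
epoch_wait_preempt(foo_epoch);
free(obj, M_FOO);
.Ed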
.Pp
INVARIANTS can assert that a thread is in an epoch by using
.Fn in_epoch .
.Fn in_epoch "epoch"
is equivalent to invoking
.Fn in_epoch_verbose "epoch" "0" .
If
.Dv EPOCH_TRACE
is enabled,
.Fn in_epoch_verbose "epoch" "1"
provides additional verbose debugging information.
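.Pp
For example, a function that must only be called from within the epoch could
assert that precondition under INVARIANTS; the
.Fn foo_check
function and
.Va foo_epoch
below are hypothetical:
.Bd -literal
static void
foo_check(void)
{
	KASSERT(in_epoch(foo_epoch), ("%s: not in foo_epoch", __func__));
}
.Ed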
.Pp
The epoch API currently does not support sleeping in preemptible epoch sections.
A caller should never call
.Fn epoch_wait
in the middle of an epoch section for the same epoch, as this will lead to a deadlock.
.Pp
The
.Fn epoch_drain_callbacks
function is used to drain all pending callbacks scheduled by prior
.Fn epoch_call
invocations on the same epoch.
This function is useful when there are shared memory structure(s)
referred to by the epoch callback(s) which are not refcounted and are
rarely freed.
The typical place for calling this function is right before freeing or
invalidating the shared resource(s) used by the epoch callback(s).
This function can sleep and is not optimized for performance.
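.Pp
A teardown path might therefore look like the following sketch, in which
.Fn foo_destroy ,
.Va foo_epoch ,
.Va foo_table ,
and
.Dv M_FOO
are hypothetical:
.Bd -literal
static void
foo_destroy(void)
{
	/* Wait for every callback scheduled with epoch_call() to finish. */
	epoch_drain_callbacks(foo_epoch);
	/* Now the state those callbacks referenced can be freed safely. */
	free(foo_table, M_FOO);
	epoch_free(foo_epoch);
}
.Ed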
.Sh RETURN VALUES
.Fn in_epoch curepoch
will return 1 if curthread is in curepoch, and 0 otherwise.
.Sh CAVEATS
One must be cautious when using
.Fn epoch_wait_preempt .
Threads are pinned during epoch sections, so if a thread in a section is then
preempted by a higher-priority, compute-bound thread on that CPU, it can be
prevented from leaving the section indefinitely.
.Pp
Epochs are not a straight replacement for read locks.
Callers must use safe list and tailq traversal routines in an epoch (see ck_queue).
When modifying a list referenced from an epoch section, safe removal
routines must be used and the caller can no longer modify a list entry
in place.
An item to be modified must be handled with copy-on-write
and frees must be deferred until after a grace period has elapsed.
.Sh EXAMPLES
Async free example:
.Pp
Thread 1:
.Bd -literal
int
in_pcbladdr(struct inpcb *inp, struct in_addr *faddr, struct in_laddr *laddr,
    struct ucred *cred)
{
	/* ... */
	epoch_enter(net_epoch);
	CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
		sa = ifa->ifa_addr;
		if (sa->sa_family != AF_INET)
			continue;
		sin = (struct sockaddr_in *)sa;
		if (prison_check_ip4(cred, &sin->sin_addr) == 0) {
			ia = (struct in_ifaddr *)ifa;
			break;
		}
	}
	epoch_exit(net_epoch);
	/* ... */
}
.Ed
.Pp
Thread 2:
.Bd -literal
void
ifa_free(struct ifaddr *ifa)
{
	if (refcount_release(&ifa->ifa_refcnt))
		epoch_call(net_epoch, &ifa->ifa_epoch_ctx, ifa_destroy);
}

void
if_purgeaddrs(struct ifnet *ifp)
{
	/* ... */
	IF_ADDR_WLOCK(ifp);
	CK_STAILQ_REMOVE(&ifp->if_addrhead, ifa, ifaddr, ifa_link);
	IF_ADDR_WUNLOCK(ifp);
	ifa_free(ifa);
}
.Ed
.Pp
Thread 1 traverses the ifaddr list in an epoch.
Thread 2 unlinks the entry with the corresponding epoch-safe macro, marks it
as logically free, and then defers deletion.
More general mutation or a synchronous
free would have to follow a call to
.Fn epoch_wait .
.Sh NOTES
The
.Nm
kernel programming interface is under development and is subject to change.