2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * a) Redistributions of source code must retain the above copyright notice,
12 * this list of conditions and the following disclaimer.
14 * b) Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the distribution.
18 * c) Neither the name of Cisco Systems, Inc. nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
35 #include <sys/cdefs.h>
36 #ifndef _NETINET_SCTP_LOCK_BSD_H_
37 #define _NETINET_SCTP_LOCK_BSD_H_
40 * General locking concepts: The goal of our locking is to of course provide
41 * consistency and yet minimize overhead. We will attempt to use
42 * non-recursive locks which are supposed to be quite inexpensive. Now in
43 * order to do this the goal is that most functions are not aware of locking.
44 * Once we have a TCB we lock it and unlock when we are through. This means
45 * that the TCB lock is kind-of a "global" lock when working on an
46 * association. Caution must be used when asserting a TCB_LOCK since if we
47 * recurse we deadlock.
49 * Most other locks (INP and INFO) attempt to localize the locking i.e. we try
50 * to contain the lock and unlock within the function that needs to lock it.
51 * This sometimes mean we do extra locks and unlocks and lose a bit of
52 * efficiency, but if the performance statements about non-recursive locks are
53 * true this should not be a problem. One issue that arises with this only
54 * lock when needed is that if an implicit association setup is done we have
55 * a problem. If at the time I lookup an association I have NULL in the tcb
 * return, by the time I call to create the association some other processor
 * could have created it. This is what the CREATE lock on the endpoint is for.
58 * Places where we will be implicitly creating the association OR just
59 * creating an association (the connect call) will assert the CREATE_INP
60 * lock. This will assure us that during all the lookup of INP and INFO if
61 * another creator is also locking/looking up we can gate the two to
62 * synchronize. So the CREATE_INP lock is also another one we must use
63 * extreme caution in locking to make sure we don't hit a re-entrancy issue.
68 * When working with the global SCTP lists we lock and unlock the INP_INFO
69 * lock. So when we go to lookup an association we will want to do a
70 * SCTP_INP_INFO_RLOCK() and then when we want to add a new association to
71 * the SCTP_BASE_INFO() list's we will do a SCTP_INP_INFO_WLOCK().
/*
 * Per-CPU counter and statistics-log lock hooks: compiled out (no-ops)
 * on this platform.
 */
#define SCTP_IPI_COUNT_INIT()

#define SCTP_STATLOG_INIT_LOCK()
#define SCTP_STATLOG_DESTROY()
#define SCTP_STATLOG_LOCK()
#define SCTP_STATLOG_UNLOCK()
/*
 * INP-INFO lock: an rwlock protecting the global endpoint/association
 * lists in SCTP_BASE_INFO().  Lookups take the read lock; insertion and
 * removal take the write lock.  DESTROY drops a still-held write lock
 * first, since rw_destroy() requires the lock to be unowned.
 */
#define SCTP_INP_INFO_LOCK_INIT() do { \
	rw_init(&SCTP_BASE_INFO(ipi_ep_mtx), "sctp-info"); \
} while (0)

#define SCTP_INP_INFO_LOCK_DESTROY() do { \
	if (rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx))) { \
		rw_wunlock(&SCTP_BASE_INFO(ipi_ep_mtx)); \
	} \
	rw_destroy(&SCTP_BASE_INFO(ipi_ep_mtx)); \
} while (0)

#define SCTP_INP_INFO_RLOCK() do { \
	rw_rlock(&SCTP_BASE_INFO(ipi_ep_mtx)); \
} while (0)

#define SCTP_INP_INFO_WLOCK() do { \
	rw_wlock(&SCTP_BASE_INFO(ipi_ep_mtx)); \
} while (0)

#define SCTP_INP_INFO_RUNLOCK() do { \
	rw_runlock(&SCTP_BASE_INFO(ipi_ep_mtx)); \
} while (0)

#define SCTP_INP_INFO_WUNLOCK() do { \
	rw_wunlock(&SCTP_BASE_INFO(ipi_ep_mtx)); \
} while (0)

/* Assert the lock is held in any mode / read mode / write mode. */
#define SCTP_INP_INFO_LOCK_ASSERT() do { \
	rw_assert(&SCTP_BASE_INFO(ipi_ep_mtx), RA_LOCKED); \
} while (0)

#define SCTP_INP_INFO_RLOCK_ASSERT() do { \
	rw_assert(&SCTP_BASE_INFO(ipi_ep_mtx), RA_RLOCKED); \
} while (0)

#define SCTP_INP_INFO_WLOCK_ASSERT() do { \
	rw_assert(&SCTP_BASE_INFO(ipi_ep_mtx), RA_WLOCKED); \
} while (0)
/*
 * Per-core (mcore) queue and processing locks.  MTX_DUPOK is needed
 * because locks of the same class may be held for different cores at
 * once.  The DESTROY variants release the mutex first if the caller
 * still owns it, since mtx_destroy() requires an unowned mutex.
 */
#define SCTP_MCORE_QLOCK_INIT(cpstr) do { \
	mtx_init(&(cpstr)->que_mtx, "sctp-mcore_queue", "queue_lock", \
	    MTX_DEF | MTX_DUPOK); \
} while (0)

#define SCTP_MCORE_QDESTROY(cpstr) do { \
	/* BUG FIX: the ownership test used to check core_mtx while \
	 * unlocking que_mtx; test the mutex actually being destroyed. */ \
	if (mtx_owned(&(cpstr)->que_mtx)) { \
		mtx_unlock(&(cpstr)->que_mtx); \
	} \
	mtx_destroy(&(cpstr)->que_mtx); \
} while (0)

#define SCTP_MCORE_QLOCK(cpstr) do { \
	mtx_lock(&(cpstr)->que_mtx); \
} while (0)

#define SCTP_MCORE_QUNLOCK(cpstr) do { \
	mtx_unlock(&(cpstr)->que_mtx); \
} while (0)

#define SCTP_MCORE_LOCK_INIT(cpstr) do { \
	mtx_init(&(cpstr)->core_mtx, "sctp-cpulck", "cpu_proc_lock", \
	    MTX_DEF | MTX_DUPOK); \
} while (0)

#define SCTP_MCORE_DESTROY(cpstr) do { \
	if (mtx_owned(&(cpstr)->core_mtx)) { \
		mtx_unlock(&(cpstr)->core_mtx); \
	} \
	mtx_destroy(&(cpstr)->core_mtx); \
} while (0)

#define SCTP_MCORE_LOCK(cpstr) do { \
	mtx_lock(&(cpstr)->core_mtx); \
} while (0)

#define SCTP_MCORE_UNLOCK(cpstr) do { \
	mtx_unlock(&(cpstr)->core_mtx); \
} while (0)
/*
 * Address-list lock: an rwlock protecting the global interface/address
 * lists.  Same read/write and destroy-while-owned conventions as the
 * INP-INFO lock above.
 */
#define SCTP_IPI_ADDR_INIT() do { \
	rw_init(&SCTP_BASE_INFO(ipi_addr_mtx), "sctp-addr"); \
} while (0)

#define SCTP_IPI_ADDR_DESTROY() do { \
	if (rw_wowned(&SCTP_BASE_INFO(ipi_addr_mtx))) { \
		rw_wunlock(&SCTP_BASE_INFO(ipi_addr_mtx)); \
	} \
	rw_destroy(&SCTP_BASE_INFO(ipi_addr_mtx)); \
} while (0)

#define SCTP_IPI_ADDR_RLOCK() do { \
	rw_rlock(&SCTP_BASE_INFO(ipi_addr_mtx)); \
} while (0)

#define SCTP_IPI_ADDR_WLOCK() do { \
	rw_wlock(&SCTP_BASE_INFO(ipi_addr_mtx)); \
} while (0)

#define SCTP_IPI_ADDR_RUNLOCK() do { \
	rw_runlock(&SCTP_BASE_INFO(ipi_addr_mtx)); \
} while (0)

#define SCTP_IPI_ADDR_WUNLOCK() do { \
	rw_wunlock(&SCTP_BASE_INFO(ipi_addr_mtx)); \
} while (0)

#define SCTP_IPI_ADDR_LOCK_ASSERT() do { \
	rw_assert(&SCTP_BASE_INFO(ipi_addr_mtx), RA_LOCKED); \
} while (0)

#define SCTP_IPI_ADDR_WLOCK_ASSERT() do { \
	rw_assert(&SCTP_BASE_INFO(ipi_addr_mtx), RA_WLOCKED); \
} while (0)
/* Iterator work-queue mutex: serializes the asoc-iterator work queue. */
#define SCTP_IPI_ITERATOR_WQ_INIT() do { \
	mtx_init(&sctp_it_ctl.ipi_iterator_wq_mtx, "sctp-it-wq", \
	    "sctp_it_wq", MTX_DEF); \
} while (0)

#define SCTP_IPI_ITERATOR_WQ_DESTROY() do { \
	mtx_destroy(&sctp_it_ctl.ipi_iterator_wq_mtx); \
} while (0)

#define SCTP_IPI_ITERATOR_WQ_LOCK() do { \
	mtx_lock(&sctp_it_ctl.ipi_iterator_wq_mtx); \
} while (0)

#define SCTP_IPI_ITERATOR_WQ_UNLOCK() do { \
	mtx_unlock(&sctp_it_ctl.ipi_iterator_wq_mtx); \
} while (0)
/* Packet-log mutex: protects the circular SCTP packet-logging buffer. */
#define SCTP_IP_PKTLOG_INIT() do { \
	mtx_init(&SCTP_BASE_INFO(ipi_pktlog_mtx), "sctp-pktlog", \
	    "packetlog", MTX_DEF); \
} while (0)

#define SCTP_IP_PKTLOG_DESTROY() do { \
	mtx_destroy(&SCTP_BASE_INFO(ipi_pktlog_mtx)); \
} while (0)

#define SCTP_IP_PKTLOG_LOCK() do { \
	mtx_lock(&SCTP_BASE_INFO(ipi_pktlog_mtx)); \
} while (0)

#define SCTP_IP_PKTLOG_UNLOCK() do { \
	mtx_unlock(&SCTP_BASE_INFO(ipi_pktlog_mtx)); \
} while (0)
230 * The INP locks we will use for locking an SCTP endpoint, so for example if
231 * we want to change something at the endpoint level for example random_store
232 * or cookie secrets we lock the INP level.
/*
 * Per-endpoint locks.  inp_rdata_mtx guards the endpoint's read (inbound
 * data) queue; inp_mtx is the main endpoint lock.  The *_CONTENDED
 * macros peek at MTX_CONTESTED so callers can detect waiters.
 */
#define SCTP_INP_READ_INIT(_inp) do { \
	mtx_init(&(_inp)->inp_rdata_mtx, "sctp-read", "inpr", \
	    MTX_DEF | MTX_DUPOK); \
} while (0)

#define SCTP_INP_READ_DESTROY(_inp) do { \
	mtx_destroy(&(_inp)->inp_rdata_mtx); \
} while (0)

#define SCTP_INP_READ_LOCK(_inp) do { \
	mtx_lock(&(_inp)->inp_rdata_mtx); \
} while (0)

#define SCTP_INP_READ_UNLOCK(_inp) do { \
	mtx_unlock(&(_inp)->inp_rdata_mtx); \
} while (0)

#define SCTP_INP_LOCK_INIT(_inp) do { \
	mtx_init(&(_inp)->inp_mtx, "sctp-inp", "inp", \
	    MTX_DEF | MTX_DUPOK); \
} while (0)

#define SCTP_INP_LOCK_DESTROY(_inp) do { \
	mtx_destroy(&(_inp)->inp_mtx); \
} while (0)

#define SCTP_INP_LOCK_CONTENDED(_inp) \
	((_inp)->inp_mtx.mtx_lock & MTX_CONTESTED)

#define SCTP_INP_READ_CONTENDED(_inp) \
	((_inp)->inp_rdata_mtx.mtx_lock & MTX_CONTESTED)
/*
 * Endpoint lock/unlock.  R and W variants are both backed by the single
 * inp_mtx mutex (kept separate for documentation and possible future
 * split); with SCTP_LOCK_LOGGING the acquisition is logged first.
 */
#ifdef SCTP_LOCK_LOGGING
#define SCTP_INP_RLOCK(_inp) do { \
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP); \
	mtx_lock(&(_inp)->inp_mtx); \
} while (0)

#define SCTP_INP_WLOCK(_inp) do { \
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP); \
	mtx_lock(&(_inp)->inp_mtx); \
} while (0)
#else
#define SCTP_INP_RLOCK(_inp) do { \
	mtx_lock(&(_inp)->inp_mtx); \
} while (0)

#define SCTP_INP_WLOCK(_inp) do { \
	mtx_lock(&(_inp)->inp_mtx); \
} while (0)
#endif

#define SCTP_INP_RUNLOCK(_inp) do { \
	mtx_unlock(&(_inp)->inp_mtx); \
} while (0)

#define SCTP_INP_WUNLOCK(_inp) do { \
	mtx_unlock(&(_inp)->inp_mtx); \
} while (0)

#define SCTP_INP_RLOCK_ASSERT(_inp) do { \
	KASSERT(mtx_owned(&(_inp)->inp_mtx), \
	    ("Don't own INP read lock")); \
} while (0)

#define SCTP_INP_WLOCK_ASSERT(_inp) do { \
	KASSERT(mtx_owned(&(_inp)->inp_mtx), \
	    ("Don't own INP write lock")); \
} while (0)

/* Endpoint reference counting (atomic; no lock required). */
#define SCTP_INP_INCR_REF(_inp) atomic_add_int(&((_inp)->refcount), 1)
#define SCTP_INP_DECR_REF(_inp) atomic_add_int(&((_inp)->refcount), -1)
/*
 * CREATE lock: serializes implicit/explicit association creation on an
 * endpoint so two creators cannot race (see the header comment above).
 */
#define SCTP_ASOC_CREATE_LOCK_INIT(_inp) do { \
	mtx_init(&(_inp)->inp_create_mtx, "sctp-create", "inp_create", \
	    MTX_DEF | MTX_DUPOK); \
} while (0)

#define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp) do { \
	mtx_destroy(&(_inp)->inp_create_mtx); \
} while (0)

#ifdef SCTP_LOCK_LOGGING
#define SCTP_ASOC_CREATE_LOCK(_inp) do { \
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_CREATE); \
	mtx_lock(&(_inp)->inp_create_mtx); \
} while (0)
#else
#define SCTP_ASOC_CREATE_LOCK(_inp) do { \
	mtx_lock(&(_inp)->inp_create_mtx); \
} while (0)
#endif

#define SCTP_ASOC_CREATE_UNLOCK(_inp) do { \
	mtx_unlock(&(_inp)->inp_create_mtx); \
} while (0)

#define SCTP_ASOC_CREATE_LOCK_CONTENDED(_inp) \
	((_inp)->inp_create_mtx.mtx_lock & MTX_CONTESTED)
339 * For the majority of things (once we have found the association) we will
 * lock the actual association mutex. This will protect all the association
341 * level queues and streams and such. We will need to lock the socket layer
342 * when we stuff data up into the receiving sb_mb. I.e. we will need to do an
343 * extra SOCKBUF_LOCK(&so->so_rcv) even though the association is locked.
/*
 * TCB (association) lock: held across most per-association work.
 * Non-recursive — see the header comment about deadlock on re-entry.
 */
#define SCTP_TCB_LOCK_INIT(_tcb) do { \
	mtx_init(&(_tcb)->tcb_mtx, "sctp-tcb", "tcb", \
	    MTX_DEF | MTX_DUPOK); \
} while (0)

#define SCTP_TCB_LOCK_DESTROY(_tcb) do { \
	mtx_destroy(&(_tcb)->tcb_mtx); \
} while (0)

#ifdef SCTP_LOCK_LOGGING
#define SCTP_TCB_LOCK(_tcb) do { \
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_tcb->sctp_ep, _tcb, SCTP_LOG_LOCK_TCB); \
	mtx_lock(&(_tcb)->tcb_mtx); \
} while (0)
#else
#define SCTP_TCB_LOCK(_tcb) do { \
	mtx_lock(&(_tcb)->tcb_mtx); \
} while (0)
#endif

/* Non-blocking acquire; evaluates non-zero on success (mtx_trylock). */
#define SCTP_TCB_TRYLOCK(_tcb) \
	mtx_trylock(&(_tcb)->tcb_mtx)

#define SCTP_TCB_UNLOCK(_tcb) do { \
	mtx_unlock(&(_tcb)->tcb_mtx); \
} while (0)

#define SCTP_TCB_UNLOCK_IFOWNED(_tcb) do { \
	if (mtx_owned(&(_tcb)->tcb_mtx)) \
		mtx_unlock(&(_tcb)->tcb_mtx); \
} while (0)

#define SCTP_TCB_LOCK_ASSERT(_tcb) do { \
	KASSERT(mtx_owned(&(_tcb)->tcb_mtx), \
	    ("Don't own TCB lock")); \
} while (0)
/*
 * Iterator lock: protects the global association iterator state.  The
 * LOCK macro asserts non-ownership first because this mutex must never
 * be acquired recursively.
 */
#define SCTP_ITERATOR_LOCK_INIT() do { \
	mtx_init(&sctp_it_ctl.it_mtx, "sctp-it", "iterator", MTX_DEF); \
} while (0)

#define SCTP_ITERATOR_LOCK_DESTROY() do { \
	mtx_destroy(&sctp_it_ctl.it_mtx); \
} while (0)

#define SCTP_ITERATOR_LOCK() do { \
	KASSERT(!mtx_owned(&sctp_it_ctl.it_mtx), \
	    ("Own the iterator lock")); \
	mtx_lock(&sctp_it_ctl.it_mtx); \
} while (0)

#define SCTP_ITERATOR_UNLOCK() do { \
	mtx_unlock(&sctp_it_ctl.it_mtx); \
} while (0)
/*
 * Address work-queue mutex: protects the deferred address-change work
 * queue.  DESTROY drops the mutex first if still owned, since
 * mtx_destroy() requires an unowned mutex.
 */
#define SCTP_WQ_ADDR_INIT() do { \
	mtx_init(&SCTP_BASE_INFO(wq_addr_mtx), \
	    "sctp-addr-wq", "sctp_addr_wq", MTX_DEF); \
} while (0)

#define SCTP_WQ_ADDR_DESTROY() do { \
	if (mtx_owned(&SCTP_BASE_INFO(wq_addr_mtx))) { \
		mtx_unlock(&SCTP_BASE_INFO(wq_addr_mtx)); \
	} \
	mtx_destroy(&SCTP_BASE_INFO(wq_addr_mtx)); \
} while (0)

#define SCTP_WQ_ADDR_LOCK() do { \
	mtx_lock(&SCTP_BASE_INFO(wq_addr_mtx)); \
} while (0)

#define SCTP_WQ_ADDR_UNLOCK() do { \
	mtx_unlock(&SCTP_BASE_INFO(wq_addr_mtx)); \
} while (0)

#define SCTP_WQ_ADDR_LOCK_ASSERT() do { \
	KASSERT(mtx_owned(&SCTP_BASE_INFO(wq_addr_mtx)), \
	    ("Don't own the ADDR-WQ lock")); \
} while (0)
/*
 * Global object counters (endpoints, associations, addresses, chunks,
 * read-queue and stream-out-queue entries).  All are lock-free atomics.
 * SCTP_DECR_CHK_COUNT additionally guards against underflow: KASSERT in
 * debug kernels, and a runtime check so production kernels never drive
 * the count negative.
 */
#define SCTP_INCR_EP_COUNT() do { \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_ep), 1); \
} while (0)

#define SCTP_DECR_EP_COUNT() do { \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_ep), 1); \
} while (0)

#define SCTP_INCR_ASOC_COUNT() do { \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_asoc), 1); \
} while (0)

#define SCTP_DECR_ASOC_COUNT() do { \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_asoc), 1); \
} while (0)

#define SCTP_INCR_LADDR_COUNT() do { \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_laddr), 1); \
} while (0)

#define SCTP_DECR_LADDR_COUNT() do { \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_laddr), 1); \
} while (0)

#define SCTP_INCR_RADDR_COUNT() do { \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_raddr), 1); \
} while (0)

#define SCTP_DECR_RADDR_COUNT() do { \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_raddr), 1); \
} while (0)

#define SCTP_INCR_CHK_COUNT() do { \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_chunk), 1); \
} while (0)

#define SCTP_DECR_CHK_COUNT() do { \
	KASSERT(SCTP_BASE_INFO(ipi_count_chunk) > 0, \
	    ("ipi_count_chunk would become negative")); \
	if (SCTP_BASE_INFO(ipi_count_chunk) != 0) \
		atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_chunk), 1); \
} while (0)

#define SCTP_INCR_READQ_COUNT() do { \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_readq), 1); \
} while (0)

#define SCTP_DECR_READQ_COUNT() do { \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_readq), 1); \
} while (0)

#define SCTP_INCR_STRMOQ_COUNT() do { \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_strmoq), 1); \
} while (0)

#define SCTP_DECR_STRMOQ_COUNT() do { \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_strmoq), 1); \
} while (0)
489 #if defined(SCTP_SO_LOCK_TESTING)
490 #define SCTP_INP_SO(sctpinp) \
491 (sctpinp)->ip_inp.inp.inp_socket
492 #define SCTP_SOCKET_LOCK(so, refcnt)
493 #define SCTP_SOCKET_UNLOCK(so, refcnt)