#ifndef __sctp_lock_bsd_h__
#define __sctp_lock_bsd_h__
/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * General locking concepts: The goal of our locking is to provide
 * consistency while minimizing overhead. We attempt to use non-recursive
 * locks, which are supposed to be quite inexpensive, and to keep most
 * functions unaware of locking. Once we have a TCB we lock it and unlock it
 * when we are through. This means that the TCB lock is effectively a
 * "global" lock while working on an association. Caution must be used when
 * acquiring a TCB_LOCK, since if we recurse we deadlock.
 *
 * Most other locks (INP and INFO) attempt to localize the locking, i.e. we
 * try to contain the lock and unlock within the function that needs the
 * lock. This sometimes means we do extra locks and unlocks and lose a bit of
 * efficiency, but if the performance claims about non-recursive locks hold,
 * this should not be a problem. One issue that arises with this
 * lock-only-when-needed approach is implicit association setup. If at the
 * time we look up an association the TCB comes back NULL, some other
 * processor could create the association before we do. This is what the
 * CREATE lock on the endpoint is for. Places that implicitly create an
 * association, OR explicitly create one (the connect call), acquire the
 * CREATE_INP lock. This assures us that while we look up the INP and INFO,
 * another creator that is also locking/looking up is gated so the two
 * synchronize. The CREATE_INP lock is therefore another lock we must take
 * with extreme caution to make sure we don't hit a re-entrancy issue.
 *
 * For platforms other than FreeBSD 5.x and later we provide a set of EMPTY
 * lock macros, so we can blatantly put locks everywhere and have them reduce
 * to nothing on NetBSD/OpenBSD and FreeBSD 4.x.
 */
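
/*
 * Illustrative sketch (not part of this header): one way a caller might use
 * the CREATE and TCB locks described above during an implicit setup.
 * lookup_assoc() and create_assoc() are hypothetical stand-ins for the real
 * lookup/allocation routines; only the lock macros come from this file.
 *
 *	SCTP_ASOC_CREATE_LOCK(inp);		// gate racing creators
 *	stcb = lookup_assoc(inp, addr);		// hypothetical lookup
 *	if (stcb == NULL) {
 *		// Still NULL while holding the CREATE lock, so no other
 *		// processor can create the association underneath us.
 *		stcb = create_assoc(inp, addr);	// hypothetical create
 *	}
 *	SCTP_ASOC_CREATE_UNLOCK(inp);
 *	if (stcb != NULL) {
 *		SCTP_TCB_LOCK(stcb);		// "global" lock for the asoc
 *		// ... work on the association; never recurse on tcb_mtx ...
 *		SCTP_TCB_UNLOCK(stcb);
 *	}
 */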

/*
 * When working with the global SCTP lists we lock and unlock the INP_INFO
 * lock. So when we go to look up an association we do a
 * SCTP_INP_INFO_RLOCK(), and when we want to add a new association to the
 * SCTP_BASE_INFO() lists we do a SCTP_INP_INFO_WLOCK().
 */
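
/*
 * Illustrative sketch (not part of this header): read-lock the global info
 * for lookups, write-lock it when inserting. find_in_global_lists() and
 * insert_into_global_lists() are hypothetical; the macros are defined below.
 *
 *	SCTP_INP_INFO_RLOCK();
 *	stcb = find_in_global_lists(...);	// hypothetical lookup
 *	SCTP_INP_INFO_RUNLOCK();
 *
 *	SCTP_INP_INFO_WLOCK();
 *	insert_into_global_lists(stcb);		// hypothetical insert
 *	SCTP_INP_INFO_WUNLOCK();
 */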
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");


extern struct sctp_foo_stuff sctp_logoff[];
extern int sctp_logoff_stuff;

#define SCTP_IPI_COUNT_INIT()

#define SCTP_STATLOG_INIT_LOCK()
#define SCTP_STATLOG_LOCK()
#define SCTP_STATLOG_UNLOCK()
#define SCTP_STATLOG_DESTROY()

#define SCTP_INP_INFO_LOCK_DESTROY() do { \
	if (rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx))) { \
		rw_wunlock(&SCTP_BASE_INFO(ipi_ep_mtx)); \
	} \
	rw_destroy(&SCTP_BASE_INFO(ipi_ep_mtx)); \
} while (0)

#define SCTP_INP_INFO_LOCK_INIT() \
	rw_init(&SCTP_BASE_INFO(ipi_ep_mtx), "sctp-info");


#define SCTP_INP_INFO_RLOCK() do { \
	rw_rlock(&SCTP_BASE_INFO(ipi_ep_mtx)); \
} while (0)


#define SCTP_INP_INFO_WLOCK() do { \
	rw_wlock(&SCTP_BASE_INFO(ipi_ep_mtx)); \
} while (0)


#define SCTP_INP_INFO_RUNLOCK()	rw_runlock(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_WUNLOCK()	rw_wunlock(&SCTP_BASE_INFO(ipi_ep_mtx))


#define SCTP_IPI_ADDR_INIT() \
	rw_init(&SCTP_BASE_INFO(ipi_addr_mtx), "sctp-addr")
#define SCTP_IPI_ADDR_DESTROY() do { \
	if (rw_wowned(&SCTP_BASE_INFO(ipi_addr_mtx))) { \
		rw_wunlock(&SCTP_BASE_INFO(ipi_addr_mtx)); \
	} \
	rw_destroy(&SCTP_BASE_INFO(ipi_addr_mtx)); \
} while (0)
#define SCTP_IPI_ADDR_RLOCK() do { \
	rw_rlock(&SCTP_BASE_INFO(ipi_addr_mtx)); \
} while (0)
#define SCTP_IPI_ADDR_WLOCK() do { \
	rw_wlock(&SCTP_BASE_INFO(ipi_addr_mtx)); \
} while (0)

#define SCTP_IPI_ADDR_RUNLOCK()	rw_runlock(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_WUNLOCK()	rw_wunlock(&SCTP_BASE_INFO(ipi_addr_mtx))


#define SCTP_IPI_ITERATOR_WQ_INIT() \
	mtx_init(&sctp_it_ctl.ipi_iterator_wq_mtx, "sctp-it-wq", "sctp_it_wq", MTX_DEF)

#define SCTP_IPI_ITERATOR_WQ_DESTROY() \
	mtx_destroy(&sctp_it_ctl.ipi_iterator_wq_mtx)

#define SCTP_IPI_ITERATOR_WQ_LOCK() do { \
	mtx_lock(&sctp_it_ctl.ipi_iterator_wq_mtx); \
} while (0)

#define SCTP_IPI_ITERATOR_WQ_UNLOCK()	mtx_unlock(&sctp_it_ctl.ipi_iterator_wq_mtx)


#define SCTP_IP_PKTLOG_INIT() \
	mtx_init(&SCTP_BASE_INFO(ipi_pktlog_mtx), "sctp-pktlog", "packetlog", MTX_DEF)


#define SCTP_IP_PKTLOG_LOCK() do { \
	mtx_lock(&SCTP_BASE_INFO(ipi_pktlog_mtx)); \
} while (0)

#define SCTP_IP_PKTLOG_UNLOCK()	mtx_unlock(&SCTP_BASE_INFO(ipi_pktlog_mtx))

#define SCTP_IP_PKTLOG_DESTROY() \
	mtx_destroy(&SCTP_BASE_INFO(ipi_pktlog_mtx))



/*
 * The INP locks are what we use to lock an SCTP endpoint; for example, if we
 * want to change something at the endpoint level, such as random_store or
 * the cookie secrets, we lock at the INP level.
 */
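
/*
 * Illustrative sketch (not part of this header): take the endpoint (INP)
 * write lock around an endpoint-level modification. refresh_secret() is a
 * hypothetical helper; only the lock macros below are real.
 *
 *	SCTP_INP_WLOCK(inp);
 *	refresh_secret(inp);		// hypothetical: touch endpoint state
 *	SCTP_INP_WUNLOCK(inp);
 */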

#define SCTP_INP_READ_INIT(_inp) \
	mtx_init(&(_inp)->inp_rdata_mtx, "sctp-read", "inpr", MTX_DEF | MTX_DUPOK)

#define SCTP_INP_READ_DESTROY(_inp) \
	mtx_destroy(&(_inp)->inp_rdata_mtx)

#define SCTP_INP_READ_LOCK(_inp) do { \
	mtx_lock(&(_inp)->inp_rdata_mtx); \
} while (0)


#define SCTP_INP_READ_UNLOCK(_inp) mtx_unlock(&(_inp)->inp_rdata_mtx)


#define SCTP_INP_LOCK_INIT(_inp) \
	mtx_init(&(_inp)->inp_mtx, "sctp-inp", "inp", MTX_DEF | MTX_DUPOK)
#define SCTP_ASOC_CREATE_LOCK_INIT(_inp) \
	mtx_init(&(_inp)->inp_create_mtx, "sctp-create", "inp_create", \
		 MTX_DEF | MTX_DUPOK)

#define SCTP_INP_LOCK_DESTROY(_inp) \
	mtx_destroy(&(_inp)->inp_mtx)

#define SCTP_INP_LOCK_CONTENDED(_inp) ((_inp)->inp_mtx.mtx_lock & MTX_CONTESTED)

#define SCTP_INP_READ_CONTENDED(_inp) ((_inp)->inp_rdata_mtx.mtx_lock & MTX_CONTESTED)

#define SCTP_ASOC_CREATE_LOCK_CONTENDED(_inp) ((_inp)->inp_create_mtx.mtx_lock & MTX_CONTESTED)


#define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp) \
	mtx_destroy(&(_inp)->inp_create_mtx)


#ifdef SCTP_LOCK_LOGGING
#define SCTP_INP_RLOCK(_inp) do { \
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_INP); \
	mtx_lock(&(_inp)->inp_mtx); \
} while (0)

#define SCTP_INP_WLOCK(_inp) do { \
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_INP); \
	mtx_lock(&(_inp)->inp_mtx); \
} while (0)

#else

#define SCTP_INP_RLOCK(_inp) do { \
	mtx_lock(&(_inp)->inp_mtx); \
} while (0)

#define SCTP_INP_WLOCK(_inp) do { \
	mtx_lock(&(_inp)->inp_mtx); \
} while (0)

#endif


#define SCTP_TCB_SEND_LOCK_INIT(_tcb) \
	mtx_init(&(_tcb)->tcb_send_mtx, "sctp-send-tcb", "tcbs", MTX_DEF | MTX_DUPOK)

#define SCTP_TCB_SEND_LOCK_DESTROY(_tcb) mtx_destroy(&(_tcb)->tcb_send_mtx)

#define SCTP_TCB_SEND_LOCK(_tcb) do { \
	mtx_lock(&(_tcb)->tcb_send_mtx); \
} while (0)

#define SCTP_TCB_SEND_UNLOCK(_tcb) mtx_unlock(&(_tcb)->tcb_send_mtx)

#define SCTP_INP_INCR_REF(_inp) atomic_add_int(&((_inp)->refcount), 1)
#define SCTP_INP_DECR_REF(_inp) atomic_add_int(&((_inp)->refcount), -1)
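
/*
 * Illustrative sketch (not part of this header): the reference count can be
 * bumped so the INP is not freed while its lock is temporarily dropped.
 * do_unlocked_work() is hypothetical.
 *
 *	SCTP_INP_INCR_REF(inp);		// pin the endpoint
 *	SCTP_INP_WUNLOCK(inp);		// drop the lock for a blocking step
 *	do_unlocked_work();		// hypothetical work without the lock
 *	SCTP_INP_WLOCK(inp);		// reacquire before touching inp again
 *	SCTP_INP_DECR_REF(inp);
 */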


#ifdef SCTP_LOCK_LOGGING
#define SCTP_ASOC_CREATE_LOCK(_inp) \
	do { \
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
			sctp_log_lock(_inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_CREATE); \
		mtx_lock(&(_inp)->inp_create_mtx); \
	} while (0)
#else

#define SCTP_ASOC_CREATE_LOCK(_inp) \
	do { \
		mtx_lock(&(_inp)->inp_create_mtx); \
	} while (0)
#endif

#define SCTP_INP_RUNLOCK(_inp)		mtx_unlock(&(_inp)->inp_mtx)
#define SCTP_INP_WUNLOCK(_inp)		mtx_unlock(&(_inp)->inp_mtx)
#define SCTP_ASOC_CREATE_UNLOCK(_inp)	mtx_unlock(&(_inp)->inp_create_mtx)

/*
 * For the majority of things (once we have found the association) we lock
 * the actual association mutex. This protects all the association-level
 * queues, streams and so on. We still need to lock the socket layer when we
 * stuff data up into the receiving sb_mb, i.e. we need an extra
 * SOCKBUF_LOCK(&so->so_rcv) even though the association is locked.
 */
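
/*
 * Illustrative sketch (not part of this header): lock ordering when
 * appending received data. append_to_recv_buffer() is hypothetical; the
 * TCB macros are defined below and SOCKBUF_LOCK comes from the socket code.
 *
 *	SCTP_TCB_LOCK(stcb);			// association-level lock
 *	SOCKBUF_LOCK(&so->so_rcv);		// extra socket-buffer lock
 *	append_to_recv_buffer(so, m);		// hypothetical: fill sb_mb
 *	SOCKBUF_UNLOCK(&so->so_rcv);
 *	SCTP_TCB_UNLOCK(stcb);
 */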

#define SCTP_TCB_LOCK_INIT(_tcb) \
	mtx_init(&(_tcb)->tcb_mtx, "sctp-tcb", "tcb", MTX_DEF | MTX_DUPOK)

#define SCTP_TCB_LOCK_DESTROY(_tcb)	mtx_destroy(&(_tcb)->tcb_mtx)

#ifdef SCTP_LOCK_LOGGING
#define SCTP_TCB_LOCK(_tcb) do { \
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_tcb->sctp_ep, _tcb, SCTP_LOG_LOCK_TCB); \
	mtx_lock(&(_tcb)->tcb_mtx); \
} while (0)

#else
#define SCTP_TCB_LOCK(_tcb) do { \
	mtx_lock(&(_tcb)->tcb_mtx); \
} while (0)

#endif


#define SCTP_TCB_TRYLOCK(_tcb)	mtx_trylock(&(_tcb)->tcb_mtx)

#define SCTP_TCB_UNLOCK(_tcb)	mtx_unlock(&(_tcb)->tcb_mtx)

#define SCTP_TCB_UNLOCK_IFOWNED(_tcb) do { \
	if (mtx_owned(&(_tcb)->tcb_mtx)) \
		mtx_unlock(&(_tcb)->tcb_mtx); \
} while (0)



#ifdef INVARIANTS
#define SCTP_TCB_LOCK_ASSERT(_tcb) do { \
	if (mtx_owned(&(_tcb)->tcb_mtx) == 0) \
		panic("Don't own TCB lock"); \
} while (0)
#else
#define SCTP_TCB_LOCK_ASSERT(_tcb)
#endif

#define SCTP_ITERATOR_LOCK_INIT() \
	mtx_init(&sctp_it_ctl.it_mtx, "sctp-it", "iterator", MTX_DEF)

#ifdef INVARIANTS
#define SCTP_ITERATOR_LOCK() \
	do { \
		if (mtx_owned(&sctp_it_ctl.it_mtx)) \
			panic("Iterator Lock"); \
		mtx_lock(&sctp_it_ctl.it_mtx); \
	} while (0)
#else
#define SCTP_ITERATOR_LOCK() \
	do { \
		mtx_lock(&sctp_it_ctl.it_mtx); \
	} while (0)

#endif

#define SCTP_ITERATOR_UNLOCK()		mtx_unlock(&sctp_it_ctl.it_mtx)
#define SCTP_ITERATOR_LOCK_DESTROY()	mtx_destroy(&sctp_it_ctl.it_mtx)


#define SCTP_WQ_ADDR_INIT() do { \
	mtx_init(&SCTP_BASE_INFO(wq_addr_mtx), "sctp-addr-wq", "sctp_addr_wq", MTX_DEF); \
} while (0)

#define SCTP_WQ_ADDR_DESTROY() do { \
	if (mtx_owned(&SCTP_BASE_INFO(wq_addr_mtx))) { \
		mtx_unlock(&SCTP_BASE_INFO(wq_addr_mtx)); \
	} \
	mtx_destroy(&SCTP_BASE_INFO(wq_addr_mtx)); \
} while (0)

#define SCTP_WQ_ADDR_LOCK() do { \
	mtx_lock(&SCTP_BASE_INFO(wq_addr_mtx)); \
} while (0)
#define SCTP_WQ_ADDR_UNLOCK() do { \
	mtx_unlock(&SCTP_BASE_INFO(wq_addr_mtx)); \
} while (0)


#define SCTP_INCR_EP_COUNT() \
	do { \
		atomic_add_int(&SCTP_BASE_INFO(ipi_count_ep), 1); \
	} while (0)

#define SCTP_DECR_EP_COUNT() \
	do { \
		atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_ep), 1); \
	} while (0)

#define SCTP_INCR_ASOC_COUNT() \
	do { \
		atomic_add_int(&SCTP_BASE_INFO(ipi_count_asoc), 1); \
	} while (0)

#define SCTP_DECR_ASOC_COUNT() \
	do { \
		atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_asoc), 1); \
	} while (0)

#define SCTP_INCR_LADDR_COUNT() \
	do { \
		atomic_add_int(&SCTP_BASE_INFO(ipi_count_laddr), 1); \
	} while (0)

#define SCTP_DECR_LADDR_COUNT() \
	do { \
		atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_laddr), 1); \
	} while (0)

#define SCTP_INCR_RADDR_COUNT() \
	do { \
		atomic_add_int(&SCTP_BASE_INFO(ipi_count_raddr), 1); \
	} while (0)

#define SCTP_DECR_RADDR_COUNT() \
	do { \
		atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_raddr), 1); \
	} while (0)

#define SCTP_INCR_CHK_COUNT() \
	do { \
		atomic_add_int(&SCTP_BASE_INFO(ipi_count_chunk), 1); \
	} while (0)
#ifdef INVARIANTS
#define SCTP_DECR_CHK_COUNT() \
	do { \
		if (SCTP_BASE_INFO(ipi_count_chunk) == 0) \
			panic("chunk count to 0?"); \
		atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_chunk), 1); \
	} while (0)
#else
#define SCTP_DECR_CHK_COUNT() \
	do { \
		if (SCTP_BASE_INFO(ipi_count_chunk) != 0) \
			atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_chunk), 1); \
	} while (0)
#endif
#define SCTP_INCR_READQ_COUNT() \
	do { \
		atomic_add_int(&SCTP_BASE_INFO(ipi_count_readq), 1); \
	} while (0)

#define SCTP_DECR_READQ_COUNT() \
	do { \
		atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_readq), 1); \
	} while (0)

#define SCTP_INCR_STRMOQ_COUNT() \
	do { \
		atomic_add_int(&SCTP_BASE_INFO(ipi_count_strmoq), 1); \
	} while (0)

#define SCTP_DECR_STRMOQ_COUNT() \
	do { \
		atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_strmoq), 1); \
	} while (0)

#if defined(SCTP_SO_LOCK_TESTING)
#define SCTP_INP_SO(sctpinp)	(sctpinp)->ip_inp.inp.inp_socket
#define SCTP_SOCKET_LOCK(so, refcnt)
#define SCTP_SOCKET_UNLOCK(so, refcnt)
#endif

#endif