/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifndef _NETINET_SCTP_VAR_H_
#define _NETINET_SCTP_VAR_H_

#include <netinet/sctp_uio.h>

#if defined(_KERNEL) || defined(__Userspace__)

extern struct pr_usrreqs sctp_usrreqs;

#define sctp_feature_on(inp, feature) (inp->sctp_features |= feature)
#define sctp_feature_off(inp, feature) (inp->sctp_features &= ~feature)
#define sctp_is_feature_on(inp, feature) ((inp->sctp_features & feature) == feature)
#define sctp_is_feature_off(inp, feature) ((inp->sctp_features & feature) == 0)
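
/*
 * Association-level variants: when an stcb is supplied, the feature bits on
 * the association are used; otherwise they fall back to the endpoint (inp).
 */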
#define sctp_stcb_feature_on(inp, stcb, feature) { \
	if (stcb) { \
		stcb->asoc.sctp_features |= feature; \
	} else if (inp) { \
		inp->sctp_features |= feature; \
	} \
}

#define sctp_stcb_feature_off(inp, stcb, feature) { \
	if (stcb) { \
		stcb->asoc.sctp_features &= ~feature; \
	} else if (inp) { \
		inp->sctp_features &= ~feature; \
	} \
}

#define sctp_stcb_is_feature_on(inp, stcb, feature) \
	(((stcb != NULL) && \
	  ((stcb->asoc.sctp_features & feature) == feature)) || \
	 ((stcb == NULL) && (inp != NULL) && \
	  ((inp->sctp_features & feature) == feature)))

#define sctp_stcb_is_feature_off(inp, stcb, feature) \
	(((stcb != NULL) && \
	  ((stcb->asoc.sctp_features & feature) == 0)) || \
	 ((stcb == NULL) && (inp != NULL) && \
	  ((inp->sctp_features & feature) == 0)) || \
	 ((stcb == NULL) && (inp == NULL)))

/* managing mobility_feature in inpcb (by micchie) */
#define sctp_mobility_feature_on(inp, feature) (inp->sctp_mobility_features |= feature)
#define sctp_mobility_feature_off(inp, feature) (inp->sctp_mobility_features &= ~feature)
#define sctp_is_mobility_feature_on(inp, feature) (inp->sctp_mobility_features & feature)
#define sctp_is_mobility_feature_off(inp, feature) ((inp->sctp_mobility_features & feature) == 0)
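
/*
 * Socket-buffer space helpers: available space is measured against the
 * larger of the buffer's high-water mark and SCTP_MINIMAL_RWND, and the
 * subtractions below are clamped at zero so they never go negative.
 */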
#define sctp_maxspace(sb) (max((sb)->sb_hiwat, SCTP_MINIMAL_RWND))

#define sctp_sbspace(asoc, sb) ((long) ((sctp_maxspace(sb) > (asoc)->sb_cc) ? (sctp_maxspace(sb) - (asoc)->sb_cc) : 0))

#define sctp_sbspace_failedmsgs(sb) ((long) ((sctp_maxspace(sb) > (sb)->sb_cc) ? (sctp_maxspace(sb) - (sb)->sb_cc) : 0))

#define sctp_sbspace_sub(a,b) (((a) > (b)) ? ((a) - (b)) : 0)

/*
 * I tried to cache the readq entries at one point. But the reality
 * is that it did not add any performance since this meant we had to
 * lock the STCB on read. And at that point once you have to do an
 * extra lock, it really does not matter if the lock is in the ZONE
 * stuff or in our code. Note that this same problem would occur with
 * an mbuf cache as well so it is not really worth doing, at least
 * right now.
 */
#ifdef INVARIANTS
#define sctp_free_a_readq(_stcb, _readq) { \
	if ((_readq)->on_strm_q) \
		panic("On strm q stcb:%p readq:%p", (_stcb), (_readq)); \
	SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), (_readq)); \
	SCTP_DECR_READQ_COUNT(); \
}
#else
#define sctp_free_a_readq(_stcb, _readq) { \
	SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), (_readq)); \
	SCTP_DECR_READQ_COUNT(); \
}
#endif

#define sctp_alloc_a_readq(_stcb, _readq) { \
	(_readq) = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_readq), struct sctp_queued_to_read); \
	if ((_readq)) { \
		SCTP_INCR_READQ_COUNT(); \
	} \
}

#define sctp_free_a_strmoq(_stcb, _strmoq, _so_locked) { \
	if ((_strmoq)->holds_key_ref) { \
		sctp_auth_key_release((_stcb), (_strmoq)->auth_keyid, _so_locked); \
		(_strmoq)->holds_key_ref = 0; \
	} \
	SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_strmoq), (_strmoq)); \
	SCTP_DECR_STRMOQ_COUNT(); \
}

#define sctp_alloc_a_strmoq(_stcb, _strmoq) { \
	(_strmoq) = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_strmoq), struct sctp_stream_queue_pending); \
	if ((_strmoq)) { \
		memset(_strmoq, 0, sizeof(struct sctp_stream_queue_pending)); \
		SCTP_INCR_STRMOQ_COUNT(); \
		(_strmoq)->holds_key_ref = 0; \
	} \
}
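
/*
 * Freed chunks are kept on the association's free_chunks list for reuse,
 * unless the per-association or system-wide cache limits have been hit,
 * in which case the chunk goes back to the zone allocator.
 */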
#define sctp_free_a_chunk(_stcb, _chk, _so_locked) { \
	if ((_chk)->holds_key_ref) { \
		sctp_auth_key_release((_stcb), (_chk)->auth_keyid, _so_locked); \
		(_chk)->holds_key_ref = 0; \
	} \
	if (_stcb) { \
		SCTP_TCB_LOCK_ASSERT((_stcb)); \
		if ((_chk)->whoTo) { \
			sctp_free_remote_addr((_chk)->whoTo); \
			(_chk)->whoTo = NULL; \
		} \
		if (((_stcb)->asoc.free_chunk_cnt > SCTP_BASE_SYSCTL(sctp_asoc_free_resc_limit)) || \
		    (SCTP_BASE_INFO(ipi_free_chunks) > SCTP_BASE_SYSCTL(sctp_system_free_resc_limit))) { \
			SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), (_chk)); \
			SCTP_DECR_CHK_COUNT(); \
		} else { \
			TAILQ_INSERT_TAIL(&(_stcb)->asoc.free_chunks, (_chk), sctp_next); \
			(_stcb)->asoc.free_chunk_cnt++; \
			atomic_add_int(&SCTP_BASE_INFO(ipi_free_chunks), 1); \
		} \
	} else { \
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), (_chk)); \
		SCTP_DECR_CHK_COUNT(); \
	} \
}

#define sctp_alloc_a_chunk(_stcb, _chk) { \
	if (TAILQ_EMPTY(&(_stcb)->asoc.free_chunks)) { \
		(_chk) = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_chunk), struct sctp_tmit_chunk); \
		if ((_chk)) { \
			SCTP_INCR_CHK_COUNT(); \
			(_chk)->whoTo = NULL; \
			(_chk)->holds_key_ref = 0; \
		} \
	} else { \
		(_chk) = TAILQ_FIRST(&(_stcb)->asoc.free_chunks); \
		TAILQ_REMOVE(&(_stcb)->asoc.free_chunks, (_chk), sctp_next); \
		atomic_subtract_int(&SCTP_BASE_INFO(ipi_free_chunks), 1); \
		(_chk)->holds_key_ref = 0; \
		SCTP_STAT_INCR(sctps_cached_chk); \
		(_stcb)->asoc.free_chunk_cnt--; \
	} \
}
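
/*
 * Drop one reference on a remote address (net) entry; the last reference
 * stops its retransmission timer, releases the cached route and source
 * address, and returns the structure to its zone.
 */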
#define sctp_free_remote_addr(__net) { \
	if ((__net)) { \
		if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&(__net)->ref_count)) { \
			(void)SCTP_OS_TIMER_STOP(&(__net)->rxt_timer.timer); \
			if ((__net)->ro.ro_rt) { \
				RTFREE((__net)->ro.ro_rt); \
				(__net)->ro.ro_rt = NULL; \
			} \
			if ((__net)->src_addr_selected) { \
				sctp_free_ifa((__net)->ro._s_addr); \
				(__net)->ro._s_addr = NULL; \
			} \
			(__net)->src_addr_selected = 0; \
			(__net)->dest_state &= ~SCTP_ADDR_REACHABLE; \
			SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_net), (__net)); \
			SCTP_DECR_RADDR_COUNT(); \
		} \
	} \
}

#define sctp_sbfree(ctl, stcb, sb, m) { \
	SCTP_SAVE_ATOMIC_DECREMENT(&(sb)->sb_cc, SCTP_BUF_LEN((m))); \
	SCTP_SAVE_ATOMIC_DECREMENT(&(sb)->sb_mbcnt, MSIZE); \
	if (((ctl)->do_not_ref_stcb == 0) && stcb) { \
		SCTP_SAVE_ATOMIC_DECREMENT(&(stcb)->asoc.sb_cc, SCTP_BUF_LEN((m))); \
		SCTP_SAVE_ATOMIC_DECREMENT(&(stcb)->asoc.my_rwnd_control_len, MSIZE); \
	} \
	if (SCTP_BUF_TYPE(m) != MT_DATA && SCTP_BUF_TYPE(m) != MT_HEADER && \
	    SCTP_BUF_TYPE(m) != MT_OOBDATA) \
		atomic_subtract_int(&(sb)->sb_ctl, SCTP_BUF_LEN((m))); \
}

#define sctp_sballoc(stcb, sb, m) { \
	atomic_add_int(&(sb)->sb_cc, SCTP_BUF_LEN((m))); \
	atomic_add_int(&(sb)->sb_mbcnt, MSIZE); \
	if (stcb) { \
		atomic_add_int(&(stcb)->asoc.sb_cc, SCTP_BUF_LEN((m))); \
		atomic_add_int(&(stcb)->asoc.my_rwnd_control_len, MSIZE); \
	} \
	if (SCTP_BUF_TYPE(m) != MT_DATA && SCTP_BUF_TYPE(m) != MT_HEADER && \
	    SCTP_BUF_TYPE(m) != MT_OOBDATA) \
		atomic_add_int(&(sb)->sb_ctl, SCTP_BUF_LEN((m))); \
}

#define sctp_ucount_incr(val) { \
	val++; \
}

#define sctp_ucount_decr(val) { \
	if (val > 0) { \
		val--; \
	} else { \
		val = 0; \
	} \
}

#define sctp_mbuf_crush(data) do { \
	struct mbuf *_m; \
	_m = (data); \
	while (_m && (SCTP_BUF_LEN(_m) == 0)) { \
		(data) = SCTP_BUF_NEXT(_m); \
		SCTP_BUF_NEXT(_m) = NULL; \
		sctp_m_free(_m); \
		_m = (data); \
	} \
} while (0)
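
/*
 * Flight-size bookkeeping: flight_size is tracked per destination (net),
 * total_flight per association. With SCTP_FS_SPEC_LOG defined, every
 * change to total_flight is also recorded in asoc.fslog for debugging.
 */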
#define sctp_flight_size_decrease(tp1) do { \
	if (tp1->whoTo->flight_size >= tp1->book_size) \
		tp1->whoTo->flight_size -= tp1->book_size; \
	else \
		tp1->whoTo->flight_size = 0; \
} while (0)

#define sctp_flight_size_increase(tp1) do { \
	(tp1)->whoTo->flight_size += (tp1)->book_size; \
} while (0)

#ifdef SCTP_FS_SPEC_LOG
#define sctp_total_flight_decrease(stcb, tp1) do { \
	if (stcb->asoc.fs_index > SCTP_FS_SPEC_LOG_SIZE) \
		stcb->asoc.fs_index = 0; \
	stcb->asoc.fslog[stcb->asoc.fs_index].total_flight = stcb->asoc.total_flight; \
	stcb->asoc.fslog[stcb->asoc.fs_index].tsn = tp1->rec.data.tsn; \
	stcb->asoc.fslog[stcb->asoc.fs_index].book = tp1->book_size; \
	stcb->asoc.fslog[stcb->asoc.fs_index].sent = tp1->sent; \
	stcb->asoc.fslog[stcb->asoc.fs_index].incr = 0; \
	stcb->asoc.fslog[stcb->asoc.fs_index].decr = 1; \
	stcb->asoc.fs_index++; \
	tp1->window_probe = 0; \
	if (stcb->asoc.total_flight >= tp1->book_size) { \
		stcb->asoc.total_flight -= tp1->book_size; \
		if (stcb->asoc.total_flight_count > 0) \
			stcb->asoc.total_flight_count--; \
	} else { \
		stcb->asoc.total_flight = 0; \
		stcb->asoc.total_flight_count = 0; \
	} \
} while (0)

#define sctp_total_flight_increase(stcb, tp1) do { \
	if (stcb->asoc.fs_index > SCTP_FS_SPEC_LOG_SIZE) \
		stcb->asoc.fs_index = 0; \
	stcb->asoc.fslog[stcb->asoc.fs_index].total_flight = stcb->asoc.total_flight; \
	stcb->asoc.fslog[stcb->asoc.fs_index].tsn = tp1->rec.data.tsn; \
	stcb->asoc.fslog[stcb->asoc.fs_index].book = tp1->book_size; \
	stcb->asoc.fslog[stcb->asoc.fs_index].sent = tp1->sent; \
	stcb->asoc.fslog[stcb->asoc.fs_index].incr = 1; \
	stcb->asoc.fslog[stcb->asoc.fs_index].decr = 0; \
	stcb->asoc.fs_index++; \
	(stcb)->asoc.total_flight_count++; \
	(stcb)->asoc.total_flight += (tp1)->book_size; \
} while (0)

#else

#define sctp_total_flight_decrease(stcb, tp1) do { \
	tp1->window_probe = 0; \
	if (stcb->asoc.total_flight >= tp1->book_size) { \
		stcb->asoc.total_flight -= tp1->book_size; \
		if (stcb->asoc.total_flight_count > 0) \
			stcb->asoc.total_flight_count--; \
	} else { \
		stcb->asoc.total_flight = 0; \
		stcb->asoc.total_flight_count = 0; \
	} \
} while (0)

#define sctp_total_flight_increase(stcb, tp1) do { \
	(stcb)->asoc.total_flight_count++; \
	(stcb)->asoc.total_flight += (tp1)->book_size; \
} while (0)

#endif

#define SCTP_PF_ENABLED(_net) (_net->pf_threshold < _net->failure_threshold)
#define SCTP_NET_IS_PF(_net) (_net->pf_threshold < _net->error_count)

struct sctp_nets;
struct sctp_inpcb;
struct sctp_tcb;
struct sctphdr;

void sctp_close(struct socket *so);
int sctp_disconnect(struct socket *so);
void sctp_ctlinput(int, struct sockaddr *, void *);
int sctp_ctloutput(struct socket *, struct sockopt *);
void sctp_input_with_port(struct mbuf *, int, uint16_t);
int sctp_input(struct mbuf **, int *, int);
void sctp_pathmtu_adjustment(struct sctp_tcb *, uint16_t);
void sctp_drain(void);
void sctp_init(void);
void
sctp_notify(struct sctp_inpcb *, struct sctp_tcb *, struct sctp_nets *,
    uint8_t, uint8_t, uint16_t, uint32_t);
int sctp_flush(struct socket *, int);
int sctp_shutdown(struct socket *);
int
sctp_bindx(struct socket *, int, struct sockaddr_storage *,
    int, int, struct proc *);

/* can't use sctp_assoc_t here */
int sctp_peeloff(struct socket *, struct socket *, int, caddr_t, int *);
int sctp_ingetaddr(struct socket *, struct sockaddr **);
int sctp_peeraddr(struct socket *, struct sockaddr **);
int sctp_listen(struct socket *, int, struct thread *);
int sctp_accept(struct socket *, struct sockaddr **);

#endif /* _KERNEL || __Userspace__ */

#endif /* !_NETINET_SCTP_VAR_H_ */