2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
7 * a) Redistributions of source code must retain the above copyright notice,
8 * this list of conditions and the following disclaimer.
10 * b) Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the distribution.
14 * c) Neither the name of Cisco Systems, Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived
16 * from this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
31 /* $KAME: sctp_input.c,v 1.27 2005/03/06 16:04:17 itojun Exp $ */
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_var.h>
38 #include <netinet/sctp_sysctl.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctputil.h>
42 #include <netinet/sctp_output.h>
43 #include <netinet/sctp_input.h>
44 #include <netinet/sctp_auth.h>
45 #include <netinet/sctp_indata.h>
46 #include <netinet/sctp_asconf.h>
47 #include <netinet/sctp_bsd_addr.h>
/*
 * sctp_stop_all_cookie_timers:
 * Walk every destination net of the association and stop any running
 * COOKIE or INIT retransmission timer, covering INIT/COOKIE collision
 * cases.  Caller must hold the TCB lock (asserted below).
 * NOTE(review): gaps in the embedded line numbers show this dump dropped
 * source lines (braces, trailing timer-stop arguments); code is left
 * byte-identical rather than reconstructed.
 */
52 sctp_stop_all_cookie_timers(struct sctp_tcb *stcb)
54 struct sctp_nets *net;
57 * This now not only stops all cookie timers it also stops any INIT
58 * timers as well. This will make sure that the timers are stopped
59 * in all collision cases.
61 SCTP_TCB_LOCK_ASSERT(stcb);
/* scan each remote transport address of this association */
62 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
63 if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) {
64 sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE,
67 net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1);
68 } else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) {
69 sctp_timer_stop(SCTP_TIMER_TYPE_INIT,
72 net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2);
/*
 * sctp_handle_init:
 * Process an inbound INIT chunk.  Validates the chunk length and the
 * mandatory parameters (non-zero initiate tag, a_rwnd >= SCTP_MIN_RWND,
 * non-zero inbound/outbound stream counts, AUTH parameters); on any
 * violation an ABORT is sent via sctp_abort_association() and
 * *abort_no_unlock is set so the caller skips the TCB unlock.  On
 * success an INIT-ACK carrying a state cookie is sent.
 * NOTE(review): embedded line-number gaps show dropped lines (returns,
 * closing braces, trailing call arguments); code left byte-identical.
 */
79 sctp_handle_init(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
80 struct sctp_init_chunk *cp, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
81 struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
83 struct sctp_init *init;
87 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n",
/* reject: listen queue closed and no existing association */
91 /* First are we accepting? */
92 if ((inp->sctp_socket->so_qlimit == 0) && (stcb == NULL)) {
93 SCTPDBG(SCTP_DEBUG_INPUT2,
94 "sctp_handle_init: Abort, so_qlimit:%d\n",
95 inp->sctp_socket->so_qlimit);
97 * FIX ME ?? What about TCP model and we have a
100 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
103 *abort_no_unlock = 1;
/* chunk shorter than a full INIT chunk header is a protocol error */
106 if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) {
108 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
109 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
112 *abort_no_unlock = 1;
115 /* validate parameters */
116 if (init->initiate_tag == 0) {
117 /* protocol error... send abort */
118 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
119 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
122 *abort_no_unlock = 1;
125 if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) {
126 /* invalid parameter... send abort */
127 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
128 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
132 if (init->num_inbound_streams == 0) {
133 /* protocol error... send abort */
134 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
135 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
138 *abort_no_unlock = 1;
141 if (init->num_outbound_streams == 0) {
142 /* protocol error... send abort */
143 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
144 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
147 *abort_no_unlock = 1;
/* verify AUTH-related parameters within the chunk's byte range */
150 init_limit = offset + ntohs(cp->ch.chunk_length);
151 if (sctp_validate_init_auth_params(m, offset + sizeof(*cp),
153 /* auth parameter(s) error... send abort */
154 sctp_abort_association(inp, stcb, m, iphlen, sh, NULL, vrf_id);
156 *abort_no_unlock = 1;
159 /* send an INIT-ACK w/cookie */
160 SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n");
161 sctp_send_initiate_ack(inp, stcb, m, iphlen, offset, sh, cp, vrf_id);
/*
 * sctp_process_init:
 * Absorb the peer's INIT (or INIT-ACK) parameters into the association:
 * saves the peer's verification tag and rwnd, seeds per-net ssthresh,
 * trims our outbound stream count down to the peer's advertised inbound
 * count (failing any queued data on the abandoned streams), resets the
 * inbound TSN mapping state from the peer's initial TSN, and (re)builds
 * the inbound stream array sized by the peer's outbound stream count.
 * Returns < 0 on error (per the comment below).
 * NOTE(review): embedded line-number gaps show dropped lines (braces,
 * return statements, some call arguments); code left byte-identical.
 */
165 * process peer "INIT/INIT-ACK" chunk returns value < 0 on error
168 sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb,
169 struct sctp_nets *net)
171 struct sctp_init *init;
172 struct sctp_association *asoc;
173 struct sctp_nets *lnet;
178 /* save off parameters */
179 asoc->peer_vtag = ntohl(init->initiate_tag);
180 asoc->peers_rwnd = ntohl(init->a_rwnd);
181 if (TAILQ_FIRST(&asoc->nets)) {
182 /* update any ssthresh's that may have a default */
183 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
184 lnet->ssthresh = asoc->peers_rwnd;
186 if (sctp_logging_level & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
187 sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION);
/* send lock guards the outbound stream trimming below */
191 SCTP_TCB_SEND_LOCK(stcb);
192 if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) {
194 struct sctp_stream_out *outs;
195 struct sctp_stream_queue_pending *sp;
197 /* cut back on number of streams */
198 newcnt = ntohs(init->num_inbound_streams);
199 /* This if is probably not needed but I am cautious */
201 /* First make sure no data chunks are trapped */
/* drain and fail any pending sends on streams being abandoned */
202 for (i = newcnt; i < asoc->pre_open_streams; i++) {
203 outs = &asoc->strmout[i];
204 sp = TAILQ_FIRST(&outs->outqueue);
206 TAILQ_REMOVE(&outs->outqueue, sp,
208 asoc->stream_queue_cnt--;
209 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL,
210 stcb, SCTP_NOTIFY_DATAGRAM_UNSENT,
213 sctp_m_freem(sp->data);
216 sctp_free_remote_addr(sp->net);
219 SCTP_PRINTF("sp:%p tcb:%p weird free case\n",
222 sctp_free_a_strmoq(stcb, sp);
223 /* sa_ignore FREED_MEMORY */
224 sp = TAILQ_FIRST(&outs->outqueue);
228 /* cut back the count and abandon the upper streams */
229 asoc->pre_open_streams = newcnt;
231 SCTP_TCB_SEND_UNLOCK(stcb);
232 asoc->streamoutcnt = asoc->pre_open_streams;
/* initialize inbound TSN tracking from the peer's initial TSN */
234 asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;
235 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
236 sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
238 /* This is the next one we expect */
239 asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;
241 asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
242 asoc->cumulative_tsn = asoc->asconf_seq_in;
243 asoc->last_echo_tsn = asoc->asconf_seq_in;
244 asoc->advanced_peer_ack_point = asoc->last_acked_seq;
245 /* open the requested streams */
247 if (asoc->strmin != NULL) {
248 /* Free the old ones */
249 struct sctp_queued_to_read *ctl;
251 for (i = 0; i < asoc->streamincnt; i++) {
252 ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
254 TAILQ_REMOVE(&asoc->strmin[i].inqueue, ctl, next);
255 sctp_free_remote_addr(ctl->whoFrom);
256 sctp_m_freem(ctl->data);
258 sctp_free_a_readq(stcb, ctl);
259 ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
262 SCTP_FREE(asoc->strmin, SCTP_M_STRMI);
/* inbound stream count is the peer's outbound count, clamped */
264 asoc->streamincnt = ntohs(init->num_outbound_streams);
265 if (asoc->streamincnt > MAX_SCTP_STREAMS) {
266 asoc->streamincnt = MAX_SCTP_STREAMS;
268 SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt *
269 sizeof(struct sctp_stream_in), SCTP_M_STRMI);
270 if (asoc->strmin == NULL) {
271 /* we didn't get memory for the streams! */
272 SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n");
275 for (i = 0; i < asoc->streamincnt; i++) {
276 asoc->strmin[i].stream_no = i;
277 asoc->strmin[i].last_sequence_delivered = 0xffff;
279 * U-stream ranges will be set when the cookie is unpacked.
280 * Or for the INIT sender they are un set (if pr-sctp not
281 * supported) when the INIT-ACK arrives.
283 TAILQ_INIT(&asoc->strmin[i].inqueue);
284 asoc->strmin[i].delivery_started = 0;
287 * load_address_from_init will put the addresses into the
288 * association when the COOKIE is processed or the INIT-ACK is
289 * processed. Both types of COOKIE's existing and new call this
290 * routine. It will remove addresses that are no longer in the
291 * association (for the restarting case where addresses are
292 * removed). Up front when the INIT arrives we will discard it if it
293 * is a restart and new addresses have been added.
295 /* sa_ignore MEMLEAK */
/*
 * sctp_process_init_ack:
 * Consume a validated INIT-ACK: reject unrecognized/illegal parameters
 * (aborting the association on protocol violation), fold the peer's
 * parameters in via sctp_process_init(), load the peer's addresses,
 * negotiate the HMAC id, stop the INIT timer at the primary, compute an
 * RTO sample, and queue a COOKIE-ECHO.  If the INIT-ACK carries no
 * state cookie, the peer is broken: build a "missing mandatory param"
 * error (SCTP_STATE_COOKIE) and abort.  Sets *abort_no_unlock on the
 * abort paths.  Returns < 0 on error (per the comment below).
 * NOTE(review): embedded line-number gaps show dropped lines; code is
 * left byte-identical rather than reconstructed.
 */
300 * INIT-ACK message processing/consumption returns value < 0 on error
303 sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
304 struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
305 struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
307 struct sctp_association *asoc;
309 int retval, abort_flag;
310 uint32_t initack_limit;
312 /* First verify that we have no illegal param's */
316 op_err = sctp_arethere_unrecognized_parameters(m,
317 (offset + sizeof(struct sctp_init_chunk)),
318 &abort_flag, (struct sctp_chunkhdr *)cp);
320 /* Send an abort and notify peer */
321 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_CAUSE_PROTOCOL_VIOLATION, op_err);
322 *abort_no_unlock = 1;
326 /* process the peer's parameters in the INIT-ACK */
327 retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb, net);
331 initack_limit = offset + ntohs(cp->ch.chunk_length);
332 /* load all addresses */
333 if ((retval = sctp_load_addresses_from_init(stcb, m, iphlen,
334 (offset + sizeof(struct sctp_init_chunk)), initack_limit, sh,
336 /* Huh, we should abort */
337 SCTPDBG(SCTP_DEBUG_INPUT1,
338 "Load addresses from INIT causes an abort %d\n",
340 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
342 *abort_no_unlock = 1;
/* pick the HMAC algorithm both sides support */
345 stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
346 stcb->asoc.local_hmacs);
348 sctp_queue_op_err(stcb, op_err);
349 /* queuing will steal away the mbuf chain to the out queue */
352 /* extract the cookie and queue it to "echo" it back... */
353 stcb->asoc.overall_error_count = 0;
354 net->error_count = 0;
357 * Cancel the INIT timer, We do this first before queueing the
358 * cookie. We always cancel at the primary to assue that we are
359 * canceling the timer started by the INIT which always goes to the
362 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb,
363 asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4);
365 /* calculate the RTO */
366 net->RTO = sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered);
368 retval = sctp_send_cookie_echo(m, offset, stcb, net);
371 * No cookie, we probably should send a op error. But in any
372 * case if there is no cookie in the INIT-ACK, we can
373 * abandon the peer, its broke.
376 /* We abort with an error of missing mandatory param */
378 sctp_generate_invmanparam(SCTP_CAUSE_MISSING_PARAM);
381 * Expand beyond to include the mandatory
384 struct sctp_inv_mandatory_param *mp;
386 SCTP_BUF_LEN(op_err) =
387 sizeof(struct sctp_inv_mandatory_param);
389 struct sctp_inv_mandatory_param *);
390 /* Subtract the reserved param */
392 htons(sizeof(struct sctp_inv_mandatory_param) - 2);
393 mp->num_param = htonl(1);
394 mp->param = htons(SCTP_STATE_COOKIE);
397 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
399 *abort_no_unlock = 1;
/*
 * sctp_handle_heartbeat_ack:
 * Process a HEARTBEAT-ACK: reconstruct the echoed destination address
 * (IPv4 or IPv6) from the heartbeat info, look up the matching net, and
 * on a correct random-value match confirm an UNCONFIRMED address.  Also
 * handles a pending set-primary request, clears the net's error count,
 * marks a previously unreachable address reachable (notifying the ULP),
 * and feeds the echoed timestamp into the RTO calculation.
 * NOTE(review): embedded line-number gaps show dropped lines (length
 * checks, returns, braces); code left byte-identical.
 */
407 sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
408 struct sctp_tcb *stcb, struct sctp_nets *net)
410 struct sockaddr_storage store;
411 struct sockaddr_in *sin;
412 struct sockaddr_in6 *sin6;
413 struct sctp_nets *r_net;
/* HEARTBEAT-ACK must be exactly one heartbeat chunk in size */
416 if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) {
420 sin = (struct sockaddr_in *)&store;
421 sin6 = (struct sockaddr_in6 *)&store;
423 memset(&store, 0, sizeof(store));
/* rebuild the sockaddr that was echoed back in the HB info */
424 if (cp->heartbeat.hb_info.addr_family == AF_INET &&
425 cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) {
426 sin->sin_family = cp->heartbeat.hb_info.addr_family;
427 sin->sin_len = cp->heartbeat.hb_info.addr_len;
428 sin->sin_port = stcb->rport;
429 memcpy(&sin->sin_addr, cp->heartbeat.hb_info.address,
430 sizeof(sin->sin_addr));
431 } else if (cp->heartbeat.hb_info.addr_family == AF_INET6 &&
432 cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) {
433 sin6->sin6_family = cp->heartbeat.hb_info.addr_family;
434 sin6->sin6_len = cp->heartbeat.hb_info.addr_len;
435 sin6->sin6_port = stcb->rport;
436 memcpy(&sin6->sin6_addr, cp->heartbeat.hb_info.address,
437 sizeof(sin6->sin6_addr));
441 r_net = sctp_findnet(stcb, (struct sockaddr *)sin);
443 SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n");
/* random values must match what we sent to confirm this address */
446 if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
447 (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) &&
448 (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) {
450 * If the its a HB and it's random value is correct when can
451 * confirm the destination.
453 r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
454 if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) {
455 stcb->asoc.primary_destination = r_net;
456 r_net->dest_state &= ~SCTP_ADDR_WAS_PRIMARY;
457 r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
458 r_net = TAILQ_FIRST(&stcb->asoc.nets);
459 if (r_net != stcb->asoc.primary_destination) {
461 * first one on the list is NOT the primary
462 * sctp_cmpaddr() is much more efficent if
463 * the primary is the first on the list,
466 TAILQ_REMOVE(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
467 TAILQ_INSERT_HEAD(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
470 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
471 stcb, 0, (void *)r_net);
473 r_net->error_count = 0;
474 r_net->hb_responded = 1;
/* echoed send-time, used below for the RTO sample */
475 tv.tv_sec = cp->heartbeat.hb_info.time_value_1;
476 tv.tv_usec = cp->heartbeat.hb_info.time_value_2;
477 if (r_net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
478 r_net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
479 r_net->dest_state |= SCTP_ADDR_REACHABLE;
480 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
481 SCTP_HEARTBEAT_SUCCESS, (void *)r_net);
482 /* now was it the primary? if so restore */
483 if (r_net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
484 (void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, r_net);
487 /* Now lets do a RTO with this */
488 r_net->RTO = sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv);
/*
 * sctp_handle_abort:
 * Process a peer ABORT: stop receive timers, notify the ULP of the
 * abort, bump the aborted-association statistic (decrementing the
 * established gauge if the association was OPEN or SHUTDOWN_RECEIVED),
 * and free the association.
 * NOTE(review): embedded line-number gaps show dropped lines (braces,
 * early-return guards); code left byte-identical.
 */
492 sctp_handle_abort(struct sctp_abort_chunk *cp,
493 struct sctp_tcb *stcb, struct sctp_nets *net)
495 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n");
499 /* stop any receive timers */
500 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_5);
501 /* notify user of the abort and clean up... */
502 sctp_abort_notification(stcb, 0);
504 SCTP_STAT_INCR_COUNTER32(sctps_aborted);
505 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
506 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
507 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
509 #ifdef SCTP_ASOCLOG_OF_TSNS
510 sctp_print_out_track_log(stcb);
512 sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
513 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n");
/*
 * sctp_handle_shutdown:
 * Process a peer SHUTDOWN: ignore it in COOKIE_WAIT/COOKIE_ECHOED and
 * on a bad chunk size, acknowledge data via sctp_update_acked(),
 * terminate any partial-delivery read in progress, move to
 * SHUTDOWN_RECEIVED (notifying the ULP of the peer-initiated shutdown),
 * then — only if the send/sent queues and stream wheel are all empty —
 * send SHUTDOWN-ACK and enter SHUTDOWN_ACK_SENT with its timer started.
 * If data is still queued, returning lets the output path push it.
 * NOTE(review): embedded line-number gaps show dropped lines (returns,
 * braces, timer-start arguments); code left byte-identical.
 */
517 sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
518 struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag)
520 struct sctp_association *asoc;
521 int some_on_streamwheel;
523 SCTPDBG(SCTP_DEBUG_INPUT2,
524 "sctp_handle_shutdown: handling SHUTDOWN\n");
528 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
529 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
532 if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) {
533 /* Shutdown NOT the expected size */
536 sctp_update_acked(stcb, cp, net, abort_flag);
/* a shutdown ends any partial-delivery (PD-API) read in progress */
538 if (asoc->control_pdapi) {
540 * With a normal shutdown we assume the end of last record.
542 SCTP_INP_READ_LOCK(stcb->sctp_ep);
543 asoc->control_pdapi->end_added = 1;
544 asoc->control_pdapi->pdapi_aborted = 1;
545 asoc->control_pdapi = NULL;
546 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
547 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
549 /* goto SHUTDOWN_RECEIVED state to block new requests */
550 if (stcb->sctp_socket) {
551 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
552 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) &&
553 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
554 asoc->state = SCTP_STATE_SHUTDOWN_RECEIVED;
556 * notify upper layer that peer has initiated a
559 sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL);
562 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
565 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
567 * stop the shutdown timer, since we WILL move to
570 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_7);
572 /* Now are we there yet? */
573 some_on_streamwheel = 0;
574 if (!TAILQ_EMPTY(&asoc->out_wheel)) {
575 /* Check to see if some data queued */
576 struct sctp_stream_out *outs;
578 TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
579 if (!TAILQ_EMPTY(&outs->outqueue)) {
580 some_on_streamwheel = 1;
585 if (!TAILQ_EMPTY(&asoc->send_queue) ||
586 !TAILQ_EMPTY(&asoc->sent_queue) ||
587 some_on_streamwheel) {
588 /* By returning we will push more data out */
591 /* no outstanding data to send, so move on... */
592 /* send SHUTDOWN-ACK */
593 sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
594 /* move to SHUTDOWN-ACK-SENT state */
595 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
596 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
597 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
599 asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT;
601 /* start SHUTDOWN timer */
602 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep,
/*
 * sctp_handle_shutdown_ack:
 * Process a peer SHUTDOWN-ACK.  Ignored (TCB unlocked) unless we are in
 * SHUTDOWN_SENT or SHUTDOWN_ACK_SENT.  Ends any in-progress
 * partial-delivery read, reports any still-queued outbound data as
 * failed, stops the SHUTDOWN timer, sends SHUTDOWN-COMPLETE, notifies
 * the ULP that the association is down (clearing the TCP-model socket's
 * send buffer count), and frees the association.
 * NOTE(review): embedded line-number gaps show dropped lines (braces,
 * returns); code left byte-identical.
 */
608 sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp,
609 struct sctp_tcb *stcb, struct sctp_nets *net)
611 struct sctp_association *asoc;
613 SCTPDBG(SCTP_DEBUG_INPUT2,
614 "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n");
619 /* process according to association state */
620 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
621 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
622 /* unexpected SHUTDOWN-ACK... so ignore... */
623 SCTP_TCB_UNLOCK(stcb);
/* terminate a partial-delivery read, same as a normal shutdown */
626 if (asoc->control_pdapi) {
628 * With a normal shutdown we assume the end of last record.
630 SCTP_INP_READ_LOCK(stcb->sctp_ep);
631 asoc->control_pdapi->end_added = 1;
632 asoc->control_pdapi->pdapi_aborted = 1;
633 asoc->control_pdapi = NULL;
634 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
635 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
637 /* are the queues empty? */
638 if (!TAILQ_EMPTY(&asoc->send_queue) ||
639 !TAILQ_EMPTY(&asoc->sent_queue) ||
640 !TAILQ_EMPTY(&asoc->out_wheel)) {
641 sctp_report_all_outbound(stcb, 0);
644 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_8);
645 /* send SHUTDOWN-COMPLETE */
646 sctp_send_shutdown_complete(stcb, net);
647 /* notify upper layer protocol */
648 if (stcb->sctp_socket) {
649 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL);
650 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
651 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
652 /* Set the connected flag to disconnected */
653 stcb->sctp_ep->sctp_socket->so_snd.sb_cc = 0;
656 SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
657 /* free the TCB but first save off the ep */
658 sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
659 SCTP_FROM_SCTP_INPUT + SCTP_LOC_9);
/*
 * sctp_process_unrecog_chunk:
 * React to an "unrecognized chunk" error cause from the peer.  The
 * offending chunk header sits right after the param header; switch on
 * its type: ASCONF-ACK means cleanup of pending ASCONF state,
 * FORWARD-TSN means the peer does not support PR-SCTP, anything else is
 * just logged.
 * NOTE(review): embedded line-number gaps show dropped lines (break
 * statements, braces); code left byte-identical.
 */
663 * Skip past the param header and then we will find the chunk that caused the
664 * problem. There are two possiblities ASCONF or FWD-TSN other than that and
665 * our peer must be broken.
668 sctp_process_unrecog_chunk(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr,
669 struct sctp_nets *net)
671 struct sctp_chunkhdr *chk;
673 chk = (struct sctp_chunkhdr *)((caddr_t)phdr + sizeof(*phdr));
674 switch (chk->chunk_type) {
675 case SCTP_ASCONF_ACK:
677 sctp_asconf_cleanup(stcb, net);
679 case SCTP_FORWARD_CUM_TSN:
680 stcb->asoc.peer_supports_prsctp = 0;
683 SCTPDBG(SCTP_DEBUG_INPUT2,
684 "Peer does not support chunk type %d(%x)??\n",
685 chk->chunk_type, (uint32_t) chk->chunk_type);
/*
 * sctp_process_unrecog_param:
 * React to an "unrecognized parameter" error cause: disable the local
 * feature that generated the rejected parameter (PR-SCTP, ECN nonce,
 * ASCONF address operations).  SUCCESS_REPORT/ERROR_CAUSE_IND rejection
 * is treated as a strange peer and ASCONF is turned off; anything else
 * is just logged.
 * NOTE(review): embedded line-number gaps show dropped lines (break
 * statements, braces); code left byte-identical.
 */
691 * Skip past the param header and then we will find the param that caused the
692 * problem. There are a number of param's in a ASCONF OR the prsctp param
693 * these will turn of specific features.
696 sctp_process_unrecog_param(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr)
698 struct sctp_paramhdr *pbad;
701 switch (ntohs(pbad->param_type)) {
703 case SCTP_PRSCTP_SUPPORTED:
704 stcb->asoc.peer_supports_prsctp = 0;
706 case SCTP_SUPPORTED_CHUNK_EXT:
708 /* draft-ietf-tsvwg-addip-sctp */
709 case SCTP_ECN_NONCE_SUPPORTED:
710 stcb->asoc.peer_supports_ecn_nonce = 0;
711 stcb->asoc.ecn_nonce_allowed = 0;
712 stcb->asoc.ecn_allowed = 0;
714 case SCTP_ADD_IP_ADDRESS:
715 case SCTP_DEL_IP_ADDRESS:
716 case SCTP_SET_PRIM_ADDR:
717 stcb->asoc.peer_supports_asconf = 0;
719 case SCTP_SUCCESS_REPORT:
720 case SCTP_ERROR_CAUSE_IND:
721 SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n");
722 SCTPDBG(SCTP_DEBUG_INPUT2,
723 "Turning off ASCONF to this strange peer\n");
724 stcb->asoc.peer_supports_asconf = 0;
727 SCTPDBG(SCTP_DEBUG_INPUT2,
728 "Peer does not support param type %d(%x)??\n",
729 pbad->param_type, (uint32_t) pbad->param_type);
/*
 * sctp_handle_error:
 * Walk the error causes inside an OPERATION-ERROR chunk and act on each
 * one.  Notable cases: STALE_COOKIE doubles the cookie-preserve request
 * and re-sends INIT (aborting once max_init_times is exceeded);
 * UNRECOG_CHUNK / UNRECOG_PARAM dispatch to the helpers above; several
 * causes are ignored by design (see the inline comments).  Each cause's
 * length is validated before use and the cursor advances by the 32-bit
 * padded length.
 * NOTE(review): embedded line-number gaps show dropped lines (break
 * statements, braces, returns); code left byte-identical.
 */
735 sctp_handle_error(struct sctp_chunkhdr *ch,
736 struct sctp_tcb *stcb, struct sctp_nets *net)
739 struct sctp_paramhdr *phdr;
742 struct sctp_association *asoc;
746 /* parse through all of the errors and process */
748 phdr = (struct sctp_paramhdr *)((caddr_t)ch +
749 sizeof(struct sctp_chunkhdr));
750 chklen = ntohs(ch->chunk_length) - sizeof(struct sctp_chunkhdr);
751 while ((size_t)chklen >= sizeof(struct sctp_paramhdr)) {
752 /* Process an Error Cause */
753 error_type = ntohs(phdr->param_type);
754 error_len = ntohs(phdr->param_length);
/* guard against a cause length that over-runs the chunk or is zero */
755 if ((error_len > chklen) || (error_len == 0)) {
756 /* invalid param length for this param */
757 SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in error param- chunk left:%d errorlen:%d\n",
761 switch (error_type) {
762 case SCTP_CAUSE_INVALID_STREAM:
763 case SCTP_CAUSE_MISSING_PARAM:
764 case SCTP_CAUSE_INVALID_PARAM:
765 case SCTP_CAUSE_NO_USER_DATA:
766 SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %d back? We have a bug :/ (or do they?)\n",
769 case SCTP_CAUSE_STALE_COOKIE:
771 * We only act if we have echoed a cookie and are
774 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
777 p = (int *)((caddr_t)phdr + sizeof(*phdr));
778 /* Save the time doubled */
779 asoc->cookie_preserve_req = ntohl(*p) << 1;
780 asoc->stale_cookie_count++;
781 if (asoc->stale_cookie_count >
782 asoc->max_init_times) {
783 sctp_abort_notification(stcb, 0);
784 /* now free the asoc */
785 sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_10);
788 /* blast back to INIT state */
789 asoc->state &= ~SCTP_STATE_COOKIE_ECHOED;
790 asoc->state |= SCTP_STATE_COOKIE_WAIT;
792 sctp_stop_all_cookie_timers(stcb);
793 sctp_send_initiate(stcb->sctp_ep, stcb);
796 case SCTP_CAUSE_UNRESOLVABLE_ADDR:
798 * Nothing we can do here, we don't do hostname
799 * addresses so if the peer does not like my IPv6
800 * (or IPv4 for that matter) it does not matter. If
801 * they don't support that type of address, they can
802 * NOT possibly get that packet type... i.e. with no
803 * IPv6 you can't recieve a IPv6 packet. so we can
804 * safely ignore this one. If we ever added support
805 * for HOSTNAME Addresses, then we would need to do
809 case SCTP_CAUSE_UNRECOG_CHUNK:
810 sctp_process_unrecog_chunk(stcb, phdr, net);
812 case SCTP_CAUSE_UNRECOG_PARAM:
813 sctp_process_unrecog_param(stcb, phdr);
815 case SCTP_CAUSE_COOKIE_IN_SHUTDOWN:
817 * We ignore this since the timer will drive out a
818 * new cookie anyway and there timer will drive us
819 * to send a SHUTDOWN_COMPLETE. We can't send one
820 * here since we don't have their tag.
823 case SCTP_CAUSE_DELETING_LAST_ADDR:
824 case SCTP_CAUSE_RESOURCE_SHORTAGE:
825 case SCTP_CAUSE_DELETING_SRC_ADDR:
827 * We should NOT get these here, but in a
830 SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a Operational Error?<%d>?\n",
833 case SCTP_CAUSE_OUT_OF_RESC:
835 * And what, pray tell do we do with the fact that
836 * the peer is out of resources? Not really sure we
837 * could do anything but abort. I suspect this
838 * should have came WITH an abort instead of in a
843 SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown error type = 0x%xh\n",
/* advance past this cause, padded to a 4-byte boundary */
847 adjust = SCTP_SIZE32(error_len);
849 phdr = (struct sctp_paramhdr *)((caddr_t)phdr + adjust);
/*
 * sctp_handle_init_ack:
 * Process an inbound INIT-ACK.  Validates the chunk length and the
 * mandatory parameters (non-zero initiate tag, a_rwnd >= SCTP_MIN_RWND,
 * non-zero stream counts), aborting with an invalid-mandatory-parameter
 * error on violation.  Only meaningful in COOKIE_WAIT: the primary is
 * confirmed (the INIT went there), sctp_process_init_ack() consumes the
 * parameters, the state moves to COOKIE_ECHOED (preserving a pending
 * SHUTDOWN flag), error counters are reset and the COOKIE timer is
 * restarted.  Any other state discards the chunk.
 * NOTE(review): embedded line-number gaps show dropped lines (returns,
 * break statements, braces); code left byte-identical.
 */
855 sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
856 struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
857 struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
859 struct sctp_init_ack *init_ack;
863 SCTPDBG(SCTP_DEBUG_INPUT2,
864 "sctp_handle_init_ack: handling INIT-ACK\n");
867 SCTPDBG(SCTP_DEBUG_INPUT2,
868 "sctp_handle_init_ack: TCB is null\n");
871 if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) {
873 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
874 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
876 *abort_no_unlock = 1;
879 init_ack = &cp->init;
880 /* validate parameters */
881 if (init_ack->initiate_tag == 0) {
882 /* protocol error... send an abort */
883 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
884 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
886 *abort_no_unlock = 1;
889 if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) {
890 /* protocol error... send an abort */
891 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
892 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
894 *abort_no_unlock = 1;
897 if (init_ack->num_inbound_streams == 0) {
898 /* protocol error... send an abort */
899 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
900 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
902 *abort_no_unlock = 1;
905 if (init_ack->num_outbound_streams == 0) {
906 /* protocol error... send an abort */
907 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
908 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
910 *abort_no_unlock = 1;
913 /* process according to association state... */
914 state = &stcb->asoc.state;
915 switch (*state & SCTP_STATE_MASK) {
916 case SCTP_STATE_COOKIE_WAIT:
917 /* this is the expected state for this chunk */
918 /* process the INIT-ACK parameters */
919 if (stcb->asoc.primary_destination->dest_state &
920 SCTP_ADDR_UNCONFIRMED) {
922 * The primary is where we sent the INIT, we can
923 * always consider it confirmed when the INIT-ACK is
924 * returned. Do this before we load addresses
927 stcb->asoc.primary_destination->dest_state &=
928 ~SCTP_ADDR_UNCONFIRMED;
929 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
930 stcb, 0, (void *)stcb->asoc.primary_destination);
932 if (sctp_process_init_ack(m, iphlen, offset, sh, cp, stcb,
933 net, abort_no_unlock, vrf_id) < 0) {
934 /* error in parsing parameters */
937 /* update our state */
938 SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n");
/* keep a pending-shutdown flag across the state transition */
939 if (*state & SCTP_STATE_SHUTDOWN_PENDING) {
940 *state = SCTP_STATE_COOKIE_ECHOED |
941 SCTP_STATE_SHUTDOWN_PENDING;
943 *state = SCTP_STATE_COOKIE_ECHOED;
946 /* reset the RTO calc */
947 stcb->asoc.overall_error_count = 0;
948 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
950 * collapse the init timer back in case of a exponential
953 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep,
956 * the send at the end of the inbound data processing will
957 * cause the cookie to be sent
960 case SCTP_STATE_SHUTDOWN_SENT:
961 /* incorrect state... discard */
963 case SCTP_STATE_COOKIE_ECHOED:
964 /* incorrect state... discard */
966 case SCTP_STATE_OPEN:
967 /* incorrect state... discard */
969 case SCTP_STATE_EMPTY:
970 case SCTP_STATE_INUSE:
972 /* incorrect state... discard */
976 SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n");
982 * handle a state cookie for an existing association m: input packet mbuf
983 * chain-- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a
984 * "split" mbuf and the cookie signature does not exist offset: offset into
985 * mbuf to the cookie-echo chunk
987 static struct sctp_tcb *
988 sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
989 struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
990 struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net,
991 struct sockaddr *init_src, int *notification, sctp_assoc_t * sac_assoc_id,
994 struct sctp_association *asoc;
995 struct sctp_init_chunk *init_cp, init_buf;
996 struct sctp_init_ack_chunk *initack_cp, initack_buf;
998 int init_offset, initack_offset, i;
1003 /* I know that the TCB is non-NULL from the caller */
1005 for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) {
1006 if (asoc->cookie_how[how_indx] == 0)
1009 if (how_indx < sizeof(asoc->cookie_how)) {
1010 asoc->cookie_how[how_indx] = 1;
1012 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
1013 /* SHUTDOWN came in after sending INIT-ACK */
1014 struct mbuf *op_err;
1015 struct sctp_paramhdr *ph;
1017 sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
1018 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
1019 0, M_DONTWAIT, 1, MT_DATA);
1020 if (op_err == NULL) {
1024 /* pre-reserve some space */
1025 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
1026 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
1027 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1029 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr);
1030 ph = mtod(op_err, struct sctp_paramhdr *);
1031 ph->param_type = htons(SCTP_CAUSE_COOKIE_IN_SHUTDOWN);
1032 ph->param_length = htons(sizeof(struct sctp_paramhdr));
1033 sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag,
1035 if (how_indx < sizeof(asoc->cookie_how))
1036 asoc->cookie_how[how_indx] = 2;
1040 * find and validate the INIT chunk in the cookie (peer's info) the
1041 * INIT should start after the cookie-echo header struct (chunk
1042 * header, state cookie header struct)
1044 init_offset = offset += sizeof(struct sctp_cookie_echo_chunk);
1046 init_cp = (struct sctp_init_chunk *)
1047 sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
1048 (uint8_t *) & init_buf);
1049 if (init_cp == NULL) {
1050 /* could not pull a INIT chunk in cookie */
1053 chk_length = ntohs(init_cp->ch.chunk_length);
1054 if (init_cp->ch.chunk_type != SCTP_INITIATION) {
1058 * find and validate the INIT-ACK chunk in the cookie (my info) the
1059 * INIT-ACK follows the INIT chunk
1061 initack_offset = init_offset + SCTP_SIZE32(chk_length);
1062 initack_cp = (struct sctp_init_ack_chunk *)
1063 sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
1064 (uint8_t *) & initack_buf);
1065 if (initack_cp == NULL) {
1066 /* could not pull INIT-ACK chunk in cookie */
1069 chk_length = ntohs(initack_cp->ch.chunk_length);
1070 if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
1073 if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1074 (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) {
1076 * case D in Section 5.2.4 Table 2: MMAA process accordingly
1077 * to get into the OPEN state
1079 if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1081 panic("Case D and non-match seq?");
1083 SCTP_PRINTF("Case D, seq non-match %x vs %x?\n",
1084 ntohl(initack_cp->init.initial_tsn),
1085 asoc->init_seq_number);
1088 switch SCTP_GET_STATE
1090 case SCTP_STATE_COOKIE_WAIT:
1091 case SCTP_STATE_COOKIE_ECHOED:
1093 * INIT was sent but got a COOKIE_ECHO with the
1094 * correct tags... just accept it...but we must
1095 * process the init so that we can make sure we have
1096 * the right seq no's.
1098 /* First we must process the INIT !! */
1099 retval = sctp_process_init(init_cp, stcb, net);
1101 if (how_indx < sizeof(asoc->cookie_how))
1102 asoc->cookie_how[how_indx] = 3;
1105 /* we have already processed the INIT so no problem */
1106 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb,
1107 net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_11);
1108 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_12);
1109 /* update current state */
1110 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
1111 SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1113 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1114 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1115 asoc->state = SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING;
1116 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1117 stcb->sctp_ep, stcb, asoc->primary_destination);
1120 /* if ok, move to OPEN state */
1121 asoc->state = SCTP_STATE_OPEN;
1123 SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1124 sctp_stop_all_cookie_timers(stcb);
1125 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1126 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1127 (inp->sctp_socket->so_qlimit == 0)
1130 * Here is where collision would go if we
1131 * did a connect() and instead got a
1132 * init/init-ack/cookie done before the
1133 * init-ack came back..
1135 stcb->sctp_ep->sctp_flags |=
1136 SCTP_PCB_FLAGS_CONNECTED;
1137 soisconnected(stcb->sctp_ep->sctp_socket);
1139 /* notify upper layer */
1140 *notification = SCTP_NOTIFY_ASSOC_UP;
1142 * since we did not send a HB make sure we don't
1145 net->hb_responded = 1;
1146 net->RTO = sctp_calculate_rto(stcb, asoc, net,
1147 &cookie->time_entered);
1149 if (stcb->asoc.sctp_autoclose_ticks &&
1150 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) {
1151 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
1157 * we're in the OPEN state (or beyond), so peer must
1158 * have simply lost the COOKIE-ACK
1162 sctp_stop_all_cookie_timers(stcb);
1164 * We ignore the return code here.. not sure if we should
1165 * somehow abort.. but we do have an existing asoc. This
1166 * really should not fail.
1168 if (sctp_load_addresses_from_init(stcb, m, iphlen,
1169 init_offset + sizeof(struct sctp_init_chunk),
1170 initack_offset, sh, init_src)) {
1171 if (how_indx < sizeof(asoc->cookie_how))
1172 asoc->cookie_how[how_indx] = 4;
1175 /* respond with a COOKIE-ACK */
1176 sctp_toss_old_cookies(stcb, asoc);
1177 sctp_send_cookie_ack(stcb);
1178 if (how_indx < sizeof(asoc->cookie_how))
1179 asoc->cookie_how[how_indx] = 5;
1182 if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1183 ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag &&
1184 cookie->tie_tag_my_vtag == 0 &&
1185 cookie->tie_tag_peer_vtag == 0) {
1187 * case C in Section 5.2.4 Table 2: XMOO silently discard
1189 if (how_indx < sizeof(asoc->cookie_how))
1190 asoc->cookie_how[how_indx] = 6;
1193 if (ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag &&
1194 (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag ||
1195 init_cp->init.initiate_tag == 0)) {
1197 * case B in Section 5.2.4 Table 2: MXAA or MOAA my info
1198 * should be ok, re-accept peer info
1200 if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1202 * Extension of case C. If we hit this, then the
1203 * random number generator returned the same vtag
1204 * when we first sent our INIT-ACK and when we later
1205 * sent our INIT. The side with the seq numbers that
1206 * are different will be the one that normally
1207 * would have hit case C. This in effect "extends"
1208 * our vtags in this collision case to be 64 bits.
1209 * The same collision could occur aka you get both
1210 * vtag and seq number the same twice in a row.. but
1211 * is much less likely. If it did happen then we
1212 * would proceed through and bring up the assoc.. we
1213 * may end up with the wrong stream setup however..
1214 * which would be bad.. but there is no way to
1215 * tell.. until we send on a stream that does not
1218 if (how_indx < sizeof(asoc->cookie_how))
1219 asoc->cookie_how[how_indx] = 7;
1223 if (how_indx < sizeof(asoc->cookie_how))
1224 asoc->cookie_how[how_indx] = 8;
1225 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_13);
1226 sctp_stop_all_cookie_timers(stcb);
1228 * since we did not send a HB make sure we don't double
1231 net->hb_responded = 1;
1232 if (stcb->asoc.sctp_autoclose_ticks &&
1233 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
1234 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
1237 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1238 asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
1240 /* Note last_cwr_tsn? where is this used? */
1241 asoc->last_cwr_tsn = asoc->init_seq_number - 1;
1242 if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) {
1244 * Ok the peer probably discarded our data (if we
1245 * echoed a cookie+data). So anything on the
1246 * sent_queue should be marked for retransmit, we
1247 * may not get something to kick us so it COULD
1248 * still take a timeout to move these.. but it can't
1249 * hurt to mark them.
1251 struct sctp_tmit_chunk *chk;
1253 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
1254 if (chk->sent < SCTP_DATAGRAM_RESEND) {
1255 chk->sent = SCTP_DATAGRAM_RESEND;
1256 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1262 /* process the INIT info (peer's info) */
1263 retval = sctp_process_init(init_cp, stcb, net);
1265 if (how_indx < sizeof(asoc->cookie_how))
1266 asoc->cookie_how[how_indx] = 9;
1269 if (sctp_load_addresses_from_init(stcb, m, iphlen,
1270 init_offset + sizeof(struct sctp_init_chunk),
1271 initack_offset, sh, init_src)) {
1272 if (how_indx < sizeof(asoc->cookie_how))
1273 asoc->cookie_how[how_indx] = 10;
1276 if ((asoc->state & SCTP_STATE_COOKIE_WAIT) ||
1277 (asoc->state & SCTP_STATE_COOKIE_ECHOED)) {
1278 *notification = SCTP_NOTIFY_ASSOC_UP;
1280 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1281 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1282 (inp->sctp_socket->so_qlimit == 0)) {
1283 stcb->sctp_ep->sctp_flags |=
1284 SCTP_PCB_FLAGS_CONNECTED;
1285 soisconnected(stcb->sctp_ep->sctp_socket);
1287 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
1288 SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1290 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1291 SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1292 SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1293 } else if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
1294 SCTP_STAT_INCR_COUNTER32(sctps_restartestab);
1296 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1298 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1299 asoc->state = SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING;
1300 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1301 stcb->sctp_ep, stcb, asoc->primary_destination);
1304 asoc->state = SCTP_STATE_OPEN;
1306 sctp_stop_all_cookie_timers(stcb);
1307 sctp_toss_old_cookies(stcb, asoc);
1308 sctp_send_cookie_ack(stcb);
1311 * only if we have retrans set do we do this. What
1312 * this call does is get only the COOKIE-ACK out and
1313 * then when we return the normal call to
1314 * sctp_chunk_output will get the retrans out behind
1317 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_COOKIE_ACK);
1319 if (how_indx < sizeof(asoc->cookie_how))
1320 asoc->cookie_how[how_indx] = 11;
1324 if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1325 ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) &&
1326 cookie->tie_tag_my_vtag == asoc->my_vtag_nonce &&
1327 cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce &&
1328 cookie->tie_tag_peer_vtag != 0) {
1329 struct sctpasochead *head;
1332 * case A in Section 5.2.4 Table 2: XXMM (peer restarted)
1335 if (how_indx < sizeof(asoc->cookie_how))
1336 asoc->cookie_how[how_indx] = 12;
1337 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_14);
1338 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_15);
1340 *sac_assoc_id = sctp_get_associd(stcb);
1341 /* notify upper layer */
1342 *notification = SCTP_NOTIFY_ASSOC_RESTART;
1343 atomic_add_int(&stcb->asoc.refcnt, 1);
1344 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_OPEN) &&
1345 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
1346 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
1347 SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1349 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
1350 SCTP_STAT_INCR_GAUGE32(sctps_restartestab);
1351 } else if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
1352 SCTP_STAT_INCR_GAUGE32(sctps_collisionestab);
1354 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1355 asoc->state = SCTP_STATE_OPEN |
1356 SCTP_STATE_SHUTDOWN_PENDING;
1357 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1358 stcb->sctp_ep, stcb, asoc->primary_destination);
1360 } else if (!(asoc->state & SCTP_STATE_SHUTDOWN_SENT)) {
1361 /* move to OPEN state, if not in SHUTDOWN_SENT */
1362 asoc->state = SCTP_STATE_OPEN;
1364 asoc->pre_open_streams =
1365 ntohs(initack_cp->init.num_outbound_streams);
1366 asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
1367 asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
1369 asoc->last_cwr_tsn = asoc->init_seq_number - 1;
1370 asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
1372 asoc->str_reset_seq_in = asoc->init_seq_number;
1374 asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1375 if (asoc->mapping_array) {
1376 memset(asoc->mapping_array, 0,
1377 asoc->mapping_array_size);
1379 SCTP_TCB_UNLOCK(stcb);
1380 SCTP_INP_INFO_WLOCK();
1381 SCTP_INP_WLOCK(stcb->sctp_ep);
1382 SCTP_TCB_LOCK(stcb);
1383 atomic_add_int(&stcb->asoc.refcnt, -1);
1384 /* send up all the data */
1385 SCTP_TCB_SEND_LOCK(stcb);
1387 sctp_report_all_outbound(stcb, 1);
1388 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
1389 stcb->asoc.strmout[i].stream_no = i;
1390 stcb->asoc.strmout[i].next_sequence_sent = 0;
1391 stcb->asoc.strmout[i].last_msg_incomplete = 0;
1393 /* process the INIT-ACK info (my info) */
1394 asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
1395 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1397 /* pull from vtag hash */
1398 LIST_REMOVE(stcb, sctp_asocs);
1399 /* re-insert to new vtag position */
1400 head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
1401 sctppcbinfo.hashasocmark)];
1403 * put it in the bucket in the vtag hash of assoc's for the
1406 LIST_INSERT_HEAD(head, stcb, sctp_asocs);
1408 /* Is this the first restart? */
1409 if (stcb->asoc.in_restart_hash == 0) {
1410 /* Ok add it to assoc_id vtag hash */
1411 head = &sctppcbinfo.sctp_restarthash[SCTP_PCBHASH_ASOC(stcb->asoc.assoc_id,
1412 sctppcbinfo.hashrestartmark)];
1413 LIST_INSERT_HEAD(head, stcb, sctp_tcbrestarhash);
1414 stcb->asoc.in_restart_hash = 1;
1416 /* process the INIT info (peer's info) */
1417 SCTP_TCB_SEND_UNLOCK(stcb);
1418 SCTP_INP_WUNLOCK(stcb->sctp_ep);
1419 SCTP_INP_INFO_WUNLOCK();
1421 retval = sctp_process_init(init_cp, stcb, net);
1423 if (how_indx < sizeof(asoc->cookie_how))
1424 asoc->cookie_how[how_indx] = 13;
1429 * since we did not send a HB make sure we don't double
1432 net->hb_responded = 1;
1434 if (sctp_load_addresses_from_init(stcb, m, iphlen,
1435 init_offset + sizeof(struct sctp_init_chunk),
1436 initack_offset, sh, init_src)) {
1437 if (how_indx < sizeof(asoc->cookie_how))
1438 asoc->cookie_how[how_indx] = 14;
1442 /* respond with a COOKIE-ACK */
1443 sctp_stop_all_cookie_timers(stcb);
1444 sctp_toss_old_cookies(stcb, asoc);
1445 sctp_send_cookie_ack(stcb);
1446 if (how_indx < sizeof(asoc->cookie_how))
1447 asoc->cookie_how[how_indx] = 15;
1451 if (how_indx < sizeof(asoc->cookie_how))
1452 asoc->cookie_how[how_indx] = 16;
1453 /* all other cases... */
1459 * handle a state cookie for a new association m: input packet mbuf chain--
1460 * assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a "split" mbuf
1461 * and the cookie signature does not exist offset: offset into mbuf to the
1462 * cookie-echo chunk length: length of the cookie chunk to: where the init
1463 * was from returns a new TCB
/*
 * Build a brand-new association from a validated COOKIE-ECHO: extract the
 * embedded INIT (peer's info) and INIT-ACK (our info) from the cookie,
 * allocate a TCB, process both chunks, verify any AUTH chunk that was
 * skipped earlier, move the assoc to OPEN and respond with a COOKIE-ACK.
 * Returns the new stcb, or NULL on any failure (malformed cookie, out of
 * resources, address-load failure, AUTH HMAC failure, ...).
 */
1465 static struct sctp_tcb *
1466 sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
1467 struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1468 struct sctp_inpcb *inp, struct sctp_nets **netp,
1469 struct sockaddr *init_src, int *notification,
1470 int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1473 struct sctp_tcb *stcb;
1474 struct sctp_init_chunk *init_cp, init_buf;
1475 struct sctp_init_ack_chunk *initack_cp, initack_buf;
1476 struct sockaddr_storage sa_store;
1477 struct sockaddr *initack_src = (struct sockaddr *)&sa_store;
1478 struct sockaddr_in *sin;
1479 struct sockaddr_in6 *sin6;
1480 struct sctp_association *asoc;
1482 int init_offset, initack_offset, initack_limit;
1486 uint8_t auth_chunk_buf[SCTP_PARAM_BUFFER_SIZE];
1489 * find and validate the INIT chunk in the cookie (peer's info) the
1490 * INIT should start after the cookie-echo header struct (chunk
1491 * header, state cookie header struct)
1493 init_offset = offset + sizeof(struct sctp_cookie_echo_chunk);
1494 init_cp = (struct sctp_init_chunk *)
1495 sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
1496 (uint8_t *) & init_buf);
1497 if (init_cp == NULL) {
1498 /* could not pull a INIT chunk in cookie */
1499 SCTPDBG(SCTP_DEBUG_INPUT1,
1500 "process_cookie_new: could not pull INIT chunk hdr\n");
1503 chk_length = ntohs(init_cp->ch.chunk_length);
1504 if (init_cp->ch.chunk_type != SCTP_INITIATION) {
1505 SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n");
/* INIT-ACK sits immediately after the (32-bit padded) INIT chunk */
1508 initack_offset = init_offset + SCTP_SIZE32(chk_length);
1510 * find and validate the INIT-ACK chunk in the cookie (my info) the
1511 * INIT-ACK follows the INIT chunk
1513 initack_cp = (struct sctp_init_ack_chunk *)
1514 sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
1515 (uint8_t *) & initack_buf);
1516 if (initack_cp == NULL) {
1517 /* could not pull INIT-ACK chunk in cookie */
1518 SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n");
1521 chk_length = ntohs(initack_cp->ch.chunk_length);
1522 if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
1526 * NOTE: We can't use the INIT_ACK's chk_length to determine the
1527 * "initack_limit" value. This is because the chk_length field
1528 * includes the length of the cookie, but the cookie is omitted when
1529 * the INIT and INIT_ACK are tacked onto the cookie...
1531 initack_limit = offset + cookie_len;
1534 * now that we know the INIT/INIT-ACK are in place, create a new TCB
1537 stcb = sctp_aloc_assoc(inp, init_src, 0, &error,
1538 ntohl(initack_cp->init.initiate_tag), vrf_id);
1540 struct mbuf *op_err;
1542 /* memory problem? */
1543 SCTPDBG(SCTP_DEBUG_INPUT1,
1544 "process_cookie_new: no room for another TCB!\n");
/* abort back to the peer: we cannot host this association */
1545 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1547 sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
1548 sh, op_err, vrf_id);
1551 /* get the correct sctp_nets */
1553 *netp = sctp_findnet(stcb, init_src);
1556 /* get scope variables out of cookie */
1557 asoc->ipv4_local_scope = cookie->ipv4_scope;
1558 asoc->site_scope = cookie->site_scope;
1559 asoc->local_scope = cookie->local_scope;
1560 asoc->loopback_scope = cookie->loopback_scope;
1562 if ((asoc->ipv4_addr_legal != cookie->ipv4_addr_legal) ||
1563 (asoc->ipv6_addr_legal != cookie->ipv6_addr_legal)) {
1564 struct mbuf *op_err;
1567 * Houston we have a problem. The EP changed while the
1568 * cookie was in flight. Only recourse is to abort the
/*
 * NOTE(review): the refcnt bump/drop pair around the abort appears to
 * pin the tcb during teardown — same pattern as the later free paths;
 * confirm against sctp_pcb.c refcount rules.
 */
1571 atomic_add_int(&stcb->asoc.refcnt, 1);
1572 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1573 sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
1574 sh, op_err, vrf_id);
1575 atomic_add_int(&stcb->asoc.refcnt, -1);
1578 /* process the INIT-ACK info (my info) */
1579 old_tag = asoc->my_vtag;
/* our verification tag (and assoc id) come from the INIT-ACK we stashed */
1580 asoc->assoc_id = asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
1581 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1582 asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
/* seed every outbound sequence space from our initial TSN */
1583 asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
1584 asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
1585 asoc->last_cwr_tsn = asoc->init_seq_number - 1;
1586 asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
1587 asoc->str_reset_seq_in = asoc->init_seq_number;
1589 asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1591 /* process the INIT info (peer's info) */
1593 retval = sctp_process_init(init_cp, stcb, *netp);
/* INIT processing failed: tear the new assoc back down */
1597 atomic_add_int(&stcb->asoc.refcnt, 1);
1598 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
1599 atomic_add_int(&stcb->asoc.refcnt, -1);
1602 /* load all addresses */
1603 if (sctp_load_addresses_from_init(stcb, m, iphlen,
1604 init_offset + sizeof(struct sctp_init_chunk), initack_offset, sh,
1606 atomic_add_int(&stcb->asoc.refcnt, 1);
1607 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_17);
1608 atomic_add_int(&stcb->asoc.refcnt, -1);
1612 * verify any preceding AUTH chunk that was skipped
1614 /* pull the local authentication parameters from the cookie/init-ack */
1615 sctp_auth_get_cookie_params(stcb, m,
1616 initack_offset + sizeof(struct sctp_init_ack_chunk),
1617 initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)));
1619 struct sctp_auth_chunk *auth;
/* re-fetch the skipped AUTH chunk and run the HMAC check now that keys exist */
1621 auth = (struct sctp_auth_chunk *)
1622 sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf);
1623 if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) {
1624 /* auth HMAC failed, dump the assoc and packet */
1625 SCTPDBG(SCTP_DEBUG_AUTH1,
1626 "COOKIE-ECHO: AUTH failed\n");
1627 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_18);
1630 /* remaining chunks checked... good to go */
1631 stcb->asoc.authenticated = 1;
1634 /* update current state */
1635 SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
1636 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
/* preserve a pending shutdown across the transition to OPEN */
1637 asoc->state = SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING;
1638 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1639 stcb->sctp_ep, stcb, asoc->primary_destination);
1641 asoc->state = SCTP_STATE_OPEN;
1643 sctp_stop_all_cookie_timers(stcb);
1644 SCTP_STAT_INCR_COUNTER32(sctps_passiveestab);
1645 SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1648 * if we're doing ASCONFs, check to see if we have any new local
1649 * addresses that need to get added to the peer (eg. addresses
1650 * changed while cookie echo in flight). This needs to be done
1651 * after we go to the OPEN state to do the correct asconf
1652 * processing. else, make sure we have the correct addresses in our
1656 /* warning, we re-use sin, sin6, sa_store here! */
1657 /* pull in local_address (our "from" address) */
1658 if (cookie->laddr_type == SCTP_IPV4_ADDRESS) {
1659 /* source addr is IPv4 */
1660 sin = (struct sockaddr_in *)initack_src;
1661 memset(sin, 0, sizeof(*sin));
1662 sin->sin_family = AF_INET;
1663 sin->sin_len = sizeof(struct sockaddr_in);
1664 sin->sin_addr.s_addr = cookie->laddress[0];
1665 } else if (cookie->laddr_type == SCTP_IPV6_ADDRESS) {
1666 /* source addr is IPv6 */
1667 sin6 = (struct sockaddr_in6 *)initack_src;
1668 memset(sin6, 0, sizeof(*sin6));
1669 sin6->sin6_family = AF_INET6;
1670 sin6->sin6_len = sizeof(struct sockaddr_in6);
1671 sin6->sin6_scope_id = cookie->scope_id;
1672 memcpy(&sin6->sin6_addr, cookie->laddress,
1673 sizeof(sin6->sin6_addr));
/* presumably the fall-through for an unrecognized laddr_type: give up
 * on the new assoc — TODO confirm against full file */
1675 atomic_add_int(&stcb->asoc.refcnt, 1);
1676 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_19);
1677 atomic_add_int(&stcb->asoc.refcnt, -1);
/* reconcile our bound-address list against what the INIT-ACK carried */
1681 sctp_check_address_list(stcb, m,
1682 initack_offset + sizeof(struct sctp_init_ack_chunk),
1683 initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)),
1684 initack_src, cookie->local_scope, cookie->site_scope,
1685 cookie->ipv4_scope, cookie->loopback_scope);
1688 /* set up to notify upper layer */
1689 *notification = SCTP_NOTIFY_ASSOC_UP;
1690 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1691 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1692 (inp->sctp_socket->so_qlimit == 0)) {
1694 * This is an endpoint that called connect() how it got a
1695 * cookie that is NEW is a bit of a mystery. It must be that
1696 * the INIT was sent, but before it got there.. a complete
1697 * INIT/INIT-ACK/COOKIE arrived. But of course then it
1698 * should have gone to the other code.. not here.. oh well..
1699 * a bit of protection is worth having..
1701 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
1702 soisconnected(stcb->sctp_ep->sctp_socket);
1703 } else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
1704 (inp->sctp_socket->so_qlimit)) {
1706 * We don't want to do anything with this one. Since it is
1707 * the listening guy. The timer will get started for
1708 * accepted connections in the caller.
1712 /* since we did not send a HB make sure we don't double things */
1713 if ((netp) && (*netp))
1714 (*netp)->hb_responded = 1;
1716 if (stcb->asoc.sctp_autoclose_ticks &&
1717 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
1718 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
1720 /* respond with a COOKIE-ACK */
1721 /* calculate the RTT */
1722 if ((netp) && (*netp)) {
/* the cookie's entry timestamp gives us a first RTT sample */
1723 (*netp)->RTO = sctp_calculate_rto(stcb, asoc, *netp,
1724 &cookie->time_entered);
1726 sctp_send_cookie_ack(stcb);
1732 * handles a COOKIE-ECHO message stcb: modified to either a new or left as
1733 * existing (non-NULL) TCB
1735 static struct mbuf *
1736 sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
1737 struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp,
1738 struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp,
1739 int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1740 struct sctp_tcb **locked_tcb, uint32_t vrf_id)
1742 struct sctp_state_cookie *cookie;
1743 struct sockaddr_in6 sin6;
1744 struct sockaddr_in sin;
1745 struct sctp_tcb *l_stcb = *stcb;
1746 struct sctp_inpcb *l_inp;
1747 struct sockaddr *to;
1748 sctp_assoc_t sac_restart_id;
1749 struct sctp_pcb *ep;
1751 uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE];
1753 uint8_t cookie_ok = 0;
1754 unsigned int size_of_pkt, sig_offset, cookie_offset;
1755 unsigned int cookie_len;
1757 struct timeval time_expires;
1758 struct sockaddr_storage dest_store;
1759 struct sockaddr *localep_sa = (struct sockaddr *)&dest_store;
1761 int notification = 0;
1762 struct sctp_nets *netl;
1763 int had_a_existing_tcb = 0;
1765 SCTPDBG(SCTP_DEBUG_INPUT2,
1766 "sctp_handle_cookie: handling COOKIE-ECHO\n");
1768 if (inp_p == NULL) {
1771 /* First get the destination address setup too. */
1772 iph = mtod(m, struct ip *);
1773 if (iph->ip_v == IPVERSION) {
1775 struct sockaddr_in *lsin;
1777 lsin = (struct sockaddr_in *)(localep_sa);
1778 memset(lsin, 0, sizeof(*lsin));
1779 lsin->sin_family = AF_INET;
1780 lsin->sin_len = sizeof(*lsin);
1781 lsin->sin_port = sh->dest_port;
1782 lsin->sin_addr.s_addr = iph->ip_dst.s_addr;
1783 size_of_pkt = SCTP_GET_IPV4_LENGTH(iph);
1784 } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
1786 struct ip6_hdr *ip6;
1787 struct sockaddr_in6 *lsin6;
1789 lsin6 = (struct sockaddr_in6 *)(localep_sa);
1790 memset(lsin6, 0, sizeof(*lsin6));
1791 lsin6->sin6_family = AF_INET6;
1792 lsin6->sin6_len = sizeof(struct sockaddr_in6);
1793 ip6 = mtod(m, struct ip6_hdr *);
1794 lsin6->sin6_port = sh->dest_port;
1795 lsin6->sin6_addr = ip6->ip6_dst;
1796 size_of_pkt = SCTP_GET_IPV6_LENGTH(ip6) + iphlen;
1801 cookie = &cp->cookie;
1802 cookie_offset = offset + sizeof(struct sctp_chunkhdr);
1803 cookie_len = ntohs(cp->ch.chunk_length);
1805 if ((cookie->peerport != sh->src_port) &&
1806 (cookie->myport != sh->dest_port) &&
1807 (cookie->my_vtag != sh->v_tag)) {
1809 * invalid ports or bad tag. Note that we always leave the
1810 * v_tag in the header in network order and when we stored
1811 * it in the my_vtag slot we also left it in network order.
1812 * This maintains the match even though it may be in the
1813 * opposite byte order of the machine :->
1817 if (cookie_len > size_of_pkt ||
1818 cookie_len < sizeof(struct sctp_cookie_echo_chunk) +
1819 sizeof(struct sctp_init_chunk) +
1820 sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) {
1821 /* cookie too long! or too small */
1825 * split off the signature into its own mbuf (since it should not be
1826 * calculated in the sctp_hmac_m() call).
1828 sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE;
1829 if (sig_offset > size_of_pkt) {
1830 /* packet not correct size! */
1831 /* XXX this may already be accounted for earlier... */
1834 m_sig = m_split(m, sig_offset, M_DONTWAIT);
1835 if (m_sig == NULL) {
1836 /* out of memory or ?? */
1840 * compute the signature/digest for the cookie
1842 ep = &(*inp_p)->sctp_ep;
1845 SCTP_TCB_UNLOCK(l_stcb);
1847 SCTP_INP_RLOCK(l_inp);
1849 SCTP_TCB_LOCK(l_stcb);
1851 /* which cookie is it? */
1852 if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) &&
1853 (ep->current_secret_number != ep->last_secret_number)) {
1854 /* it's the old cookie */
1855 (void)sctp_hmac_m(SCTP_HMAC,
1856 (uint8_t *) ep->secret_key[(int)ep->last_secret_number],
1857 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
1859 /* it's the current cookie */
1860 (void)sctp_hmac_m(SCTP_HMAC,
1861 (uint8_t *) ep->secret_key[(int)ep->current_secret_number],
1862 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
1864 /* get the signature */
1865 SCTP_INP_RUNLOCK(l_inp);
1866 sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig);
1868 /* couldn't find signature */
1869 sctp_m_freem(m_sig);
1872 /* compare the received digest with the computed digest */
1873 if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) {
1874 /* try the old cookie? */
1875 if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) &&
1876 (ep->current_secret_number != ep->last_secret_number)) {
1877 /* compute digest with old */
1878 (void)sctp_hmac_m(SCTP_HMAC,
1879 (uint8_t *) ep->secret_key[(int)ep->last_secret_number],
1880 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
1882 if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0)
1890 * Now before we continue we must reconstruct our mbuf so that
1891 * normal processing of any other chunks will work.
1897 while (SCTP_BUF_NEXT(m_at) != NULL) {
1898 m_at = SCTP_BUF_NEXT(m_at);
1900 SCTP_BUF_NEXT(m_at) = m_sig;
1903 if (cookie_ok == 0) {
1904 SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n");
1905 SCTPDBG(SCTP_DEBUG_INPUT2,
1906 "offset = %u, cookie_offset = %u, sig_offset = %u\n",
1907 (uint32_t) offset, cookie_offset, sig_offset);
1911 * check the cookie timestamps to be sure it's not stale
1913 (void)SCTP_GETTIME_TIMEVAL(&now);
1914 /* Expire time is in Ticks, so we convert to seconds */
1915 time_expires.tv_sec = cookie->time_entered.tv_sec + TICKS_TO_SEC(cookie->cookie_life);
1916 time_expires.tv_usec = cookie->time_entered.tv_usec;
1917 if (timevalcmp(&now, &time_expires, >)) {
1918 /* cookie is stale! */
1919 struct mbuf *op_err;
1920 struct sctp_stale_cookie_msg *scm;
1923 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_stale_cookie_msg),
1924 0, M_DONTWAIT, 1, MT_DATA);
1925 if (op_err == NULL) {
1929 /* pre-reserve some space */
1930 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
1931 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
1932 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1935 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_stale_cookie_msg);
1936 scm = mtod(op_err, struct sctp_stale_cookie_msg *);
1937 scm->ph.param_type = htons(SCTP_CAUSE_STALE_COOKIE);
1938 scm->ph.param_length = htons((sizeof(struct sctp_paramhdr) +
1939 (sizeof(uint32_t))));
1940 /* seconds to usec */
1941 tim = (now.tv_sec - time_expires.tv_sec) * 1000000;
1944 tim = now.tv_usec - cookie->time_entered.tv_usec;
1945 scm->time_usec = htonl(tim);
1946 sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag,
1951 * Now we must see with the lookup address if we have an existing
1952 * asoc. This will only happen if we were in the COOKIE-WAIT state
1953 * and a INIT collided with us and somewhere the peer sent the
1954 * cookie on another address besides the single address our assoc
1955 * had for him. In this case we will have one of the tie-tags set at
1956 * least AND the address field in the cookie can be used to look it
1960 if (cookie->addr_type == SCTP_IPV6_ADDRESS) {
1961 memset(&sin6, 0, sizeof(sin6));
1962 sin6.sin6_family = AF_INET6;
1963 sin6.sin6_len = sizeof(sin6);
1964 sin6.sin6_port = sh->src_port;
1965 sin6.sin6_scope_id = cookie->scope_id;
1966 memcpy(&sin6.sin6_addr.s6_addr, cookie->address,
1967 sizeof(sin6.sin6_addr.s6_addr));
1968 to = (struct sockaddr *)&sin6;
1969 } else if (cookie->addr_type == SCTP_IPV4_ADDRESS) {
1970 memset(&sin, 0, sizeof(sin));
1971 sin.sin_family = AF_INET;
1972 sin.sin_len = sizeof(sin);
1973 sin.sin_port = sh->src_port;
1974 sin.sin_addr.s_addr = cookie->address[0];
1975 to = (struct sockaddr *)&sin;
1977 /* This should not happen */
1980 if ((*stcb == NULL) && to) {
1981 /* Yep, lets check */
1982 *stcb = sctp_findassociation_ep_addr(inp_p, to, netp, localep_sa, NULL);
1983 if (*stcb == NULL) {
1985 * We should have only got back the same inp. If we
1986 * got back a different ep we have a problem. The
1987 * original findep got back l_inp and now
1989 if (l_inp != *inp_p) {
1990 SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n");
1993 if (*locked_tcb == NULL) {
1995 * In this case we found the assoc only
1996 * after we locked the create lock. This
1997 * means we are in a colliding case and we
1998 * must make sure that we unlock the tcb if
1999 * its one of the cases where we throw away
2000 * the incoming packets.
2002 *locked_tcb = *stcb;
2005 * We must also increment the inp ref count
2006 * since the ref_count flags was set when we
2007 * did not find the TCB, now we found it
2008 * which reduces the refcount.. we must
2009 * raise it back out to balance it all :-)
2011 SCTP_INP_INCR_REF((*stcb)->sctp_ep);
2012 if ((*stcb)->sctp_ep != l_inp) {
2013 SCTP_PRINTF("Huh? ep:%p diff then l_inp:%p?\n",
2014 (*stcb)->sctp_ep, l_inp);
2022 cookie_len -= SCTP_SIGNATURE_SIZE;
2023 if (*stcb == NULL) {
2024 /* this is the "normal" case... get a new TCB */
2025 *stcb = sctp_process_cookie_new(m, iphlen, offset, sh, cookie,
2026 cookie_len, *inp_p, netp, to, ¬ification,
2027 auth_skipped, auth_offset, auth_len, vrf_id);
2029 /* this is abnormal... cookie-echo on existing TCB */
2030 had_a_existing_tcb = 1;
2031 *stcb = sctp_process_cookie_existing(m, iphlen, offset, sh,
2032 cookie, cookie_len, *inp_p, *stcb, *netp, to,
2033 ¬ification, &sac_restart_id, vrf_id);
2036 if (*stcb == NULL) {
2037 /* still no TCB... must be bad cookie-echo */
2041 * Ok, we built an association so confirm the address we sent the
2044 netl = sctp_findnet(*stcb, to);
2046 * This code should in theory NOT run but
2049 /* TSNH! Huh, why do I need to add this address here? */
2052 ret = sctp_add_remote_addr(*stcb, to, SCTP_DONOT_SETSCOPE,
2053 SCTP_IN_COOKIE_PROC);
2054 netl = sctp_findnet(*stcb, to);
2057 if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) {
2058 netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
2059 (void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL,
2061 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
2062 (*stcb), 0, (void *)netl);
2066 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, *inp_p,
2069 if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
2070 if (!had_a_existing_tcb ||
2071 (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
2073 * If we have a NEW cookie or the connect never
2074 * reached the connected state during collision we
2075 * must do the TCP accept thing.
2077 struct socket *so, *oso;
2078 struct sctp_inpcb *inp;
2080 if (notification == SCTP_NOTIFY_ASSOC_RESTART) {
2082 * For a restart we will keep the same
2083 * socket, no need to do anything. I THINK!!
2085 sctp_ulp_notify(notification, *stcb, 0, (void *)&sac_restart_id);
2088 oso = (*inp_p)->sctp_socket;
2090 * We do this to keep the sockets side happy during
2091 * the sonewcon ONLY.
2094 SCTP_TCB_UNLOCK((*stcb));
2095 so = sonewconn(oso, 0
2098 SCTP_INP_WLOCK((*stcb)->sctp_ep);
2099 SCTP_TCB_LOCK((*stcb));
2100 SCTP_INP_WUNLOCK((*stcb)->sctp_ep);
2102 struct mbuf *op_err;
2104 /* Too many sockets */
2105 SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n");
2106 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
2107 sctp_abort_association(*inp_p, NULL, m, iphlen,
2108 sh, op_err, vrf_id);
2109 sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_20);
2112 inp = (struct sctp_inpcb *)so->so_pcb;
2113 SCTP_INP_INCR_REF(inp);
2115 * We add the unbound flag here so that if we get an
2116 * soabort() before we get the move_pcb done, we
2117 * will properly cleanup.
2119 inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
2120 SCTP_PCB_FLAGS_CONNECTED |
2121 SCTP_PCB_FLAGS_IN_TCPPOOL |
2122 SCTP_PCB_FLAGS_UNBOUND |
2123 (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) |
2124 SCTP_PCB_FLAGS_DONT_WAKE);
2125 inp->sctp_features = (*inp_p)->sctp_features;
2126 inp->sctp_socket = so;
2127 inp->sctp_frag_point = (*inp_p)->sctp_frag_point;
2128 inp->partial_delivery_point = (*inp_p)->partial_delivery_point;
2129 inp->sctp_context = (*inp_p)->sctp_context;
2130 inp->inp_starting_point_for_iterator = NULL;
2132 * copy in the authentication parameters from the
2135 if (inp->sctp_ep.local_hmacs)
2136 sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
2137 inp->sctp_ep.local_hmacs =
2138 sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs);
2139 if (inp->sctp_ep.local_auth_chunks)
2140 sctp_free_chunklist(inp->sctp_ep.local_auth_chunks);
2141 inp->sctp_ep.local_auth_chunks =
2142 sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks);
2143 (void)sctp_copy_skeylist(&(*inp_p)->sctp_ep.shared_keys,
2144 &inp->sctp_ep.shared_keys);
2147 * Now we must move it from one hash table to
2148 * another and get the tcb in the right place.
2150 sctp_move_pcb_and_assoc(*inp_p, inp, *stcb);
2152 atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2153 SCTP_TCB_UNLOCK((*stcb));
2155 sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb, M_NOWAIT);
2156 SCTP_TCB_LOCK((*stcb));
2157 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2161 * now we must check to see if we were aborted while
2162 * the move was going on and the lock/unlock
2165 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2167 * yep it was, we leave the assoc attached
2168 * to the socket since the sctp_inpcb_free()
2169 * call will send an abort for us.
2171 SCTP_INP_DECR_REF(inp);
2174 SCTP_INP_DECR_REF(inp);
2175 /* Switch over to the new guy */
2177 sctp_ulp_notify(notification, *stcb, 0, NULL);
2180 * Pull it from the incomplete queue and wake the
2187 if ((notification) && ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
2188 sctp_ulp_notify(notification, *stcb, 0, NULL);
2194 sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp,
2195 struct sctp_tcb *stcb, struct sctp_nets *net)
2197 /* cp must not be used, others call this without a c-ack :-) */
2198 struct sctp_association *asoc;
2200 SCTPDBG(SCTP_DEBUG_INPUT2,
2201 "sctp_handle_cookie_ack: handling COOKIE-ACK\n");
2207 sctp_stop_all_cookie_timers(stcb);
2208 /* process according to association state */
2209 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
2210 /* state change only needed when I am in right state */
2211 SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
2212 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
2213 asoc->state = SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING;
2214 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
2215 stcb->sctp_ep, stcb, asoc->primary_destination);
2218 asoc->state = SCTP_STATE_OPEN;
2221 SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
2222 SCTP_STAT_INCR_GAUGE32(sctps_currestab);
2223 if (asoc->overall_error_count == 0) {
2224 net->RTO = sctp_calculate_rto(stcb, asoc, net,
2225 &asoc->time_entered);
2227 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
2228 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL);
2229 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2230 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
2231 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
2232 soisconnected(stcb->sctp_ep->sctp_socket);
2234 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
2237 * since we did not send a HB make sure we don't double
2240 net->hb_responded = 1;
2242 if (stcb->asoc.sctp_autoclose_ticks &&
2243 sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) {
2244 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
2245 stcb->sctp_ep, stcb, NULL);
2248 * set ASCONF timer if ASCONFs are pending and allowed (eg.
2249 * addresses changed when init/cookie echo in flight)
2251 if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) &&
2252 (stcb->asoc.peer_supports_asconf) &&
2253 (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) {
2254 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
2255 stcb->sctp_ep, stcb,
2256 stcb->asoc.primary_destination);
2259 /* Toss the cookie if I can */
2260 sctp_toss_old_cookies(stcb, asoc);
2261 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
2262 /* Restart the timer if we have pending data */
2263 struct sctp_tmit_chunk *chk;
2265 chk = TAILQ_FIRST(&asoc->sent_queue);
2267 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
2274 sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
2275 struct sctp_tcb *stcb)
2277 struct sctp_nets *net;
2278 struct sctp_tmit_chunk *lchk;
2281 if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_ecne_chunk)) {
2284 SCTP_STAT_INCR(sctps_recvecne);
2285 tsn = ntohl(cp->tsn);
2286 /* ECN Nonce stuff: need a resync and disable the nonce sum check */
2287 /* Also we make sure we disable the nonce_wait */
2288 lchk = TAILQ_FIRST(&stcb->asoc.send_queue);
2290 stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq;
2292 stcb->asoc.nonce_resync_tsn = lchk->rec.data.TSN_seq;
2294 stcb->asoc.nonce_wait_for_ecne = 0;
2295 stcb->asoc.nonce_sum_check = 0;
2297 /* Find where it was sent, if possible */
2299 lchk = TAILQ_FIRST(&stcb->asoc.sent_queue);
2301 if (lchk->rec.data.TSN_seq == tsn) {
2305 if (compare_with_wrap(lchk->rec.data.TSN_seq, tsn, MAX_SEQ))
2307 lchk = TAILQ_NEXT(lchk, sctp_next);
2310 /* default is we use the primary */
2311 net = stcb->asoc.primary_destination;
2313 if (compare_with_wrap(tsn, stcb->asoc.last_cwr_tsn, MAX_TSN)) {
2316 old_cwnd = net->cwnd;
2317 SCTP_STAT_INCR(sctps_ecnereducedcwnd);
2318 net->ssthresh = net->cwnd / 2;
2319 if (net->ssthresh < net->mtu) {
2320 net->ssthresh = net->mtu;
2321 /* here back off the timer as well, to slow us down */
2324 net->cwnd = net->ssthresh;
2325 if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
2326 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
2329 * we reduce once every RTT. So we will only lower cwnd at
2330 * the next sending seq i.e. the resync_tsn.
2332 stcb->asoc.last_cwr_tsn = stcb->asoc.nonce_resync_tsn;
2335 * We always send a CWR this way if our previous one was lost our
2336 * peer will get an update, or if it is not time again to reduce we
2337 * still get the cwr to the peer.
2339 sctp_send_cwr(stcb, net, tsn);
2343 sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb)
2346 * Here we get a CWR from the peer. We must look in the outqueue and
2347 * make sure that we have a covered ECNE in teh control chunk part.
2350 struct sctp_tmit_chunk *chk;
2351 struct sctp_ecne_chunk *ecne;
2353 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
2354 if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) {
2358 * Look for and remove if it is the right TSN. Since there
2359 * is only ONE ECNE on the control queue at any one time we
2360 * don't need to worry about more than one!
2362 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
2363 if (compare_with_wrap(ntohl(cp->tsn), ntohl(ecne->tsn),
2364 MAX_TSN) || (cp->tsn == ecne->tsn)) {
2365 /* this covers this ECNE, we can remove it */
2366 stcb->asoc.ecn_echo_cnt_onq--;
2367 TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk,
2370 sctp_m_freem(chk->data);
2373 stcb->asoc.ctrl_queue_cnt--;
2374 sctp_free_remote_addr(chk->whoTo);
2375 sctp_free_a_chunk(stcb, chk);
2382 sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp,
2383 struct sctp_tcb *stcb, struct sctp_nets *net)
2385 struct sctp_association *asoc;
2387 SCTPDBG(SCTP_DEBUG_INPUT2,
2388 "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n");
2393 /* process according to association state */
2394 if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
2395 /* unexpected SHUTDOWN-COMPLETE... so ignore... */
2396 SCTP_TCB_UNLOCK(stcb);
2399 /* notify upper layer protocol */
2400 if (stcb->sctp_socket) {
2401 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL);
2402 /* are the queues empty? they should be */
2403 if (!TAILQ_EMPTY(&asoc->send_queue) ||
2404 !TAILQ_EMPTY(&asoc->sent_queue) ||
2405 !TAILQ_EMPTY(&asoc->out_wheel)) {
2406 sctp_report_all_outbound(stcb, 0);
2409 /* stop the timer */
2410 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_21);
2411 SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
2413 sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_22);
2418 process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
2419 struct sctp_nets *net, uint8_t flg)
2421 switch (desc->chunk_type) {
2423 /* find the tsn to resend (possibly */
2426 struct sctp_tmit_chunk *tp1;
2428 tsn = ntohl(desc->tsn_ifany);
2429 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2431 if (tp1->rec.data.TSN_seq == tsn) {
2435 if (compare_with_wrap(tp1->rec.data.TSN_seq, tsn,
2441 tp1 = TAILQ_NEXT(tp1, sctp_next);
2445 * Do it the other way , aka without paying
2446 * attention to queue seq order.
2448 SCTP_STAT_INCR(sctps_pdrpdnfnd);
2449 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2451 if (tp1->rec.data.TSN_seq == tsn) {
2455 tp1 = TAILQ_NEXT(tp1, sctp_next);
2459 SCTP_STAT_INCR(sctps_pdrptsnnf);
2461 if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) {
2464 if ((stcb->asoc.peers_rwnd == 0) &&
2465 ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
2466 SCTP_STAT_INCR(sctps_pdrpdiwnp);
2469 if (stcb->asoc.peers_rwnd == 0 &&
2470 (flg & SCTP_FROM_MIDDLE_BOX)) {
2471 SCTP_STAT_INCR(sctps_pdrpdizrw);
2474 ddp = (uint8_t *) (mtod(tp1->data, caddr_t)+
2475 sizeof(struct sctp_data_chunk));
2479 for (iii = 0; iii < sizeof(desc->data_bytes);
2481 if (ddp[iii] != desc->data_bytes[iii]) {
2482 SCTP_STAT_INCR(sctps_pdrpbadd);
2488 * We zero out the nonce so resync not
2491 tp1->rec.data.ect_nonce = 0;
2495 * this guy had a RTO calculation
2496 * pending on it, cancel it
2500 SCTP_STAT_INCR(sctps_pdrpmark);
2501 if (tp1->sent != SCTP_DATAGRAM_RESEND)
2502 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
2503 tp1->sent = SCTP_DATAGRAM_RESEND;
2505 * mark it as if we were doing a FR, since
2506 * we will be getting gap ack reports behind
2507 * the info from the router.
2509 tp1->rec.data.doing_fast_retransmit = 1;
2511 * mark the tsn with what sequences can
2514 if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
2515 tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
2517 tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
2520 /* restart the timer */
2521 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
2522 stcb, tp1->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_23);
2523 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
2526 /* fix counts and things */
2527 if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) {
2528 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP,
2529 tp1->whoTo->flight_size,
2532 tp1->rec.data.TSN_seq);
2534 sctp_flight_size_decrease(tp1);
2535 sctp_total_flight_decrease(stcb, tp1);
2541 TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
2542 if (tp1->sent == SCTP_DATAGRAM_RESEND)
2545 TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue,
2547 if (tp1->sent == SCTP_DATAGRAM_RESEND)
2550 if (audit != stcb->asoc.sent_queue_retran_cnt) {
2551 SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n",
2552 audit, stcb->asoc.sent_queue_retran_cnt);
2553 #ifndef SCTP_AUDITING_ENABLED
2554 stcb->asoc.sent_queue_retran_cnt = audit;
2562 struct sctp_tmit_chunk *asconf;
2564 TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
2566 if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
2571 if (asconf->sent != SCTP_DATAGRAM_RESEND)
2572 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
2573 asconf->sent = SCTP_DATAGRAM_RESEND;
2574 asconf->snd_count--;
2578 case SCTP_INITIATION:
2579 /* resend the INIT */
2580 stcb->asoc.dropped_special_cnt++;
2581 if (stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) {
2583 * If we can get it in, in a few attempts we do
2584 * this, otherwise we let the timer fire.
2586 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep,
2587 stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_24);
2588 sctp_send_initiate(stcb->sctp_ep, stcb);
2591 case SCTP_SELECTIVE_ACK:
2592 /* resend the sack */
2593 sctp_send_sack(stcb);
2595 case SCTP_HEARTBEAT_REQUEST:
2596 /* resend a demand HB */
2597 (void)sctp_send_hb(stcb, 1, net);
2600 sctp_send_shutdown(stcb, net);
2602 case SCTP_SHUTDOWN_ACK:
2603 sctp_send_shutdown_ack(stcb, net);
2605 case SCTP_COOKIE_ECHO:
2607 struct sctp_tmit_chunk *cookie;
2610 TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue,
2612 if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
2617 if (cookie->sent != SCTP_DATAGRAM_RESEND)
2618 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
2619 cookie->sent = SCTP_DATAGRAM_RESEND;
2620 sctp_stop_all_cookie_timers(stcb);
2624 case SCTP_COOKIE_ACK:
2625 sctp_send_cookie_ack(stcb);
2627 case SCTP_ASCONF_ACK:
2628 /* resend last asconf ack */
2629 sctp_send_asconf_ack(stcb, 1);
2631 case SCTP_FORWARD_CUM_TSN:
2632 send_forward_tsn(stcb, &stcb->asoc);
2634 /* can't do anything with these */
2635 case SCTP_PACKET_DROPPED:
2636 case SCTP_INITIATION_ACK: /* this should not happen */
2637 case SCTP_HEARTBEAT_ACK:
2638 case SCTP_ABORT_ASSOCIATION:
2639 case SCTP_OPERATION_ERROR:
2640 case SCTP_SHUTDOWN_COMPLETE:
2650 sctp_reset_in_stream(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
2656 * We set things to 0xffff since this is the last delivered sequence
2657 * and we will be sending in 0 after the reset.
2660 if (number_entries) {
2661 for (i = 0; i < number_entries; i++) {
2662 temp = ntohs(list[i]);
2663 if (temp >= stcb->asoc.streamincnt) {
2666 stcb->asoc.strmin[temp].last_sequence_delivered = 0xffff;
2670 for (i = 0; i < stcb->asoc.streamincnt; i++) {
2671 stcb->asoc.strmin[i].last_sequence_delivered = 0xffff;
2674 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list);
2678 sctp_reset_out_streams(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
2682 if (number_entries == 0) {
2683 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
2684 stcb->asoc.strmout[i].next_sequence_sent = 0;
2686 } else if (number_entries) {
2687 for (i = 0; i < number_entries; i++) {
2690 temp = ntohs(list[i]);
2691 if (temp >= stcb->asoc.streamoutcnt) {
2692 /* no such stream */
2695 stcb->asoc.strmout[temp].next_sequence_sent = 0;
2698 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list);
2702 struct sctp_stream_reset_out_request *
2703 sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk)
2705 struct sctp_association *asoc;
2706 struct sctp_stream_reset_out_req *req;
2707 struct sctp_stream_reset_out_request *r;
2708 struct sctp_tmit_chunk *chk;
2712 if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
2713 asoc->stream_reset_outstanding = 0;
2716 if (stcb->asoc.str_reset == NULL) {
2717 asoc->stream_reset_outstanding = 0;
2720 chk = stcb->asoc.str_reset;
2721 if (chk->data == NULL) {
2725 /* he wants a copy of the chk pointer */
2728 clen = chk->send_size;
2729 req = mtod(chk->data, struct sctp_stream_reset_out_req *);
2731 if (ntohl(r->request_seq) == seq) {
2735 len = SCTP_SIZE32(ntohs(r->ph.param_length));
2736 if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) {
2737 /* move to the next one, there can only be a max of two */
2738 r = (struct sctp_stream_reset_out_request *)((caddr_t)r + len);
2739 if (ntohl(r->request_seq) == seq) {
2743 /* that seq is not here */
2748 sctp_clean_up_stream_reset(struct sctp_tcb *stcb)
2750 struct sctp_association *asoc;
2751 struct sctp_tmit_chunk *chk = stcb->asoc.str_reset;
2753 if (stcb->asoc.str_reset == NULL) {
2758 sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_25);
2759 TAILQ_REMOVE(&asoc->control_send_queue,
2763 sctp_m_freem(chk->data);
2766 asoc->ctrl_queue_cnt--;
2767 sctp_free_remote_addr(chk->whoTo);
2769 sctp_free_a_chunk(stcb, chk);
2770 stcb->asoc.str_reset = NULL;
2775 sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
2776 uint32_t seq, uint32_t action,
2777 struct sctp_stream_reset_response *respin)
2781 struct sctp_association *asoc = &stcb->asoc;
2782 struct sctp_tmit_chunk *chk;
2783 struct sctp_stream_reset_out_request *srparam;
2786 if (asoc->stream_reset_outstanding == 0) {
2790 if (seq == stcb->asoc.str_reset_seq_out) {
2791 srparam = sctp_find_stream_reset(stcb, seq, &chk);
2793 stcb->asoc.str_reset_seq_out++;
2794 type = ntohs(srparam->ph.param_type);
2795 lparm_len = ntohs(srparam->ph.param_length);
2796 if (type == SCTP_STR_RESET_OUT_REQUEST) {
2797 number_entries = (lparm_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t);
2798 asoc->stream_reset_out_is_outstanding = 0;
2799 if (asoc->stream_reset_outstanding)
2800 asoc->stream_reset_outstanding--;
2801 if (action == SCTP_STREAM_RESET_PERFORMED) {
2803 sctp_reset_out_streams(stcb, number_entries, srparam->list_of_streams);
2805 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, srparam->list_of_streams);
2807 } else if (type == SCTP_STR_RESET_IN_REQUEST) {
2808 /* Answered my request */
2809 number_entries = (lparm_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t);
2810 if (asoc->stream_reset_outstanding)
2811 asoc->stream_reset_outstanding--;
2812 if (action != SCTP_STREAM_RESET_PERFORMED) {
2813 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb, number_entries, srparam->list_of_streams);
2815 } else if (type == SCTP_STR_RESET_TSN_REQUEST) {
2817 * a) Adopt the new in tsn.
2819 * c) Adopt the new out-tsn
2821 struct sctp_stream_reset_response_tsn *resp;
2822 struct sctp_forward_tsn_chunk fwdtsn;
2825 if (respin == NULL) {
2829 if (action == SCTP_STREAM_RESET_PERFORMED) {
2830 resp = (struct sctp_stream_reset_response_tsn *)respin;
2831 asoc->stream_reset_outstanding--;
2832 fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
2833 fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
2834 fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1);
2835 sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag);
2839 stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1);
2840 stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
2841 stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn);
2842 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
2843 stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn);
2844 stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn;
2846 sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
2847 sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
2851 /* get rid of the request and get the request flags */
2852 if (asoc->stream_reset_outstanding == 0) {
2853 sctp_clean_up_stream_reset(stcb);
2861 sctp_handle_str_reset_request_in(struct sctp_tcb *stcb,
2862 struct sctp_tmit_chunk *chk,
2863 struct sctp_stream_reset_in_request *req)
2871 * peer wants me to send a str-reset to him for my outgoing seq's if
2874 struct sctp_association *asoc = &stcb->asoc;
2876 seq = ntohl(req->request_seq);
2877 if (asoc->str_reset_seq_in == seq) {
2878 if (stcb->asoc.stream_reset_out_is_outstanding == 0) {
2879 len = ntohs(req->ph.param_length);
2880 number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t));
2881 for (i = 0; i < number_entries; i++) {
2882 temp = ntohs(req->list_of_streams[i]);
2883 req->list_of_streams[i] = temp;
2885 /* move the reset action back one */
2886 asoc->last_reset_action[1] = asoc->last_reset_action[0];
2887 asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
2888 sctp_add_stream_reset_out(chk, number_entries, req->list_of_streams,
2889 asoc->str_reset_seq_out,
2890 seq, (asoc->sending_seq - 1));
2891 asoc->stream_reset_out_is_outstanding = 1;
2892 asoc->str_reset = chk;
2893 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
2894 stcb->asoc.stream_reset_outstanding++;
2896 /* Can't do it, since we have sent one out */
2897 asoc->last_reset_action[1] = asoc->last_reset_action[0];
2898 asoc->last_reset_action[0] = SCTP_STREAM_RESET_TRY_LATER;
2899 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
2901 asoc->str_reset_seq_in++;
2902 } else if (asoc->str_reset_seq_in - 1 == seq) {
2903 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
2904 } else if (asoc->str_reset_seq_in - 2 == seq) {
2905 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
2907 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
2912 sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb,
2913 struct sctp_tmit_chunk *chk,
2914 struct sctp_stream_reset_tsn_request *req)
2916 /* reset all in and out and update the tsn */
2918 * A) reset my str-seq's on in and out. B) Select a receive next,
2919 * and set cum-ack to it. Also process this selected number as a
2920 * fwd-tsn as well. C) set in the response my next sending seq.
2922 struct sctp_forward_tsn_chunk fwdtsn;
2923 struct sctp_association *asoc = &stcb->asoc;
2927 seq = ntohl(req->request_seq);
2928 if (asoc->str_reset_seq_in == seq) {
2929 fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
2930 fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
2931 fwdtsn.ch.chunk_flags = 0;
2932 fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1);
2933 sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag);
2937 stcb->asoc.highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA;
2938 stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
2939 stcb->asoc.mapping_array_base_tsn = stcb->asoc.highest_tsn_inside_map + 1;
2940 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
2941 atomic_add_int(&stcb->asoc.sending_seq, 1);
2942 /* save off historical data for retrans */
2943 stcb->asoc.last_sending_seq[1] = stcb->asoc.last_sending_seq[0];
2944 stcb->asoc.last_sending_seq[0] = stcb->asoc.sending_seq;
2945 stcb->asoc.last_base_tsnsent[1] = stcb->asoc.last_base_tsnsent[0];
2946 stcb->asoc.last_base_tsnsent[0] = stcb->asoc.mapping_array_base_tsn;
2948 sctp_add_stream_reset_result_tsn(chk,
2949 ntohl(req->request_seq),
2950 SCTP_STREAM_RESET_PERFORMED,
2951 stcb->asoc.sending_seq,
2952 stcb->asoc.mapping_array_base_tsn);
2953 sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
2954 sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
2955 stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
2956 stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
2958 asoc->str_reset_seq_in++;
2959 } else if (asoc->str_reset_seq_in - 1 == seq) {
2960 sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
2961 stcb->asoc.last_sending_seq[0],
2962 stcb->asoc.last_base_tsnsent[0]
2964 } else if (asoc->str_reset_seq_in - 2 == seq) {
2965 sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1],
2966 stcb->asoc.last_sending_seq[1],
2967 stcb->asoc.last_base_tsnsent[1]
2970 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
2976 sctp_handle_str_reset_request_out(struct sctp_tcb *stcb,
2977 struct sctp_tmit_chunk *chk,
2978 struct sctp_stream_reset_out_request *req)
2981 int number_entries, len;
2982 struct sctp_association *asoc = &stcb->asoc;
2984 seq = ntohl(req->request_seq);
2986 /* now if its not a duplicate we process it */
2987 if (asoc->str_reset_seq_in == seq) {
2988 len = ntohs(req->ph.param_length);
2989 number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t));
2991 * the sender is resetting, handle the list issue.. we must
2992 * a) verify if we can do the reset, if so no problem b) If
2993 * we can't do the reset we must copy the request. c) queue
2994 * it, and setup the data in processor to trigger it off
2995 * when needed and dequeue all the queued data.
2997 tsn = ntohl(req->send_reset_at_tsn);
2999 /* move the reset action back one */
3000 asoc->last_reset_action[1] = asoc->last_reset_action[0];
3001 if ((tsn == asoc->cumulative_tsn) ||
3002 (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN))) {
3003 /* we can do it now */
3004 sctp_reset_in_stream(stcb, number_entries, req->list_of_streams);
3005 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
3006 asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
3009 * we must queue it up and thus wait for the TSN's
3010 * to arrive that are at or before tsn
3012 struct sctp_stream_reset_list *liste;
3015 siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t));
3016 SCTP_MALLOC(liste, struct sctp_stream_reset_list *,
3017 siz, SCTP_M_STRESET);
3018 if (liste == NULL) {
3019 /* gak out of memory */
3020 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
3021 asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
3025 liste->number_entries = number_entries;
3026 memcpy(&liste->req, req,
3027 (sizeof(struct sctp_stream_reset_out_request) + (number_entries * sizeof(uint16_t))));
3028 TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp);
3029 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
3030 asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
3032 asoc->str_reset_seq_in++;
3033 } else if ((asoc->str_reset_seq_in - 1) == seq) {
3035 * one seq back, just echo back last action since my
3036 * response was lost.
3038 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3039 } else if ((asoc->str_reset_seq_in - 2) == seq) {
3041 * two seq back, just echo back last action since my
3042 * response was lost.
3044 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
3046 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
3051 sctp_handle_stream_reset(struct sctp_tcb *stcb, struct sctp_stream_reset_out_req *sr_req)
3053 int chk_length, param_len, ptype;
3056 struct sctp_tmit_chunk *chk;
3057 struct sctp_chunkhdr *ch;
3058 struct sctp_paramhdr *ph;
3062 /* now it may be a reset or a reset-response */
3063 chk_length = ntohs(sr_req->ch.chunk_length);
3065 /* setup for adding the response */
3066 sctp_alloc_a_chunk(stcb, chk);
3070 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
3071 chk->rec.chunk_id.can_take_data = 0;
3072 chk->asoc = &stcb->asoc;
3073 chk->no_fr_allowed = 0;
3074 chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr);
3075 chk->book_size_scale = 0;
3076 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3077 if (chk->data == NULL) {
3080 sctp_m_freem(chk->data);
3083 sctp_free_a_chunk(stcb, chk);
3086 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
3088 /* setup chunk parameters */
3089 chk->sent = SCTP_DATAGRAM_UNSENT;
3091 chk->whoTo = stcb->asoc.primary_destination;
3092 atomic_add_int(&chk->whoTo->ref_count, 1);
3094 ch = mtod(chk->data, struct sctp_chunkhdr *);
3095 ch->chunk_type = SCTP_STREAM_RESET;
3096 ch->chunk_flags = 0;
3097 ch->chunk_length = htons(chk->send_size);
3098 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
3099 ph = (struct sctp_paramhdr *)&sr_req->sr_req;
3100 while ((size_t)chk_length >= sizeof(struct sctp_stream_reset_tsn_request)) {
3101 param_len = ntohs(ph->param_length);
3102 if (param_len < (int)sizeof(struct sctp_stream_reset_tsn_request)) {
3106 ptype = ntohs(ph->param_type);
3108 if (num_param > SCTP_MAX_RESET_PARAMS) {
3109 /* hit the max of parameters already sorry.. */
3112 if (ptype == SCTP_STR_RESET_OUT_REQUEST) {
3113 struct sctp_stream_reset_out_request *req_out;
3115 req_out = (struct sctp_stream_reset_out_request *)ph;
3117 if (stcb->asoc.stream_reset_outstanding) {
3118 seq = ntohl(req_out->response_seq);
3119 if (seq == stcb->asoc.str_reset_seq_out) {
3121 (void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_PERFORMED, NULL);
3124 sctp_handle_str_reset_request_out(stcb, chk, req_out);
3125 } else if (ptype == SCTP_STR_RESET_IN_REQUEST) {
3126 struct sctp_stream_reset_in_request *req_in;
3129 req_in = (struct sctp_stream_reset_in_request *)ph;
3130 sctp_handle_str_reset_request_in(stcb, chk, req_in);
3131 } else if (ptype == SCTP_STR_RESET_TSN_REQUEST) {
3132 struct sctp_stream_reset_tsn_request *req_tsn;
3135 req_tsn = (struct sctp_stream_reset_tsn_request *)ph;
3136 if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) {
3138 goto strres_nochunk;
3142 } else if (ptype == SCTP_STR_RESET_RESPONSE) {
3143 struct sctp_stream_reset_response *resp;
3146 resp = (struct sctp_stream_reset_response *)ph;
3147 seq = ntohl(resp->response_seq);
3148 result = ntohl(resp->result);
3149 if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) {
3151 goto strres_nochunk;
3157 ph = (struct sctp_paramhdr *)((caddr_t)ph + SCTP_SIZE32(param_len));
3158 chk_length -= SCTP_SIZE32(param_len);
3161 /* we have no response free the stuff */
3162 goto strres_nochunk;
3164 /* ok we have a chunk to link in */
3165 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue,
3168 stcb->asoc.ctrl_queue_cnt++;
3173 * Handle a router or endpoint's report of a packet loss. There are two ways
3174 * to handle this: either we get the whole packet and must dissect it
3175 * ourselves (possibly with truncation and or corruption) or it is a summary
3176 * from a middle box that did the dissecting for us.
3179 sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp,
3180 struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit)
3182 uint32_t bottle_bw, on_queue;
3186 struct sctp_chunk_desc desc;
3187 struct sctp_chunkhdr *ch;
3189 chlen = ntohs(cp->ch.chunk_length);
3190 chlen -= sizeof(struct sctp_pktdrop_chunk);
3191 /* XXX possible chlen underflow */
3194 if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)
3195 SCTP_STAT_INCR(sctps_pdrpbwrpt);
3197 ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr));
3198 chlen -= sizeof(struct sctphdr);
3199 /* XXX possible chlen underflow */
3200 memset(&desc, 0, sizeof(desc));
3202 trunc_len = (uint16_t) ntohs(cp->trunc_len);
3203 if (trunc_len > limit) {
3206 /* now the chunks themselves */
3207 while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) {
3208 desc.chunk_type = ch->chunk_type;
3209 /* get amount we need to move */
3210 at = ntohs(ch->chunk_length);
3211 if (at < sizeof(struct sctp_chunkhdr)) {
3212 /* corrupt chunk, maybe at the end? */
3213 SCTP_STAT_INCR(sctps_pdrpcrupt);
3216 if (trunc_len == 0) {
3217 /* we are supposed to have all of it */
3219 /* corrupt skip it */
3220 SCTP_STAT_INCR(sctps_pdrpcrupt);
3224 /* is there enough of it left ? */
3225 if (desc.chunk_type == SCTP_DATA) {
3226 if (chlen < (sizeof(struct sctp_data_chunk) +
3227 sizeof(desc.data_bytes))) {
3231 if (chlen < sizeof(struct sctp_chunkhdr)) {
3236 if (desc.chunk_type == SCTP_DATA) {
3237 /* can we get out the tsn? */
3238 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
3239 SCTP_STAT_INCR(sctps_pdrpmbda);
3241 if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) {
3243 struct sctp_data_chunk *dcp;
3247 dcp = (struct sctp_data_chunk *)ch;
3248 ddp = (uint8_t *) (dcp + 1);
3249 for (iii = 0; iii < sizeof(desc.data_bytes); iii++) {
3250 desc.data_bytes[iii] = ddp[iii];
3252 desc.tsn_ifany = dcp->dp.tsn;
3254 /* nope we are done. */
3255 SCTP_STAT_INCR(sctps_pdrpnedat);
3259 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
3260 SCTP_STAT_INCR(sctps_pdrpmbct);
3263 if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) {
3264 SCTP_STAT_INCR(sctps_pdrppdbrk);
3267 if (SCTP_SIZE32(at) > chlen) {
3270 chlen -= SCTP_SIZE32(at);
3271 if (chlen < sizeof(struct sctp_chunkhdr)) {
3272 /* done, none left */
3275 ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at));
3277 /* Now update any rwnd --- possibly */
3278 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) {
3279 /* From a peer, we get a rwnd report */
3282 SCTP_STAT_INCR(sctps_pdrpfehos);
3284 bottle_bw = ntohl(cp->bottle_bw);
3285 on_queue = ntohl(cp->current_onq);
3286 if (bottle_bw && on_queue) {
3287 /* a rwnd report is in here */
3288 if (bottle_bw > on_queue)
3289 a_rwnd = bottle_bw - on_queue;
3294 stcb->asoc.peers_rwnd = 0;
3296 if (a_rwnd > stcb->asoc.total_flight) {
3297 stcb->asoc.peers_rwnd =
3298 a_rwnd - stcb->asoc.total_flight;
3300 stcb->asoc.peers_rwnd = 0;
3302 if (stcb->asoc.peers_rwnd <
3303 stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3304 /* SWS sender side engages */
3305 stcb->asoc.peers_rwnd = 0;
3310 SCTP_STAT_INCR(sctps_pdrpfmbox);
3313 /* now middle boxes in sat networks get a cwnd bump */
3314 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) &&
3315 (stcb->asoc.sat_t3_loss_recovery == 0) &&
3316 (stcb->asoc.sat_network)) {
3318 * This is debateable but for sat networks it makes sense
3319 * Note if a T3 timer has went off, we will prohibit any
3320 * changes to cwnd until we exit the t3 loss recovery.
3325 int old_cwnd = net->cwnd;
3327 /* need real RTT for this calc */
3328 rtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
3329 /* get bottle neck bw */
3330 bottle_bw = ntohl(cp->bottle_bw);
3331 /* and whats on queue */
3332 on_queue = ntohl(cp->current_onq);
3334 * adjust the on-queue if our flight is more it could be
3335 * that the router has not yet gotten data "in-flight" to it
3337 if (on_queue < net->flight_size)
3338 on_queue = net->flight_size;
3340 /* calculate the available space */
3341 bw_avail = (bottle_bw * rtt) / 1000;
3342 if (bw_avail > bottle_bw) {
3344 * Cap the growth to no more than the bottle neck.
3345 * This can happen as RTT slides up due to queues.
3346 * It also means if you have more than a 1 second
3347 * RTT with a empty queue you will be limited to the
3348 * bottle_bw per second no matter if other points
3349 * have 1/2 the RTT and you could get more out...
3351 bw_avail = bottle_bw;
3353 if (on_queue > bw_avail) {
3355 * No room for anything else don't allow anything
3356 * else to be "added to the fire".
3358 int seg_inflight, seg_onqueue, my_portion;
3360 net->partial_bytes_acked = 0;
3362 /* how much are we over queue size? */
3363 incr = on_queue - bw_avail;
3364 if (stcb->asoc.seen_a_sack_this_pkt) {
3366 * undo any cwnd adjustment that the sack
3369 net->cwnd = net->prev_cwnd;
3371 /* Now how much of that is mine? */
3372 seg_inflight = net->flight_size / net->mtu;
3373 seg_onqueue = on_queue / net->mtu;
3374 my_portion = (incr * seg_inflight) / seg_onqueue;
3376 /* Have I made an adjustment already */
3377 if (net->cwnd > net->flight_size) {
3379 * for this flight I made an adjustment we
3380 * need to decrease the portion by a share
3381 * our previous adjustment.
3385 diff_adj = net->cwnd - net->flight_size;
3386 if (diff_adj > my_portion)
3389 my_portion -= diff_adj;
3392 * back down to the previous cwnd (assume we have
3393 * had a sack before this packet). minus what ever
3394 * portion of the overage is my fault.
3396 net->cwnd -= my_portion;
3398 /* we will NOT back down more than 1 MTU */
3399 if (net->cwnd <= net->mtu) {
3400 net->cwnd = net->mtu;
3403 net->ssthresh = net->cwnd - 1;
3406 * Take 1/4 of the space left or max burst up ..
3407 * whichever is less.
3409 incr = min((bw_avail - on_queue) >> 2,
3410 stcb->asoc.max_burst * net->mtu);
3413 if (net->cwnd > bw_avail) {
3414 /* We can't exceed the pipe size */
3415 net->cwnd = bw_avail;
3417 if (net->cwnd < net->mtu) {
3418 /* We always have 1 MTU */
3419 net->cwnd = net->mtu;
3421 if (net->cwnd - old_cwnd != 0) {
3422 /* log only changes */
3423 if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
3424 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
3425 SCTP_CWND_LOG_FROM_SAT);
3432 * Handles all control chunks in a packet. Inputs: - m: mbuf chain, assumed to
3433 * still contain the IP/SCTP header - stcb: the tcb found for this packet -
3434 * offset: offset into the mbuf chain to the first chunkhdr - length: the
3435 * length of the complete packet. Outputs: - length: modified to the remaining
3436 * length after control processing - netp: modified to the new sctp_nets after
3437 * cookie-echo processing - returns NULL to discard the packet (i.e. no asoc,
3438 * bad packet, ...); otherwise returns the tcb for this packet
3441 __attribute__((noinline))
3443 static struct sctp_tcb *
3444 sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length,
3445 struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp,
3446 struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen,
3449 struct sctp_association *asoc;
3451 int num_chunks = 0; /* number of control chunks processed */
3452 uint32_t chk_length;
3454 int abort_no_unlock = 0;
3457 * How big should this be, and should it be alloc'd? Lets try the
3458 * d-mtu-ceiling for now (2k) and that should hopefully work ...
3459 * until we get into jumbo grams and such..
3461 uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
3462 struct sctp_tcb *locked_tcb = stcb;
3464 uint32_t auth_offset = 0, auth_len = 0;
3465 int auth_skipped = 0;
3467 SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n",
3468 iphlen, *offset, length, stcb);
3470 /* validate chunk header length... */
3471 if (ntohs(ch->chunk_length) < sizeof(*ch)) {
3472 SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n",
3473 ntohs(ch->chunk_length));
3475 SCTP_TCB_UNLOCK(locked_tcb);
3480 * validate the verification tag
3482 vtag_in = ntohl(sh->v_tag);
3485 SCTP_TCB_LOCK_ASSERT(locked_tcb);
3487 if (ch->chunk_type == SCTP_INITIATION) {
3488 SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n",
3489 ntohs(ch->chunk_length), vtag_in);
3491 /* protocol error- silently discard... */
3492 SCTP_STAT_INCR(sctps_badvtag);
3494 SCTP_TCB_UNLOCK(locked_tcb);
3498 } else if (ch->chunk_type != SCTP_COOKIE_ECHO) {
3500 * If there is no stcb, skip the AUTH chunk and process
3501 * later after a stcb is found (to validate the lookup was
3504 if ((ch->chunk_type == SCTP_AUTHENTICATION) &&
3505 (stcb == NULL) && !sctp_auth_disable) {
3506 /* save this chunk for later processing */
3508 auth_offset = *offset;
3509 auth_len = ntohs(ch->chunk_length);
3511 /* (temporarily) move past this chunk */
3512 *offset += SCTP_SIZE32(auth_len);
3513 if (*offset >= length) {
3514 /* no more data left in the mbuf chain */
3517 SCTP_TCB_UNLOCK(locked_tcb);
3521 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
3522 sizeof(struct sctp_chunkhdr), chunk_buf);
3528 SCTP_TCB_UNLOCK(locked_tcb);
3532 if (ch->chunk_type == SCTP_COOKIE_ECHO) {
3533 goto process_control_chunks;
3536 * first check if it's an ASCONF with an unknown src addr we
3537 * need to look inside to find the association
3539 if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) {
3540 /* inp's refcount may be reduced */
3541 SCTP_INP_INCR_REF(inp);
3543 stcb = sctp_findassociation_ep_asconf(m, iphlen,
3544 *offset, sh, &inp, netp);
3547 * reduce inp's refcount if not reduced in
3548 * sctp_findassociation_ep_asconf().
3550 SCTP_INP_DECR_REF(inp);
3552 /* now go back and verify any auth chunk to be sure */
3553 if (auth_skipped && (stcb != NULL)) {
3554 struct sctp_auth_chunk *auth;
3556 auth = (struct sctp_auth_chunk *)
3557 sctp_m_getptr(m, auth_offset,
3558 auth_len, chunk_buf);
3561 if ((auth == NULL) || sctp_handle_auth(stcb, auth, m,
3563 /* auth HMAC failed so dump it */
3566 SCTP_TCB_UNLOCK(locked_tcb);
3570 /* remaining chunks are HMAC checked */
3571 stcb->asoc.authenticated = 1;
3576 /* no association, so it's out of the blue... */
3577 sctp_handle_ootb(m, iphlen, *offset, sh, inp, NULL,
3581 SCTP_TCB_UNLOCK(locked_tcb);
3586 /* ABORT and SHUTDOWN can use either v_tag... */
3587 if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) ||
3588 (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) ||
3589 (ch->chunk_type == SCTP_PACKET_DROPPED)) {
3590 if ((vtag_in == asoc->my_vtag) ||
3591 ((ch->chunk_flags & SCTP_HAD_NO_TCB) &&
3592 (vtag_in == asoc->peer_vtag))) {
3595 /* drop this packet... */
3596 SCTP_STAT_INCR(sctps_badvtag);
3598 SCTP_TCB_UNLOCK(locked_tcb);
3602 } else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
3603 if (vtag_in != asoc->my_vtag) {
3605 * this could be a stale SHUTDOWN-ACK or the
3606 * peer never got the SHUTDOWN-COMPLETE and
3607 * is still hung; we have started a new asoc
3608 * but it won't complete until the shutdown
3612 SCTP_TCB_UNLOCK(locked_tcb);
3614 sctp_handle_ootb(m, iphlen, *offset, sh, inp,
3619 /* for all other chunks, vtag must match */
3620 if (vtag_in != asoc->my_vtag) {
3621 /* invalid vtag... */
3622 SCTPDBG(SCTP_DEBUG_INPUT3,
3623 "invalid vtag: %xh, expect %xh\n",
3624 vtag_in, asoc->my_vtag);
3625 SCTP_STAT_INCR(sctps_badvtag);
3627 SCTP_TCB_UNLOCK(locked_tcb);
3633 } /* end if !SCTP_COOKIE_ECHO */
3635 * process all control chunks...
3637 if (((ch->chunk_type == SCTP_SELECTIVE_ACK) ||
3638 (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) &&
3639 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
3640 /* implied cookie-ack.. we must have lost the ack */
3641 stcb->asoc.overall_error_count = 0;
3642 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb,
3645 process_control_chunks:
3646 while (IS_SCTP_CONTROL(ch)) {
3647 /* validate chunk length */
3648 chk_length = ntohs(ch->chunk_length);
3649 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n",
3650 ch->chunk_type, chk_length);
3651 SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length);
3652 if (chk_length < sizeof(*ch) ||
3653 (*offset + (int)chk_length) > length) {
3656 SCTP_TCB_UNLOCK(locked_tcb);
3660 SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks);
3662 * INIT-ACK only gets the init ack "header" portion only
3663 * because we don't have to process the peer's COOKIE. All
3664 * others get a complete chunk.
3666 if ((ch->chunk_type == SCTP_INITIATION_ACK) ||
3667 (ch->chunk_type == SCTP_INITIATION)) {
3668 /* get an init-ack chunk */
3669 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
3670 sizeof(struct sctp_init_ack_chunk), chunk_buf);
3674 SCTP_TCB_UNLOCK(locked_tcb);
3680 * For cookies and all other chunks. if the
3682 if (chk_length > sizeof(chunk_buf)) {
3684 * use just the size of the chunk buffer so
3685 * the front part of our chunks fit in
3686 * contiguous space up to the chunk buffer
3687 * size (508 bytes). For chunks that need to
3688 * get more than that they mus use the
3689 * sctp_m_getptr() function or other means
3690 * (know how to parse mbuf chains). Cookies
3693 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
3694 (sizeof(chunk_buf) - 4),
3699 SCTP_TCB_UNLOCK(locked_tcb);
3704 /* We can fit it all */
3705 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
3706 chk_length, chunk_buf);
3708 SCTP_PRINTF("sctp_process_control: Can't get the all data....\n");
3711 SCTP_TCB_UNLOCK(locked_tcb);
3718 /* Save off the last place we got a control from */
3720 if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) {
3722 * allow last_control to be NULL if
3723 * ASCONF... ASCONF processing will find the
3726 if ((netp != NULL) && (*netp != NULL))
3727 stcb->asoc.last_control_chunk_from = *netp;
3730 #ifdef SCTP_AUDITING_ENABLED
3731 sctp_audit_log(0xB0, ch->chunk_type);
3734 /* check to see if this chunk required auth, but isn't */
3735 if ((stcb != NULL) && !sctp_auth_disable &&
3736 sctp_auth_is_required_chunk(ch->chunk_type,
3737 stcb->asoc.local_auth_chunks) &&
3738 !stcb->asoc.authenticated) {
3739 /* "silently" ignore */
3740 SCTP_STAT_INCR(sctps_recvauthmissing);
3743 switch (ch->chunk_type) {
3744 case SCTP_INITIATION:
3745 /* must be first and only chunk */
3746 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n");
3747 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3748 /* We are not interested anymore? */
3749 if ((stcb) && (stcb->asoc.total_output_queue_size)) {
3751 * collision case where we are
3752 * sending to them too
3757 SCTP_TCB_UNLOCK(locked_tcb);
3763 if ((num_chunks > 1) ||
3764 (sctp_strict_init && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
3767 SCTP_TCB_UNLOCK(locked_tcb);
3771 if ((stcb != NULL) &&
3772 (SCTP_GET_STATE(&stcb->asoc) ==
3773 SCTP_STATE_SHUTDOWN_ACK_SENT)) {
3774 sctp_send_shutdown_ack(stcb,
3775 stcb->asoc.primary_destination);
3777 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC);
3779 SCTP_TCB_UNLOCK(locked_tcb);
3784 sctp_handle_init(m, iphlen, *offset, sh,
3785 (struct sctp_init_chunk *)ch, inp,
3786 stcb, *netp, &abort_no_unlock, vrf_id);
3788 if (abort_no_unlock)
3793 SCTP_TCB_UNLOCK(locked_tcb);
3797 case SCTP_PAD_CHUNK:
3799 case SCTP_INITIATION_ACK:
3800 /* must be first and only chunk */
3801 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT-ACK\n");
3802 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3803 /* We are not interested anymore */
3804 if ((stcb) && (stcb->asoc.total_output_queue_size)) {
3808 SCTP_TCB_UNLOCK(locked_tcb);
3812 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_26);
3817 if ((num_chunks > 1) ||
3818 (sctp_strict_init && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
3821 SCTP_TCB_UNLOCK(locked_tcb);
3825 if ((netp) && (*netp)) {
3826 ret = sctp_handle_init_ack(m, iphlen, *offset, sh,
3827 (struct sctp_init_ack_chunk *)ch, stcb, *netp, &abort_no_unlock, vrf_id);
3832 * Special case, I must call the output routine to
3833 * get the cookie echoed
3835 if (abort_no_unlock)
3838 if ((stcb) && ret == 0)
3839 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC);
3842 SCTP_TCB_UNLOCK(locked_tcb);
3846 case SCTP_SELECTIVE_ACK:
3847 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK\n");
3848 SCTP_STAT_INCR(sctps_recvsacks);
3850 struct sctp_sack_chunk *sack;
3852 uint32_t a_rwnd, cum_ack;
3856 if ((stcb == NULL) || (chk_length < sizeof(struct sctp_sack_chunk))) {
3857 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on sack chunk, too small\n");
3860 SCTP_TCB_UNLOCK(locked_tcb);
3864 sack = (struct sctp_sack_chunk *)ch;
3865 nonce_sum_flag = ch->chunk_flags & SCTP_SACK_NONCE_SUM;
3866 cum_ack = ntohl(sack->sack.cum_tsn_ack);
3867 num_seg = ntohs(sack->sack.num_gap_ack_blks);
3868 a_rwnd = (uint32_t) ntohl(sack->sack.a_rwnd);
3869 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
3874 stcb->asoc.seen_a_sack_this_pkt = 1;
3875 if ((stcb->asoc.pr_sctp_cnt == 0) &&
3877 ((compare_with_wrap(cum_ack, stcb->asoc.last_acked_seq, MAX_TSN)) ||
3878 (cum_ack == stcb->asoc.last_acked_seq)) &&
3879 (stcb->asoc.saw_sack_with_frags == 0) &&
3880 (!TAILQ_EMPTY(&stcb->asoc.sent_queue))
3883 * We have a SIMPLE sack having no
3884 * prior segments and data on sent
3885 * queue to be acked.. Use the
3886 * faster path sack processing. We
3887 * also allow window update sacks
3888 * with no missing segments to go
3891 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, nonce_sum_flag,
3895 sctp_handle_sack(m, *offset,
3896 sack, stcb, *netp, &abort_now, chk_length, a_rwnd);
3899 /* ABORT signal from sack processing */
3905 case SCTP_HEARTBEAT_REQUEST:
3906 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n");
3907 if ((stcb) && netp && *netp) {
3908 SCTP_STAT_INCR(sctps_recvheartbeat);
3909 sctp_send_heartbeat_ack(stcb, m, *offset,
3912 /* He's alive so give him credit */
3913 stcb->asoc.overall_error_count = 0;
3916 case SCTP_HEARTBEAT_ACK:
3917 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT-ACK\n");
3918 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) {
3922 SCTP_TCB_UNLOCK(locked_tcb);
3926 /* He's alive so give him credit */
3927 stcb->asoc.overall_error_count = 0;
3928 SCTP_STAT_INCR(sctps_recvheartbeatack);
3930 sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch,
3933 case SCTP_ABORT_ASSOCIATION:
3934 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n",
3936 if ((stcb) && netp && *netp)
3937 sctp_handle_abort((struct sctp_abort_chunk *)ch,
3943 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n",
3945 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) {
3948 SCTP_TCB_UNLOCK(locked_tcb);
3953 if (netp && *netp) {
3956 sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch,
3957 stcb, *netp, &abort_flag);
3964 case SCTP_SHUTDOWN_ACK:
3965 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-ACK, stcb %p\n", stcb);
3966 if ((stcb) && (netp) && (*netp))
3967 sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp);
3972 case SCTP_OPERATION_ERROR:
3973 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP-ERR\n");
3974 if ((stcb) && netp && *netp && sctp_handle_error(ch, stcb, *netp) < 0) {
3980 case SCTP_COOKIE_ECHO:
3981 SCTPDBG(SCTP_DEBUG_INPUT3,
3982 "SCTP_COOKIE-ECHO, stcb %p\n", stcb);
3983 if ((stcb) && (stcb->asoc.total_output_queue_size)) {
3986 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3987 /* We are not interested anymore */
3993 * First are we accepting? We do this again here
3994 * sincen it is possible that a previous endpoint
3995 * WAS listening responded to a INIT-ACK and then
3996 * closed. We opened and bound.. and are now no
3999 if (inp->sctp_socket->so_qlimit == 0) {
4000 if ((stcb) && (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
4002 * special case, is this a retran'd
4003 * COOKIE-ECHO or a restarting assoc
4004 * that is a peeled off or
4005 * one-to-one style socket.
4007 goto process_cookie_anyway;
4009 sctp_abort_association(inp, stcb, m, iphlen,
4013 } else if (inp->sctp_socket->so_qlimit) {
4014 /* we are accepting so check limits like TCP */
4015 if (inp->sctp_socket->so_qlen >
4016 inp->sctp_socket->so_qlimit) {
4019 struct sctp_paramhdr *phdr;
4021 if (sctp_abort_if_one_2_one_hits_limit) {
4023 oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
4024 0, M_DONTWAIT, 1, MT_DATA);
4026 SCTP_BUF_LEN(oper) =
4027 sizeof(struct sctp_paramhdr);
4029 struct sctp_paramhdr *);
4031 htons(SCTP_CAUSE_OUT_OF_RESC);
4032 phdr->param_length =
4033 htons(sizeof(struct sctp_paramhdr));
4035 sctp_abort_association(inp, stcb, m,
4036 iphlen, sh, oper, vrf_id);
4042 process_cookie_anyway:
4044 struct mbuf *ret_buf;
4045 struct sctp_inpcb *linp;
4054 SCTP_ASOC_CREATE_LOCK(linp);
4058 sctp_handle_cookie_echo(m, iphlen,
4060 (struct sctp_cookie_echo_chunk *)ch,
4071 SCTP_ASOC_CREATE_UNLOCK(linp);
4073 if (ret_buf == NULL) {
4075 SCTP_TCB_UNLOCK(locked_tcb);
4077 SCTPDBG(SCTP_DEBUG_INPUT3,
4078 "GAK, null buffer\n");
4083 /* if AUTH skipped, see if it verified... */
4088 if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) {
4090 * Restart the timer if we have
4093 struct sctp_tmit_chunk *chk;
4095 chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
4097 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4098 stcb->sctp_ep, stcb,
4104 case SCTP_COOKIE_ACK:
4105 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE-ACK, stcb %p\n", stcb);
4106 if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) {
4108 SCTP_TCB_UNLOCK(locked_tcb);
4112 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4113 /* We are not interested anymore */
4114 if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4117 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
4122 /* He's alive so give him credit */
4123 if ((stcb) && netp && *netp) {
4124 stcb->asoc.overall_error_count = 0;
4125 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp);
4129 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-ECHO\n");
4130 /* He's alive so give him credit */
4131 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_ecne_chunk))) {
4134 SCTP_TCB_UNLOCK(locked_tcb);
4140 stcb->asoc.overall_error_count = 0;
4141 sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch,
4146 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-CWR\n");
4147 /* He's alive so give him credit */
4148 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) {
4151 SCTP_TCB_UNLOCK(locked_tcb);
4157 stcb->asoc.overall_error_count = 0;
4158 sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb);
4161 case SCTP_SHUTDOWN_COMPLETE:
4162 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-COMPLETE, stcb %p\n", stcb);
4163 /* must be first and only chunk */
4164 if ((num_chunks > 1) ||
4165 (length - *offset > (int)SCTP_SIZE32(chk_length))) {
4168 SCTP_TCB_UNLOCK(locked_tcb);
4172 if ((stcb) && netp && *netp) {
4173 sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch,
4180 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n");
4181 /* He's alive so give him credit */
4183 stcb->asoc.overall_error_count = 0;
4184 sctp_handle_asconf(m, *offset,
4185 (struct sctp_asconf_chunk *)ch, stcb);
4188 case SCTP_ASCONF_ACK:
4189 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF-ACK\n");
4190 if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) {
4193 SCTP_TCB_UNLOCK(locked_tcb);
4198 if ((stcb) && netp && *netp) {
4199 /* He's alive so give him credit */
4200 stcb->asoc.overall_error_count = 0;
4201 sctp_handle_asconf_ack(m, *offset,
4202 (struct sctp_asconf_ack_chunk *)ch, stcb, *netp);
4205 case SCTP_FORWARD_CUM_TSN:
4206 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_FWD-TSN\n");
4207 if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) {
4210 SCTP_TCB_UNLOCK(locked_tcb);
4215 /* He's alive so give him credit */
4219 stcb->asoc.overall_error_count = 0;
4221 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4222 /* We are not interested anymore */
4223 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_28);
4227 sctp_handle_forward_tsn(stcb,
4228 (struct sctp_forward_tsn_chunk *)ch, &abort_flag);
4233 stcb->asoc.overall_error_count = 0;
4238 case SCTP_STREAM_RESET:
4239 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n");
4240 if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) {
4243 SCTP_TCB_UNLOCK(locked_tcb);
4248 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4249 /* We are not interested anymore */
4250 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_29);
4254 if (stcb->asoc.peer_supports_strreset == 0) {
4256 * hmm, peer should have announced this, but
4257 * we will turn it on since he is sending us
4260 stcb->asoc.peer_supports_strreset = 1;
4262 if (sctp_handle_stream_reset(stcb, (struct sctp_stream_reset_out_req *)ch)) {
4263 /* stop processing */
4268 case SCTP_PACKET_DROPPED:
4269 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n");
4270 /* re-get it all please */
4271 if (chk_length < sizeof(struct sctp_pktdrop_chunk)) {
4274 SCTP_TCB_UNLOCK(locked_tcb);
4279 if (ch && (stcb) && netp && (*netp)) {
4280 sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch,
4282 min(chk_length, (sizeof(chunk_buf) - 4)));
4287 case SCTP_AUTHENTICATION:
4288 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n");
4289 if (sctp_auth_disable)
4293 /* save the first AUTH for later processing */
4294 if (auth_skipped == 0) {
4295 auth_offset = *offset;
4296 auth_len = chk_length;
4299 /* skip this chunk (temporarily) */
4302 if ((chk_length < (sizeof(struct sctp_auth_chunk))) ||
4303 (chk_length > (sizeof(struct sctp_auth_chunk) +
4304 SCTP_AUTH_DIGEST_LEN_MAX))) {
4307 SCTP_TCB_UNLOCK(locked_tcb);
4312 if (got_auth == 1) {
4313 /* skip this chunk... it's already auth'd */
4317 if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch,
4319 /* auth HMAC failed so dump the packet */
4323 /* remaining chunks are HMAC checked */
4324 stcb->asoc.authenticated = 1;
4330 /* it's an unknown chunk! */
4331 if ((ch->chunk_type & 0x40) && (stcb != NULL)) {
4333 struct sctp_paramhdr *phd;
4335 mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
4336 0, M_DONTWAIT, 1, MT_DATA);
4338 phd = mtod(mm, struct sctp_paramhdr *);
4340 * We cheat and use param type since
4341 * we did not bother to define a
4342 * error cause struct. They are the
4343 * same basic format with different
4346 phd->param_type = htons(SCTP_CAUSE_UNRECOG_CHUNK);
4347 phd->param_length = htons(chk_length + sizeof(*phd));
4348 SCTP_BUF_LEN(mm) = sizeof(*phd);
4349 SCTP_BUF_NEXT(mm) = SCTP_M_COPYM(m, *offset, SCTP_SIZE32(chk_length),
4351 if (SCTP_BUF_NEXT(mm)) {
4352 sctp_queue_op_err(stcb, mm);
4358 if ((ch->chunk_type & 0x80) == 0) {
4359 /* discard this packet */
4362 } /* else skip this bad chunk and continue... */
4364 } /* switch (ch->chunk_type) */
4368 /* get the next chunk */
4369 *offset += SCTP_SIZE32(chk_length);
4370 if (*offset >= length) {
4371 /* no more data left in the mbuf chain */
4374 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4375 sizeof(struct sctp_chunkhdr), chunk_buf);
4378 SCTP_TCB_UNLOCK(locked_tcb);
4389 * Process the ECN bits: something is set, so we must look to see whether it
4390 * is ECN(0), ECN(1), or CE
4392 static __inline void
4393 sctp_process_ecn_marked_a(struct sctp_tcb *stcb, struct sctp_nets *net,
4396 if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
4398 } else if ((ecn_bits & SCTP_ECT1_BIT) == SCTP_ECT1_BIT) {
4400 * we only add to the nonce sum for ECT1, ECT0 does not
4401 * change the NS bit (that we have yet to find a way to send
4405 /* ECN Nonce stuff */
4406 stcb->asoc.receiver_nonce_sum++;
4407 stcb->asoc.receiver_nonce_sum &= SCTP_SACK_NONCE_SUM;
4410 * Drag up the last_echo point if cumack is larger since we
4411 * don't want the point falling way behind by more than
4412 * 2^^31 and then having it be incorrect.
4414 if (compare_with_wrap(stcb->asoc.cumulative_tsn,
4415 stcb->asoc.last_echo_tsn, MAX_TSN)) {
4416 stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
4418 } else if ((ecn_bits & SCTP_ECT0_BIT) == SCTP_ECT0_BIT) {
4420 * Drag up the last_echo point if cumack is larger since we
4421 * don't want the point falling way behind by more than
4422 * 2^^31 and then having it be incorrect.
4424 if (compare_with_wrap(stcb->asoc.cumulative_tsn,
4425 stcb->asoc.last_echo_tsn, MAX_TSN)) {
4426 stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
4431 static __inline void
4432 sctp_process_ecn_marked_b(struct sctp_tcb *stcb, struct sctp_nets *net,
4433 uint32_t high_tsn, uint8_t ecn_bits)
4435 if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
4437 * we possibly must notify the sender that a congestion
4438 * window reduction is in order. We do this by adding a ECNE
4439 * chunk to the output chunk queue. The incoming CWR will
4440 * remove this chunk.
4442 if (compare_with_wrap(high_tsn, stcb->asoc.last_echo_tsn,
4444 /* Yep, we need to add a ECNE */
4445 sctp_send_ecn_echo(stcb, net, high_tsn);
4446 stcb->asoc.last_echo_tsn = high_tsn;
4452 * common input chunk processing (v4 and v6)
4455 sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset,
4456 int length, struct sctphdr *sh, struct sctp_chunkhdr *ch,
4457 struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net,
4458 uint8_t ecn_bits, uint32_t vrf_id)
4461 * Control chunk processing
4464 int fwd_tsn_seen = 0, data_processed = 0;
4465 struct mbuf *m = *mm;
4469 SCTP_STAT_INCR(sctps_recvdatagrams);
4470 #ifdef SCTP_AUDITING_ENABLED
4471 sctp_audit_log(0xE0, 1);
4472 sctp_auditing(0, inp, stcb, net);
4475 SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d\n",
4479 /* always clear this before beginning a packet */
4480 stcb->asoc.authenticated = 0;
4481 stcb->asoc.seen_a_sack_this_pkt = 0;
4483 if (IS_SCTP_CONTROL(ch)) {
4484 /* process the control portion of the SCTP packet */
4485 /* sa_ignore NO_NULL_CHK */
4486 stcb = sctp_process_control(m, iphlen, &offset, length, sh, ch,
4487 inp, stcb, &net, &fwd_tsn_seen, vrf_id);
4490 * This covers us if the cookie-echo was there and
4491 * it changes our INP.
4493 inp = stcb->sctp_ep;
4497 * no control chunks, so pre-process DATA chunks (these
4498 * checks are taken care of by control processing)
4502 * if DATA only packet, and auth is required, then punt...
4503 * can't have authenticated without any AUTH (control)
4506 if ((stcb != NULL) && !sctp_auth_disable &&
4507 sctp_auth_is_required_chunk(SCTP_DATA,
4508 stcb->asoc.local_auth_chunks)) {
4509 /* "silently" ignore */
4510 SCTP_STAT_INCR(sctps_recvauthmissing);
4511 SCTP_TCB_UNLOCK(stcb);
4515 /* out of the blue DATA chunk */
4516 sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
4520 if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) {
4521 /* v_tag mismatch! */
4522 SCTP_STAT_INCR(sctps_badvtag);
4523 SCTP_TCB_UNLOCK(stcb);
4530 * no valid TCB for this packet, or we found it's a bad
4531 * packet while processing control, or we're done with this
4532 * packet (done or skip rest of data), so we drop it...
4537 * DATA chunk processing
4539 /* plow through the data chunks while length > offset */
4542 * Rest should be DATA only. Check authentication state if AUTH for
4545 if ((length > offset) && (stcb != NULL) && !sctp_auth_disable &&
4546 sctp_auth_is_required_chunk(SCTP_DATA,
4547 stcb->asoc.local_auth_chunks) &&
4548 !stcb->asoc.authenticated) {
4549 /* "silently" ignore */
4550 SCTP_STAT_INCR(sctps_recvauthmissing);
4551 SCTPDBG(SCTP_DEBUG_AUTH1,
4552 "Data chunk requires AUTH, skipped\n");
4555 if (length > offset) {
4559 * First check to make sure our state is correct. We would
4560 * not get here unless we really did have a tag, so we don't
4561 * abort if this happens, just dump the chunk silently.
4563 switch (SCTP_GET_STATE(&stcb->asoc)) {
4564 case SCTP_STATE_COOKIE_ECHOED:
4566 * we consider data with valid tags in this state
4567 * shows us the cookie-ack was lost. Imply it was
4570 stcb->asoc.overall_error_count = 0;
4571 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net);
4573 case SCTP_STATE_COOKIE_WAIT:
4575 * We consider OOTB any data sent during asoc setup.
4577 sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
4579 SCTP_TCB_UNLOCK(stcb);
4582 case SCTP_STATE_EMPTY: /* should not happen */
4583 case SCTP_STATE_INUSE: /* should not happen */
4584 case SCTP_STATE_SHUTDOWN_RECEIVED: /* This is a peer error */
4585 case SCTP_STATE_SHUTDOWN_ACK_SENT:
4587 SCTP_TCB_UNLOCK(stcb);
4590 case SCTP_STATE_OPEN:
4591 case SCTP_STATE_SHUTDOWN_SENT:
4594 /* take care of ECN, part 1. */
4595 if (stcb->asoc.ecn_allowed &&
4596 (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
4597 sctp_process_ecn_marked_a(stcb, net, ecn_bits);
4599 /* plow through the data chunks while length > offset */
4600 retval = sctp_process_data(mm, iphlen, &offset, length, sh,
4601 inp, stcb, net, &high_tsn);
4604 * The association aborted, NO UNLOCK needed since
4605 * the association is destroyed.
4611 /* take care of ecn part 2. */
4612 if (stcb->asoc.ecn_allowed &&
4613 (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
4614 sctp_process_ecn_marked_b(stcb, net, high_tsn,
4619 * Anything important needs to have been m_copy'ed in
4623 if ((data_processed == 0) && (fwd_tsn_seen)) {
4626 if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
4627 stcb->asoc.cumulative_tsn, MAX_TSN)) {
4628 /* there was a gap before this data was processed */
4631 sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
4633 /* Again, we aborted so NO UNLOCK needed */
4637 /* trigger send of any chunks in queue... */
4639 #ifdef SCTP_AUDITING_ENABLED
4640 sctp_audit_log(0xE0, 2);
4641 sctp_auditing(1, inp, stcb, net);
4643 SCTPDBG(SCTP_DEBUG_INPUT1,
4644 "Check for chunk output prw:%d tqe:%d tf=%d\n",
4645 stcb->asoc.peers_rwnd,
4646 TAILQ_EMPTY(&stcb->asoc.control_send_queue),
4647 stcb->asoc.total_flight);
4648 un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
4650 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue) ||
4652 (stcb->asoc.peers_rwnd > 0 ||
4653 (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) {
4654 SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n");
4655 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC);
4656 SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n");
4658 #ifdef SCTP_AUDITING_ENABLED
4659 sctp_audit_log(0xE0, 3);
4660 sctp_auditing(2, inp, stcb, net);
4662 SCTP_TCB_UNLOCK(stcb);
4669 sctp_input(i_pak, off)
4674 #ifdef SCTP_MBUF_LOGGING
4680 uint32_t vrf_id = 0;
4684 struct sctp_inpcb *inp = NULL;
4686 uint32_t check, calc_check;
4687 struct sctp_nets *net;
4688 struct sctp_tcb *stcb = NULL;
4689 struct sctp_chunkhdr *ch;
4690 int refcount_up = 0;
4691 int length, mlen, offset;
4694 if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) {
4695 SCTP_RELEASE_PKT(i_pak);
4698 mlen = SCTP_HEADER_LEN(i_pak);
4700 m = SCTP_HEADER_TO_CHAIN(i_pak);
4703 SCTP_STAT_INCR(sctps_recvpackets);
4704 SCTP_STAT_INCR_COUNTER64(sctps_inpackets);
4707 #ifdef SCTP_MBUF_LOGGING
4708 /* Log in any input mbufs */
4709 if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) {
4712 if (SCTP_BUF_IS_EXTENDED(mat)) {
4713 sctp_log_mb(mat, SCTP_MBUF_INPUT);
4715 mat = SCTP_BUF_NEXT(mat);
4719 #ifdef SCTP_PACKET_LOGGING
4720 sctp_packet_log(m, mlen);
4723 * Must take out the iphlen, since mlen expects this (only effect lb
4729 * Get IP, SCTP, and first chunk header together in first mbuf.
4731 ip = mtod(m, struct ip *);
4732 offset = iphlen + sizeof(*sh) + sizeof(*ch);
4733 if (SCTP_BUF_LEN(m) < offset) {
4734 if ((m = m_pullup(m, offset)) == 0) {
4735 SCTP_STAT_INCR(sctps_hdrops);
4738 ip = mtod(m, struct ip *);
4740 sh = (struct sctphdr *)((caddr_t)ip + iphlen);
4741 ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(*sh));
4743 /* SCTP does not allow broadcasts or multicasts */
4744 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
4747 if (SCTP_IS_IT_BROADCAST(ip->ip_dst, m)) {
4749 * We only look at broadcast if its a front state, All
4750 * others we will not have a tcb for anyway.
4754 /* validate SCTP checksum */
4755 if ((sctp_no_csum_on_loopback == 0) || !SCTP_IS_IT_LOOPBACK(m)) {
4757 * we do NOT validate things from the loopback if the sysctl
4760 check = sh->checksum; /* save incoming checksum */
4761 if ((check == 0) && (sctp_no_csum_on_loopback)) {
4763 * special hook for where we got a local address
4764 * somehow routed across a non IFT_LOOP type
4767 if (ip->ip_src.s_addr == ip->ip_dst.s_addr)
4768 goto sctp_skip_csum_4;
4770 sh->checksum = 0; /* prepare for calc */
4771 calc_check = sctp_calculate_sum(m, &mlen, iphlen);
4772 if (calc_check != check) {
4773 SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x m:%p mlen:%d iphlen:%d\n",
4774 calc_check, check, m, mlen, iphlen);
4776 stcb = sctp_findassociation_addr(m, iphlen,
4777 offset - sizeof(*ch),
4780 if ((inp) && (stcb)) {
4781 sctp_send_packet_dropped(stcb, net, m, iphlen, 1);
4782 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR);
4783 } else if ((inp != NULL) && (stcb == NULL)) {
4786 SCTP_STAT_INCR(sctps_badsum);
4787 SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
4790 sh->checksum = calc_check;
4793 /* destination port of 0 is illegal, based on RFC2960. */
4794 if (sh->dest_port == 0) {
4795 SCTP_STAT_INCR(sctps_hdrops);
4798 /* validate mbuf chain length with IP payload length */
4799 if (mlen < (ip->ip_len - iphlen)) {
4800 SCTP_STAT_INCR(sctps_hdrops);
4804 * Locate pcb and tcb for datagram sctp_findassociation_addr() wants
4805 * IP/SCTP/first chunk header...
4807 stcb = sctp_findassociation_addr(m, iphlen, offset - sizeof(*ch),
4808 sh, ch, &inp, &net, vrf_id);
4809 /* inp's ref-count increased && stcb locked */
4811 struct sctp_init_chunk *init_chk, chunk_buf;
4813 SCTP_STAT_INCR(sctps_noport);
4816 * we use the bandwidth limiting to protect against sending
4817 * too many ABORTS all at once. In this case these count the
4818 * same as an ICMP message.
4820 if (badport_bandlim(0) < 0)
4822 #endif /* ICMP_BANDLIM */
4823 SCTPDBG(SCTP_DEBUG_INPUT1,
4824 "Sending a ABORT from packet entry!\n");
4825 if (ch->chunk_type == SCTP_INITIATION) {
4827 * we do a trick here to get the INIT tag, dig in
4828 * and get the tag from the INIT and put it in the
4831 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4832 iphlen + sizeof(*sh), sizeof(*init_chk),
4833 (uint8_t *) & chunk_buf);
4834 if (init_chk != NULL)
4835 sh->v_tag = init_chk->init.initiate_tag;
4837 if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
4838 sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id);
4841 if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) {
4844 if (ch->chunk_type != SCTP_ABORT_ASSOCIATION)
4845 sctp_send_abort(m, iphlen, sh, 0, NULL, vrf_id);
4847 } else if (stcb == NULL) {
4852 * I very much doubt any of the IPSEC stuff will work but I have no
4853 * idea, so I will leave it in place.
4856 if (inp && ipsec4_in_reject(m, &inp->ip_inp.inp)) {
4857 ipsecstat.in_polvio++;
4858 SCTP_STAT_INCR(sctps_hdrops);
4864 * common chunk processing
4866 length = ip->ip_len + iphlen;
4867 offset -= sizeof(struct sctp_chunkhdr);
4869 ecn_bits = ip->ip_tos;
4871 /* sa_ignore NO_NULL_CHK */
4872 sctp_common_input_processing(&m, iphlen, offset, length, sh, ch,
4873 inp, stcb, net, ecn_bits, vrf_id);
4874 /* inp's ref-count reduced && stcb unlocked */
4878 if ((inp) && (refcount_up)) {
4879 /* reduce ref-count */
4880 SCTP_INP_WLOCK(inp);
4881 SCTP_INP_DECR_REF(inp);
4882 SCTP_INP_WUNLOCK(inp);
4887 SCTP_TCB_UNLOCK(stcb);
4889 if ((inp) && (refcount_up)) {
4890 /* reduce ref-count */
4891 SCTP_INP_WLOCK(inp);
4892 SCTP_INP_DECR_REF(inp);
4893 SCTP_INP_WUNLOCK(inp);