2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
7 * a) Redistributions of source code must retain the above copyright notice,
8 * this list of conditions and the following disclaimer.
10 * b) Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the distribution.
14 * c) Neither the name of Cisco Systems, Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived
16 * from this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
31 /* $KAME: sctp_input.c,v 1.27 2005/03/06 16:04:17 itojun Exp $ */
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_var.h>
38 #include <netinet/sctp_sysctl.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctputil.h>
42 #include <netinet/sctp_output.h>
43 #include <netinet/sctp_input.h>
44 #include <netinet/sctp_auth.h>
45 #include <netinet/sctp_indata.h>
46 #include <netinet/sctp_asconf.h>
/*
 * Stop every COOKIE timer on the association, and any INIT timers as
 * well, so timers are quiesced in all INIT-collision cases.
 * Requires the TCB lock held (asserted below).
 * NOTE(review): this view is partially elided (return type line, braces
 * missing) -- verify against the complete source before editing.
 */
52 sctp_stop_all_cookie_timers(struct sctp_tcb *stcb)
54 	struct sctp_nets *net;
57 	 * This now not only stops all cookie timers it also stops any INIT
58 	 * timers as well. This will make sure that the timers are stopped
59 	 * in all collision cases.
61 	SCTP_TCB_LOCK_ASSERT(stcb);
/* Walk every destination net and stop whichever rxt timer type is armed. */
62 	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
63 		if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) {
64 			sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE,
67 			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1);
68 		} else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) {
69 			sctp_timer_stop(SCTP_TIMER_TYPE_INIT,
72 			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2);
/*
 * Handle an inbound INIT chunk: refuse if the socket is not accepting,
 * validate chunk length and mandatory parameters (initiate_tag, a_rwnd,
 * stream counts), validate AUTH-related parameters, then reply with an
 * INIT-ACK carrying a state cookie.  On each failure path an ABORT is
 * sent and *abort_no_unlock is set so the caller skips the unlock.
 * NOTE(review): several lines (closing braces, returns, some
 * *abort_no_unlock assignments) are elided in this view -- confirm
 * against the full file.
 */
79 sctp_handle_init(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
80 struct sctp_init_chunk *cp, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
81 struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id,
84 struct sctp_init *init;
88 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n",
92 /* First are we accepting? */
/* so_qlimit == 0 means no listen backlog; with no existing TCB, abort. */
93 if ((inp->sctp_socket->so_qlimit == 0) && (stcb == NULL)) {
94 SCTPDBG(SCTP_DEBUG_INPUT2,
95 "sctp_handle_init: Abort, so_qlimit:%d\n",
96 inp->sctp_socket->so_qlimit);
98 * FIX ME ?? What about TCP model and we have a
101 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
104 *abort_no_unlock = 1;
/* INIT chunk shorter than the fixed header is a protocol violation. */
107 if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) {
109 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
110 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
113 *abort_no_unlock = 1;
116 /* validate parameters */
117 if (init->initiate_tag == 0) {
118 /* protocol error... send abort */
119 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
120 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
123 *abort_no_unlock = 1;
126 if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) {
127 /* invalid parameter... send abort */
128 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
129 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
/* NOTE(review): the *abort_no_unlock = 1 for this path appears elided here. */
133 if (init->num_inbound_streams == 0) {
134 /* protocol error... send abort */
135 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
136 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
139 *abort_no_unlock = 1;
142 if (init->num_outbound_streams == 0) {
143 /* protocol error... send abort */
144 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
145 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
148 *abort_no_unlock = 1;
/* Bound for AUTH parameter validation: end of this INIT chunk. */
151 init_limit = offset + ntohs(cp->ch.chunk_length);
152 if (sctp_validate_init_auth_params(m, offset + sizeof(*cp),
154 /* auth parameter(s) error... send abort */
155 sctp_abort_association(inp, stcb, m, iphlen, sh, NULL, vrf_id,
158 *abort_no_unlock = 1;
161 /* send an INIT-ACK w/cookie */
162 SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n");
163 sctp_send_initiate_ack(inp, stcb, m, iphlen, offset, sh, cp, vrf_id,
168 * process peer "INIT/INIT-ACK" chunk returns value < 0 on error
/*
 * Absorb the peer's INIT (or INIT-ACK) parameters into the association:
 * save peer vtag and rwnd, seed per-net ssthresh from the peer rwnd,
 * cut back outbound streams (freeing any queued data on abandoned
 * streams) if the peer advertises fewer inbound streams than we
 * pre-opened, reset TSN/mapping state from the peer's initial_tsn, and
 * (re)allocate the inbound stream array sized from the peer's outbound
 * stream count.  Address loading is deferred to COOKIE/INIT-ACK
 * processing (see comment near the end).
 * NOTE(review): loop bodies and braces are partially elided in this
 * view; treat line adjacency here with caution.
 */
171 sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb,
172 struct sctp_nets *net)
174 struct sctp_init *init;
175 struct sctp_association *asoc;
176 struct sctp_nets *lnet;
181 /* save off parameters */
182 asoc->peer_vtag = ntohl(init->initiate_tag);
183 asoc->peers_rwnd = ntohl(init->a_rwnd);
184 if (TAILQ_FIRST(&asoc->nets)) {
185 /* update any ssthresh's that may have a default */
186 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
187 lnet->ssthresh = asoc->peers_rwnd;
189 #if defined(SCTP_CWND_MONITOR) || defined(SCTP_CWND_LOGGING)
190 sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION);
/* Send lock guards the outbound stream structures while we shrink them. */
195 SCTP_TCB_SEND_LOCK(stcb);
196 if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) {
198 struct sctp_stream_out *outs;
199 struct sctp_stream_queue_pending *sp;
201 /* cut back on number of streams */
202 newcnt = ntohs(init->num_inbound_streams);
203 /* This if is probably not needed but I am cautious */
205 /* First make sure no data chunks are trapped */
206 for (i = newcnt; i < asoc->pre_open_streams; i++) {
207 outs = &asoc->strmout[i];
208 sp = TAILQ_FIRST(&outs->outqueue);
/* Drain each abandoned stream: notify the ULP, free data and refs. */
210 TAILQ_REMOVE(&outs->outqueue, sp,
212 asoc->stream_queue_cnt--;
213 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL,
214 stcb, SCTP_NOTIFY_DATAGRAM_UNSENT,
217 sctp_m_freem(sp->data);
220 sctp_free_remote_addr(sp->net);
223 SCTP_PRINTF("sp:%p tcb:%p weird free case\n",
226 sctp_free_a_strmoq(stcb, sp);
227 /* sa_ignore FREED_MEMORY */
228 sp = TAILQ_FIRST(&outs->outqueue);
232 /* cut back the count and abandon the upper streams */
233 asoc->pre_open_streams = newcnt;
235 SCTP_TCB_SEND_UNLOCK(stcb);
236 asoc->streamoutcnt = asoc->pre_open_streams;
/* Seed TSN tracking one before the peer's initial TSN. */
238 asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;
239 #ifdef SCTP_MAP_LOGGING
240 sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
242 /* This is the next one we expect */
243 asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;
245 asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
246 asoc->cumulative_tsn = asoc->asconf_seq_in;
247 asoc->last_echo_tsn = asoc->asconf_seq_in;
248 asoc->advanced_peer_ack_point = asoc->last_acked_seq;
249 /* open the requested streams */
250 if (asoc->strmin != NULL) {
251 /* Free the old ones */
252 struct sctp_queued_to_read *ctl;
254 for (i = 0; i < asoc->streamincnt; i++) {
255 ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
257 TAILQ_REMOVE(&asoc->strmin[i].inqueue, ctl, next);
258 sctp_free_remote_addr(ctl->whoFrom);
259 sctp_m_freem(ctl->data);
261 sctp_free_a_readq(stcb, ctl);
262 ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
265 SCTP_FREE(asoc->strmin);
/* Inbound stream count comes from the peer's outbound count, capped. */
267 asoc->streamincnt = ntohs(init->num_outbound_streams);
268 if (asoc->streamincnt > MAX_SCTP_STREAMS) {
269 asoc->streamincnt = MAX_SCTP_STREAMS;
271 SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt *
272 sizeof(struct sctp_stream_in), "StreamsIn");
273 if (asoc->strmin == NULL) {
274 /* we didn't get memory for the streams! */
275 SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n");
278 for (i = 0; i < asoc->streamincnt; i++) {
279 asoc->strmin[i].stream_no = i;
/* 0xffff: sentinel meaning "nothing delivered yet" on this stream. */
280 asoc->strmin[i].last_sequence_delivered = 0xffff;
282 * U-stream ranges will be set when the cookie is unpacked.
283 * Or for the INIT sender they are un set (if pr-sctp not
284 * supported) when the INIT-ACK arrives.
286 TAILQ_INIT(&asoc->strmin[i].inqueue);
287 asoc->strmin[i].delivery_started = 0;
290 * load_address_from_init will put the addresses into the
291 * association when the COOKIE is processed or the INIT-ACK is
292 * processed. Both types of COOKIE's existing and new call this
293 * routine. It will remove addresses that are no longer in the
294 * association (for the restarting case where addresses are
295 * removed). Up front when the INIT arrives we will discard it if it
296 * is a restart and new addresses have been added.
298 /* sa_ignore MEMLEAK */
303 * INIT-ACK message processing/consumption returns value < 0 on error
/*
 * Consume an INIT-ACK: reject unrecognized parameters (aborting if the
 * peer sent an illegal one), fold the peer's INIT-ACK parameters into
 * the association via sctp_process_init(), load the peer's addresses,
 * negotiate the HMAC id, cancel the INIT timer, compute an RTO sample,
 * and queue a COOKIE-ECHO.  A missing state cookie is treated as a
 * missing mandatory parameter and aborts the association.
 * NOTE(review): braces/returns are partially elided in this view.
 */
306 sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
307 struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
308 struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id,
311 struct sctp_association *asoc;
313 int retval, abort_flag;
314 uint32_t initack_limit;
316 /* First verify that we have no illegal param's */
320 op_err = sctp_arethere_unrecognized_parameters(m,
321 (offset + sizeof(struct sctp_init_chunk)),
322 &abort_flag, (struct sctp_chunkhdr *)cp);
324 /* Send an abort and notify peer */
325 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_CAUSE_PROTOCOL_VIOLATION, op_err);
326 *abort_no_unlock = 1;
330 /* process the peer's parameters in the INIT-ACK */
331 retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb, net);
335 initack_limit = offset + ntohs(cp->ch.chunk_length);
336 /* load all addresses */
337 if ((retval = sctp_load_addresses_from_init(stcb, m, iphlen,
338 (offset + sizeof(struct sctp_init_chunk)), initack_limit, sh,
340 /* Huh, we should abort */
341 SCTPDBG(SCTP_DEBUG_INPUT1,
342 "Load addresses from INIT causes an abort %d\n",
344 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
346 *abort_no_unlock = 1;
/* Pick the strongest HMAC both sides support for AUTH. */
349 stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
350 stcb->asoc.local_hmacs);
352 sctp_queue_op_err(stcb, op_err);
353 /* queuing will steal away the mbuf chain to the out queue */
356 /* extract the cookie and queue it to "echo" it back... */
357 stcb->asoc.overall_error_count = 0;
358 net->error_count = 0;
361 * Cancel the INIT timer, We do this first before queueing the
362 * cookie. We always cancel at the primary to assue that we are
363 * canceling the timer started by the INIT which always goes to the
366 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb,
367 asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4);
369 /* calculate the RTO */
370 net->RTO = sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered);
372 retval = sctp_send_cookie_echo(m, offset, stcb, net);
375 * No cookie, we probably should send a op error. But in any
376 * case if there is no cookie in the INIT-ACK, we can
377 * abandon the peer, its broke.
380 /* We abort with an error of missing mandatory param */
382 sctp_generate_invmanparam(SCTP_CAUSE_MISSING_PARAM);
385 * Expand beyond to include the mandatory
388 struct sctp_inv_mandatory_param *mp;
390 SCTP_BUF_LEN(op_err) =
391 sizeof(struct sctp_inv_mandatory_param);
393 struct sctp_inv_mandatory_param *);
394 /* Subtract the reserved param */
396 htons(sizeof(struct sctp_inv_mandatory_param) - 2);
397 mp->num_param = htonl(1);
/* Report the STATE_COOKIE parameter as the missing mandatory one. */
398 mp->param = htons(SCTP_STATE_COOKIE);
401 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
403 *abort_no_unlock = 1;
/*
 * Handle a HEARTBEAT-ACK: validate the chunk length, reconstruct the
 * address the HB was sent to (IPv4 or IPv6) from the echoed hb_info,
 * look up the matching net, confirm an UNCONFIRMED address when the
 * echoed random values match, restore/adjust the primary destination,
 * clear error counts, mark the address reachable again if it was not,
 * and feed the echoed timestamp into the RTO calculation.
 * NOTE(review): braces/returns are partially elided in this view.
 */
411 sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
412 struct sctp_tcb *stcb, struct sctp_nets *net)
414 struct sockaddr_storage store;
415 struct sockaddr_in *sin;
416 struct sockaddr_in6 *sin6;
417 struct sctp_nets *r_net;
420 if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) {
/* sin and sin6 alias the same storage; family picks which is filled. */
424 sin = (struct sockaddr_in *)&store;
425 sin6 = (struct sockaddr_in6 *)&store;
427 memset(&store, 0, sizeof(store));
428 if (cp->heartbeat.hb_info.addr_family == AF_INET &&
429 cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) {
430 sin->sin_family = cp->heartbeat.hb_info.addr_family;
431 sin->sin_len = cp->heartbeat.hb_info.addr_len;
432 sin->sin_port = stcb->rport;
433 memcpy(&sin->sin_addr, cp->heartbeat.hb_info.address,
434 sizeof(sin->sin_addr));
435 } else if (cp->heartbeat.hb_info.addr_family == AF_INET6 &&
436 cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) {
437 sin6->sin6_family = cp->heartbeat.hb_info.addr_family;
438 sin6->sin6_len = cp->heartbeat.hb_info.addr_len;
439 sin6->sin6_port = stcb->rport;
440 memcpy(&sin6->sin6_addr, cp->heartbeat.hb_info.address,
441 sizeof(sin6->sin6_addr));
445 r_net = sctp_findnet(stcb, (struct sockaddr *)sin);
447 SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n");
/* Matching random values prove this ACK answers our probe HB. */
450 if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
451 (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) &&
452 (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) {
454 * If the its a HB and it's random value is correct when can
455 * confirm the destination.
457 r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
458 if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) {
459 stcb->asoc.primary_destination = r_net;
460 r_net->dest_state &= ~SCTP_ADDR_WAS_PRIMARY;
461 r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
462 r_net = TAILQ_FIRST(&stcb->asoc.nets);
463 if (r_net != stcb->asoc.primary_destination) {
465 * first one on the list is NOT the primary
466 * sctp_cmpaddr() is much more efficent if
467 * the primary is the first on the list,
470 TAILQ_REMOVE(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
471 TAILQ_INSERT_HEAD(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
474 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
475 stcb, 0, (void *)r_net);
477 r_net->error_count = 0;
478 r_net->hb_responded = 1;
/* Echoed send-time from the HB gives us an RTT sample below. */
479 tv.tv_sec = cp->heartbeat.hb_info.time_value_1;
480 tv.tv_usec = cp->heartbeat.hb_info.time_value_2;
481 if (r_net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
482 r_net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
483 r_net->dest_state |= SCTP_ADDR_REACHABLE;
484 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
485 SCTP_HEARTBEAT_SUCCESS, (void *)r_net);
486 /* now was it the primary? if so restore */
487 if (r_net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
488 (void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, r_net);
491 /* Now lets do a RTO with this */
492 r_net->RTO = sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv);
/*
 * Handle an inbound ABORT chunk: stop receive timers, notify the ULP,
 * bump the aborted counter, decrement the established-association gauge
 * if we were established, and free the association (TCB).
 * NOTE(review): early-return / validation lines appear elided here.
 */
496 sctp_handle_abort(struct sctp_abort_chunk *cp,
497 struct sctp_tcb *stcb, struct sctp_nets *net)
499 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n");
503 /* stop any receive timers */
504 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_5);
505 /* notify user of the abort and clean up... */
506 sctp_abort_notification(stcb, 0);
508 SCTP_STAT_INCR_COUNTER32(sctps_aborted);
509 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
510 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
511 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
513 #ifdef SCTP_ASOCLOG_OF_TSNS
514 sctp_print_out_track_log(stcb);
/* Free the TCB last; stcb is invalid after this call. */
516 sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
517 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n");
/*
 * Handle an inbound SHUTDOWN chunk: ignore in COOKIE-WAIT/ECHOED,
 * validate the size, update acked data, terminate any partial-delivery
 * (pdapi) read in progress, move to SHUTDOWN-RECEIVED (notifying the
 * ULP), and if no data remains queued anywhere, send SHUTDOWN-ACK,
 * enter SHUTDOWN-ACK-SENT, and start the SHUTDOWN-ACK timer.
 * NOTE(review): braces/returns are partially elided in this view.
 */
521 sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
522 struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag)
524 struct sctp_association *asoc;
525 int some_on_streamwheel;
527 SCTPDBG(SCTP_DEBUG_INPUT2,
528 "sctp_handle_shutdown: handling SHUTDOWN\n");
532 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
533 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
536 if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) {
537 /* Shutdown NOT the expected size */
/* SHUTDOWN carries a cumulative TSN ack; process it. */
540 sctp_update_acked(stcb, cp, net, abort_flag);
542 if (asoc->control_pdapi) {
544 * With a normal shutdown we assume the end of last record.
546 SCTP_INP_READ_LOCK(stcb->sctp_ep);
547 asoc->control_pdapi->end_added = 1;
548 asoc->control_pdapi->pdapi_aborted = 1;
549 asoc->control_pdapi = NULL;
550 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
551 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
553 /* goto SHUTDOWN_RECEIVED state to block new requests */
554 if (stcb->sctp_socket) {
555 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
556 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) &&
557 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
558 asoc->state = SCTP_STATE_SHUTDOWN_RECEIVED;
560 * notify upper layer that peer has initiated a
563 sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL);
566 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
569 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
571 * stop the shutdown timer, since we WILL move to
574 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_7);
576 /* Now are we there yet? */
577 some_on_streamwheel = 0;
578 if (!TAILQ_EMPTY(&asoc->out_wheel)) {
579 /* Check to see if some data queued */
580 struct sctp_stream_out *outs;
582 TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
583 if (!TAILQ_EMPTY(&outs->outqueue)) {
584 some_on_streamwheel = 1;
/* Outstanding data anywhere means we cannot SHUTDOWN-ACK yet. */
589 if (!TAILQ_EMPTY(&asoc->send_queue) ||
590 !TAILQ_EMPTY(&asoc->sent_queue) ||
591 some_on_streamwheel) {
592 /* By returning we will push more data out */
595 /* no outstanding data to send, so move on... */
596 /* send SHUTDOWN-ACK */
597 sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
598 /* move to SHUTDOWN-ACK-SENT state */
599 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
600 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
601 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
603 asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT;
605 /* start SHUTDOWN timer */
606 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep,
/*
 * Handle an inbound SHUTDOWN-ACK: ignore unless we are in
 * SHUTDOWN-SENT or SHUTDOWN-ACK-SENT, terminate any partial-delivery
 * read, report any outbound data still queued as failed, stop the
 * shutdown timer, send SHUTDOWN-COMPLETE, notify the ULP, and free
 * the association.
 * NOTE(review): braces/returns are partially elided in this view.
 */
612 sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp,
613 struct sctp_tcb *stcb, struct sctp_nets *net)
615 struct sctp_association *asoc;
617 SCTPDBG(SCTP_DEBUG_INPUT2,
618 "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n");
623 /* process according to association state */
624 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
625 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
626 /* unexpected SHUTDOWN-ACK... so ignore... */
627 SCTP_TCB_UNLOCK(stcb);
630 if (asoc->control_pdapi) {
632 * With a normal shutdown we assume the end of last record.
634 SCTP_INP_READ_LOCK(stcb->sctp_ep);
635 asoc->control_pdapi->end_added = 1;
636 asoc->control_pdapi->pdapi_aborted = 1;
637 asoc->control_pdapi = NULL;
638 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
639 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
641 /* are the queues empty? */
642 if (!TAILQ_EMPTY(&asoc->send_queue) ||
643 !TAILQ_EMPTY(&asoc->sent_queue) ||
644 !TAILQ_EMPTY(&asoc->out_wheel)) {
645 sctp_report_all_outbound(stcb, 0);
648 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_8);
649 /* send SHUTDOWN-COMPLETE */
650 sctp_send_shutdown_complete(stcb, net);
651 /* notify upper layer protocol */
652 if (stcb->sctp_socket) {
653 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL);
654 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
655 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
656 /* Set the connected flag to disconnected */
657 stcb->sctp_ep->sctp_socket->so_snd.sb_cc = 0;
660 SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
661 /* free the TCB but first save off the ep */
/* stcb is invalid after sctp_free_assoc() returns. */
662 sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
663 SCTP_FROM_SCTP_INPUT + SCTP_LOC_9);
667 * Skip past the param header and then we will find the chunk that caused the
668 * problem. There are two possiblities ASCONF or FWD-TSN other than that and
669 * our peer must be broken.
/*
 * React to an "unrecognized chunk" error cause: the offending chunk
 * header sits just past the param header.  ASCONF-ACK -> tear down
 * ASCONF state; FWD-TSN -> peer does not support PR-SCTP; anything
 * else is logged as a broken peer.
 */
672 sctp_process_unrecog_chunk(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr,
673 struct sctp_nets *net)
675 struct sctp_chunkhdr *chk;
677 chk = (struct sctp_chunkhdr *)((caddr_t)phdr + sizeof(*phdr));
678 switch (chk->chunk_type) {
679 case SCTP_ASCONF_ACK:
681 sctp_asconf_cleanup(stcb, net);
683 case SCTP_FORWARD_CUM_TSN:
684 stcb->asoc.peer_supports_prsctp = 0;
687 SCTPDBG(SCTP_DEBUG_INPUT2,
688 "Peer does not support chunk type %d(%x)??\n",
689 chk->chunk_type, (uint32_t) chk->chunk_type);
695 * Skip past the param header and then we will find the param that caused the
696 * problem. There are a number of param's in a ASCONF OR the prsctp param
697 * these will turn of specific features.
/*
 * React to an "unrecognized parameter" error cause: the offending
 * parameter sits just past the param header.  Each recognized type
 * disables the corresponding optional feature (PR-SCTP, ECN nonce,
 * ASCONF); unknown types are just logged.
 */
700 sctp_process_unrecog_param(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr)
702 struct sctp_paramhdr *pbad;
705 switch (ntohs(pbad->param_type)) {
707 case SCTP_PRSCTP_SUPPORTED:
708 stcb->asoc.peer_supports_prsctp = 0;
710 case SCTP_SUPPORTED_CHUNK_EXT:
712 /* draft-ietf-tsvwg-addip-sctp */
713 case SCTP_ECN_NONCE_SUPPORTED:
714 stcb->asoc.peer_supports_ecn_nonce = 0;
715 stcb->asoc.ecn_nonce_allowed = 0;
716 stcb->asoc.ecn_allowed = 0;
718 case SCTP_ADD_IP_ADDRESS:
719 case SCTP_DEL_IP_ADDRESS:
720 case SCTP_SET_PRIM_ADDR:
721 stcb->asoc.peer_supports_asconf = 0;
723 case SCTP_SUCCESS_REPORT:
724 case SCTP_ERROR_CAUSE_IND:
725 SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n");
726 SCTPDBG(SCTP_DEBUG_INPUT2,
727 "Turning off ASCONF to this strange peer\n");
728 stcb->asoc.peer_supports_asconf = 0;
731 SCTPDBG(SCTP_DEBUG_INPUT2,
732 "Peer does not support param type %d(%x)??\n",
733 pbad->param_type, (uint32_t) pbad->param_type);
/*
 * Walk the error causes inside an OPERATION-ERROR chunk and react to
 * each: stale-cookie (double the cookie-preserve request and re-INIT,
 * or abort after too many retries), unrecognized chunk/param (disable
 * features), and log-or-ignore the rest.  Bails out of the loop on a
 * bogus (zero or over-long) cause length.
 * NOTE(review): braces, the initial assignments (asoc, phdr setup) and
 * some loop-advance lines are elided in this view -- confirm against
 * the full source.
 */
739 sctp_handle_error(struct sctp_chunkhdr *ch,
740 struct sctp_tcb *stcb, struct sctp_nets *net)
743 struct sctp_paramhdr *phdr;
746 struct sctp_association *asoc;
750 /* parse through all of the errors and process */
752 phdr = (struct sctp_paramhdr *)((caddr_t)ch +
753 sizeof(struct sctp_chunkhdr));
754 chklen = ntohs(ch->chunk_length) - sizeof(struct sctp_chunkhdr);
755 while ((size_t)chklen >= sizeof(struct sctp_paramhdr)) {
756 /* Process an Error Cause */
757 error_type = ntohs(phdr->param_type);
758 error_len = ntohs(phdr->param_length);
759 if ((error_len > chklen) || (error_len == 0)) {
760 /* invalid param length for this param */
761 SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in error param- chunk left:%d errorlen:%d\n",
765 switch (error_type) {
766 case SCTP_CAUSE_INVALID_STREAM:
767 case SCTP_CAUSE_MISSING_PARAM:
768 case SCTP_CAUSE_INVALID_PARAM:
769 case SCTP_CAUSE_NO_USER_DATA:
770 SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %d back? We have a bug :/ (or do they?)\n",
773 case SCTP_CAUSE_STALE_COOKIE:
775 * We only act if we have echoed a cookie and are
778 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
781 p = (int *)((caddr_t)phdr + sizeof(*phdr));
782 /* Save the time doubled */
783 asoc->cookie_preserve_req = ntohl(*p) << 1;
784 asoc->stale_cookie_count++;
785 if (asoc->stale_cookie_count >
786 asoc->max_init_times) {
787 sctp_abort_notification(stcb, 0);
788 /* now free the asoc */
789 sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_10);
792 /* blast back to INIT state */
793 asoc->state &= ~SCTP_STATE_COOKIE_ECHOED;
794 asoc->state |= SCTP_STATE_COOKIE_WAIT;
796 sctp_stop_all_cookie_timers(stcb);
797 sctp_send_initiate(stcb->sctp_ep, stcb);
800 case SCTP_CAUSE_UNRESOLVABLE_ADDR:
802 * Nothing we can do here, we don't do hostname
803 * addresses so if the peer does not like my IPv6
804 * (or IPv4 for that matter) it does not matter. If
805 * they don't support that type of address, they can
806 * NOT possibly get that packet type... i.e. with no
807 * IPv6 you can't recieve a IPv6 packet. so we can
808 * safely ignore this one. If we ever added support
809 * for HOSTNAME Addresses, then we would need to do
813 case SCTP_CAUSE_UNRECOG_CHUNK:
814 sctp_process_unrecog_chunk(stcb, phdr, net);
816 case SCTP_CAUSE_UNRECOG_PARAM:
817 sctp_process_unrecog_param(stcb, phdr);
819 case SCTP_CAUSE_COOKIE_IN_SHUTDOWN:
821 * We ignore this since the timer will drive out a
822 * new cookie anyway and there timer will drive us
823 * to send a SHUTDOWN_COMPLETE. We can't send one
824 * here since we don't have their tag.
827 case SCTP_CAUSE_DELETING_LAST_ADDR:
828 case SCTP_CAUSE_RESOURCE_SHORTAGE:
829 case SCTP_CAUSE_DELETING_SRC_ADDR:
831 * We should NOT get these here, but in a
834 SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a Operational Error?<%d>?\n",
837 case SCTP_CAUSE_OUT_OF_RESC:
839 * And what, pray tell do we do with the fact that
840 * the peer is out of resources? Not really sure we
841 * could do anything but abort. I suspect this
842 * should have came WITH an abort instead of in a
847 SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown error type = 0x%xh\n",
/* Advance to the next cause, padded to a 4-byte boundary. */
851 adjust = SCTP_SIZE32(error_len);
853 phdr = (struct sctp_paramhdr *)((caddr_t)phdr + adjust);
/*
 * Handle an inbound INIT-ACK: validate chunk length and mandatory
 * parameters (aborting on protocol errors), then dispatch on the
 * association state.  Only COOKIE-WAIT is the expected state: confirm
 * the primary destination, process the INIT-ACK parameters, move to
 * COOKIE-ECHOED, reset error counts, and restart the COOKIE timer
 * (the COOKIE-ECHO itself is sent by the caller's output pass).  All
 * other states discard the chunk.
 * NOTE(review): braces/returns are partially elided in this view.
 */
859 sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
860 struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
861 struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id,
864 struct sctp_init_ack *init_ack;
868 SCTPDBG(SCTP_DEBUG_INPUT2,
869 "sctp_handle_init_ack: handling INIT-ACK\n");
872 SCTPDBG(SCTP_DEBUG_INPUT2,
873 "sctp_handle_init_ack: TCB is null\n");
876 if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) {
878 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
879 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
881 *abort_no_unlock = 1;
884 init_ack = &cp->init;
885 /* validate parameters */
886 if (init_ack->initiate_tag == 0) {
887 /* protocol error... send an abort */
888 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
889 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
891 *abort_no_unlock = 1;
894 if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) {
895 /* protocol error... send an abort */
896 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
897 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
899 *abort_no_unlock = 1;
902 if (init_ack->num_inbound_streams == 0) {
903 /* protocol error... send an abort */
904 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
905 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
907 *abort_no_unlock = 1;
910 if (init_ack->num_outbound_streams == 0) {
911 /* protocol error... send an abort */
912 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
913 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
915 *abort_no_unlock = 1;
918 /* process according to association state... */
919 state = &stcb->asoc.state;
920 switch (*state & SCTP_STATE_MASK) {
921 case SCTP_STATE_COOKIE_WAIT:
922 /* this is the expected state for this chunk */
923 /* process the INIT-ACK parameters */
924 if (stcb->asoc.primary_destination->dest_state &
925 SCTP_ADDR_UNCONFIRMED) {
927 * The primary is where we sent the INIT, we can
928 * always consider it confirmed when the INIT-ACK is
929 * returned. Do this before we load addresses
932 stcb->asoc.primary_destination->dest_state &=
933 ~SCTP_ADDR_UNCONFIRMED;
934 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
935 stcb, 0, (void *)stcb->asoc.primary_destination);
937 if (sctp_process_init_ack(m, iphlen, offset, sh, cp, stcb,
938 net, abort_no_unlock, vrf_id,
940 /* error in parsing parameters */
943 /* update our state */
944 SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n");
/* Preserve a pending SHUTDOWN request across the state change. */
945 if (*state & SCTP_STATE_SHUTDOWN_PENDING) {
946 *state = SCTP_STATE_COOKIE_ECHOED |
947 SCTP_STATE_SHUTDOWN_PENDING;
949 *state = SCTP_STATE_COOKIE_ECHOED;
952 /* reset the RTO calc */
953 stcb->asoc.overall_error_count = 0;
954 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
956 * collapse the init timer back in case of a exponential
959 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep,
962 * the send at the end of the inbound data processing will
963 * cause the cookie to be sent
966 case SCTP_STATE_SHUTDOWN_SENT:
967 /* incorrect state... discard */
969 case SCTP_STATE_COOKIE_ECHOED:
970 /* incorrect state... discard */
972 case SCTP_STATE_OPEN:
973 /* incorrect state... discard */
975 case SCTP_STATE_EMPTY:
976 case SCTP_STATE_INUSE:
978 /* incorrect state... discard */
982 SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n");
988 * handle a state cookie for an existing association m: input packet mbuf
989 * chain-- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a
990 * "split" mbuf and the cookie signature does not exist offset: offset into
991 * mbuf to the cookie-echo chunk
993 static struct sctp_tcb *
994 sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
995 struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
996 struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net,
997 struct sockaddr *init_src, int *notification, sctp_assoc_t * sac_assoc_id,
998 uint32_t vrf_id, uint32_t table_id)
1000 struct sctp_association *asoc;
1001 struct sctp_init_chunk *init_cp, init_buf;
1002 struct sctp_init_ack_chunk *initack_cp, initack_buf;
1004 int init_offset, initack_offset, i;
1009 /* I know that the TCB is non-NULL from the caller */
1011 for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) {
1012 if (asoc->cookie_how[how_indx] == 0)
1015 if (how_indx < sizeof(asoc->cookie_how)) {
1016 asoc->cookie_how[how_indx] = 1;
1018 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
1019 /* SHUTDOWN came in after sending INIT-ACK */
1020 struct mbuf *op_err;
1021 struct sctp_paramhdr *ph;
1023 sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
1024 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
1025 0, M_DONTWAIT, 1, MT_DATA);
1026 if (op_err == NULL) {
1030 /* pre-reserve some space */
1031 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
1032 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
1033 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1035 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr);
1036 ph = mtod(op_err, struct sctp_paramhdr *);
1037 ph->param_type = htons(SCTP_CAUSE_COOKIE_IN_SHUTDOWN);
1038 ph->param_length = htons(sizeof(struct sctp_paramhdr));
1039 sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag,
1041 if (how_indx < sizeof(asoc->cookie_how))
1042 asoc->cookie_how[how_indx] = 2;
1046 * find and validate the INIT chunk in the cookie (peer's info) the
1047 * INIT should start after the cookie-echo header struct (chunk
1048 * header, state cookie header struct)
1050 init_offset = offset += sizeof(struct sctp_cookie_echo_chunk);
1052 init_cp = (struct sctp_init_chunk *)
1053 sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
1054 (uint8_t *) & init_buf);
1055 if (init_cp == NULL) {
1056 /* could not pull a INIT chunk in cookie */
1059 chk_length = ntohs(init_cp->ch.chunk_length);
1060 if (init_cp->ch.chunk_type != SCTP_INITIATION) {
1064 * find and validate the INIT-ACK chunk in the cookie (my info) the
1065 * INIT-ACK follows the INIT chunk
1067 initack_offset = init_offset + SCTP_SIZE32(chk_length);
1068 initack_cp = (struct sctp_init_ack_chunk *)
1069 sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
1070 (uint8_t *) & initack_buf);
1071 if (initack_cp == NULL) {
1072 /* could not pull INIT-ACK chunk in cookie */
1075 chk_length = ntohs(initack_cp->ch.chunk_length);
1076 if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
1079 if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1080 (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) {
1082 * case D in Section 5.2.4 Table 2: MMAA process accordingly
1083 * to get into the OPEN state
1085 if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1087 panic("Case D and non-match seq?");
1089 SCTP_PRINTF("Case D, seq non-match %x vs %x?\n",
1090 ntohl(initack_cp->init.initial_tsn),
1091 asoc->init_seq_number);
1094 switch SCTP_GET_STATE
1096 case SCTP_STATE_COOKIE_WAIT:
1097 case SCTP_STATE_COOKIE_ECHOED:
1099 * INIT was sent but got a COOKIE_ECHO with the
1100 * correct tags... just accept it...but we must
1101 * process the init so that we can make sure we have
1102 * the right seq no's.
1104 /* First we must process the INIT !! */
1105 retval = sctp_process_init(init_cp, stcb, net);
1107 if (how_indx < sizeof(asoc->cookie_how))
1108 asoc->cookie_how[how_indx] = 3;
1111 /* we have already processed the INIT so no problem */
1112 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb,
1113 net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_11);
1114 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_12);
1115 /* update current state */
1116 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
1117 SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1119 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1120 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1121 asoc->state = SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING;
1122 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1123 stcb->sctp_ep, stcb, asoc->primary_destination);
1126 /* if ok, move to OPEN state */
1127 asoc->state = SCTP_STATE_OPEN;
1129 SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1130 sctp_stop_all_cookie_timers(stcb);
1131 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1132 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1133 (inp->sctp_socket->so_qlimit == 0)
1136 * Here is where collision would go if we
1137 * did a connect() and instead got a
1138 * init/init-ack/cookie done before the
1139 * init-ack came back..
1141 stcb->sctp_ep->sctp_flags |=
1142 SCTP_PCB_FLAGS_CONNECTED;
1143 soisconnected(stcb->sctp_ep->sctp_socket);
1145 /* notify upper layer */
1146 *notification = SCTP_NOTIFY_ASSOC_UP;
1148 * since we did not send a HB make sure we don't
1151 net->hb_responded = 1;
1153 if (stcb->asoc.sctp_autoclose_ticks &&
1154 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) {
1155 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
1161 * we're in the OPEN state (or beyond), so peer must
1162 * have simply lost the COOKIE-ACK
1166 sctp_stop_all_cookie_timers(stcb);
1168 * We ignore the return code here.. not sure if we should
1169 * somehow abort.. but we do have an existing asoc. This
1170 * really should not fail.
1172 if (sctp_load_addresses_from_init(stcb, m, iphlen,
1173 init_offset + sizeof(struct sctp_init_chunk),
1174 initack_offset, sh, init_src)) {
1175 if (how_indx < sizeof(asoc->cookie_how))
1176 asoc->cookie_how[how_indx] = 4;
1179 /* respond with a COOKIE-ACK */
1180 sctp_toss_old_cookies(stcb, asoc);
1181 sctp_send_cookie_ack(stcb);
1182 if (how_indx < sizeof(asoc->cookie_how))
1183 asoc->cookie_how[how_indx] = 5;
1186 if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1187 ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag &&
1188 cookie->tie_tag_my_vtag == 0 &&
1189 cookie->tie_tag_peer_vtag == 0) {
1191 * case C in Section 5.2.4 Table 2: XMOO silently discard
1193 if (how_indx < sizeof(asoc->cookie_how))
1194 asoc->cookie_how[how_indx] = 6;
1197 if (ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag &&
1198 (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag ||
1199 init_cp->init.initiate_tag == 0)) {
1201 * case B in Section 5.2.4 Table 2: MXAA or MOAA my info
1202 * should be ok, re-accept peer info
1204 if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1206 * Extension of case C. If we hit this, then the
1207 * random number generator returned the same vtag
1208 * when we first sent our INIT-ACK and when we later
1209 * sent our INIT. The side with the seq numbers that
1210 * are different will be the one that normnally
1211 * would have hit case C. This in effect "extends"
1212 * our vtags in this collision case to be 64 bits.
1213 * The same collision could occur aka you get both
1214 * vtag and seq number the same twice in a row.. but
1215 * is much less likely. If it did happen then we
1216 * would proceed through and bring up the assoc.. we
1217 * may end up with the wrong stream setup however..
1218 * which would be bad.. but there is no way to
1219 * tell.. until we send on a stream that does not
1222 if (how_indx < sizeof(asoc->cookie_how))
1223 asoc->cookie_how[how_indx] = 7;
1227 if (how_indx < sizeof(asoc->cookie_how))
1228 asoc->cookie_how[how_indx] = 8;
1229 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_13);
1230 sctp_stop_all_cookie_timers(stcb);
1232 * since we did not send a HB make sure we don't double
1235 net->hb_responded = 1;
1236 if (stcb->asoc.sctp_autoclose_ticks &&
1237 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
1238 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
1241 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1242 asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
1244 /* Note last_cwr_tsn? where is this used? */
1245 asoc->last_cwr_tsn = asoc->init_seq_number - 1;
1246 if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) {
1248 * Ok the peer probably discarded our data (if we
1249 * echoed a cookie+data). So anything on the
1250 * sent_queue should be marked for retransmit, we
1251 * may not get something to kick us so it COULD
1252 * still take a timeout to move these.. but it can't
1253 * hurt to mark them.
1255 struct sctp_tmit_chunk *chk;
1257 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
1258 if (chk->sent < SCTP_DATAGRAM_RESEND) {
1259 chk->sent = SCTP_DATAGRAM_RESEND;
1260 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1266 /* process the INIT info (peer's info) */
1267 retval = sctp_process_init(init_cp, stcb, net);
1269 if (how_indx < sizeof(asoc->cookie_how))
1270 asoc->cookie_how[how_indx] = 9;
1273 if (sctp_load_addresses_from_init(stcb, m, iphlen,
1274 init_offset + sizeof(struct sctp_init_chunk),
1275 initack_offset, sh, init_src)) {
1276 if (how_indx < sizeof(asoc->cookie_how))
1277 asoc->cookie_how[how_indx] = 10;
1280 if ((asoc->state & SCTP_STATE_COOKIE_WAIT) ||
1281 (asoc->state & SCTP_STATE_COOKIE_ECHOED)) {
1282 *notification = SCTP_NOTIFY_ASSOC_UP;
1284 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1285 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1286 (inp->sctp_socket->so_qlimit == 0)) {
1287 stcb->sctp_ep->sctp_flags |=
1288 SCTP_PCB_FLAGS_CONNECTED;
1289 soisconnected(stcb->sctp_ep->sctp_socket);
1291 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
1292 SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1294 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1295 SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1296 SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1297 } else if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
1298 SCTP_STAT_INCR_COUNTER32(sctps_restartestab);
1300 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1302 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1303 asoc->state = SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING;
1304 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1305 stcb->sctp_ep, stcb, asoc->primary_destination);
1308 asoc->state = SCTP_STATE_OPEN;
1310 sctp_stop_all_cookie_timers(stcb);
1311 sctp_toss_old_cookies(stcb, asoc);
1312 sctp_send_cookie_ack(stcb);
1315 * only if we have retrans set do we do this. What
1316 * this call does is get only the COOKIE-ACK out and
1317 * then when we return the normal call to
1318 * sctp_chunk_output will get the retrans out behind
1321 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_COOKIE_ACK);
1323 if (how_indx < sizeof(asoc->cookie_how))
1324 asoc->cookie_how[how_indx] = 11;
1328 if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1329 ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) &&
1330 cookie->tie_tag_my_vtag == asoc->my_vtag_nonce &&
1331 cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce &&
1332 cookie->tie_tag_peer_vtag != 0) {
1333 struct sctpasochead *head;
1336 * case A in Section 5.2.4 Table 2: XXMM (peer restarted)
1339 if (how_indx < sizeof(asoc->cookie_how))
1340 asoc->cookie_how[how_indx] = 12;
1341 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_14);
1342 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_15);
1344 *sac_assoc_id = sctp_get_associd(stcb);
1345 /* notify upper layer */
1346 *notification = SCTP_NOTIFY_ASSOC_RESTART;
1347 atomic_add_int(&stcb->asoc.refcnt, 1);
1348 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_OPEN) &&
1349 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
1350 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
1351 SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1353 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
1354 SCTP_STAT_INCR_GAUGE32(sctps_restartestab);
1355 } else if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
1356 SCTP_STAT_INCR_GAUGE32(sctps_collisionestab);
1358 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1359 asoc->state = SCTP_STATE_OPEN |
1360 SCTP_STATE_SHUTDOWN_PENDING;
1361 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1362 stcb->sctp_ep, stcb, asoc->primary_destination);
1364 } else if (!(asoc->state & SCTP_STATE_SHUTDOWN_SENT)) {
1365 /* move to OPEN state, if not in SHUTDOWN_SENT */
1366 asoc->state = SCTP_STATE_OPEN;
1368 asoc->pre_open_streams =
1369 ntohs(initack_cp->init.num_outbound_streams);
1370 asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
1371 asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
1373 asoc->last_cwr_tsn = asoc->init_seq_number - 1;
1374 asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
1376 asoc->str_reset_seq_in = asoc->init_seq_number;
1378 asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1379 if (asoc->mapping_array)
1380 memset(asoc->mapping_array, 0,
1381 asoc->mapping_array_size);
1382 SCTP_TCB_UNLOCK(stcb);
1383 SCTP_INP_INFO_WLOCK();
1384 SCTP_INP_WLOCK(stcb->sctp_ep);
1385 SCTP_TCB_LOCK(stcb);
1386 atomic_add_int(&stcb->asoc.refcnt, -1);
1387 /* send up all the data */
1388 SCTP_TCB_SEND_LOCK(stcb);
1390 sctp_report_all_outbound(stcb, 1);
1391 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
1392 stcb->asoc.strmout[i].stream_no = i;
1393 stcb->asoc.strmout[i].next_sequence_sent = 0;
1394 stcb->asoc.strmout[i].last_msg_incomplete = 0;
1396 /* process the INIT-ACK info (my info) */
1397 asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
1398 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1400 /* pull from vtag hash */
1401 LIST_REMOVE(stcb, sctp_asocs);
1402 /* re-insert to new vtag position */
1403 head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
1404 sctppcbinfo.hashasocmark)];
1406 * put it in the bucket in the vtag hash of assoc's for the
1409 LIST_INSERT_HEAD(head, stcb, sctp_asocs);
1411 /* Is this the first restart? */
1412 if (stcb->asoc.in_restart_hash == 0) {
1413 /* Ok add it to assoc_id vtag hash */
1414 head = &sctppcbinfo.sctp_restarthash[SCTP_PCBHASH_ASOC(stcb->asoc.assoc_id,
1415 sctppcbinfo.hashrestartmark)];
1416 LIST_INSERT_HEAD(head, stcb, sctp_tcbrestarhash);
1417 stcb->asoc.in_restart_hash = 1;
1419 /* process the INIT info (peer's info) */
1420 SCTP_TCB_SEND_UNLOCK(stcb);
1421 SCTP_INP_WUNLOCK(stcb->sctp_ep);
1422 SCTP_INP_INFO_WUNLOCK();
1424 retval = sctp_process_init(init_cp, stcb, net);
1426 if (how_indx < sizeof(asoc->cookie_how))
1427 asoc->cookie_how[how_indx] = 13;
1432 * since we did not send a HB make sure we don't double
1435 net->hb_responded = 1;
1437 if (sctp_load_addresses_from_init(stcb, m, iphlen,
1438 init_offset + sizeof(struct sctp_init_chunk),
1439 initack_offset, sh, init_src)) {
1440 if (how_indx < sizeof(asoc->cookie_how))
1441 asoc->cookie_how[how_indx] = 14;
1445 /* respond with a COOKIE-ACK */
1446 sctp_stop_all_cookie_timers(stcb);
1447 sctp_toss_old_cookies(stcb, asoc);
1448 sctp_send_cookie_ack(stcb);
1449 if (how_indx < sizeof(asoc->cookie_how))
1450 asoc->cookie_how[how_indx] = 15;
1454 if (how_indx < sizeof(asoc->cookie_how))
1455 asoc->cookie_how[how_indx] = 16;
1456 /* all other cases... */
1462 * handle a state cookie for a new association m: input packet mbuf chain--
1463 * assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a "split" mbuf
1464 * and the cookie signature does not exist offset: offset into mbuf to the
1465 * cookie-echo chunk length: length of the cookie chunk to: where the init
1466 * was from returns a new TCB
/*
 * NOTE(review): builds a brand-new TCB from a COOKIE-ECHO whose
 * signature has already been validated by the caller
 * (sctp_handle_cookie_echo).  The cookie carries the original INIT
 * (peer's info) and INIT-ACK (my info) back-to-back; both are
 * re-parsed and sanity-checked below, then a TCB is allocated, peer
 * addresses are loaded, any AUTH chunk that was skipped earlier is
 * verified, the assoc is moved to OPEN, and a COOKIE-ACK is sent.
 * On the visible failure paths the partly-built assoc is aborted
 * and/or freed (sctp_abort_association / sctp_free_assoc) and, per
 * the header comment above, NULL is returned instead of a TCB.
 */
1468 static struct sctp_tcb *
1469 sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
1470 struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1471 struct sctp_inpcb *inp, struct sctp_nets **netp,
1472 struct sockaddr *init_src, int *notification,
1473 int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1474 uint32_t vrf_id, uint32_t table_id)
1476 struct sctp_tcb *stcb;
1477 struct sctp_init_chunk *init_cp, init_buf;
1478 struct sctp_init_ack_chunk *initack_cp, initack_buf;
/* sa_store is scratch space; it is re-used later to rebuild our local address */
1479 struct sockaddr_storage sa_store;
1480 struct sockaddr *initack_src = (struct sockaddr *)&sa_store;
1481 struct sockaddr_in *sin;
1482 struct sockaddr_in6 *sin6;
1483 struct sctp_association *asoc;
1485 int init_offset, initack_offset, initack_limit;
1489 uint8_t auth_chunk_buf[SCTP_PARAM_BUFFER_SIZE];
1492 * find and validate the INIT chunk in the cookie (peer's info) the
1493 * INIT should start after the cookie-echo header struct (chunk
1494 * header, state cookie header struct)
1496 init_offset = offset + sizeof(struct sctp_cookie_echo_chunk);
1497 init_cp = (struct sctp_init_chunk *)
1498 sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
1499 (uint8_t *) & init_buf);
1500 if (init_cp == NULL) {
1501 /* could not pull a INIT chunk in cookie */
1502 SCTPDBG(SCTP_DEBUG_INPUT1,
1503 "process_cookie_new: could not pull INIT chunk hdr\n");
/* sanity: the chunk we pulled must really be an INIT */
1506 chk_length = ntohs(init_cp->ch.chunk_length);
1507 if (init_cp->ch.chunk_type != SCTP_INITIATION) {
1508 SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n");
/* the INIT-ACK immediately follows the 32-bit-padded INIT */
1511 initack_offset = init_offset + SCTP_SIZE32(chk_length);
1513 * find and validate the INIT-ACK chunk in the cookie (my info) the
1514 * INIT-ACK follows the INIT chunk
1516 initack_cp = (struct sctp_init_ack_chunk *)
1517 sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
1518 (uint8_t *) & initack_buf);
1519 if (initack_cp == NULL) {
1520 /* could not pull INIT-ACK chunk in cookie */
1521 SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n");
1524 chk_length = ntohs(initack_cp->ch.chunk_length);
1525 if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
1529 * NOTE: We can't use the INIT_ACK's chk_length to determine the
1530 * "initack_limit" value. This is because the chk_length field
1531 * includes the length of the cookie, but the cookie is omitted when
1532 * the INIT and INIT_ACK are tacked onto the cookie...
1534 initack_limit = offset + cookie_len;
1537 * now that we know the INIT/INIT-ACK are in place, create a new TCB
/* our vtag is the initiate_tag we originally chose in our INIT-ACK */
1540 stcb = sctp_aloc_assoc(inp, init_src, 0, &error,
1541 ntohl(initack_cp->init.initiate_tag), vrf_id);
1543 struct mbuf *op_err;
1545 /* memory problem? */
1546 SCTPDBG(SCTP_DEBUG_INPUT1,
1547 "process_cookie_new: no room for another TCB!\n");
1548 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1549 sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
1550 sh, op_err, vrf_id, table_id);
1553 /* get the correct sctp_nets */
1555 *netp = sctp_findnet(stcb, init_src);
1558 /* save the table id (vrf_id is done in aloc_assoc) */
1559 asoc->table_id = table_id;
1560 /* get scope variables out of cookie */
1561 asoc->ipv4_local_scope = cookie->ipv4_scope;
1562 asoc->site_scope = cookie->site_scope;
1563 asoc->local_scope = cookie->local_scope;
1564 asoc->loopback_scope = cookie->loopback_scope;
/*
 * The endpoint's address-family legality flags must still match what
 * was recorded in the cookie; if the EP was reconfigured while the
 * cookie was in flight, the association is aborted below.
 */
1566 if ((asoc->ipv4_addr_legal != cookie->ipv4_addr_legal) ||
1567 (asoc->ipv6_addr_legal != cookie->ipv6_addr_legal)) {
1568 struct mbuf *op_err;
1571 * Houston we have a problem. The EP changed while the
1572 * cookie was in flight. Only recourse is to abort the
1575 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1576 sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
1577 sh, op_err, vrf_id, table_id);
1580 /* process the INIT-ACK info (my info) */
/* seed all local tag/TSN/sequence state from our own echoed INIT-ACK */
1581 old_tag = asoc->my_vtag;
1582 asoc->assoc_id = asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
1583 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1584 asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
1585 asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
1586 asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
1587 asoc->last_cwr_tsn = asoc->init_seq_number - 1;
1588 asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
1589 asoc->str_reset_seq_in = asoc->init_seq_number;
1591 asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1593 /* process the INIT info (peer's info) */
/* on failure (retval checked after the call) the new assoc is torn down */
1595 retval = sctp_process_init(init_cp, stcb, *netp);
1599 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
1602 /* load all addresses */
1603 if (sctp_load_addresses_from_init(stcb, m, iphlen,
1604 init_offset + sizeof(struct sctp_init_chunk), initack_offset, sh,
1606 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_17);
1610 * verify any preceding AUTH chunk that was skipped
1612 /* pull the local authentication parameters from the cookie/init-ack */
1613 sctp_auth_get_cookie_params(stcb, m,
1614 initack_offset + sizeof(struct sctp_init_ack_chunk),
1615 initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)));
/*
 * Re-validate the AUTH chunk whose check was deferred (auth_skipped)
 * until a TCB with keying material existed.
 */
1617 struct sctp_auth_chunk *auth;
1619 auth = (struct sctp_auth_chunk *)
1620 sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf);
1621 if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) {
1622 /* auth HMAC failed, dump the assoc and packet */
1623 SCTPDBG(SCTP_DEBUG_AUTH1,
1624 "COOKIE-ECHO: AUTH failed\n");
1625 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_18);
1628 /* remaining chunks checked... good to go */
1629 stcb->asoc.authenticated = 1;
1632 /* update current state */
/* preserve SHUTDOWN_PENDING (and arm the guard timer) across the move to OPEN */
1633 SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
1634 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1635 asoc->state = SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING;
1636 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1637 stcb->sctp_ep, stcb, asoc->primary_destination);
1639 asoc->state = SCTP_STATE_OPEN;
1641 sctp_stop_all_cookie_timers(stcb);
/* this side did not initiate, so this counts as a passive establishment */
1642 SCTP_STAT_INCR_COUNTER32(sctps_passiveestab);
1643 SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1646 * if we're doing ASCONFs, check to see if we have any new local
1647 * addresses that need to get added to the peer (eg. addresses
1648 * changed while cookie echo in flight). This needs to be done
1649 * after we go to the OPEN state to do the correct asconf
1650 * processing. else, make sure we have the correct addresses in our
1654 /* warning, we re-use sin, sin6, sa_store here! */
1655 /* pull in local_address (our "from" address) */
1656 if (cookie->laddr_type == SCTP_IPV4_ADDRESS) {
1657 /* source addr is IPv4 */
1658 sin = (struct sockaddr_in *)initack_src;
1659 memset(sin, 0, sizeof(*sin));
1660 sin->sin_family = AF_INET;
1661 sin->sin_len = sizeof(struct sockaddr_in);
1662 sin->sin_addr.s_addr = cookie->laddress[0];
1663 } else if (cookie->laddr_type == SCTP_IPV6_ADDRESS) {
1664 /* source addr is IPv6 */
1665 sin6 = (struct sockaddr_in6 *)initack_src;
1666 memset(sin6, 0, sizeof(*sin6));
1667 sin6->sin6_family = AF_INET6;
1668 sin6->sin6_len = sizeof(struct sockaddr_in6);
1669 sin6->sin6_scope_id = cookie->scope_id;
1670 memcpy(&sin6->sin6_addr, cookie->laddress,
1671 sizeof(sin6->sin6_addr));
/* NOTE(review): any other laddr_type appears to free the assoc here — confirm against full source */
1673 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_19);
/* compare the cookie's address list against our current bound addresses */
1677 sctp_check_address_list(stcb, m,
1678 initack_offset + sizeof(struct sctp_init_ack_chunk),
1679 initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)),
1680 initack_src, cookie->local_scope, cookie->site_scope,
1681 cookie->ipv4_scope, cookie->loopback_scope);
1684 /* set up to notify upper layer */
1685 *notification = SCTP_NOTIFY_ASSOC_UP;
/*
 * TCP-model socket that is NOT listening (so_qlimit == 0): mark the
 * PCB connected and wake the socket layer now; a listening socket is
 * handled by the caller's accept path instead.
 */
1686 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1687 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1688 (inp->sctp_socket->so_qlimit == 0)) {
1690 * This is an endpoint that called connect() how it got a
1691 * cookie that is NEW is a bit of a mystery. It must be that
1692 * the INIT was sent, but before it got there.. a complete
1693 * INIT/INIT-ACK/COOKIE arrived. But of course then it
1694 * should have went to the other code.. not here.. oh well..
1695 * a bit of protection is worth having..
1697 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
1698 soisconnected(stcb->sctp_ep->sctp_socket);
1699 } else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
1700 (inp->sctp_socket->so_qlimit)) {
1702 * We don't want to do anything with this one. Since it is
1703 * the listening guy. The timer will get started for
1704 * accepted connections in the caller.
1708 /* since we did not send a HB make sure we don't double things */
1709 if ((netp) && (*netp))
1710 (*netp)->hb_responded = 1;
/* arm auto-close only when the feature is enabled on this endpoint */
1712 if (stcb->asoc.sctp_autoclose_ticks &&
1713 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
1714 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
1716 /* respond with a COOKIE-ACK */
1717 /* calculate the RTT */
/*
 * cookie->time_entered was stamped when we built the cookie, so the
 * cookie's round trip gives a free RTT sample for this net's RTO.
 */
1718 if ((netp) && (*netp))
1719 (*netp)->RTO = sctp_calculate_rto(stcb, asoc, *netp,
1720 &cookie->time_entered);
1721 sctp_send_cookie_ack(stcb);
1727 * handles a COOKIE-ECHO message stcb: modified to either a new or left as
1728 * existing (non-NULL) TCB
1730 static struct mbuf *
1731 sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
1732 struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp,
1733 struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp,
1734 int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1735 struct sctp_tcb **locked_tcb, uint32_t vrf_id, uint32_t table_id)
1737 struct sctp_state_cookie *cookie;
1738 struct sockaddr_in6 sin6;
1739 struct sockaddr_in sin;
1740 struct sctp_tcb *l_stcb = *stcb;
1741 struct sctp_inpcb *l_inp;
1742 struct sockaddr *to;
1743 sctp_assoc_t sac_restart_id;
1744 struct sctp_pcb *ep;
1746 uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE];
1748 uint8_t cookie_ok = 0;
1749 unsigned int size_of_pkt, sig_offset, cookie_offset;
1750 unsigned int cookie_len;
1752 struct timeval time_expires;
1753 struct sockaddr_storage dest_store;
1754 struct sockaddr *localep_sa = (struct sockaddr *)&dest_store;
1756 int notification = 0;
1757 struct sctp_nets *netl;
1758 int had_a_existing_tcb = 0;
1760 SCTPDBG(SCTP_DEBUG_INPUT2,
1761 "sctp_handle_cookie: handling COOKIE-ECHO\n");
1763 if (inp_p == NULL) {
1766 /* First get the destination address setup too. */
1767 iph = mtod(m, struct ip *);
1768 if (iph->ip_v == IPVERSION) {
1770 struct sockaddr_in *sin;
1772 sin = (struct sockaddr_in *)(localep_sa);
1773 memset(sin, 0, sizeof(*sin));
1774 sin->sin_family = AF_INET;
1775 sin->sin_len = sizeof(*sin);
1776 sin->sin_port = sh->dest_port;
1777 sin->sin_addr.s_addr = iph->ip_dst.s_addr;
1778 size_of_pkt = SCTP_GET_IPV4_LENGTH(iph);
1779 } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
1781 struct ip6_hdr *ip6;
1782 struct sockaddr_in6 *sin6;
1784 sin6 = (struct sockaddr_in6 *)(localep_sa);
1785 memset(sin6, 0, sizeof(*sin6));
1786 sin6->sin6_family = AF_INET6;
1787 sin6->sin6_len = sizeof(struct sockaddr_in6);
1788 ip6 = mtod(m, struct ip6_hdr *);
1789 sin6->sin6_port = sh->dest_port;
1790 sin6->sin6_addr = ip6->ip6_dst;
1791 size_of_pkt = SCTP_GET_IPV6_LENGTH(ip6) + iphlen;
1796 cookie = &cp->cookie;
1797 cookie_offset = offset + sizeof(struct sctp_chunkhdr);
1798 cookie_len = ntohs(cp->ch.chunk_length);
1800 if ((cookie->peerport != sh->src_port) &&
1801 (cookie->myport != sh->dest_port) &&
1802 (cookie->my_vtag != sh->v_tag)) {
1804 * invalid ports or bad tag. Note that we always leave the
1805 * v_tag in the header in network order and when we stored
1806 * it in the my_vtag slot we also left it in network order.
1807 * This maintains the match even though it may be in the
1808 * opposite byte order of the machine :->
1812 if (cookie_len > size_of_pkt ||
1813 cookie_len < sizeof(struct sctp_cookie_echo_chunk) +
1814 sizeof(struct sctp_init_chunk) +
1815 sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) {
1816 /* cookie too long! or too small */
1820 * split off the signature into its own mbuf (since it should not be
1821 * calculated in the sctp_hmac_m() call).
1823 sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE;
1824 if (sig_offset > size_of_pkt) {
1825 /* packet not correct size! */
1826 /* XXX this may already be accounted for earlier... */
1829 m_sig = m_split(m, sig_offset, M_DONTWAIT);
1830 if (m_sig == NULL) {
1831 /* out of memory or ?? */
1835 * compute the signature/digest for the cookie
1837 ep = &(*inp_p)->sctp_ep;
1840 SCTP_TCB_UNLOCK(l_stcb);
1842 SCTP_INP_RLOCK(l_inp);
1844 SCTP_TCB_LOCK(l_stcb);
1846 /* which cookie is it? */
1847 if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) &&
1848 (ep->current_secret_number != ep->last_secret_number)) {
1849 /* it's the old cookie */
1850 (void)sctp_hmac_m(SCTP_HMAC,
1851 (uint8_t *) ep->secret_key[(int)ep->last_secret_number],
1852 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig);
1854 /* it's the current cookie */
1855 (void)sctp_hmac_m(SCTP_HMAC,
1856 (uint8_t *) ep->secret_key[(int)ep->current_secret_number],
1857 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig);
1859 /* get the signature */
1860 SCTP_INP_RUNLOCK(l_inp);
1861 sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig);
1863 /* couldn't find signature */
1864 sctp_m_freem(m_sig);
1867 /* compare the received digest with the computed digest */
1868 if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) {
1869 /* try the old cookie? */
1870 if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) &&
1871 (ep->current_secret_number != ep->last_secret_number)) {
1872 /* compute digest with old */
1873 (void)sctp_hmac_m(SCTP_HMAC,
1874 (uint8_t *) ep->secret_key[(int)ep->last_secret_number],
1875 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig);
1877 if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0)
1885 * Now before we continue we must reconstruct our mbuf so that
1886 * normal processing of any other chunks will work.
1892 while (SCTP_BUF_NEXT(m_at) != NULL) {
1893 m_at = SCTP_BUF_NEXT(m_at);
1895 SCTP_BUF_NEXT(m_at) = m_sig;
1898 if (cookie_ok == 0) {
1899 SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n");
1900 SCTPDBG(SCTP_DEBUG_INPUT2,
1901 "offset = %u, cookie_offset = %u, sig_offset = %u\n",
1902 (uint32_t) offset, cookie_offset, sig_offset);
1906 * check the cookie timestamps to be sure it's not stale
1908 (void)SCTP_GETTIME_TIMEVAL(&now);
1909 /* Expire time is in Ticks, so we convert to seconds */
1910 time_expires.tv_sec = cookie->time_entered.tv_sec + TICKS_TO_SEC(cookie->cookie_life);
1911 time_expires.tv_usec = cookie->time_entered.tv_usec;
1912 if (timevalcmp(&now, &time_expires, >)) {
1913 /* cookie is stale! */
1914 struct mbuf *op_err;
1915 struct sctp_stale_cookie_msg *scm;
1918 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_stale_cookie_msg),
1919 0, M_DONTWAIT, 1, MT_DATA);
1920 if (op_err == NULL) {
1924 /* pre-reserve some space */
1925 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
1926 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
1927 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1930 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_stale_cookie_msg);
1931 scm = mtod(op_err, struct sctp_stale_cookie_msg *);
1932 scm->ph.param_type = htons(SCTP_CAUSE_STALE_COOKIE);
1933 scm->ph.param_length = htons((sizeof(struct sctp_paramhdr) +
1934 (sizeof(uint32_t))));
1935 /* seconds to usec */
1936 tim = (now.tv_sec - time_expires.tv_sec) * 1000000;
1939 tim = now.tv_usec - cookie->time_entered.tv_usec;
1940 scm->time_usec = htonl(tim);
1941 sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag,
1946 * Now we must see with the lookup address if we have an existing
1947 * asoc. This will only happen if we were in the COOKIE-WAIT state
1948 * and a INIT collided with us and somewhere the peer sent the
1949 * cookie on another address besides the single address our assoc
1950 * had for him. In this case we will have one of the tie-tags set at
1951 * least AND the address field in the cookie can be used to look it
1955 if (cookie->addr_type == SCTP_IPV6_ADDRESS) {
1956 memset(&sin6, 0, sizeof(sin6));
1957 sin6.sin6_family = AF_INET6;
1958 sin6.sin6_len = sizeof(sin6);
1959 sin6.sin6_port = sh->src_port;
1960 sin6.sin6_scope_id = cookie->scope_id;
1961 memcpy(&sin6.sin6_addr.s6_addr, cookie->address,
1962 sizeof(sin6.sin6_addr.s6_addr));
1963 to = (struct sockaddr *)&sin6;
1964 } else if (cookie->addr_type == SCTP_IPV4_ADDRESS) {
1965 memset(&sin, 0, sizeof(sin));
1966 sin.sin_family = AF_INET;
1967 sin.sin_len = sizeof(sin);
1968 sin.sin_port = sh->src_port;
1969 sin.sin_addr.s_addr = cookie->address[0];
1970 to = (struct sockaddr *)&sin;
1972 /* This should not happen */
1975 if ((*stcb == NULL) && to) {
1976 /* Yep, lets check */
1977 *stcb = sctp_findassociation_ep_addr(inp_p, to, netp, localep_sa, NULL);
1978 if (*stcb == NULL) {
1980 * We should have only got back the same inp. If we
1981 * got back a different ep we have a problem. The
1982 * original findep got back l_inp and now
1984 if (l_inp != *inp_p) {
1985 SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n");
1988 if (*locked_tcb == NULL) {
1990 * In this case we found the assoc only
1991 * after we locked the create lock. This
1992 * means we are in a colliding case and we
1993 * must make sure that we unlock the tcb if
1994 * its one of the cases where we throw away
1995 * the incoming packets.
1997 *locked_tcb = *stcb;
2000 * We must also increment the inp ref count
2001 * since the ref_count flags was set when we
2002 * did not find the TCB, now we found it
2003 * which reduces the refcount.. we must
2004 * raise it back out to balance it all :-)
2006 SCTP_INP_INCR_REF((*stcb)->sctp_ep);
2007 if ((*stcb)->sctp_ep != l_inp) {
2008 SCTP_PRINTF("Huh? ep:%p diff then l_inp:%p?\n",
2009 (*stcb)->sctp_ep, l_inp);
2017 cookie_len -= SCTP_SIGNATURE_SIZE;
2018 if (*stcb == NULL) {
2019 /* this is the "normal" case... get a new TCB */
2020 *stcb = sctp_process_cookie_new(m, iphlen, offset, sh, cookie,
2021 cookie_len, *inp_p, netp, to, ¬ification,
2022 auth_skipped, auth_offset, auth_len, vrf_id, table_id);
2024 /* this is abnormal... cookie-echo on existing TCB */
2025 had_a_existing_tcb = 1;
2026 *stcb = sctp_process_cookie_existing(m, iphlen, offset, sh,
2027 cookie, cookie_len, *inp_p, *stcb, *netp, to, ¬ification,
2028 &sac_restart_id, vrf_id, table_id);
2031 if (*stcb == NULL) {
2032 /* still no TCB... must be bad cookie-echo */
2036 * Ok, we built an association so confirm the address we sent the
2039 netl = sctp_findnet(*stcb, to);
2041 * This code should in theory NOT run but
2044 /* TSNH! Huh, why do I need to add this address here? */
2047 ret = sctp_add_remote_addr(*stcb, to, SCTP_DONOT_SETSCOPE,
2048 SCTP_IN_COOKIE_PROC);
2049 netl = sctp_findnet(*stcb, to);
2052 if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) {
2053 netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
2054 (void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL,
2056 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
2057 (*stcb), 0, (void *)netl);
2061 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, *inp_p,
2064 if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
2065 if (!had_a_existing_tcb ||
2066 (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
2068 * If we have a NEW cookie or the connect never
2069 * reached the connected state during collision we
2070 * must do the TCP accept thing.
2072 struct socket *so, *oso;
2073 struct sctp_inpcb *inp;
2075 if (notification == SCTP_NOTIFY_ASSOC_RESTART) {
2077 * For a restart we will keep the same
2078 * socket, no need to do anything. I THINK!!
2080 sctp_ulp_notify(notification, *stcb, 0, (void *)&sac_restart_id);
2083 oso = (*inp_p)->sctp_socket;
2085 * We do this to keep the sockets side happy durin
2086 * the sonewcon ONLY.
2089 SCTP_TCB_UNLOCK((*stcb));
2090 so = sonewconn(oso, 0
2093 SCTP_INP_WLOCK((*stcb)->sctp_ep);
2094 SCTP_TCB_LOCK((*stcb));
2095 SCTP_INP_WUNLOCK((*stcb)->sctp_ep);
2097 struct mbuf *op_err;
2099 /* Too many sockets */
2100 SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n");
2101 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
2102 sctp_abort_association(*inp_p, NULL, m, iphlen,
2105 sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_20);
2108 inp = (struct sctp_inpcb *)so->so_pcb;
2109 SCTP_INP_INCR_REF(inp);
2111 * We add the unbound flag here so that if we get an
2112 * soabort() before we get the move_pcb done, we
2113 * will properly cleanup.
2115 inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
2116 SCTP_PCB_FLAGS_CONNECTED |
2117 SCTP_PCB_FLAGS_IN_TCPPOOL |
2118 SCTP_PCB_FLAGS_UNBOUND |
2119 (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) |
2120 SCTP_PCB_FLAGS_DONT_WAKE);
2121 inp->sctp_features = (*inp_p)->sctp_features;
2122 inp->sctp_socket = so;
2123 inp->sctp_frag_point = (*inp_p)->sctp_frag_point;
2124 inp->partial_delivery_point = (*inp_p)->partial_delivery_point;
2125 inp->sctp_context = (*inp_p)->sctp_context;
2126 inp->inp_starting_point_for_iterator = NULL;
2128 * copy in the authentication parameters from the
2131 if (inp->sctp_ep.local_hmacs)
2132 sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
2133 inp->sctp_ep.local_hmacs =
2134 sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs);
2135 if (inp->sctp_ep.local_auth_chunks)
2136 sctp_free_chunklist(inp->sctp_ep.local_auth_chunks);
2137 inp->sctp_ep.local_auth_chunks =
2138 sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks);
2139 (void)sctp_copy_skeylist(&(*inp_p)->sctp_ep.shared_keys,
2140 &inp->sctp_ep.shared_keys);
2143 * Now we must move it from one hash table to
2144 * another and get the tcb in the right place.
2146 sctp_move_pcb_and_assoc(*inp_p, inp, *stcb);
2148 atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2149 SCTP_TCB_UNLOCK((*stcb));
2151 sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb, M_NOWAIT);
2152 SCTP_TCB_LOCK((*stcb));
2153 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2157 * now we must check to see if we were aborted while
2158 * the move was going on and the lock/unlock
2161 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2163 * yep it was, we leave the assoc attached
2164 * to the socket since the sctp_inpcb_free()
2165 * call will send an abort for us.
2167 SCTP_INP_DECR_REF(inp);
2170 SCTP_INP_DECR_REF(inp);
2171 /* Switch over to the new guy */
2173 sctp_ulp_notify(notification, *stcb, 0, NULL);
2176 * Pull it from the incomplete queue and wake the
2183 if ((notification) && ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
2184 sctp_ulp_notify(notification, *stcb, 0, NULL);
/*
 * sctp_handle_cookie_ack() -- process an incoming COOKIE-ACK chunk.
 *
 * Stops all cookie timers, then, when the association is in
 * COOKIE_ECHOED state, transitions it to OPEN (preserving a pending
 * SHUTDOWN flag and arming the shutdown-guard timer in that case),
 * updates establishment statistics, takes an RTO sample if the cookie
 * exchange saw no errors, notifies the ULP the association is up,
 * marks TCP-style sockets connected, and starts the heartbeat /
 * autoclose / ASCONF timers as appropriate.  Finally, stored cookies
 * are tossed and the send timer restarted when data is outstanding.
 *
 * NOTE(review): this extract is lossy -- original file line numbers
 * are fused into each line and some intermediate lines (braces,
 * comment delimiters, else-arms) are missing from this view.  Verify
 * any edit against the pristine sctp_input.c.
 */
2190 sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp,
2191 struct sctp_tcb *stcb, struct sctp_nets *net)
2193 /* cp must not be used, others call this without a c-ack :-) */
2194 struct sctp_association *asoc;
2196 SCTPDBG(SCTP_DEBUG_INPUT2,
2197 "sctp_handle_cookie_ack: handling COOKIE-ACK\n");
2203 sctp_stop_all_cookie_timers(stcb);
2204 /* process according to association state */
2205 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
2206 /* state change only needed when I am in right state */
2207 SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
2208 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
2209 asoc->state = SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING;
2210 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
2211 stcb->sctp_ep, stcb, asoc->primary_destination);
2214 asoc->state = SCTP_STATE_OPEN;
2217 SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
2218 SCTP_STAT_INCR_GAUGE32(sctps_currestab);
/* only take an RTO sample when no retransmission distorted the timing */
2219 if (asoc->overall_error_count == 0) {
2220 net->RTO = sctp_calculate_rto(stcb, asoc, net,
2221 &asoc->time_entered);
2223 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
2224 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL);
2225 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2226 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
/* TCP-style socket: flag the endpoint connected and wake accept()/connect() */
2227 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
2228 soisconnected(stcb->sctp_ep->sctp_socket);
2230 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
2233 * since we did not send a HB make sure we don't double
2236 net->hb_responded = 1;
2238 if (stcb->asoc.sctp_autoclose_ticks &&
2239 sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) {
2240 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
2241 stcb->sctp_ep, stcb, NULL);
2244 * set ASCONF timer if ASCONFs are pending and allowed (eg.
2245 * addresses changed when init/cookie echo in flight)
2247 if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) &&
2248 (stcb->asoc.peer_supports_asconf) &&
2249 (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) {
2250 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
2251 stcb->sctp_ep, stcb,
2252 stcb->asoc.primary_destination);
2255 /* Toss the cookie if I can */
2256 sctp_toss_old_cookies(stcb, asoc);
2257 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
2258 /* Restart the timer if we have pending data */
2259 struct sctp_tmit_chunk *chk;
2261 chk = TAILQ_FIRST(&asoc->sent_queue);
2263 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
/*
 * sctp_handle_ecn_echo() -- process an incoming ECNE (ECN echo) chunk.
 *
 * Validates the chunk length, resyncs the ECN-nonce state (resync TSN
 * taken from the head of the send queue, or sending_seq when the queue
 * is empty), locates the net the reported TSN was sent on (falling
 * back to the primary destination), and -- at most once per RTT, gated
 * by last_cwr_tsn -- halves ssthresh, floors it at one MTU, and pulls
 * cwnd down to ssthresh.  A CWR is always sent back so the peer learns
 * the echo was processed even when cwnd was not reduced again.
 *
 * NOTE(review): lossy extract (embedded original line numbers, some
 * lines missing); confirm against the pristine sctp_input.c.
 */
2270 sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
2271 struct sctp_tcb *stcb)
2273 struct sctp_nets *net;
2274 struct sctp_tmit_chunk *lchk;
/* malformed ECNE: must be exactly one fixed-size chunk */
2277 if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_ecne_chunk)) {
2280 SCTP_STAT_INCR(sctps_recvecne);
2281 tsn = ntohl(cp->tsn);
2282 /* ECN Nonce stuff: need a resync and disable the nonce sum check */
2283 /* Also we make sure we disable the nonce_wait */
2284 lchk = TAILQ_FIRST(&stcb->asoc.send_queue);
2286 stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq;
2288 stcb->asoc.nonce_resync_tsn = lchk->rec.data.TSN_seq;
2290 stcb->asoc.nonce_wait_for_ecne = 0;
2291 stcb->asoc.nonce_sum_check = 0;
2293 /* Find where it was sent, if possible */
2295 lchk = TAILQ_FIRST(&stcb->asoc.sent_queue);
2297 if (lchk->rec.data.TSN_seq == tsn) {
/* sent queue is TSN-ordered: past the reported TSN means it is not here */
2301 if (compare_with_wrap(lchk->rec.data.TSN_seq, tsn, MAX_SEQ))
2303 lchk = TAILQ_NEXT(lchk, sctp_next);
2306 /* default is we use the primary */
2307 net = stcb->asoc.primary_destination;
/* reduce cwnd at most once per RTT (see last_cwr_tsn update below) */
2309 if (compare_with_wrap(tsn, stcb->asoc.last_cwr_tsn, MAX_TSN)) {
2310 #ifdef SCTP_CWND_MONITOR
2313 old_cwnd = net->cwnd;
2315 SCTP_STAT_INCR(sctps_ecnereducedcwnd);
2316 net->ssthresh = net->cwnd / 2;
2317 if (net->ssthresh < net->mtu) {
2318 net->ssthresh = net->mtu;
2319 /* here back off the timer as well, to slow us down */
2322 net->cwnd = net->ssthresh;
2323 #ifdef SCTP_CWND_MONITOR
2324 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
2327 * we reduce once every RTT. So we will only lower cwnd at
2328 * the next sending seq i.e. the resync_tsn.
2330 stcb->asoc.last_cwr_tsn = stcb->asoc.nonce_resync_tsn;
2333 * We always send a CWR this way if our previous one was lost our
2334 * peer will get an update, or if it is not time again to reduce we
2335 * still get the cwr to the peer.
2337 sctp_send_cwr(stcb, net, tsn);
/*
 * sctp_handle_ecn_cwr() -- process an incoming CWR chunk.
 *
 * Scan the control send queue for the (single) queued ECN-ECHO chunk
 * and, when the CWR's TSN covers it, dequeue and free that ECNE so it
 * is not retransmitted.
 */
2341 sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb)
2344 * Here we get a CWR from the peer. We must look in the outqueue and
2345 * make sure that we have a covered ECNE in the control chunk part.
2348 struct sctp_tmit_chunk *chk;
2349 struct sctp_ecne_chunk *ecne;
2351 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
2352 if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) {
2356 * Look for and remove if it is the right TSN. Since there
2357 * is only ONE ECNE on the control queue at any one time we
2358 * don't need to worry about more than one!
2360 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
/* covered when the CWR TSN is at or beyond the queued ECNE TSN */
2361 if (compare_with_wrap(ntohl(cp->tsn), ntohl(ecne->tsn),
2362 MAX_TSN) || (cp->tsn == ecne->tsn)) {
2363 /* this covers this ECNE, we can remove it */
2364 stcb->asoc.ecn_echo_cnt_onq--;
2365 TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk,
2368 sctp_m_freem(chk->data);
2371 stcb->asoc.ctrl_queue_cnt--;
2372 sctp_free_remote_addr(chk->whoTo);
2373 sctp_free_a_chunk(stcb, chk);
/*
 * sctp_handle_shutdown_complete() -- process a SHUTDOWN-COMPLETE
 * chunk.
 *
 * Ignored (with the TCB unlocked) unless the association is in
 * SHUTDOWN_ACK_SENT state.  Otherwise: notify the ULP the association
 * is down, report any outbound data that is unexpectedly still
 * queued, stop the shutdown timer, bump the shutdown counter, and
 * free the association.
 */
2380 sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp,
2381 struct sctp_tcb *stcb, struct sctp_nets *net)
2383 struct sctp_association *asoc;
2385 SCTPDBG(SCTP_DEBUG_INPUT2,
2386 "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n");
2391 /* process according to association state */
2392 if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
2393 /* unexpected SHUTDOWN-COMPLETE... so ignore... */
2394 SCTP_TCB_UNLOCK(stcb);
2397 /* notify upper layer protocol */
2398 if (stcb->sctp_socket) {
2399 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL);
2400 /* are the queues empty? they should be */
2401 if (!TAILQ_EMPTY(&asoc->send_queue) ||
2402 !TAILQ_EMPTY(&asoc->sent_queue) ||
2403 !TAILQ_EMPTY(&asoc->out_wheel)) {
2404 sctp_report_all_outbound(stcb, 0);
2407 /* stop the timer */
2408 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_21);
2409 SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
/* free the TCB; stcb must not be touched after this call */
2411 sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_22);
/*
 * process_chunk_drop() -- react to one chunk description extracted
 * from a PACKET-DROPPED report.
 *
 * For a DATA chunk: locate the reported TSN on the sent queue (first
 * by TSN order, then by a linear re-scan), skip reports that merely
 * reflect a closed receive window, sanity-check the descriptor's
 * payload bytes against our copy, mark the chunk for retransmission
 * as if a fast-retransmit had fired, restart the send timer, fix
 * flight-size accounting, and audit the retransmit count.
 *
 * For control chunk types (ASCONF, INIT, SACK, HEARTBEAT, SHUTDOWN,
 * SHUTDOWN-ACK, COOKIE-ECHO, COOKIE-ACK, ASCONF-ACK, FWD-TSN): re-send
 * the corresponding chunk.  The remaining types are not actionable.
 *
 * NOTE(review): lossy extract -- several branch/else/brace lines are
 * missing from this view (e.g. the return paths and the closing of
 * the switch).  Confirm against the pristine sctp_input.c.
 */
2416 process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
2417 struct sctp_nets *net, uint8_t flg)
2419 switch (desc->chunk_type) {
2421 /* find the tsn to resend (possibly */
2424 struct sctp_tmit_chunk *tp1;
2426 tsn = ntohl(desc->tsn_ifany);
2427 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2429 if (tp1->rec.data.TSN_seq == tsn) {
/* queue is TSN-ordered; once past the target the ordered scan stops */
2433 if (compare_with_wrap(tp1->rec.data.TSN_seq, tsn,
2439 tp1 = TAILQ_NEXT(tp1, sctp_next);
2443 * Do it the other way , aka without paying
2444 * attention to queue seq order.
2446 SCTP_STAT_INCR(sctps_pdrpdnfnd);
2447 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2449 if (tp1->rec.data.TSN_seq == tsn) {
2453 tp1 = TAILQ_NEXT(tp1, sctp_next);
2457 SCTP_STAT_INCR(sctps_pdrptsnnf);
2459 if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) {
/* rwnd==0 drops are window probes, not real losses -- count and skip */
2462 if ((stcb->asoc.peers_rwnd == 0) &&
2463 ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
2464 SCTP_STAT_INCR(sctps_pdrpdiwnp);
2467 if (stcb->asoc.peers_rwnd == 0 &&
2468 (flg & SCTP_FROM_MIDDLE_BOX)) {
2469 SCTP_STAT_INCR(sctps_pdrpdizrw);
/* compare the first payload bytes against the descriptor's copy */
2472 ddp = (uint8_t *) (mtod(tp1->data, caddr_t)+
2473 sizeof(struct sctp_data_chunk));
2477 for (iii = 0; iii < sizeof(desc->data_bytes);
2479 if (ddp[iii] != desc->data_bytes[iii]) {
2480 SCTP_STAT_INCR(sctps_pdrpbadd);
2486 * We zero out the nonce so resync not
2489 tp1->rec.data.ect_nonce = 0;
2493 * this guy had a RTO calculation
2494 * pending on it, cancel it
2498 SCTP_STAT_INCR(sctps_pdrpmark);
2499 if (tp1->sent != SCTP_DATAGRAM_RESEND)
2500 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
2501 tp1->sent = SCTP_DATAGRAM_RESEND;
2503 * mark it as if we were doing a FR, since
2504 * we will be getting gap ack reports behind
2505 * the info from the router.
2507 tp1->rec.data.doing_fast_retransmit = 1;
2509 * mark the tsn with what sequences can
2512 if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
2513 tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
2515 tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
2518 /* restart the timer */
2519 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
2520 stcb, tp1->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_23);
2521 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
2524 /* fix counts and things */
2525 #ifdef SCTP_FLIGHT_LOGGING
2526 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP,
2527 tp1->whoTo->flight_size,
2530 tp1->rec.data.TSN_seq);
2532 sctp_flight_size_decrease(tp1);
2533 sctp_total_flight_decrease(stcb, tp1);
/* audit: recount RESEND-marked chunks and repair the cached counter */
2539 TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
2540 if (tp1->sent == SCTP_DATAGRAM_RESEND)
2543 TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue,
2545 if (tp1->sent == SCTP_DATAGRAM_RESEND)
2548 if (audit != stcb->asoc.sent_queue_retran_cnt) {
2549 SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n",
2550 audit, stcb->asoc.sent_queue_retran_cnt);
2551 #ifndef SCTP_AUDITING_ENABLED
2552 stcb->asoc.sent_queue_retran_cnt = audit;
2560 struct sctp_tmit_chunk *asconf;
2562 TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
2564 if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
2569 if (asconf->sent != SCTP_DATAGRAM_RESEND)
2570 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
2571 asconf->sent = SCTP_DATAGRAM_RESEND;
2572 asconf->snd_count--;
2576 case SCTP_INITIATION:
2577 /* resend the INIT */
2578 stcb->asoc.dropped_special_cnt++;
2579 if (stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) {
2581 * If we can get it in, in a few attempts we do
2582 * this, otherwise we let the timer fire.
2584 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep,
2585 stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_24);
2586 sctp_send_initiate(stcb->sctp_ep, stcb);
2589 case SCTP_SELECTIVE_ACK:
2590 /* resend the sack */
2591 sctp_send_sack(stcb);
2593 case SCTP_HEARTBEAT_REQUEST:
2594 /* resend a demand HB */
2595 (void)sctp_send_hb(stcb, 1, net);
2598 sctp_send_shutdown(stcb, net);
2600 case SCTP_SHUTDOWN_ACK:
2601 sctp_send_shutdown_ack(stcb, net);
2603 case SCTP_COOKIE_ECHO:
2605 struct sctp_tmit_chunk *cookie;
2608 TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue,
2610 if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
2615 if (cookie->sent != SCTP_DATAGRAM_RESEND)
2616 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
2617 cookie->sent = SCTP_DATAGRAM_RESEND;
2618 sctp_stop_all_cookie_timers(stcb);
2622 case SCTP_COOKIE_ACK:
2623 sctp_send_cookie_ack(stcb);
2625 case SCTP_ASCONF_ACK:
2626 /* resend last asconf ack */
2627 sctp_send_asconf_ack(stcb, 1);
2629 case SCTP_FORWARD_CUM_TSN:
2630 send_forward_tsn(stcb, &stcb->asoc);
2632 /* can't do anything with these */
2633 case SCTP_PACKET_DROPPED:
2634 case SCTP_INITIATION_ACK: /* this should not happen */
2635 case SCTP_HEARTBEAT_ACK:
2636 case SCTP_ABORT_ASSOCIATION:
2637 case SCTP_OPERATION_ERROR:
2638 case SCTP_SHUTDOWN_COMPLETE:
/*
 * sctp_reset_in_stream() -- apply an inbound stream reset.
 *
 * Reset last_sequence_delivered to 0xffff for each stream listed in
 * 'list' (network byte order), or for every inbound stream when the
 * list is empty, then notify the ULP of the received reset.
 */
2648 sctp_reset_in_stream(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
2654 * We set things to 0xffff since this is the last delivered sequence
2655 * and we will be sending in 0 after the reset.
2658 if (number_entries) {
2659 for (i = 0; i < number_entries; i++) {
2660 temp = ntohs(list[i]);
/* silently skip stream numbers we do not have */
2661 if (temp >= stcb->asoc.streamincnt) {
2664 stcb->asoc.strmin[temp].last_sequence_delivered = 0xffff;
2668 for (i = 0; i < stcb->asoc.streamincnt; i++) {
2669 stcb->asoc.strmin[i].last_sequence_delivered = 0xffff;
2672 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list);
/*
 * sctp_reset_out_streams() -- apply an outbound stream reset.
 *
 * Zero next_sequence_sent for every outbound stream (empty list) or
 * for each valid stream listed in 'list' (network byte order), then
 * notify the ULP.
 */
2676 sctp_reset_out_streams(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
2680 if (number_entries == 0) {
2681 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
2682 stcb->asoc.strmout[i].next_sequence_sent = 0;
2684 } else if (number_entries) {
2685 for (i = 0; i < number_entries; i++) {
2688 temp = ntohs(list[i]);
2689 if (temp >= stcb->asoc.streamoutcnt) {
2690 /* no such stream */
2693 stcb->asoc.strmout[temp].next_sequence_sent = 0;
2696 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list);
/*
 * sctp_find_stream_reset() -- locate our outstanding stream-reset
 * request whose sequence number matches 'seq' inside the queued
 * stream-reset chunk (a chunk can carry at most two requests).
 *
 * On success, returns the matching request and stores the chunk
 * pointer through 'bchk'.  Clears stream_reset_outstanding when no
 * request chunk is queued at all.
 *
 * NOTE(review): lossy extract -- the return statements and some
 * braces are missing from this view; confirm against pristine source.
 */
2700 struct sctp_stream_reset_out_request *
2701 sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk)
2703 struct sctp_association *asoc;
2704 struct sctp_stream_reset_out_req *req;
2705 struct sctp_stream_reset_out_request *r;
2706 struct sctp_tmit_chunk *chk;
2710 if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
2711 asoc->stream_reset_outstanding = 0;
2714 if (stcb->asoc.str_reset == NULL) {
2715 asoc->stream_reset_outstanding = 0;
2718 chk = stcb->asoc.str_reset;
2719 if (chk->data == NULL) {
2723 /* he wants a copy of the chk pointer */
2726 clen = chk->send_size;
2727 req = mtod(chk->data, struct sctp_stream_reset_out_req *);
2729 if (ntohl(r->request_seq) == seq) {
2733 len = SCTP_SIZE32(ntohs(r->ph.param_length));
/* room for a second request after the first one? */
2734 if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) {
2735 /* move to the next one, there can only be a max of two */
2736 r = (struct sctp_stream_reset_out_request *)((caddr_t)r + len);
2737 if (ntohl(r->request_seq) == seq) {
2741 /* that seq is not here */
/*
 * sctp_clean_up_stream_reset() -- discard the queued stream-reset
 * request chunk once it is fully answered.
 *
 * Stops the STRRESET timer, unlinks the chunk from the control send
 * queue, frees its mbuf chain and net reference, releases the chunk,
 * and clears asoc->str_reset.
 */
2746 sctp_clean_up_stream_reset(struct sctp_tcb *stcb)
2748 struct sctp_association *asoc;
2749 struct sctp_tmit_chunk *chk = stcb->asoc.str_reset;
2751 if (stcb->asoc.str_reset == NULL) {
2756 sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_25);
2757 TAILQ_REMOVE(&asoc->control_send_queue,
2761 sctp_m_freem(chk->data);
2764 asoc->ctrl_queue_cnt--;
2765 sctp_free_remote_addr(chk->whoTo);
2767 sctp_free_a_chunk(stcb, chk);
2768 stcb->asoc.str_reset = NULL;
/*
 * sctp_handle_stream_reset_response() -- process the peer's response
 * to one of our outstanding stream-reset requests.
 *
 * Matches 'seq' against str_reset_seq_out, then dispatches on the
 * original request type:
 *   - OUT request: on SCTP_STREAM_RESET_PERFORMED reset our outbound
 *     streams, otherwise notify the ULP of the failure;
 *   - IN request: notify the ULP only on failure;
 *   - TSN request: on success adopt the peer's new TSNs (driving a
 *     synthetic FWD-TSN through normal processing), rebase the
 *     mapping array, and reset both stream directions.
 * When no requests remain outstanding, the queued request chunk is
 * cleaned up via sctp_clean_up_stream_reset().
 *
 * NOTE(review): lossy extract; return paths and some braces are
 * missing from this view -- confirm against pristine source.
 */
2773 sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
2774 uint32_t seq, uint32_t action,
2775 struct sctp_stream_reset_response *respin)
2779 struct sctp_association *asoc = &stcb->asoc;
2780 struct sctp_tmit_chunk *chk;
2781 struct sctp_stream_reset_out_request *srparam;
2784 if (asoc->stream_reset_outstanding == 0) {
2788 if (seq == stcb->asoc.str_reset_seq_out) {
2789 srparam = sctp_find_stream_reset(stcb, seq, &chk);
2791 stcb->asoc.str_reset_seq_out++;
2792 type = ntohs(srparam->ph.param_type);
2793 lparm_len = ntohs(srparam->ph.param_length);
2794 if (type == SCTP_STR_RESET_OUT_REQUEST) {
2795 number_entries = (lparm_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t);
2796 asoc->stream_reset_out_is_outstanding = 0;
2797 if (asoc->stream_reset_outstanding)
2798 asoc->stream_reset_outstanding--;
2799 if (action == SCTP_STREAM_RESET_PERFORMED) {
2801 sctp_reset_out_streams(stcb, number_entries, srparam->list_of_streams);
2803 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, srparam->list_of_streams);
2805 } else if (type == SCTP_STR_RESET_IN_REQUEST) {
2806 /* Answered my request */
2807 number_entries = (lparm_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t);
2808 if (asoc->stream_reset_outstanding)
2809 asoc->stream_reset_outstanding--;
2810 if (action != SCTP_STREAM_RESET_PERFORMED) {
2811 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb, number_entries, srparam->list_of_streams);
2813 } else if (type == SCTP_STR_RESET_TSN_REQUEST) {
2815 * a) Adopt the new in tsn.
2817 * c) Adopt the new out-tsn
2819 struct sctp_stream_reset_response_tsn *resp;
2820 struct sctp_forward_tsn_chunk fwdtsn;
2823 if (respin == NULL) {
2827 if (action == SCTP_STREAM_RESET_PERFORMED) {
2828 resp = (struct sctp_stream_reset_response_tsn *)respin;
2829 asoc->stream_reset_outstanding--;
/* deliver everything below the peer's next TSN via a synthetic FWD-TSN */
2830 fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
2831 fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
2832 fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1);
2833 sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag);
2837 stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1);
2838 stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
2839 stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn);
2840 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
2841 stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn);
2842 stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn;
2844 sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
2845 sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
2849 /* get rid of the request and get the request flags */
2850 if (asoc->stream_reset_outstanding == 0) {
2851 sctp_clean_up_stream_reset(stcb);
/*
 * sctp_handle_str_reset_request_in() -- the peer asks us to reset
 * (some of) OUR outgoing streams.
 *
 * For the expected sequence number: if no outbound reset is already
 * outstanding, queue an out-request covering the listed streams and
 * start the STRRESET timer; otherwise answer "try later".  One- or
 * two-behind sequence numbers get the cached prior result echoed
 * back (our earlier response was lost); anything else is answered
 * with SCTP_STREAM_RESET_BAD_SEQNO.
 */
2859 sctp_handle_str_reset_request_in(struct sctp_tcb *stcb,
2860 struct sctp_tmit_chunk *chk,
2861 struct sctp_stream_reset_in_request *req)
2869 * peer wants me to send a str-reset to him for my outgoing seq's if
2872 struct sctp_association *asoc = &stcb->asoc;
2874 seq = ntohl(req->request_seq);
2875 if (asoc->str_reset_seq_in == seq) {
2876 if (stcb->asoc.stream_reset_out_is_outstanding == 0) {
2877 len = ntohs(req->ph.param_length);
2878 number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t));
/* convert the stream list to host byte order in place */
2879 for (i = 0; i < number_entries; i++) {
2880 temp = ntohs(req->list_of_streams[i]);
2881 req->list_of_streams[i] = temp;
2883 /* move the reset action back one */
2884 asoc->last_reset_action[1] = asoc->last_reset_action[0];
2885 asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
2886 sctp_add_stream_reset_out(chk, number_entries, req->list_of_streams,
2887 asoc->str_reset_seq_out,
2888 seq, (asoc->sending_seq - 1));
2889 asoc->stream_reset_out_is_outstanding = 1;
2890 asoc->str_reset = chk;
2891 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
2892 stcb->asoc.stream_reset_outstanding++;
2894 /* Can't do it, since we have sent one out */
2895 asoc->last_reset_action[1] = asoc->last_reset_action[0];
2896 asoc->last_reset_action[0] = SCTP_STREAM_RESET_TRY_LATER;
2897 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
2899 asoc->str_reset_seq_in++;
2900 } else if (asoc->str_reset_seq_in - 1 == seq) {
2901 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
2902 } else if (asoc->str_reset_seq_in - 2 == seq) {
2903 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
2905 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
/*
 * sctp_handle_str_reset_request_tsn() -- the peer asks for a full
 * TSN reset (both directions).
 *
 * For the expected sequence number: drive a synthetic FWD-TSN
 * through normal processing to deliver pending data, bump our
 * inbound TSN base by SCTP_STREAM_RESET_TSN_DELTA, clear the mapping
 * array, advance sending_seq, record history for retransmitted
 * responses, append a PERFORMED result carrying our new TSNs, and
 * reset both stream directions.  One- or two-behind sequence numbers
 * re-send the historical result; anything else is a bad sequence
 * number.
 *
 * NOTE(review): lossy extract (abort_flag handling lines missing
 * here); confirm against pristine source.
 */
2910 sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb,
2911 struct sctp_tmit_chunk *chk,
2912 struct sctp_stream_reset_tsn_request *req)
2914 /* reset all in and out and update the tsn */
2916 * A) reset my str-seq's on in and out. B) Select a receive next,
2917 * and set cum-ack to it. Also process this selected number as a
2918 * fwd-tsn as well. C) set in the response my next sending seq.
2920 struct sctp_forward_tsn_chunk fwdtsn;
2921 struct sctp_association *asoc = &stcb->asoc;
2925 seq = ntohl(req->request_seq);
2926 if (asoc->str_reset_seq_in == seq) {
2927 fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
2928 fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
2929 fwdtsn.ch.chunk_flags = 0;
2930 fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1);
2931 sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag);
2935 stcb->asoc.highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA;
2936 stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
2937 stcb->asoc.mapping_array_base_tsn = stcb->asoc.highest_tsn_inside_map + 1;
2938 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
2939 atomic_add_int(&stcb->asoc.sending_seq, 1);
2940 /* save off historical data for retrans */
2941 stcb->asoc.last_sending_seq[1] = stcb->asoc.last_sending_seq[0];
2942 stcb->asoc.last_sending_seq[0] = stcb->asoc.sending_seq;
2943 stcb->asoc.last_base_tsnsent[1] = stcb->asoc.last_base_tsnsent[0];
2944 stcb->asoc.last_base_tsnsent[0] = stcb->asoc.mapping_array_base_tsn;
2946 sctp_add_stream_reset_result_tsn(chk,
2947 ntohl(req->request_seq),
2948 SCTP_STREAM_RESET_PERFORMED,
2949 stcb->asoc.sending_seq,
2950 stcb->asoc.mapping_array_base_tsn);
2951 sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
2952 sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
2953 stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
2954 stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
2956 asoc->str_reset_seq_in++;
2957 } else if (asoc->str_reset_seq_in - 1 == seq) {
/* our previous response was lost: replay the recorded result */
2958 sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
2959 stcb->asoc.last_sending_seq[0],
2960 stcb->asoc.last_base_tsnsent[0]
2962 } else if (asoc->str_reset_seq_in - 2 == seq) {
2963 sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1],
2964 stcb->asoc.last_sending_seq[1],
2965 stcb->asoc.last_base_tsnsent[1]
2968 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
/*
 * sctp_handle_str_reset_request_out() -- the peer is resetting ITS
 * outgoing (our incoming) streams.
 *
 * For the expected sequence number: if all data up to the peer's
 * send_reset_at_tsn has already arrived, reset our inbound streams
 * immediately; otherwise queue the request on asoc->resetHead so the
 * reset triggers once the straggling TSNs arrive (answering DENIED
 * only when the queue entry cannot be allocated).  One- or two-behind
 * sequence numbers get the cached prior result; anything else is a
 * bad sequence number.
 */
2974 sctp_handle_str_reset_request_out(struct sctp_tcb *stcb,
2975 struct sctp_tmit_chunk *chk,
2976 struct sctp_stream_reset_out_request *req)
2979 int number_entries, len;
2980 struct sctp_association *asoc = &stcb->asoc;
2982 seq = ntohl(req->request_seq);
2984 /* now if its not a duplicate we process it */
2985 if (asoc->str_reset_seq_in == seq) {
2986 len = ntohs(req->ph.param_length);
2987 number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t));
2989 * the sender is resetting, handle the list issue.. we must
2990 * a) verify if we can do the reset, if so no problem b) If
2991 * we can't do the reset we must copy the request. c) queue
2992 * it, and setup the data in processor to trigger it off
2993 * when needed and dequeue all the queued data.
2995 tsn = ntohl(req->send_reset_at_tsn);
2997 /* move the reset action back one */
2998 asoc->last_reset_action[1] = asoc->last_reset_action[0];
/* can reset now only when our cumulative TSN has reached the reset point */
2999 if ((tsn == asoc->cumulative_tsn) ||
3000 (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN))) {
3001 /* we can do it now */
3002 sctp_reset_in_stream(stcb, number_entries, req->list_of_streams);
3003 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
3004 asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
3007 * we must queue it up and thus wait for the TSN's
3008 * to arrive that are at or before tsn
3010 struct sctp_stream_reset_list *liste;
3013 siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t));
3014 SCTP_MALLOC(liste, struct sctp_stream_reset_list *,
3016 if (liste == NULL) {
3017 /* gak out of memory */
3018 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
3019 asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
3023 liste->number_entries = number_entries;
3024 memcpy(&liste->req, req,
3025 (sizeof(struct sctp_stream_reset_out_request) + (number_entries * sizeof(uint16_t))));
3026 TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp);
3027 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
3028 asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
3030 asoc->str_reset_seq_in++;
3031 } else if ((asoc->str_reset_seq_in - 1) == seq) {
3033 * one seq back, just echo back last action since my
3034 * response was lost.
3036 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3037 } else if ((asoc->str_reset_seq_in - 2) == seq) {
3039 * two seq back, just echo back last action since my
3040 * response was lost.
3042 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
3044 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
/*
 * sctp_handle_stream_reset() -- entry point for a received
 * STREAM-RESET chunk.
 *
 * Allocates a response chunk (with its own mbuf, reserving the usual
 * header space), then walks the chunk's TLV parameters, dispatching
 * each OUT / IN / TSN request or RESPONSE parameter to its handler;
 * the handlers append result parameters to the response chunk.  The
 * response is queued on the control send queue, unless no parameter
 * produced output, in which case the chunk is freed (the
 * "strres_nochunk" path).  Parameter count is capped at
 * SCTP_MAX_RESET_PARAMS.
 *
 * NOTE(review): lossy extract -- loop/brace closings and some labels
 * are missing from this view; confirm against pristine source.
 */
3049 sctp_handle_stream_reset(struct sctp_tcb *stcb, struct sctp_stream_reset_out_req *sr_req)
3051 int chk_length, param_len, ptype;
3054 struct sctp_tmit_chunk *chk;
3055 struct sctp_chunkhdr *ch;
3056 struct sctp_paramhdr *ph;
3060 /* now it may be a reset or a reset-response */
3061 chk_length = ntohs(sr_req->ch.chunk_length);
3063 /* setup for adding the response */
3064 sctp_alloc_a_chunk(stcb, chk);
3068 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
3069 chk->rec.chunk_id.can_take_data = 0;
3070 chk->asoc = &stcb->asoc;
3071 chk->no_fr_allowed = 0;
3072 chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr);
3073 chk->book_size_scale = 0;
3074 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3075 if (chk->data == NULL) {
3078 sctp_m_freem(chk->data);
3081 sctp_free_a_chunk(stcb, chk);
3084 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
3086 /* setup chunk parameters */
3087 chk->sent = SCTP_DATAGRAM_UNSENT;
3089 chk->whoTo = stcb->asoc.primary_destination;
3090 atomic_add_int(&chk->whoTo->ref_count, 1);
3092 ch = mtod(chk->data, struct sctp_chunkhdr *);
3093 ch->chunk_type = SCTP_STREAM_RESET;
3094 ch->chunk_flags = 0;
3095 ch->chunk_length = htons(chk->send_size);
3096 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
3097 ph = (struct sctp_paramhdr *)&sr_req->sr_req;
/* walk each TLV parameter in the incoming chunk */
3098 while ((size_t)chk_length >= sizeof(struct sctp_stream_reset_tsn_request)) {
3099 param_len = ntohs(ph->param_length);
3100 if (param_len < (int)sizeof(struct sctp_stream_reset_tsn_request)) {
3104 ptype = ntohs(ph->param_type);
3106 if (num_param > SCTP_MAX_RESET_PARAMS) {
3107 /* hit the max of parameters already sorry.. */
3110 if (ptype == SCTP_STR_RESET_OUT_REQUEST) {
3111 struct sctp_stream_reset_out_request *req_out;
3113 req_out = (struct sctp_stream_reset_out_request *)ph;
3115 if (stcb->asoc.stream_reset_outstanding) {
3116 seq = ntohl(req_out->response_seq);
3117 if (seq == stcb->asoc.str_reset_seq_out) {
/* implicit ack: an OUT request answering our outstanding request */
3119 (void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_PERFORMED, NULL);
3122 sctp_handle_str_reset_request_out(stcb, chk, req_out);
3123 } else if (ptype == SCTP_STR_RESET_IN_REQUEST) {
3124 struct sctp_stream_reset_in_request *req_in;
3127 req_in = (struct sctp_stream_reset_in_request *)ph;
3128 sctp_handle_str_reset_request_in(stcb, chk, req_in);
3129 } else if (ptype == SCTP_STR_RESET_TSN_REQUEST) {
3130 struct sctp_stream_reset_tsn_request *req_tsn;
3133 req_tsn = (struct sctp_stream_reset_tsn_request *)ph;
3134 if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) {
3136 goto strres_nochunk;
3140 } else if (ptype == SCTP_STR_RESET_RESPONSE) {
3141 struct sctp_stream_reset_response *resp;
3144 resp = (struct sctp_stream_reset_response *)ph;
3145 seq = ntohl(resp->response_seq);
3146 result = ntohl(resp->result);
3147 if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) {
3149 goto strres_nochunk;
3155 ph = (struct sctp_paramhdr *)((caddr_t)ph + SCTP_SIZE32(param_len));
3156 chk_length -= SCTP_SIZE32(param_len);
3159 /* we have no response free the stuff */
3160 goto strres_nochunk;
3162 /* ok we have a chunk to link in */
3163 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue,
3166 stcb->asoc.ctrl_queue_cnt++;
3171 * Handle a router or endpoint's report of a packet loss. There are two ways
3172 * to handle this: either we get the whole packet and must dissect it
3173 * ourselves (possibly with truncation and/or corruption), or it is a summary
3174 * from a middle box that did the dissecting for us.
3177 sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp,
3178 struct sctp_tcb *stcb, struct sctp_nets *net)
3180 uint32_t bottle_bw, on_queue;
3184 struct sctp_chunk_desc desc;
3185 struct sctp_chunkhdr *ch;
3187 chlen = ntohs(cp->ch.chunk_length);
3188 chlen -= sizeof(struct sctp_pktdrop_chunk);
3189 /* XXX possible chlen underflow */
3192 if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)
3193 SCTP_STAT_INCR(sctps_pdrpbwrpt);
3195 ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr));
3196 chlen -= sizeof(struct sctphdr);
3197 /* XXX possible chlen underflow */
3198 memset(&desc, 0, sizeof(desc));
3200 trunc_len = (uint16_t) ntohs(cp->trunc_len);
3201 /* now the chunks themselves */
3202 while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) {
3203 desc.chunk_type = ch->chunk_type;
3204 /* get amount we need to move */
3205 at = ntohs(ch->chunk_length);
3206 if (at < sizeof(struct sctp_chunkhdr)) {
3207 /* corrupt chunk, maybe at the end? */
3208 SCTP_STAT_INCR(sctps_pdrpcrupt);
3211 if (trunc_len == 0) {
3212 /* we are supposed to have all of it */
3214 /* corrupt skip it */
3215 SCTP_STAT_INCR(sctps_pdrpcrupt);
3219 /* is there enough of it left ? */
3220 if (desc.chunk_type == SCTP_DATA) {
3221 if (chlen < (sizeof(struct sctp_data_chunk) +
3222 sizeof(desc.data_bytes))) {
3226 if (chlen < sizeof(struct sctp_chunkhdr)) {
3231 if (desc.chunk_type == SCTP_DATA) {
3232 /* can we get out the tsn? */
3233 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
3234 SCTP_STAT_INCR(sctps_pdrpmbda);
3236 if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) {
3238 struct sctp_data_chunk *dcp;
3242 dcp = (struct sctp_data_chunk *)ch;
3243 ddp = (uint8_t *) (dcp + 1);
3244 for (iii = 0; iii < sizeof(desc.data_bytes); iii++) {
3245 desc.data_bytes[iii] = ddp[iii];
3247 desc.tsn_ifany = dcp->dp.tsn;
3249 /* nope we are done. */
3250 SCTP_STAT_INCR(sctps_pdrpnedat);
3254 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
3255 SCTP_STAT_INCR(sctps_pdrpmbct);
3258 if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) {
3259 SCTP_STAT_INCR(sctps_pdrppdbrk);
3262 if (SCTP_SIZE32(at) > chlen) {
3265 chlen -= SCTP_SIZE32(at);
3266 if (chlen < sizeof(struct sctp_chunkhdr)) {
3267 /* done, none left */
3270 ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at));
3272 /* Now update any rwnd --- possibly */
3273 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) {
3274 /* From a peer, we get a rwnd report */
3277 SCTP_STAT_INCR(sctps_pdrpfehos);
3279 bottle_bw = ntohl(cp->bottle_bw);
3280 on_queue = ntohl(cp->current_onq);
3281 if (bottle_bw && on_queue) {
3282 /* a rwnd report is in here */
3283 if (bottle_bw > on_queue)
3284 a_rwnd = bottle_bw - on_queue;
3289 stcb->asoc.peers_rwnd = 0;
3291 if (a_rwnd > stcb->asoc.total_flight) {
3292 stcb->asoc.peers_rwnd =
3293 a_rwnd - stcb->asoc.total_flight;
3295 stcb->asoc.peers_rwnd = 0;
3297 if (stcb->asoc.peers_rwnd <
3298 stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3299 /* SWS sender side engages */
3300 stcb->asoc.peers_rwnd = 0;
3305 SCTP_STAT_INCR(sctps_pdrpfmbox);
3308 /* now middle boxes in sat networks get a cwnd bump */
3309 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) &&
3310 (stcb->asoc.sat_t3_loss_recovery == 0) &&
3311 (stcb->asoc.sat_network)) {
3313 * This is debateable but for sat networks it makes sense
3314 * Note if a T3 timer has went off, we will prohibit any
3315 * changes to cwnd until we exit the t3 loss recovery.
3320 #ifdef SCTP_CWND_MONITOR
3321 int old_cwnd = net->cwnd;
3324 /* need real RTT for this calc */
3325 rtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
3326 /* get bottle neck bw */
3327 bottle_bw = ntohl(cp->bottle_bw);
3328 /* and whats on queue */
3329 on_queue = ntohl(cp->current_onq);
3331 * adjust the on-queue if our flight is more it could be
3332 * that the router has not yet gotten data "in-flight" to it
3334 if (on_queue < net->flight_size)
3335 on_queue = net->flight_size;
3337 /* calculate the available space */
3338 bw_avail = (bottle_bw * rtt) / 1000;
3339 if (bw_avail > bottle_bw) {
3341 * Cap the growth to no more than the bottle neck.
3342 * This can happen as RTT slides up due to queues.
3343 * It also means if you have more than a 1 second
3344 * RTT with a empty queue you will be limited to the
3345 * bottle_bw per second no matter if other points
3346 * have 1/2 the RTT and you could get more out...
3348 bw_avail = bottle_bw;
3350 if (on_queue > bw_avail) {
3352 * No room for anything else don't allow anything
3353 * else to be "added to the fire".
3355 int seg_inflight, seg_onqueue, my_portion;
3357 net->partial_bytes_acked = 0;
3359 /* how much are we over queue size? */
3360 incr = on_queue - bw_avail;
3361 if (stcb->asoc.seen_a_sack_this_pkt) {
3363 * undo any cwnd adjustment that the sack
3366 net->cwnd = net->prev_cwnd;
3368 /* Now how much of that is mine? */
3369 seg_inflight = net->flight_size / net->mtu;
3370 seg_onqueue = on_queue / net->mtu;
3371 my_portion = (incr * seg_inflight) / seg_onqueue;
3373 /* Have I made an adjustment already */
3374 if (net->cwnd > net->flight_size) {
3376 * for this flight I made an adjustment we
3377 * need to decrease the portion by a share
3378 * our previous adjustment.
3382 diff_adj = net->cwnd - net->flight_size;
3383 if (diff_adj > my_portion)
3386 my_portion -= diff_adj;
3389 * back down to the previous cwnd (assume we have
3390 * had a sack before this packet). minus what ever
3391 * portion of the overage is my fault.
3393 net->cwnd -= my_portion;
3395 /* we will NOT back down more than 1 MTU */
3396 if (net->cwnd <= net->mtu) {
3397 net->cwnd = net->mtu;
3400 net->ssthresh = net->cwnd - 1;
3403 * Take 1/4 of the space left or max burst up ..
3404 * whichever is less.
3406 incr = min((bw_avail - on_queue) >> 2,
3407 (int)stcb->asoc.max_burst * (int)net->mtu);
3410 if (net->cwnd > bw_avail) {
3411 /* We can't exceed the pipe size */
3412 net->cwnd = bw_avail;
3414 if (net->cwnd < net->mtu) {
3415 /* We always have 1 MTU */
3416 net->cwnd = net->mtu;
3418 #ifdef SCTP_CWND_MONITOR
3419 if (net->cwnd - old_cwnd != 0) {
3420 /* log only changes */
3421 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
3422 SCTP_CWND_LOG_FROM_SAT);
3429 * handles all control chunks in a packet inputs: - m: mbuf chain, assumed to
3430 * still contain IP/SCTP header - stcb: is the tcb found for this packet -
3431 * offset: offset into the mbuf chain to first chunkhdr - length: is the
3432 * length of the complete packet outputs: - length: modified to remaining
3433 * length after control processing - netp: modified to new sctp_nets after
3434 * cookie-echo processing - return NULL to discard the packet (ie. no asoc,
3435 * bad packet,...) otherwise return the tcb for this packet
/*
 * NOTE(review): this extract is fragmentary -- interior lines (braces,
 * "break" statements, some error paths) are missing between the numbered
 * lines.  Comments added below describe only what the visible code shows.
 * Locking: locked_tcb is unlocked on every visible discard path; the
 * caller presumably holds the TCB lock on entry -- verify against callers.
 */
3437 static struct sctp_tcb *
3438 sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length,
3439 struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp,
3440 struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen,
3441 uint32_t vrf_id, uint32_t table_id)
3443 struct sctp_association *asoc;
3445 int num_chunks = 0; /* number of control chunks processed */
3448 int abort_no_unlock = 0;
3451 * How big should this be, and should it be alloc'd? Lets try the
3452 * d-mtu-ceiling for now (2k) and that should hopefully work ...
3453 * until we get into jumbo grams and such..
3455 uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
3456 struct sctp_tcb *locked_tcb = stcb;
3458 uint32_t auth_offset = 0, auth_len = 0;
3459 int auth_skipped = 0;
3461 SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n",
3462 iphlen, *offset, length, stcb);
3464 /* validate chunk header length... */
3465 if (ntohs(ch->chunk_length) < sizeof(*ch)) {
3466 SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n",
3467 ntohs(ch->chunk_length));
3471 * validate the verification tag
3473 vtag_in = ntohl(sh->v_tag);
3476 SCTP_TCB_LOCK_ASSERT(locked_tcb);
/* INIT must carry vtag 0 per the wire format; other values are dropped */
3478 if (ch->chunk_type == SCTP_INITIATION) {
3479 SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n",
3480 ntohs(ch->chunk_length), vtag_in);
3482 /* protocol error- silently discard... */
3483 SCTP_STAT_INCR(sctps_badvtag);
3485 SCTP_TCB_UNLOCK(locked_tcb);
3489 } else if (ch->chunk_type != SCTP_COOKIE_ECHO) {
3491 * If there is no stcb, skip the AUTH chunk and process
3492 * later after a stcb is found (to validate the lookup was
3495 if ((ch->chunk_type == SCTP_AUTHENTICATION) &&
3496 (stcb == NULL) && !sctp_auth_disable) {
3497 /* save this chunk for later processing */
3499 auth_offset = *offset;
3500 auth_len = ntohs(ch->chunk_length);
3502 /* (temporarily) move past this chunk */
3503 *offset += SCTP_SIZE32(auth_len);
3504 if (*offset >= length) {
3505 /* no more data left in the mbuf chain */
3509 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
3510 sizeof(struct sctp_chunkhdr), chunk_buf);
3517 if (ch->chunk_type == SCTP_COOKIE_ECHO) {
3518 goto process_control_chunks;
3521 * first check if it's an ASCONF with an unknown src addr we
3522 * need to look inside to find the association
3524 if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) {
3525 /* inp's refcount may be reduced */
3526 SCTP_INP_INCR_REF(inp);
3528 stcb = sctp_findassociation_ep_asconf(m, iphlen,
3529 *offset, sh, &inp, netp);
3532 * reduce inp's refcount if not reduced in
3533 * sctp_findassociation_ep_asconf().
3535 SCTP_INP_DECR_REF(inp);
3537 /* now go back and verify any auth chunk to be sure */
3538 if (auth_skipped && (stcb != NULL)) {
3539 struct sctp_auth_chunk *auth;
3541 auth = (struct sctp_auth_chunk *)
3542 sctp_m_getptr(m, auth_offset,
3543 auth_len, chunk_buf);
/* re-validate the AUTH chunk we skipped now that we have an stcb */
3546 if ((auth == NULL) || sctp_handle_auth(stcb, auth, m,
3548 /* auth HMAC failed so dump it */
3552 /* remaining chunks are HMAC checked */
3553 stcb->asoc.authenticated = 1;
3558 /* no association, so it's out of the blue... */
3559 sctp_handle_ootb(m, iphlen, *offset, sh, inp, NULL,
3563 SCTP_TCB_UNLOCK(locked_tcb);
3568 /* ABORT and SHUTDOWN can use either v_tag... */
3569 if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) ||
3570 (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) ||
3571 (ch->chunk_type == SCTP_PACKET_DROPPED)) {
/* these chunk types may be reflected, so the peer's vtag is acceptable
 * when the T-bit (SCTP_HAD_NO_TCB) is set */
3572 if ((vtag_in == asoc->my_vtag) ||
3573 ((ch->chunk_flags & SCTP_HAD_NO_TCB) &&
3574 (vtag_in == asoc->peer_vtag))) {
3577 /* drop this packet... */
3578 SCTP_STAT_INCR(sctps_badvtag);
3580 SCTP_TCB_UNLOCK(locked_tcb);
3584 } else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
3585 if (vtag_in != asoc->my_vtag) {
3587 * this could be a stale SHUTDOWN-ACK or the
3588 * peer never got the SHUTDOWN-COMPLETE and
3589 * is still hung; we have started a new asoc
3590 * but it won't complete until the shutdown
3594 SCTP_TCB_UNLOCK(locked_tcb);
3596 sctp_handle_ootb(m, iphlen, *offset, sh, inp,
3597 NULL, vrf_id, table_id);
3601 /* for all other chunks, vtag must match */
3602 if (vtag_in != asoc->my_vtag) {
3603 /* invalid vtag... */
3604 SCTPDBG(SCTP_DEBUG_INPUT3,
3605 "invalid vtag: %xh, expect %xh\n",
3606 vtag_in, asoc->my_vtag);
3607 SCTP_STAT_INCR(sctps_badvtag);
3609 SCTP_TCB_UNLOCK(locked_tcb);
3615 } /* end if !SCTP_COOKIE_ECHO */
3617 * process all control chunks...
/* a SACK or HEARTBEAT while COOKIE-ECHOED implies our COOKIE-ACK was lost */
3619 if (((ch->chunk_type == SCTP_SELECTIVE_ACK) ||
3620 (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) &&
3621 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
3622 /* implied cookie-ack.. we must have lost the ack */
3623 stcb->asoc.overall_error_count = 0;
3624 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb,
3627 process_control_chunks:
3628 while (IS_SCTP_CONTROL(ch)) {
3629 /* validate chunk length */
3630 chk_length = ntohs(ch->chunk_length);
3631 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n",
3632 ch->chunk_type, chk_length);
3633 if ((size_t)chk_length < sizeof(*ch) ||
3634 (*offset + chk_length) > length) {
3637 SCTP_TCB_UNLOCK(locked_tcb);
3641 SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks);
3643 * INIT-ACK only gets the init ack "header" portion only
3644 * because we don't have to process the peer's COOKIE. All
3645 * others get a complete chunk.
3647 if ((ch->chunk_type == SCTP_INITIATION_ACK) ||
3648 (ch->chunk_type == SCTP_INITIATION)) {
3649 /* get an init-ack chunk */
3650 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
3651 sizeof(struct sctp_init_ack_chunk), chunk_buf);
3655 SCTP_TCB_UNLOCK(locked_tcb);
3659 } else if (ch->chunk_type == SCTP_COOKIE_ECHO) {
3660 if (chk_length > sizeof(chunk_buf)) {
3662 * use just the size of the chunk buffer so
3663 * the front part of our cookie is intact.
3664 * The rest of cookie processing should use
3665 * the sctp_m_getptr() function to access
3668 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
3669 (sizeof(chunk_buf) - 4),
3674 SCTP_TCB_UNLOCK(locked_tcb);
3679 /* We can fit it all */
3683 /* get a complete chunk... */
3684 if ((size_t)chk_length > sizeof(chunk_buf)) {
/* oversize chunk: report out-of-resources to the peer and bail */
3686 struct sctp_paramhdr *phdr;
3690 oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
3691 0, M_DONTWAIT, 1, MT_DATA);
3694 /* pre-reserve some space */
3695 SCTP_BUF_RESV_UF(oper, sizeof(struct sctp_chunkhdr));
3696 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr);
3697 phdr = mtod(oper, struct sctp_paramhdr *);
3698 phdr->param_type = htons(SCTP_CAUSE_OUT_OF_RESC);
3699 phdr->param_length = htons(sizeof(struct sctp_paramhdr));
3700 sctp_queue_op_err(stcb, oper);
3704 SCTP_TCB_UNLOCK(locked_tcb);
3709 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
3710 chk_length, chunk_buf);
3712 SCTP_PRINTF("sctp_process_control: Can't get the all data....\n");
3715 SCTP_TCB_UNLOCK(locked_tcb);
3721 /* Save off the last place we got a control from */
3723 if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) {
3725 * allow last_control to be NULL if
3726 * ASCONF... ASCONF processing will find the
3729 if ((netp != NULL) && (*netp != NULL))
3730 stcb->asoc.last_control_chunk_from = *netp;
3733 #ifdef SCTP_AUDITING_ENABLED
3734 sctp_audit_log(0xB0, ch->chunk_type);
3737 /* check to see if this chunk required auth, but isn't */
3738 if ((stcb != NULL) && !sctp_auth_disable &&
3739 sctp_auth_is_required_chunk(ch->chunk_type,
3740 stcb->asoc.local_auth_chunks) &&
3741 !stcb->asoc.authenticated) {
3742 /* "silently" ignore */
3743 SCTP_STAT_INCR(sctps_recvauthmissing);
/* dispatch on the control chunk type */
3746 switch (ch->chunk_type) {
3747 case SCTP_INITIATION:
3748 /* must be first and only chunk */
3749 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n");
3750 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3751 /* We are not interested anymore? */
3752 if ((stcb) && (stcb->asoc.total_output_queue_size)) {
3754 * collision case where we are
3755 * sending to them too
3760 SCTP_TCB_UNLOCK(locked_tcb);
3766 if ((num_chunks > 1) ||
3767 (sctp_strict_init && (length - *offset > SCTP_SIZE32(chk_length)))) {
3770 SCTP_TCB_UNLOCK(locked_tcb);
3774 if ((stcb != NULL) &&
3775 (SCTP_GET_STATE(&stcb->asoc) ==
3776 SCTP_STATE_SHUTDOWN_ACK_SENT)) {
/* INIT while we await SHUTDOWN-COMPLETE: re-send SHUTDOWN-ACK */
3777 sctp_send_shutdown_ack(stcb,
3778 stcb->asoc.primary_destination);
3780 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC);
3782 SCTP_TCB_UNLOCK(locked_tcb);
3787 sctp_handle_init(m, iphlen, *offset, sh,
3788 (struct sctp_init_chunk *)ch, inp,
3789 stcb, *netp, &abort_no_unlock, vrf_id, table_id);
3791 if (abort_no_unlock)
3796 SCTP_TCB_UNLOCK(locked_tcb);
3800 case SCTP_INITIATION_ACK:
3801 /* must be first and only chunk */
3802 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT-ACK\n");
3803 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3804 /* We are not interested anymore */
3805 if ((stcb) && (stcb->asoc.total_output_queue_size)) {
3809 SCTP_TCB_UNLOCK(locked_tcb);
3813 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_26);
3818 if ((num_chunks > 1) ||
3819 (sctp_strict_init && (length - *offset > SCTP_SIZE32(chk_length)))) {
3822 SCTP_TCB_UNLOCK(locked_tcb);
3826 if ((netp) && (*netp)) {
3827 ret = sctp_handle_init_ack(m, iphlen, *offset, sh,
3828 (struct sctp_init_ack_chunk *)ch, stcb, *netp, &abort_no_unlock, vrf_id, table_id);
3833 * Special case, I must call the output routine to
3834 * get the cookie echoed
3836 if (abort_no_unlock)
3839 if ((stcb) && ret == 0)
3840 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC);
3843 SCTP_TCB_UNLOCK(locked_tcb);
3847 case SCTP_SELECTIVE_ACK:
3848 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK\n");
3849 SCTP_STAT_INCR(sctps_recvsacks);
3851 struct sctp_sack_chunk *sack;
3853 uint32_t a_rwnd, cum_ack;
3857 if ((stcb == NULL) || (chk_length < sizeof(struct sctp_sack_chunk))) {
3858 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on sack chunk, too small\n");
3861 SCTP_TCB_UNLOCK(locked_tcb);
3865 sack = (struct sctp_sack_chunk *)ch;
3866 nonce_sum_flag = ch->chunk_flags & SCTP_SACK_NONCE_SUM;
3867 cum_ack = ntohl(sack->sack.cum_tsn_ack);
3868 num_seg = ntohs(sack->sack.num_gap_ack_blks);
3869 a_rwnd = (uint32_t) ntohl(sack->sack.a_rwnd);
3870 stcb->asoc.seen_a_sack_this_pkt = 1;
3871 if ((stcb->asoc.pr_sctp_cnt == 0) &&
3873 ((compare_with_wrap(cum_ack, stcb->asoc.last_acked_seq, MAX_TSN)) ||
3874 (cum_ack == stcb->asoc.last_acked_seq)) &&
3875 (stcb->asoc.saw_sack_with_frags == 0) &&
3876 (!TAILQ_EMPTY(&stcb->asoc.sent_queue))
3879 * We have a SIMPLE sack having no
3880 * prior segments and data on sent
3881 * queue to be acked.. Use the
3882 * faster path sack processing. We
3883 * also allow window update sacks
3884 * with no missing segments to go
3887 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, nonce_sum_flag,
3891 sctp_handle_sack(sack, stcb, *netp, &abort_now, chk_length, a_rwnd);
3894 /* ABORT signal from sack processing */
3900 case SCTP_HEARTBEAT_REQUEST:
3901 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n");
3902 if ((stcb) && netp && *netp) {
3903 SCTP_STAT_INCR(sctps_recvheartbeat);
3904 sctp_send_heartbeat_ack(stcb, m, *offset,
3907 /* He's alive so give him credit */
3908 stcb->asoc.overall_error_count = 0;
3911 case SCTP_HEARTBEAT_ACK:
3912 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT-ACK\n");
3913 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) {
3917 SCTP_TCB_UNLOCK(locked_tcb);
3921 /* He's alive so give him credit */
3922 stcb->asoc.overall_error_count = 0;
3923 SCTP_STAT_INCR(sctps_recvheartbeatack);
3925 sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch,
3928 case SCTP_ABORT_ASSOCIATION:
3929 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT\n");
3930 if ((stcb) && netp && *netp)
3931 sctp_handle_abort((struct sctp_abort_chunk *)ch,
3937 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN\n");
3938 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) {
3941 SCTP_TCB_UNLOCK(locked_tcb);
3946 if (netp && *netp) {
3949 sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch,
3950 stcb, *netp, &abort_flag);
3957 case SCTP_SHUTDOWN_ACK:
3958 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-ACK\n");
3959 if ((stcb) && (netp) && (*netp))
3960 sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp);
3965 case SCTP_OPERATION_ERROR:
3966 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP-ERR\n");
3967 if ((stcb) && netp && *netp && sctp_handle_error(ch, stcb, *netp) < 0) {
3973 case SCTP_COOKIE_ECHO:
3974 SCTPDBG(SCTP_DEBUG_INPUT3,
3975 "SCTP_COOKIE-ECHO stcb is %p\n", stcb);
3976 if ((stcb) && (stcb->asoc.total_output_queue_size)) {
3979 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3980 /* We are not interested anymore */
3986 * First are we accepting? We do this again here
3987 * since it is possible that a previous endpoint
3988 * WAS listening responded to a INIT-ACK and then
3989 * closed. We opened and bound.. and are now no
3992 if (inp->sctp_socket->so_qlimit == 0) {
3993 if ((stcb) && (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3995 * special case, is this a retran'd
3996 * COOKIE-ECHO or a restarting assoc
3997 * that is a peeled off or
3998 * one-to-one style socket.
4000 goto process_cookie_anyway;
4002 sctp_abort_association(inp, stcb, m, iphlen,
4007 } else if (inp->sctp_socket->so_qlimit) {
4008 /* we are accepting so check limits like TCP */
4009 if (inp->sctp_socket->so_qlen >
4010 inp->sctp_socket->so_qlimit) {
/* listen backlog exceeded: optionally abort with out-of-resources */
4013 struct sctp_paramhdr *phdr;
4015 if (sctp_abort_if_one_2_one_hits_limit) {
4017 oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
4018 0, M_DONTWAIT, 1, MT_DATA);
4020 SCTP_BUF_LEN(oper) =
4021 sizeof(struct sctp_paramhdr);
4023 struct sctp_paramhdr *);
4025 htons(SCTP_CAUSE_OUT_OF_RESC);
4026 phdr->param_length =
4027 htons(sizeof(struct sctp_paramhdr));
4029 sctp_abort_association(inp, stcb, m,
4030 iphlen, sh, oper, vrf_id, table_id);
4036 process_cookie_anyway:
4038 struct mbuf *ret_buf;
4039 struct sctp_inpcb *linp;
/* cookie processing may create an association: hold the create lock */
4048 SCTP_ASOC_CREATE_LOCK(linp);
4052 sctp_handle_cookie_echo(m, iphlen,
4054 (struct sctp_cookie_echo_chunk *)ch,
4066 SCTP_ASOC_CREATE_UNLOCK(linp);
4068 if (ret_buf == NULL) {
4070 SCTP_TCB_UNLOCK(locked_tcb);
4072 SCTPDBG(SCTP_DEBUG_INPUT3,
4073 "GAK, null buffer\n");
4078 /* if AUTH skipped, see if it verified... */
4083 if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) {
4085 * Restart the timer if we have
4088 struct sctp_tmit_chunk *chk;
4090 chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
4092 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4093 stcb->sctp_ep, stcb,
4099 case SCTP_COOKIE_ACK:
4100 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE-ACK\n");
4101 if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) {
4103 SCTP_TCB_UNLOCK(locked_tcb);
4107 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4108 /* We are not interested anymore */
4109 if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4112 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
4117 /* He's alive so give him credit */
4118 if ((stcb) && netp && *netp) {
4119 stcb->asoc.overall_error_count = 0;
4120 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp);
4124 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-ECHO\n");
4125 /* He's alive so give him credit */
4126 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_ecne_chunk))) {
4129 SCTP_TCB_UNLOCK(locked_tcb);
4135 stcb->asoc.overall_error_count = 0;
4136 sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch,
4141 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-CWR\n");
4142 /* He's alive so give him credit */
4143 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) {
4146 SCTP_TCB_UNLOCK(locked_tcb);
4152 stcb->asoc.overall_error_count = 0;
4153 sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb);
4156 case SCTP_SHUTDOWN_COMPLETE:
4157 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-COMPLETE\n");
4158 /* must be first and only chunk */
4159 if ((num_chunks > 1) ||
4160 (length - *offset > SCTP_SIZE32(chk_length))) {
4163 SCTP_TCB_UNLOCK(locked_tcb);
4167 if ((stcb) && netp && *netp) {
4168 sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch,
4175 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n");
4176 /* He's alive so give him credit */
4178 stcb->asoc.overall_error_count = 0;
4179 sctp_handle_asconf(m, *offset,
4180 (struct sctp_asconf_chunk *)ch, stcb);
4183 case SCTP_ASCONF_ACK:
4184 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF-ACK\n");
4185 if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) {
4188 SCTP_TCB_UNLOCK(locked_tcb);
4193 if ((stcb) && netp && *netp) {
4194 /* He's alive so give him credit */
4195 stcb->asoc.overall_error_count = 0;
4196 sctp_handle_asconf_ack(m, *offset,
4197 (struct sctp_asconf_ack_chunk *)ch, stcb, *netp);
4200 case SCTP_FORWARD_CUM_TSN:
4201 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_FWD-TSN\n");
4202 if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) {
4205 SCTP_TCB_UNLOCK(locked_tcb);
4210 /* He's alive so give him credit */
4214 stcb->asoc.overall_error_count = 0;
4216 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4217 /* We are not interested anymore */
4218 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_28);
4222 sctp_handle_forward_tsn(stcb,
4223 (struct sctp_forward_tsn_chunk *)ch, &abort_flag);
4228 stcb->asoc.overall_error_count = 0;
4233 case SCTP_STREAM_RESET:
4234 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n");
4235 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4236 chk_length, chunk_buf);
4237 if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) {
4240 SCTP_TCB_UNLOCK(locked_tcb);
4245 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4246 /* We are not interested anymore */
4247 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_29);
4251 if (stcb->asoc.peer_supports_strreset == 0) {
4253 * hmm, peer should have announced this, but
4254 * we will turn it on since he is sending us
4257 stcb->asoc.peer_supports_strreset = 1;
4259 if (sctp_handle_stream_reset(stcb, (struct sctp_stream_reset_out_req *)ch)) {
4260 /* stop processing */
4265 case SCTP_PACKET_DROPPED:
4266 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n");
4267 /* re-get it all please */
4268 if (chk_length < sizeof(struct sctp_pktdrop_chunk)) {
4271 SCTP_TCB_UNLOCK(locked_tcb);
4276 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4277 chk_length, chunk_buf);
4279 if (ch && (stcb) && netp && (*netp)) {
4280 sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch,
4285 case SCTP_AUTHENTICATION:
4286 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n");
4287 if (sctp_auth_disable)
4291 /* save the first AUTH for later processing */
4292 if (auth_skipped == 0) {
4293 auth_offset = *offset;
4294 auth_len = chk_length;
4297 /* skip this chunk (temporarily) */
/* sanity: AUTH chunk length must cover header plus at most a max digest */
4300 if ((chk_length < (sizeof(struct sctp_auth_chunk))) ||
4301 (chk_length > (sizeof(struct sctp_auth_chunk) +
4302 SCTP_AUTH_DIGEST_LEN_MAX))) {
4305 SCTP_TCB_UNLOCK(locked_tcb);
4310 if (got_auth == 1) {
4311 /* skip this chunk... it's already auth'd */
4314 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4315 chk_length, chunk_buf);
4317 if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch,
4319 /* auth HMAC failed so dump the packet */
4323 /* remaining chunks are HMAC checked */
4324 stcb->asoc.authenticated = 1;
4330 /* it's an unknown chunk! */
/* upper bits of the chunk type encode handling: 0x40 = report error,
 * 0x80 = skip and continue; neither = stop processing the packet */
4331 if ((ch->chunk_type & 0x40) && (stcb != NULL)) {
4333 struct sctp_paramhdr *phd;
4335 mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
4336 0, M_DONTWAIT, 1, MT_DATA);
4338 phd = mtod(mm, struct sctp_paramhdr *);
4340 * We cheat and use param type since
4341 * we did not bother to define a
4342 * error cause struct. They are the
4343 * same basic format with different
4346 phd->param_type = htons(SCTP_CAUSE_UNRECOG_CHUNK);
4347 phd->param_length = htons(chk_length + sizeof(*phd));
4348 SCTP_BUF_LEN(mm) = sizeof(*phd);
4349 SCTP_BUF_NEXT(mm) = SCTP_M_COPYM(m, *offset, SCTP_SIZE32(chk_length),
4351 if (SCTP_BUF_NEXT(mm)) {
4352 sctp_queue_op_err(stcb, mm);
4358 if ((ch->chunk_type & 0x80) == 0) {
4359 /* discard this packet */
4362 } /* else skip this bad chunk and continue... */
4364 } /* switch (ch->chunk_type) */
4368 /* get the next chunk */
4369 *offset += SCTP_SIZE32(chk_length);
4370 if (*offset >= length) {
4371 /* no more data left in the mbuf chain */
4374 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4375 sizeof(struct sctp_chunkhdr), chunk_buf);
4378 SCTP_TCB_UNLOCK(locked_tcb);
4389 * Process the ECN bits we have something set so we must look to see if it is
4390 * ECN(0) or ECN(1) or CE
/*
 * NOTE(review): fragmentary extract -- the CE branch body (line 4397) and
 * closing braces are not visible here.  Updates the association's ECN
 * nonce sum (ECT1 only) and drags last_echo_tsn forward so it never lags
 * the cumulative TSN by more than the serial-number window.
 */
4392 static __inline void
4393 sctp_process_ecn_marked_a(struct sctp_tcb *stcb, struct sctp_nets *net,
4396 if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
4398 } else if ((ecn_bits & SCTP_ECT1_BIT) == SCTP_ECT1_BIT) {
4400 * we only add to the nonce sum for ECT1, ECT0 does not
4401 * change the NS bit (that we have yet to find a way to send
4405 /* ECN Nonce stuff */
4406 stcb->asoc.receiver_nonce_sum++;
4407 stcb->asoc.receiver_nonce_sum &= SCTP_SACK_NONCE_SUM;
4410 * Drag up the last_echo point if cumack is larger since we
4411 * don't want the point falling way behind by more than
4412 * 2^^31 and then having it be incorrect.
4414 if (compare_with_wrap(stcb->asoc.cumulative_tsn,
4415 stcb->asoc.last_echo_tsn, MAX_TSN)) {
4416 stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
4418 } else if ((ecn_bits & SCTP_ECT0_BIT) == SCTP_ECT0_BIT) {
4420 * Drag up the last_echo point if cumack is larger since we
4421 * don't want the point falling way behind by more than
4422 * 2^^31 and then having it be incorrect.
4424 if (compare_with_wrap(stcb->asoc.cumulative_tsn,
4425 stcb->asoc.last_echo_tsn, MAX_TSN)) {
4426 stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
/*
 * ECN part 2: after data processing, if a CE mark was seen and high_tsn
 * advances past the last TSN we echoed, queue an ECN-ECHO chunk to ask
 * the sender to reduce its congestion window (the peer's CWR will later
 * remove it).  NOTE(review): closing braces are missing from this extract.
 */
4431 static __inline void
4432 sctp_process_ecn_marked_b(struct sctp_tcb *stcb, struct sctp_nets *net,
4433 uint32_t high_tsn, uint8_t ecn_bits)
4435 if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
4437 * we possibly must notify the sender that a congestion
4438 * window reduction is in order. We do this by adding a ECNE
4439 * chunk to the output chunk queue. The incoming CWR will
4440 * remove this chunk.
/* only echo once per TSN range: compare against last_echo_tsn with wrap */
4442 if (compare_with_wrap(high_tsn, stcb->asoc.last_echo_tsn,
4444 /* Yep, we need to add a ECNE */
4445 sctp_send_ecn_echo(stcb, net, high_tsn);
4446 stcb->asoc.last_echo_tsn = high_tsn;
4452 * common input chunk processing (v4 and v6)
/*
 * NOTE(review): fragmentary extract -- many interior lines (braces, labels
 * such as the apparent "out_now:" path, some declarations) are missing.
 * Flow visible here: run control-chunk processing if the first chunk is a
 * control chunk, validate vtag/AUTH for DATA-only packets, process DATA
 * per association state, handle ECN parts 1 and 2, run the SACK check
 * after a FWD-TSN, then trigger any pending chunk output and unlock.
 */
4455 sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset,
4456 int length, struct sctphdr *sh, struct sctp_chunkhdr *ch,
4457 struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net,
4458 uint8_t ecn_bits, uint32_t vrf_id, uint32_t table_id)
4461 * Control chunk processing
4464 int fwd_tsn_seen = 0, data_processed = 0;
4465 struct mbuf *m = *mm;
4469 SCTP_STAT_INCR(sctps_recvdatagrams);
4470 #ifdef SCTP_AUDITING_ENABLED
4471 sctp_audit_log(0xE0, 1);
4472 sctp_auditing(0, inp, stcb, net);
4475 SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d\n",
4479 /* always clear this before beginning a packet */
4480 stcb->asoc.authenticated = 0;
4481 stcb->asoc.seen_a_sack_this_pkt = 0;
4483 if (IS_SCTP_CONTROL(ch)) {
4484 /* process the control portion of the SCTP packet */
4485 /* sa_ignore NO_NULL_CHK */
4486 stcb = sctp_process_control(m, iphlen, &offset, length, sh, ch,
4487 inp, stcb, &net, &fwd_tsn_seen, vrf_id, table_id);
4490 * This covers us if the cookie-echo was there and
4491 * it changes our INP.
4493 inp = stcb->sctp_ep;
4497 * no control chunks, so pre-process DATA chunks (these
4498 * checks are taken care of by control processing)
4502 * if DATA only packet, and auth is required, then punt...
4503 * can't have authenticated without any AUTH (control)
4506 if ((stcb != NULL) && !sctp_auth_disable &&
4507 sctp_auth_is_required_chunk(SCTP_DATA,
4508 stcb->asoc.local_auth_chunks)) {
4509 /* "silently" ignore */
4510 SCTP_STAT_INCR(sctps_recvauthmissing);
4511 SCTP_TCB_UNLOCK(stcb);
4515 /* out of the blue DATA chunk */
4516 sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
/* DATA-only packet: the vtag was not checked by control processing */
4520 if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) {
4521 /* v_tag mismatch! */
4522 SCTP_STAT_INCR(sctps_badvtag);
4523 SCTP_TCB_UNLOCK(stcb);
4530 * no valid TCB for this packet, or we found it's a bad
4531 * packet while processing control, or we're done with this
4532 * packet (done or skip rest of data), so we drop it...
4537 * DATA chunk processing
4539 /* plow through the data chunks while length > offset */
4542 * Rest should be DATA only. Check authentication state if AUTH for
4545 if ((length > offset) && (stcb != NULL) && !sctp_auth_disable &&
4546 sctp_auth_is_required_chunk(SCTP_DATA,
4547 stcb->asoc.local_auth_chunks) &&
4548 !stcb->asoc.authenticated) {
4549 /* "silently" ignore */
4550 SCTP_STAT_INCR(sctps_recvauthmissing);
4551 SCTPDBG(SCTP_DEBUG_AUTH1,
4552 "Data chunk requires AUTH, skipped\n");
4555 if (length > offset) {
4559 * First check to make sure our state is correct. We would
4560 * not get here unless we really did have a tag, so we don't
4561 * abort if this happens, just dump the chunk silently.
4563 switch (SCTP_GET_STATE(&stcb->asoc)) {
4564 case SCTP_STATE_COOKIE_ECHOED:
4566 * we consider data with valid tags in this state
4567 * shows us the cookie-ack was lost. Imply it was
4570 stcb->asoc.overall_error_count = 0;
4571 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net);
4573 case SCTP_STATE_COOKIE_WAIT:
4575 * We consider OOTB any data sent during asoc setup.
4577 sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
4579 SCTP_TCB_UNLOCK(stcb);
4582 case SCTP_STATE_EMPTY:	/* should not happen */
4583 case SCTP_STATE_INUSE:	/* should not happen */
4584 case SCTP_STATE_SHUTDOWN_RECEIVED:	/* This is a peer error */
4585 case SCTP_STATE_SHUTDOWN_ACK_SENT:
4587 SCTP_TCB_UNLOCK(stcb);
4590 case SCTP_STATE_OPEN:
4591 case SCTP_STATE_SHUTDOWN_SENT:
4594 /* take care of ECN, part 1. */
4595 if (stcb->asoc.ecn_allowed &&
4596 (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
4597 sctp_process_ecn_marked_a(stcb, net, ecn_bits);
4599 /* plow through the data chunks while length > offset */
4600 retval = sctp_process_data(mm, iphlen, &offset, length, sh,
4601 inp, stcb, net, &high_tsn);
4604 * The association aborted, NO UNLOCK needed since
4605 * the association is destroyed.
4611 /* take care of ecn part 2. */
4612 if (stcb->asoc.ecn_allowed &&
4613 (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
4614 sctp_process_ecn_marked_b(stcb, net, high_tsn,
4619 * Anything important needs to have been m_copy'ed in
/* a FWD-TSN with no data processed may have opened a gap: force a SACK check */
4623 if ((data_processed == 0) && (fwd_tsn_seen)) {
4626 if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
4627 stcb->asoc.cumulative_tsn, MAX_TSN)) {
4628 /* there was a gap before this data was processed */
4631 sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
4633 /* Again, we aborted so NO UNLOCK needed */
4637 /* trigger send of any chunks in queue... */
4639 #ifdef SCTP_AUDITING_ENABLED
4640 sctp_audit_log(0xE0, 2);
4641 sctp_auditing(1, inp, stcb, net);
4643 SCTPDBG(SCTP_DEBUG_INPUT1,
4644 "Check for chunk output prw:%d tqe:%d tf=%d\n",
4645 stcb->asoc.peers_rwnd,
4646 TAILQ_EMPTY(&stcb->asoc.control_send_queue),
4647 stcb->asoc.total_flight);
4648 un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
/* send if control chunks are queued, or data fits in the peer's window
 * (zero-window probe allowed when nothing is in flight) */
4650 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue) ||
4652 (stcb->asoc.peers_rwnd > 0 ||
4653 (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) {
4654 SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n");
4655 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC);
4656 SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n");
4658 #ifdef SCTP_AUDITING_ENABLED
4659 sctp_audit_log(0xE0, 3);
4660 sctp_auditing(2, inp, stcb, net);
4662 SCTP_TCB_UNLOCK(stcb);
4669 sctp_input(i_pak, off)
4674 #ifdef SCTP_MBUF_LOGGING
4680 uint32_t vrf_id = 0, table_id = 0;
4684 struct sctp_inpcb *inp = NULL;
4686 uint32_t check, calc_check;
4687 struct sctp_nets *net;
4688 struct sctp_tcb *stcb = NULL;
4689 struct sctp_chunkhdr *ch;
4690 int refcount_up = 0;
4691 int length, mlen, offset;
4694 if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) {
4695 SCTP_RELEASE_PKT(i_pak);
4698 if (SCTP_GET_PKT_TABLEID(i_pak, table_id)) {
4699 SCTP_RELEASE_PKT(i_pak);
4702 mlen = SCTP_HEADER_LEN(i_pak);
4704 m = SCTP_HEADER_TO_CHAIN(i_pak);
4707 SCTP_STAT_INCR(sctps_recvpackets);
4708 SCTP_STAT_INCR_COUNTER64(sctps_inpackets);
4710 #ifdef SCTP_MBUF_LOGGING
4711 /* Log in any input mbufs */
4714 if (SCTP_BUF_IS_EXTENDED(mat)) {
4715 sctp_log_mb(mat, SCTP_MBUF_INPUT);
4717 mat = SCTP_BUF_NEXT(mat);
4722 * Get IP, SCTP, and first chunk header together in first mbuf.
4724 ip = mtod(m, struct ip *);
4725 offset = iphlen + sizeof(*sh) + sizeof(*ch);
4726 if (SCTP_BUF_LEN(m) < offset) {
4727 if ((m = m_pullup(m, offset)) == 0) {
4728 SCTP_STAT_INCR(sctps_hdrops);
4731 ip = mtod(m, struct ip *);
4733 sh = (struct sctphdr *)((caddr_t)ip + iphlen);
4734 ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(*sh));
4736 /* SCTP does not allow broadcasts or multicasts */
4737 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
4740 if (SCTP_IS_IT_BROADCAST(ip->ip_dst, m)) {
4742 * We only look at broadcast if its a front state, All
4743 * others we will not have a tcb for anyway.
4747 /* validate SCTP checksum */
4748 if ((sctp_no_csum_on_loopback == 0) || !SCTP_IS_IT_LOOPBACK(m)) {
4750 * we do NOT validate things from the loopback if the sysctl
4753 check = sh->checksum; /* save incoming checksum */
4754 if ((check == 0) && (sctp_no_csum_on_loopback)) {
4756 * special hook for where we got a local address
4757 * somehow routed across a non IFT_LOOP type
4760 if (ip->ip_src.s_addr == ip->ip_dst.s_addr)
4761 goto sctp_skip_csum_4;
4763 sh->checksum = 0; /* prepare for calc */
4764 calc_check = sctp_calculate_sum(m, &mlen, iphlen);
4765 if (calc_check != check) {
4766 SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x m:%p mlen:%d iphlen:%d\n",
4767 calc_check, check, m, mlen, iphlen);
4769 stcb = sctp_findassociation_addr(m, iphlen,
4770 offset - sizeof(*ch),
4773 if ((inp) && (stcb)) {
4774 sctp_send_packet_dropped(stcb, net, m, iphlen, 1);
4775 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR);
4776 } else if ((inp != NULL) && (stcb == NULL)) {
4779 SCTP_STAT_INCR(sctps_badsum);
4780 SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
4783 sh->checksum = calc_check;
4786 /* destination port of 0 is illegal, based on RFC2960. */
4787 if (sh->dest_port == 0) {
4788 SCTP_STAT_INCR(sctps_hdrops);
4791 /* validate mbuf chain length with IP payload length */
4792 if (mlen < (ip->ip_len - iphlen)) {
4793 SCTP_STAT_INCR(sctps_hdrops);
4797 * Locate pcb and tcb for datagram sctp_findassociation_addr() wants
4798 * IP/SCTP/first chunk header...
4800 stcb = sctp_findassociation_addr(m, iphlen, offset - sizeof(*ch),
4801 sh, ch, &inp, &net, vrf_id);
4802 /* inp's ref-count increased && stcb locked */
4804 struct sctp_init_chunk *init_chk, chunk_buf;
4806 SCTP_STAT_INCR(sctps_noport);
4809 * we use the bandwidth limiting to protect against sending
4810 * too many ABORTS all at once. In this case these count the
4811 * same as an ICMP message.
4813 if (badport_bandlim(0) < 0)
4815 #endif /* ICMP_BANDLIM */
4816 SCTPDBG(SCTP_DEBUG_INPUT1,
4817 "Sending a ABORT from packet entry!\n");
4818 if (ch->chunk_type == SCTP_INITIATION) {
4820 * we do a trick here to get the INIT tag, dig in
4821 * and get the tag from the INIT and put it in the
4824 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4825 iphlen + sizeof(*sh), sizeof(*init_chk),
4826 (uint8_t *) & chunk_buf);
4827 if (init_chk != NULL)
4828 sh->v_tag = init_chk->init.initiate_tag;
4830 if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
4831 sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id,
4835 if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) {
4838 if (ch->chunk_type != SCTP_ABORT_ASSOCIATION)
4839 sctp_send_abort(m, iphlen, sh, 0, NULL, vrf_id,
4842 } else if (stcb == NULL) {
4847 * I very much doubt any of the IPSEC stuff will work but I have no
4848 * idea, so I will leave it in place.
4851 if (inp && ipsec4_in_reject(m, &inp->ip_inp.inp)) {
4852 ipsecstat.in_polvio++;
4853 SCTP_STAT_INCR(sctps_hdrops);
4859 * common chunk processing
4861 length = ip->ip_len + iphlen;
4862 offset -= sizeof(struct sctp_chunkhdr);
4864 ecn_bits = ip->ip_tos;
4866 /* sa_ignore NO_NULL_CHK */
4867 sctp_common_input_processing(&m, iphlen, offset, length, sh, ch,
4868 inp, stcb, net, ecn_bits, vrf_id,
4870 /* inp's ref-count reduced && stcb unlocked */
4874 if ((inp) && (refcount_up)) {
4875 /* reduce ref-count */
4876 SCTP_INP_WLOCK(inp);
4877 SCTP_INP_DECR_REF(inp);
4878 SCTP_INP_WUNLOCK(inp);
4883 SCTP_TCB_UNLOCK(stcb);
4885 if ((inp) && (refcount_up)) {
4886 /* reduce ref-count */
4887 SCTP_INP_WLOCK(inp);
4888 SCTP_INP_DECR_REF(inp);
4889 SCTP_INP_WUNLOCK(inp);