2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
7 * a) Redistributions of source code must retain the above copyright notice,
8 * this list of conditions and the following disclaimer.
10 * b) Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the distribution.
14 * c) Neither the name of Cisco Systems, Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived
16 * from this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
31 /* $KAME: sctp_input.c,v 1.27 2005/03/06 16:04:17 itojun Exp $ */
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_var.h>
38 #include <netinet/sctp_sysctl.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctputil.h>
42 #include <netinet/sctp_output.h>
43 #include <netinet/sctp_input.h>
44 #include <netinet/sctp_auth.h>
45 #include <netinet/sctp_indata.h>
46 #include <netinet/sctp_asconf.h>
47 #include <netinet/sctp_bsd_addr.h>
/*
 * sctp_stop_all_cookie_timers() - stop the COOKIE and INIT retransmission
 * timers on every destination address (net) of the association, so that no
 * stale handshake timer survives an INIT/COOKIE collision.
 * Caller must hold the TCB lock (asserted below).
 * NOTE(review): this listing has dropped lines (return type, braces);
 * comments describe only the visible statements.
 */
52 sctp_stop_all_cookie_timers(struct sctp_tcb *stcb)
54 struct sctp_nets *net;
57 * This now not only stops all cookie timers it also stops any INIT
58 * timers as well. This will make sure that the timers are stopped
59 * in all collision cases.
61 SCTP_TCB_LOCK_ASSERT(stcb);
/* Walk every destination and stop whichever handshake timer is armed. */
62 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
63 if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) {
64 sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE,
67 net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1);
68 } else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) {
69 sctp_timer_stop(SCTP_TIMER_TYPE_INIT,
72 net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2);
/*
 * sctp_handle_init() - process an inbound INIT chunk.
 * Validates that the endpoint is accepting, that the chunk is at least
 * sizeof(struct sctp_init_chunk), and that the mandatory INIT parameters
 * are sane (initiate_tag != 0, a_rwnd >= SCTP_MIN_RWND, non-zero inbound
 * and outbound stream counts, valid AUTH parameters).  Any failure sends
 * an ABORT via sctp_abort_association() and sets *abort_no_unlock so the
 * caller knows the TCB lock was already released.  On success it replies
 * with an INIT-ACK carrying a state cookie.
 */
79 sctp_handle_init(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
80 struct sctp_init_chunk *cp, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
81 struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
83 struct sctp_init *init;
87 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n",
91 /* First are we accepting? */
/* so_qlimit == 0 means no listen backlog; with no existing TCB we refuse. */
92 if ((inp->sctp_socket->so_qlimit == 0) && (stcb == NULL)) {
93 SCTPDBG(SCTP_DEBUG_INPUT2,
94 "sctp_handle_init: Abort, so_qlimit:%d\n",
95 inp->sctp_socket->so_qlimit);
97 * FIX ME ?? What about TCP model and we have a
100 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
103 *abort_no_unlock = 1;
/* Runt INIT chunk: smaller than the fixed chunk header + init fields. */
106 if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) {
108 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
109 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
112 *abort_no_unlock = 1;
115 /* validate parameters */
116 if (init->initiate_tag == 0) {
117 /* protocol error... send abort */
118 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
119 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
122 *abort_no_unlock = 1;
125 if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) {
126 /* invalid parameter... send abort */
127 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
128 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
/*
 * NOTE(review): unlike the other abort paths, no "*abort_no_unlock = 1;"
 * is visible for the a_rwnd case — likely a line dropped from this
 * listing; confirm against the upstream sctp_input.c.
 */
132 if (init->num_inbound_streams == 0) {
133 /* protocol error... send abort */
134 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
135 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
138 *abort_no_unlock = 1;
141 if (init->num_outbound_streams == 0) {
142 /* protocol error... send abort */
143 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
144 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
147 *abort_no_unlock = 1;
/* Bound for parameter walking: end of this INIT chunk in the mbuf chain. */
150 init_limit = offset + ntohs(cp->ch.chunk_length);
151 if (sctp_validate_init_auth_params(m, offset + sizeof(*cp),
153 /* auth parameter(s) error... send abort */
154 sctp_abort_association(inp, stcb, m, iphlen, sh, NULL, vrf_id);
156 *abort_no_unlock = 1;
159 /* send an INIT-ACK w/cookie */
160 SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n");
161 sctp_send_initiate_ack(inp, stcb, m, iphlen, offset, sh, cp, vrf_id);
165 * process peer "INIT/INIT-ACK" chunk returns value < 0 on error
/*
 * sctp_process_init() - absorb the peer's INIT (or INIT-ACK) parameters
 * into the association: saves peer vtag and rwnd, seeds per-net ssthresh,
 * trims outbound streams down to the peer's advertised inbound count
 * (flushing and notifying for any queued data on abandoned streams),
 * resets the TSN mapping/cumulative-ack bookkeeping from the peer's
 * initial_tsn, and (re)allocates the inbound stream array.
 * NOTE(review): lines are missing from this listing (e.g. the assignments
 * of "init" and "asoc", inner loop braces); comments cover visible logic.
 */
168 sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb,
169 struct sctp_nets *net)
171 struct sctp_init *init;
172 struct sctp_association *asoc;
173 struct sctp_nets *lnet;
178 /* save off parameters */
179 asoc->peer_vtag = ntohl(init->initiate_tag);
180 asoc->peers_rwnd = ntohl(init->a_rwnd);
181 if (TAILQ_FIRST(&asoc->nets)) {
182 /* update any ssthresh's that may have a default */
183 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
184 lnet->ssthresh = asoc->peers_rwnd;
186 if (sctp_logging_level & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
187 sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION);
/* Hold the send lock while we shrink the outbound stream array. */
191 SCTP_TCB_SEND_LOCK(stcb);
192 if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) {
194 struct sctp_stream_out *outs;
195 struct sctp_stream_queue_pending *sp;
197 /* cut back on number of streams */
198 newcnt = ntohs(init->num_inbound_streams);
199 /* This if is probably not needed but I am cautious */
201 /* First make sure no data chunks are trapped */
/* Drain every stream index the peer will not accept. */
202 for (i = newcnt; i < asoc->pre_open_streams; i++) {
203 outs = &asoc->strmout[i];
204 sp = TAILQ_FIRST(&outs->outqueue);
206 TAILQ_REMOVE(&outs->outqueue, sp,
208 asoc->stream_queue_cnt--;
/* Tell the ULP its queued message on this stream was never sent. */
209 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL,
210 stcb, SCTP_NOTIFY_DATAGRAM_UNSENT,
213 sctp_m_freem(sp->data);
216 sctp_free_remote_addr(sp->net);
219 SCTP_PRINTF("sp:%p tcb:%p weird free case\n",
222 sctp_free_a_strmoq(stcb, sp);
223 /* sa_ignore FREED_MEMORY */
224 sp = TAILQ_FIRST(&outs->outqueue);
228 /* cut back the count and abandon the upper streams */
229 asoc->pre_open_streams = newcnt;
231 SCTP_TCB_SEND_UNLOCK(stcb);
232 asoc->streamoutcnt = asoc->pre_open_streams;
/* Peer's initial_tsn - 1 becomes our cumulative-ack baseline. */
234 asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;
235 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
236 sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
238 /* This is the next one we expect */
239 asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;
241 asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
242 asoc->cumulative_tsn = asoc->asconf_seq_in;
243 asoc->last_echo_tsn = asoc->asconf_seq_in;
244 asoc->advanced_peer_ack_point = asoc->last_acked_seq;
245 /* open the requested streams */
247 if (asoc->strmin != NULL) {
248 /* Free the old ones */
249 struct sctp_queued_to_read *ctl;
251 for (i = 0; i < asoc->streamincnt; i++) {
252 ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
254 TAILQ_REMOVE(&asoc->strmin[i].inqueue, ctl, next);
255 sctp_free_remote_addr(ctl->whoFrom);
256 sctp_m_freem(ctl->data);
258 sctp_free_a_readq(stcb, ctl);
259 ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
262 SCTP_FREE(asoc->strmin, SCTP_M_STRMI);
/* Inbound stream count = peer's outbound count, clamped to our maximum. */
264 asoc->streamincnt = ntohs(init->num_outbound_streams);
265 if (asoc->streamincnt > MAX_SCTP_STREAMS) {
266 asoc->streamincnt = MAX_SCTP_STREAMS;
268 SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt *
269 sizeof(struct sctp_stream_in), SCTP_M_STRMI);
270 if (asoc->strmin == NULL) {
271 /* we didn't get memory for the streams! */
272 SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n");
275 for (i = 0; i < asoc->streamincnt; i++) {
276 asoc->strmin[i].stream_no = i;
/* 0xffff: "nothing delivered yet" sentinel for the 16-bit stream seq. */
277 asoc->strmin[i].last_sequence_delivered = 0xffff;
279 * U-stream ranges will be set when the cookie is unpacked.
280 * Or for the INIT sender they are un set (if pr-sctp not
281 * supported) when the INIT-ACK arrives.
283 TAILQ_INIT(&asoc->strmin[i].inqueue);
284 asoc->strmin[i].delivery_started = 0;
287 * load_address_from_init will put the addresses into the
288 * association when the COOKIE is processed or the INIT-ACK is
289 * processed. Both types of COOKIE's existing and new call this
290 * routine. It will remove addresses that are no longer in the
291 * association (for the restarting case where addresses are
292 * removed). Up front when the INIT arrives we will discard it if it
293 * is a restart and new addresses have been added.
295 /* sa_ignore MEMLEAK */
300 * INIT-ACK message processing/consumption returns value < 0 on error
/*
 * sctp_process_init_ack() - consume a validated INIT-ACK: reject
 * unrecognized/illegal parameters (aborting on protocol violation),
 * apply the peer's parameters via sctp_process_init(), load the peer's
 * addresses, negotiate the HMAC id, stop the INIT timer at the primary
 * destination, compute an RTO sample, and queue the state cookie for
 * echo.  A missing cookie is fatal: an ABORT with "missing mandatory
 * parameter" (SCTP_STATE_COOKIE) is built and sent, and *abort_no_unlock
 * is set for the caller.
 */
303 sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
304 struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
305 struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
307 struct sctp_association *asoc;
309 int retval, abort_flag;
310 uint32_t initack_limit;
312 /* First verify that we have no illegal param's */
316 op_err = sctp_arethere_unrecognized_parameters(m,
317 (offset + sizeof(struct sctp_init_chunk)),
318 &abort_flag, (struct sctp_chunkhdr *)cp);
320 /* Send an abort and notify peer */
321 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_CAUSE_PROTOCOL_VIOLATION, op_err);
322 *abort_no_unlock = 1;
326 /* process the peer's parameters in the INIT-ACK */
327 retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb, net);
331 initack_limit = offset + ntohs(cp->ch.chunk_length);
332 /* load all addresses */
333 if ((retval = sctp_load_addresses_from_init(stcb, m, iphlen,
334 (offset + sizeof(struct sctp_init_chunk)), initack_limit, sh,
336 /* Huh, we should abort */
337 SCTPDBG(SCTP_DEBUG_INPUT1,
338 "Load addresses from INIT causes an abort %d\n",
340 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
342 *abort_no_unlock = 1;
/* Pick the strongest HMAC both sides support (for AUTH chunks). */
345 stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
346 stcb->asoc.local_hmacs);
348 sctp_queue_op_err(stcb, op_err);
349 /* queuing will steal away the mbuf chain to the out queue */
352 /* extract the cookie and queue it to "echo" it back... */
/* Fresh INIT-ACK means the peer is alive: clear the error counters. */
353 stcb->asoc.overall_error_count = 0;
354 net->error_count = 0;
357 * Cancel the INIT timer, We do this first before queueing the
358 * cookie. We always cancel at the primary to assue that we are
359 * canceling the timer started by the INIT which always goes to the
362 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb,
363 asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4);
365 /* calculate the RTO */
366 net->RTO = sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered);
368 retval = sctp_send_cookie_echo(m, offset, stcb, net);
371 * No cookie, we probably should send a op error. But in any
372 * case if there is no cookie in the INIT-ACK, we can
373 * abandon the peer, its broke.
376 /* We abort with an error of missing mandatory param */
378 sctp_generate_invmanparam(SCTP_CAUSE_MISSING_PARAM);
381 * Expand beyond to include the mandatory
384 struct sctp_inv_mandatory_param *mp;
386 SCTP_BUF_LEN(op_err) =
387 sizeof(struct sctp_inv_mandatory_param);
389 struct sctp_inv_mandatory_param *);
390 /* Subtract the reserved param */
392 htons(sizeof(struct sctp_inv_mandatory_param) - 2);
393 mp->num_param = htonl(1);
/* Name the one parameter the peer omitted: the state cookie. */
394 mp->param = htons(SCTP_STATE_COOKIE);
397 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
399 *abort_no_unlock = 1;
/*
 * sctp_handle_heartbeat_ack() - process a HEARTBEAT-ACK.
 * Rebuilds the echoed destination address (IPv4 or IPv6) from the
 * heartbeat info, looks up the matching net, and on a random-value match
 * confirms an UNCONFIRMED address.  Also honors a pending "make primary"
 * request, clears error counts, marks a previously unreachable address
 * reachable (with ULP notification), and feeds the echoed timestamp into
 * the RTO calculation.
 */
407 sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
408 struct sctp_tcb *stcb, struct sctp_nets *net)
410 struct sockaddr_storage store;
411 struct sockaddr_in *sin;
412 struct sockaddr_in6 *sin6;
413 struct sctp_nets *r_net;
/* Malformed HB-ACK: must be exactly one heartbeat chunk, nothing more. */
416 if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) {
420 sin = (struct sockaddr_in *)&store;
421 sin6 = (struct sockaddr_in6 *)&store;
423 memset(&store, 0, sizeof(store));
424 if (cp->heartbeat.hb_info.addr_family == AF_INET &&
425 cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) {
426 sin->sin_family = cp->heartbeat.hb_info.addr_family;
427 sin->sin_len = cp->heartbeat.hb_info.addr_len;
428 sin->sin_port = stcb->rport;
429 memcpy(&sin->sin_addr, cp->heartbeat.hb_info.address,
430 sizeof(sin->sin_addr));
431 } else if (cp->heartbeat.hb_info.addr_family == AF_INET6 &&
432 cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) {
433 sin6->sin6_family = cp->heartbeat.hb_info.addr_family;
434 sin6->sin6_len = cp->heartbeat.hb_info.addr_len;
435 sin6->sin6_port = stcb->rport;
436 memcpy(&sin6->sin6_addr, cp->heartbeat.hb_info.address,
437 sizeof(sin6->sin6_addr));
/* sin and sin6 alias &store, so one lookup covers both families. */
441 r_net = sctp_findnet(stcb, (struct sockaddr *)sin);
443 SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n");
/* Only the echoed random values prove this ACK answers OUR heartbeat. */
446 if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
447 (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) &&
448 (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) {
450 * If the its a HB and it's random value is correct when can
451 * confirm the destination.
453 r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
454 if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) {
455 stcb->asoc.primary_destination = r_net;
456 r_net->dest_state &= ~SCTP_ADDR_WAS_PRIMARY;
457 r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
458 r_net = TAILQ_FIRST(&stcb->asoc.nets);
459 if (r_net != stcb->asoc.primary_destination) {
461 * first one on the list is NOT the primary
462 * sctp_cmpaddr() is much more efficent if
463 * the primary is the first on the list,
466 TAILQ_REMOVE(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
467 TAILQ_INSERT_HEAD(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
470 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
471 stcb, 0, (void *)r_net);
473 r_net->error_count = 0;
474 r_net->hb_responded = 1;
/* Recover the send timestamp we embedded in the heartbeat. */
475 tv.tv_sec = cp->heartbeat.hb_info.time_value_1;
476 tv.tv_usec = cp->heartbeat.hb_info.time_value_2;
477 if (r_net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
478 r_net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
479 r_net->dest_state |= SCTP_ADDR_REACHABLE;
480 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
481 SCTP_HEARTBEAT_SUCCESS, (void *)r_net);
482 /* now was it the primary? if so restore */
483 if (r_net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
484 (void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, r_net);
487 /* Now lets do a RTO with this */
488 r_net->RTO = sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv);
/*
 * sctp_handle_abort() - process an inbound ABORT chunk: stop receive
 * timers, notify the user, update the aborted/established statistics,
 * and free the association (TCB).
 */
492 sctp_handle_abort(struct sctp_abort_chunk *cp,
493 struct sctp_tcb *stcb, struct sctp_nets *net)
495 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n");
499 /* stop any receive timers */
500 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_5);
501 /* notify user of the abort and clean up... */
502 sctp_abort_notification(stcb, 0);
504 SCTP_STAT_INCR_COUNTER32(sctps_aborted);
/* Only decrement the established gauge if we had counted this assoc. */
505 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
506 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
507 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
509 #ifdef SCTP_ASOCLOG_OF_TSNS
510 sctp_print_out_track_log(stcb);
/* Tear down the whole association; stcb is invalid after this call. */
512 sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
513 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n");
/*
 * sctp_handle_shutdown() - process an inbound SHUTDOWN chunk.
 * Ignores it during COOKIE_WAIT/COOKIE_ECHOED and on bad length,
 * acks outstanding data via sctp_update_acked(), terminates any
 * partial-delivery in progress, moves to SHUTDOWN_RECEIVED (notifying
 * the ULP), and — once the send/sent queues and stream wheel are
 * empty — replies with SHUTDOWN-ACK, enters SHUTDOWN_ACK_SENT, and
 * starts the SHUTDOWN-ACK timer.  While data is still queued it simply
 * returns so the output path can drain it first.
 */
517 sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
518 struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag)
520 struct sctp_association *asoc;
521 int some_on_streamwheel;
523 SCTPDBG(SCTP_DEBUG_INPUT2,
524 "sctp_handle_shutdown: handling SHUTDOWN\n");
/* SHUTDOWN is meaningless before the handshake completes. */
528 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
529 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
532 if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) {
533 /* Shutdown NOT the expected size */
/* The SHUTDOWN carries a cumulative TSN ack; process it like a SACK. */
536 sctp_update_acked(stcb, cp, net, abort_flag);
538 if (asoc->control_pdapi) {
540 * With a normal shutdown we assume the end of last record.
542 SCTP_INP_READ_LOCK(stcb->sctp_ep);
543 asoc->control_pdapi->end_added = 1;
544 asoc->control_pdapi->pdapi_aborted = 1;
545 asoc->control_pdapi = NULL;
546 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
547 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
549 /* goto SHUTDOWN_RECEIVED state to block new requests */
550 if (stcb->sctp_socket) {
551 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
552 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) &&
553 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
554 asoc->state = SCTP_STATE_SHUTDOWN_RECEIVED;
556 * notify upper layer that peer has initiated a
559 sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL);
562 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
565 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
567 * stop the shutdown timer, since we WILL move to
570 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_7);
572 /* Now are we there yet? */
573 some_on_streamwheel = 0;
574 if (!TAILQ_EMPTY(&asoc->out_wheel)) {
575 /* Check to see if some data queued */
576 struct sctp_stream_out *outs;
578 TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
579 if (!TAILQ_EMPTY(&outs->outqueue)) {
580 some_on_streamwheel = 1;
585 if (!TAILQ_EMPTY(&asoc->send_queue) ||
586 !TAILQ_EMPTY(&asoc->sent_queue) ||
587 some_on_streamwheel) {
588 /* By returning we will push more data out */
591 /* no outstanding data to send, so move on... */
592 /* send SHUTDOWN-ACK */
593 sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
594 /* move to SHUTDOWN-ACK-SENT state */
595 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
596 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
597 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
599 asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT;
601 /* start SHUTDOWN timer */
602 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep,
/*
 * sctp_handle_shutdown_ack() - process an inbound SHUTDOWN-ACK.
 * Ignored (TCB unlocked and return) unless we are in SHUTDOWN_SENT or
 * SHUTDOWN_ACK_SENT.  Otherwise: end any partial delivery, report any
 * data still queued as failed, stop the shutdown timer, send
 * SHUTDOWN-COMPLETE, notify the ULP that the association is down, and
 * free the TCB.
 */
608 sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp,
609 struct sctp_tcb *stcb, struct sctp_nets *net)
611 struct sctp_association *asoc;
613 SCTPDBG(SCTP_DEBUG_INPUT2,
614 "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n");
619 /* process according to association state */
620 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
621 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
622 /* unexpected SHUTDOWN-ACK... so ignore... */
623 SCTP_TCB_UNLOCK(stcb);
626 if (asoc->control_pdapi) {
628 * With a normal shutdown we assume the end of last record.
630 SCTP_INP_READ_LOCK(stcb->sctp_ep);
631 asoc->control_pdapi->end_added = 1;
632 asoc->control_pdapi->pdapi_aborted = 1;
633 asoc->control_pdapi = NULL;
634 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
635 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
637 /* are the queues empty? */
/* Anything still queued at this point can never be sent: report it. */
638 if (!TAILQ_EMPTY(&asoc->send_queue) ||
639 !TAILQ_EMPTY(&asoc->sent_queue) ||
640 !TAILQ_EMPTY(&asoc->out_wheel)) {
641 sctp_report_all_outbound(stcb, 0);
644 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_8);
645 /* send SHUTDOWN-COMPLETE */
646 sctp_send_shutdown_complete(stcb, net);
647 /* notify upper layer protocol */
648 if (stcb->sctp_socket) {
649 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL);
650 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
651 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
652 /* Set the connected flag to disconnected */
653 stcb->sctp_ep->sctp_socket->so_snd.sb_cc = 0;
656 SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
657 /* free the TCB but first save off the ep */
658 sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
659 SCTP_FROM_SCTP_INPUT + SCTP_LOC_9);
663 * Skip past the param header and then we will find the chunk that caused the
664 * problem. There are two possiblities ASCONF or FWD-TSN other than that and
665 * our peer must be broken.
/*
 * sctp_process_unrecog_chunk() - react to an "unrecognized chunk" error
 * cause: if the peer rejected ASCONF-ACK, clean up pending ASCONF state;
 * if it rejected FWD-TSN, mark the peer as not supporting PR-SCTP.
 * Anything else is just logged.
 */
668 sctp_process_unrecog_chunk(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr,
669 struct sctp_nets *net)
671 struct sctp_chunkhdr *chk;
/* The offending chunk header sits immediately after the error paramhdr. */
673 chk = (struct sctp_chunkhdr *)((caddr_t)phdr + sizeof(*phdr));
674 switch (chk->chunk_type) {
675 case SCTP_ASCONF_ACK:
677 sctp_asconf_cleanup(stcb, net);
679 case SCTP_FORWARD_CUM_TSN:
680 stcb->asoc.peer_supports_prsctp = 0;
683 SCTPDBG(SCTP_DEBUG_INPUT2,
684 "Peer does not support chunk type %d(%x)??\n",
685 chk->chunk_type, (uint32_t) chk->chunk_type);
691 * Skip past the param header and then we will find the param that caused the
692 * problem. There are a number of param's in a ASCONF OR the prsctp param
693 * these will turn of specific features.
/*
 * sctp_process_unrecog_param() - react to an "unrecognized parameter"
 * error cause by switching off the corresponding optional feature
 * (PR-SCTP, ECN nonce, ASCONF) so we stop sending what the peer rejects.
 */
696 sctp_process_unrecog_param(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr)
698 struct sctp_paramhdr *pbad;
701 switch (ntohs(pbad->param_type)) {
703 case SCTP_PRSCTP_SUPPORTED:
704 stcb->asoc.peer_supports_prsctp = 0;
706 case SCTP_SUPPORTED_CHUNK_EXT:
708 /* draft-ietf-tsvwg-addip-sctp */
709 case SCTP_ECN_NONCE_SUPPORTED:
710 stcb->asoc.peer_supports_ecn_nonce = 0;
711 stcb->asoc.ecn_nonce_allowed = 0;
712 stcb->asoc.ecn_allowed = 0;
/* Any rejected ASCONF sub-parameter means the peer can't do ASCONF. */
714 case SCTP_ADD_IP_ADDRESS:
715 case SCTP_DEL_IP_ADDRESS:
716 case SCTP_SET_PRIM_ADDR:
717 stcb->asoc.peer_supports_asconf = 0;
719 case SCTP_SUCCESS_REPORT:
720 case SCTP_ERROR_CAUSE_IND:
721 SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n");
722 SCTPDBG(SCTP_DEBUG_INPUT2,
723 "Turning off ASCONF to this strange peer\n");
724 stcb->asoc.peer_supports_asconf = 0;
727 SCTPDBG(SCTP_DEBUG_INPUT2,
728 "Peer does not support param type %d(%x)??\n",
729 pbad->param_type, (uint32_t) pbad->param_type);
/*
 * sctp_handle_error() - walk the error causes inside an OPERATION-ERROR
 * chunk and dispatch each one.  Stale-cookie causes restart the handshake
 * (doubling the requested cookie lifetime) or abort after too many
 * retries; unrecognized chunk/param causes disable the feature via the
 * helpers above; the remaining causes are logged and skipped.  Each cause
 * is advanced over using its 32-bit-padded length.
 */
735 sctp_handle_error(struct sctp_chunkhdr *ch,
736 struct sctp_tcb *stcb, struct sctp_nets *net)
739 struct sctp_paramhdr *phdr;
742 struct sctp_association *asoc;
746 /* parse through all of the errors and process */
748 phdr = (struct sctp_paramhdr *)((caddr_t)ch +
749 sizeof(struct sctp_chunkhdr));
750 chklen = ntohs(ch->chunk_length) - sizeof(struct sctp_chunkhdr);
751 while ((size_t)chklen >= sizeof(struct sctp_paramhdr)) {
752 /* Process an Error Cause */
753 error_type = ntohs(phdr->param_type);
754 error_len = ntohs(phdr->param_length);
/* A cause longer than what's left, or zero-length, would loop forever. */
755 if ((error_len > chklen) || (error_len == 0)) {
756 /* invalid param length for this param */
757 SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in error param- chunk left:%d errorlen:%d\n",
761 switch (error_type) {
762 case SCTP_CAUSE_INVALID_STREAM:
763 case SCTP_CAUSE_MISSING_PARAM:
764 case SCTP_CAUSE_INVALID_PARAM:
765 case SCTP_CAUSE_NO_USER_DATA:
766 SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %d back? We have a bug :/ (or do they?)\n",
769 case SCTP_CAUSE_STALE_COOKIE:
771 * We only act if we have echoed a cookie and are
774 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
777 p = (int *)((caddr_t)phdr + sizeof(*phdr));
778 /* Save the time doubled */
779 asoc->cookie_preserve_req = ntohl(*p) << 1;
780 asoc->stale_cookie_count++;
781 if (asoc->stale_cookie_count >
782 asoc->max_init_times) {
783 sctp_abort_notification(stcb, 0);
784 /* now free the asoc */
785 sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_10);
788 /* blast back to INIT state */
789 asoc->state &= ~SCTP_STATE_COOKIE_ECHOED;
790 asoc->state |= SCTP_STATE_COOKIE_WAIT;
792 sctp_stop_all_cookie_timers(stcb);
/* Retry the handshake with a new INIT (cookie-preservative requested). */
793 sctp_send_initiate(stcb->sctp_ep, stcb);
796 case SCTP_CAUSE_UNRESOLVABLE_ADDR:
798 * Nothing we can do here, we don't do hostname
799 * addresses so if the peer does not like my IPv6
800 * (or IPv4 for that matter) it does not matter. If
801 * they don't support that type of address, they can
802 * NOT possibly get that packet type... i.e. with no
803 * IPv6 you can't recieve a IPv6 packet. so we can
804 * safely ignore this one. If we ever added support
805 * for HOSTNAME Addresses, then we would need to do
809 case SCTP_CAUSE_UNRECOG_CHUNK:
810 sctp_process_unrecog_chunk(stcb, phdr, net);
812 case SCTP_CAUSE_UNRECOG_PARAM:
813 sctp_process_unrecog_param(stcb, phdr);
815 case SCTP_CAUSE_COOKIE_IN_SHUTDOWN:
817 * We ignore this since the timer will drive out a
818 * new cookie anyway and there timer will drive us
819 * to send a SHUTDOWN_COMPLETE. We can't send one
820 * here since we don't have their tag.
823 case SCTP_CAUSE_DELETING_LAST_ADDR:
824 case SCTP_CAUSE_RESOURCE_SHORTAGE:
825 case SCTP_CAUSE_DELETING_SRC_ADDR:
827 * We should NOT get these here, but in a
830 SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a Operational Error?<%d>?\n",
833 case SCTP_CAUSE_OUT_OF_RESC:
835 * And what, pray tell do we do with the fact that
836 * the peer is out of resources? Not really sure we
837 * could do anything but abort. I suspect this
838 * should have came WITH an abort instead of in a
843 SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown error type = 0x%xh\n",
/* Causes are padded to 4-byte boundaries; step by the padded size. */
847 adjust = SCTP_SIZE32(error_len);
849 phdr = (struct sctp_paramhdr *)((caddr_t)phdr + adjust);
/*
 * sctp_handle_init_ack() - top-level handler for an inbound INIT-ACK.
 * After the same mandatory-parameter validation as sctp_handle_init()
 * (length, initiate_tag, a_rwnd, stream counts — each failure aborts and
 * sets *abort_no_unlock), it dispatches on association state: only
 * COOKIE_WAIT accepts the chunk, confirming the primary destination,
 * consuming the INIT-ACK via sctp_process_init_ack(), moving to
 * COOKIE_ECHOED (preserving a pending SHUTDOWN request), resetting the
 * error count/entry time, and restarting the COOKIE timer.  All other
 * states discard the chunk.
 */
855 sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
856 struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
857 struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
859 struct sctp_init_ack *init_ack;
863 SCTPDBG(SCTP_DEBUG_INPUT2,
864 "sctp_handle_init_ack: handling INIT-ACK\n");
867 SCTPDBG(SCTP_DEBUG_INPUT2,
868 "sctp_handle_init_ack: TCB is null\n");
871 if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) {
873 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
874 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
876 *abort_no_unlock = 1;
879 init_ack = &cp->init;
880 /* validate parameters */
881 if (init_ack->initiate_tag == 0) {
882 /* protocol error... send an abort */
883 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
884 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
886 *abort_no_unlock = 1;
889 if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) {
890 /* protocol error... send an abort */
891 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
892 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
894 *abort_no_unlock = 1;
897 if (init_ack->num_inbound_streams == 0) {
898 /* protocol error... send an abort */
899 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
900 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
902 *abort_no_unlock = 1;
905 if (init_ack->num_outbound_streams == 0) {
906 /* protocol error... send an abort */
907 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
908 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
910 *abort_no_unlock = 1;
913 /* process according to association state... */
914 state = &stcb->asoc.state;
915 switch (*state & SCTP_STATE_MASK) {
916 case SCTP_STATE_COOKIE_WAIT:
917 /* this is the expected state for this chunk */
918 /* process the INIT-ACK parameters */
919 if (stcb->asoc.primary_destination->dest_state &
920 SCTP_ADDR_UNCONFIRMED) {
922 * The primary is where we sent the INIT, we can
923 * always consider it confirmed when the INIT-ACK is
924 * returned. Do this before we load addresses
927 stcb->asoc.primary_destination->dest_state &=
928 ~SCTP_ADDR_UNCONFIRMED;
929 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
930 stcb, 0, (void *)stcb->asoc.primary_destination);
932 if (sctp_process_init_ack(m, iphlen, offset, sh, cp, stcb,
933 net, abort_no_unlock, vrf_id) < 0) {
934 /* error in parsing parameters */
937 /* update our state */
938 SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n");
/* Keep a pending SHUTDOWN request across the state transition. */
939 if (*state & SCTP_STATE_SHUTDOWN_PENDING) {
940 *state = SCTP_STATE_COOKIE_ECHOED |
941 SCTP_STATE_SHUTDOWN_PENDING;
943 *state = SCTP_STATE_COOKIE_ECHOED;
946 /* reset the RTO calc */
947 stcb->asoc.overall_error_count = 0;
948 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
950 * collapse the init timer back in case of a exponential
953 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep,
956 * the send at the end of the inbound data processing will
957 * cause the cookie to be sent
960 case SCTP_STATE_SHUTDOWN_SENT:
961 /* incorrect state... discard */
963 case SCTP_STATE_COOKIE_ECHOED:
964 /* incorrect state... discard */
966 case SCTP_STATE_OPEN:
967 /* incorrect state... discard */
969 case SCTP_STATE_EMPTY:
970 case SCTP_STATE_INUSE:
972 /* incorrect state... discard */
976 SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n");
982 * handle a state cookie for an existing association m: input packet mbuf
983 * chain-- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a
984 * "split" mbuf and the cookie signature does not exist offset: offset into
985 * mbuf to the cookie-echo chunk
987 static struct sctp_tcb *
988 sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
989 struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
990 struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net,
991 struct sockaddr *init_src, int *notification, sctp_assoc_t * sac_assoc_id,
994 struct sctp_association *asoc;
995 struct sctp_init_chunk *init_cp, init_buf;
996 struct sctp_init_ack_chunk *initack_cp, initack_buf;
998 int init_offset, initack_offset, i;
1003 /* I know that the TCB is non-NULL from the caller */
1005 for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) {
1006 if (asoc->cookie_how[how_indx] == 0)
1009 if (how_indx < sizeof(asoc->cookie_how)) {
1010 asoc->cookie_how[how_indx] = 1;
1012 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
1013 /* SHUTDOWN came in after sending INIT-ACK */
1014 struct mbuf *op_err;
1015 struct sctp_paramhdr *ph;
1017 sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
1018 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
1019 0, M_DONTWAIT, 1, MT_DATA);
1020 if (op_err == NULL) {
1024 /* pre-reserve some space */
1025 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
1026 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
1027 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1029 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr);
1030 ph = mtod(op_err, struct sctp_paramhdr *);
1031 ph->param_type = htons(SCTP_CAUSE_COOKIE_IN_SHUTDOWN);
1032 ph->param_length = htons(sizeof(struct sctp_paramhdr));
1033 sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag,
1035 if (how_indx < sizeof(asoc->cookie_how))
1036 asoc->cookie_how[how_indx] = 2;
1040 * find and validate the INIT chunk in the cookie (peer's info) the
1041 * INIT should start after the cookie-echo header struct (chunk
1042 * header, state cookie header struct)
1044 init_offset = offset += sizeof(struct sctp_cookie_echo_chunk);
1046 init_cp = (struct sctp_init_chunk *)
1047 sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
1048 (uint8_t *) & init_buf);
1049 if (init_cp == NULL) {
1050 /* could not pull a INIT chunk in cookie */
1053 chk_length = ntohs(init_cp->ch.chunk_length);
1054 if (init_cp->ch.chunk_type != SCTP_INITIATION) {
1058 * find and validate the INIT-ACK chunk in the cookie (my info) the
1059 * INIT-ACK follows the INIT chunk
1061 initack_offset = init_offset + SCTP_SIZE32(chk_length);
1062 initack_cp = (struct sctp_init_ack_chunk *)
1063 sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
1064 (uint8_t *) & initack_buf);
1065 if (initack_cp == NULL) {
1066 /* could not pull INIT-ACK chunk in cookie */
1069 chk_length = ntohs(initack_cp->ch.chunk_length);
1070 if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
1073 if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
1074 (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) {
1076 * case D in Section 5.2.4 Table 2: MMAA process accordingly
1077 * to get into the OPEN state
1079 if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1081 panic("Case D and non-match seq?");
1083 SCTP_PRINTF("Case D, seq non-match %x vs %x?\n",
1084 ntohl(initack_cp->init.initial_tsn),
1085 asoc->init_seq_number);
1088 switch SCTP_GET_STATE
1090 case SCTP_STATE_COOKIE_WAIT:
1091 case SCTP_STATE_COOKIE_ECHOED:
1093 * INIT was sent but got a COOKIE_ECHO with the
1094 * correct tags... just accept it...but we must
1095 * process the init so that we can make sure we have
1096 * the right seq no's.
1098 /* First we must process the INIT !! */
1099 retval = sctp_process_init(init_cp, stcb, net);
1101 if (how_indx < sizeof(asoc->cookie_how))
1102 asoc->cookie_how[how_indx] = 3;
1105 /* we have already processed the INIT so no problem */
1106 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb,
1107 net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_11);
1108 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_12);
1109 /* update current state */
1110 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
1111 SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1113 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1114 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1115 asoc->state = SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING;
1116 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1117 stcb->sctp_ep, stcb, asoc->primary_destination);
1120 /* if ok, move to OPEN state */
1121 asoc->state = SCTP_STATE_OPEN;
1123 SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1124 sctp_stop_all_cookie_timers(stcb);
1125 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1126 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1127 (inp->sctp_socket->so_qlimit == 0)
1130 * Here is where collision would go if we
1131 * did a connect() and instead got a
1132 * init/init-ack/cookie done before the
1133 * init-ack came back..
1135 stcb->sctp_ep->sctp_flags |=
1136 SCTP_PCB_FLAGS_CONNECTED;
1137 soisconnected(stcb->sctp_ep->sctp_socket);
1139 /* notify upper layer */
1140 *notification = SCTP_NOTIFY_ASSOC_UP;
1142 * since we did not send a HB make sure we don't
1145 net->hb_responded = 1;
1146 net->RTO = sctp_calculate_rto(stcb, asoc, net,
1147 &cookie->time_entered);
1149 if (stcb->asoc.sctp_autoclose_ticks &&
1150 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) {
1151 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
1157 * we're in the OPEN state (or beyond), so peer must
1158 * have simply lost the COOKIE-ACK
1162 sctp_stop_all_cookie_timers(stcb);
1164 * We ignore the return code here.. not sure if we should
1165 * somehow abort.. but we do have an existing asoc. This
1166 * really should not fail.
1168 if (sctp_load_addresses_from_init(stcb, m, iphlen,
1169 init_offset + sizeof(struct sctp_init_chunk),
1170 initack_offset, sh, init_src)) {
1171 if (how_indx < sizeof(asoc->cookie_how))
1172 asoc->cookie_how[how_indx] = 4;
1175 /* respond with a COOKIE-ACK */
1176 sctp_toss_old_cookies(stcb, asoc);
1177 sctp_send_cookie_ack(stcb);
1178 if (how_indx < sizeof(asoc->cookie_how))
1179 asoc->cookie_how[how_indx] = 5;
1182 if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1183 ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag &&
1184 cookie->tie_tag_my_vtag == 0 &&
1185 cookie->tie_tag_peer_vtag == 0) {
1187 * case C in Section 5.2.4 Table 2: XMOO silently discard
1189 if (how_indx < sizeof(asoc->cookie_how))
1190 asoc->cookie_how[how_indx] = 6;
1193 if (ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag &&
1194 (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag ||
1195 init_cp->init.initiate_tag == 0)) {
1197 * case B in Section 5.2.4 Table 2: MXAA or MOAA my info
1198 * should be ok, re-accept peer info
1200 if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
1202 * Extension of case C. If we hit this, then the
1203 * random number generator returned the same vtag
1204 * when we first sent our INIT-ACK and when we later
1205 * sent our INIT. The side with the seq numbers that
1206 * are different will be the one that normnally
1207 * would have hit case C. This in effect "extends"
1208 * our vtags in this collision case to be 64 bits.
1209 * The same collision could occur aka you get both
1210 * vtag and seq number the same twice in a row.. but
1211 * is much less likely. If it did happen then we
1212 * would proceed through and bring up the assoc.. we
1213 * may end up with the wrong stream setup however..
1214 * which would be bad.. but there is no way to
1215 * tell.. until we send on a stream that does not
1218 if (how_indx < sizeof(asoc->cookie_how))
1219 asoc->cookie_how[how_indx] = 7;
1223 if (how_indx < sizeof(asoc->cookie_how))
1224 asoc->cookie_how[how_indx] = 8;
1225 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_13);
1226 sctp_stop_all_cookie_timers(stcb);
1228 * since we did not send a HB make sure we don't double
1231 net->hb_responded = 1;
1232 if (stcb->asoc.sctp_autoclose_ticks &&
1233 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
1234 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
1237 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1238 asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
1240 /* Note last_cwr_tsn? where is this used? */
1241 asoc->last_cwr_tsn = asoc->init_seq_number - 1;
1242 if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) {
1244 * Ok the peer probably discarded our data (if we
1245 * echoed a cookie+data). So anything on the
1246 * sent_queue should be marked for retransmit, we
1247 * may not get something to kick us so it COULD
1248 * still take a timeout to move these.. but it can't
1249 * hurt to mark them.
1251 struct sctp_tmit_chunk *chk;
1253 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
1254 if (chk->sent < SCTP_DATAGRAM_RESEND) {
1255 chk->sent = SCTP_DATAGRAM_RESEND;
1256 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1262 /* process the INIT info (peer's info) */
1263 retval = sctp_process_init(init_cp, stcb, net);
1265 if (how_indx < sizeof(asoc->cookie_how))
1266 asoc->cookie_how[how_indx] = 9;
1269 if (sctp_load_addresses_from_init(stcb, m, iphlen,
1270 init_offset + sizeof(struct sctp_init_chunk),
1271 initack_offset, sh, init_src)) {
1272 if (how_indx < sizeof(asoc->cookie_how))
1273 asoc->cookie_how[how_indx] = 10;
1276 if ((asoc->state & SCTP_STATE_COOKIE_WAIT) ||
1277 (asoc->state & SCTP_STATE_COOKIE_ECHOED)) {
1278 *notification = SCTP_NOTIFY_ASSOC_UP;
1280 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1281 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1282 (inp->sctp_socket->so_qlimit == 0)) {
1283 stcb->sctp_ep->sctp_flags |=
1284 SCTP_PCB_FLAGS_CONNECTED;
1285 soisconnected(stcb->sctp_ep->sctp_socket);
1287 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
1288 SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1290 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1291 SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
1292 SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1293 } else if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
1294 SCTP_STAT_INCR_COUNTER32(sctps_restartestab);
1296 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
1298 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1299 asoc->state = SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING;
1300 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1301 stcb->sctp_ep, stcb, asoc->primary_destination);
1304 asoc->state = SCTP_STATE_OPEN;
1306 sctp_stop_all_cookie_timers(stcb);
1307 sctp_toss_old_cookies(stcb, asoc);
1308 sctp_send_cookie_ack(stcb);
1311 * only if we have retrans set do we do this. What
1312 * this call does is get only the COOKIE-ACK out and
1313 * then when we return the normal call to
1314 * sctp_chunk_output will get the retrans out behind
1317 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_COOKIE_ACK);
1319 if (how_indx < sizeof(asoc->cookie_how))
1320 asoc->cookie_how[how_indx] = 11;
1324 if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
1325 ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) &&
1326 cookie->tie_tag_my_vtag == asoc->my_vtag_nonce &&
1327 cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce &&
1328 cookie->tie_tag_peer_vtag != 0) {
1329 struct sctpasochead *head;
1332 * case A in Section 5.2.4 Table 2: XXMM (peer restarted)
1335 if (how_indx < sizeof(asoc->cookie_how))
1336 asoc->cookie_how[how_indx] = 12;
1337 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_14);
1338 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_15);
1340 *sac_assoc_id = sctp_get_associd(stcb);
1341 /* notify upper layer */
1342 *notification = SCTP_NOTIFY_ASSOC_RESTART;
1343 atomic_add_int(&stcb->asoc.refcnt, 1);
1344 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_OPEN) &&
1345 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
1346 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
1347 SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1349 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
1350 SCTP_STAT_INCR_GAUGE32(sctps_restartestab);
1351 } else if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
1352 SCTP_STAT_INCR_GAUGE32(sctps_collisionestab);
1354 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
1355 asoc->state = SCTP_STATE_OPEN |
1356 SCTP_STATE_SHUTDOWN_PENDING;
1357 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1358 stcb->sctp_ep, stcb, asoc->primary_destination);
1360 } else if (!(asoc->state & SCTP_STATE_SHUTDOWN_SENT)) {
1361 /* move to OPEN state, if not in SHUTDOWN_SENT */
1362 asoc->state = SCTP_STATE_OPEN;
1364 asoc->pre_open_streams =
1365 ntohs(initack_cp->init.num_outbound_streams);
1366 asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
1367 asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
1369 asoc->last_cwr_tsn = asoc->init_seq_number - 1;
1370 asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
1372 asoc->str_reset_seq_in = asoc->init_seq_number;
1374 asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1375 if (asoc->mapping_array) {
1376 memset(asoc->mapping_array, 0,
1377 asoc->mapping_array_size);
1379 SCTP_TCB_UNLOCK(stcb);
1380 SCTP_INP_INFO_WLOCK();
1381 SCTP_INP_WLOCK(stcb->sctp_ep);
1382 SCTP_TCB_LOCK(stcb);
1383 atomic_add_int(&stcb->asoc.refcnt, -1);
1384 /* send up all the data */
1385 SCTP_TCB_SEND_LOCK(stcb);
1387 sctp_report_all_outbound(stcb, 1);
1388 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
1389 stcb->asoc.strmout[i].stream_no = i;
1390 stcb->asoc.strmout[i].next_sequence_sent = 0;
1391 stcb->asoc.strmout[i].last_msg_incomplete = 0;
1393 /* process the INIT-ACK info (my info) */
1394 asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
1395 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1397 /* pull from vtag hash */
1398 LIST_REMOVE(stcb, sctp_asocs);
1399 /* re-insert to new vtag position */
1400 head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
1401 sctppcbinfo.hashasocmark)];
1403 * put it in the bucket in the vtag hash of assoc's for the
1406 LIST_INSERT_HEAD(head, stcb, sctp_asocs);
1408 /* Is this the first restart? */
1409 if (stcb->asoc.in_restart_hash == 0) {
1410 /* Ok add it to assoc_id vtag hash */
1411 head = &sctppcbinfo.sctp_restarthash[SCTP_PCBHASH_ASOC(stcb->asoc.assoc_id,
1412 sctppcbinfo.hashrestartmark)];
1413 LIST_INSERT_HEAD(head, stcb, sctp_tcbrestarhash);
1414 stcb->asoc.in_restart_hash = 1;
1416 /* process the INIT info (peer's info) */
1417 SCTP_TCB_SEND_UNLOCK(stcb);
1418 SCTP_INP_WUNLOCK(stcb->sctp_ep);
1419 SCTP_INP_INFO_WUNLOCK();
1421 retval = sctp_process_init(init_cp, stcb, net);
1423 if (how_indx < sizeof(asoc->cookie_how))
1424 asoc->cookie_how[how_indx] = 13;
1429 * since we did not send a HB make sure we don't double
1432 net->hb_responded = 1;
1434 if (sctp_load_addresses_from_init(stcb, m, iphlen,
1435 init_offset + sizeof(struct sctp_init_chunk),
1436 initack_offset, sh, init_src)) {
1437 if (how_indx < sizeof(asoc->cookie_how))
1438 asoc->cookie_how[how_indx] = 14;
1442 /* respond with a COOKIE-ACK */
1443 sctp_stop_all_cookie_timers(stcb);
1444 sctp_toss_old_cookies(stcb, asoc);
1445 sctp_send_cookie_ack(stcb);
1446 if (how_indx < sizeof(asoc->cookie_how))
1447 asoc->cookie_how[how_indx] = 15;
1451 if (how_indx < sizeof(asoc->cookie_how))
1452 asoc->cookie_how[how_indx] = 16;
1453 /* all other cases... */
1459 * handle a state cookie for a new association m: input packet mbuf chain--
1460 * assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a "split" mbuf
1461 * and the cookie signature does not exist offset: offset into mbuf to the
1462 * cookie-echo chunk length: length of the cookie chunk to: where the init
1463 * was from returns a new TCB
/*
 * NOTE(review): this copy of the function is a damaged transcription --
 * several original lines (error `return`s, closing braces, parts of
 * conditions) are missing, and each line carries a stale line-number
 * prefix.  Verify any change against the upstream sctp_input.c; the
 * comments below describe only what the visible lines establish.
 */
1465 static struct sctp_tcb *
1466 sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
1467 struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
1468 struct sctp_inpcb *inp, struct sctp_nets **netp,
1469 struct sockaddr *init_src, int *notification,
1470 int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1473 struct sctp_tcb *stcb;
1474 struct sctp_init_chunk *init_cp, init_buf;
1475 struct sctp_init_ack_chunk *initack_cp, initack_buf;
1476 struct sockaddr_storage sa_store;
1477 struct sockaddr *initack_src = (struct sockaddr *)&sa_store;
1478 struct sockaddr_in *sin;
1479 struct sockaddr_in6 *sin6;
1480 struct sctp_association *asoc;
1482 int init_offset, initack_offset, initack_limit;
1486 uint8_t auth_chunk_buf[SCTP_PARAM_BUFFER_SIZE];
1489 * find and validate the INIT chunk in the cookie (peer's info) the
1490 * INIT should start after the cookie-echo header struct (chunk
1491 * header, state cookie header struct)
1493 init_offset = offset + sizeof(struct sctp_cookie_echo_chunk);
/* copy the embedded INIT chunk header out of the (possibly chained) mbuf */
1494 init_cp = (struct sctp_init_chunk *)
1495 sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
1496 (uint8_t *) & init_buf);
1497 if (init_cp == NULL) {
1498 /* could not pull a INIT chunk in cookie */
1499 SCTPDBG(SCTP_DEBUG_INPUT1,
1500 "process_cookie_new: could not pull INIT chunk hdr\n");
/* NOTE(review): the error `return` that presumably follows is missing here */
1503 chk_length = ntohs(init_cp->ch.chunk_length);
1504 if (init_cp->ch.chunk_type != SCTP_INITIATION) {
1505 SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n");
/* the INIT-ACK follows the (32-bit padded) INIT chunk inside the cookie */
1508 initack_offset = init_offset + SCTP_SIZE32(chk_length);
1510 * find and validate the INIT-ACK chunk in the cookie (my info) the
1511 * INIT-ACK follows the INIT chunk
1513 initack_cp = (struct sctp_init_ack_chunk *)
1514 sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
1515 (uint8_t *) & initack_buf);
1516 if (initack_cp == NULL) {
1517 /* could not pull INIT-ACK chunk in cookie */
1518 SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n");
1521 chk_length = ntohs(initack_cp->ch.chunk_length);
1522 if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
1526 * NOTE: We can't use the INIT_ACK's chk_length to determine the
1527 * "initack_limit" value. This is because the chk_length field
1528 * includes the length of the cookie, but the cookie is omitted when
1529 * the INIT and INIT_ACK are tacked onto the cookie...
1531 initack_limit = offset + cookie_len;
1534 * now that we know the INIT/INIT-ACK are in place, create a new TCB
/* the new association is tagged with our vtag taken from the INIT-ACK */
1537 stcb = sctp_aloc_assoc(inp, init_src, 0, &error,
1538 ntohl(initack_cp->init.initiate_tag), vrf_id);
/* NOTE(review): a `stcb == NULL` test presumably guards this branch -- missing from this copy */
1540 struct mbuf *op_err;
1542 /* memory problem? */
1543 SCTPDBG(SCTP_DEBUG_INPUT1,
1544 "process_cookie_new: no room for another TCB!\n");
1545 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1547 sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
1548 sh, op_err, vrf_id);
1551 /* get the correct sctp_nets */
1553 *netp = sctp_findnet(stcb, init_src);
1556 /* get scope variables out of cookie */
1557 asoc->ipv4_local_scope = cookie->ipv4_scope;
1558 asoc->site_scope = cookie->site_scope;
1559 asoc->local_scope = cookie->local_scope;
1560 asoc->loopback_scope = cookie->loopback_scope;
1562 if ((asoc->ipv4_addr_legal != cookie->ipv4_addr_legal) ||
1563 (asoc->ipv6_addr_legal != cookie->ipv6_addr_legal)) {
1564 struct mbuf *op_err;
1567 * Houston we have a problem. The EP changed while the
1568 * cookie was in flight. Only recourse is to abort the
/* hold a ref across the abort so the TCB is not reclaimed under us */
1571 atomic_add_int(&stcb->asoc.refcnt, 1);
1572 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1573 sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
1574 sh, op_err, vrf_id);
1575 atomic_add_int(&stcb->asoc.refcnt, -1);
1578 /* process the INIT-ACK info (my info) */
1579 old_tag = asoc->my_vtag;
1580 asoc->assoc_id = asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
1581 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
1582 asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
1583 asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
/* seed every outbound sequence space from the INIT-ACK's initial TSN */
1584 asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
1585 asoc->last_cwr_tsn = asoc->init_seq_number - 1;
1586 asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
1587 asoc->str_reset_seq_in = asoc->init_seq_number;
1589 asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1591 /* process the INIT info (peer's info) */
1593 retval = sctp_process_init(init_cp, stcb, *netp);
/* NOTE(review): a failure test on `retval` presumably guards this teardown -- missing from this copy */
1597 atomic_add_int(&stcb->asoc.refcnt, 1);
1598 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
1599 atomic_add_int(&stcb->asoc.refcnt, -1);
1602 /* load all addresses */
1603 if (sctp_load_addresses_from_init(stcb, m, iphlen,
1604 init_offset + sizeof(struct sctp_init_chunk), initack_offset, sh,
/* address load failed: tear down the half-built association */
1606 atomic_add_int(&stcb->asoc.refcnt, 1);
1607 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_17);
1608 atomic_add_int(&stcb->asoc.refcnt, -1);
1612 * verify any preceding AUTH chunk that was skipped
1614 /* pull the local authentication parameters from the cookie/init-ack */
1615 sctp_auth_get_cookie_params(stcb, m,
1616 initack_offset + sizeof(struct sctp_init_ack_chunk),
1617 initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)));
/* NOTE(review): presumably guarded by `if (auth_skipped)` -- missing from this copy */
1619 struct sctp_auth_chunk *auth;
1621 auth = (struct sctp_auth_chunk *)
1622 sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf);
1623 if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) {
1624 /* auth HMAC failed, dump the assoc and packet */
1625 SCTPDBG(SCTP_DEBUG_AUTH1,
1626 "COOKIE-ECHO: AUTH failed\n");
1627 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_18);
1630 /* remaining chunks checked... good to go */
1631 stcb->asoc.authenticated = 1;
1634 /* update current state */
1635 SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
1636 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
/* keep the pending-shutdown bit and arm the guard timer alongside OPEN */
1637 asoc->state = SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING;
1638 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1639 stcb->sctp_ep, stcb, asoc->primary_destination);
1641 asoc->state = SCTP_STATE_OPEN;
1643 sctp_stop_all_cookie_timers(stcb);
1644 SCTP_STAT_INCR_COUNTER32(sctps_passiveestab);
1645 SCTP_STAT_INCR_GAUGE32(sctps_currestab);
1648 * if we're doing ASCONFs, check to see if we have any new local
1649 * addresses that need to get added to the peer (eg. addresses
1650 * changed while cookie echo in flight). This needs to be done
1651 * after we go to the OPEN state to do the correct asconf
1652 * processing. else, make sure we have the correct addresses in our
1656 /* warning, we re-use sin, sin6, sa_store here! */
1657 /* pull in local_address (our "from" address) */
1658 if (cookie->laddr_type == SCTP_IPV4_ADDRESS) {
1659 /* source addr is IPv4 */
1660 sin = (struct sockaddr_in *)initack_src;
1661 memset(sin, 0, sizeof(*sin));
1662 sin->sin_family = AF_INET;
1663 sin->sin_len = sizeof(struct sockaddr_in);
1664 sin->sin_addr.s_addr = cookie->laddress[0];
1665 } else if (cookie->laddr_type == SCTP_IPV6_ADDRESS) {
1666 /* source addr is IPv6 */
1667 sin6 = (struct sockaddr_in6 *)initack_src;
1668 memset(sin6, 0, sizeof(*sin6));
1669 sin6->sin6_family = AF_INET6;
1670 sin6->sin6_len = sizeof(struct sockaddr_in6);
1671 sin6->sin6_scope_id = cookie->scope_id;
1672 memcpy(&sin6->sin6_addr, cookie->laddress,
1673 sizeof(sin6->sin6_addr));
/* NOTE(review): an `else { ... }` for an unknown laddr_type presumably wraps this teardown -- missing here */
1675 atomic_add_int(&stcb->asoc.refcnt, 1);
1676 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_19);
1677 atomic_add_int(&stcb->asoc.refcnt, -1);
/* reconcile our bound-address list against what the cookie recorded */
1681 sctp_check_address_list(stcb, m,
1682 initack_offset + sizeof(struct sctp_init_ack_chunk),
1683 initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)),
1684 initack_src, cookie->local_scope, cookie->site_scope,
1685 cookie->ipv4_scope, cookie->loopback_scope);
1688 /* set up to notify upper layer */
1689 *notification = SCTP_NOTIFY_ASSOC_UP;
1690 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1691 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1692 (inp->sctp_socket->so_qlimit == 0)) {
1694 * This is an endpoint that called connect() how it got a
1695 * cookie that is NEW is a bit of a mystery. It must be that
1696 * the INIT was sent, but before it got there.. a complete
1697 * INIT/INIT-ACK/COOKIE arrived. But of course then it
1698 * should have went to the other code.. not here.. oh well..
1699 * a bit of protection is worth having..
1701 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
1702 soisconnected(stcb->sctp_ep->sctp_socket);
1703 } else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
1704 (inp->sctp_socket->so_qlimit)) {
1706 * We don't want to do anything with this one. Since it is
1707 * the listening guy. The timer will get started for
1708 * accepted connections in the caller.
1712 /* since we did not send a HB make sure we don't double things */
1713 if ((netp) && (*netp))
1714 (*netp)->hb_responded = 1;
1716 if (stcb->asoc.sctp_autoclose_ticks &&
1717 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
1718 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
1720 /* respond with a COOKIE-ACK */
1721 /* calculate the RTT */
1722 if ((netp) && (*netp)) {
/* use the cookie's entry timestamp as the RTT measurement origin */
1723 (*netp)->RTO = sctp_calculate_rto(stcb, asoc, *netp,
1724 &cookie->time_entered);
1726 sctp_send_cookie_ack(stcb);
1732 * handles a COOKIE-ECHO message stcb: modified to either a new or left as
1733 * existing (non-NULL) TCB
1735 static struct mbuf *
1736 sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
1737 struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp,
1738 struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp,
1739 int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
1740 struct sctp_tcb **locked_tcb, uint32_t vrf_id)
1742 struct sctp_state_cookie *cookie;
1743 struct sockaddr_in6 sin6;
1744 struct sockaddr_in sin;
1745 struct sctp_tcb *l_stcb = *stcb;
1746 struct sctp_inpcb *l_inp;
1747 struct sockaddr *to;
1748 sctp_assoc_t sac_restart_id;
1749 struct sctp_pcb *ep;
1751 uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE];
1753 uint8_t cookie_ok = 0;
1754 unsigned int size_of_pkt, sig_offset, cookie_offset;
1755 unsigned int cookie_len;
1757 struct timeval time_expires;
1758 struct sockaddr_storage dest_store;
1759 struct sockaddr *localep_sa = (struct sockaddr *)&dest_store;
1761 int notification = 0;
1762 struct sctp_nets *netl;
1763 int had_a_existing_tcb = 0;
1765 SCTPDBG(SCTP_DEBUG_INPUT2,
1766 "sctp_handle_cookie: handling COOKIE-ECHO\n");
1768 if (inp_p == NULL) {
1771 /* First get the destination address setup too. */
1772 iph = mtod(m, struct ip *);
1773 if (iph->ip_v == IPVERSION) {
1775 struct sockaddr_in *lsin;
1777 lsin = (struct sockaddr_in *)(localep_sa);
1778 memset(lsin, 0, sizeof(*lsin));
1779 lsin->sin_family = AF_INET;
1780 lsin->sin_len = sizeof(*lsin);
1781 lsin->sin_port = sh->dest_port;
1782 lsin->sin_addr.s_addr = iph->ip_dst.s_addr;
1783 size_of_pkt = SCTP_GET_IPV4_LENGTH(iph);
1784 } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
1786 struct ip6_hdr *ip6;
1787 struct sockaddr_in6 *lsin6;
1789 lsin6 = (struct sockaddr_in6 *)(localep_sa);
1790 memset(lsin6, 0, sizeof(*lsin6));
1791 lsin6->sin6_family = AF_INET6;
1792 lsin6->sin6_len = sizeof(struct sockaddr_in6);
1793 ip6 = mtod(m, struct ip6_hdr *);
1794 lsin6->sin6_port = sh->dest_port;
1795 lsin6->sin6_addr = ip6->ip6_dst;
1796 size_of_pkt = SCTP_GET_IPV6_LENGTH(ip6) + iphlen;
1801 cookie = &cp->cookie;
1802 cookie_offset = offset + sizeof(struct sctp_chunkhdr);
1803 cookie_len = ntohs(cp->ch.chunk_length);
1805 if ((cookie->peerport != sh->src_port) &&
1806 (cookie->myport != sh->dest_port) &&
1807 (cookie->my_vtag != sh->v_tag)) {
1809 * invalid ports or bad tag. Note that we always leave the
1810 * v_tag in the header in network order and when we stored
1811 * it in the my_vtag slot we also left it in network order.
1812 * This maintains the match even though it may be in the
1813 * opposite byte order of the machine :->
1817 if (cookie_len > size_of_pkt ||
1818 cookie_len < sizeof(struct sctp_cookie_echo_chunk) +
1819 sizeof(struct sctp_init_chunk) +
1820 sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) {
1821 /* cookie too long! or too small */
1825 * split off the signature into its own mbuf (since it should not be
1826 * calculated in the sctp_hmac_m() call).
1828 sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE;
1829 if (sig_offset > size_of_pkt) {
1830 /* packet not correct size! */
1831 /* XXX this may already be accounted for earlier... */
1834 m_sig = m_split(m, sig_offset, M_DONTWAIT);
1835 if (m_sig == NULL) {
1836 /* out of memory or ?? */
1840 * compute the signature/digest for the cookie
1842 ep = &(*inp_p)->sctp_ep;
1845 SCTP_TCB_UNLOCK(l_stcb);
1847 SCTP_INP_RLOCK(l_inp);
1849 SCTP_TCB_LOCK(l_stcb);
1851 /* which cookie is it? */
1852 if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) &&
1853 (ep->current_secret_number != ep->last_secret_number)) {
1854 /* it's the old cookie */
1855 (void)sctp_hmac_m(SCTP_HMAC,
1856 (uint8_t *) ep->secret_key[(int)ep->last_secret_number],
1857 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
1859 /* it's the current cookie */
1860 (void)sctp_hmac_m(SCTP_HMAC,
1861 (uint8_t *) ep->secret_key[(int)ep->current_secret_number],
1862 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
1864 /* get the signature */
1865 SCTP_INP_RUNLOCK(l_inp);
1866 sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig);
1868 /* couldn't find signature */
1869 sctp_m_freem(m_sig);
1872 /* compare the received digest with the computed digest */
1873 if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) {
1874 /* try the old cookie? */
1875 if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) &&
1876 (ep->current_secret_number != ep->last_secret_number)) {
1877 /* compute digest with old */
1878 (void)sctp_hmac_m(SCTP_HMAC,
1879 (uint8_t *) ep->secret_key[(int)ep->last_secret_number],
1880 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
1882 if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0)
1890 * Now before we continue we must reconstruct our mbuf so that
1891 * normal processing of any other chunks will work.
1897 while (SCTP_BUF_NEXT(m_at) != NULL) {
1898 m_at = SCTP_BUF_NEXT(m_at);
1900 SCTP_BUF_NEXT(m_at) = m_sig;
1903 if (cookie_ok == 0) {
1904 SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n");
1905 SCTPDBG(SCTP_DEBUG_INPUT2,
1906 "offset = %u, cookie_offset = %u, sig_offset = %u\n",
1907 (uint32_t) offset, cookie_offset, sig_offset);
1911 * check the cookie timestamps to be sure it's not stale
1913 (void)SCTP_GETTIME_TIMEVAL(&now);
1914 /* Expire time is in Ticks, so we convert to seconds */
1915 time_expires.tv_sec = cookie->time_entered.tv_sec + TICKS_TO_SEC(cookie->cookie_life);
1916 time_expires.tv_usec = cookie->time_entered.tv_usec;
1917 if (timevalcmp(&now, &time_expires, >)) {
1918 /* cookie is stale! */
1919 struct mbuf *op_err;
1920 struct sctp_stale_cookie_msg *scm;
1923 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_stale_cookie_msg),
1924 0, M_DONTWAIT, 1, MT_DATA);
1925 if (op_err == NULL) {
1929 /* pre-reserve some space */
1930 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
1931 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
1932 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1935 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_stale_cookie_msg);
1936 scm = mtod(op_err, struct sctp_stale_cookie_msg *);
1937 scm->ph.param_type = htons(SCTP_CAUSE_STALE_COOKIE);
1938 scm->ph.param_length = htons((sizeof(struct sctp_paramhdr) +
1939 (sizeof(uint32_t))));
1940 /* seconds to usec */
1941 tim = (now.tv_sec - time_expires.tv_sec) * 1000000;
1944 tim = now.tv_usec - cookie->time_entered.tv_usec;
1945 scm->time_usec = htonl(tim);
1946 sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag,
1951 * Now we must see with the lookup address if we have an existing
1952 * asoc. This will only happen if we were in the COOKIE-WAIT state
1953 * and a INIT collided with us and somewhere the peer sent the
1954 * cookie on another address besides the single address our assoc
1955 * had for him. In this case we will have one of the tie-tags set at
1956 * least AND the address field in the cookie can be used to look it
1960 if (cookie->addr_type == SCTP_IPV6_ADDRESS) {
1961 memset(&sin6, 0, sizeof(sin6));
1962 sin6.sin6_family = AF_INET6;
1963 sin6.sin6_len = sizeof(sin6);
1964 sin6.sin6_port = sh->src_port;
1965 sin6.sin6_scope_id = cookie->scope_id;
1966 memcpy(&sin6.sin6_addr.s6_addr, cookie->address,
1967 sizeof(sin6.sin6_addr.s6_addr));
1968 to = (struct sockaddr *)&sin6;
1969 } else if (cookie->addr_type == SCTP_IPV4_ADDRESS) {
1970 memset(&sin, 0, sizeof(sin));
1971 sin.sin_family = AF_INET;
1972 sin.sin_len = sizeof(sin);
1973 sin.sin_port = sh->src_port;
1974 sin.sin_addr.s_addr = cookie->address[0];
1975 to = (struct sockaddr *)&sin;
1977 /* This should not happen */
1980 if ((*stcb == NULL) && to) {
1981 /* Yep, lets check */
1982 *stcb = sctp_findassociation_ep_addr(inp_p, to, netp, localep_sa, NULL);
1983 if (*stcb == NULL) {
1985 * We should have only got back the same inp. If we
1986 * got back a different ep we have a problem. The
1987 * original findep got back l_inp and now
1989 if (l_inp != *inp_p) {
1990 SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n");
1993 if (*locked_tcb == NULL) {
1995 * In this case we found the assoc only
1996 * after we locked the create lock. This
1997 * means we are in a colliding case and we
1998 * must make sure that we unlock the tcb if
1999 * its one of the cases where we throw away
2000 * the incoming packets.
2002 *locked_tcb = *stcb;
2005 * We must also increment the inp ref count
2006 * since the ref_count flags was set when we
2007 * did not find the TCB, now we found it
2008 * which reduces the refcount.. we must
2009 * raise it back out to balance it all :-)
2011 SCTP_INP_INCR_REF((*stcb)->sctp_ep);
2012 if ((*stcb)->sctp_ep != l_inp) {
2013 SCTP_PRINTF("Huh? ep:%p diff then l_inp:%p?\n",
2014 (*stcb)->sctp_ep, l_inp);
2022 cookie_len -= SCTP_SIGNATURE_SIZE;
2023 if (*stcb == NULL) {
2024 /* this is the "normal" case... get a new TCB */
2025 *stcb = sctp_process_cookie_new(m, iphlen, offset, sh, cookie,
2026 cookie_len, *inp_p, netp, to, ¬ification,
2027 auth_skipped, auth_offset, auth_len, vrf_id);
2029 /* this is abnormal... cookie-echo on existing TCB */
2030 had_a_existing_tcb = 1;
2031 *stcb = sctp_process_cookie_existing(m, iphlen, offset, sh,
2032 cookie, cookie_len, *inp_p, *stcb, *netp, to,
2033 ¬ification, &sac_restart_id, vrf_id);
2036 if (*stcb == NULL) {
2037 /* still no TCB... must be bad cookie-echo */
2041 * Ok, we built an association so confirm the address we sent the
2044 netl = sctp_findnet(*stcb, to);
2046 * This code should in theory NOT run but
2049 /* TSNH! Huh, why do I need to add this address here? */
2052 ret = sctp_add_remote_addr(*stcb, to, SCTP_DONOT_SETSCOPE,
2053 SCTP_IN_COOKIE_PROC);
2054 netl = sctp_findnet(*stcb, to);
2057 if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) {
2058 netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
2059 (void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL,
2061 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
2062 (*stcb), 0, (void *)netl);
2066 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, *inp_p,
2069 if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
2070 if (!had_a_existing_tcb ||
2071 (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
2073 * If we have a NEW cookie or the connect never
2074 * reached the connected state during collision we
2075 * must do the TCP accept thing.
2077 struct socket *so, *oso;
2078 struct sctp_inpcb *inp;
2080 if (notification == SCTP_NOTIFY_ASSOC_RESTART) {
2082 * For a restart we will keep the same
2083 * socket, no need to do anything. I THINK!!
2085 sctp_ulp_notify(notification, *stcb, 0, (void *)&sac_restart_id);
2088 oso = (*inp_p)->sctp_socket;
2090 * We do this to keep the sockets side happy durin
2091 * the sonewcon ONLY.
2094 SCTP_TCB_UNLOCK((*stcb));
2095 so = sonewconn(oso, 0
2098 SCTP_INP_WLOCK((*stcb)->sctp_ep);
2099 SCTP_TCB_LOCK((*stcb));
2100 SCTP_INP_WUNLOCK((*stcb)->sctp_ep);
2102 struct mbuf *op_err;
2104 /* Too many sockets */
2105 SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n");
2106 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
2107 sctp_abort_association(*inp_p, NULL, m, iphlen,
2108 sh, op_err, vrf_id);
2109 sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_20);
2112 inp = (struct sctp_inpcb *)so->so_pcb;
2113 SCTP_INP_INCR_REF(inp);
2115 * We add the unbound flag here so that if we get an
2116 * soabort() before we get the move_pcb done, we
2117 * will properly cleanup.
2119 inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
2120 SCTP_PCB_FLAGS_CONNECTED |
2121 SCTP_PCB_FLAGS_IN_TCPPOOL |
2122 SCTP_PCB_FLAGS_UNBOUND |
2123 (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) |
2124 SCTP_PCB_FLAGS_DONT_WAKE);
2125 inp->sctp_features = (*inp_p)->sctp_features;
2126 inp->sctp_socket = so;
2127 inp->sctp_frag_point = (*inp_p)->sctp_frag_point;
2128 inp->partial_delivery_point = (*inp_p)->partial_delivery_point;
2129 inp->sctp_context = (*inp_p)->sctp_context;
2130 inp->inp_starting_point_for_iterator = NULL;
2132 * copy in the authentication parameters from the
2135 if (inp->sctp_ep.local_hmacs)
2136 sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
2137 inp->sctp_ep.local_hmacs =
2138 sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs);
2139 if (inp->sctp_ep.local_auth_chunks)
2140 sctp_free_chunklist(inp->sctp_ep.local_auth_chunks);
2141 inp->sctp_ep.local_auth_chunks =
2142 sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks);
2143 (void)sctp_copy_skeylist(&(*inp_p)->sctp_ep.shared_keys,
2144 &inp->sctp_ep.shared_keys);
2147 * Now we must move it from one hash table to
2148 * another and get the tcb in the right place.
2150 sctp_move_pcb_and_assoc(*inp_p, inp, *stcb);
2152 atomic_add_int(&(*stcb)->asoc.refcnt, 1);
2153 SCTP_TCB_UNLOCK((*stcb));
2155 sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb, M_NOWAIT);
2156 SCTP_TCB_LOCK((*stcb));
2157 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
2161 * now we must check to see if we were aborted while
2162 * the move was going on and the lock/unlock
2165 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
2167 * yep it was, we leave the assoc attached
2168 * to the socket since the sctp_inpcb_free()
2169 * call will send an abort for us.
2171 SCTP_INP_DECR_REF(inp);
2174 SCTP_INP_DECR_REF(inp);
2175 /* Switch over to the new guy */
2177 sctp_ulp_notify(notification, *stcb, 0, NULL);
2180 * Pull it from the incomplete queue and wake the
2187 if ((notification) && ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
2188 sctp_ulp_notify(notification, *stcb, 0, NULL);
/*
 * Handle an inbound COOKIE-ACK chunk.
 *
 * Stops all cookie (T1) timers and, when the association is in the
 * COOKIE_ECHOED state, transitions it to OPEN: updates establishment
 * statistics, seeds the RTO from the initial handshake round-trip,
 * notifies the ULP that the association is up, marks TCP-model sockets
 * connected, and starts the heartbeat / autoclose / ASCONF timers as
 * applicable. Finally tosses the stored cookie and restarts the SEND
 * timer if data is already in flight.
 *
 * NOTE(review): this listing has dropped interior lines (else branches
 * and closing braces are missing) — verify against the complete source.
 */
2194 sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp,
2195 struct sctp_tcb *stcb, struct sctp_nets *net)
2197 /* cp must not be used, others call this without a c-ack :-) */
2198 struct sctp_association *asoc;
2200 SCTPDBG(SCTP_DEBUG_INPUT2,
2201 "sctp_handle_cookie_ack: handling COOKIE-ACK\n");
2207 sctp_stop_all_cookie_timers(stcb);
2208 /* process according to association state */
2209 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
2210 /* state change only needed when I am in right state */
2211 SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
2212 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
/* keep the pending-shutdown flag across the transition to OPEN */
2213 asoc->state = SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING;
2214 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
2215 stcb->sctp_ep, stcb, asoc->primary_destination);
2218 asoc->state = SCTP_STATE_OPEN;
2221 SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
2222 SCTP_STAT_INCR_GAUGE32(sctps_currestab);
/*
 * A clean handshake (no retransmission errors) lets us use the
 * COOKIE-ECHO round trip as the first RTO sample for this net.
 */
2223 if (asoc->overall_error_count == 0) {
2224 net->RTO = sctp_calculate_rto(stcb, asoc, net,
2225 &asoc->time_entered);
2227 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
2228 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL);
/* TCP-model sockets must be flagged connected for the socket layer */
2229 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2230 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
2231 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
2232 soisconnected(stcb->sctp_ep->sctp_socket);
2234 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
2237 * since we did not send a HB make sure we don't double
2240 net->hb_responded = 1;
2242 if (stcb->asoc.sctp_autoclose_ticks &&
2243 sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) {
2244 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
2245 stcb->sctp_ep, stcb, NULL);
2248 * set ASCONF timer if ASCONFs are pending and allowed (eg.
2249 * addresses changed when init/cookie echo in flight)
2251 if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) &&
2252 (stcb->asoc.peer_supports_asconf) &&
2253 (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) {
2254 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
2255 stcb->sctp_ep, stcb,
2256 stcb->asoc.primary_destination);
2259 /* Toss the cookie if I can */
2260 sctp_toss_old_cookies(stcb, asoc);
2261 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
2262 /* Restart the timer if we have pending data */
2263 struct sctp_tmit_chunk *chk;
2265 chk = TAILQ_FIRST(&asoc->sent_queue);
2267 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
/*
 * Handle an inbound ECN-Echo (ECNE) chunk.
 *
 * Validates the chunk length, resynchronizes the ECN-nonce state,
 * locates the network the echoed TSN was sent on (falling back to the
 * primary destination), and — at most once per RTT, gated by
 * last_cwr_tsn — halves ssthresh and collapses cwnd to ssthresh in
 * response to the congestion indication. Always answers with a CWR so
 * the peer stops echoing.
 *
 * NOTE(review): interior lines (length-check return, else branches,
 * closing braces) are missing from this listing — confirm against the
 * complete source before relying on the exact control flow.
 */
2274 sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
2275 struct sctp_tcb *stcb)
2277 struct sctp_nets *net;
2278 struct sctp_tmit_chunk *lchk;
/* malformed ECNE: exact chunk size is required */
2281 if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_ecne_chunk)) {
2284 SCTP_STAT_INCR(sctps_recvecne);
2285 tsn = ntohl(cp->tsn);
2286 /* ECN Nonce stuff: need a resync and disable the nonce sum check */
2287 /* Also we make sure we disable the nonce_wait */
2288 lchk = TAILQ_FIRST(&stcb->asoc.send_queue);
/* resync point: next TSN to be sent, or the head of the send queue */
2290 stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq;
2292 stcb->asoc.nonce_resync_tsn = lchk->rec.data.TSN_seq;
2294 stcb->asoc.nonce_wait_for_ecne = 0;
2295 stcb->asoc.nonce_sum_check = 0;
2297 /* Find where it was sent, if possible */
2299 lchk = TAILQ_FIRST(&stcb->asoc.sent_queue);
2301 if (lchk->rec.data.TSN_seq == tsn) {
/* passed the echoed TSN in sequence order — stop searching */
2305 if (compare_with_wrap(lchk->rec.data.TSN_seq, tsn, MAX_SEQ))
2307 lchk = TAILQ_NEXT(lchk, sctp_next);
2310 /* default is we use the primary */
2311 net = stcb->asoc.primary_destination;
/* only reduce cwnd once per RTT (tsn beyond the last CWR point) */
2313 if (compare_with_wrap(tsn, stcb->asoc.last_cwr_tsn, MAX_TSN)) {
2316 old_cwnd = net->cwnd;
2317 SCTP_STAT_INCR(sctps_ecnereducedcwnd);
2318 net->ssthresh = net->cwnd / 2;
2319 if (net->ssthresh < net->mtu) {
2320 net->ssthresh = net->mtu;
2321 /* here back off the timer as well, to slow us down */
2324 net->cwnd = net->ssthresh;
2325 if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
2326 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
2329 * we reduce once every RTT. So we will only lower cwnd at
2330 * the next sending seq i.e. the resync_tsn.
2332 stcb->asoc.last_cwr_tsn = stcb->asoc.nonce_resync_tsn;
2335 * We always send a CWR this way if our previous one was lost our
2336 * peer will get an update, or if it is not time again to reduce we
2337 * still get the cwr to the peer.
2339 sctp_send_cwr(stcb, net, tsn);
/*
 * Handle an inbound CWR (Congestion Window Reduced) chunk.
 *
 * Scans the control send queue for a pending ECN-Echo whose TSN is
 * covered by the peer's CWR (cp->tsn at or beyond the queued ECNE's
 * tsn) and removes it, releasing its mbuf, net reference, and chunk.
 */
2343 sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb)
2346 * Here we get a CWR from the peer. We must look in the outqueue and
2347 * make sure that we have a covered ECNE in the control chunk part.
2350 struct sctp_tmit_chunk *chk;
2351 struct sctp_ecne_chunk *ecne;
2353 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
/* skip anything that is not an ECN-Echo */
2354 if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) {
2358 * Look for and remove if it is the right TSN. Since there
2359 * is only ONE ECNE on the control queue at any one time we
2360 * don't need to worry about more than one!
2362 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
2363 if (compare_with_wrap(ntohl(cp->tsn), ntohl(ecne->tsn),
2364 MAX_TSN) || (cp->tsn == ecne->tsn)) {
2365 /* this covers this ECNE, we can remove it */
2366 stcb->asoc.ecn_echo_cnt_onq--;
2367 TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk,
2370 sctp_m_freem(chk->data);
2373 stcb->asoc.ctrl_queue_cnt--;
2374 sctp_free_remote_addr(chk->whoTo);
2375 sctp_free_a_chunk(stcb, chk);
/*
 * Handle an inbound SHUTDOWN-COMPLETE chunk.
 *
 * Ignored unless the association is in SHUTDOWN_ACK_SENT state.
 * Otherwise: notify the ULP that the association is down, report any
 * data unexpectedly still queued, stop the shutdown timer, bump the
 * shutdown counter, and free the association.
 */
2382 sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp,
2383 struct sctp_tcb *stcb, struct sctp_nets *net)
2385 struct sctp_association *asoc;
2387 SCTPDBG(SCTP_DEBUG_INPUT2,
2388 "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n");
2393 /* process according to association state */
2394 if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
2395 /* unexpected SHUTDOWN-COMPLETE... so ignore... */
2396 SCTP_TCB_UNLOCK(stcb);
2399 /* notify upper layer protocol */
2400 if (stcb->sctp_socket) {
2401 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL);
2402 /* are the queues empty? they should be */
2403 if (!TAILQ_EMPTY(&asoc->send_queue) ||
2404 !TAILQ_EMPTY(&asoc->sent_queue) ||
2405 !TAILQ_EMPTY(&asoc->out_wheel)) {
/* queues should be drained by now; report leftovers to the ULP */
2406 sctp_report_all_outbound(stcb, 0);
2409 /* stop the timer */
2410 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_21);
2411 SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
/* tear down the TCB; sctp_free_assoc releases the association state */
2413 sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_22);
/*
 * Process one chunk descriptor from a PACKET-DROPPED report.
 *
 * Dispatches on the dropped chunk's type: DATA chunks are located in
 * the sent queue (by TSN, then by brute force) and marked for
 * retransmission as if fast-retransmit had fired; control chunks
 * (INIT, SACK, HB, SHUTDOWN, COOKIE-ECHO, ASCONF, FWD-TSN, ...) are
 * simply regenerated/resent. Chunk types we cannot act on fall through
 * to the final case group.
 *
 * NOTE(review): this listing is missing many interior lines (case
 * labels, braces, declarations such as `audit`/`ddp`/`iii`) — the
 * annotations below describe only what the visible lines establish.
 */
2418 process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
2419 struct sctp_nets *net, uint8_t flg)
2421 switch (desc->chunk_type) {
2423 /* find the tsn to resend (possibly */
2426 struct sctp_tmit_chunk *tp1;
2428 tsn = ntohl(desc->tsn_ifany);
/* first pass: walk sent_queue in TSN order looking for the drop */
2429 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2431 if (tp1->rec.data.TSN_seq == tsn) {
2435 if (compare_with_wrap(tp1->rec.data.TSN_seq, tsn,
2441 tp1 = TAILQ_NEXT(tp1, sctp_next);
2445 * Do it the other way , aka without paying
2446 * attention to queue seq order.
2448 SCTP_STAT_INCR(sctps_pdrpdnfnd);
/* second pass: exhaustive scan, queue may not be TSN ordered */
2449 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2451 if (tp1->rec.data.TSN_seq == tsn) {
2455 tp1 = TAILQ_NEXT(tp1, sctp_next);
2459 SCTP_STAT_INCR(sctps_pdrptsnnf);
/* only unacked chunks are candidates for retransmission */
2461 if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) {
2464 if ((stcb->asoc.peers_rwnd == 0) &&
2465 ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
2466 SCTP_STAT_INCR(sctps_pdrpdiwnp);
2469 if (stcb->asoc.peers_rwnd == 0 &&
2470 (flg & SCTP_FROM_MIDDLE_BOX)) {
2471 SCTP_STAT_INCR(sctps_pdrpdizrw);
/* compare reported payload bytes against our copy of the data */
2474 ddp = (uint8_t *) (mtod(tp1->data, caddr_t)+
2475 sizeof(struct sctp_data_chunk));
2479 for (iii = 0; iii < sizeof(desc->data_bytes);
2481 if (ddp[iii] != desc->data_bytes[iii]) {
2482 SCTP_STAT_INCR(sctps_pdrpbadd);
2488 * We zero out the nonce so resync not
2491 tp1->rec.data.ect_nonce = 0;
2495 * this guy had a RTO calculation
2496 * pending on it, cancel it
2500 SCTP_STAT_INCR(sctps_pdrpmark);
2501 if (tp1->sent != SCTP_DATAGRAM_RESEND)
2502 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
2503 tp1->sent = SCTP_DATAGRAM_RESEND;
2505 * mark it as if we were doing a FR, since
2506 * we will be getting gap ack reports behind
2507 * the info from the router.
2509 tp1->rec.data.doing_fast_retransmit = 1;
2511 * mark the tsn with what sequences can
2514 if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
2515 tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
2517 tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
2520 /* restart the timer */
2521 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
2522 stcb, tp1->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_23);
2523 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
2526 /* fix counts and things */
2527 if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) {
2528 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP,
2529 tp1->whoTo->flight_size,
2532 tp1->rec.data.TSN_seq);
2534 sctp_flight_size_decrease(tp1);
2535 sctp_total_flight_decrease(stcb, tp1);
/* audit: recount RESEND-marked chunks and reconcile the counter */
2541 TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
2542 if (tp1->sent == SCTP_DATAGRAM_RESEND)
2545 TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue,
2547 if (tp1->sent == SCTP_DATAGRAM_RESEND)
2550 if (audit != stcb->asoc.sent_queue_retran_cnt) {
2551 SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n",
2552 audit, stcb->asoc.sent_queue_retran_cnt);
2553 #ifndef SCTP_AUDITING_ENABLED
2554 stcb->asoc.sent_queue_retran_cnt = audit;
/* dropped ASCONF: find the queued one and mark it for resend */
2562 struct sctp_tmit_chunk *asconf;
2564 TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
2566 if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
2571 if (asconf->sent != SCTP_DATAGRAM_RESEND)
2572 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
2573 asconf->sent = SCTP_DATAGRAM_RESEND;
/* undo the send-count bump so the retry limit is not charged */
2574 asconf->snd_count--;
2578 case SCTP_INITIATION:
2579 /* resend the INIT */
2580 stcb->asoc.dropped_special_cnt++;
2581 if (stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) {
2583 * If we can get it in, in a few attempts we do
2584 * this, otherwise we let the timer fire.
2586 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep,
2587 stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_24);
2588 sctp_send_initiate(stcb->sctp_ep, stcb);
2591 case SCTP_SELECTIVE_ACK:
2592 /* resend the sack */
2593 sctp_send_sack(stcb);
2595 case SCTP_HEARTBEAT_REQUEST:
2596 /* resend a demand HB */
2597 (void)sctp_send_hb(stcb, 1, net);
2600 sctp_send_shutdown(stcb, net);
2602 case SCTP_SHUTDOWN_ACK:
2603 sctp_send_shutdown_ack(stcb, net);
2605 case SCTP_COOKIE_ECHO:
2607 struct sctp_tmit_chunk *cookie;
2610 TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue,
2612 if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
2617 if (cookie->sent != SCTP_DATAGRAM_RESEND)
2618 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
2619 cookie->sent = SCTP_DATAGRAM_RESEND;
2620 sctp_stop_all_cookie_timers(stcb);
2624 case SCTP_COOKIE_ACK:
2625 sctp_send_cookie_ack(stcb);
2627 case SCTP_ASCONF_ACK:
2628 /* resend last asconf ack */
2629 sctp_send_asconf_ack(stcb, 1);
2631 case SCTP_FORWARD_CUM_TSN:
2632 send_forward_tsn(stcb, &stcb->asoc);
2634 /* can't do anything with these */
2635 case SCTP_PACKET_DROPPED:
2636 case SCTP_INITIATION_ACK: /* this should not happen */
2637 case SCTP_HEARTBEAT_ACK:
2638 case SCTP_ABORT_ASSOCIATION:
2639 case SCTP_OPERATION_ERROR:
2640 case SCTP_SHUTDOWN_COMPLETE:
/*
 * Reset inbound stream sequence state.
 *
 * For each stream id in `list` (or every inbound stream when the list
 * is empty), set last_sequence_delivered to 0xffff so the next SSN
 * accepted after the reset is 0. Out-of-range stream ids are skipped.
 * Notifies the ULP that an inbound stream reset was received.
 */
2650 sctp_reset_in_stream(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
2656 * We set things to 0xffff since this is the last delivered sequence
2657 * and we will be sending in 0 after the reset.
2660 if (number_entries) {
2661 for (i = 0; i < number_entries; i++) {
/* list entries arrive in network byte order */
2662 temp = ntohs(list[i]);
2663 if (temp >= stcb->asoc.streamincnt) {
2666 stcb->asoc.strmin[temp].last_sequence_delivered = 0xffff;
/* no list given: reset every inbound stream */
2670 for (i = 0; i < stcb->asoc.streamincnt; i++) {
2671 stcb->asoc.strmin[i].last_sequence_delivered = 0xffff;
2674 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list);
/*
 * Reset outbound stream sequence state.
 *
 * For each stream id in `list` (or every outbound stream when
 * number_entries is 0), restart next_sequence_sent at 0. Ids outside
 * the outbound stream count are ignored. Notifies the ULP that an
 * outbound stream reset took effect.
 */
2678 sctp_reset_out_streams(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
2682 if (number_entries == 0) {
/* empty list: the reset applies to all outbound streams */
2683 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
2684 stcb->asoc.strmout[i].next_sequence_sent = 0;
2686 } else if (number_entries) {
2687 for (i = 0; i < number_entries; i++) {
/* list entries arrive in network byte order */
2690 temp = ntohs(list[i]);
2691 if (temp >= stcb->asoc.streamoutcnt) {
2692 /* no such stream */
2695 stcb->asoc.strmout[temp].next_sequence_sent = 0;
2698 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list);
/*
 * Find the outstanding stream-reset request matching `seq`.
 *
 * Looks at the association's single pending stream-reset chunk
 * (asoc->str_reset); a chunk may carry at most two request parameters,
 * both of which are checked. Returns the matching out-request
 * parameter, and hands back the chunk pointer through *bchk. Clears
 * stream_reset_outstanding and returns NULL when no request is
 * pending or `seq` matches neither parameter.
 *
 * NOTE(review): the early-return lines are missing from this listing —
 * verify the NULL-return paths against the complete source.
 */
2702 struct sctp_stream_reset_out_request *
2703 sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk)
2705 struct sctp_association *asoc;
2706 struct sctp_stream_reset_out_req *req;
2707 struct sctp_stream_reset_out_request *r;
2708 struct sctp_tmit_chunk *chk;
2712 if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
2713 asoc->stream_reset_outstanding = 0;
2716 if (stcb->asoc.str_reset == NULL) {
2717 asoc->stream_reset_outstanding = 0;
2720 chk = stcb->asoc.str_reset;
2721 if (chk->data == NULL) {
2725 /* he wants a copy of the chk pointer */
2728 clen = chk->send_size;
2729 req = mtod(chk->data, struct sctp_stream_reset_out_req *);
2731 if (ntohl(r->request_seq) == seq) {
2735 len = SCTP_SIZE32(ntohs(r->ph.param_length));
2736 if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) {
2737 /* move to the next one, there can only be a max of two */
2738 r = (struct sctp_stream_reset_out_request *)((caddr_t)r + len);
2739 if (ntohl(r->request_seq) == seq) {
2743 /* that seq is not here */
/*
 * Tear down the association's pending stream-reset request.
 *
 * Stops the STRRESET timer, unlinks the chunk from the control send
 * queue, frees its mbuf data, drops the net reference, releases the
 * chunk, and clears asoc->str_reset. No-op when nothing is pending.
 */
2748 sctp_clean_up_stream_reset(struct sctp_tcb *stcb)
2750 struct sctp_association *asoc;
2751 struct sctp_tmit_chunk *chk = stcb->asoc.str_reset;
2753 if (stcb->asoc.str_reset == NULL) {
2758 sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_25);
2759 TAILQ_REMOVE(&asoc->control_send_queue,
2763 sctp_m_freem(chk->data);
2766 asoc->ctrl_queue_cnt--;
2767 sctp_free_remote_addr(chk->whoTo);
2769 sctp_free_a_chunk(stcb, chk);
/* no request outstanding any more */
2770 stcb->asoc.str_reset = NULL;
/*
 * Handle the peer's response to a stream-reset request we sent.
 *
 * Only responses matching our outstanding request sequence number
 * (str_reset_seq_out) are processed. Depending on the type of the
 * original request parameter:
 *   - OUT request: reset our outbound streams on success, or notify
 *     the ULP of the failure;
 *   - IN request:  decrement the outstanding count, notify on failure;
 *   - TSN request: adopt the peer's new TSNs by synthesizing a
 *     FWD-TSN, rebasing the mapping array, and resetting all streams
 *     in both directions.
 * When nothing remains outstanding the pending request chunk is
 * cleaned up.
 *
 * NOTE(review): early-return and else lines are missing from this
 * listing; the return value's meaning cannot be confirmed from the
 * visible lines.
 */
2775 sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
2776 uint32_t seq, uint32_t action,
2777 struct sctp_stream_reset_response *respin)
2781 struct sctp_association *asoc = &stcb->asoc;
2782 struct sctp_tmit_chunk *chk;
2783 struct sctp_stream_reset_out_request *srparam;
/* nothing of ours is outstanding — spurious response */
2786 if (asoc->stream_reset_outstanding == 0) {
2790 if (seq == stcb->asoc.str_reset_seq_out) {
2791 srparam = sctp_find_stream_reset(stcb, seq, &chk);
2793 stcb->asoc.str_reset_seq_out++;
2794 type = ntohs(srparam->ph.param_type);
2795 lparm_len = ntohs(srparam->ph.param_length);
2796 if (type == SCTP_STR_RESET_OUT_REQUEST) {
2797 number_entries = (lparm_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t);
2798 asoc->stream_reset_out_is_outstanding = 0;
2799 if (asoc->stream_reset_outstanding)
2800 asoc->stream_reset_outstanding--;
2801 if (action == SCTP_STREAM_RESET_PERFORMED) {
2803 sctp_reset_out_streams(stcb, number_entries, srparam->list_of_streams);
2805 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, srparam->list_of_streams);
2807 } else if (type == SCTP_STR_RESET_IN_REQUEST) {
2808 /* Answered my request */
2809 number_entries = (lparm_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t);
2810 if (asoc->stream_reset_outstanding)
2811 asoc->stream_reset_outstanding--;
2812 if (action != SCTP_STREAM_RESET_PERFORMED) {
2813 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb, number_entries, srparam->list_of_streams);
2815 } else if (type == SCTP_STR_RESET_TSN_REQUEST) {
2817 * a) Adopt the new in tsn.
2819 * c) Adopt the new out-tsn
2821 struct sctp_stream_reset_response_tsn *resp;
2822 struct sctp_forward_tsn_chunk fwdtsn;
/* TSN responses carry extra fields; a bare response is invalid */
2825 if (respin == NULL) {
2829 if (action == SCTP_STREAM_RESET_PERFORMED) {
2830 resp = (struct sctp_stream_reset_response_tsn *)respin;
2831 asoc->stream_reset_outstanding--;
/* flush delivery up to the peer's new TSN via a synthetic FWD-TSN */
2832 fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
2833 fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
2834 fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1);
2835 sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag);
/* rebase the receive mapping array on the peer's next TSN */
2839 stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1);
2840 stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
2841 stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn);
2842 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
2843 stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn);
2844 stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn;
2846 sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
2847 sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
2851 /* get rid of the request and get the request flags */
2852 if (asoc->stream_reset_outstanding == 0) {
2853 sctp_clean_up_stream_reset(stcb);
/*
 * Handle a peer's Incoming-SSN reset request (peer asks us to reset
 * our OUTGOING stream sequence numbers).
 *
 * For an in-order request (seq matches str_reset_seq_in) with no
 * outbound reset already outstanding, build the corresponding
 * out-reset request into `chk`, record PERFORMED, and start the
 * STRRESET timer. If one is already outstanding, answer TRY_LATER.
 * Requests one or two behind our expected sequence get the cached
 * result replayed (our earlier response was lost); anything else is
 * answered BAD_SEQNO.
 */
2861 sctp_handle_str_reset_request_in(struct sctp_tcb *stcb,
2862 struct sctp_tmit_chunk *chk,
2863 struct sctp_stream_reset_in_request *req)
2871 * peer wants me to send a str-reset to him for my outgoing seq's if
2874 struct sctp_association *asoc = &stcb->asoc;
2876 seq = ntohl(req->request_seq);
2877 if (asoc->str_reset_seq_in == seq) {
2878 if (stcb->asoc.stream_reset_out_is_outstanding == 0) {
2879 len = ntohs(req->ph.param_length);
2880 number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t));
/* convert the stream list to host byte order in place */
2881 for (i = 0; i < number_entries; i++) {
2882 temp = ntohs(req->list_of_streams[i]);
2883 req->list_of_streams[i] = temp;
2885 /* move the reset action back one */
2886 asoc->last_reset_action[1] = asoc->last_reset_action[0];
2887 asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
2888 sctp_add_stream_reset_out(chk, number_entries, req->list_of_streams,
2889 asoc->str_reset_seq_out,
2890 seq, (asoc->sending_seq - 1));
2891 asoc->stream_reset_out_is_outstanding = 1;
2892 asoc->str_reset = chk;
2893 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
2894 stcb->asoc.stream_reset_outstanding++;
2896 /* Can't do it, since we have sent one out */
2897 asoc->last_reset_action[1] = asoc->last_reset_action[0];
2898 asoc->last_reset_action[0] = SCTP_STREAM_RESET_TRY_LATER;
2899 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
2901 asoc->str_reset_seq_in++;
/* retransmitted request: echo the action we already took */
2902 } else if (asoc->str_reset_seq_in - 1 == seq) {
2903 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
2904 } else if (asoc->str_reset_seq_in - 2 == seq) {
2905 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
2907 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
/*
 * Handle a peer's SSN/TSN reset request: reset all streams in both
 * directions and rebase both TSN spaces.
 *
 * For an in-order request: deliver everything receivable via a
 * synthetic FWD-TSN, jump the inbound TSN space forward by
 * SCTP_STREAM_RESET_TSN_DELTA, clear the mapping array, bump the
 * outbound sending sequence, save the old values for replaying a lost
 * response, and answer PERFORMED with our new TSNs. Requests one or
 * two behind get the saved historical values replayed; anything else
 * gets BAD_SEQNO.
 *
 * NOTE(review): the return-value lines are missing from this listing;
 * callers appear to treat nonzero as an abort indication (see
 * sctp_handle_stream_reset) — confirm against the complete source.
 */
2912 sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb,
2913 struct sctp_tmit_chunk *chk,
2914 struct sctp_stream_reset_tsn_request *req)
2916 /* reset all in and out and update the tsn */
2918 * A) reset my str-seq's on in and out. B) Select a receive next,
2919 * and set cum-ack to it. Also process this selected number as a
2920 * fwd-tsn as well. C) set in the response my next sending seq.
2922 struct sctp_forward_tsn_chunk fwdtsn;
2923 struct sctp_association *asoc = &stcb->asoc;
2927 seq = ntohl(req->request_seq);
2928 if (asoc->str_reset_seq_in == seq) {
/* flush pending deliveries up to the current map edge */
2929 fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
2930 fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
2931 fwdtsn.ch.chunk_flags = 0;
2932 fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1);
2933 sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag);
/* jump the inbound TSN space and start a fresh mapping array */
2937 stcb->asoc.highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA;
2938 stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
2939 stcb->asoc.mapping_array_base_tsn = stcb->asoc.highest_tsn_inside_map + 1;
2940 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
2941 atomic_add_int(&stcb->asoc.sending_seq, 1);
2942 /* save off historical data for retrans */
2943 stcb->asoc.last_sending_seq[1] = stcb->asoc.last_sending_seq[0];
2944 stcb->asoc.last_sending_seq[0] = stcb->asoc.sending_seq;
2945 stcb->asoc.last_base_tsnsent[1] = stcb->asoc.last_base_tsnsent[0];
2946 stcb->asoc.last_base_tsnsent[0] = stcb->asoc.mapping_array_base_tsn;
2948 sctp_add_stream_reset_result_tsn(chk,
2949 ntohl(req->request_seq),
2950 SCTP_STREAM_RESET_PERFORMED,
2951 stcb->asoc.sending_seq,
2952 stcb->asoc.mapping_array_base_tsn);
2953 sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
2954 sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
2955 stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
2956 stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
2958 asoc->str_reset_seq_in++;
/* retransmitted request: replay the saved result and TSNs */
2959 } else if (asoc->str_reset_seq_in - 1 == seq) {
2960 sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
2961 stcb->asoc.last_sending_seq[0],
2962 stcb->asoc.last_base_tsnsent[0]
2964 } else if (asoc->str_reset_seq_in - 2 == seq) {
2965 sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1],
2966 stcb->asoc.last_sending_seq[1],
2967 stcb->asoc.last_base_tsnsent[1]
2970 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
/*
 * Handle a peer's Outgoing-SSN reset request (peer is resetting the
 * streams it sends to us, i.e. OUR inbound streams).
 *
 * For an in-order request: if the cumulative TSN already covers the
 * peer's send_reset_at_tsn, reset the listed inbound streams now and
 * answer PERFORMED; otherwise queue a copy of the request on
 * resetHead so it triggers once the TSN arrives (answering PERFORMED,
 * or DENIED on allocation failure). Requests one or two behind replay
 * the cached action; anything else is answered BAD_SEQNO.
 */
2976 sctp_handle_str_reset_request_out(struct sctp_tcb *stcb,
2977 struct sctp_tmit_chunk *chk,
2978 struct sctp_stream_reset_out_request *req)
2981 int number_entries, len;
2982 struct sctp_association *asoc = &stcb->asoc;
2984 seq = ntohl(req->request_seq);
2986 /* now if its not a duplicate we process it */
2987 if (asoc->str_reset_seq_in == seq) {
2988 len = ntohs(req->ph.param_length);
2989 number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t));
2991 * the sender is resetting, handle the list issue.. we must
2992 * a) verify if we can do the reset, if so no problem b) If
2993 * we can't do the reset we must copy the request. c) queue
2994 * it, and setup the data in processor to trigger it off
2995 * when needed and dequeue all the queued data.
2997 tsn = ntohl(req->send_reset_at_tsn);
2999 /* move the reset action back one */
3000 asoc->last_reset_action[1] = asoc->last_reset_action[0];
/* reset point already covered by our cumulative TSN? */
3001 if ((tsn == asoc->cumulative_tsn) ||
3002 (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN))) {
3003 /* we can do it now */
3004 sctp_reset_in_stream(stcb, number_entries, req->list_of_streams);
3005 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
3006 asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
3009 * we must queue it up and thus wait for the TSN's
3010 * to arrive that are at or before tsn
3012 struct sctp_stream_reset_list *liste;
3015 siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t));
3016 SCTP_MALLOC(liste, struct sctp_stream_reset_list *,
3017 siz, SCTP_M_STRESET);
3018 if (liste == NULL) {
3019 /* gak out of memory */
3020 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
3021 asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
3025 liste->number_entries = number_entries;
/* keep a private copy; the inbound mbuf will be recycled */
3026 memcpy(&liste->req, req,
3027 (sizeof(struct sctp_stream_reset_out_request) + (number_entries * sizeof(uint16_t))));
3028 TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp);
3029 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
3030 asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
3032 asoc->str_reset_seq_in++;
3033 } else if ((asoc->str_reset_seq_in - 1) == seq) {
3035 * one seq back, just echo back last action since my
3036 * response was lost.
3038 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
3039 } else if ((asoc->str_reset_seq_in - 2) == seq) {
3041 * two seq back, just echo back last action since my
3042 * response was lost.
3044 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
3046 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
/*
 * Handle an inbound STREAM-RESET chunk.
 *
 * Allocates a response chunk (abandoning if the allocation fails),
 * then walks the chunk's parameters, dispatching each to the matching
 * request/response handler: OUT request, IN request, TSN request, or
 * RESPONSE. Parameter counts are capped at SCTP_MAX_RESET_PARAMS and
 * truncated/undersized parameters end the walk. If any handler
 * produced response data, the chunk is queued on the control send
 * queue; otherwise everything is released via strres_nochunk.
 *
 * NOTE(review): several control-flow lines (strres_nochunk label,
 * error returns, default branch) are missing from this listing —
 * verify against the complete source.
 */
3051 sctp_handle_stream_reset(struct sctp_tcb *stcb, struct sctp_stream_reset_out_req *sr_req)
3053 int chk_length, param_len, ptype;
3056 struct sctp_tmit_chunk *chk;
3057 struct sctp_chunkhdr *ch;
3058 struct sctp_paramhdr *ph;
3062 /* now it may be a reset or a reset-response */
3063 chk_length = ntohs(sr_req->ch.chunk_length);
3065 /* setup for adding the response */
3066 sctp_alloc_a_chunk(stcb, chk)
3070 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
3071 chk->rec.chunk_id.can_take_data = 0;
3072 chk->asoc = &stcb->asoc;
3073 chk->no_fr_allowed = 0;
3074 chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr);
3075 chk->book_size_scale = 0;
3076 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3077 if (chk->data == NULL) {
/* mbuf allocation failed: release the chunk and bail */
3080 sctp_m_freem(chk->data);
3083 sctp_free_a_chunk(stcb, chk);
/* leave room for IP/SCTP headers in front of the response */
3086 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
3088 /* setup chunk parameters */
3089 chk->sent = SCTP_DATAGRAM_UNSENT;
3091 chk->whoTo = stcb->asoc.primary_destination;
3092 atomic_add_int(&chk->whoTo->ref_count, 1);
3094 ch = mtod(chk->data, struct sctp_chunkhdr *);
3095 ch->chunk_type = SCTP_STREAM_RESET;
3096 ch->chunk_flags = 0;
3097 ch->chunk_length = htons(chk->send_size);
3098 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
3099 ph = (struct sctp_paramhdr *)&sr_req->sr_req;
/* iterate over each embedded reset parameter */
3100 while ((size_t)chk_length >= sizeof(struct sctp_stream_reset_tsn_request)) {
3101 param_len = ntohs(ph->param_length);
3102 if (param_len < (int)sizeof(struct sctp_stream_reset_tsn_request)) {
3106 ptype = ntohs(ph->param_type);
3108 if (num_param > SCTP_MAX_RESET_PARAMS) {
3109 /* hit the max of parameters already sorry.. */
3112 if (ptype == SCTP_STR_RESET_OUT_REQUEST) {
3113 struct sctp_stream_reset_out_request *req_out;
3115 req_out = (struct sctp_stream_reset_out_request *)ph;
3117 if (stcb->asoc.stream_reset_outstanding) {
3118 seq = ntohl(req_out->response_seq);
3119 if (seq == stcb->asoc.str_reset_seq_out) {
/* implicit ack: the out-request doubles as a response to ours */
3121 (void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_PERFORMED, NULL);
3124 sctp_handle_str_reset_request_out(stcb, chk, req_out);
3125 } else if (ptype == SCTP_STR_RESET_IN_REQUEST) {
3126 struct sctp_stream_reset_in_request *req_in;
3129 req_in = (struct sctp_stream_reset_in_request *)ph;
3130 sctp_handle_str_reset_request_in(stcb, chk, req_in);
3131 } else if (ptype == SCTP_STR_RESET_TSN_REQUEST) {
3132 struct sctp_stream_reset_tsn_request *req_tsn;
3135 req_tsn = (struct sctp_stream_reset_tsn_request *)ph;
3136 if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) {
3138 goto strres_nochunk;
3142 } else if (ptype == SCTP_STR_RESET_RESPONSE) {
3143 struct sctp_stream_reset_response *resp;
3146 resp = (struct sctp_stream_reset_response *)ph;
3147 seq = ntohl(resp->response_seq);
3148 result = ntohl(resp->result);
3149 if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) {
3151 goto strres_nochunk;
/* advance to the next 32-bit-aligned parameter */
3157 ph = (struct sctp_paramhdr *)((caddr_t)ph + SCTP_SIZE32(param_len));
3158 chk_length -= SCTP_SIZE32(param_len);
3161 /* we have no response free the stuff */
3162 goto strres_nochunk;
3164 /* ok we have a chunk to link in */
3165 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue,
3168 stcb->asoc.ctrl_queue_cnt++;
3173 * Handle a router or endpoint's report of a packet loss; there are two ways
3174 * to handle this: either we get the whole packet and must dissect it
3175 * ourselves (possibly with truncation and/or corruption), or it is a summary
3176 * from a middle box that did the dissecting for us.
3179 sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp,
3180 struct sctp_tcb *stcb, struct sctp_nets *net)
3182 uint32_t bottle_bw, on_queue;
3186 struct sctp_chunk_desc desc;
3187 struct sctp_chunkhdr *ch;
3189 chlen = ntohs(cp->ch.chunk_length);
3190 chlen -= sizeof(struct sctp_pktdrop_chunk);
3191 /* XXX possible chlen underflow */
3194 if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)
3195 SCTP_STAT_INCR(sctps_pdrpbwrpt);
3197 ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr));
3198 chlen -= sizeof(struct sctphdr);
3199 /* XXX possible chlen underflow */
3200 memset(&desc, 0, sizeof(desc));
3202 trunc_len = (uint16_t) ntohs(cp->trunc_len);
3203 /* now the chunks themselves */
3204 while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) {
3205 desc.chunk_type = ch->chunk_type;
3206 /* get amount we need to move */
3207 at = ntohs(ch->chunk_length);
3208 if (at < sizeof(struct sctp_chunkhdr)) {
3209 /* corrupt chunk, maybe at the end? */
3210 SCTP_STAT_INCR(sctps_pdrpcrupt);
3213 if (trunc_len == 0) {
3214 /* we are supposed to have all of it */
3216 /* corrupt skip it */
3217 SCTP_STAT_INCR(sctps_pdrpcrupt);
3221 /* is there enough of it left ? */
3222 if (desc.chunk_type == SCTP_DATA) {
3223 if (chlen < (sizeof(struct sctp_data_chunk) +
3224 sizeof(desc.data_bytes))) {
3228 if (chlen < sizeof(struct sctp_chunkhdr)) {
3233 if (desc.chunk_type == SCTP_DATA) {
3234 /* can we get out the tsn? */
3235 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
3236 SCTP_STAT_INCR(sctps_pdrpmbda);
3238 if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) {
3240 struct sctp_data_chunk *dcp;
3244 dcp = (struct sctp_data_chunk *)ch;
3245 ddp = (uint8_t *) (dcp + 1);
3246 for (iii = 0; iii < sizeof(desc.data_bytes); iii++) {
3247 desc.data_bytes[iii] = ddp[iii];
3249 desc.tsn_ifany = dcp->dp.tsn;
3251 /* nope we are done. */
3252 SCTP_STAT_INCR(sctps_pdrpnedat);
3256 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
3257 SCTP_STAT_INCR(sctps_pdrpmbct);
3260 if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) {
3261 SCTP_STAT_INCR(sctps_pdrppdbrk);
3264 if (SCTP_SIZE32(at) > chlen) {
3267 chlen -= SCTP_SIZE32(at);
3268 if (chlen < sizeof(struct sctp_chunkhdr)) {
3269 /* done, none left */
3272 ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at));
3274 /* Now update any rwnd --- possibly */
3275 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) {
3276 /* From a peer, we get a rwnd report */
3279 SCTP_STAT_INCR(sctps_pdrpfehos);
3281 bottle_bw = ntohl(cp->bottle_bw);
3282 on_queue = ntohl(cp->current_onq);
3283 if (bottle_bw && on_queue) {
3284 /* a rwnd report is in here */
3285 if (bottle_bw > on_queue)
3286 a_rwnd = bottle_bw - on_queue;
3291 stcb->asoc.peers_rwnd = 0;
3293 if (a_rwnd > stcb->asoc.total_flight) {
3294 stcb->asoc.peers_rwnd =
3295 a_rwnd - stcb->asoc.total_flight;
3297 stcb->asoc.peers_rwnd = 0;
3299 if (stcb->asoc.peers_rwnd <
3300 stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3301 /* SWS sender side engages */
3302 stcb->asoc.peers_rwnd = 0;
3307 SCTP_STAT_INCR(sctps_pdrpfmbox);
3310 /* now middle boxes in sat networks get a cwnd bump */
3311 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) &&
3312 (stcb->asoc.sat_t3_loss_recovery == 0) &&
3313 (stcb->asoc.sat_network)) {
3315 * This is debateable but for sat networks it makes sense
3316 * Note if a T3 timer has went off, we will prohibit any
3317 * changes to cwnd until we exit the t3 loss recovery.
3322 int old_cwnd = net->cwnd;
3324 /* need real RTT for this calc */
3325 rtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
3326 /* get bottle neck bw */
3327 bottle_bw = ntohl(cp->bottle_bw);
3328 /* and whats on queue */
3329 on_queue = ntohl(cp->current_onq);
3331 * adjust the on-queue if our flight is more it could be
3332 * that the router has not yet gotten data "in-flight" to it
3334 if (on_queue < net->flight_size)
3335 on_queue = net->flight_size;
3337 /* calculate the available space */
3338 bw_avail = (bottle_bw * rtt) / 1000;
3339 if (bw_avail > bottle_bw) {
3341 * Cap the growth to no more than the bottle neck.
3342 * This can happen as RTT slides up due to queues.
3343 * It also means if you have more than a 1 second
3344 * RTT with a empty queue you will be limited to the
3345 * bottle_bw per second no matter if other points
3346 * have 1/2 the RTT and you could get more out...
3348 bw_avail = bottle_bw;
3350 if (on_queue > bw_avail) {
3352 * No room for anything else don't allow anything
3353 * else to be "added to the fire".
3355 int seg_inflight, seg_onqueue, my_portion;
3357 net->partial_bytes_acked = 0;
3359 /* how much are we over queue size? */
3360 incr = on_queue - bw_avail;
3361 if (stcb->asoc.seen_a_sack_this_pkt) {
3363 * undo any cwnd adjustment that the sack
3366 net->cwnd = net->prev_cwnd;
3368 /* Now how much of that is mine? */
3369 seg_inflight = net->flight_size / net->mtu;
3370 seg_onqueue = on_queue / net->mtu;
3371 my_portion = (incr * seg_inflight) / seg_onqueue;
3373 /* Have I made an adjustment already */
3374 if (net->cwnd > net->flight_size) {
3376 * for this flight I made an adjustment we
3377 * need to decrease the portion by a share
3378 * our previous adjustment.
3382 diff_adj = net->cwnd - net->flight_size;
3383 if (diff_adj > my_portion)
3386 my_portion -= diff_adj;
3389 * back down to the previous cwnd (assume we have
3390 * had a sack before this packet). minus what ever
3391 * portion of the overage is my fault.
3393 net->cwnd -= my_portion;
3395 /* we will NOT back down more than 1 MTU */
3396 if (net->cwnd <= net->mtu) {
3397 net->cwnd = net->mtu;
3400 net->ssthresh = net->cwnd - 1;
3403 * Take 1/4 of the space left or max burst up ..
3404 * whichever is less.
3406 incr = min((bw_avail - on_queue) >> 2,
3407 stcb->asoc.max_burst * net->mtu);
3410 if (net->cwnd > bw_avail) {
3411 /* We can't exceed the pipe size */
3412 net->cwnd = bw_avail;
3414 if (net->cwnd < net->mtu) {
3415 /* We always have 1 MTU */
3416 net->cwnd = net->mtu;
3418 if (net->cwnd - old_cwnd != 0) {
3419 /* log only changes */
3420 if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
3421 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
3422 SCTP_CWND_LOG_FROM_SAT);
3429 * handles all control chunks in a packet inputs: - m: mbuf chain, assumed to
3430 * still contain IP/SCTP header - stcb: is the tcb found for this packet -
3431 * offset: offset into the mbuf chain to first chunkhdr - length: is the
3432 * length of the complete packet outputs: - length: modified to remaining
3433 * length after control processing - netp: modified to new sctp_nets after
3434 * cookie-echo processing - return NULL to discard the packet (ie. no asoc,
3435 * bad packet,...) otherwise return the tcb for this packet
3438 __attribute__((noinline))
3440 static struct sctp_tcb *
3441 sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length,
3442 struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp,
3443 struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen,
3446 struct sctp_association *asoc;
3448 int num_chunks = 0; /* number of control chunks processed */
3449 uint32_t chk_length;
3451 int abort_no_unlock = 0;
3454 * How big should this be, and should it be alloc'd? Lets try the
3455 * d-mtu-ceiling for now (2k) and that should hopefully work ...
3456 * until we get into jumbo grams and such..
3458 uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
3459 struct sctp_tcb *locked_tcb = stcb;
3461 uint32_t auth_offset = 0, auth_len = 0;
3462 int auth_skipped = 0;
3464 SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n",
3465 iphlen, *offset, length, stcb);
3467 /* validate chunk header length... */
3468 if (ntohs(ch->chunk_length) < sizeof(*ch)) {
3469 SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n",
3470 ntohs(ch->chunk_length));
3472 SCTP_TCB_UNLOCK(locked_tcb);
3477 * validate the verification tag
3479 vtag_in = ntohl(sh->v_tag);
3482 SCTP_TCB_LOCK_ASSERT(locked_tcb);
3484 if (ch->chunk_type == SCTP_INITIATION) {
3485 SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n",
3486 ntohs(ch->chunk_length), vtag_in);
3488 /* protocol error- silently discard... */
3489 SCTP_STAT_INCR(sctps_badvtag);
3491 SCTP_TCB_UNLOCK(locked_tcb);
3495 } else if (ch->chunk_type != SCTP_COOKIE_ECHO) {
3497 * If there is no stcb, skip the AUTH chunk and process
3498 * later after a stcb is found (to validate the lookup was
3501 if ((ch->chunk_type == SCTP_AUTHENTICATION) &&
3502 (stcb == NULL) && !sctp_auth_disable) {
3503 /* save this chunk for later processing */
3505 auth_offset = *offset;
3506 auth_len = ntohs(ch->chunk_length);
3508 /* (temporarily) move past this chunk */
3509 *offset += SCTP_SIZE32(auth_len);
3510 if (*offset >= length) {
3511 /* no more data left in the mbuf chain */
3514 SCTP_TCB_UNLOCK(locked_tcb);
3518 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
3519 sizeof(struct sctp_chunkhdr), chunk_buf);
3525 SCTP_TCB_UNLOCK(locked_tcb);
3529 if (ch->chunk_type == SCTP_COOKIE_ECHO) {
3530 goto process_control_chunks;
3533 * first check if it's an ASCONF with an unknown src addr we
3534 * need to look inside to find the association
3536 if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) {
3537 /* inp's refcount may be reduced */
3538 SCTP_INP_INCR_REF(inp);
3540 stcb = sctp_findassociation_ep_asconf(m, iphlen,
3541 *offset, sh, &inp, netp);
3544 * reduce inp's refcount if not reduced in
3545 * sctp_findassociation_ep_asconf().
3547 SCTP_INP_DECR_REF(inp);
3549 /* now go back and verify any auth chunk to be sure */
3550 if (auth_skipped && (stcb != NULL)) {
3551 struct sctp_auth_chunk *auth;
3553 auth = (struct sctp_auth_chunk *)
3554 sctp_m_getptr(m, auth_offset,
3555 auth_len, chunk_buf);
3558 if ((auth == NULL) || sctp_handle_auth(stcb, auth, m,
3560 /* auth HMAC failed so dump it */
3563 SCTP_TCB_UNLOCK(locked_tcb);
3567 /* remaining chunks are HMAC checked */
3568 stcb->asoc.authenticated = 1;
3573 /* no association, so it's out of the blue... */
3574 sctp_handle_ootb(m, iphlen, *offset, sh, inp, NULL,
3578 SCTP_TCB_UNLOCK(locked_tcb);
3583 /* ABORT and SHUTDOWN can use either v_tag... */
3584 if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) ||
3585 (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) ||
3586 (ch->chunk_type == SCTP_PACKET_DROPPED)) {
3587 if ((vtag_in == asoc->my_vtag) ||
3588 ((ch->chunk_flags & SCTP_HAD_NO_TCB) &&
3589 (vtag_in == asoc->peer_vtag))) {
3592 /* drop this packet... */
3593 SCTP_STAT_INCR(sctps_badvtag);
3595 SCTP_TCB_UNLOCK(locked_tcb);
3599 } else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
3600 if (vtag_in != asoc->my_vtag) {
3602 * this could be a stale SHUTDOWN-ACK or the
3603 * peer never got the SHUTDOWN-COMPLETE and
3604 * is still hung; we have started a new asoc
3605 * but it won't complete until the shutdown
3609 SCTP_TCB_UNLOCK(locked_tcb);
3611 sctp_handle_ootb(m, iphlen, *offset, sh, inp,
3616 /* for all other chunks, vtag must match */
3617 if (vtag_in != asoc->my_vtag) {
3618 /* invalid vtag... */
3619 SCTPDBG(SCTP_DEBUG_INPUT3,
3620 "invalid vtag: %xh, expect %xh\n",
3621 vtag_in, asoc->my_vtag);
3622 SCTP_STAT_INCR(sctps_badvtag);
3624 SCTP_TCB_UNLOCK(locked_tcb);
3630 } /* end if !SCTP_COOKIE_ECHO */
3632 * process all control chunks...
3634 if (((ch->chunk_type == SCTP_SELECTIVE_ACK) ||
3635 (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) &&
3636 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
3637 /* implied cookie-ack.. we must have lost the ack */
3638 stcb->asoc.overall_error_count = 0;
3639 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb,
3642 process_control_chunks:
3643 while (IS_SCTP_CONTROL(ch)) {
3644 /* validate chunk length */
3645 chk_length = ntohs(ch->chunk_length);
3646 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n",
3647 ch->chunk_type, chk_length);
3648 SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length);
3649 if (chk_length < sizeof(*ch) ||
3650 (*offset + (int)chk_length) > length) {
3653 SCTP_TCB_UNLOCK(locked_tcb);
3657 SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks);
3659 * INIT-ACK only gets the init ack "header" portion only
3660 * because we don't have to process the peer's COOKIE. All
3661 * others get a complete chunk.
3663 if ((ch->chunk_type == SCTP_INITIATION_ACK) ||
3664 (ch->chunk_type == SCTP_INITIATION)) {
3665 /* get an init-ack chunk */
3666 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
3667 sizeof(struct sctp_init_ack_chunk), chunk_buf);
3671 SCTP_TCB_UNLOCK(locked_tcb);
3677 * For cookies and all other chunks. if the
3679 if (chk_length > sizeof(chunk_buf)) {
3681 * use just the size of the chunk buffer so
3682 * the front part of our chunks fit in
3683 * contiguous space up to the chunk buffer
3684 * size (508 bytes). For chunks that need to
3685 * get more than that they mus use the
3686 * sctp_m_getptr() function or other means
3687 * (know how to parse mbuf chains). Cookies
3690 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
3691 (sizeof(chunk_buf) - 4),
3696 SCTP_TCB_UNLOCK(locked_tcb);
3701 /* We can fit it all */
3702 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
3703 chk_length, chunk_buf);
3705 SCTP_PRINTF("sctp_process_control: Can't get the all data....\n");
3708 SCTP_TCB_UNLOCK(locked_tcb);
3715 /* Save off the last place we got a control from */
3717 if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) {
3719 * allow last_control to be NULL if
3720 * ASCONF... ASCONF processing will find the
3723 if ((netp != NULL) && (*netp != NULL))
3724 stcb->asoc.last_control_chunk_from = *netp;
3727 #ifdef SCTP_AUDITING_ENABLED
3728 sctp_audit_log(0xB0, ch->chunk_type);
3731 /* check to see if this chunk required auth, but isn't */
3732 if ((stcb != NULL) && !sctp_auth_disable &&
3733 sctp_auth_is_required_chunk(ch->chunk_type,
3734 stcb->asoc.local_auth_chunks) &&
3735 !stcb->asoc.authenticated) {
3736 /* "silently" ignore */
3737 SCTP_STAT_INCR(sctps_recvauthmissing);
3740 switch (ch->chunk_type) {
3741 case SCTP_INITIATION:
3742 /* must be first and only chunk */
3743 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n");
3744 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3745 /* We are not interested anymore? */
3746 if ((stcb) && (stcb->asoc.total_output_queue_size)) {
3748 * collision case where we are
3749 * sending to them too
3754 SCTP_TCB_UNLOCK(locked_tcb);
3760 if ((num_chunks > 1) ||
3761 (sctp_strict_init && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
3764 SCTP_TCB_UNLOCK(locked_tcb);
3768 if ((stcb != NULL) &&
3769 (SCTP_GET_STATE(&stcb->asoc) ==
3770 SCTP_STATE_SHUTDOWN_ACK_SENT)) {
3771 sctp_send_shutdown_ack(stcb,
3772 stcb->asoc.primary_destination);
3774 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC);
3776 SCTP_TCB_UNLOCK(locked_tcb);
3781 sctp_handle_init(m, iphlen, *offset, sh,
3782 (struct sctp_init_chunk *)ch, inp,
3783 stcb, *netp, &abort_no_unlock, vrf_id);
3785 if (abort_no_unlock)
3790 SCTP_TCB_UNLOCK(locked_tcb);
3794 case SCTP_PAD_CHUNK:
3796 case SCTP_INITIATION_ACK:
3797 /* must be first and only chunk */
3798 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT-ACK\n");
3799 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3800 /* We are not interested anymore */
3801 if ((stcb) && (stcb->asoc.total_output_queue_size)) {
3805 SCTP_TCB_UNLOCK(locked_tcb);
3809 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_26);
3814 if ((num_chunks > 1) ||
3815 (sctp_strict_init && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
3818 SCTP_TCB_UNLOCK(locked_tcb);
3822 if ((netp) && (*netp)) {
3823 ret = sctp_handle_init_ack(m, iphlen, *offset, sh,
3824 (struct sctp_init_ack_chunk *)ch, stcb, *netp, &abort_no_unlock, vrf_id);
3829 * Special case, I must call the output routine to
3830 * get the cookie echoed
3832 if (abort_no_unlock)
3835 if ((stcb) && ret == 0)
3836 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC);
3839 SCTP_TCB_UNLOCK(locked_tcb);
3843 case SCTP_SELECTIVE_ACK:
3844 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK\n");
3845 SCTP_STAT_INCR(sctps_recvsacks);
3847 struct sctp_sack_chunk *sack;
3849 uint32_t a_rwnd, cum_ack;
3853 if ((stcb == NULL) || (chk_length < sizeof(struct sctp_sack_chunk))) {
3854 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on sack chunk, too small\n");
3857 SCTP_TCB_UNLOCK(locked_tcb);
3861 sack = (struct sctp_sack_chunk *)ch;
3862 nonce_sum_flag = ch->chunk_flags & SCTP_SACK_NONCE_SUM;
3863 cum_ack = ntohl(sack->sack.cum_tsn_ack);
3864 num_seg = ntohs(sack->sack.num_gap_ack_blks);
3865 a_rwnd = (uint32_t) ntohl(sack->sack.a_rwnd);
3866 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
3871 stcb->asoc.seen_a_sack_this_pkt = 1;
3872 if ((stcb->asoc.pr_sctp_cnt == 0) &&
3874 ((compare_with_wrap(cum_ack, stcb->asoc.last_acked_seq, MAX_TSN)) ||
3875 (cum_ack == stcb->asoc.last_acked_seq)) &&
3876 (stcb->asoc.saw_sack_with_frags == 0) &&
3877 (!TAILQ_EMPTY(&stcb->asoc.sent_queue))
3880 * We have a SIMPLE sack having no
3881 * prior segments and data on sent
3882 * queue to be acked.. Use the
3883 * faster path sack processing. We
3884 * also allow window update sacks
3885 * with no missing segments to go
3888 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, nonce_sum_flag,
3892 sctp_handle_sack(sack, stcb, *netp, &abort_now, chk_length, a_rwnd);
3895 /* ABORT signal from sack processing */
3901 case SCTP_HEARTBEAT_REQUEST:
3902 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n");
3903 if ((stcb) && netp && *netp) {
3904 SCTP_STAT_INCR(sctps_recvheartbeat);
3905 sctp_send_heartbeat_ack(stcb, m, *offset,
3908 /* He's alive so give him credit */
3909 stcb->asoc.overall_error_count = 0;
3912 case SCTP_HEARTBEAT_ACK:
3913 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT-ACK\n");
3914 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) {
3918 SCTP_TCB_UNLOCK(locked_tcb);
3922 /* He's alive so give him credit */
3923 stcb->asoc.overall_error_count = 0;
3924 SCTP_STAT_INCR(sctps_recvheartbeatack);
3926 sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch,
3929 case SCTP_ABORT_ASSOCIATION:
3930 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n",
3932 if ((stcb) && netp && *netp)
3933 sctp_handle_abort((struct sctp_abort_chunk *)ch,
3939 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n",
3941 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) {
3944 SCTP_TCB_UNLOCK(locked_tcb);
3949 if (netp && *netp) {
3952 sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch,
3953 stcb, *netp, &abort_flag);
3960 case SCTP_SHUTDOWN_ACK:
3961 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-ACK, stcb %p\n", stcb);
3962 if ((stcb) && (netp) && (*netp))
3963 sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp);
3968 case SCTP_OPERATION_ERROR:
3969 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP-ERR\n");
3970 if ((stcb) && netp && *netp && sctp_handle_error(ch, stcb, *netp) < 0) {
3976 case SCTP_COOKIE_ECHO:
3977 SCTPDBG(SCTP_DEBUG_INPUT3,
3978 "SCTP_COOKIE-ECHO, stcb %p\n", stcb);
3979 if ((stcb) && (stcb->asoc.total_output_queue_size)) {
3982 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3983 /* We are not interested anymore */
3989 * First are we accepting? We do this again here
3990 * sincen it is possible that a previous endpoint
3991 * WAS listening responded to a INIT-ACK and then
3992 * closed. We opened and bound.. and are now no
3995 if (inp->sctp_socket->so_qlimit == 0) {
3996 if ((stcb) && (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3998 * special case, is this a retran'd
3999 * COOKIE-ECHO or a restarting assoc
4000 * that is a peeled off or
4001 * one-to-one style socket.
4003 goto process_cookie_anyway;
4005 sctp_abort_association(inp, stcb, m, iphlen,
4009 } else if (inp->sctp_socket->so_qlimit) {
4010 /* we are accepting so check limits like TCP */
4011 if (inp->sctp_socket->so_qlen >
4012 inp->sctp_socket->so_qlimit) {
4015 struct sctp_paramhdr *phdr;
4017 if (sctp_abort_if_one_2_one_hits_limit) {
4019 oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
4020 0, M_DONTWAIT, 1, MT_DATA);
4022 SCTP_BUF_LEN(oper) =
4023 sizeof(struct sctp_paramhdr);
4025 struct sctp_paramhdr *);
4027 htons(SCTP_CAUSE_OUT_OF_RESC);
4028 phdr->param_length =
4029 htons(sizeof(struct sctp_paramhdr));
4031 sctp_abort_association(inp, stcb, m,
4032 iphlen, sh, oper, vrf_id);
4038 process_cookie_anyway:
4040 struct mbuf *ret_buf;
4041 struct sctp_inpcb *linp;
4050 SCTP_ASOC_CREATE_LOCK(linp);
4054 sctp_handle_cookie_echo(m, iphlen,
4056 (struct sctp_cookie_echo_chunk *)ch,
4067 SCTP_ASOC_CREATE_UNLOCK(linp);
4069 if (ret_buf == NULL) {
4071 SCTP_TCB_UNLOCK(locked_tcb);
4073 SCTPDBG(SCTP_DEBUG_INPUT3,
4074 "GAK, null buffer\n");
4079 /* if AUTH skipped, see if it verified... */
4084 if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) {
4086 * Restart the timer if we have
4089 struct sctp_tmit_chunk *chk;
4091 chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
4093 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4094 stcb->sctp_ep, stcb,
4100 case SCTP_COOKIE_ACK:
4101 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE-ACK, stcb %p\n", stcb);
4102 if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) {
4104 SCTP_TCB_UNLOCK(locked_tcb);
4108 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4109 /* We are not interested anymore */
4110 if ((stcb) && (stcb->asoc.total_output_queue_size)) {
4113 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
4118 /* He's alive so give him credit */
4119 if ((stcb) && netp && *netp) {
4120 stcb->asoc.overall_error_count = 0;
4121 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp);
4125 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-ECHO\n");
4126 /* He's alive so give him credit */
4127 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_ecne_chunk))) {
4130 SCTP_TCB_UNLOCK(locked_tcb);
4136 stcb->asoc.overall_error_count = 0;
4137 sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch,
4142 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-CWR\n");
4143 /* He's alive so give him credit */
4144 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) {
4147 SCTP_TCB_UNLOCK(locked_tcb);
4153 stcb->asoc.overall_error_count = 0;
4154 sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb);
4157 case SCTP_SHUTDOWN_COMPLETE:
4158 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-COMPLETE, stcb %p\n", stcb);
4159 /* must be first and only chunk */
4160 if ((num_chunks > 1) ||
4161 (length - *offset > (int)SCTP_SIZE32(chk_length))) {
4164 SCTP_TCB_UNLOCK(locked_tcb);
4168 if ((stcb) && netp && *netp) {
4169 sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch,
4176 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n");
4177 /* He's alive so give him credit */
4179 stcb->asoc.overall_error_count = 0;
4180 sctp_handle_asconf(m, *offset,
4181 (struct sctp_asconf_chunk *)ch, stcb);
4184 case SCTP_ASCONF_ACK:
4185 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF-ACK\n");
4186 if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) {
4189 SCTP_TCB_UNLOCK(locked_tcb);
4194 if ((stcb) && netp && *netp) {
4195 /* He's alive so give him credit */
4196 stcb->asoc.overall_error_count = 0;
4197 sctp_handle_asconf_ack(m, *offset,
4198 (struct sctp_asconf_ack_chunk *)ch, stcb, *netp);
4201 case SCTP_FORWARD_CUM_TSN:
4202 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_FWD-TSN\n");
4203 if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) {
4206 SCTP_TCB_UNLOCK(locked_tcb);
4211 /* He's alive so give him credit */
4215 stcb->asoc.overall_error_count = 0;
4217 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4218 /* We are not interested anymore */
4219 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_28);
4223 sctp_handle_forward_tsn(stcb,
4224 (struct sctp_forward_tsn_chunk *)ch, &abort_flag);
4229 stcb->asoc.overall_error_count = 0;
4234 case SCTP_STREAM_RESET:
4235 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n");
4236 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4237 chk_length, chunk_buf);
4238 if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) {
4241 SCTP_TCB_UNLOCK(locked_tcb);
4246 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4247 /* We are not interested anymore */
4248 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_29);
4252 if (stcb->asoc.peer_supports_strreset == 0) {
4254 * hmm, peer should have announced this, but
4255 * we will turn it on since he is sending us
4258 stcb->asoc.peer_supports_strreset = 1;
4260 if (sctp_handle_stream_reset(stcb, (struct sctp_stream_reset_out_req *)ch)) {
4261 /* stop processing */
4266 case SCTP_PACKET_DROPPED:
4267 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n");
4268 /* re-get it all please */
4269 if (chk_length < sizeof(struct sctp_pktdrop_chunk)) {
4272 SCTP_TCB_UNLOCK(locked_tcb);
4277 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4278 chk_length, chunk_buf);
4280 if (ch && (stcb) && netp && (*netp)) {
4281 sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch,
4286 case SCTP_AUTHENTICATION:
4287 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n");
4288 if (sctp_auth_disable)
4292 /* save the first AUTH for later processing */
4293 if (auth_skipped == 0) {
4294 auth_offset = *offset;
4295 auth_len = chk_length;
4298 /* skip this chunk (temporarily) */
4301 if ((chk_length < (sizeof(struct sctp_auth_chunk))) ||
4302 (chk_length > (sizeof(struct sctp_auth_chunk) +
4303 SCTP_AUTH_DIGEST_LEN_MAX))) {
4306 SCTP_TCB_UNLOCK(locked_tcb);
4311 if (got_auth == 1) {
4312 /* skip this chunk... it's already auth'd */
4315 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4316 chk_length, chunk_buf);
4318 if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch,
4320 /* auth HMAC failed so dump the packet */
4324 /* remaining chunks are HMAC checked */
4325 stcb->asoc.authenticated = 1;
4331 /* it's an unknown chunk! */
4332 if ((ch->chunk_type & 0x40) && (stcb != NULL)) {
4334 struct sctp_paramhdr *phd;
4336 mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
4337 0, M_DONTWAIT, 1, MT_DATA);
4339 phd = mtod(mm, struct sctp_paramhdr *);
4341 * We cheat and use param type since
4342 * we did not bother to define a
4343 * error cause struct. They are the
4344 * same basic format with different
4347 phd->param_type = htons(SCTP_CAUSE_UNRECOG_CHUNK);
4348 phd->param_length = htons(chk_length + sizeof(*phd));
4349 SCTP_BUF_LEN(mm) = sizeof(*phd);
4350 SCTP_BUF_NEXT(mm) = SCTP_M_COPYM(m, *offset, SCTP_SIZE32(chk_length),
4352 if (SCTP_BUF_NEXT(mm)) {
4353 sctp_queue_op_err(stcb, mm);
4359 if ((ch->chunk_type & 0x80) == 0) {
4360 /* discard this packet */
4363 } /* else skip this bad chunk and continue... */
4365 } /* switch (ch->chunk_type) */
4369 /* get the next chunk */
4370 *offset += SCTP_SIZE32(chk_length);
4371 if (*offset >= length) {
4372 /* no more data left in the mbuf chain */
4375 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
4376 sizeof(struct sctp_chunkhdr), chunk_buf);
4379 SCTP_TCB_UNLOCK(locked_tcb);
4390 * Process the ECN bits we have something set so we must look to see if it is
4391 * ECN(0) or ECN(1) or CE
4393 static __inline void
4394 sctp_process_ecn_marked_a(struct sctp_tcb *stcb, struct sctp_nets *net,
4397 if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
4399 } else if ((ecn_bits & SCTP_ECT1_BIT) == SCTP_ECT1_BIT) {
4401 * we only add to the nonce sum for ECT1, ECT0 does not
4402 * change the NS bit (that we have yet to find a way to send
4406 /* ECN Nonce stuff */
4407 stcb->asoc.receiver_nonce_sum++;
4408 stcb->asoc.receiver_nonce_sum &= SCTP_SACK_NONCE_SUM;
4411 * Drag up the last_echo point if cumack is larger since we
4412 * don't want the point falling way behind by more than
4413 * 2^^31 and then having it be incorrect.
4415 if (compare_with_wrap(stcb->asoc.cumulative_tsn,
4416 stcb->asoc.last_echo_tsn, MAX_TSN)) {
4417 stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
4419 } else if ((ecn_bits & SCTP_ECT0_BIT) == SCTP_ECT0_BIT) {
4421 * Drag up the last_echo point if cumack is larger since we
4422 * don't want the point falling way behind by more than
4423 * 2^^31 and then having it be incorrect.
4425 if (compare_with_wrap(stcb->asoc.cumulative_tsn,
4426 stcb->asoc.last_echo_tsn, MAX_TSN)) {
4427 stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
4432 static __inline void
4433 sctp_process_ecn_marked_b(struct sctp_tcb *stcb, struct sctp_nets *net,
4434 uint32_t high_tsn, uint8_t ecn_bits)
4436 if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
4438 * we possibly must notify the sender that a congestion
4439 * window reduction is in order. We do this by adding a ECNE
4440 * chunk to the output chunk queue. The incoming CWR will
4441 * remove this chunk.
4443 if (compare_with_wrap(high_tsn, stcb->asoc.last_echo_tsn,
4445 /* Yep, we need to add a ECNE */
4446 sctp_send_ecn_echo(stcb, net, high_tsn);
4447 stcb->asoc.last_echo_tsn = high_tsn;
4453 * common input chunk processing (v4 and v6)
4456 sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset,
4457 int length, struct sctphdr *sh, struct sctp_chunkhdr *ch,
4458 struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net,
4459 uint8_t ecn_bits, uint32_t vrf_id)
4462 * Control chunk processing
4465 int fwd_tsn_seen = 0, data_processed = 0;
4466 struct mbuf *m = *mm;
4470 SCTP_STAT_INCR(sctps_recvdatagrams);
4471 #ifdef SCTP_AUDITING_ENABLED
4472 sctp_audit_log(0xE0, 1);
4473 sctp_auditing(0, inp, stcb, net);
4476 SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d\n",
4480 /* always clear this before beginning a packet */
4481 stcb->asoc.authenticated = 0;
4482 stcb->asoc.seen_a_sack_this_pkt = 0;
4484 if (IS_SCTP_CONTROL(ch)) {
4485 /* process the control portion of the SCTP packet */
4486 /* sa_ignore NO_NULL_CHK */
4487 stcb = sctp_process_control(m, iphlen, &offset, length, sh, ch,
4488 inp, stcb, &net, &fwd_tsn_seen, vrf_id);
4491 * This covers us if the cookie-echo was there and
4492 * it changes our INP.
4494 inp = stcb->sctp_ep;
4498 * no control chunks, so pre-process DATA chunks (these
4499 * checks are taken care of by control processing)
4503 * if DATA only packet, and auth is required, then punt...
4504 * can't have authenticated without any AUTH (control)
4507 if ((stcb != NULL) && !sctp_auth_disable &&
4508 sctp_auth_is_required_chunk(SCTP_DATA,
4509 stcb->asoc.local_auth_chunks)) {
4510 /* "silently" ignore */
4511 SCTP_STAT_INCR(sctps_recvauthmissing);
4512 SCTP_TCB_UNLOCK(stcb);
4516 /* out of the blue DATA chunk */
4517 sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
4521 if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) {
4522 /* v_tag mismatch! */
4523 SCTP_STAT_INCR(sctps_badvtag);
4524 SCTP_TCB_UNLOCK(stcb);
4531 * no valid TCB for this packet, or we found it's a bad
4532 * packet while processing control, or we're done with this
4533 * packet (done or skip rest of data), so we drop it...
4538 * DATA chunk processing
4540 /* plow through the data chunks while length > offset */
4543 * Rest should be DATA only. Check authentication state if AUTH for
4546 if ((length > offset) && (stcb != NULL) && !sctp_auth_disable &&
4547 sctp_auth_is_required_chunk(SCTP_DATA,
4548 stcb->asoc.local_auth_chunks) &&
4549 !stcb->asoc.authenticated) {
4550 /* "silently" ignore */
4551 SCTP_STAT_INCR(sctps_recvauthmissing);
4552 SCTPDBG(SCTP_DEBUG_AUTH1,
4553 "Data chunk requires AUTH, skipped\n");
4556 if (length > offset) {
4560 * First check to make sure our state is correct. We would
4561 * not get here unless we really did have a tag, so we don't
4562 * abort if this happens, just dump the chunk silently.
4564 switch (SCTP_GET_STATE(&stcb->asoc)) {
4565 case SCTP_STATE_COOKIE_ECHOED:
4567 * we consider data with valid tags in this state
4568 * shows us the cookie-ack was lost. Imply it was
4571 stcb->asoc.overall_error_count = 0;
4572 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net);
4574 case SCTP_STATE_COOKIE_WAIT:
4576 * We consider OOTB any data sent during asoc setup.
4578 sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
4580 SCTP_TCB_UNLOCK(stcb);
4583 case SCTP_STATE_EMPTY: /* should not happen */
4584 case SCTP_STATE_INUSE: /* should not happen */
4585 case SCTP_STATE_SHUTDOWN_RECEIVED: /* This is a peer error */
4586 case SCTP_STATE_SHUTDOWN_ACK_SENT:
4588 SCTP_TCB_UNLOCK(stcb);
4591 case SCTP_STATE_OPEN:
4592 case SCTP_STATE_SHUTDOWN_SENT:
4595 /* take care of ECN, part 1. */
4596 if (stcb->asoc.ecn_allowed &&
4597 (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
4598 sctp_process_ecn_marked_a(stcb, net, ecn_bits);
4600 /* plow through the data chunks while length > offset */
4601 retval = sctp_process_data(mm, iphlen, &offset, length, sh,
4602 inp, stcb, net, &high_tsn);
4605 * The association aborted, NO UNLOCK needed since
4606 * the association is destroyed.
4612 /* take care of ecn part 2. */
4613 if (stcb->asoc.ecn_allowed &&
4614 (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
4615 sctp_process_ecn_marked_b(stcb, net, high_tsn,
4620 * Anything important needs to have been m_copy'ed in
4624 if ((data_processed == 0) && (fwd_tsn_seen)) {
4627 if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
4628 stcb->asoc.cumulative_tsn, MAX_TSN)) {
4629 /* there was a gap before this data was processed */
4632 sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
4634 /* Again, we aborted so NO UNLOCK needed */
4638 /* trigger send of any chunks in queue... */
4640 #ifdef SCTP_AUDITING_ENABLED
4641 sctp_audit_log(0xE0, 2);
4642 sctp_auditing(1, inp, stcb, net);
4644 SCTPDBG(SCTP_DEBUG_INPUT1,
4645 "Check for chunk output prw:%d tqe:%d tf=%d\n",
4646 stcb->asoc.peers_rwnd,
4647 TAILQ_EMPTY(&stcb->asoc.control_send_queue),
4648 stcb->asoc.total_flight);
4649 un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
4651 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue) ||
4653 (stcb->asoc.peers_rwnd > 0 ||
4654 (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) {
4655 SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n");
4656 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC);
4657 SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n");
4659 #ifdef SCTP_AUDITING_ENABLED
4660 sctp_audit_log(0xE0, 3);
4661 sctp_auditing(2, inp, stcb, net);
4663 SCTP_TCB_UNLOCK(stcb);
4670 sctp_input(i_pak, off)
4675 #ifdef SCTP_MBUF_LOGGING
4681 uint32_t vrf_id = 0;
4685 struct sctp_inpcb *inp = NULL;
4687 uint32_t check, calc_check;
4688 struct sctp_nets *net;
4689 struct sctp_tcb *stcb = NULL;
4690 struct sctp_chunkhdr *ch;
4691 int refcount_up = 0;
4692 int length, mlen, offset;
4695 if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) {
4696 SCTP_RELEASE_PKT(i_pak);
4699 mlen = SCTP_HEADER_LEN(i_pak);
4701 m = SCTP_HEADER_TO_CHAIN(i_pak);
4704 SCTP_STAT_INCR(sctps_recvpackets);
4705 SCTP_STAT_INCR_COUNTER64(sctps_inpackets);
4708 #ifdef SCTP_MBUF_LOGGING
4709 /* Log in any input mbufs */
4710 if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) {
4713 if (SCTP_BUF_IS_EXTENDED(mat)) {
4714 sctp_log_mb(mat, SCTP_MBUF_INPUT);
4716 mat = SCTP_BUF_NEXT(mat);
4720 #ifdef SCTP_PACKET_LOGGING
4721 sctp_packet_log(m, mlen);
4724 * Must take out the iphlen, since mlen expects this (only effect lb
4730 * Get IP, SCTP, and first chunk header together in first mbuf.
4732 ip = mtod(m, struct ip *);
4733 offset = iphlen + sizeof(*sh) + sizeof(*ch);
4734 if (SCTP_BUF_LEN(m) < offset) {
4735 if ((m = m_pullup(m, offset)) == 0) {
4736 SCTP_STAT_INCR(sctps_hdrops);
4739 ip = mtod(m, struct ip *);
4741 sh = (struct sctphdr *)((caddr_t)ip + iphlen);
4742 ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(*sh));
4744 /* SCTP does not allow broadcasts or multicasts */
4745 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
4748 if (SCTP_IS_IT_BROADCAST(ip->ip_dst, m)) {
4750 * We only look at broadcast if its a front state, All
4751 * others we will not have a tcb for anyway.
4755 /* validate SCTP checksum */
4756 if ((sctp_no_csum_on_loopback == 0) || !SCTP_IS_IT_LOOPBACK(m)) {
4758 * we do NOT validate things from the loopback if the sysctl
4761 check = sh->checksum; /* save incoming checksum */
4762 if ((check == 0) && (sctp_no_csum_on_loopback)) {
4764 * special hook for where we got a local address
4765 * somehow routed across a non IFT_LOOP type
4768 if (ip->ip_src.s_addr == ip->ip_dst.s_addr)
4769 goto sctp_skip_csum_4;
4771 sh->checksum = 0; /* prepare for calc */
4772 calc_check = sctp_calculate_sum(m, &mlen, iphlen);
4773 if (calc_check != check) {
4774 SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x m:%p mlen:%d iphlen:%d\n",
4775 calc_check, check, m, mlen, iphlen);
4777 stcb = sctp_findassociation_addr(m, iphlen,
4778 offset - sizeof(*ch),
4781 if ((inp) && (stcb)) {
4782 sctp_send_packet_dropped(stcb, net, m, iphlen, 1);
4783 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR);
4784 } else if ((inp != NULL) && (stcb == NULL)) {
4787 SCTP_STAT_INCR(sctps_badsum);
4788 SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
4791 sh->checksum = calc_check;
4794 /* destination port of 0 is illegal, based on RFC2960. */
4795 if (sh->dest_port == 0) {
4796 SCTP_STAT_INCR(sctps_hdrops);
4799 /* validate mbuf chain length with IP payload length */
4800 if (mlen < (ip->ip_len - iphlen)) {
4801 SCTP_STAT_INCR(sctps_hdrops);
4805 * Locate pcb and tcb for datagram sctp_findassociation_addr() wants
4806 * IP/SCTP/first chunk header...
4808 stcb = sctp_findassociation_addr(m, iphlen, offset - sizeof(*ch),
4809 sh, ch, &inp, &net, vrf_id);
4810 /* inp's ref-count increased && stcb locked */
4812 struct sctp_init_chunk *init_chk, chunk_buf;
4814 SCTP_STAT_INCR(sctps_noport);
4817 * we use the bandwidth limiting to protect against sending
4818 * too many ABORTS all at once. In this case these count the
4819 * same as an ICMP message.
4821 if (badport_bandlim(0) < 0)
4823 #endif /* ICMP_BANDLIM */
4824 SCTPDBG(SCTP_DEBUG_INPUT1,
4825 "Sending a ABORT from packet entry!\n");
4826 if (ch->chunk_type == SCTP_INITIATION) {
4828 * we do a trick here to get the INIT tag, dig in
4829 * and get the tag from the INIT and put it in the
4832 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4833 iphlen + sizeof(*sh), sizeof(*init_chk),
4834 (uint8_t *) & chunk_buf);
4835 if (init_chk != NULL)
4836 sh->v_tag = init_chk->init.initiate_tag;
4838 if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
4839 sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id);
4842 if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) {
4845 if (ch->chunk_type != SCTP_ABORT_ASSOCIATION)
4846 sctp_send_abort(m, iphlen, sh, 0, NULL, vrf_id);
4848 } else if (stcb == NULL) {
4853 * I very much doubt any of the IPSEC stuff will work but I have no
4854 * idea, so I will leave it in place.
4857 if (inp && ipsec4_in_reject(m, &inp->ip_inp.inp)) {
4858 ipsecstat.in_polvio++;
4859 SCTP_STAT_INCR(sctps_hdrops);
4865 * common chunk processing
4867 length = ip->ip_len + iphlen;
4868 offset -= sizeof(struct sctp_chunkhdr);
4870 ecn_bits = ip->ip_tos;
4872 /* sa_ignore NO_NULL_CHK */
4873 sctp_common_input_processing(&m, iphlen, offset, length, sh, ch,
4874 inp, stcb, net, ecn_bits, vrf_id);
4875 /* inp's ref-count reduced && stcb unlocked */
4879 if ((inp) && (refcount_up)) {
4880 /* reduce ref-count */
4881 SCTP_INP_WLOCK(inp);
4882 SCTP_INP_DECR_REF(inp);
4883 SCTP_INP_WUNLOCK(inp);
4888 SCTP_TCB_UNLOCK(stcb);
4890 if ((inp) && (refcount_up)) {
4891 /* reduce ref-count */
4892 SCTP_INP_WLOCK(inp);
4893 SCTP_INP_DECR_REF(inp);
4894 SCTP_INP_WUNLOCK(inp);