1 /*-
2  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_var.h>
38 #include <netinet/sctp_sysctl.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctputil.h>
42 #include <netinet/sctp_output.h>
43 #include <netinet/sctp_input.h>
44 #include <netinet/sctp_indata.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
47
48
49 /*
50  * NOTES: On the outbound side of things I need to check the sack timer to
51  * see if I should generate a SACK into the chunk queue (if I have data to
52  * send, that is, and will be sending it, for bundling).
53  *
54  * The callback in sctp_usrreq.c will get called when the socket is read from.
55  * This will cause sctp_service_queues() to get called on the top entry in
56  * the list.
57  */
58
59 void
60 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
61 {
62         asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
63 }
64
65 /* Calculate what the rwnd would be */
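/*
 * Roughly: if nothing at all is buffered for this association, grant the
 * full receive buffer (but never less than SCTP_MINIMAL_RWND).  Otherwise
 * start from the free space in so_rcv, subtract what is still held on the
 * reassembly and stream queues (data bytes plus MSIZE of mbuf overhead per
 * chunk) and the rwnd control overhead, and clamp to 1 when the control
 * overhead would otherwise swallow the window.
 */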
66 uint32_t
67 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
68 {
69         uint32_t calc = 0;
70
71         /*
72          * This is really set wrong with respect to a 1-to-many socket, since
73          * sb_cc is the count that everyone has put up. When we rewrite
74          * sctp_soreceive we will fix this so that ONLY this
75          * association's data is taken into account.
76          */
77         if (stcb->sctp_socket == NULL)
78                 return (calc);
79
80         if (stcb->asoc.sb_cc == 0 &&
81             asoc->size_on_reasm_queue == 0 &&
82             asoc->size_on_all_streams == 0) {
83                 /* Full rwnd granted */
84                 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
85                 return (calc);
86         }
87         /* get actual space */
88         calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
89
90         /*
91          * Take out what has NOT been put on the socket queue and that we
92          * still hold for putting up.
93          */
94         calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
95             asoc->cnt_on_reasm_queue * MSIZE));
96         calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
97             asoc->cnt_on_all_streams * MSIZE));
98
99         if (calc == 0) {
100                 /* out of space */
101                 return (calc);
102         }
103         /* what is the overhead of all these rwnd's */
104         calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
105         /*
106          * If the window gets too small due to ctrl-stuff, reduce it to 1,
107          * even if it is 0 (SWS avoidance engaged).
108          */
109         if (calc < stcb->asoc.my_rwnd_control_len) {
110                 calc = 1;
111         }
112         return (calc);
113 }
114
115
116
117 /*
118  * Build out our readq entry based on the incoming packet.
119  */
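/*
 * The entry carries the receive-side metadata (stream, SSN, TSN, PPID and
 * the chunk flags shifted into the upper byte of sinfo_flags) together with
 * the mbuf chain; length and tail_mbuf start out empty and are filled in as
 * data is appended, and a reference is taken on the source net.
 */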
120 struct sctp_queued_to_read *
121 sctp_build_readq_entry(struct sctp_tcb *stcb,
122     struct sctp_nets *net,
123     uint32_t tsn, uint32_t ppid,
124     uint32_t context, uint16_t stream_no,
125     uint16_t stream_seq, uint8_t flags,
126     struct mbuf *dm)
127 {
128         struct sctp_queued_to_read *read_queue_e = NULL;
129
130         sctp_alloc_a_readq(stcb, read_queue_e);
131         if (read_queue_e == NULL) {
132                 goto failed_build;
133         }
134         read_queue_e->sinfo_stream = stream_no;
135         read_queue_e->sinfo_ssn = stream_seq;
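        /* DATA chunk flags are carried in the upper byte of sinfo_flags. */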
136         read_queue_e->sinfo_flags = (flags << 8);
137         read_queue_e->sinfo_ppid = ppid;
138         read_queue_e->sinfo_context = context;
139         read_queue_e->sinfo_timetolive = 0;
140         read_queue_e->sinfo_tsn = tsn;
141         read_queue_e->sinfo_cumtsn = tsn;
142         read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
143         read_queue_e->whoFrom = net;
144         read_queue_e->length = 0;
145         atomic_add_int(&net->ref_count, 1);
146         read_queue_e->data = dm;
147         read_queue_e->spec_flags = 0;
148         read_queue_e->tail_mbuf = NULL;
149         read_queue_e->aux_data = NULL;
150         read_queue_e->stcb = stcb;
151         read_queue_e->port_from = stcb->rport;
152         read_queue_e->do_not_ref_stcb = 0;
153         read_queue_e->end_added = 0;
154         read_queue_e->some_taken = 0;
155         read_queue_e->pdapi_aborted = 0;
156 failed_build:
157         return (read_queue_e);
158 }
159
160
161 /*
162  * Build out our readq entry based on the incoming chunk.
163  */
164 static struct sctp_queued_to_read *
165 sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
166     struct sctp_tmit_chunk *chk)
167 {
168         struct sctp_queued_to_read *read_queue_e = NULL;
169
170         sctp_alloc_a_readq(stcb, read_queue_e);
171         if (read_queue_e == NULL) {
172                 goto failed_build;
173         }
174         read_queue_e->sinfo_stream = chk->rec.data.stream_number;
175         read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
176         read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
177         read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
178         read_queue_e->sinfo_context = stcb->asoc.context;
179         read_queue_e->sinfo_timetolive = 0;
180         read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
181         read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
182         read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
183         read_queue_e->whoFrom = chk->whoTo;
184         read_queue_e->aux_data = NULL;
185         read_queue_e->length = 0;
186         atomic_add_int(&chk->whoTo->ref_count, 1);
187         read_queue_e->data = chk->data;
188         read_queue_e->tail_mbuf = NULL;
189         read_queue_e->stcb = stcb;
190         read_queue_e->port_from = stcb->rport;
191         read_queue_e->spec_flags = 0;
192         read_queue_e->do_not_ref_stcb = 0;
193         read_queue_e->end_added = 0;
194         read_queue_e->some_taken = 0;
195         read_queue_e->pdapi_aborted = 0;
196 failed_build:
197         return (read_queue_e);
198 }
199
200
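/*
 * Build the ancillary-data mbuf handed up with a received message.  Up to
 * three cmsgs are packed back to back, each only if the corresponding socket
 * option is enabled: SCTP_RCVINFO, SCTP_NXTINFO (only when the next message
 * is already known), and SCTP_SNDRCV or SCTP_EXTRCV.  The buffer is zeroed
 * first so that no uninitialized padding leaks to userland.
 */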
201 struct mbuf *
202 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
203 {
204         struct sctp_extrcvinfo *seinfo;
205         struct sctp_sndrcvinfo *outinfo;
206         struct sctp_rcvinfo *rcvinfo;
207         struct sctp_nxtinfo *nxtinfo;
208         struct cmsghdr *cmh;
209         struct mbuf *ret;
210         int len;
211         int use_extended;
212         int provide_nxt;
213
214         if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
215             sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
216             sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
217                 /* user does not want any ancillary data */
218                 return (NULL);
219         }
220         len = 0;
221         if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
222                 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
223         }
224         seinfo = (struct sctp_extrcvinfo *)sinfo;
225         if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
226             (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
227                 provide_nxt = 1;
228                 len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
229         } else {
230                 provide_nxt = 0;
231         }
232         if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
233                 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
234                         use_extended = 1;
235                         len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
236                 } else {
237                         use_extended = 0;
238                         len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
239                 }
240         } else {
241                 use_extended = 0;
242         }
243
244         ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
245         if (ret == NULL) {
246                 /* No space */
247                 return (ret);
248         }
249         SCTP_BUF_LEN(ret) = 0;
250
251         /* We need a CMSG header followed by the struct */
252         cmh = mtod(ret, struct cmsghdr *);
253         /*
254          * Make sure that there is no un-initialized padding between the
255          * cmsg header and cmsg data and after the cmsg data.
256          */
257         memset(cmh, 0, len);
258         if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
259                 cmh->cmsg_level = IPPROTO_SCTP;
260                 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
261                 cmh->cmsg_type = SCTP_RCVINFO;
262                 rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
263                 rcvinfo->rcv_sid = sinfo->sinfo_stream;
264                 rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
265                 rcvinfo->rcv_flags = sinfo->sinfo_flags;
266                 rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
267                 rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
268                 rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
269                 rcvinfo->rcv_context = sinfo->sinfo_context;
270                 rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
271                 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
272                 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
273         }
274         if (provide_nxt) {
275                 cmh->cmsg_level = IPPROTO_SCTP;
276                 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
277                 cmh->cmsg_type = SCTP_NXTINFO;
278                 nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
279                 nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
280                 nxtinfo->nxt_flags = 0;
281                 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
282                         nxtinfo->nxt_flags |= SCTP_UNORDERED;
283                 }
284                 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
285                         nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
286                 }
287                 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
288                         nxtinfo->nxt_flags |= SCTP_COMPLETE;
289                 }
290                 nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
291                 nxtinfo->nxt_length = seinfo->serinfo_next_length;
292                 nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
293                 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
294                 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
295         }
296         if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
297                 cmh->cmsg_level = IPPROTO_SCTP;
298                 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
299                 if (use_extended) {
300                         cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
301                         cmh->cmsg_type = SCTP_EXTRCV;
302                         memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
303                         SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
304                 } else {
305                         cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
306                         cmh->cmsg_type = SCTP_SNDRCV;
307                         *outinfo = *sinfo;
308                         SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
309                 }
310         }
311         return (ret);
312 }
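/*
 * For reference, a receiving application would typically walk the cmsgs
 * built above with something like the sketch below (userland code, assuming
 * a struct msghdr msg filled in by recvmsg() and local rcv/nxt structures):
 *
 *	struct cmsghdr *cmsg;
 *	struct sctp_rcvinfo rcv;
 *	struct sctp_nxtinfo nxt;
 *
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
 *	    cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *		if (cmsg->cmsg_level != IPPROTO_SCTP)
 *			continue;
 *		switch (cmsg->cmsg_type) {
 *		case SCTP_RCVINFO:
 *			memcpy(&rcv, CMSG_DATA(cmsg), sizeof(rcv));
 *			break;
 *		case SCTP_NXTINFO:
 *			memcpy(&nxt, CMSG_DATA(cmsg), sizeof(nxt));
 *			break;
 *		}
 *	}
 */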
313
314
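/*
 * Move a TSN from the renegable mapping array to the non-renegable one so
 * that a later SACK can no longer renege on data that has already been
 * handed towards the socket buffer.  Only relevant when draining
 * (sctp_do_drain) is enabled; if the moved TSN was the highest in the
 * renegable map, walk back down to find the new highest.
 */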
315 static void
316 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
317 {
318         uint32_t gap, i, cumackp1;
319         int fnd = 0;
320
321         if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
322                 return;
323         }
324         cumackp1 = asoc->cumulative_tsn + 1;
325         if (SCTP_TSN_GT(cumackp1, tsn)) {
326                 /*
327                  * this TSN is behind the cumulative ack, so we don't need to
328                  * worry about it moving from one mapping array to the other.
329                  */
330                 return;
331         }
332         SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
333         if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
334                 SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
335                 sctp_print_mapping_array(asoc);
336 #ifdef INVARIANTS
337                 panic("Things are really messed up now!!");
338 #endif
339         }
340         SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
341         SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
342         if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
343                 asoc->highest_tsn_inside_nr_map = tsn;
344         }
345         if (tsn == asoc->highest_tsn_inside_map) {
346                 /* We must back down to see what the new highest is */
347                 for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
348                         SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
349                         if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
350                                 asoc->highest_tsn_inside_map = i;
351                                 fnd = 1;
352                                 break;
353                         }
354                 }
355                 if (!fnd) {
356                         asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
357                 }
358         }
359 }
360
361
362 /*
363  * We are delivering currently from the reassembly queue. We must continue to
364  * deliver until we either: 1) run out of space, 2) run out of sequential
365  * TSNs, or 3) hit the SCTP_DATA_LAST_FRAG flag.
366  */
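/*
 * For a fragmented user message this implements partial delivery: the FIRST
 * fragment creates a read-queue entry (saved in control_pdapi) and every
 * following fragment is appended to that entry, until the LAST fragment
 * clears fragmented_delivery_inprogress and any in-order data queued on the
 * stream is pushed up as well.
 */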
367 static void
368 sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
369 {
370         struct sctp_tmit_chunk *chk, *nchk;
371         uint16_t nxt_todel;
372         uint16_t stream_no;
373         int end = 0;
374         int cntDel;
375         struct sctp_queued_to_read *control, *ctl, *nctl;
376
377         if (stcb == NULL)
378                 return;
379
380         cntDel = stream_no = 0;
381         if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
382             (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
383             (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
384                 /* socket above is long gone or going.. */
385 abandon:
386                 asoc->fragmented_delivery_inprogress = 0;
387                 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
388                         TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
389                         asoc->size_on_reasm_queue -= chk->send_size;
390                         sctp_ucount_decr(asoc->cnt_on_reasm_queue);
391                         /*
392                          * Lose the data pointer, since it's in the socket
393                          * buffer
394                          */
395                         if (chk->data) {
396                                 sctp_m_freem(chk->data);
397                                 chk->data = NULL;
398                         }
399                         /* Now free the address and data */
400                         sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
401                         /* sa_ignore FREED_MEMORY */
402                 }
403                 return;
404         }
405         SCTP_TCB_LOCK_ASSERT(stcb);
406         TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
407                 if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
408                         /* Can't deliver more :< */
409                         return;
410                 }
411                 stream_no = chk->rec.data.stream_number;
412                 nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
413                 if (nxt_todel != chk->rec.data.stream_seq &&
414                     (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
415                         /*
416                          * Not the next sequence to deliver in its stream
417                          * and not unordered, so we cannot deliver it yet.
418                          */
419                         return;
420                 }
421                 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
422
423                         control = sctp_build_readq_entry_chk(stcb, chk);
424                         if (control == NULL) {
425                                 /* out of memory? */
426                                 return;
427                         }
428                         /* save it off for our future deliveries */
429                         stcb->asoc.control_pdapi = control;
430                         if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
431                                 end = 1;
432                         else
433                                 end = 0;
434                         sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
435                         sctp_add_to_readq(stcb->sctp_ep,
436                             stcb, control, &stcb->sctp_socket->so_rcv, end,
437                             SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
438                         cntDel++;
439                 } else {
440                         if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
441                                 end = 1;
442                         else
443                                 end = 0;
444                         sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
445                         if (sctp_append_to_readq(stcb->sctp_ep, stcb,
446                             stcb->asoc.control_pdapi,
447                             chk->data, end, chk->rec.data.TSN_seq,
448                             &stcb->sctp_socket->so_rcv)) {
449                                 /*
450                                  * something is very wrong, either
451                                  * control_pdapi is NULL, or the tail_mbuf
452                                  * is corrupt, or there is an EOM already on
453                                  * the mbuf chain.
454                                  */
455                                 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
456                                         goto abandon;
457                                 } else {
458 #ifdef INVARIANTS
459                                         if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
460                                                 panic("This should not happen control_pdapi NULL?");
461                                         }
462                                         /* if we did not panic, it was an EOM */
463                                         panic("Bad chunking ??");
464 #else
465                                         if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
466                                                 SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
467                                         }
468                                         SCTP_PRINTF("Bad chunking ??\n");
469                                         SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");
470
471 #endif
472                                         goto abandon;
473                                 }
474                         }
475                         cntDel++;
476                 }
477                 /* pull it we did it */
478                 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
479                 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
480                         asoc->fragmented_delivery_inprogress = 0;
481                         if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
482                                 asoc->strmin[stream_no].last_sequence_delivered++;
483                         }
484                         if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
485                                 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
486                         }
487                 } else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
488                         /*
489                          * turn the flag back on since we just delivered
490                          * yet another one.
491                          */
492                         asoc->fragmented_delivery_inprogress = 1;
493                 }
494                 asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
495                 asoc->last_flags_delivered = chk->rec.data.rcv_flags;
496                 asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
497                 asoc->last_strm_no_delivered = chk->rec.data.stream_number;
498
499                 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
500                 asoc->size_on_reasm_queue -= chk->send_size;
501                 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
502                 /* free up the chk */
503                 chk->data = NULL;
504                 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
505
506                 if (asoc->fragmented_delivery_inprogress == 0) {
507                         /*
508                          * Now lets see if we can deliver the next one on
509                          * the stream
510                          */
511                         struct sctp_stream_in *strm;
512
513                         strm = &asoc->strmin[stream_no];
514                         nxt_todel = strm->last_sequence_delivered + 1;
515                         TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
516                                 /* Deliver more if we can. */
517                                 if (nxt_todel == ctl->sinfo_ssn) {
518                                         TAILQ_REMOVE(&strm->inqueue, ctl, next);
519                                         asoc->size_on_all_streams -= ctl->length;
520                                         sctp_ucount_decr(asoc->cnt_on_all_streams);
521                                         strm->last_sequence_delivered++;
522                                         sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
523                                         sctp_add_to_readq(stcb->sctp_ep, stcb,
524                                             ctl,
525                                             &stcb->sctp_socket->so_rcv, 1,
526                                             SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
527                                 } else {
528                                         break;
529                                 }
530                                 nxt_todel = strm->last_sequence_delivered + 1;
531                         }
532                         break;
533                 }
534         }
535 }
536
537 /*
538  * Queue the chunk either right into the socket buffer if it is the next one
539  * to go OR put it in the correct place in the delivery queue.  If we do
540  * append to the so_buf, keep doing so until we are out of order. One big
541  * question still remains, what to do when the socket buffer is FULL??
542  */
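/*
 * The decision is driven by last_sequence_delivered: if the arriving SSN is
 * exactly last_sequence_delivered + 1 it goes straight to the socket buffer
 * (and we keep draining the in-order prefix of the stream's inqueue),
 * otherwise it is inserted into the inqueue sorted by SSN.
 */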
543 static void
544 sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
545     struct sctp_queued_to_read *control, int *abort_flag)
546 {
547         /*
548          * FIX-ME maybe? What happens when the ssn wraps? If we are getting
549          * all the data in one stream this could happen quite rapidly. One
550          * could use the TSN to keep track of things, but this scheme breaks
551          * down in the other type of stream usage that could occur. Send a
552          * single msg to stream 0, send 4 billion messages to stream 1, now
553          * send a message to stream 0. You have a situation where the TSN
554          * has wrapped but not in the stream. Is this worth worrying about,
555          * or should we just change our queue sort at the bottom to be by
556          * TSN?
557          * 
558          * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
559          * with TSN 1? If the peer is doing some sort of funky TSN/SSN
560          * assignment this could happen... and I don't see how this would be
561          * a violation. So for now I am undecided and will leave the sort by
562          * SSN alone. Maybe a hybrid approach is the answer.
563          * 
564          */
565         struct sctp_stream_in *strm;
566         struct sctp_queued_to_read *at;
567         int queue_needed;
568         uint16_t nxt_todel;
569         struct mbuf *op_err;
570         char msg[SCTP_DIAG_INFO_LEN];
571
572         queue_needed = 1;
573         asoc->size_on_all_streams += control->length;
574         sctp_ucount_incr(asoc->cnt_on_all_streams);
575         strm = &asoc->strmin[control->sinfo_stream];
576         nxt_todel = strm->last_sequence_delivered + 1;
577         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
578                 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
579         }
580         SCTPDBG(SCTP_DEBUG_INDATA1,
581             "queue to stream called for sid:%u ssn:%u tsn:%u lastdel:%u nxt:%u\n",
582             (uint32_t) control->sinfo_stream, (uint32_t) control->sinfo_ssn,
583             (uint32_t) control->sinfo_tsn,
584             (uint32_t) strm->last_sequence_delivered, (uint32_t) nxt_todel);
585         if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
586                 /* The incoming sseq is behind where we last delivered? */
587                 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
588                     control->sinfo_ssn, strm->last_sequence_delivered);
589 protocol_error:
590                 /*
591                  * throw it in the stream so it gets cleaned up in
592                  * association destruction
593                  */
594                 TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
595                 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
596                     strm->last_sequence_delivered, control->sinfo_tsn,
597                     control->sinfo_stream, control->sinfo_ssn);
598                 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
599                 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
600                 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
601                 *abort_flag = 1;
602                 return;
603
604         }
605 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
606         struct socket *so;
607
608         so = SCTP_INP_SO(stcb->sctp_ep);
609         atomic_add_int(&stcb->asoc.refcnt, 1);
610         SCTP_TCB_UNLOCK(stcb);
611         SCTP_SOCKET_LOCK(so, 1);
612         SCTP_TCB_LOCK(stcb);
613         atomic_subtract_int(&stcb->asoc.refcnt, 1);
614         if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
615                 SCTP_SOCKET_UNLOCK(so, 1);
616                 return;
617         }
618 #endif
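        /*
         * On platforms that take the socket lock here, the TCB lock is
         * dropped before acquiring it and re-taken afterwards to preserve
         * the lock ordering; the refcount bump keeps the stcb from going
         * away while it is unlocked.
         */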
619         if (nxt_todel == control->sinfo_ssn) {
620                 /* can be delivered right away? */
621                 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
622                         sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
623                 }
624                 /* EY it won't be queued if it could be delivered directly */
625                 queue_needed = 0;
626                 asoc->size_on_all_streams -= control->length;
627                 sctp_ucount_decr(asoc->cnt_on_all_streams);
628                 strm->last_sequence_delivered++;
629
630                 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
631                 sctp_add_to_readq(stcb->sctp_ep, stcb,
632                     control,
633                     &stcb->sctp_socket->so_rcv, 1,
634                     SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
635                 TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
636                         /* all delivered */
637                         nxt_todel = strm->last_sequence_delivered + 1;
638                         if (nxt_todel == control->sinfo_ssn) {
639                                 TAILQ_REMOVE(&strm->inqueue, control, next);
640                                 asoc->size_on_all_streams -= control->length;
641                                 sctp_ucount_decr(asoc->cnt_on_all_streams);
642                                 strm->last_sequence_delivered++;
643                                 /*
644                                  * We ignore the return of deliver_data here
645                                  * since we always can hold the chunk on the
646                                  * d-queue. And we have a finite number that
647                                  * can be delivered from the strq.
648                                  */
649                                 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
650                                         sctp_log_strm_del(control, NULL,
651                                             SCTP_STR_LOG_FROM_IMMED_DEL);
652                                 }
653                                 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
654                                 sctp_add_to_readq(stcb->sctp_ep, stcb,
655                                     control,
656                                     &stcb->sctp_socket->so_rcv, 1,
657                                     SCTP_READ_LOCK_NOT_HELD,
658                                     SCTP_SO_LOCKED);
659                                 continue;
660                         }
661                         break;
662                 }
663         }
664         if (queue_needed) {
665                 /*
666                  * Ok, we did not deliver this guy, find the correct place
667                  * to put it on the queue.
668                  */
669                 if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
670 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
671                         SCTP_SOCKET_UNLOCK(so, 1);
672 #endif
673                         goto protocol_error;
674                 }
675                 if (TAILQ_EMPTY(&strm->inqueue)) {
676                         /* Empty queue */
677                         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
678                                 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
679                         }
680                         TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
681                 } else {
682                         TAILQ_FOREACH(at, &strm->inqueue, next) {
683                                 if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
684                                         /*
685                                          * one in queue is bigger than the
686                                          * new one, insert before this one
687                                          */
688                                         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
689                                                 sctp_log_strm_del(control, at,
690                                                     SCTP_STR_LOG_FROM_INSERT_MD);
691                                         }
692                                         TAILQ_INSERT_BEFORE(at, control, next);
693                                         break;
694                                 } else if (at->sinfo_ssn == control->sinfo_ssn) {
695                                         /*
696                                          * Gak, He sent me a duplicate str
697                                          * seq number
698                                          */
699                                         /*
700                                          * foo bar, I guess I will just free
701                                          * this new guy, should we abort
702                                          * too? FIX ME MAYBE? Or it COULD be
703                                          * that the SSN's have wrapped.
704                                          * Maybe I should compare to TSN
705                                          * somehow... sigh for now just blow
706                                          * away the chunk!
707                                          */
708
709                                         if (control->data)
710                                                 sctp_m_freem(control->data);
711                                         control->data = NULL;
712                                         asoc->size_on_all_streams -= control->length;
713                                         sctp_ucount_decr(asoc->cnt_on_all_streams);
714                                         if (control->whoFrom) {
715                                                 sctp_free_remote_addr(control->whoFrom);
716                                                 control->whoFrom = NULL;
717                                         }
718                                         sctp_free_a_readq(stcb, control);
719 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
720                                         SCTP_SOCKET_UNLOCK(so, 1);
721 #endif
722                                         return;
723                                 } else {
724                                         if (TAILQ_NEXT(at, next) == NULL) {
725                                                 /*
726                                                  * We are at the end, insert
727                                                  * it after this one
728                                                  */
729                                                 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
730                                                         sctp_log_strm_del(control, at,
731                                                             SCTP_STR_LOG_FROM_INSERT_TL);
732                                                 }
733                                                 TAILQ_INSERT_AFTER(&strm->inqueue,
734                                                     at, control, next);
735                                                 break;
736                                         }
737                                 }
738                         }
739                 }
740         }
741 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
742         SCTP_SOCKET_UNLOCK(so, 1);
743 #endif
744 }
745
746 /*
747  * Returns two things: You get the total size of the deliverable parts of the
748  * first fragmented message on the reassembly queue. And you get a 1 back if
749  * all of the message is ready or a 0 back if the message is still incomplete
750  */
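/*
 * The walk relies on the reassembly queue being kept sorted by TSN: starting
 * from the head, every chunk must carry the next consecutive TSN, and the
 * message counts as complete only if a LAST fragment is reached that way.
 */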
751 static int
752 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
753 {
754         struct sctp_tmit_chunk *chk;
755         uint32_t tsn;
756
757         *t_size = 0;
758         chk = TAILQ_FIRST(&asoc->reasmqueue);
759         if (chk == NULL) {
760                 /* nothing on the queue */
761                 return (0);
762         }
763         if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
764                 /* Not a first on the queue */
765                 return (0);
766         }
767         tsn = chk->rec.data.TSN_seq;
768         TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
769                 if (tsn != chk->rec.data.TSN_seq) {
770                         return (0);
771                 }
772                 *t_size += chk->send_size;
773                 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
774                         return (1);
775                 }
776                 tsn++;
777         }
778         return (0);
779 }
780
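/*
 * Decide whether to start (or continue) delivery from the reassembly queue.
 * A new fragmented delivery is only begun when the head of the queue is a
 * FIRST fragment deliverable in stream order (or unordered) and either the
 * whole message is present or at least pd_point bytes of it are, pd_point
 * being the smaller of the endpoint's partial_delivery_point and a fraction
 * of the receive buffer.
 */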
781 static void
782 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
783 {
784         struct sctp_tmit_chunk *chk;
785         uint16_t nxt_todel;
786         uint32_t tsize, pd_point;
787
788 doit_again:
789         chk = TAILQ_FIRST(&asoc->reasmqueue);
790         if (chk == NULL) {
791                 /* Huh? */
792                 asoc->size_on_reasm_queue = 0;
793                 asoc->cnt_on_reasm_queue = 0;
794                 return;
795         }
796         if (asoc->fragmented_delivery_inprogress == 0) {
797                 nxt_todel =
798                     asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
799                 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
800                     (nxt_todel == chk->rec.data.stream_seq ||
801                     (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
802                         /*
803                          * Yep the first one is here and its ok to deliver
804                          * but should we?
805                          */
806                         if (stcb->sctp_socket) {
807                                 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
808                                     stcb->sctp_ep->partial_delivery_point);
809                         } else {
810                                 pd_point = stcb->sctp_ep->partial_delivery_point;
811                         }
812                         if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
813                                 /*
814                                  * Yes, we set up to start reception by
815                                  * backing down the TSN, just in case we
816                                  * can't deliver.
817                                  */
818                                 asoc->fragmented_delivery_inprogress = 1;
819                                 asoc->tsn_last_delivered =
820                                     chk->rec.data.TSN_seq - 1;
821                                 asoc->str_of_pdapi =
822                                     chk->rec.data.stream_number;
823                                 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
824                                 asoc->pdapi_ppid = chk->rec.data.payloadtype;
825                                 asoc->fragment_flags = chk->rec.data.rcv_flags;
826                                 sctp_service_reassembly(stcb, asoc);
827                         }
828                 }
829         } else {
830                 /*
831                  * Service re-assembly will deliver stream data queued at
832                  * the end of fragmented delivery, but it won't know to go
833                  * back and call itself again... we do that here with the
834                  * goto doit_again.
835                  */
836                 sctp_service_reassembly(stcb, asoc);
837                 if (asoc->fragmented_delivery_inprogress == 0) {
838                         /*
839                          * finished our Fragmented delivery, could be more
840                          * waiting?
841                          */
842                         goto doit_again;
843                 }
844         }
845 }
846
847 /*
848  * Dump onto the re-assembly queue, in its proper place. After dumping on the
849  * queue, see if anything can be delivered. If so pull it off (or as much as
850  * we can). If we run out of space then we must dump what we can and set the
851  * appropriate flag to say we queued what we could.
852  */
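/*
 * Besides the sorted insert, the new chunk's neighbours are audited: if the
 * TSN immediately before or after it is already queued, the B/E bits, stream
 * id and stream sequence of the two must be consistent, and any mismatch is
 * treated as a protocol violation that aborts the association.
 */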
853 static void
854 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
855     struct sctp_tmit_chunk *chk, int *abort_flag)
856 {
857         struct mbuf *op_err;
858         char msg[SCTP_DIAG_INFO_LEN];
859         uint32_t cum_ackp1, prev_tsn, post_tsn;
860         struct sctp_tmit_chunk *at, *prev, *next;
861
862         prev = next = NULL;
863         cum_ackp1 = asoc->tsn_last_delivered + 1;
864         if (TAILQ_EMPTY(&asoc->reasmqueue)) {
865                 /* This is the first one on the queue */
866                 TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
867                 /*
868                  * we do not check for delivery of anything when only one
869                  * fragment is here
870                  */
871                 asoc->size_on_reasm_queue = chk->send_size;
872                 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
873                 if (chk->rec.data.TSN_seq == cum_ackp1) {
874                         if (asoc->fragmented_delivery_inprogress == 0 &&
875                             (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
876                             SCTP_DATA_FIRST_FRAG) {
877                                 /*
878                                  * An empty queue, no delivery inprogress,
879                                  * we hit the next one and it does NOT have
880                                  * a FIRST fragment mark.
881                                  */
882                                 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
883                                 snprintf(msg, sizeof(msg),
884                                     "Expected B-bit for TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
885                                     chk->rec.data.TSN_seq,
886                                     chk->rec.data.stream_number,
887                                     chk->rec.data.stream_seq);
888                                 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
889                                 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
890                                 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
891                                 *abort_flag = 1;
892                         } else if (asoc->fragmented_delivery_inprogress &&
893                             (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
894                                 /*
895                                  * We are doing a partial delivery and the
896                                  * NEXT chunk MUST be either the LAST or
897                                  * MIDDLE fragment NOT a FIRST
898                                  */
899                                 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
900                                 snprintf(msg, sizeof(msg),
901                                     "Didn't expect B-bit for TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
902                                     chk->rec.data.TSN_seq,
903                                     chk->rec.data.stream_number,
904                                     chk->rec.data.stream_seq);
905                                 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
906                                 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
907                                 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
908                                 *abort_flag = 1;
909                         } else if (asoc->fragmented_delivery_inprogress) {
910                                 /*
911                                  * Here we are ok with a MIDDLE or LAST
912                                  * piece
913                                  */
914                                 if (chk->rec.data.stream_number !=
915                                     asoc->str_of_pdapi) {
916                                         /* Got to be the right STR No */
917                                         SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
918                                             chk->rec.data.stream_number,
919                                             asoc->str_of_pdapi);
920                                         snprintf(msg, sizeof(msg),
921                                             "Expected SID=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
922                                             asoc->str_of_pdapi,
923                                             chk->rec.data.TSN_seq,
924                                             chk->rec.data.stream_number,
925                                             chk->rec.data.stream_seq);
926                                         op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
927                                         stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
928                                         sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
929                                         *abort_flag = 1;
930                                 } else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
931                                             SCTP_DATA_UNORDERED &&
932                                     chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
933                                         /* Got to be the right STR Seq */
934                                         SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
935                                             chk->rec.data.stream_seq,
936                                             asoc->ssn_of_pdapi);
937                                         snprintf(msg, sizeof(msg),
938                                             "Expected SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
939                                             asoc->ssn_of_pdapi,
940                                             chk->rec.data.TSN_seq,
941                                             chk->rec.data.stream_number,
942                                             chk->rec.data.stream_seq);
943                                         op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
944                                         stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
945                                         sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
946                                         *abort_flag = 1;
947                                 }
948                         }
949                 }
950                 return;
951         }
952         /* Find its place */
953         TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
954                 if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
955                         /*
956                          * one in queue is bigger than the new one, insert
957                          * before this one
958                          */
959                         /* A check */
960                         asoc->size_on_reasm_queue += chk->send_size;
961                         sctp_ucount_incr(asoc->cnt_on_reasm_queue);
962                         next = at;
963                         TAILQ_INSERT_BEFORE(at, chk, sctp_next);
964                         break;
965                 } else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
966                         /* Gak, He sent me a duplicate str seq number */
967                         /*
968                          * foo bar, I guess I will just free this new guy,
969                          * should we abort too? FIX ME MAYBE? Or it COULD be
970                          * that the SSN's have wrapped. Maybe I should
971                          * compare to TSN somehow... sigh for now just blow
972                          * away the chunk!
973                          */
974                         if (chk->data) {
975                                 sctp_m_freem(chk->data);
976                                 chk->data = NULL;
977                         }
978                         sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
979                         return;
980                 } else {
981                         prev = at;
982                         if (TAILQ_NEXT(at, sctp_next) == NULL) {
983                                 /*
984                                  * We are at the end, insert it after this
985                                  * one
986                                  */
987                                 /* check it first */
988                                 asoc->size_on_reasm_queue += chk->send_size;
989                                 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
990                                 TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
991                                 break;
992                         }
993                 }
994         }
995         /* Now the audits */
996         if (prev) {
997                 prev_tsn = chk->rec.data.TSN_seq - 1;
998                 if (prev_tsn == prev->rec.data.TSN_seq) {
999                         /*
1000                          * Ok the one I am dropping onto the end is the
1001                          * NEXT. A bit of validation here.
1002                          */
1003                         if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1004                             SCTP_DATA_FIRST_FRAG ||
1005                             (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1006                             SCTP_DATA_MIDDLE_FRAG) {
1007                                 /*
1008                                  * Insert chk MUST be a MIDDLE or LAST
1009                                  * fragment
1010                                  */
1011                                 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1012                                     SCTP_DATA_FIRST_FRAG) {
1013                                         SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n");
1014                                         SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
1015                                         snprintf(msg, sizeof(msg),
1016                                             "Can't handle B-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1017                                             chk->rec.data.TSN_seq,
1018                                             chk->rec.data.stream_number,
1019                                             chk->rec.data.stream_seq);
1020                                         op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1021                                         stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
1022                                         sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1023                                         *abort_flag = 1;
1024                                         return;
1025                                 }
1026                                 if (chk->rec.data.stream_number !=
1027                                     prev->rec.data.stream_number) {
1028                                         /*
1029                                          * Huh, need the correct STR here,
1030                                          * they must be the same.
1031                                          */
1032                                         SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sid:%d not the same as at:%d\n",
1033                                             chk->rec.data.stream_number,
1034                                             prev->rec.data.stream_number);
1035                                         snprintf(msg, sizeof(msg),
1036                                             "Expect SID=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1037                                             prev->rec.data.stream_number,
1038                                             chk->rec.data.TSN_seq,
1039                                             chk->rec.data.stream_number,
1040                                             chk->rec.data.stream_seq);
1041                                         op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1042                                         stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
1043                                         sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1044                                         *abort_flag = 1;
1045                                         return;
1046                                 }
1047                                 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
1048                                     (prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
1049                                         /*
1050                                          * Huh, need the same ordering here,
1051                                          * they must be the same.
1052                                          */
1053                                         SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, U-bit not constant\n");
1054                                         snprintf(msg, sizeof(msg),
1055                                             "Expect U-bit=%d for TSN=%8.8x, got U-bit=%d",
1056                                             (prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0,
1057                                             chk->rec.data.TSN_seq,
1058                                             (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0);
1059                                         op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1060                                         stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
1061                                         sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1062                                         *abort_flag = 1;
1063                                         return;
1064                                 }
1065                                 if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1066                                     chk->rec.data.stream_seq !=
1067                                     prev->rec.data.stream_seq) {
1068                                         /*
1069                                          * Huh, need the correct SSN here,
1070                                          * they must be the same.
1071                                          */
1072                                         SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1073                                             chk->rec.data.stream_seq,
1074                                             prev->rec.data.stream_seq);
1075                                         snprintf(msg, sizeof(msg),
1076                                             "Expect SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1077                                             prev->rec.data.stream_seq,
1078                                             chk->rec.data.TSN_seq,
1079                                             chk->rec.data.stream_number,
1080                                             chk->rec.data.stream_seq);
1081                                         op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1082                                         stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
1083                                         sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1084                                         *abort_flag = 1;
1085                                         return;
1086                                 }
1087                         } else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1088                             SCTP_DATA_LAST_FRAG) {
1089                                 /* Insert chk MUST be a FIRST */
1090                                 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1091                                     SCTP_DATA_FIRST_FRAG) {
1092                                         SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, it's not FIRST and it must be!\n");
1093                                         snprintf(msg, sizeof(msg),
1094                                             "Expect B-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1095                                             chk->rec.data.TSN_seq,
1096                                             chk->rec.data.stream_number,
1097                                             chk->rec.data.stream_seq);
1098                                         op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1099                                         stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
1100                                         sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1101                                         *abort_flag = 1;
1102                                         return;
1103                                 }
1104                         }
1105                 }
1106         }
1107         if (next) {
1108                 post_tsn = chk->rec.data.TSN_seq + 1;
1109                 if (post_tsn == next->rec.data.TSN_seq) {
1110                         /*
1111                          * Ok the one I am inserting ahead of is my NEXT
1112                          * one. A bit of validation here.
1113                          */
1114                         if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1115                                 /* Insert chk MUST be a last fragment */
1116                                 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
1117                                     != SCTP_DATA_LAST_FRAG) {
1118                                         SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
1119                                         SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's not a last!\n");
1120                                         snprintf(msg, sizeof(msg),
1121                                             "Expect only E-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1122                                             chk->rec.data.TSN_seq,
1123                                             chk->rec.data.stream_number,
1124                                             chk->rec.data.stream_seq);
1125                                         op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1126                                         stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
1127                                         sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1128                                         *abort_flag = 1;
1129                                         return;
1130                                 }
1131                         } else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1132                                     SCTP_DATA_MIDDLE_FRAG ||
1133                                     (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1134                             SCTP_DATA_LAST_FRAG) {
1135                                 /*
1136                                  * Insert chk CAN be MIDDLE or FIRST NOT
1137                                  * LAST
1138                                  */
1139                                 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1140                                     SCTP_DATA_LAST_FRAG) {
1141                                         SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
1142                                         SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
1143                                         snprintf(msg, sizeof(msg),
1144                                             "Didn't expect E-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1145                                             chk->rec.data.TSN_seq,
1146                                             chk->rec.data.stream_number,
1147                                             chk->rec.data.stream_seq);
1148                                         op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1149                                         stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
1150                                         sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1151                                         *abort_flag = 1;
1152                                         return;
1153                                 }
1154                                 if (chk->rec.data.stream_number !=
1155                                     next->rec.data.stream_number) {
1156                                         /*
1157                                          * Huh, need the correct STR here,
1158                                          * they must be the same.
1159                                          */
1160                                         SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sid:%d not the same as at:%d\n",
1161                                             chk->rec.data.stream_number,
1162                                             next->rec.data.stream_number);
1163                                         snprintf(msg, sizeof(msg),
1164                                             "Required SID %4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1165                                             next->rec.data.stream_number,
1166                                             chk->rec.data.TSN_seq,
1167                                             chk->rec.data.stream_number,
1168                                             chk->rec.data.stream_seq);
1169                                         op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1170                                         stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
1171                                         sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1172                                         *abort_flag = 1;
1173                                         return;
1174                                 }
1175                                 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
1176                                     (next->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
1177                                         /*
1178                                          * Huh, need the same ordering here,
1179                                          * they must be the same.
1180                                          */
1181                                         SCTPDBG(SCTP_DEBUG_INDATA1, "Next check - Gak, Evil plot, U-bit not constant\n");
1182                                         snprintf(msg, sizeof(msg),
1183                                             "Expect U-bit=%d for TSN=%8.8x, got U-bit=%d",
1184                                             (next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0,
1185                                             chk->rec.data.TSN_seq,
1186                                             (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0);
1187                                         op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1188                                         stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1189                                         sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1190                                         *abort_flag = 1;
1191                                         return;
1192                                 }
1193                                 if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1194                                     chk->rec.data.stream_seq !=
1195                                     next->rec.data.stream_seq) {
1196                                         /*
1197                                          * Huh, need the correct SSN here,
1198                                          * they must be the same.
1199                                          */
1200                                         SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1201                                             chk->rec.data.stream_seq,
1202                                             next->rec.data.stream_seq);
1203                                         snprintf(msg, sizeof(msg),
1204                                             "Required SSN %4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1205                                             next->rec.data.stream_seq,
1206                                             chk->rec.data.TSN_seq,
1207                                             chk->rec.data.stream_number,
1208                                             chk->rec.data.stream_seq);
1209                                         op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1210                                         stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1211                                         sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1212                                         *abort_flag = 1;
1213                                         return;
1214                                 }
1215                         }
1216                 }
1217         }
1218         /* Do we need to do some delivery? check */
1219         sctp_deliver_reasm_check(stcb, asoc);
1220 }
1221
1222 /*
1223  * This is an unfortunate routine. It checks to make sure an evil guy is not
1224  * stuffing us full of bad packet fragments. A broken peer could also do this,
1225  * but that is doubtful. It is too bad I must worry about evil crackers; sigh
1226  * :< more cycles.
1227  */
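/*
 * As inferred from the checks below: returns 1 when the given TSN duplicates
 * an entry already in the reassembly queue, or when it sits directly next to
 * an existing fragment in a way that would require the new chunk to be a
 * fragment itself (so a self-contained chunk with this TSN is a protocol
 * violation); returns 0 when the chunk is acceptable as-is.
 */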
1228 static int
1229 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
1230     uint32_t TSN_seq)
1231 {
1232         struct sctp_tmit_chunk *at;
1233         uint32_t tsn_est;
1234
1235         TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1236                 if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
1237                         /* is it one bigger? */
1238                         tsn_est = at->rec.data.TSN_seq + 1;
1239                         if (tsn_est == TSN_seq) {
1240                                 /* yep. It better be a last then */
1241                                 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1242                                     SCTP_DATA_LAST_FRAG) {
1243                                         /*
1244                                          * Ok this guy belongs next to a guy
1245                                          * that is NOT last, it should be a
1246                                          * middle/last, not a complete
1247                                          * chunk.
1248                                          */
1249                                         return (1);
1250                                 } else {
1251                                         /*
1252                                          * This guy is ok since it's a LAST
1253                                          * and the new chunk is a fully
1254                                          * self-contained one.
1255                                          */
1256                                         return (0);
1257                                 }
1258                         }
1259                 } else if (TSN_seq == at->rec.data.TSN_seq) {
1260                         /* Software error since I have a dup? */
1261                         return (1);
1262                 } else {
1263                         /*
1264                          * Ok, 'at' is larger than the new chunk, but does
1265                          * it need to be right before it?
1266                          */
1267                         tsn_est = TSN_seq + 1;
1268                         if (tsn_est == at->rec.data.TSN_seq) {
1269                                 /* Yep, It better be a first */
1270                                 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1271                                     SCTP_DATA_FIRST_FRAG) {
1272                                         return (1);
1273                                 } else {
1274                                         return (0);
1275                                 }
1276                         }
1277                 }
1278         }
1279         return (0);
1280 }
1281
1282 static int
1283 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1284     struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1285     struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1286     int *break_flag, int last_chunk)
1287 {
1288         /* Process a data chunk */
1290         struct sctp_tmit_chunk *chk;
1291         uint32_t tsn, gap;
1292         struct mbuf *dmbuf;
1293         int the_len;
1294         int need_reasm_check = 0;
1295         uint16_t strmno, strmseq;
1296         struct mbuf *op_err;
1297         char msg[SCTP_DIAG_INFO_LEN];
1298         struct sctp_queued_to_read *control;
1299         int ordered;
1300         uint32_t protocol_id;
1301         uint8_t chunk_flags;
1302         struct sctp_stream_reset_list *liste;
1303
1304         chk = NULL;
1305         tsn = ntohl(ch->dp.tsn);
1306         chunk_flags = ch->ch.chunk_flags;
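        /* The I-bit asks the receiver to send a SACK for this chunk right away. */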
1307         if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1308                 asoc->send_sack = 1;
1309         }
1310         protocol_id = ch->dp.protocol_id;
1311         ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1312         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1313                 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1314         }
1315         if (stcb == NULL) {
1316                 return (0);
1317         }
1318         SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1319         if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1320                 /* It is a duplicate */
1321                 SCTP_STAT_INCR(sctps_recvdupdata);
1322                 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1323                         /* Record a dup for the next outbound sack */
1324                         asoc->dup_tsns[asoc->numduptsns] = tsn;
1325                         asoc->numduptsns++;
1326                 }
1327                 asoc->send_sack = 1;
1328                 return (0);
1329         }
1330         /* Calculate the number of TSN's between the base and this TSN */
1331         SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
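        /*
         * The mapping arrays are bitmaps with one bit per TSN, so each byte
         * covers eight TSNs; hence the "<< 3" conversions below.
         */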
1332         if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1333                 /* Can't hold the bit in the mapping at max array, toss it */
1334                 return (0);
1335         }
1336         if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1337                 SCTP_TCB_LOCK_ASSERT(stcb);
1338                 if (sctp_expand_mapping_array(asoc, gap)) {
1339                         /* Can't expand, drop it */
1340                         return (0);
1341                 }
1342         }
1343         if (SCTP_TSN_GT(tsn, *high_tsn)) {
1344                 *high_tsn = tsn;
1345         }
1346         /* See if we have received this one already */
1347         if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1348             SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1349                 SCTP_STAT_INCR(sctps_recvdupdata);
1350                 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1351                         /* Record a dup for the next outbound sack */
1352                         asoc->dup_tsns[asoc->numduptsns] = tsn;
1353                         asoc->numduptsns++;
1354                 }
1355                 asoc->send_sack = 1;
1356                 return (0);
1357         }
1358         /*
1359          * Check to see about the GONE flag; duplicates would have caused
1360          * a SACK to be sent up above.
1361          */
1362         if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1363             (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1364             (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1365                 /*
1366                  * wait a minute, this guy is gone, there is no longer a
1367                  * receiver. Send peer an ABORT!
1368                  */
1369                 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1370                 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1371                 *abort_flag = 1;
1372                 return (0);
1373         }
1374         /*
1375          * Now before going further we see if there is room. If NOT then we
1376          * MAY let one through only IF this TSN is the one we are waiting
1377          * for on a partial delivery API.
1378          */
1379
1380         /* now do the tests */
1381         if (((asoc->cnt_on_all_streams +
1382             asoc->cnt_on_reasm_queue +
1383             asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1384             (((int)asoc->my_rwnd) <= 0)) {
1385                 /*
1386                  * When we have NO room in the rwnd we check to make sure
1387                  * the reader is doing its job...
1388                  */
1389                 if (stcb->sctp_socket->so_rcv.sb_cc) {
1390                         /* some to read, wake-up */
1391 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1392                         struct socket *so;
1393
1394                         so = SCTP_INP_SO(stcb->sctp_ep);
1395                         atomic_add_int(&stcb->asoc.refcnt, 1);
1396                         SCTP_TCB_UNLOCK(stcb);
1397                         SCTP_SOCKET_LOCK(so, 1);
1398                         SCTP_TCB_LOCK(stcb);
1399                         atomic_subtract_int(&stcb->asoc.refcnt, 1);
1400                         if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1401                                 /* assoc was freed while we were unlocked */
1402                                 SCTP_SOCKET_UNLOCK(so, 1);
1403                                 return (0);
1404                         }
1405 #endif
1406                         sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1407 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1408                         SCTP_SOCKET_UNLOCK(so, 1);
1409 #endif
1410                 }
1411                 /* now is it in the mapping array of what we have accepted? */
1412                 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1413                     SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1414                         /* Nope, not in the valid range; dump it */
1415                         sctp_set_rwnd(stcb, asoc);
1416                         if ((asoc->cnt_on_all_streams +
1417                             asoc->cnt_on_reasm_queue +
1418                             asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1419                                 SCTP_STAT_INCR(sctps_datadropchklmt);
1420                         } else {
1421                                 SCTP_STAT_INCR(sctps_datadroprwnd);
1422                         }
1423                         *break_flag = 1;
1424                         return (0);
1425                 }
1426         }
1427         strmno = ntohs(ch->dp.stream_id);
1428         if (strmno >= asoc->streamincnt) {
1429                 struct sctp_error_invalid_stream *cause;
1430
1431                 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1432                     0, M_NOWAIT, 1, MT_DATA);
1433                 if (op_err != NULL) {
1434                         /* add some space up front so prepend will work well */
1435                         SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1436                         cause = mtod(op_err, struct sctp_error_invalid_stream *);
1437                         /*
1438                          * Error causes are formatted like parameters; this one has
1439                          * two back-to-back param headers, one with the error type
1440                          * and size, the other with the stream id and a reserved field.
1441                          */
1442                         SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1443                         cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1444                         cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1445                         cause->stream_id = ch->dp.stream_id;
1446                         cause->reserved = htons(0);
1447                         sctp_queue_op_err(stcb, op_err);
1448                 }
1449                 SCTP_STAT_INCR(sctps_badsid);
1450                 SCTP_TCB_LOCK_ASSERT(stcb);
1451                 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1452                 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1453                         asoc->highest_tsn_inside_nr_map = tsn;
1454                 }
1455                 if (tsn == (asoc->cumulative_tsn + 1)) {
1456                         /* Update cum-ack */
1457                         asoc->cumulative_tsn = tsn;
1458                 }
1459                 return (0);
1460         }
1461         /*
1462          * Before we continue let's validate that we are not being fooled by
1463          * an evil attacker. We can only have 4k chunks based on our TSN
1464          * spread allowed by the mapping array (512 * 8 bits), so there is no
1465          * way our stream sequence numbers could have wrapped. We of course
1466          * only validate the FIRST fragment so the bit must be set.
1467          */
1468         strmseq = ntohs(ch->dp.stream_sequence);
1469 #ifdef SCTP_ASOCLOG_OF_TSNS
1470         SCTP_TCB_LOCK_ASSERT(stcb);
1471         if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1472                 asoc->tsn_in_at = 0;
1473                 asoc->tsn_in_wrapped = 1;
1474         }
1475         asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1476         asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1477         asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1478         asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1479         asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1480         asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1481         asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1482         asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1483         asoc->tsn_in_at++;
1484 #endif
1485         if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1486             (TAILQ_EMPTY(&asoc->resetHead)) &&
1487             (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1488             SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) {
1489                 /* The incoming sseq is behind where we last delivered? */
1490                 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1491                     strmseq, asoc->strmin[strmno].last_sequence_delivered);
1492
1493                 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1494                     asoc->strmin[strmno].last_sequence_delivered,
1495                     tsn, strmno, strmseq);
1496                 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1497                 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1498                 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1499                 *abort_flag = 1;
1500                 return (0);
1501         }
1502         /************************************
1503          * From here down we may find ch-> invalid
1504          * so it's a good idea NOT to use it.
1505          *************************************/
1506
1507         the_len = (chk_length - sizeof(struct sctp_data_chunk));
1508         if (last_chunk == 0) {
1509                 dmbuf = SCTP_M_COPYM(*m,
1510                     (offset + sizeof(struct sctp_data_chunk)),
1511                     the_len, M_NOWAIT);
1512 #ifdef SCTP_MBUF_LOGGING
1513                 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1514                         sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
1515                 }
1516 #endif
1517         } else {
1518                 /* We can steal the last chunk */
1519                 int l_len;
1520
1521                 dmbuf = *m;
1522                 /* lop off the top part */
1523                 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1524                 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1525                         l_len = SCTP_BUF_LEN(dmbuf);
1526                 } else {
1527                         /*
1528                          * need to count up the size; hopefully we do not hit
1529                          * this too often :-0
1530                          */
1531                         struct mbuf *lat;
1532
1533                         l_len = 0;
1534                         for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1535                                 l_len += SCTP_BUF_LEN(lat);
1536                         }
1537                 }
1538                 if (l_len > the_len) {
1539                         /* Trim the rounding bytes off the end too */
1540                         m_adj(dmbuf, -(l_len - the_len));
1541                 }
1542         }
1543         if (dmbuf == NULL) {
1544                 SCTP_STAT_INCR(sctps_nomem);
1545                 return (0);
1546         }
1547         if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1548             asoc->fragmented_delivery_inprogress == 0 &&
1549             TAILQ_EMPTY(&asoc->resetHead) &&
1550             ((ordered == 0) ||
1551             ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1552             TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1553                 /* Candidate for express delivery */
1554                 /*
1555                  * It's not fragmented, no PD-API is up, nothing is in the
1556                  * delivery queue, it's un-ordered OR ordered and the next to
1557                  * deliver AND nothing else is stuck on the stream queue,
1558                  * and there is room for it in the socket buffer. Let's just
1559                  * stuff it up the buffer....
1560                  */
1561
1562                 /* It would be nice to avoid this copy if we could :< */
1563                 sctp_alloc_a_readq(stcb, control);
1564                 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1565                     protocol_id,
1566                     strmno, strmseq,
1567                     chunk_flags,
1568                     dmbuf);
1569                 if (control == NULL) {
1570                         goto failed_express_del;
1571                 }
1572                 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1573                 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1574                         asoc->highest_tsn_inside_nr_map = tsn;
1575                 }
1576                 sctp_add_to_readq(stcb->sctp_ep, stcb,
1577                     control, &stcb->sctp_socket->so_rcv,
1578                     1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1579
1580                 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1581                         /* for ordered, bump what we delivered */
1582                         asoc->strmin[strmno].last_sequence_delivered++;
1583                 }
1584                 SCTP_STAT_INCR(sctps_recvexpress);
1585                 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1586                         sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1587                             SCTP_STR_LOG_FROM_EXPRS_DEL);
1588                 }
1589                 control = NULL;
1590
1591                 goto finish_express_del;
1592         }
1593 failed_express_del:
1594         /* If we reach here this is a new chunk */
1595         chk = NULL;
1596         control = NULL;
1597         /* Express for fragmented delivery? */
1598         if ((asoc->fragmented_delivery_inprogress) &&
1599             (stcb->asoc.control_pdapi) &&
1600             (asoc->str_of_pdapi == strmno) &&
1601             (asoc->ssn_of_pdapi == strmseq)
1602             ) {
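                /*
                 * This chunk is for the message currently being handed up via
                 * the partial delivery API (same stream and SSN), so try to
                 * append it directly to the existing read queue entry.
                 */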
1603                 control = stcb->asoc.control_pdapi;
1604                 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1605                         /* Can't be another first? */
1606                         goto failed_pdapi_express_del;
1607                 }
1608                 if (tsn == (control->sinfo_tsn + 1)) {
1609                         /* Yep, we can add it on */
1610                         int end = 0;
1611
1612                         if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1613                                 end = 1;
1614                         }
1615                         if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1616                             tsn,
1617                             &stcb->sctp_socket->so_rcv)) {
1618                                 SCTP_PRINTF("Append fails end:%d\n", end);
1619                                 goto failed_pdapi_express_del;
1620                         }
1621                         SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1622                         if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1623                                 asoc->highest_tsn_inside_nr_map = tsn;
1624                         }
1625                         SCTP_STAT_INCR(sctps_recvexpressm);
1626                         asoc->tsn_last_delivered = tsn;
1627                         asoc->fragment_flags = chunk_flags;
1628                         asoc->tsn_of_pdapi_last_delivered = tsn;
1629                         asoc->last_flags_delivered = chunk_flags;
1630                         asoc->last_strm_seq_delivered = strmseq;
1631                         asoc->last_strm_no_delivered = strmno;
1632                         if (end) {
1633                                 /* clean up the flags and such */
1634                                 asoc->fragmented_delivery_inprogress = 0;
1635                                 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1636                                         asoc->strmin[strmno].last_sequence_delivered++;
1637                                 }
1638                                 stcb->asoc.control_pdapi = NULL;
1639                                 if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1640                                         /*
1641                                          * There could be another message
1642                                          * ready
1643                                          */
1644                                         need_reasm_check = 1;
1645                                 }
1646                         }
1647                         control = NULL;
1648                         goto finish_express_del;
1649                 }
1650         }
1651 failed_pdapi_express_del:
1652         control = NULL;
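        /*
         * With draining (renege) disabled the chunk can never be revoked, so
         * it can be marked in the non-renegable map right away; otherwise it
         * stays in the regular map, where it could still be renegaded later.
         */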
1653         if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1654                 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1655                 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1656                         asoc->highest_tsn_inside_nr_map = tsn;
1657                 }
1658         } else {
1659                 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1660                 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
1661                         asoc->highest_tsn_inside_map = tsn;
1662                 }
1663         }
1664         if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1665                 sctp_alloc_a_chunk(stcb, chk);
1666                 if (chk == NULL) {
1667                         /* No memory so we drop the chunk */
1668                         SCTP_STAT_INCR(sctps_nomem);
1669                         if (last_chunk == 0) {
1670                                 /* we copied it, free the copy */
1671                                 sctp_m_freem(dmbuf);
1672                         }
1673                         return (0);
1674                 }
1675                 chk->rec.data.TSN_seq = tsn;
1676                 chk->no_fr_allowed = 0;
1677                 chk->rec.data.stream_seq = strmseq;
1678                 chk->rec.data.stream_number = strmno;
1679                 chk->rec.data.payloadtype = protocol_id;
1680                 chk->rec.data.context = stcb->asoc.context;
1681                 chk->rec.data.doing_fast_retransmit = 0;
1682                 chk->rec.data.rcv_flags = chunk_flags;
1683                 chk->asoc = asoc;
1684                 chk->send_size = the_len;
1685                 chk->whoTo = net;
1686                 atomic_add_int(&net->ref_count, 1);
1687                 chk->data = dmbuf;
1688         } else {
1689                 sctp_alloc_a_readq(stcb, control);
1690                 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1691                     protocol_id,
1692                     strmno, strmseq,
1693                     chunk_flags,
1694                     dmbuf);
1695                 if (control == NULL) {
1696                         /* No memory so we drop the chunk */
1697                         SCTP_STAT_INCR(sctps_nomem);
1698                         if (last_chunk == 0) {
1699                                 /* we copied it, free the copy */
1700                                 sctp_m_freem(dmbuf);
1701                         }
1702                         return (0);
1703                 }
1704                 control->length = the_len;
1705         }
1706
1707         /* Mark it as received */
1708         /* Now queue it where it belongs */
1709         if (control != NULL) {
1710                 /* First a sanity check */
1711                 if (asoc->fragmented_delivery_inprogress) {
1712                         /*
1713                          * Ok, we have a fragmented delivery in progress; if
1714                          * this chunk is next to deliver OR belongs, in our
1715                          * view, in the reassembly queue, the peer is evil or
1716                          * broken.
1717                          */
1718                         uint32_t estimate_tsn;
1719
1720                         estimate_tsn = asoc->tsn_last_delivered + 1;
1721                         if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1722                             (estimate_tsn == control->sinfo_tsn)) {
1723                                 /* Evil/Broken peer */
1724                                 sctp_m_freem(control->data);
1725                                 control->data = NULL;
1726                                 if (control->whoFrom) {
1727                                         sctp_free_remote_addr(control->whoFrom);
1728                                         control->whoFrom = NULL;
1729                                 }
1730                                 sctp_free_a_readq(stcb, control);
1731                                 snprintf(msg, sizeof(msg), "Reas. queue empty, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1732                                     tsn, strmno, strmseq);
1733                                 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1734                                 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
1735                                 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1736                                 *abort_flag = 1;
1737                                 if (last_chunk) {
1738                                         *m = NULL;
1739                                 }
1740                                 return (0);
1741                         } else {
1742                                 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1743                                         sctp_m_freem(control->data);
1744                                         control->data = NULL;
1745                                         if (control->whoFrom) {
1746                                                 sctp_free_remote_addr(control->whoFrom);
1747                                                 control->whoFrom = NULL;
1748                                         }
1749                                         sctp_free_a_readq(stcb, control);
1750                                         snprintf(msg, sizeof(msg), "PD ongoing, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1751                                             tsn, strmno, strmseq);
1752                                         op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1753                                         stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
1754                                         sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1755                                         *abort_flag = 1;
1756                                         if (last_chunk) {
1757                                                 *m = NULL;
1758                                         }
1759                                         return (0);
1760                                 }
1761                         }
1762                 } else {
1763                         /* No PDAPI running */
1764                         if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
1765                                 /*
1766                                  * Reassembly queue is NOT empty; validate
1767                                  * that this tsn does not need to be in the
1768                                  * reassembly queue. If it does then our peer
1769                                  * is broken or evil.
1770                                  */
1771                                 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1772                                         sctp_m_freem(control->data);
1773                                         control->data = NULL;
1774                                         if (control->whoFrom) {
1775                                                 sctp_free_remote_addr(control->whoFrom);
1776                                                 control->whoFrom = NULL;
1777                                         }
1778                                         sctp_free_a_readq(stcb, control);
1779                                         snprintf(msg, sizeof(msg), "No PD ongoing, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1780                                             tsn, strmno, strmseq);
1781                                         op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1782                                         stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
1783                                         sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1784                                         *abort_flag = 1;
1785                                         if (last_chunk) {
1786                                                 *m = NULL;
1787                                         }
1788                                         return (0);
1789                                 }
1790                         }
1791                 }
1792                 /* ok, if we reach here we have passed the sanity checks */
1793                 if (chunk_flags & SCTP_DATA_UNORDERED) {
1794                         /* queue directly into socket buffer */
1795                         sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1796                         sctp_add_to_readq(stcb->sctp_ep, stcb,
1797                             control,
1798                             &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1799                 } else {
1800                         /*
1801                          * Special check for when streams are resetting. We
1802                          * could be smarter about this and check the actual
1803                          * stream to see if it is not being reset; that way
1804                          * we would not create a HOLB between streams being
1805                          * reset and those not being reset.
1806                          * 
1807                          * We take complete messages that have a stream reset
1808                          * intervening (aka the TSN is after where our
1809                          * cum-ack needs to be) off and put them on the
1810                          * pending_reply_queue. The reassembly ones we do
1811                          * not have to worry about since they are all sorted
1812                          * and processed by TSN order. It is only the
1813                          * singletons I must worry about.
1814                          */
1815                         if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
1816                             SCTP_TSN_GT(tsn, liste->tsn)) {
1817                                 /*
1818                                  * yep, it's past where we need to reset... go
1819                                  * ahead and queue it.
1820                                  */
1821                                 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
1822                                         /* first one on */
1823                                         TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
1824                                 } else {
1825                                         struct sctp_queued_to_read *ctlOn,
1826                                                            *nctlOn;
1827                                         unsigned char inserted = 0;
1828
1829                                         TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
1830                                                 if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
1831                                                         continue;
1832                                                 } else {
1833                                                         /* found it */
1834                                                         TAILQ_INSERT_BEFORE(ctlOn, control, next);
1835                                                         inserted = 1;
1836                                                         break;
1837                                                 }
1838                                         }
1839                                         if (inserted == 0) {
1840                                                 /*
1841                                                  * not inserted before any
1842                                                  * existing entry, so it
1843                                                  * goes at the end.
1844                                                  */
1845                                                 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
1846                                         }
1847                                 }
1848                         } else {
1849                                 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
1850                                 if (*abort_flag) {
1851                                         if (last_chunk) {
1852                                                 *m = NULL;
1853                                         }
1854                                         return (0);
1855                                 }
1856                         }
1857                 }
1858         } else {
1859                 /* Into the re-assembly queue */
1860                 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
1861                 if (*abort_flag) {
1862                         /*
1863                          * the assoc is now gone and chk was put onto the
1864                          * reasm queue, which has all been freed.
1865                          */
1866                         if (last_chunk) {
1867                                 *m = NULL;
1868                         }
1869                         return (0);
1870                 }
1871         }
1872 finish_express_del:
1873         if (tsn == (asoc->cumulative_tsn + 1)) {
1874                 /* Update cum-ack */
1875                 asoc->cumulative_tsn = tsn;
1876         }
1877         if (last_chunk) {
1878                 *m = NULL;
1879         }
1880         if (ordered) {
1881                 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
1882         } else {
1883                 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
1884         }
1885         SCTP_STAT_INCR(sctps_recvdata);
1886         /* Set it present please */
1887         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1888                 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
1889         }
1890         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1891                 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
1892                     asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
1893         }
1894         /* check the special flag for stream resets */
1895         if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
1896             SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
1897                 /*
1898                  * we have finished working through the backlogged TSNs; now it
1899                  * is time to reset streams. 1: call reset function. 2: free
1900                  * pending_reply space. 3: distribute any chunks in
1901                  * pending_reply_queue.
1902                  */
1903                 struct sctp_queued_to_read *ctl, *nctl;
1904
1905                 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
1906                 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
1907                 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
1908                 SCTP_FREE(liste, SCTP_M_STRESET);
1909                 /* sa_ignore FREED_MEMORY */
1910                 liste = TAILQ_FIRST(&asoc->resetHead);
1911                 if (TAILQ_EMPTY(&asoc->resetHead)) {
1912                         /* All can be removed */
1913                         TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
1914                                 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
1915                                 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
1916                                 if (*abort_flag) {
1917                                         return (0);
1918                                 }
1919                         }
1920                 } else {
1921                         TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
1922                                 if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
1923                                         break;
1924                                 }
1925                                 /*
1926                                  * if ctl->sinfo_tsn is <= liste->tsn we can
1927                                  * process it, which is the negation of
1928                                  * ctl->sinfo_tsn > liste->tsn.
1929                                  */
1930                                 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
1931                                 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
1932                                 if (*abort_flag) {
1933                                         return (0);
1934                                 }
1935                         }
1936                 }
1937                 /*
1938                  * Now service re-assembly to pick up anything that has been
1939                  * held on the reassembly queue.
1940                  */
1941                 sctp_deliver_reasm_check(stcb, asoc);
1942                 need_reasm_check = 0;
1943         }
1944         if (need_reasm_check) {
1945                 /* Another one waits ? */
1946                 sctp_deliver_reasm_check(stcb, asoc);
1947         }
1948         return (1);
1949 }
1950
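/*
 * For a byte value v, sctp_map_lookup_tab[v] is the number of consecutive
 * 1-bits starting at the least significant bit, i.e. how many in-sequence
 * TSNs that byte of the (OR'ed) mapping arrays contributes before the first
 * gap. It is used below when sliding the mapping arrays.
 */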
1951 int8_t sctp_map_lookup_tab[256] = {
1952         0, 1, 0, 2, 0, 1, 0, 3,
1953         0, 1, 0, 2, 0, 1, 0, 4,
1954         0, 1, 0, 2, 0, 1, 0, 3,
1955         0, 1, 0, 2, 0, 1, 0, 5,
1956         0, 1, 0, 2, 0, 1, 0, 3,
1957         0, 1, 0, 2, 0, 1, 0, 4,
1958         0, 1, 0, 2, 0, 1, 0, 3,
1959         0, 1, 0, 2, 0, 1, 0, 6,
1960         0, 1, 0, 2, 0, 1, 0, 3,
1961         0, 1, 0, 2, 0, 1, 0, 4,
1962         0, 1, 0, 2, 0, 1, 0, 3,
1963         0, 1, 0, 2, 0, 1, 0, 5,
1964         0, 1, 0, 2, 0, 1, 0, 3,
1965         0, 1, 0, 2, 0, 1, 0, 4,
1966         0, 1, 0, 2, 0, 1, 0, 3,
1967         0, 1, 0, 2, 0, 1, 0, 7,
1968         0, 1, 0, 2, 0, 1, 0, 3,
1969         0, 1, 0, 2, 0, 1, 0, 4,
1970         0, 1, 0, 2, 0, 1, 0, 3,
1971         0, 1, 0, 2, 0, 1, 0, 5,
1972         0, 1, 0, 2, 0, 1, 0, 3,
1973         0, 1, 0, 2, 0, 1, 0, 4,
1974         0, 1, 0, 2, 0, 1, 0, 3,
1975         0, 1, 0, 2, 0, 1, 0, 6,
1976         0, 1, 0, 2, 0, 1, 0, 3,
1977         0, 1, 0, 2, 0, 1, 0, 4,
1978         0, 1, 0, 2, 0, 1, 0, 3,
1979         0, 1, 0, 2, 0, 1, 0, 5,
1980         0, 1, 0, 2, 0, 1, 0, 3,
1981         0, 1, 0, 2, 0, 1, 0, 4,
1982         0, 1, 0, 2, 0, 1, 0, 3,
1983         0, 1, 0, 2, 0, 1, 0, 8
1984 };
1985
1986
1987 void
1988 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
1989 {
1990         /*
1991          * Now we also need to check the mapping array in a couple of ways.
1992          * 1) Did we move the cum-ack point?
1993          * 
1994          * When you first glance at this you might think that all entries that
1995          * make up the position of the cum-ack would be in the nr-mapping
1996          * array only, i.e. things up to the cum-ack are always
1997          * deliverable. That's true with one exception: when it's a fragmented
1998          * message we may not deliver the data until some threshold (or all
1999          * of it) is in place. So we must OR the nr_mapping_array and
2000          * mapping_array to get a true picture of the cum-ack.
2001          */
2002         struct sctp_association *asoc;
2003         int at;
2004         uint8_t val;
2005         int slide_from, slide_end, lgap, distance;
2006         uint32_t old_cumack, old_base, old_highest, highest_tsn;
2007
2008         asoc = &stcb->asoc;
2009
2010         old_cumack = asoc->cumulative_tsn;
2011         old_base = asoc->mapping_array_base_tsn;
2012         old_highest = asoc->highest_tsn_inside_map;
2013         /*
2014          * We could probably improve this a small bit by calculating the
2015          * offset of the current cum-ack as the starting point.
2016          */
2017         at = 0;
2018         for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2019                 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2020                 if (val == 0xff) {
2021                         at += 8;
2022                 } else {
2023                         /* there is a 0 bit */
2024                         at += sctp_map_lookup_tab[val];
2025                         break;
2026                 }
2027         }
2028         asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
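        /*
         * Worked example with hypothetical values: if mapping_array_base_tsn
         * is 1000 and the OR of the two arrays begins 0xff, 0x07, ..., the
         * loop above yields at = 8 + 3 = 11, so the cumulative TSN becomes
         * 1000 + 10 = 1010 and TSN 1011 is the first gap.
         */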
2029
2030         if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2031             SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2032 #ifdef INVARIANTS
2033                 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2034                     asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2035 #else
2036                 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2037                     asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2038                 sctp_print_mapping_array(asoc);
2039                 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2040                         sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2041                 }
2042                 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2043                 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2044 #endif
2045         }
2046         if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2047                 highest_tsn = asoc->highest_tsn_inside_nr_map;
2048         } else {
2049                 highest_tsn = asoc->highest_tsn_inside_map;
2050         }
2051         if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2052                 /* The complete array was completed by a single FR */
2053                 /* highest becomes the cum-ack */
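                /*
                 * Every TSN tracked so far sits at or below the cum-ack, so
                 * the whole bitmap can be cleared and re-based at cum-ack + 1.
                 */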
2054                 int clr;
2055
2056 #ifdef INVARIANTS
2057                 unsigned int i;
2058
2059 #endif
2060
2061                 /* clear the array */
2062                 clr = ((at + 7) >> 3);
2063                 if (clr > asoc->mapping_array_size) {
2064                         clr = asoc->mapping_array_size;
2065                 }
2066                 memset(asoc->mapping_array, 0, clr);
2067                 memset(asoc->nr_mapping_array, 0, clr);
2068 #ifdef INVARIANTS
2069                 for (i = 0; i < asoc->mapping_array_size; i++) {
2070                         if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2071                                 SCTP_PRINTF("Error: mapping arrays not clean at clear\n");
2072                                 sctp_print_mapping_array(asoc);
2073                         }
2074                 }
2075 #endif
2076                 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2077                 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2078         } else if (at >= 8) {
2079                 /* we can slide the mapping array down */
2080                 /* slide_from holds where we hit the first NON 0xff byte */
2081
2082                 /*
2083                  * now calculate the ceiling of the move using our highest
2084                  * TSN value
2085                  */
2086                 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2087                 slide_end = (lgap >> 3);
2088                 if (slide_end < slide_from) {
2089                         sctp_print_mapping_array(asoc);
2090 #ifdef INVARIANTS
2091                         panic("impossible slide");
2092 #else
2093                         SCTP_PRINTF("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2094                             lgap, slide_end, slide_from, at);
2095                         return;
2096 #endif
2097                 }
2098                 if (slide_end > asoc->mapping_array_size) {
2099 #ifdef INVARIANTS
2100                         panic("would overrun buffer");
2101 #else
2102                         SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n",
2103                             asoc->mapping_array_size, slide_end);
2104                         slide_end = asoc->mapping_array_size;
2105 #endif
2106                 }
2107                 distance = (slide_end - slide_from) + 1;
2108                 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2109                         sctp_log_map(old_base, old_cumack, old_highest,
2110                             SCTP_MAP_PREPARE_SLIDE);
2111                         sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2112                             (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2113                 }
2114                 if (distance + slide_from > asoc->mapping_array_size ||
2115                     distance < 0) {
2116                         /*
2117                          * Here we do NOT slide forward the array so that
2118                          * hopefully when more data comes in to fill it up
2119                          * we will be able to slide it forward. Really I
2120                          * don't think this should happen :-0
2121                          */
2122
2123                         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2124                                 sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2125                                     (uint32_t) asoc->mapping_array_size,
2126                                     SCTP_MAP_SLIDE_NONE);
2127                         }
2128                 } else {
2129                         int ii;
2130
2131                         for (ii = 0; ii < distance; ii++) {
2132                                 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2133                                 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2134
2135                         }
2136                         for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2137                                 asoc->mapping_array[ii] = 0;
2138                                 asoc->nr_mapping_array[ii] = 0;
2139                         }
2140                         if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2141                                 asoc->highest_tsn_inside_map += (slide_from << 3);
2142                         }
2143                         if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2144                                 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2145                         }
2146                         asoc->mapping_array_base_tsn += (slide_from << 3);
2147                         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2148                                 sctp_log_map(asoc->mapping_array_base_tsn,
2149                                     asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2150                                     SCTP_MAP_SLIDE_RESULT);
2151                         }
2152                 }
2153         }
2154 }
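
/*
 * A minimal, non-compiled sketch of the counting step performed by the
 * loop at the top of the function above: OR the renegable and non-renegable
 * maps and count how many TSNs, starting at mapping_array_base_tsn, are
 * contiguously marked.  The real code uses sctp_map_lookup_tab[] instead of
 * testing individual bits, but the result is the same.  The helper name
 * below is illustrative only and does not exist in this file.
 */
#if 0
static int
sketch_count_contiguous_acked(const uint8_t *map, const uint8_t *nr_map,
    int map_size)
{
	int at = 0, i, bit;
	uint8_t val;

	for (i = 0; i < map_size; i++) {
		val = map[i] | nr_map[i];
		if (val == 0xff) {
			/* All eight TSNs covered by this byte are present. */
			at += 8;
			continue;
		}
		/* First byte with a hole: count its low-order set bits. */
		for (bit = 0; bit < 8; bit++) {
			if (val & (1 << bit)) {
				at++;
			} else {
				break;
			}
		}
		break;
	}
	/* The new cumulative TSN is mapping_array_base_tsn + (at - 1). */
	return (at);
}
#endif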
2155
2156 void
2157 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2158 {
2159         struct sctp_association *asoc;
2160         uint32_t highest_tsn;
2161
2162         asoc = &stcb->asoc;
2163         if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2164                 highest_tsn = asoc->highest_tsn_inside_nr_map;
2165         } else {
2166                 highest_tsn = asoc->highest_tsn_inside_map;
2167         }
2168
2169         /*
2170          * Now we need to see if we need to queue a sack or just start the
2171          * timer (if allowed).
2172          */
2173         if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2174                 /*
2175                  * Ok, special case for the SHUTDOWN-SENT state: here we make
2176                  * sure the SACK timer is off and instead send a SHUTDOWN and
2177                  * a SACK.
2178                  */
2179                 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2180                         sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2181                             stcb->sctp_ep, stcb, NULL,
2182                             SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
2183                 }
2184                 sctp_send_shutdown(stcb,
2185                     ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2186                 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2187         } else {
2188                 int is_a_gap;
2189
2190                 /* is there a gap now ? */
2191                 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2192
2193                 /*
2194                  * CMT DAC algorithm: increase number of packets received
2195                  * since last ack
2196                  */
2197                 stcb->asoc.cmt_dac_pkts_rcvd++;
2198
2199                 if ((stcb->asoc.send_sack == 1) ||      /* We need to send a
2200                                                          * SACK */
2201                     ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2202                                                          * longer is one */
2203                     (stcb->asoc.numduptsns) ||  /* we have dup's */
2204                     (is_a_gap) ||       /* is still a gap */
2205                     (stcb->asoc.delayed_ack == 0) ||    /* Delayed sack disabled */
2206                     (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
2207                     ) {
2208
2209                         if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2210                             (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2211                             (stcb->asoc.send_sack == 0) &&
2212                             (stcb->asoc.numduptsns == 0) &&
2213                             (stcb->asoc.delayed_ack) &&
2214                             (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2215
2216                                 /*
2217                                  * CMT DAC algorithm: With CMT, delay acks
2218                                  * even in the face of reordering.
2219                                  * Therefore, acks that do not have to
2220                                  * be sent because of any of the above
2221                                  * reasons will be delayed. That is,
2222                                  * acks that would have been sent due
2223                                  * to gap reports will be delayed with
2224                                  * DAC in this case. Start the delayed
2225                                  * ack timer.
2226                                  */
2227                                 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2228                                     stcb->sctp_ep, stcb, NULL);
2229                         } else {
2230                                 /*
2231                                  * Ok we must build a SACK since the timer
2232                                  * is pending, we got our first packet OR
2233                                  * there are gaps or duplicates.
2234                                  */
2235                                 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2236                                 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2237                         }
2238                 } else {
2239                         if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2240                                 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2241                                     stcb->sctp_ep, stcb, NULL);
2242                         }
2243                 }
2244         }
2245 }
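
/*
 * A rough, non-compiled sketch of the decision made above outside of the
 * SHUTDOWN-SENT special case: send a SACK immediately if one is already
 * owed, if a gap just closed or is still open, if duplicate TSNs were seen,
 * if delayed SACKs are disabled, or if the packet-count limit was reached;
 * otherwise (re)arm the delayed-ack timer.  The CMT DAC branch above can
 * still delay the ack even when this returns 1.  The helper and its
 * parameters are illustrative only.
 */
#if 0
static int
sketch_should_sack_now(int send_sack, int was_a_gap, int is_a_gap,
    uint32_t numduptsns, uint32_t delayed_ack,
    uint32_t data_pkts_seen, uint32_t sack_freq)
{
	if (send_sack ||			/* a SACK is already owed */
	    (was_a_gap && !is_a_gap) ||		/* a gap just closed */
	    is_a_gap ||				/* a gap is still open */
	    (numduptsns > 0) ||			/* duplicates to report */
	    (delayed_ack == 0) ||		/* delayed SACK disabled */
	    (data_pkts_seen >= sack_freq)) {	/* packet limit reached */
		return (1);
	}
	return (0);				/* just run the timer */
}
#endif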
2246
2247 void
2248 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2249 {
2250         struct sctp_tmit_chunk *chk;
2251         uint32_t tsize, pd_point;
2252         uint16_t nxt_todel;
2253
2254         if (asoc->fragmented_delivery_inprogress) {
2255                 sctp_service_reassembly(stcb, asoc);
2256         }
2257         /* Can we proceed further, i.e. is the PD-API complete? */
2258         if (asoc->fragmented_delivery_inprogress) {
2259                 /* no */
2260                 return;
2261         }
2262         /*
2263          * Now, is there some other chunk I can deliver from the reassembly
2264          * queue?
2265          */
2266 doit_again:
2267         chk = TAILQ_FIRST(&asoc->reasmqueue);
2268         if (chk == NULL) {
2269                 asoc->size_on_reasm_queue = 0;
2270                 asoc->cnt_on_reasm_queue = 0;
2271                 return;
2272         }
2273         nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2274         if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2275             ((nxt_todel == chk->rec.data.stream_seq) ||
2276             (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2277                 /*
2278                  * Yep, the first one is here. We set up to start reception
2279                  * by backing down the TSN just in case we can't deliver.
2280                  */
2281
2282                 /*
2283                  * Before we start, though, either the whole message should
2284                  * be here or at least enough of it to reach the partial
2285                  * delivery point derived from the socket buffer limit.
2286                  */
2287                 if (stcb->sctp_socket) {
2288                         pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
2289                             stcb->sctp_ep->partial_delivery_point);
2290                 } else {
2291                         pd_point = stcb->sctp_ep->partial_delivery_point;
2292                 }
2293                 if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
2294                         asoc->fragmented_delivery_inprogress = 1;
2295                         asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2296                         asoc->str_of_pdapi = chk->rec.data.stream_number;
2297                         asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2298                         asoc->pdapi_ppid = chk->rec.data.payloadtype;
2299                         asoc->fragment_flags = chk->rec.data.rcv_flags;
2300                         sctp_service_reassembly(stcb, asoc);
2301                         if (asoc->fragmented_delivery_inprogress == 0) {
2302                                 goto doit_again;
2303                         }
2304                 }
2305         }
2306 }
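
/*
 * A small, non-compiled sketch of the partial-delivery test above.  The
 * socket buffer limit and shift are passed in rather than read from the
 * kernel macros, so the helper and its parameters are illustrative only:
 * a PD-API is started when the whole message is queued, or when at least
 * pd_point bytes of it are.
 */
#if 0
static int
sketch_start_pdapi(uint32_t queued_bytes, int whole_msg_present,
    uint32_t sb_limit, unsigned int pd_shift, uint32_t ep_pd_point)
{
	uint32_t pd_point;

	/* pd_point = min(sb_limit >> pd_shift, ep_pd_point) */
	pd_point = sb_limit >> pd_shift;
	if (pd_point > ep_pd_point) {
		pd_point = ep_pd_point;
	}
	return (whole_msg_present || (queued_bytes >= pd_point));
}
#endif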
2307
2308 int
2309 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2310     struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2311     struct sctp_nets *net, uint32_t * high_tsn)
2312 {
2313         struct sctp_data_chunk *ch, chunk_buf;
2314         struct sctp_association *asoc;
2315         int num_chunks = 0;     /* number of DATA chunks processed */
2316         int stop_proc = 0;
2317         int chk_length, break_flag, last_chunk;
2318         int abort_flag = 0, was_a_gap;
2319         struct mbuf *m;
2320         uint32_t highest_tsn;
2321
2322         /* set the rwnd */
2323         sctp_set_rwnd(stcb, &stcb->asoc);
2324
2325         m = *mm;
2326         SCTP_TCB_LOCK_ASSERT(stcb);
2327         asoc = &stcb->asoc;
2328         if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2329                 highest_tsn = asoc->highest_tsn_inside_nr_map;
2330         } else {
2331                 highest_tsn = asoc->highest_tsn_inside_map;
2332         }
2333         was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2334         /*
2335          * setup where we got the last DATA packet from for any SACK that
2336          * may need to go out. Don't bump the net. This is done ONLY when a
2337          * chunk is assigned.
2338          */
2339         asoc->last_data_chunk_from = net;
2340
2341         /*-
2342          * Now before we proceed we must figure out if this is a wasted
2343          * cluster... i.e. it is a small packet sent in and yet the driver
2344          * underneath allocated a full cluster for it. If so we must copy it
2345          * to a smaller mbuf and free up the cluster mbuf. This will help
2346          * with cluster starvation. Note for __Panda__ we don't do this
2347          * since it has clusters all the way down to 64 bytes.
2348          */
2349         if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2350                 /* we only handle mbufs that are singletons.. not chains */
2351                 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2352                 if (m) {
2353                         /* ok, let's see if we can copy the data up */
2354                         caddr_t *from, *to;
2355
2356                         /* get the pointers and copy */
2357                         to = mtod(m, caddr_t *);
2358                         from = mtod((*mm), caddr_t *);
2359                         memcpy(to, from, SCTP_BUF_LEN((*mm)));
2360                         /* copy the length and free up the old */
2361                         SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2362                         sctp_m_freem(*mm);
2363                         /* success, copy the new mbuf back */
2364                         *mm = m;
2365                 } else {
2366                         /* We are in trouble in the mbuf world .. yikes */
2367                         m = *mm;
2368                 }
2369         }
2370         /* get pointer to the first chunk header */
2371         ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2372             sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2373         if (ch == NULL) {
2374                 return (1);
2375         }
2376         /*
2377          * process all DATA chunks...
2378          */
2379         *high_tsn = asoc->cumulative_tsn;
2380         break_flag = 0;
2381         asoc->data_pkts_seen++;
2382         while (stop_proc == 0) {
2383                 /* validate chunk length */
2384                 chk_length = ntohs(ch->ch.chunk_length);
2385                 if (length - *offset < chk_length) {
2386                         /* all done, mutilated chunk */
2387                         stop_proc = 1;
2388                         continue;
2389                 }
2390                 if (ch->ch.chunk_type == SCTP_DATA) {
2391                         if ((size_t)chk_length < sizeof(struct sctp_data_chunk)) {
2392                                 /*
2393                                  * Need to send an abort since we had an
2394                                  * invalid data chunk.
2395                                  */
2396                                 struct mbuf *op_err;
2397                                 char msg[SCTP_DIAG_INFO_LEN];
2398
2399                                 snprintf(msg, sizeof(msg), "DATA chunk of length %d",
2400                                     chk_length);
2401                                 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2402                                 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
2403                                 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2404                                 return (2);
2405                         }
2406                         if ((size_t)chk_length == sizeof(struct sctp_data_chunk)) {
2407                                 /*
2408                                  * Need to send an abort since we had an
2409                                  * empty data chunk.
2410                                  */
2411                                 struct mbuf *op_err;
2412
2413                                 op_err = sctp_generate_no_user_data_cause(ch->dp.tsn);
2414                                 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22;
2415                                 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2416                                 return (2);
2417                         }
2418 #ifdef SCTP_AUDITING_ENABLED
2419                         sctp_audit_log(0xB1, 0);
2420 #endif
2421                         if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2422                                 last_chunk = 1;
2423                         } else {
2424                                 last_chunk = 0;
2425                         }
2426                         if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2427                             chk_length, net, high_tsn, &abort_flag, &break_flag,
2428                             last_chunk)) {
2429                                 num_chunks++;
2430                         }
2431                         if (abort_flag)
2432                                 return (2);
2433
2434                         if (break_flag) {
2435                                 /*
2436                                  * Set because we ran out of rwnd space and
2437                                  * have no drop report space left.
2438                                  */
2439                                 stop_proc = 1;
2440                                 continue;
2441                         }
2442                 } else {
2443                         /* not a data chunk in the data region */
2444                         switch (ch->ch.chunk_type) {
2445                         case SCTP_INITIATION:
2446                         case SCTP_INITIATION_ACK:
2447                         case SCTP_SELECTIVE_ACK:
2448                         case SCTP_NR_SELECTIVE_ACK:
2449                         case SCTP_HEARTBEAT_REQUEST:
2450                         case SCTP_HEARTBEAT_ACK:
2451                         case SCTP_ABORT_ASSOCIATION:
2452                         case SCTP_SHUTDOWN:
2453                         case SCTP_SHUTDOWN_ACK:
2454                         case SCTP_OPERATION_ERROR:
2455                         case SCTP_COOKIE_ECHO:
2456                         case SCTP_COOKIE_ACK:
2457                         case SCTP_ECN_ECHO:
2458                         case SCTP_ECN_CWR:
2459                         case SCTP_SHUTDOWN_COMPLETE:
2460                         case SCTP_AUTHENTICATION:
2461                         case SCTP_ASCONF_ACK:
2462                         case SCTP_PACKET_DROPPED:
2463                         case SCTP_STREAM_RESET:
2464                         case SCTP_FORWARD_CUM_TSN:
2465                         case SCTP_ASCONF:
2466                                 /*
2467                                  * Now, what do we do with KNOWN chunks that
2468                                  * are NOT in the right place?
2469                                  * 
2470                                  * For now, I do nothing but ignore them. We
2471                                  * may later want to add sysctl stuff to
2472                                  * switch out and do either an ABORT() or
2473                                  * possibly process them.
2474                                  */
2475                                 if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2476                                         struct mbuf *op_err;
2477                                         char msg[SCTP_DIAG_INFO_LEN];
2478
2479                                         snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2480                                             ch->ch.chunk_type);
2481                                         op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2482                                         sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2483                                         return (2);
2484                                 }
2485                                 break;
2486                         default:
2487                                 /* unknown chunk type, use bit rules */
2488                                 if (ch->ch.chunk_type & 0x40) {
2489                                         /* Add an error report to the queue */
2490                                         struct mbuf *op_err;
2491                                         struct sctp_gen_error_cause *cause;
2492
2493                                         op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2494                                             0, M_NOWAIT, 1, MT_DATA);
2495                                         if (op_err != NULL) {
2496                                                 cause = mtod(op_err, struct sctp_gen_error_cause *);
2497                                                 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2498                                                 cause->length = htons(chk_length + sizeof(struct sctp_gen_error_cause));
2499                                                 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2500                                                 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2501                                                 if (SCTP_BUF_NEXT(op_err) != NULL) {
2502                                                         sctp_queue_op_err(stcb, op_err);
2503                                                 } else {
2504                                                         sctp_m_freem(op_err);
2505                                                 }
2506                                         }
2507                                 }
2508                                 if ((ch->ch.chunk_type & 0x80) == 0) {
2509                                         /* discard the rest of this packet */
2510                                         stop_proc = 1;
2511                                 }       /* else skip this bad chunk and
2512                                          * continue... */
2513                                 break;
2514                         }       /* switch of chunk type */
2515                 }
2516                 *offset += SCTP_SIZE32(chk_length);
2517                 if ((*offset >= length) || stop_proc) {
2518                         /* no more data left in the mbuf chain */
2519                         stop_proc = 1;
2520                         continue;
2521                 }
2522                 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2523                     sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2524                 if (ch == NULL) {
2525                         *offset = length;
2526                         stop_proc = 1;
2527                         continue;
2528                 }
2529         }
2530         if (break_flag) {
2531                 /*
2532                  * we need to report rwnd overrun drops.
2533                  */
2534                 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2535         }
2536         if (num_chunks) {
2537                 /*
2538                  * Did we get data? If so, update the time for auto-close and
2539                  * give the peer credit for being alive.
2540                  */
2541                 SCTP_STAT_INCR(sctps_recvpktwithdata);
2542                 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2543                         sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2544                             stcb->asoc.overall_error_count,
2545                             0,
2546                             SCTP_FROM_SCTP_INDATA,
2547                             __LINE__);
2548                 }
2549                 stcb->asoc.overall_error_count = 0;
2550                 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2551         }
2552         /* now service all of the reassm queue if needed */
2553         if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2554                 sctp_service_queues(stcb, asoc);
2555
2556         if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2557                 /* Assure that we ack right away */
2558                 stcb->asoc.send_sack = 1;
2559         }
2560         /* Start a sack timer or QUEUE a SACK for sending */
2561         sctp_sack_check(stcb, was_a_gap);
2562         return (0);
2563 }
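
/*
 * A non-compiled sketch of the "bit rules" applied above to unknown chunk
 * types (mirroring the unrecognized-chunk rules of the SCTP specification):
 * the two high-order bits of the chunk type tell the receiver whether to
 * keep processing the packet and whether to report the chunk in an
 * "Unrecognized Chunk Type" error cause.  The helper name is illustrative
 * only.
 */
#if 0
static void
sketch_unknown_chunk_action(uint8_t chunk_type, int *report_it, int *stop_here)
{
	/* 0x40 set (01/11): queue an operation error reporting the chunk. */
	*report_it = (chunk_type & 0x40) ? 1 : 0;
	/* 0x80 clear (00/01): stop processing the rest of the packet; */
	/* 0x80 set (10/11): skip this chunk and continue. */
	*stop_here = ((chunk_type & 0x80) == 0) ? 1 : 0;
}
#endif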
2564
2565 static int
2566 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2567     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2568     int *num_frs,
2569     uint32_t * biggest_newly_acked_tsn,
2570     uint32_t * this_sack_lowest_newack,
2571     int *rto_ok)
2572 {
2573         struct sctp_tmit_chunk *tp1;
2574         unsigned int theTSN;
2575         int j, wake_him = 0, circled = 0;
2576
2577         /* Recover the tp1 we last saw */
2578         tp1 = *p_tp1;
2579         if (tp1 == NULL) {
2580                 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2581         }
2582         for (j = frag_strt; j <= frag_end; j++) {
2583                 theTSN = j + last_tsn;
2584                 while (tp1) {
2585                         if (tp1->rec.data.doing_fast_retransmit)
2586                                 (*num_frs) += 1;
2587
2588                         /*-
2589                          * CMT: CUCv2 algorithm. For each TSN being
2590                          * processed from the sent queue, track the
2591                          * next expected pseudo-cumack, or
2592                          * rtx_pseudo_cumack, if required. Separate
2593                          * cumack trackers for first transmissions,
2594                          * and retransmissions.
2595                          */
2596                         if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2597                             (tp1->whoTo->find_pseudo_cumack == 1) &&
2598                             (tp1->snd_count == 1)) {
2599                                 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2600                                 tp1->whoTo->find_pseudo_cumack = 0;
2601                         }
2602                         if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2603                             (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2604                             (tp1->snd_count > 1)) {
2605                                 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2606                                 tp1->whoTo->find_rtx_pseudo_cumack = 0;
2607                         }
2608                         if (tp1->rec.data.TSN_seq == theTSN) {
2609                                 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2610                                         /*-
2611                                          * must be held until
2612                                          * cum-ack passes
2613                                          */
2614                                         if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2615                                                 /*-
2616                                                  * If it is less than RESEND, it is
2617                                                  * now no longer in flight.
2618                                                  * Higher values may already be set
2619                                                  * via previous Gap Ack Blocks...
2620                                                  * i.e. ACKED or RESEND.
2621                                                  */
2622                                                 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2623                                                     *biggest_newly_acked_tsn)) {
2624                                                         *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2625                                                 }
2626                                                 /*-
2627                                                  * CMT: SFR algo (and HTNA) - set
2628                                                  * saw_newack to 1 for dest being
2629                                                  * newly acked. update
2630                                                  * this_sack_highest_newack if
2631                                                  * appropriate.
2632                                                  */
2633                                                 if (tp1->rec.data.chunk_was_revoked == 0)
2634                                                         tp1->whoTo->saw_newack = 1;
2635
2636                                                 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2637                                                     tp1->whoTo->this_sack_highest_newack)) {
2638                                                         tp1->whoTo->this_sack_highest_newack =
2639                                                             tp1->rec.data.TSN_seq;
2640                                                 }
2641                                                 /*-
2642                                                  * CMT DAC algo: also update
2643                                                  * this_sack_lowest_newack
2644                                                  */
2645                                                 if (*this_sack_lowest_newack == 0) {
2646                                                         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2647                                                                 sctp_log_sack(*this_sack_lowest_newack,
2648                                                                     last_tsn,
2649                                                                     tp1->rec.data.TSN_seq,
2650                                                                     0,
2651                                                                     0,
2652                                                                     SCTP_LOG_TSN_ACKED);
2653                                                         }
2654                                                         *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2655                                                 }
2656                                                 /*-
2657                                                  * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2658                                                  * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2659                                                  * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2660                                                  * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2661                                                  * Separate pseudo_cumack trackers for first transmissions and
2662                                                  * retransmissions.
2663                                                  */
2664                                                 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2665                                                         if (tp1->rec.data.chunk_was_revoked == 0) {
2666                                                                 tp1->whoTo->new_pseudo_cumack = 1;
2667                                                         }
2668                                                         tp1->whoTo->find_pseudo_cumack = 1;
2669                                                 }
2670                                                 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2671                                                         sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2672                                                 }
2673                                                 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2674                                                         if (tp1->rec.data.chunk_was_revoked == 0) {
2675                                                                 tp1->whoTo->new_pseudo_cumack = 1;
2676                                                         }
2677                                                         tp1->whoTo->find_rtx_pseudo_cumack = 1;
2678                                                 }
2679                                                 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2680                                                         sctp_log_sack(*biggest_newly_acked_tsn,
2681                                                             last_tsn,
2682                                                             tp1->rec.data.TSN_seq,
2683                                                             frag_strt,
2684                                                             frag_end,
2685                                                             SCTP_LOG_TSN_ACKED);
2686                                                 }
2687                                                 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2688                                                         sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2689                                                             tp1->whoTo->flight_size,
2690                                                             tp1->book_size,
2691                                                             (uintptr_t) tp1->whoTo,
2692                                                             tp1->rec.data.TSN_seq);
2693                                                 }
2694                                                 sctp_flight_size_decrease(tp1);
2695                                                 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2696                                                         (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2697                                                             tp1);
2698                                                 }
2699                                                 sctp_total_flight_decrease(stcb, tp1);
2700
2701                                                 tp1->whoTo->net_ack += tp1->send_size;
2702                                                 if (tp1->snd_count < 2) {
2703                                                         /*-
2704                                                          * True non-retransmitted chunk
2705                                                          */
2706                                                         tp1->whoTo->net_ack2 += tp1->send_size;
2707
2708                                                         /*-
2709                                                          * update RTO too?
2710                                                          */
2711                                                         if (tp1->do_rtt) {
2712                                                                 if (*rto_ok) {
2713                                                                         tp1->whoTo->RTO =
2714                                                                             sctp_calculate_rto(stcb,
2715                                                                             &stcb->asoc,
2716                                                                             tp1->whoTo,
2717                                                                             &tp1->sent_rcv_time,
2718                                                                             sctp_align_safe_nocopy,
2719                                                                             SCTP_RTT_FROM_DATA);
2720                                                                         *rto_ok = 0;
2721                                                                 }
2722                                                                 if (tp1->whoTo->rto_needed == 0) {
2723                                                                         tp1->whoTo->rto_needed = 1;
2724                                                                 }
2725                                                                 tp1->do_rtt = 0;
2726                                                         }
2727                                                 }
2728                                         }
2729                                         if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2730                                                 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2731                                                     stcb->asoc.this_sack_highest_gap)) {
2732                                                         stcb->asoc.this_sack_highest_gap =
2733                                                             tp1->rec.data.TSN_seq;
2734                                                 }
2735                                                 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2736                                                         sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2737 #ifdef SCTP_AUDITING_ENABLED
2738                                                         sctp_audit_log(0xB2,
2739                                                             (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2740 #endif
2741                                                 }
2742                                         }
2743                                         /*-
2744                                          * All chunks NOT UNSENT fall through here and are marked
2745                                          * (though leave alone the PR-SCTP ones that are to be skipped)
2746                                          */
2747                                         if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
2748                                             (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2749                                                 tp1->sent = SCTP_DATAGRAM_MARKED;
2750                                         }
2751                                         if (tp1->rec.data.chunk_was_revoked) {
2752                                                 /* deflate the cwnd */
2753                                                 tp1->whoTo->cwnd -= tp1->book_size;
2754                                                 tp1->rec.data.chunk_was_revoked = 0;
2755                                         }
2756                                         /* NR Sack code here */
2757                                         if (nr_sacking &&
2758                                             (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2759                                                 if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
2760                                                         stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
2761 #ifdef INVARIANTS
2762                                                 } else {
2763                                                         panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
2764 #endif
2765                                                 }
2766                                                 if ((stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
2767                                                     (stcb->asoc.strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
2768                                                     TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.stream_number].outqueue)) {
2769                                                         stcb->asoc.trigger_reset = 1;
2770                                                 }
2771                                                 tp1->sent = SCTP_DATAGRAM_NR_ACKED;
2772                                                 if (tp1->data) {
2773                                                         /*
2774                                                          * sa_ignore
2775                                                          * NO_NULL_CHK
2776                                                          */
2777                                                         sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
2778                                                         sctp_m_freem(tp1->data);
2779                                                         tp1->data = NULL;
2780                                                 }
2781                                                 wake_him++;
2782                                         }
2783                                 }
2784                                 break;
2785                         }       /* if (tp1->TSN_seq == theTSN) */
2786                         if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
2787                                 break;
2788                         }
2789                         tp1 = TAILQ_NEXT(tp1, sctp_next);
2790                         if ((tp1 == NULL) && (circled == 0)) {
2791                                 circled++;
2792                                 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2793                         }
2794                 }               /* end while (tp1) */
2795                 if (tp1 == NULL) {
2796                         circled = 0;
2797                         tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2798                 }
2799                 /* In case the fragments were not in order we must reset */
2800         }                       /* end for (j = fragStart */
2801         *p_tp1 = tp1;
2802         return (wake_him);      /* Return value only used for nr-sack */
2803 }
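
/*
 * A non-compiled sketch of how the function above maps a Gap Ack Block to
 * TSNs: the block's start and end are offsets relative to the cumulative
 * TSN ack (last_tsn above), so a block with start=2 and end=4 on a cum-ack
 * of 1000 covers TSNs 1002 through 1004.  The helper is illustrative only.
 */
#if 0
static void
sketch_gap_block_to_tsns(uint32_t cum_ack, uint16_t frag_strt,
    uint16_t frag_end, uint32_t *first_tsn, uint32_t *final_tsn)
{
	*first_tsn = cum_ack + frag_strt;
	*final_tsn = cum_ack + frag_end;
}
#endif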
2804
2805
2806 static int
2807 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
2808     uint32_t last_tsn, uint32_t * biggest_tsn_acked,
2809     uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
2810     int num_seg, int num_nr_seg, int *rto_ok)
2811 {
2812         struct sctp_gap_ack_block *frag, block;
2813         struct sctp_tmit_chunk *tp1;
2814         int i;
2815         int num_frs = 0;
2816         int chunk_freed;
2817         int non_revocable;
2818         uint16_t frag_strt, frag_end, prev_frag_end;
2819
2820         tp1 = TAILQ_FIRST(&asoc->sent_queue);
2821         prev_frag_end = 0;
2822         chunk_freed = 0;
2823
2824         for (i = 0; i < (num_seg + num_nr_seg); i++) {
2825                 if (i == num_seg) {
2826                         prev_frag_end = 0;
2827                         tp1 = TAILQ_FIRST(&asoc->sent_queue);
2828                 }
2829                 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
2830                     sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
2831                 *offset += sizeof(block);
2832                 if (frag == NULL) {
2833                         return (chunk_freed);
2834                 }
2835                 frag_strt = ntohs(frag->start);
2836                 frag_end = ntohs(frag->end);
2837
2838                 if (frag_strt > frag_end) {
2839                         /* This gap report is malformed, skip it. */
2840                         continue;
2841                 }
2842                 if (frag_strt <= prev_frag_end) {
2843                         /* This gap report is not in order, so restart. */
2844                         tp1 = TAILQ_FIRST(&asoc->sent_queue);
2845                 }
2846                 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
2847                         *biggest_tsn_acked = last_tsn + frag_end;
2848                 }
2849                 if (i < num_seg) {
2850                         non_revocable = 0;
2851                 } else {
2852                         non_revocable = 1;
2853                 }
2854                 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
2855                     non_revocable, &num_frs, biggest_newly_acked_tsn,
2856                     this_sack_lowest_newack, rto_ok)) {
2857                         chunk_freed = 1;
2858                 }
2859                 prev_frag_end = frag_end;
2860         }
2861         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
2862                 if (num_frs)
2863                         sctp_log_fr(*biggest_tsn_acked,
2864                             *biggest_newly_acked_tsn,
2865                             last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
2866         }
2867         return (chunk_freed);
2868 }
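
/*
 * A non-compiled sketch of the per-block sanity checks above: a block whose
 * start is greater than its end is malformed and ignored, and a block that
 * does not start beyond the previous block's end is out of order, which
 * forces the walk of the sent queue to restart from its head.  The helper
 * name is illustrative only.
 */
#if 0
static int
sketch_gap_block_usable(uint16_t frag_strt, uint16_t frag_end,
    uint16_t prev_frag_end, int *restart_walk)
{
	if (frag_strt > frag_end) {
		return (0);	/* malformed: skip this block */
	}
	*restart_walk = (frag_strt <= prev_frag_end) ? 1 : 0;
	return (1);		/* process this block */
}
#endif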
2869
2870 static void
2871 sctp_check_for_revoked(struct sctp_tcb *stcb,
2872     struct sctp_association *asoc, uint32_t cumack,
2873     uint32_t biggest_tsn_acked)
2874 {
2875         struct sctp_tmit_chunk *tp1;
2876
2877         TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
2878                 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
2879                         /*
2880                          * ok, this guy is either ACKED or MARKED. If it is
2881                          * ACKED it has been previously acked but not this
2882                          * time, i.e. revoked.  If it is MARKED it was ACK'ed
2883                          * again.
2884                          */
2885                         if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
2886                                 break;
2887                         }
2888                         if (tp1->sent == SCTP_DATAGRAM_ACKED) {
2889                                 /* it has been revoked */
2890                                 tp1->sent = SCTP_DATAGRAM_SENT;
2891                                 tp1->rec.data.chunk_was_revoked = 1;
2892                                 /*
2893                                  * We must add this stuff back in to assure
2894                                  * timers and such get started.
2895                                  */
2896                                 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2897                                         sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
2898                                             tp1->whoTo->flight_size,
2899                                             tp1->book_size,
2900                                             (uintptr_t) tp1->whoTo,
2901                                             tp1->rec.data.TSN_seq);
2902                                 }
2903                                 sctp_flight_size_increase(tp1);
2904                                 sctp_total_flight_increase(stcb, tp1);
2905                                 /*
2906                                  * We inflate the cwnd to compensate for our
2907                                  * artificial inflation of the flight_size.
2908                                  */
2909                                 tp1->whoTo->cwnd += tp1->book_size;
2910                                 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2911                                         sctp_log_sack(asoc->last_acked_seq,
2912                                             cumack,
2913                                             tp1->rec.data.TSN_seq,
2914                                             0,
2915                                             0,
2916                                             SCTP_LOG_TSN_REVOKED);
2917                                 }
2918                         } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
2919                                 /* it has been re-acked in this SACK */
2920                                 tp1->sent = SCTP_DATAGRAM_ACKED;
2921                         }
2922                 }
2923                 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
2924                         break;
2925         }
2926 }
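
/*
 * A non-compiled sketch of the per-chunk rule applied above to every TSN
 * between the new cum-ack and the biggest TSN acked by this SACK: a chunk
 * still in the ACKED state was gap-acked by an earlier SACK but not by this
 * one, so that ack is treated as revoked and the chunk goes back to SENT
 * (and back into the flight size); a chunk MARKED while processing this
 * SACK is promoted to ACKED.  The helper name is illustrative only.
 */
#if 0
static int
sketch_revoke_transition(int sent_state)
{
	if (sent_state == SCTP_DATAGRAM_ACKED) {
		return (SCTP_DATAGRAM_SENT);	/* revoked */
	}
	if (sent_state == SCTP_DATAGRAM_MARKED) {
		return (SCTP_DATAGRAM_ACKED);	/* re-acked by this SACK */
	}
	return (sent_state);
}
#endif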
2927
2928
2929 static void
2930 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
2931     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
2932 {
2933         struct sctp_tmit_chunk *tp1;
2934         int strike_flag = 0;
2935         struct timeval now;
2936         int tot_retrans = 0;
2937         uint32_t sending_seq;
2938         struct sctp_nets *net;
2939         int num_dests_sacked = 0;
2940
2941         /*
2942          * Select the sending_seq: this is either the next thing ready to be
2943          * sent but not yet transmitted, or the next seq we will assign.
2944          */
2945         tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
2946         if (tp1 == NULL) {
2947                 sending_seq = asoc->sending_seq;
2948         } else {
2949                 sending_seq = tp1->rec.data.TSN_seq;
2950         }
2951
2952         /* CMT DAC algo: finding out if SACK is a mixed SACK */
2953         if ((asoc->sctp_cmt_on_off > 0) &&
2954             SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
2955                 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
2956                         if (net->saw_newack)
2957                                 num_dests_sacked++;
2958                 }
2959         }
2960         if (stcb->asoc.prsctp_supported) {
2961                 (void)SCTP_GETTIME_TIMEVAL(&now);
2962         }
2963         TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
2964                 strike_flag = 0;
2965                 if (tp1->no_fr_allowed) {
2966                         /* this one had a timeout or something */
2967                         continue;
2968                 }
2969                 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
2970                         if (tp1->sent < SCTP_DATAGRAM_RESEND)
2971                                 sctp_log_fr(biggest_tsn_newly_acked,
2972                                     tp1->rec.data.TSN_seq,
2973                                     tp1->sent,
2974                                     SCTP_FR_LOG_CHECK_STRIKE);
2975                 }
2976                 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
2977                     tp1->sent == SCTP_DATAGRAM_UNSENT) {
2978                         /* done */
2979                         break;
2980                 }
2981                 if (stcb->asoc.prsctp_supported) {
2982                         if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
2983                                 /* Is it expired? */
2984                                 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
2985                                         /* Yes so drop it */
2986                                         if (tp1->data != NULL) {
2987                                                 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
2988                                                     SCTP_SO_NOT_LOCKED);
2989                                         }
2990                                         continue;
2991                                 }
2992                         }
2993                 }
2994                 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
2995                         /* we are beyond the tsn in the sack  */
2996                         break;
2997                 }
2998                 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
2999                         /* either a RESEND, ACKED, or MARKED */
3000                         /* skip */
3001                         if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3002                                 /* Continue striking FWD-TSN chunks */
3003                                 tp1->rec.data.fwd_tsn_cnt++;
3004                         }
3005                         continue;
3006                 }
3007                 /*
3008                  * CMT: SFR algo (covers part of DAC and HTNA as well)
3009                  */
3010                 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3011                         /*
3012                          * No new acks were received for data sent to this
3013                          * dest. Therefore, according to the SFR algo for
3014                          * CMT, no data sent to this dest can be marked for
3015                          * FR using this SACK.
3016                          */
3017                         continue;
3018                 } else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3019                     tp1->whoTo->this_sack_highest_newack)) {
3020                         /*
3021                          * CMT: New acks were received for data sent to
3022                          * this dest. But no new acks were seen for data
3023                          * sent after tp1. Therefore, according to the SFR
3024                          * algo for CMT, tp1 cannot be marked for FR using
3025                          * this SACK. This step covers part of the DAC algo
3026                          * and the HTNA algo as well.
3027                          */
3028                         continue;
3029                 }
3030                 /*
3031                  * Here we check to see if we have already done a FR,
3032                  * and if so we see if the biggest TSN we saw in the sack is
3033                  * smaller than the recovery point. If so we don't strike
3034                  * the tsn... otherwise we CAN strike the TSN.
3035                  */
3036                 /*
3037                  * @@@ JRI: Check for CMT if (accum_moved &&
3038                  * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3039                  * 0)) {
3040                  */
3041                 if (accum_moved && asoc->fast_retran_loss_recovery) {
3042                         /*
3043                          * Strike the TSN if in fast-recovery and cum-ack
3044                          * moved.
3045                          */
3046                         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3047                                 sctp_log_fr(biggest_tsn_newly_acked,
3048                                     tp1->rec.data.TSN_seq,
3049                                     tp1->sent,
3050                                     SCTP_FR_LOG_STRIKE_CHUNK);
3051                         }
3052                         if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3053                                 tp1->sent++;
3054                         }
3055                         if ((asoc->sctp_cmt_on_off > 0) &&
3056                             SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3057                                 /*
3058                                  * CMT DAC algorithm: If SACK flag is set to
3059                                  * 0, then lowest_newack test will not pass
3060                                  * because it would have been set to the
3061                                  * cumack earlier. If it is not already to be
3062                                  * rtx'd, not a mixed sack, and tp1 is
3063                                  * not between two sacked TSNs, then mark by
3064                                  * one more. NOTE that we are marking by one
3065                                  * additional time since the SACK DAC flag
3066                                  * indicates that two packets have been
3067                                  * received after this missing TSN.
3068                                  */
3069                                 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3070                                     SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3071                                         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3072                                                 sctp_log_fr(16 + num_dests_sacked,
3073                                                     tp1->rec.data.TSN_seq,
3074                                                     tp1->sent,
3075                                                     SCTP_FR_LOG_STRIKE_CHUNK);
3076                                         }
3077                                         tp1->sent++;
3078                                 }
3079                         }
3080                 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3081                     (asoc->sctp_cmt_on_off == 0)) {
3082                         /*
3083                          * For those that have done a FR we must take
3084                          * special consideration if we strike. I.e. the
3085                          * biggest_newly_acked must be higher than the
3086                          * sending_seq at the time we did the FR.
3087                          */
3088                         if (
3089 #ifdef SCTP_FR_TO_ALTERNATE
3090                         /*
3091                          * If FR's go to new networks, then we must only do
3092                          * this for singly homed asoc's. However if the FR's
3093                          * go to the same network (Armando's work) then it is
3094                          * ok to FR multiple times.
3095                          */
3096                             (asoc->numnets < 2)
3097 #else
3098                             (1)
3099 #endif
3100                             ) {
3101
3102                                 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3103                                     tp1->rec.data.fast_retran_tsn)) {
3104                                         /*
3105                                          * Strike the TSN, since this ack is
3106                                          * beyond where things were when we
3107                                          * did a FR.
3108                                          */
3109                                         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3110                                                 sctp_log_fr(biggest_tsn_newly_acked,
3111                                                     tp1->rec.data.TSN_seq,
3112                                                     tp1->sent,
3113                                                     SCTP_FR_LOG_STRIKE_CHUNK);
3114                                         }
3115                                         if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3116                                                 tp1->sent++;
3117                                         }
3118                                         strike_flag = 1;
3119                                         if ((asoc->sctp_cmt_on_off > 0) &&
3120                                             SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3121                                                 /*
3122                                                  * CMT DAC algorithm: If
3123                                                  * SACK flag is set to 0,
3124                                                  * then lowest_newack test
3125                                                  * will not pass because it
3126                                                  * would have been set to
3127                                                  * the cumack earlier. If it
3128                                                  * is not already to be rtx'd,
3129                                                  * not a mixed sack, and
3130                                                  * tp1 is not between two
3131                                                  * sacked TSNs, then mark by
3132                                                  * one more. NOTE that we
3133                                                  * are marking by one
3134                                                  * additional time since the
3135                                                  * SACK DAC flag indicates
3136                                                  * that two packets have
3137                                                  * been received after this
3138                                                  * missing TSN.
3139                                                  */
3140                                                 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3141                                                     (num_dests_sacked == 1) &&
3142                                                     SCTP_TSN_GT(this_sack_lowest_newack,
3143                                                     tp1->rec.data.TSN_seq)) {
3144                                                         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3145                                                                 sctp_log_fr(32 + num_dests_sacked,
3146                                                                     tp1->rec.data.TSN_seq,
3147                                                                     tp1->sent,
3148                                                                     SCTP_FR_LOG_STRIKE_CHUNK);
3149                                                         }
3150                                                         if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3151                                                                 tp1->sent++;
3152                                                         }
3153                                                 }
3154                                         }
3155                                 }
3156                         }
3157                         /*
3158                          * JRI: TODO: remove code for HTNA algo. CMT's SFR
3159                          * algo covers HTNA.
3160                          */
3161                 } else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3162                     biggest_tsn_newly_acked)) {
3163                         /*
3164                          * We don't strike these: this is the HTNA
3165                          * algorithm, i.e. we don't strike if our TSN is
3166                          * larger than the Highest TSN Newly Acked.
3167                          */
3168                         ;
3169                 } else {
3170                         /* Strike the TSN */
3171                         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3172                                 sctp_log_fr(biggest_tsn_newly_acked,
3173                                     tp1->rec.data.TSN_seq,
3174                                     tp1->sent,
3175                                     SCTP_FR_LOG_STRIKE_CHUNK);
3176                         }
3177                         if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3178                                 tp1->sent++;
3179                         }
3180                         if ((asoc->sctp_cmt_on_off > 0) &&
3181                             SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3182                                 /*
3183                                  * CMT DAC algorithm: If SACK flag is set to
3184                                  * 0, then lowest_newack test will not pass
3185                                  * because it would have been set to the
3186                                  * cumack earlier. If it is not already to be
3187                                  * rtx'd, not a mixed sack, and tp1 is
3188                                  * not between two sacked TSNs, then mark by
3189                                  * one more. NOTE that we are marking by one
3190                                  * additional time since the SACK DAC flag
3191                                  * indicates that two packets have been
3192                                  * received after this missing TSN.
3193                                  */
3194                                 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3195                                     SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3196                                         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3197                                                 sctp_log_fr(48 + num_dests_sacked,
3198                                                     tp1->rec.data.TSN_seq,
3199                                                     tp1->sent,
3200                                                     SCTP_FR_LOG_STRIKE_CHUNK);
3201                                         }
3202                                         tp1->sent++;
3203                                 }
3204                         }
3205                 }
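                /*
                 * If the strikes above pushed tp1 up to SCTP_DATAGRAM_RESEND,
                 * the block below takes it out of flight: the peer's rwnd is
                 * credited back, the flight counters are reduced, the PR-SCTP
                 * retransmission limit is applied, a destination for the
                 * retransmission is chosen (CMT policy or the
                 * SCTP_FR_TO_ALTERNATE option) and the fast-retransmit state
                 * is recorded for use by later SACKs.
                 */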
3206                 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3207                         struct sctp_nets *alt;
3208
3209                         /* fix counts and things */
3210                         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3211                                 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3212                                     (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3213                                     tp1->book_size,
3214                                     (uintptr_t) tp1->whoTo,
3215                                     tp1->rec.data.TSN_seq);
3216                         }
3217                         if (tp1->whoTo) {
3218                                 tp1->whoTo->net_ack++;
3219                                 sctp_flight_size_decrease(tp1);
3220                                 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3221                                         (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3222                                             tp1);
3223                                 }
3224                         }
3225                         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3226                                 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3227                                     asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3228                         }
3229                         /* add back to the rwnd */
3230                         asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3231
3232                         /* remove from the total flight */
3233                         sctp_total_flight_decrease(stcb, tp1);
3234
3235                         if ((stcb->asoc.prsctp_supported) &&
3236                             (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3237                                 /*
3238                                  * Has it been retransmitted tv_sec times? -
3239                                  * we store the retran count there.
3240                                  */
3241                                 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3242                                         /* Yes, so drop it */
3243                                         if (tp1->data != NULL) {
3244                                                 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3245                                                     SCTP_SO_NOT_LOCKED);
3246                                         }
3247                                         /* Make sure to flag we had a FR */
3248                                         tp1->whoTo->net_ack++;
3249                                         continue;
3250                                 }
3251                         }
3252                         /*
3253                          * SCTP_PRINTF("OK, we are now ready to FR this
3254                          * guy\n");
3255                          */
3256                         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3257                                 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3258                                     0, SCTP_FR_MARKED);
3259                         }
3260                         if (strike_flag) {
3261                                 /* This is a subsequent FR */
3262                                 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3263                         }
3264                         sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3265                         if (asoc->sctp_cmt_on_off > 0) {
3266                                 /*
3267                                  * CMT: Using RTX_SSTHRESH policy for CMT.
3268                                  * If CMT is being used, then pick dest with
3269                                  * largest ssthresh for any retransmission.
3270                                  */
3271                                 tp1->no_fr_allowed = 1;
3272                                 alt = tp1->whoTo;
3273                                 /* sa_ignore NO_NULL_CHK */
3274                                 if (asoc->sctp_cmt_pf > 0) {
3275                                         /*
3276                                          * JRS 5/18/07 - If CMT PF is on,
3277                                          * use the PF version of
3278                                          * find_alt_net()
3279                                          */
3280                                         alt = sctp_find_alternate_net(stcb, alt, 2);
3281                                 } else {
3282                                         /*
3283                                          * JRS 5/18/07 - If only CMT is on,
3284                                          * use the CMT version of
3285                                          * find_alt_net()
3286                                          */
3287                                         /* sa_ignore NO_NULL_CHK */
3288                                         alt = sctp_find_alternate_net(stcb, alt, 1);
3289                                 }
3290                                 if (alt == NULL) {
3291                                         alt = tp1->whoTo;
3292                                 }
3293                                 /*
3294                                  * CUCv2: If a different dest is picked for
3295                                  * the retransmission, then new
3296                                  * (rtx-)pseudo_cumack needs to be tracked
3297                                  * for orig dest. Let CUCv2 track new (rtx-)
3298                                  * pseudo-cumack always.
3299                                  */
3300                                 if (tp1->whoTo) {
3301                                         tp1->whoTo->find_pseudo_cumack = 1;
3302                                         tp1->whoTo->find_rtx_pseudo_cumack = 1;
3303                                 }
3304                         } else {/* CMT is OFF */
3305
3306 #ifdef SCTP_FR_TO_ALTERNATE
3307                                 /* Can we find an alternate? */
3308                                 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3309 #else
3310                                 /*
3311                                  * default behavior is to NOT retransmit
3312                                  * FR's to an alternate. Armando Caro's
3313                                  * paper details why.
3314                                  */
3315                                 alt = tp1->whoTo;
3316 #endif
3317                         }
3318
3319                         tp1->rec.data.doing_fast_retransmit = 1;
3320                         tot_retrans++;
3321                         /* mark the sending seq for possible subsequent FR's */
3322                         /*
3323                          * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3324                          * (uint32_t)tpi->rec.data.TSN_seq);
3325                          */
3326                         if (TAILQ_EMPTY(&asoc->send_queue)) {
3327                                 /*
3328                                  * If the send queue is empty then this is
3329                                  * the next sequence number that will be
3330                                  * assigned, so we subtract one from this to
3331                                  * get the one we last sent.
3332                                  */
3333                                 tp1->rec.data.fast_retran_tsn = sending_seq;
3334                         } else {
3335                                 /*
3336                                  * If there are chunks on the send queue
3337                                  * (unsent data that has made it from the
3338                                  * stream queues but not out the door), we
3339                                  * take the first one (which will have the
3340                                  * lowest TSN) and subtract one to get the
3341                                  * one we last sent.
3342                                  */
3343                                 struct sctp_tmit_chunk *ttt;
3344
3345                                 ttt = TAILQ_FIRST(&asoc->send_queue);
3346                                 tp1->rec.data.fast_retran_tsn =
3347                                     ttt->rec.data.TSN_seq;
3348                         }
3349
3350                         if (tp1->do_rtt) {
3351                                 /*
3352                                  * this guy had an RTO calculation pending on
3353                                  * it, cancel it
3354                                  */
3355                                 if ((tp1->whoTo != NULL) &&
3356                                     (tp1->whoTo->rto_needed == 0)) {
3357                                         tp1->whoTo->rto_needed = 1;
3358                                 }
3359                                 tp1->do_rtt = 0;
3360                         }
3361                         if (alt != tp1->whoTo) {
3362                                 /* yes, there is an alternate. */
3363                                 sctp_free_remote_addr(tp1->whoTo);
3364                                 /* sa_ignore FREED_MEMORY */
3365                                 tp1->whoTo = alt;
3366                                 atomic_add_int(&alt->ref_count, 1);
3367                         }
3368                 }
3369         }
3370 }
3371
3372 struct sctp_tmit_chunk *
3373 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3374     struct sctp_association *asoc)
3375 {
3376         struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3377         struct timeval now;
3378         int now_filled = 0;
3379
3380         if (asoc->prsctp_supported == 0) {
3381                 return (NULL);
3382         }
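        /*
         * Walk the sent queue from the front. The advanced peer ack point
         * can only be moved across chunks marked FORWARD_TSN_SKIP, RESEND
         * (PR-SCTP chunks whose lifetime may have expired) or NR_ACKED; the
         * first chunk that does not qualify ends the walk.
         */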
3383         TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3384                 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3385                     tp1->sent != SCTP_DATAGRAM_RESEND &&
3386                     tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3387                         /* no chance to advance, out of here */
3388                         break;
3389                 }
3390                 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3391                         if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3392                             (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3393                                 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3394                                     asoc->advanced_peer_ack_point,
3395                                     tp1->rec.data.TSN_seq, 0, 0);
3396                         }
3397                 }
3398                 if (!PR_SCTP_ENABLED(tp1->flags)) {
3399                         /*
3400                          * We can't fwd-tsn past any that are reliable, i.e.
3401                          * retransmitted until the asoc fails.
3402                          */
3403                         break;
3404                 }
3405                 if (!now_filled) {
3406                         (void)SCTP_GETTIME_TIMEVAL(&now);
3407                         now_filled = 1;
3408                 }
3409                 /*
3410                  * Now we have a chunk which is marked for another
3411                  * retransmission to a PR-stream but may have run out of its
3412                  * chances already OR has been marked to skip now. Can we
3413                  * skip it if it is a resend?
3414                  */
3415                 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3416                     (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3417                         /*
3418                          * Now is this one marked for resend and its time is
3419                          * now up?
3420                          */
3421                         if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3422                                 /* Yes so drop it */
3423                                 if (tp1->data) {
3424                                         (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3425                                             1, SCTP_SO_NOT_LOCKED);
3426                                 }
3427                         } else {
3428                                 /*
3429                                  * No, we are done when we hit one for resend
3430                                  * whose time has not expired.
3431                                  */
3432                                 break;
3433                         }
3434                 }
3435                 /*
3436                  * Ok, now if this chunk is marked to drop we can clean up
3437                  * the chunk, advance our peer ack point and check
3438                  * the next chunk.
3439                  */
3440                 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3441                     (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3442                         /* the advanced PeerAckPoint goes forward */
3443                         if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3444                                 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3445                                 a_adv = tp1;
3446                         } else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3447                                 /* No update but we do save the chk */
3448                                 a_adv = tp1;
3449                         }
3450                 } else {
3451                         /*
3452                          * If it is still in RESEND we can advance no
3453                          * further
3454                          */
3455                         break;
3456                 }
3457         }
3458         return (a_adv);
3459 }
3460
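/*
 * Audit the sent queue against the flight-size accounting: count the chunks
 * that are still in flight, marked for resend, acked or somewhere in between,
 * and report the inconsistency (panicking under INVARIANTS) if any chunk is
 * still counted as in flight or in between.
 */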
3461 static int
3462 sctp_fs_audit(struct sctp_association *asoc)
3463 {
3464         struct sctp_tmit_chunk *chk;
3465         int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3466         int ret;
3467
3468 #ifndef INVARIANTS
3469         int entry_flight, entry_cnt;
3470
3471 #endif
3472
3473         ret = 0;
3474 #ifndef INVARIANTS
3475         entry_flight = asoc->total_flight;
3476         entry_cnt = asoc->total_flight_count;
3477 #endif
3478         if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3479                 return (0);
3480
3481         TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3482                 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3483                         SCTP_PRINTF("Chk TSN:%u size:%d inflight cnt:%d\n",
3484                             chk->rec.data.TSN_seq,
3485                             chk->send_size,
3486                             chk->snd_count);
3487                         inflight++;
3488                 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3489                         resend++;
3490                 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3491                         inbetween++;
3492                 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3493                         above++;
3494                 } else {
3495                         acked++;
3496                 }
3497         }
3498
3499         if ((inflight > 0) || (inbetween > 0)) {
3500 #ifdef INVARIANTS
3501                 panic("Flight size-express incorrect? \n");
3502 #else
3503                 SCTP_PRINTF("asoc->total_flight:%d cnt:%d\n",
3504                     entry_flight, entry_cnt);
3505
3506                 SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3507                     inflight, inbetween, resend, above, acked);
3508                 ret = 1;
3509 #endif
3510         }
3511         return (ret);
3512 }
3513
3514
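/*
 * Pull a chunk that was sent as a window probe back for retransmission once
 * the peer's window has reopened: unless it has already been acked (or its
 * data is gone), remove its flight-size contribution and mark it
 * SCTP_DATAGRAM_RESEND.
 */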
3515 static void
3516 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3517     struct sctp_association *asoc,
3518     struct sctp_tmit_chunk *tp1)
3519 {
3520         tp1->window_probe = 0;
3521         if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3522                 /* TSNs skipped, we do NOT move back. */
3523                 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3524                     tp1->whoTo ? tp1->whoTo->flight_size : 0,
3525                     tp1->book_size,
3526                     (uintptr_t) tp1->whoTo,
3527                     tp1->rec.data.TSN_seq);
3528                 return;
3529         }
3530         /* First setup this by shrinking flight */
3531         if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3532                 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3533                     tp1);
3534         }
3535         sctp_flight_size_decrease(tp1);
3536         sctp_total_flight_decrease(stcb, tp1);
3537         /* Now mark for resend */
3538         tp1->sent = SCTP_DATAGRAM_RESEND;
3539         sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3540
3541         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3542                 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3543                     tp1->whoTo->flight_size,
3544                     tp1->book_size,
3545                     (uintptr_t) tp1->whoTo,
3546                     tp1->rec.data.TSN_seq);
3547         }
3548 }
3549
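/*
 * Express SACK handling: the fast path for a SACK that carries only a new
 * cumulative ack and an advertised rwnd (no gap-ack or duplicate blocks are
 * passed in), so just the cum-ack, rwnd and timer bookkeeping is done here.
 */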
3550 void
3551 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3552     uint32_t rwnd, int *abort_now, int ecne_seen)
3553 {
3554         struct sctp_nets *net;
3555         struct sctp_association *asoc;
3556         struct sctp_tmit_chunk *tp1, *tp2;
3557         uint32_t old_rwnd;
3558         int win_probe_recovery = 0;
3559         int win_probe_recovered = 0;
3560         int j, done_once = 0;
3561         int rto_ok = 1;
3562
3563         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3564                 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3565                     rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3566         }
3567         SCTP_TCB_LOCK_ASSERT(stcb);
3568 #ifdef SCTP_ASOCLOG_OF_TSNS
3569         stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3570         stcb->asoc.cumack_log_at++;
3571         if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3572                 stcb->asoc.cumack_log_at = 0;
3573         }
3574 #endif
3575         asoc = &stcb->asoc;
3576         old_rwnd = asoc->peers_rwnd;
3577         if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3578                 /* old ack */
3579                 return;
3580         } else if (asoc->last_acked_seq == cumack) {
3581                 /* Window update sack */
3582                 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3583                     (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3584                 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3585                         /* SWS sender side engages */
3586                         asoc->peers_rwnd = 0;
3587                 }
3588                 if (asoc->peers_rwnd > old_rwnd) {
3589                         goto again;
3590                 }
3591                 return;
3592         }
3593         /* First setup for CC stuff */
3594         TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3595                 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3596                         /* Drag along the window_tsn for cwr's */
3597                         net->cwr_window_tsn = cumack;
3598                 }
3599                 net->prev_cwnd = net->cwnd;
3600                 net->net_ack = 0;
3601                 net->net_ack2 = 0;
3602
3603                 /*
3604                  * CMT: Reset CUC and Fast recovery algo variables before
3605                  * SACK processing
3606                  */
3607                 net->new_pseudo_cumack = 0;
3608                 net->will_exit_fast_recovery = 0;
3609                 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3610                         (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3611                 }
3612         }
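        /*
         * Sanity check when strict SACK checking is enabled: a cumulative
         * ack at or beyond the next TSN we would assign acknowledges data
         * that was never sent, which is a protocol violation and aborts the
         * association.
         */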
3613         if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3614                 uint32_t send_s;
3615
3616                 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3617                         tp1 = TAILQ_LAST(&asoc->sent_queue,
3618                             sctpchunk_listhead);
3619                         send_s = tp1->rec.data.TSN_seq + 1;
3620                 } else {
3621                         send_s = asoc->sending_seq;
3622                 }
3623                 if (SCTP_TSN_GE(cumack, send_s)) {
3624                         struct mbuf *op_err;
3625                         char msg[SCTP_DIAG_INFO_LEN];
3626
3627                         *abort_now = 1;
3628                         /* XXX */
3629                         snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
3630                             cumack, send_s);
3631                         op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3632                         stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23;
3633                         sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3634                         return;
3635                 }
3636         }
3637         asoc->this_sack_highest_gap = cumack;
3638         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3639                 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3640                     stcb->asoc.overall_error_count,
3641                     0,
3642                     SCTP_FROM_SCTP_INDATA,
3643                     __LINE__);
3644         }
3645         stcb->asoc.overall_error_count = 0;
3646         if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3647                 /* process the new consecutive TSN first */
3648                 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3649                         if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3650                                 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3651                                         SCTP_PRINTF("Warning, an unsent is now acked?\n");
3652                                 }
3653                                 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3654                                         /*
3655                                          * If it is less than ACKED, it is
3656                                          * now no-longer in flight. Higher
3657                                          * values may occur during marking
3658                                          */
3659                                         if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3660                                                 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3661                                                         sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3662                                                             tp1->whoTo->flight_size,
3663                                                             tp1->book_size,
3664                                                             (uintptr_t) tp1->whoTo,
3665                                                             tp1->rec.data.TSN_seq);
3666                                                 }
3667                                                 sctp_flight_size_decrease(tp1);
3668                                                 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3669                                                         (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3670                                                             tp1);
3671                                                 }
3672                                                 /* sa_ignore NO_NULL_CHK */
3673                                                 sctp_total_flight_decrease(stcb, tp1);
3674                                         }
3675                                         tp1->whoTo->net_ack += tp1->send_size;
3676                                         if (tp1->snd_count < 2) {
3677                                                 /*
3678                                                  * True non-retransmitted
3679                                                  * chunk
3680                                                  */
3681                                                 tp1->whoTo->net_ack2 +=
3682                                                     tp1->send_size;
3683
3684                                                 /* update RTO too? */
3685                                                 if (tp1->do_rtt) {
3686                                                         if (rto_ok) {
3687                                                                 tp1->whoTo->RTO =
3688                                                                 /* sa_ignore NO_NULL_CHK */
3693                                                                     sctp_calculate_rto(stcb,
3694                                                                     asoc, tp1->whoTo,
3695                                                                     &tp1->sent_rcv_time,
3696                                                                     sctp_align_safe_nocopy,
3697                                                                     SCTP_RTT_FROM_DATA);
3698                                                                 rto_ok = 0;
3699                                                         }
3700                                                         if (tp1->whoTo->rto_needed == 0) {
3701                                                                 tp1->whoTo->rto_needed = 1;
3702                                                         }
3703                                                         tp1->do_rtt = 0;
3704                                                 }
3705                                         }
3706                                         /*
3707                                          * CMT: CUCv2 algorithm. From the
3708                                          * cumack'd TSNs, for each TSN being
3709                                          * acked for the first time, set the
3710                                          * following variables for the
3711                                          * corresp destination.
3712                                          * new_pseudo_cumack will trigger a
3713                                          * cwnd update.
3714                                          * find_(rtx_)pseudo_cumack will
3715                                          * trigger search for the next
3716                                          * expected (rtx-)pseudo-cumack.
3717                                          */
3718                                         tp1->whoTo->new_pseudo_cumack = 1;
3719                                         tp1->whoTo->find_pseudo_cumack = 1;
3720                                         tp1->whoTo->find_rtx_pseudo_cumack = 1;
3721
3722                                         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3723                                                 /* sa_ignore NO_NULL_CHK */
3724                                                 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3725                                         }
3726                                 }
3727                                 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3728                                         sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3729                                 }
3730                                 if (tp1->rec.data.chunk_was_revoked) {
3731                                         /* deflate the cwnd */
3732                                         tp1->whoTo->cwnd -= tp1->book_size;
3733                                         tp1->rec.data.chunk_was_revoked = 0;
3734                                 }
3735                                 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3736                                         if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
3737                                                 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
3738 #ifdef INVARIANTS
3739                                         } else {
3740                                                 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
3741 #endif
3742                                         }
3743                                 }
3744                                 if ((asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
3745                                     (asoc->strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
3746                                     TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.stream_number].outqueue)) {
3747                                         asoc->trigger_reset = 1;
3748                                 }
3749                                 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3750                                 if (tp1->data) {
3751                                         /* sa_ignore NO_NULL_CHK */
3752                                         sctp_free_bufspace(stcb, asoc, tp1, 1);
3753                                         sctp_m_freem(tp1->data);
3754                                         tp1->data = NULL;
3755                                 }
3756                                 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3757                                         sctp_log_sack(asoc->last_acked_seq,
3758                                             cumack,
3759                                             tp1->rec.data.TSN_seq,
3760                                             0,
3761                                             0,
3762                                             SCTP_LOG_FREE_SENT);
3763                                 }
3764                                 asoc->sent_queue_cnt--;
3765                                 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
3766                         } else {
3767                                 break;
3768                         }
3769                 }
3770
3771         }
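        /*
         * With the newly cum-acked chunks freed from the sent queue, wake up
         * any sender blocked on the send socket buffer (with the extra
         * socket locking dance on __APPLE__ or when SCTP_SO_LOCK_TESTING is
         * defined).
         */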
3772         /* sa_ignore NO_NULL_CHK */
3773         if (stcb->sctp_socket) {
3774 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3775                 struct socket *so;
3776
3777 #endif
3778                 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
3779                 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3780                         /* sa_ignore NO_NULL_CHK */
3781                         sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
3782                 }
3783 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3784                 so = SCTP_INP_SO(stcb->sctp_ep);
3785                 atomic_add_int(&stcb->asoc.refcnt, 1);
3786                 SCTP_TCB_UNLOCK(stcb);
3787                 SCTP_SOCKET_LOCK(so, 1);
3788                 SCTP_TCB_LOCK(stcb);
3789                 atomic_subtract_int(&stcb->asoc.refcnt, 1);
3790                 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3791                         /* assoc was freed while we were unlocked */
3792                         SCTP_SOCKET_UNLOCK(so, 1);
3793                         return;
3794                 }
3795 #endif
3796                 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
3797 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3798                 SCTP_SOCKET_UNLOCK(so, 1);
3799 #endif
3800         } else {
3801                 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3802                         sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
3803                 }
3804         }
3805
3806         /* JRS - Use the congestion control given in the CC module */
3807         if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
3808                 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3809                         if (net->net_ack2 > 0) {
3810                                 /*
3811                                  * Karn's rule applies to clearing error
3812                                  * count, this is optional.
3813                                  */
3814                                 net->error_count = 0;
3815                                 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
3816                                         /* addr came good */
3817                                         net->dest_state |= SCTP_ADDR_REACHABLE;
3818                                         sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
3819                                             0, (void *)net, SCTP_SO_NOT_LOCKED);
3820                                 }
3821                                 if (net == stcb->asoc.primary_destination) {
3822                                         if (stcb->asoc.alternate) {
3823                                                 /*
3824                                                  * release the alternate,
3825                                                  * primary is good
3826                                                  */
3827                                                 sctp_free_remote_addr(stcb->asoc.alternate);
3828                                                 stcb->asoc.alternate = NULL;
3829                                         }
3830                                 }
3831                                 if (net->dest_state & SCTP_ADDR_PF) {
3832                                         net->dest_state &= ~SCTP_ADDR_PF;
3833                                         sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
3834                                             stcb->sctp_ep, stcb, net,
3835                                             SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
3836                                         sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
3837                                         asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
3838                                         /* Done with this net */
3839                                         net->net_ack = 0;
3840                                 }
3841                                 /* restore any doubled timers */
3842                                 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
3843                                 if (net->RTO < stcb->asoc.minrto) {
3844                                         net->RTO = stcb->asoc.minrto;
3845                                 }
3846                                 if (net->RTO > stcb->asoc.maxrto) {
3847                                         net->RTO = stcb->asoc.maxrto;
3848                                 }
3849                         }
3850                 }
3851                 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
3852         }
3853         asoc->last_acked_seq = cumack;
3854
3855         if (TAILQ_EMPTY(&asoc->sent_queue)) {
3856                 /* nothing left in-flight */
3857                 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3858                         net->flight_size = 0;
3859                         net->partial_bytes_acked = 0;
3860                 }
3861                 asoc->total_flight = 0;
3862                 asoc->total_flight_count = 0;
3863         }
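        /*
         * rwnd bookkeeping below: the peer's usable window is its advertised
         * rwnd minus what we still count as in flight, where every
         * outstanding chunk is also charged the per-chunk overhead sysctl:
         *
         *   peers_rwnd = rwnd - (total_flight +
         *                total_flight_count * sctp_peer_chunk_oh)
         *
         * (via sctp_sbspace_sub), and it is clamped to 0 when it falls below
         * the sender-side SWS threshold.
         */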
3864         /* RWND update */
3865         asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3866             (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3867         if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3868                 /* SWS sender side engages */
3869                 asoc->peers_rwnd = 0;
3870         }
3871         if (asoc->peers_rwnd > old_rwnd) {
3872                 win_probe_recovery = 1;
3873         }
3874         /* Now assure a timer where data is queued at */
3875 again:
3876         j = 0;
3877         TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3878                 int to_ticks;
3879
3880                 if (win_probe_recovery && (net->window_probe)) {
3881                         win_probe_recovered = 1;
3882                         /*
3883                          * Find first chunk that was used with window probe
3884                          * and clear the sent
3885                          */
3886                         /* sa_ignore FREED_MEMORY */
3887                         TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3888                                 if (tp1->window_probe) {
3889                                         /* move back to data send queue */
3890                                         sctp_window_probe_recovery(stcb, asoc, tp1);
3891                                         break;
3892                                 }
3893                         }
3894                 }
3895                 if (net->RTO == 0) {
3896                         to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
3897                 } else {
3898                         to_ticks = MSEC_TO_TICKS(net->RTO);
3899                 }
3900                 if (net->flight_size) {
3901                         j++;
3902                         (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
3903                             sctp_timeout_handler, &net->rxt_timer);
3904                         if (net->window_probe) {
3905                                 net->window_probe = 0;
3906                         }
3907                 } else {
3908                         if (net->window_probe) {
3909                                 /*
3910                                  * In window probes we must assure a timer
3911                                  * is still running there
3912                                  */
3913                                 net->window_probe = 0;
3914                                 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
3915                                         SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
3916                                             sctp_timeout_handler, &net->rxt_timer);
3917                                 }
3918                         } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
3919                                 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3920                                     stcb, net,
3921                                     SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
3922                         }
3923                 }
3924         }
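        /*
         * If no timer was started above, yet chunks remain on the sent queue
         * with nothing marked for retransmission, the flight accounting is
         * suspect: audit it and, if it is off, rebuild the flight counters
         * and the retransmission count from the sent queue, then make one
         * more pass over the timers.
         */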
3925         if ((j == 0) &&
3926             (!TAILQ_EMPTY(&asoc->sent_queue)) &&
3927             (asoc->sent_queue_retran_cnt == 0) &&
3928             (win_probe_recovered == 0) &&
3929             (done_once == 0)) {
3930                 /*
3931                  * huh, this should not happen unless all packets are
3932                  * PR-SCTP and marked to skip of course.
3933                  */
3934                 if (sctp_fs_audit(asoc)) {
3935                         TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3936                                 net->flight_size = 0;
3937                         }
3938                         asoc->total_flight = 0;
3939                         asoc->total_flight_count = 0;
3940                         asoc->sent_queue_retran_cnt = 0;
3941                         TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3942                                 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3943                                         sctp_flight_size_increase(tp1);
3944                                         sctp_total_flight_increase(stcb, tp1);
3945                                 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3946                                         sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3947                                 }
3948                         }
3949                 }
3950                 done_once = 1;
3951                 goto again;
3952         }
3953         /**********************************/
3954         /* Now what about shutdown issues */
3955         /**********************************/
3956         if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
3957                 /* nothing left on sendqueue.. consider done */
3958                 /* clean up */
3959                 if ((asoc->stream_queue_cnt == 1) &&
3960                     ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
3961                     (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
3962                     (asoc->locked_on_sending)
3963                     ) {
3964                         struct sctp_stream_queue_pending *sp;
3965
3966                         /*
3967                          * I may be in a state where we got it all across but
3968                          * cannot write more due to a shutdown... we abort
3969                          * since the user did not indicate EOR in this case.
3970                          * The sp will be cleaned during free of the asoc.
3971                          */
3972                         sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
3973                             sctp_streamhead);
3974                         if ((sp) && (sp->length == 0)) {
3975                                 /* Let cleanup code purge it */
3976                                 if (sp->msg_is_complete) {
3977                                         asoc->stream_queue_cnt--;
3978                                 } else {
3979                                         asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
3980                                         asoc->locked_on_sending = NULL;
3981                                         asoc->stream_queue_cnt--;
3982                                 }
3983                         }
3984                 }
3985                 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
3986                     (asoc->stream_queue_cnt == 0)) {
3987                         if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
3988                                 /* Need to abort here */
3989                                 struct mbuf *op_err;
3990
3991                 abort_out_now:
3992                                 *abort_now = 1;
3993                                 /* XXX */
3994                                 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
3995                                 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_26;
3996                                 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3997                         } else {
3998                                 struct sctp_nets *netp;
3999
4000                                 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4001                                     (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4002                                         SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4003                                 }
4004                                 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4005                                 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4006                                 sctp_stop_timers_for_shutdown(stcb);
4007                                 if (asoc->alternate) {
4008                                         netp = asoc->alternate;
4009                                 } else {
4010                                         netp = asoc->primary_destination;
4011                                 }
4012                                 sctp_send_shutdown(stcb, netp);
4013                                 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4014                                     stcb->sctp_ep, stcb, netp);
4015                                 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4016                                     stcb->sctp_ep, stcb, netp);
4017                         }
4018                 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4019                     (asoc->stream_queue_cnt == 0)) {
4020                         struct sctp_nets *netp;
4021
4022                         if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4023                                 goto abort_out_now;
4024                         }
4025                         SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4026                         SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4027                         SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4028                         sctp_stop_timers_for_shutdown(stcb);
4029                         if (asoc->alternate) {
4030                                 netp = asoc->alternate;
4031                         } else {
4032                                 netp = asoc->primary_destination;
4033                         }
4034                         sctp_send_shutdown_ack(stcb, netp);
4035                         sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4036                             stcb->sctp_ep, stcb, netp);
4037                 }
4038         }
4039         /*********************************************/
4040         /* Here we perform PR-SCTP procedures        */
4041         /* (section 4.2)                             */
4042         /*********************************************/
4043         /* C1. update advancedPeerAckPoint */
4044         if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4045                 asoc->advanced_peer_ack_point = cumack;
4046         }
4047         /* PR-SCTP issues need to be addressed too */
4048         if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4049                 struct sctp_tmit_chunk *lchk;
4050                 uint32_t old_adv_peer_ack_point;
4051
4052                 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4053                 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4054                 /* C3. See if we need to send a Fwd-TSN */
4055                 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4056                         /*
4057                          * ISSUE with ECN, see FWD-TSN processing.
4058                          */
4059                         if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4060                                 send_forward_tsn(stcb, asoc);
4061                         } else if (lchk) {
4062                                 /* try to FR fwd-tsn's that get lost too */
4063                                 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4064                                         send_forward_tsn(stcb, asoc);
4065                                 }
4066                         }
4067                 }
4068                 if (lchk) {
4069                         /* Assure a timer is up */
4070                         sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4071                             stcb->sctp_ep, stcb, lchk->whoTo);
4072                 }
4073         }
4074         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4075                 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4076                     rwnd,
4077                     stcb->asoc.peers_rwnd,
4078                     stcb->asoc.total_flight,
4079                     stcb->asoc.total_output_queue_size);
4080         }
4081 }
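/*
 * Illustrative sketch of the PR-SCTP decision made in the block above: the
 * advanced peer-ack point only matters once it is ahead of the plain
 * cum-ack, and a FORWARD-TSN is (re)sent either because the ack point
 * advanced in this pass or because an earlier FORWARD-TSN looks lost.
 * Names and types below are placeholders, not the kernel's.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static bool
should_send_forward_tsn(uint32_t adv_peer_ack_point,
    uint32_t old_adv_peer_ack_point, uint32_t cumack,
    bool have_marked_chunk, int fwd_tsn_cnt)
{
	/* C3: only useful if the ack point is ahead of the cum-ack. */
	if ((int32_t)(adv_peer_ack_point - cumack) <= 0)
		return (false);
	/* The ack point advanced in this pass: tell the peer. */
	if ((int32_t)(adv_peer_ack_point - old_adv_peer_ack_point) > 0)
		return (true);
	/* Otherwise re-send when a previous FORWARD-TSN appears lost. */
	return (have_marked_chunk && fwd_tsn_cnt >= 3);
}
#endif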
4082
4083 void
4084 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4085     struct sctp_tcb *stcb,
4086     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4087     int *abort_now, uint8_t flags,
4088     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4089 {
4090         struct sctp_association *asoc;
4091         struct sctp_tmit_chunk *tp1, *tp2;
4092         uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4093         uint16_t wake_him = 0;
4094         uint32_t send_s = 0;
4095         long j;
4096         int accum_moved = 0;
4097         int will_exit_fast_recovery = 0;
4098         uint32_t a_rwnd, old_rwnd;
4099         int win_probe_recovery = 0;
4100         int win_probe_recovered = 0;
4101         struct sctp_nets *net = NULL;
4102         int done_once;
4103         int rto_ok = 1;
4104         uint8_t reneged_all = 0;
4105         uint8_t cmt_dac_flag;
4106
4107         /*
4108          * we take any chance we can to service our queues since we cannot
4109          * get awoken when the socket is read from :<
4110          */
4111         /*
4112          * Now perform the actual SACK handling: 1) Verify that it is not an
4113          * old sack, if so discard. 2) If there is nothing left in the send
4114          * queue (cum-ack is equal to last acked) then you have a duplicate
4115          * too, update any rwnd change and verify no timers are running.
4116          * too; update any rwnd change, verify no timers are running, then
4117          * return. 3) Process any new consecutive data, i.e. cum-ack moved;
4118          * process these first and note that it moved. 4) Process any sack
4119          * blocks. 5) Drop any acked from the queue. 6) Check for any
4120          * revoked blocks and mark them. 7) Update the cwnd. 8) Nothing
4121          * left: sync up flight sizes etc., stop all timers and check for
4122          * shutdown_pending state; if so, send off the shutdown. If in
4123          * shutdown-received, send off the shutdown-ack and start that
4124          * timer and return. 9) Strike any non-acked chunks and do the FR
4125          * procedure if needed, being sure to set the FR flag. 10) Do PR-SCTP
4126          * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4127          */
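/*
 * The TSN checks used throughout this function (SCTP_TSN_GT/GE and
 * friends) are wraparound-safe serial-number comparisons on 32-bit TSNs.
 * A minimal, self-contained illustration of that idea; it is not the
 * actual macro text from sctp_constants.h.
 */
#if 0
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* a is "newer" than b iff the forward distance from b to a is < 2^31. */
static bool
tsn_gt(uint32_t a, uint32_t b)
{
	return ((a != b) && ((uint32_t)(a - b) < 0x80000000U));
}

static bool
tsn_ge(uint32_t a, uint32_t b)
{
	return ((a == b) || tsn_gt(a, b));
}

int
main(void)
{
	assert(tsn_gt(5, 2));
	assert(tsn_gt(0x00000003U, 0xfffffff0U));	/* across the wrap */
	assert(!tsn_gt(0xfffffff0U, 0x00000003U));
	assert(tsn_ge(7, 7));
	return (0);
}
#endif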
4128         SCTP_TCB_LOCK_ASSERT(stcb);
4129         /* CMT DAC algo */
4130         this_sack_lowest_newack = 0;
4131         SCTP_STAT_INCR(sctps_slowpath_sack);
4132         last_tsn = cum_ack;
4133         cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4134 #ifdef SCTP_ASOCLOG_OF_TSNS
4135         stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4136         stcb->asoc.cumack_log_at++;
4137         if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4138                 stcb->asoc.cumack_log_at = 0;
4139         }
4140 #endif
4141         a_rwnd = rwnd;
4142
4143         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4144                 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4145                     rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4146         }
4147         old_rwnd = stcb->asoc.peers_rwnd;
4148         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4149                 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4150                     stcb->asoc.overall_error_count,
4151                     0,
4152                     SCTP_FROM_SCTP_INDATA,
4153                     __LINE__);
4154         }
4155         stcb->asoc.overall_error_count = 0;
4156         asoc = &stcb->asoc;
4157         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4158                 sctp_log_sack(asoc->last_acked_seq,
4159                     cum_ack,
4160                     0,
4161                     num_seg,
4162                     num_dup,
4163                     SCTP_LOG_NEW_SACK);
4164         }
4165         if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4166                 uint16_t i;
4167                 uint32_t *dupdata, dblock;
4168
4169                 for (i = 0; i < num_dup; i++) {
4170                         dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4171                             sizeof(uint32_t), (uint8_t *) & dblock);
4172                         if (dupdata == NULL) {
4173                                 break;
4174                         }
4175                         sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4176                 }
4177         }
4178         if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4179                 /* reality check */
4180                 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4181                         tp1 = TAILQ_LAST(&asoc->sent_queue,
4182                             sctpchunk_listhead);
4183                         send_s = tp1->rec.data.TSN_seq + 1;
4184                 } else {
4185                         tp1 = NULL;
4186                         send_s = asoc->sending_seq;
4187                 }
4188                 if (SCTP_TSN_GE(cum_ack, send_s)) {
4189                         struct mbuf *op_err;
4190                         char msg[SCTP_DIAG_INFO_LEN];
4191
4192                         /*
4193                          * no way, we have not even sent this TSN out yet.
4194                          * Peer is hopelessly messed up with us.
4195                          */
4196                         SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4197                             cum_ack, send_s);
4198                         if (tp1) {
4199                                 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1:%p\n",
4200                                     tp1->rec.data.TSN_seq, (void *)tp1);
4201                         }
4202         hopeless_peer:
4203                         *abort_now = 1;
4204                         /* XXX */
4205                         snprintf(msg, sizeof(msg), "Cum ack %8.8x greater than or equal to TSN %8.8x",
4206                             cum_ack, send_s);
4207                         op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4208                         stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_27;
4209                         sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4210                         return;
4211                 }
4212         }
4213         /**********************/
4214         /* 1) check the range */
4215         /**********************/
4216         if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4217                 /* acking something behind */
4218                 return;
4219         }
4220         /* update the Rwnd of the peer */
4221         if (TAILQ_EMPTY(&asoc->sent_queue) &&
4222             TAILQ_EMPTY(&asoc->send_queue) &&
4223             (asoc->stream_queue_cnt == 0)) {
4224                 /* nothing left on send/sent and strmq */
4225                 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4226                         sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4227                             asoc->peers_rwnd, 0, 0, a_rwnd);
4228                 }
4229                 asoc->peers_rwnd = a_rwnd;
4230                 if (asoc->sent_queue_retran_cnt) {
4231                         asoc->sent_queue_retran_cnt = 0;
4232                 }
4233                 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4234                         /* SWS sender side engages */
4235                         asoc->peers_rwnd = 0;
4236                 }
4237                 /* stop any timers */
4238                 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4239                         sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4240                             stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4241                         net->partial_bytes_acked = 0;
4242                         net->flight_size = 0;
4243                 }
4244                 asoc->total_flight = 0;
4245                 asoc->total_flight_count = 0;
4246                 return;
4247         }
4248         /*
4249          * We init net_ack and net_ack2 to 0. These are used to track two
4250          * things. The total byte count acked is tracked in net_ack AND
4251          * net_ack2 is used to track the total bytes acked that are
4252          * unambiguous and were never retransmitted. We track these on a
4253          * per-destination-address basis.
4254          */
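/*
 * A minimal sketch of how the two per-destination counters just reset
 * will diverge while the cum-ack is processed below: every newly acked
 * byte lands in net_ack, while net_ack2 only counts bytes from chunks
 * that were never retransmitted (snd_count < 2), i.e. the unambiguous
 * ones.  Placeholder types, not the kernel structures.
 */
#if 0
#include <stdint.h>

struct dest_acct {
	uint32_t net_ack;	/* all bytes newly acked on this destination */
	uint32_t net_ack2;	/* only unambiguous, never-retransmitted bytes */
};

static void
account_newly_acked(struct dest_acct *d, uint32_t send_size, int snd_count)
{
	d->net_ack += send_size;
	if (snd_count < 2)
		d->net_ack2 += send_size;
}
#endif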
4255         TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4256                 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4257                         /* Drag along the window_tsn for cwr's */
4258                         net->cwr_window_tsn = cum_ack;
4259                 }
4260                 net->prev_cwnd = net->cwnd;
4261                 net->net_ack = 0;
4262                 net->net_ack2 = 0;
4263
4264                 /*
4265                  * CMT: Reset CUC and Fast recovery algo variables before
4266                  * SACK processing
4267                  */
4268                 net->new_pseudo_cumack = 0;
4269                 net->will_exit_fast_recovery = 0;
4270                 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4271                         (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4272                 }
4273         }
4274         /* process the new consecutive TSN first */
4275         TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4276                 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4277                         if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4278                                 accum_moved = 1;
4279                                 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4280                                         /*
4281                                          * If it is less than ACKED, it is
4282                                          * now no-longer in flight. Higher
4283                                          * values may occur during marking
4284                                          */
4285                                         if ((tp1->whoTo->dest_state &
4286                                             SCTP_ADDR_UNCONFIRMED) &&
4287                                             (tp1->snd_count < 2)) {
4288                                                 /*
4289                                                  * If there was no retran
4290                                                  * and the address is
4291                                                  * un-confirmed and we sent
4292                                                  * there and are now
4293                                                  * sacked, it is confirmed;
4294                                                  * mark it so.
4295                                                  */
4296                                                 tp1->whoTo->dest_state &=
4297                                                     ~SCTP_ADDR_UNCONFIRMED;
4298                                         }
4299                                         if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4300                                                 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4301                                                         sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4302                                                             tp1->whoTo->flight_size,
4303                                                             tp1->book_size,
4304                                                             (uintptr_t) tp1->whoTo,
4305                                                             tp1->rec.data.TSN_seq);
4306                                                 }
4307                                                 sctp_flight_size_decrease(tp1);
4308                                                 sctp_total_flight_decrease(stcb, tp1);
4309                                                 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4310                                                         (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4311                                                             tp1);
4312                                                 }
4313                                         }
4314                                         tp1->whoTo->net_ack += tp1->send_size;
4315
4316                                         /* CMT SFR and DAC algos */
4317                                         this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4318                                         tp1->whoTo->saw_newack = 1;
4319
4320                                         if (tp1->snd_count < 2) {
4321                                                 /*
4322                                                  * True non-retransmitted
4323                                                  * chunk
4324                                                  */
4325                                                 tp1->whoTo->net_ack2 +=
4326                                                     tp1->send_size;
4327
4328                                                 /* update RTO too? */
4329                                                 if (tp1->do_rtt) {
4330                                                         if (rto_ok) {
4331                                                                 tp1->whoTo->RTO =
4332                                                                     sctp_calculate_rto(stcb,
4333                                                                     asoc, tp1->whoTo,
4334                                                                     &tp1->sent_rcv_time,
4335                                                                     sctp_align_safe_nocopy,
4336                                                                     SCTP_RTT_FROM_DATA);
4337                                                                 rto_ok = 0;
4338                                                         }
4339                                                         if (tp1->whoTo->rto_needed == 0) {
4340                                                                 tp1->whoTo->rto_needed = 1;
4341                                                         }
4342                                                         tp1->do_rtt = 0;
4343                                                 }
4344                                         }
4345                                         /*
4346                                          * CMT: CUCv2 algorithm. From the
4347                                          * cumack'd TSNs, for each TSN being
4348                                          * acked for the first time, set the
4349                                          * following variables for the
4350                                          * corresp destination.
4351                                          * new_pseudo_cumack will trigger a
4352                                          * cwnd update.
4353                                          * find_(rtx_)pseudo_cumack will
4354                                          * trigger search for the next
4355                                          * expected (rtx-)pseudo-cumack.
4356                                          */
4357                                         tp1->whoTo->new_pseudo_cumack = 1;
4358                                         tp1->whoTo->find_pseudo_cumack = 1;
4359                                         tp1->whoTo->find_rtx_pseudo_cumack = 1;
4360
4361
4362                                         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4363                                                 sctp_log_sack(asoc->last_acked_seq,
4364                                                     cum_ack,
4365                                                     tp1->rec.data.TSN_seq,
4366                                                     0,
4367                                                     0,
4368                                                     SCTP_LOG_TSN_ACKED);
4369                                         }
4370                                         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4371                                                 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4372                                         }
4373                                 }
4374                                 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4375                                         sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4376 #ifdef SCTP_AUDITING_ENABLED
4377                                         sctp_audit_log(0xB3,
4378                                             (asoc->sent_queue_retran_cnt & 0x000000ff));
4379 #endif
4380                                 }
4381                                 if (tp1->rec.data.chunk_was_revoked) {
4382                                         /* deflate the cwnd */
4383                                         tp1->whoTo->cwnd -= tp1->book_size;
4384                                         tp1->rec.data.chunk_was_revoked = 0;
4385                                 }
4386                                 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4387                                         tp1->sent = SCTP_DATAGRAM_ACKED;
4388                                 }
4389                         }
4390                 } else {
4391                         break;
4392                 }
4393         }
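/*
 * In the loop above only chunks that were never retransmitted
 * (snd_count < 2) and still have do_rtt set feed an RTT sample, and
 * rto_ok limits this to one sample per SACK (Karn's rule).  A
 * floating-point illustration of the RFC 4960, section 6.3.1 update
 * that sctp_calculate_rto() performs in fixed point; it is not the
 * kernel's arithmetic.
 */
#if 0
struct rto_state {
	double srtt;		/* smoothed RTT */
	double rttvar;		/* RTT variance */
	int have_sample;	/* has a first measurement been taken? */
};

/* R is the new measurement in seconds; returns the new RTO, unclamped. */
static double
rto_update(struct rto_state *s, double R)
{
	const double alpha = 1.0 / 8.0, beta = 1.0 / 4.0;

	if (!s->have_sample) {
		s->srtt = R;
		s->rttvar = R / 2.0;
		s->have_sample = 1;
	} else {
		double delta = s->srtt - R;

		if (delta < 0.0)
			delta = -delta;
		s->rttvar = (1.0 - beta) * s->rttvar + beta * delta;
		s->srtt = (1.0 - alpha) * s->srtt + alpha * R;
	}
	/* Callers clamp the result to [RTO.Min, RTO.Max]. */
	return (s->srtt + 4.0 * s->rttvar);
}
#endif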
4394         biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4395         /* always set this up to cum-ack */
4396         asoc->this_sack_highest_gap = last_tsn;
4397
4398         if ((num_seg > 0) || (num_nr_seg > 0)) {
4399
4400                 /*
4401                  * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4402                  * to be greater than the cumack. Also reset saw_newack to 0
4403                  * for all dests.
4404                  */
4405                 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4406                         net->saw_newack = 0;
4407                         net->this_sack_highest_newack = last_tsn;
4408                 }
4409
4410                 /*
4411                  * this_sack_highest_gap will increase while handling NEW
4412                  * segments; this_sack_highest_newack will increase while
4413                  * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4414                  * used for CMT DAC algo. saw_newack will also change.
4415                  */
4416                 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4417                     &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4418                     num_seg, num_nr_seg, &rto_ok)) {
4419                         wake_him++;
4420                 }
4421                 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4422                         /*
4423                          * validate the biggest_tsn_acked in the gap acks if
4424                          * strict adherence is wanted.
4425                          */
4426                         if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4427                                 /*
4428                                  * peer is either confused or we are under
4429                                  * attack. We must abort.
4430                                  */
4431                                 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4432                                     biggest_tsn_acked, send_s);
4433                                 goto hopeless_peer;
4434                         }
4435                 }
4436         }
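/*
 * sctp_handle_segments() walks the (NR-)gap-ack blocks of the SACK.  Per
 * RFC 4960 each block carries 16-bit start/end offsets relative to the
 * cumulative TSN ack; the small sketch below shows the TSN range a single
 * block acknowledges (illustrative struct, not the one from sctp_header.h).
 */
#if 0
#include <stdint.h>

struct gap_block {		/* 16-bit offsets, as on the wire */
	uint16_t start;
	uint16_t end;
};

/* The block acks TSNs cum_ack + start through cum_ack + end, inclusive. */
static void
gap_block_range(uint32_t cum_ack, struct gap_block gb,
    uint32_t *first_tsn, uint32_t *last_tsn)
{
	*first_tsn = cum_ack + gb.start;
	*last_tsn = cum_ack + gb.end;
}
#endif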
4437         /*******************************************/
4438         /* cancel ALL T3-send timer if accum moved */
4439         /*******************************************/
4440         if (asoc->sctp_cmt_on_off > 0) {
4441                 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4442                         if (net->new_pseudo_cumack)
4443                                 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4444                                     stcb, net,
4445                                     SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4446
4447                 }
4448         } else {
4449                 if (accum_moved) {
4450                         TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4451                                 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4452                                     stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4453                         }
4454                 }
4455         }
4456         /********************************************/
4457         /* drop the acked chunks from the sentqueue */
4458         /********************************************/
4459         asoc->last_acked_seq = cum_ack;
4460
4461         TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4462                 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4463                         break;
4464                 }
4465                 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4466                         if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
4467                                 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
4468 #ifdef INVARIANTS
4469                         } else {
4470                                 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
4471 #endif
4472                         }
4473                 }
4474                 if ((asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues == 0) &&
4475                     (asoc->strmout[tp1->rec.data.stream_number].state == SCTP_STREAM_RESET_PENDING) &&
4476                     TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.stream_number].outqueue)) {
4477                         asoc->trigger_reset = 1;
4478                 }
4479                 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4480                 if (PR_SCTP_ENABLED(tp1->flags)) {
4481                         if (asoc->pr_sctp_cnt != 0)
4482                                 asoc->pr_sctp_cnt--;
4483                 }
4484                 asoc->sent_queue_cnt--;
4485                 if (tp1->data) {
4486                         /* sa_ignore NO_NULL_CHK */
4487                         sctp_free_bufspace(stcb, asoc, tp1, 1);
4488                         sctp_m_freem(tp1->data);
4489                         tp1->data = NULL;
4490                         if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4491                                 asoc->sent_queue_cnt_removeable--;
4492                         }
4493                 }
4494                 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4495                         sctp_log_sack(asoc->last_acked_seq,
4496                             cum_ack,
4497                             tp1->rec.data.TSN_seq,
4498                             0,
4499                             0,
4500                             SCTP_LOG_FREE_SENT);
4501                 }
4502                 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4503                 wake_him++;
4504         }
4505         if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4506 #ifdef INVARIANTS
4507                 panic("Warning flight size is positive and should be 0");
4508 #else
4509                 SCTP_PRINTF("Warning: flight size should be 0 but is %d\n",
4510                     asoc->total_flight);
4511 #endif
4512                 asoc->total_flight = 0;
4513         }
4514         /* sa_ignore NO_NULL_CHK */
4515         if ((wake_him) && (stcb->sctp_socket)) {
4516 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4517                 struct socket *so;
4518
4519 #endif
4520                 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4521                 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4522                         sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4523                 }
4524 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4525                 so = SCTP_INP_SO(stcb->sctp_ep);
4526                 atomic_add_int(&stcb->asoc.refcnt, 1);
4527                 SCTP_TCB_UNLOCK(stcb);
4528                 SCTP_SOCKET_LOCK(so, 1);
4529                 SCTP_TCB_LOCK(stcb);
4530                 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4531                 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4532                         /* assoc was freed while we were unlocked */
4533                         SCTP_SOCKET_UNLOCK(so, 1);
4534                         return;
4535                 }
4536 #endif
4537                 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4538 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4539                 SCTP_SOCKET_UNLOCK(so, 1);
4540 #endif
4541         } else {
4542                 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4543                         sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4544                 }
4545         }
4546
4547         if (asoc->fast_retran_loss_recovery && accum_moved) {
4548                 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4549                         /* Setup so we will exit RFC2582 fast recovery */
4550                         will_exit_fast_recovery = 1;
4551                 }
4552         }
4553         /*
4554          * Check for revoked fragments:
4555          * 
4556          * If the previous sack had no frags, then nothing can have been
4557          * revoked. If the previous sack did have frags, then: if we now
4558          * have frags (num_seg > 0), call sctp_check_for_revoked() to tell
4559          * whether the peer revoked some of them; else the peer revoked all
4560          * ACKED fragments, since we had some before and now have NONE.
4561          */
4562
4563         if (num_seg) {
4564                 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4565                 asoc->saw_sack_with_frags = 1;
4566         } else if (asoc->saw_sack_with_frags) {
4567                 int cnt_revoked = 0;
4568
4569                 /* Peer revoked all dg's marked or acked */
4570                 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4571                         if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4572                                 tp1->sent = SCTP_DATAGRAM_SENT;
4573                                 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4574                                         sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4575                                             tp1->whoTo->flight_size,
4576                                             tp1->book_size,
4577                                             (uintptr_t) tp1->whoTo,
4578                                             tp1->rec.data.TSN_seq);
4579                                 }
4580                                 sctp_flight_size_increase(tp1);
4581                                 sctp_total_flight_increase(stcb, tp1);
4582                                 tp1->rec.data.chunk_was_revoked = 1;
4583                                 /*
4584                                  * To ensure that this increase in
4585                                  * flightsize, which is artificial, does not
4586                                  * throttle the sender, we also increase the
4587                                  * cwnd artificially.
4588                                  */
4589                                 tp1->whoTo->cwnd += tp1->book_size;
4590                                 cnt_revoked++;
4591                         }
4592                 }
4593                 if (cnt_revoked) {
4594                         reneged_all = 1;
4595                 }
4596                 asoc->saw_sack_with_frags = 0;
4597         }
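/*
 * A minimal sketch of what revoking one chunk costs the sender, as done
 * in the loop above: the chunk drops back from ACKED to SENT, its bytes
 * re-enter the flight size, and the cwnd is bumped by the same amount so
 * the artificial flight growth does not throttle the sender.  Placeholder
 * fields only.
 */
#if 0
#include <stdint.h>

struct rnet { uint32_t flight_size; uint32_t cwnd; };
struct rchunk { uint32_t book_size; int acked; int was_revoked; };

static void
revoke_one(struct rnet *net, struct rchunk *chk, uint32_t *total_flight)
{
	chk->acked = 0;			/* back from ACKED to SENT */
	chk->was_revoked = 1;
	net->flight_size += chk->book_size;
	*total_flight += chk->book_size;
	net->cwnd += chk->book_size;	/* offset the artificial flight increase */
}
#endif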
4598         if (num_nr_seg > 0)
4599                 asoc->saw_sack_with_nr_frags = 1;
4600         else
4601                 asoc->saw_sack_with_nr_frags = 0;
4602
4603         /* JRS - Use the congestion control given in the CC module */
4604         if (ecne_seen == 0) {
4605                 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4606                         if (net->net_ack2 > 0) {
4607                                 /*
4608                                  * Karn's rule applies to clearing error
4609                                  * count, this is optional.
4610                                  */
4611                                 net->error_count = 0;
4612                                 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4613                                         /* addr came good */
4614                                         net->dest_state |= SCTP_ADDR_REACHABLE;
4615                                         sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4616                                             0, (void *)net, SCTP_SO_NOT_LOCKED);
4617                                 }
4618                                 if (net == stcb->asoc.primary_destination) {
4619                                         if (stcb->asoc.alternate) {
4620                                                 /*
4621                                                  * release the alternate,
4622                                                  * primary is good
4623                                                  */
4624                                                 sctp_free_remote_addr(stcb->asoc.alternate);
4625                                                 stcb->asoc.alternate = NULL;
4626                                         }
4627                                 }
4628                                 if (net->dest_state & SCTP_ADDR_PF) {
4629                                         net->dest_state &= ~SCTP_ADDR_PF;
4630                                         sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4631                                             stcb->sctp_ep, stcb, net,
4632                                             SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4633                                         sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4634                                         asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4635                                         /* Done with this net */
4636                                         net->net_ack = 0;
4637                                 }
4638                                 /* restore any doubled timers */
4639                                 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4640                                 if (net->RTO < stcb->asoc.minrto) {
4641                                         net->RTO = stcb->asoc.minrto;
4642                                 }
4643                                 if (net->RTO > stcb->asoc.maxrto) {
4644                                         net->RTO = stcb->asoc.maxrto;
4645                                 }
4646                         }
4647                 }
4648                 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4649         }
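/*
 * The congestion-control work above is dispatched through the
 * per-association function-pointer table (asoc->cc_functions), so a CC
 * module can be swapped without touching this path.  A much simplified
 * illustration of that pattern; the names and signatures are placeholders
 * and do not match struct sctp_cc_functions.
 */
#if 0
#include <stddef.h>

struct cc_hooks {
	void (*after_sack)(void *assoc, int accum_moved, int reneged_all,
	    int will_exit_fast_recovery);
	void (*after_fr)(void *assoc);
};

static void
cc_after_sack(const struct cc_hooks *cc, void *assoc, int accum_moved,
    int reneged_all, int will_exit)
{
	if (cc->after_sack != NULL)
		cc->after_sack(assoc, accum_moved, reneged_all, will_exit);
}
#endif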
4650         if (TAILQ_EMPTY(&asoc->sent_queue)) {
4651                 /* nothing left in-flight */
4652                 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4653                         /* stop all timers */
4654                         sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4655                             stcb, net,
4656                             SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
4657                         net->flight_size = 0;
4658                         net->partial_bytes_acked = 0;
4659                 }
4660                 asoc->total_flight = 0;
4661                 asoc->total_flight_count = 0;
4662         }
4663         /**********************************/
4664         /* Now what about shutdown issues */
4665         /**********************************/
4666         if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4667                 /* nothing left on sendqueue.. consider done */
4668                 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4669                         sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4670                             asoc->peers_rwnd, 0, 0, a_rwnd);
4671                 }
4672                 asoc->peers_rwnd = a_rwnd;
4673                 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4674                         /* SWS sender side engages */
4675                         asoc->peers_rwnd = 0;
4676                 }
4677                 /* clean up */
4678                 if ((asoc->stream_queue_cnt == 1) &&
4679                     ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4680                     (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4681                     (asoc->locked_on_sending)
4682                     ) {
4683                         struct sctp_stream_queue_pending *sp;
4684
4685                         /*
4686                          * I may be in a state where we got it all across, but
4687                          * cannot write more due to a shutdown... we abort
4688                          * since the user did not indicate EOR in this case.
4689                          */
4690                         sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4691                             sctp_streamhead);
4692                         if ((sp) && (sp->length == 0)) {
4693                                 asoc->locked_on_sending = NULL;
4694                                 if (sp->msg_is_complete) {
4695                                         asoc->stream_queue_cnt--;
4696                                 } else {
4697                                         asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4698                                         asoc->stream_queue_cnt--;
4699                                 }
4700                         }
4701                 }
4702                 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4703                     (asoc->stream_queue_cnt == 0)) {
4704                         if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4705                                 /* Need to abort here */
4706                                 struct mbuf *op_err;
4707
4708                 abort_out_now:
4709                                 *abort_now = 1;
4710                                 /* XXX */
4711                                 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4712                                 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
4713                                 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4714                                 return;
4715                         } else {
4716                                 struct sctp_nets *netp;
4717
4718                                 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4719                                     (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4720                                         SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4721                                 }
4722                                 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4723                                 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4724                                 sctp_stop_timers_for_shutdown(stcb);
4725                                 if (asoc->alternate) {
4726                                         netp = asoc->alternate;
4727                                 } else {
4728                                         netp = asoc->primary_destination;
4729                                 }
4730                                 sctp_send_shutdown(stcb, netp);
4731                                 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4732                                     stcb->sctp_ep, stcb, netp);
4733                                 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4734                                     stcb->sctp_ep, stcb, netp);
4735                         }
4736                         return;
4737                 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4738                     (asoc->stream_queue_cnt == 0)) {
4739                         struct sctp_nets *netp;
4740
4741                         if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4742                                 goto abort_out_now;
4743                         }
4744                         SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4745                         SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4746                         SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4747                         sctp_stop_timers_for_shutdown(stcb);
4748                         if (asoc->alternate) {
4749                                 netp = asoc->alternate;
4750                         } else {
4751                                 netp = asoc->primary_destination;
4752                         }
4753                         sctp_send_shutdown_ack(stcb, netp);
4754                         sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4755                             stcb->sctp_ep, stcb, netp);
4756                         return;
4757                 }
4758         }
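/*
 * A condensed sketch of the shutdown decision taken above once nothing is
 * queued anymore: with a partial message left we must abort; otherwise
 * SHUTDOWN-PENDING leads to sending a SHUTDOWN (entering SHUTDOWN-SENT),
 * while SHUTDOWN-RECEIVED leads to sending a SHUTDOWN-ACK (entering
 * SHUTDOWN-ACK-SENT).  The enum and names are illustrative only.
 */
#if 0
enum shut_action { DO_NOTHING, DO_ABORT, SEND_SHUTDOWN, SEND_SHUTDOWN_ACK };

static enum shut_action
pick_shutdown_action(int shutdown_pending, int shutdown_received,
    int stream_queue_cnt, int partial_msg_left)
{
	if (stream_queue_cnt != 0)
		return (DO_NOTHING);
	if ((shutdown_pending || shutdown_received) && partial_msg_left)
		return (DO_ABORT);
	if (shutdown_pending)
		return (SEND_SHUTDOWN);
	if (shutdown_received)
		return (SEND_SHUTDOWN_ACK);
	return (DO_NOTHING);
}
#endif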
4759         /*
4760          * Now here we are going to recycle net_ack for a different use...
4761          * HEADS UP.
4762          */
4763         TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4764                 net->net_ack = 0;
4765         }
4766
4767         /*
4768          * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
4769          * to be done. Setting this_sack_lowest_newack to the cum_ack will
4770          * automatically ensure that.
4771          */
4772         if ((asoc->sctp_cmt_on_off > 0) &&
4773             SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
4774             (cmt_dac_flag == 0)) {
4775                 this_sack_lowest_newack = cum_ack;
4776         }
4777         if ((num_seg > 0) || (num_nr_seg > 0)) {
4778                 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4779                     biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
4780         }
4781         /* JRS - Use the congestion control given in the CC module */
4782         asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
4783
4784         /* Now are we exiting loss recovery ? */
4785         if (will_exit_fast_recovery) {
4786                 /* Ok, we must exit fast recovery */
4787                 asoc->fast_retran_loss_recovery = 0;
4788         }
4789         if ((asoc->sat_t3_loss_recovery) &&
4790             SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
4791                 /* end satellite t3 loss recovery */
4792                 asoc->sat_t3_loss_recovery = 0;
4793         }
4794         /*
4795          * CMT Fast recovery
4796          */
4797         TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4798                 if (net->will_exit_fast_recovery) {
4799                         /* Ok, we must exit fast recovery */
4800                         net->fast_retran_loss_recovery = 0;
4801                 }
4802         }
4803
4804         /* Adjust and set the new rwnd value */
4805         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4806                 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4807                     asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
4808         }
4809         asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
4810             (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4811         if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4812                 /* SWS sender side engages */
4813                 asoc->peers_rwnd = 0;
4814         }
4815         if (asoc->peers_rwnd > old_rwnd) {
4816                 win_probe_recovery = 1;
4817         }
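/*
 * The peer's window is recomputed from the advertised a_rwnd minus what
 * is still in flight (plus a configurable per-chunk overhead), clamped at
 * zero; below the sender-side SWS threshold it is treated as closed, and
 * growth relative to the old value signals possible window-probe
 * recovery.  A minimal restatement with placeholder parameters.
 */
#if 0
#include <stdint.h>

static uint32_t
recompute_peers_rwnd(uint32_t a_rwnd, uint32_t total_flight,
    uint32_t flight_count, uint32_t per_chunk_oh, uint32_t sws_sender)
{
	uint32_t outstanding = total_flight + flight_count * per_chunk_oh;
	uint32_t rwnd = (a_rwnd > outstanding) ? a_rwnd - outstanding : 0;

	if (rwnd < sws_sender)
		rwnd = 0;	/* silly-window avoidance on the sender side */
	return (rwnd);
}
#endif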
4818         /*
4819          * Now we must setup so we have a timer up for anyone with
4820          * outstanding data.
4821          */
4822         done_once = 0;
4823 again:
4824         j = 0;
4825         TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4826                 if (win_probe_recovery && (net->window_probe)) {
4827                         win_probe_recovered = 1;
4828                         /*-
4829                          * Find first chunk that was used with
4830                          * window probe and clear the event. Put
4831                          * it back into the send queue as if it has
4832                          * not been sent.
4833                          */
4834                         TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4835                                 if (tp1->window_probe) {
4836                                         sctp_window_probe_recovery(stcb, asoc, tp1);
4837                                         break;
4838                                 }
4839                         }
4840                 }
4841                 if (net->flight_size) {
4842                         j++;
4843                         if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4844                                 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4845                                     stcb->sctp_ep, stcb, net);
4846                         }
4847                         if (net->window_probe) {
4848                                 net->window_probe = 0;
4849                         }
4850                 } else {
4851                         if (net->window_probe) {
4852                                 /*
4853                                  * In window probes we must assure a timer
4854                                  * is still running there
4855                                  */
4856                                 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4857                                         sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4858                                             stcb->sctp_ep, stcb, net);
4859
4860                                 }
4861                         } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4862                                 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4863                                     stcb, net,
4864                                     SCTP_FROM_SCTP_INDATA + SCTP_LOC_34);
4865                         }
4866                 }
4867         }
4868         if ((j == 0) &&
4869             (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4870             (asoc->sent_queue_retran_cnt == 0) &&
4871             (win_probe_recovered == 0) &&
4872             (done_once == 0)) {
4873                 /*
4874                  * huh, this should not happen unless all packets are
4875                  * PR-SCTP and marked to skip of course.
4876                  */
4877                 if (sctp_fs_audit(asoc)) {
4878                         TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4879                                 net->flight_size = 0;
4880                         }
4881                         asoc->total_flight = 0;
4882                         asoc->total_flight_count = 0;
4883                         asoc->sent_queue_retran_cnt = 0;
4884                         TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4885                                 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4886                                         sctp_flight_size_increase(tp1);
4887                                         sctp_total_flight_increase(stcb, tp1);
4888                                 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4889                                         sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4890                                 }
4891                         }
4892                 }
4893                 done_once = 1;
4894                 goto again;
4895         }
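/*
 * When no timer was started (j == 0) but the sent queue is not empty,
 * the flight accounting is suspected to have drifted (for instance when
 * everything left is PR-SCTP and marked to skip), so sctp_fs_audit()
 * triggers the rebuild above: zero the counters, then re-add every chunk
 * that is still genuinely outstanding.  A minimal sketch of that rebuild
 * over placeholder fields.
 */
#if 0
#include <stddef.h>
#include <stdint.h>

struct fchunk { uint32_t book_size; int outstanding; struct fchunk *next; };

/* Recompute the total flight from scratch, counting only outstanding chunks. */
static uint32_t
rebuild_flight(const struct fchunk *sent_queue_head)
{
	uint32_t total = 0;
	const struct fchunk *c;

	for (c = sent_queue_head; c != NULL; c = c->next) {
		if (c->outstanding)
			total += c->book_size;
	}
	return (total);
}
#endif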
4896         /*********************************************/
4897         /* Here we perform PR-SCTP procedures        */
4898         /* (section 4.2)                             */
4899         /*********************************************/
4900         /* C1. update advancedPeerAckPoint */
4901         if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
4902                 asoc->advanced_peer_ack_point = cum_ack;
4903         }
4904         /* C2. try to further move advancedPeerAckPoint ahead */
4905         if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4906                 struct sctp_tmit_chunk *lchk;
4907                 uint32_t old_adv_peer_ack_point;
4908
4909                 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4910                 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4911                 /* C3. See if we need to send a Fwd-TSN */
4912                 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
4913                         /*
4914                          * ISSUE with ECN, see FWD-TSN processing.
4915                          */
4916                         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
4917                                 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
4918                                     0xee, cum_ack, asoc->advanced_peer_ack_point,
4919                                     old_adv_peer_ack_point);
4920                         }
4921                         if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4922                                 send_forward_tsn(stcb, asoc);
4923                         } else if (lchk) {
4924                                 /* try to FR fwd-tsn's that get lost too */
4925                                 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4926                                         send_forward_tsn(stcb, asoc);
4927                                 }
4928                         }
4929                 }
4930                 if (lchk) {
4931                         /* Assure a timer is up */
4932                         sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4933                             stcb->sctp_ep, stcb, lchk->whoTo);
4934                 }
4935         }
4936         if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4937                 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4938                     a_rwnd,
4939                     stcb->asoc.peers_rwnd,
4940                     stcb->asoc.total_flight,
4941                     stcb->asoc.total_output_queue_size);
4942         }
4943 }
4944
4945 void
4946 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
4947 {
4948         /* Copy cum-ack */
4949         uint32_t cum_ack, a_rwnd;
4950
4951         cum_ack = ntohl(cp->cumulative_tsn_ack);
4952         /* Arrange so a_rwnd does NOT change */
4953         a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
4954
4955         /* Now call the express sack handling */
4956         sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
4957 }
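/*
 * A SHUTDOWN chunk carries only a cumulative TSN ack, so the window fed
 * to the express handler is synthesized to leave peers_rwnd where it was:
 * a worked statement of that arithmetic, ignoring the per-chunk overhead
 * term, the SWS clamp and any flight the new cum-ack just released.
 */
#if 0
#include <stdint.h>

/*
 * The handler later recomputes
 *     peers_rwnd' = a_rwnd - total_flight
 *                 = (peers_rwnd + total_flight) - total_flight
 *                 = peers_rwnd
 */
static uint32_t
rwnd_after_shutdown_ack(uint32_t peers_rwnd, uint32_t total_flight)
{
	uint32_t a_rwnd = peers_rwnd + total_flight;

	return (a_rwnd - total_flight);		/* == peers_rwnd */
}
#endif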
4958
4959 static void
4960 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
4961     struct sctp_stream_in *strmin)
4962 {
4963         struct sctp_queued_to_read *ctl, *nctl;
4964         struct sctp_association *asoc;
4965         uint16_t tt;
4966
4967         asoc = &stcb->asoc;
4968         tt = strmin->last_sequence_delivered;
4969         /*
4970          * First deliver anything prior to and including the stream
4971          * sequence number that came in.
4972          */
4973         TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
4974                 if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
4975                         /* this is deliverable now */
4976                         TAILQ_REMOVE(&strmin->inqueue, ctl, next);
4977                         /* subtract pending on streams */
4978                         asoc->size_on_all_streams -= ctl->length;
4979                         sctp_ucount_decr(asoc->cnt_on_all_streams);
4980                         /* deliver it to at least the delivery-q */
4981                         if (stcb->sctp_socket) {
4982                                 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
4983                                 sctp_add_to_readq(stcb->sctp_ep, stcb,
4984                                     ctl,
4985                                     &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
4986                         }
4987                 } else {
4988                         /* no more delivery now. */
4989                         break;
4990                 }
4991         }
4992         /*
4993          * Now we must deliver things in the queue the normal way, if any
4994          * are now ready.
4995          */
4996         tt = strmin->last_sequence_delivered + 1;
4997         TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
4998                 if (tt == ctl->sinfo_ssn) {
4999                         /* this is deliverable now */
5000                         TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5001                         /* subtract pending on streams */
5002                         asoc->size_on_all_streams -= ctl->length;
5003                         sctp_ucount_decr(asoc->cnt_on_all_streams);
5004                         /* deliver it to at least the delivery-q */
5005                         strmin->last_sequence_delivered = ctl->sinfo_ssn;
5006                         if (stcb->sctp_socket) {
5007                                 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5008                                 sctp_add_to_readq(stcb->sctp_ep, stcb,
5009                                     ctl,
5010                                     &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5011
5012                         }
5013                         tt = strmin->last_sequence_delivered + 1;
5014                 } else {
5015                         break;
5016                 }
5017         }
5018 }
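/*
 * The second loop above resumes normal ordered delivery: chunks are
 * handed up only while the head of the stream's queue carries exactly the
 * next expected stream sequence number.  A minimal restatement over a
 * plain array sorted by SSN (placeholder types, 16-bit SSNs).
 */
#if 0
#include <stddef.h>
#include <stdint.h>

struct smsg { uint16_t ssn; int queued; };

/* q[] is assumed sorted by SSN; returns how many messages were handed up. */
static size_t
deliver_in_order(struct smsg *q, size_t n, uint16_t *next_ssn)
{
	size_t delivered = 0, i;

	for (i = 0; i < n; i++) {
		if (!q[i].queued || q[i].ssn != *next_ssn)
			break;		/* gap: stop until it is filled */
		q[i].queued = 0;	/* "hand it to the read queue" */
		(*next_ssn)++;		/* the 16-bit SSN wraps naturally */
		delivered++;
	}
	return (delivered);
}
#endif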
5019
5020 static void
5021 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5022     struct sctp_association *asoc,
5023     uint16_t stream, uint16_t seq)
5024 {
5025         struct sctp_tmit_chunk *chk, *nchk;
5026
5027         /* For each one on here see if we need to toss it */
5028         /*
5029          * For now, large messages held on the reasmqueue that are complete
5030          * will be tossed too. We could in theory do more work, spinning
5031          * through and stopping after dumping one msg, i.e. on seeing the
5032          * start of a new msg at the head, and calling the delivery
5033          * function to see if it can be delivered. But for now we just
5034          * dump everything on the queue.
5035          */
5036         TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5037                 /*
5038                  * Do not toss it if on a different stream or marked for
5039                  * unordered delivery in which case the stream sequence
5040                  * number has no meaning.
5041                  */
5042                 if ((chk->rec.data.stream_number != stream) ||
5043                     ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5044                         continue;
5045                 }
5046                 if (chk->rec.data.stream_seq == seq) {
5047                         /* It needs to be tossed */
5048                         TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5049                         if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5050                                 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5051                                 asoc->str_of_pdapi = chk->rec.data.stream_number;
5052                                 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5053                                 asoc->fragment_flags = chk->rec.data.rcv_flags;
5054                         }
5055                         asoc->size_on_reasm_queue -= chk->send_size;
5056                         sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5057
5058                         /* Clear up any stream problem */
5059                         if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5060                             SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5061                                 /*
5062                          * We must dump forward this stream's
5063                          * sequence number if the chunk being
5064                          * skipped is not unordered. There is
5065                                  * a chance that if the peer does not
5066                                  * include the last fragment in its FWD-TSN
5067                                  * we WILL have a problem here since you
5068                                  * would have a partial chunk in queue that
5069                                  * may not be deliverable. Also if a Partial
5070                          * delivery API has started, the user may get
5071                                  * a partial chunk. The next read returning
5072                                  * a new chunk... really ugly but I see no
5073                                  * way around it! Maybe a notify??
5074                                  */
5075                                 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5076                         }
5077                         if (chk->data) {
5078                                 sctp_m_freem(chk->data);
5079                                 chk->data = NULL;
5080                         }
5081                         sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5082                 } else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
5083                         /*
5084                          * If the stream_seq is > than the purging one, we
5085                          * are done
5086                          */
5087                         break;
5088                 }
5089         }
5090 }
5091
5092
5093 void
5094 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5095     struct sctp_forward_tsn_chunk *fwd,
5096     int *abort_flag, struct mbuf *m, int offset)
5097 {
5098         /*
5099          * The PR-SCTP FWD-TSN. Here we perform all the data receiver
5100          * side steps for processing a FwdTSN, as required by the
5101          * PR-SCTP draft. Assume we get FwdTSN(x):
5102          *
5103          * 1) Update the local cumTSN to x.
5104          * 2) Try to advance the cumTSN further using other TSNs we hold.
5105          * 3) Examine and update the re-ordering queues on the
5106          *    PR-enabled in-streams.
5107          * 4) Clean up the re-assembly queue.
5108          * 5) Send a SACK to report where we are.
5109          */
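             /*
              * For example (illustrative numbers): with cumTSN = 100, a
              * FwdTSN carrying 103 moves the cumulative TSN to 103 and
              * marks TSNs 101-103 as received in the NR mapping array,
              * unless the jump is larger than the mapping array, in which
              * case both arrays are simply rebuilt at the new cumTSN.
              */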
5110         struct sctp_association *asoc;
5111         uint32_t new_cum_tsn, gap;
5112         unsigned int i, fwd_sz, m_size;
5113         uint32_t str_seq;
5114         struct sctp_stream_in *strm;
5115         struct sctp_tmit_chunk *chk, *nchk;
5116         struct sctp_queued_to_read *ctl, *sv;
5117
5118         asoc = &stcb->asoc;
5119         if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5120                 SCTPDBG(SCTP_DEBUG_INDATA1,
5121                     "Bad size too small/big fwd-tsn\n");
5122                 return;
5123         }
5124         m_size = (stcb->asoc.mapping_array_size << 3);
5125         /*************************************************************/
5126         /* 1. Here we update local cumTSN and shift the bitmap array */
5127         /*************************************************************/
5128         new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5129
5130         if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5131                 /* Already got there ... */
5132                 return;
5133         }
5134         /*
5135          * now we know the new TSN is more advanced, let's find the actual
5136          * gap
5137          */
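             /*
              * The gap is, in effect, the serial distance from the mapping
              * array's base TSN to the new cumulative TSN (the macro is
              * assumed to handle 32-bit TSN wrap). For example, a base TSN
              * of 1000 and a new cumulative TSN of 1007 give a gap of 7.
              */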
5138         SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5139         asoc->cumulative_tsn = new_cum_tsn;
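             /*
              * If the gap does not fit inside the mapping array, the
              * FWD-TSN jumps past everything we are tracking: sanity-check
              * it against the receive window and, if it is acceptable,
              * rebuild both mapping arrays from scratch at the new
              * cumulative TSN. Otherwise just mark every TSN up to the gap
              * as received in the NR mapping array.
              */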
5140         if (gap >= m_size) {
5141                 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5142                         struct mbuf *op_err;
5143                         char msg[SCTP_DIAG_INFO_LEN];
5144
5145                         /*
5146                          * out of range (of single byte chunks in the rwnd I
5147                          * give out). This must be an attacker.
5148                          */
5149                         *abort_flag = 1;
5150                         snprintf(msg, sizeof(msg),
5151                             "New cum ack %8.8x too high, highest TSN %8.8x",
5152                             new_cum_tsn, asoc->highest_tsn_inside_map);
5153                         op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5154                         stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_35;
5155                         sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5156                         return;
5157                 }
5158                 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5159
5160                 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5161                 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5162                 asoc->highest_tsn_inside_map = new_cum_tsn;
5163
5164                 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5165                 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5166
5167                 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5168                         sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5169                 }
5170         } else {
5171                 SCTP_TCB_LOCK_ASSERT(stcb);
5172                 for (i = 0; i <= gap; i++) {
5173                         if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5174                             !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5175                                 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5176                                 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5177                                         asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5178                                 }
5179                         }
5180                 }
5181         }
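             /*
              * At this point the cumulative TSN and the mapping arrays
              * reflect the peer's forward movement; everything at or below
              * new_cum_tsn is, in effect, treated as received or abandoned.
              */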
5182         /*************************************************************/
5183         /* 2. Clear up re-assembly queue                             */
5184         /*************************************************************/
5185         /*
5186          * First, service it if the PD-API is active, just in case we
5187          * can progress it further.
5188          */
5189         if (asoc->fragmented_delivery_inprogress) {
5190                 sctp_service_reassembly(stcb, asoc);
5191         }
5192         /* For each chunk on the queue, see if we need to toss it */
5193         /*
5194          * For now, large messages held on the reasmqueue that are
5195          * complete will be tossed too. We could, in theory, do more
5196          * work: spin through and stop after dumping one message (i.e.
5197          * on seeing the start of a new message at the head), then call
5198          * the delivery function to see if it can be delivered. But for
5199          * now we just dump everything on the queue.
5200          */
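             /*
              * Toss every reassembly-queue chunk whose TSN is at or below
              * the new cumulative TSN; the queue is kept in TSN order, so
              * we can stop at the first chunk beyond that mark.
              */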
5201         TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5202                 if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
5203                         /* It needs to be tossed */
5204                         TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5205                         if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5206                                 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5207                                 asoc->str_of_pdapi = chk->rec.data.stream_number;
5208                                 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5209                                 asoc->fragment_flags = chk->rec.data.rcv_flags;
5210                         }
5211                         asoc->size_on_reasm_queue -= chk->send_size;
5212                         sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5213
5214                         /* Clear up any stream problem */
5215                         if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5216                             SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5217                                 /*
5218                                  * We must move this stream's sequence
5219                                  * number forward when the chunk being
5220                                  * skipped is not unordered. There is a
5221                                  * chance that, if the peer does not
5222                                  * include the last fragment in its FWD-TSN,
5223                                  * we WILL have a problem here, since a
5224                                  * partial chunk could be left in the queue
5225                                  * that may not be deliverable. Also, if a
5226                                  * partial delivery API has started, the
5227                                  * user may get a partial chunk, with the
5228                                  * next read returning a new chunk... really
5229                                  * ugly, but I see no way around it! Maybe a notify??
5230                                  */
5231                                 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5232                         }
5233                         if (chk->data) {
5234                                 sctp_m_freem(chk->data);
5235                                 chk->data = NULL;
5236                         }
5237                         sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5238                 } else {
5239                         /*
5240                          * Ok we have gone beyond the end of the fwd-tsn's
5241                          * mark.
5242                          */
5243                         break;
5244                 }
5245         }
5246         /*******************************************************/
5247         /* 3. Update the PR-stream re-ordering queues and fix  */
5248         /* delivery issues as needed.                       */
5249         /*******************************************************/
5250         fwd_sz -= sizeof(*fwd);
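             /*
              * What remains of the chunk is, per RFC 3758, a list of
              * (stream number, stream sequence) pairs giving, per stream,
              * the highest ordered message being skipped; walk them and
              * clean up the affected in-streams.
              */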
5251         if (m && fwd_sz) {
5252                 /* New method. */
5253                 unsigned int num_str;
5254                 struct sctp_strseq *stseq, strseqbuf;
5255
5256                 offset += sizeof(*fwd);
5257
5258                 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5259                 num_str = fwd_sz / sizeof(struct sctp_strseq);
5260                 for (i = 0; i < num_str; i++) {
5261                         uint16_t st;
5262
5263                         stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5264                             sizeof(struct sctp_strseq),
5265                             (uint8_t *) & strseqbuf);
5266                         offset += sizeof(struct sctp_strseq);
5267                         if (stseq == NULL) {
5268                                 break;
5269                         }
5270                         /* Convert the stream number and sequence to host byte order */
5271                         st = ntohs(stseq->stream);
5272                         stseq->stream = st;
5273                         st = ntohs(stseq->sequence);
5274                         stseq->sequence = st;
5275
5276                         /* now process */
5277
5278                         /*
5279                          * Ok, we now look for the stream/seq on the read
5280                          * queue where it is not fully delivered. If we find
5281                          * it, we transmute the read entry into a PDI_ABORTED.
5282                          */
5283                         if (stseq->stream >= asoc->streamincnt) {
5284                                 /* screwed up streams, stop!  */
5285                                 break;
5286                         }
5287                         if ((asoc->str_of_pdapi == stseq->stream) &&
5288                             (asoc->ssn_of_pdapi == stseq->sequence)) {
5289                                 /*
5290                                  * If this is the one we were partially
5291                                  * delivering, then we no longer are.
5292                                  * Note that this will change with the
5293                                  * reassembly re-write.
5294                                  */
5295                                 asoc->fragmented_delivery_inprogress = 0;
5296                         }
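                             /*
                              * Flush any partially reassembled data for this
                              * stream/seq, then, if a matching read-queue entry
                              * is still being partially delivered, mark it
                              * aborted and notify the ULP that the partial
                              * delivery was aborted.
                              */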
5297                         sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5298                         TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5299                                 if ((ctl->sinfo_stream == stseq->stream) &&
5300                                     (ctl->sinfo_ssn == stseq->sequence)) {
5301                                         str_seq = (stseq->stream << 16) | stseq->sequence;
5302                                         ctl->end_added = 1;
5303                                         ctl->pdapi_aborted = 1;
5304                                         sv = stcb->asoc.control_pdapi;
5305                                         stcb->asoc.control_pdapi = ctl;
5306                                         sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5307                                             stcb,
5308                                             SCTP_PARTIAL_DELIVERY_ABORTED,
5309                                             (void *)&str_seq,
5310                                             SCTP_SO_NOT_LOCKED);
5311                                         stcb->asoc.control_pdapi = sv;
5312                                         break;
5313                                 } else if ((ctl->sinfo_stream == stseq->stream) &&
5314                                     SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) {
5315                                         /* We are past our victim SSN */
5316                                         break;
5317                                 }
5318                         }
5319                         strm = &asoc->strmin[stseq->stream];
5320                         if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) {
5321                                 /* Update the sequence number */
5322                                 strm->last_sequence_delivered = stseq->sequence;
5323                         }
5324                         /* now kick the stream the new way */
5325                         /* sa_ignore NO_NULL_CHK */
5326                         sctp_kick_prsctp_reorder_queue(stcb, strm);
5327                 }
5328                 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5329         }
5330         /*
5331          * Now slide things forward.
5332          */
5333         sctp_slide_mapping_arrays(stcb);
5334
5335         if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5336                 /* now let's kick out and check for more fragmented delivery */
5337                 /* sa_ignore NO_NULL_CHK */
5338                 sctp_deliver_reasm_check(stcb, &stcb->asoc);
5339         }
5340 }