2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2010-2012, by Michael Tuexen. All rights reserved.
5 * Copyright (c) 2010-2012, by Randall Stewart. All rights reserved.
6 * Copyright (c) 2010-2012, by Robin Seggelmann. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * a) Redistributions of source code must retain the above copyright notice,
12 * this list of conditions and the following disclaimer.
14 * b) Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
34 #include <netinet/sctp_pcb.h>
/*
 * Default simple round-robin algorithm.
 * Just iterates the streams in the order they appear.
 */
42 sctp_ss_default_add(struct sctp_tcb *, struct sctp_association *,
43 struct sctp_stream_out *,
44 struct sctp_stream_queue_pending *);
47 sctp_ss_default_remove(struct sctp_tcb *, struct sctp_association *,
48 struct sctp_stream_out *,
49 struct sctp_stream_queue_pending *);
52 sctp_ss_default_init(struct sctp_tcb *stcb, struct sctp_association *asoc)
56 SCTP_TCB_LOCK_ASSERT(stcb);
58 asoc->ss_data.locked_on_sending = NULL;
59 asoc->ss_data.last_out_stream = NULL;
60 TAILQ_INIT(&asoc->ss_data.out.wheel);
62 * If there is data in the stream queues already, the scheduler of
63 * an existing association has been changed. We need to add all
64 * stream queues to the wheel.
66 for (i = 0; i < asoc->streamoutcnt; i++) {
67 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc,
75 sctp_ss_default_clear(struct sctp_tcb *stcb, struct sctp_association *asoc,
76 bool clear_values SCTP_UNUSED)
78 SCTP_TCB_LOCK_ASSERT(stcb);
80 while (!TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
81 struct sctp_stream_out *strq;
83 strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
84 KASSERT(strq->ss_params.scheduled, ("strq %p not scheduled", (void *)strq));
85 TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.rr.next_spoke);
86 strq->ss_params.scheduled = false;
88 asoc->ss_data.last_out_stream = NULL;
93 sctp_ss_default_init_stream(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq)
95 SCTP_TCB_LOCK_ASSERT(stcb);
97 if (with_strq != NULL) {
98 if (stcb->asoc.ss_data.locked_on_sending == with_strq) {
99 stcb->asoc.ss_data.locked_on_sending = strq;
101 if (stcb->asoc.ss_data.last_out_stream == with_strq) {
102 stcb->asoc.ss_data.last_out_stream = strq;
105 strq->ss_params.scheduled = false;
110 sctp_ss_default_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
111 struct sctp_stream_out *strq,
112 struct sctp_stream_queue_pending *sp SCTP_UNUSED)
114 SCTP_TCB_LOCK_ASSERT(stcb);
116 /* Add to wheel if not already on it and stream queue not empty */
117 if (!TAILQ_EMPTY(&strq->outqueue) && !strq->ss_params.scheduled) {
118 TAILQ_INSERT_TAIL(&asoc->ss_data.out.wheel,
119 strq, ss_params.ss.rr.next_spoke);
120 strq->ss_params.scheduled = true;
126 sctp_ss_default_is_empty(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc)
128 SCTP_TCB_LOCK_ASSERT(stcb);
130 return (TAILQ_EMPTY(&asoc->ss_data.out.wheel));
134 sctp_ss_default_remove(struct sctp_tcb *stcb, struct sctp_association *asoc,
135 struct sctp_stream_out *strq,
136 struct sctp_stream_queue_pending *sp SCTP_UNUSED)
138 SCTP_TCB_LOCK_ASSERT(stcb);
141 * Remove from wheel if stream queue is empty and actually is on the
144 if (TAILQ_EMPTY(&strq->outqueue) && strq->ss_params.scheduled) {
145 if (asoc->ss_data.last_out_stream == strq) {
146 asoc->ss_data.last_out_stream = TAILQ_PREV(asoc->ss_data.last_out_stream,
148 ss_params.ss.rr.next_spoke);
149 if (asoc->ss_data.last_out_stream == NULL) {
150 asoc->ss_data.last_out_stream = TAILQ_LAST(&asoc->ss_data.out.wheel,
153 if (asoc->ss_data.last_out_stream == strq) {
154 asoc->ss_data.last_out_stream = NULL;
157 if (asoc->ss_data.locked_on_sending == strq) {
158 asoc->ss_data.locked_on_sending = NULL;
160 TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.rr.next_spoke);
161 strq->ss_params.scheduled = false;
166 static struct sctp_stream_out *
167 sctp_ss_default_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
168 struct sctp_association *asoc)
170 struct sctp_stream_out *strq, *strqt;
172 SCTP_TCB_LOCK_ASSERT(stcb);
174 if (asoc->ss_data.locked_on_sending != NULL) {
175 KASSERT(asoc->ss_data.locked_on_sending->ss_params.scheduled,
176 ("locked_on_sending %p not scheduled",
177 (void *)asoc->ss_data.locked_on_sending));
178 return (asoc->ss_data.locked_on_sending);
180 strqt = asoc->ss_data.last_out_stream;
181 KASSERT(strqt == NULL || strqt->ss_params.scheduled,
182 ("last_out_stream %p not scheduled", (void *)strqt));
184 /* Find the next stream to use */
186 strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
188 strq = TAILQ_NEXT(strqt, ss_params.ss.rr.next_spoke);
190 strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
193 KASSERT(strq == NULL || strq->ss_params.scheduled,
194 ("strq %p not scheduled", (void *)strq));
197 * If CMT is off, we must validate that the stream in question has
198 * the first item pointed towards are network destination requested
199 * by the caller. Note that if we turn out to be locked to a stream
200 * (assigning TSN's then we must stop, since we cannot look for
201 * another stream with data to send to that destination). In CMT's
202 * case, by skipping this check, we will send one data packet
203 * towards the requested net.
205 if (net != NULL && strq != NULL &&
206 SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
207 if (TAILQ_FIRST(&strq->outqueue) &&
208 TAILQ_FIRST(&strq->outqueue)->net != NULL &&
209 TAILQ_FIRST(&strq->outqueue)->net != net) {
210 if (strq == asoc->ss_data.last_out_stream) {
222 sctp_ss_default_scheduled(struct sctp_tcb *stcb,
223 struct sctp_nets *net SCTP_UNUSED,
224 struct sctp_association *asoc,
225 struct sctp_stream_out *strq,
226 int moved_how_much SCTP_UNUSED)
228 struct sctp_stream_queue_pending *sp;
230 KASSERT(strq != NULL, ("strq is NULL"));
231 KASSERT(strq->ss_params.scheduled, ("strq %p is not scheduled", (void *)strq));
232 SCTP_TCB_LOCK_ASSERT(stcb);
234 asoc->ss_data.last_out_stream = strq;
235 if (asoc->idata_supported == 0) {
236 sp = TAILQ_FIRST(&strq->outqueue);
237 if ((sp != NULL) && (sp->some_taken == 1)) {
238 asoc->ss_data.locked_on_sending = strq;
240 asoc->ss_data.locked_on_sending = NULL;
243 asoc->ss_data.locked_on_sending = NULL;
249 sctp_ss_default_packet_done(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net SCTP_UNUSED,
250 struct sctp_association *asoc SCTP_UNUSED)
252 SCTP_TCB_LOCK_ASSERT(stcb);
254 /* Nothing to be done here */
259 sctp_ss_default_get_value(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc SCTP_UNUSED,
260 struct sctp_stream_out *strq SCTP_UNUSED, uint16_t *value SCTP_UNUSED)
262 SCTP_TCB_LOCK_ASSERT(stcb);
264 /* Nothing to be done here */
269 sctp_ss_default_set_value(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc SCTP_UNUSED,
270 struct sctp_stream_out *strq SCTP_UNUSED, uint16_t value SCTP_UNUSED)
272 SCTP_TCB_LOCK_ASSERT(stcb);
274 /* Nothing to be done here */
279 sctp_ss_default_is_user_msgs_incomplete(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc)
281 struct sctp_stream_out *strq;
282 struct sctp_stream_queue_pending *sp;
284 SCTP_TCB_LOCK_ASSERT(stcb);
286 if (asoc->stream_queue_cnt != 1) {
289 strq = asoc->ss_data.locked_on_sending;
293 sp = TAILQ_FIRST(&strq->outqueue);
297 return (sp->msg_is_complete == 0);
/*
 * Real round-robin algorithm.
 * Always iterates the streams in ascending order.
 */
305 sctp_ss_rr_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
306 struct sctp_stream_out *strq,
307 struct sctp_stream_queue_pending *sp SCTP_UNUSED)
309 struct sctp_stream_out *strqt;
311 SCTP_TCB_LOCK_ASSERT(stcb);
313 if (!TAILQ_EMPTY(&strq->outqueue) && !strq->ss_params.scheduled) {
314 if (TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
315 TAILQ_INSERT_HEAD(&asoc->ss_data.out.wheel, strq, ss_params.ss.rr.next_spoke);
317 strqt = TAILQ_FIRST(&asoc->ss_data.out.wheel);
318 while (strqt != NULL && (strqt->sid < strq->sid)) {
319 strqt = TAILQ_NEXT(strqt, ss_params.ss.rr.next_spoke);
322 TAILQ_INSERT_BEFORE(strqt, strq, ss_params.ss.rr.next_spoke);
324 TAILQ_INSERT_TAIL(&asoc->ss_data.out.wheel, strq, ss_params.ss.rr.next_spoke);
327 strq->ss_params.scheduled = true;
/*
 * Real round-robin per packet algorithm.
 * Always iterates the streams in ascending order and
 * only fills messages of the same stream in a packet.
 */
337 static struct sctp_stream_out *
338 sctp_ss_rrp_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net SCTP_UNUSED,
339 struct sctp_association *asoc)
341 SCTP_TCB_LOCK_ASSERT(stcb);
343 return (asoc->ss_data.last_out_stream);
347 sctp_ss_rrp_packet_done(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
348 struct sctp_association *asoc)
350 struct sctp_stream_out *strq, *strqt;
352 SCTP_TCB_LOCK_ASSERT(stcb);
354 strqt = asoc->ss_data.last_out_stream;
355 KASSERT(strqt == NULL || strqt->ss_params.scheduled,
356 ("last_out_stream %p not scheduled", (void *)strqt));
358 /* Find the next stream to use */
360 strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
362 strq = TAILQ_NEXT(strqt, ss_params.ss.rr.next_spoke);
364 strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
367 KASSERT(strq == NULL || strq->ss_params.scheduled,
368 ("strq %p not scheduled", (void *)strq));
371 * If CMT is off, we must validate that the stream in question has
372 * the first item pointed towards are network destination requested
373 * by the caller. Note that if we turn out to be locked to a stream
374 * (assigning TSN's then we must stop, since we cannot look for
375 * another stream with data to send to that destination). In CMT's
376 * case, by skipping this check, we will send one data packet
377 * towards the requested net.
379 if (net != NULL && strq != NULL &&
380 SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
381 if (TAILQ_FIRST(&strq->outqueue) &&
382 TAILQ_FIRST(&strq->outqueue)->net != NULL &&
383 TAILQ_FIRST(&strq->outqueue)->net != net) {
384 if (strq == asoc->ss_data.last_out_stream) {
392 asoc->ss_data.last_out_stream = strq;
397 * Priority algorithm.
398 * Always prefers streams based on their priority id.
401 sctp_ss_prio_clear(struct sctp_tcb *stcb, struct sctp_association *asoc,
404 SCTP_TCB_LOCK_ASSERT(stcb);
406 while (!TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
407 struct sctp_stream_out *strq;
409 strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
410 KASSERT(strq->ss_params.scheduled, ("strq %p not scheduled", (void *)strq));
412 strq->ss_params.ss.prio.priority = 0;
414 TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.prio.next_spoke);
415 strq->ss_params.scheduled = false;
417 asoc->ss_data.last_out_stream = NULL;
422 sctp_ss_prio_init_stream(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq)
424 SCTP_TCB_LOCK_ASSERT(stcb);
426 if (with_strq != NULL) {
427 if (stcb->asoc.ss_data.locked_on_sending == with_strq) {
428 stcb->asoc.ss_data.locked_on_sending = strq;
430 if (stcb->asoc.ss_data.last_out_stream == with_strq) {
431 stcb->asoc.ss_data.last_out_stream = strq;
434 strq->ss_params.scheduled = false;
435 if (with_strq != NULL) {
436 strq->ss_params.ss.prio.priority = with_strq->ss_params.ss.prio.priority;
438 strq->ss_params.ss.prio.priority = 0;
444 sctp_ss_prio_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
445 struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp SCTP_UNUSED)
447 struct sctp_stream_out *strqt;
449 SCTP_TCB_LOCK_ASSERT(stcb);
451 /* Add to wheel if not already on it and stream queue not empty */
452 if (!TAILQ_EMPTY(&strq->outqueue) && !strq->ss_params.scheduled) {
453 if (TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
454 TAILQ_INSERT_HEAD(&asoc->ss_data.out.wheel, strq, ss_params.ss.prio.next_spoke);
456 strqt = TAILQ_FIRST(&asoc->ss_data.out.wheel);
457 while (strqt != NULL && strqt->ss_params.ss.prio.priority < strq->ss_params.ss.prio.priority) {
458 strqt = TAILQ_NEXT(strqt, ss_params.ss.prio.next_spoke);
461 TAILQ_INSERT_BEFORE(strqt, strq, ss_params.ss.prio.next_spoke);
463 TAILQ_INSERT_TAIL(&asoc->ss_data.out.wheel, strq, ss_params.ss.prio.next_spoke);
466 strq->ss_params.scheduled = true;
472 sctp_ss_prio_remove(struct sctp_tcb *stcb, struct sctp_association *asoc,
473 struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp SCTP_UNUSED)
475 SCTP_TCB_LOCK_ASSERT(stcb);
478 * Remove from wheel if stream queue is empty and actually is on the
481 if (TAILQ_EMPTY(&strq->outqueue) && strq->ss_params.scheduled) {
482 if (asoc->ss_data.last_out_stream == strq) {
483 asoc->ss_data.last_out_stream = TAILQ_PREV(asoc->ss_data.last_out_stream,
485 ss_params.ss.prio.next_spoke);
486 if (asoc->ss_data.last_out_stream == NULL) {
487 asoc->ss_data.last_out_stream = TAILQ_LAST(&asoc->ss_data.out.wheel,
490 if (asoc->ss_data.last_out_stream == strq) {
491 asoc->ss_data.last_out_stream = NULL;
494 if (asoc->ss_data.locked_on_sending == strq) {
495 asoc->ss_data.locked_on_sending = NULL;
497 TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.prio.next_spoke);
498 strq->ss_params.scheduled = false;
503 static struct sctp_stream_out *
504 sctp_ss_prio_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
505 struct sctp_association *asoc)
507 struct sctp_stream_out *strq, *strqt, *strqn;
509 SCTP_TCB_LOCK_ASSERT(stcb);
511 if (asoc->ss_data.locked_on_sending != NULL) {
512 KASSERT(asoc->ss_data.locked_on_sending->ss_params.scheduled,
513 ("locked_on_sending %p not scheduled",
514 (void *)asoc->ss_data.locked_on_sending));
515 return (asoc->ss_data.locked_on_sending);
517 strqt = asoc->ss_data.last_out_stream;
518 KASSERT(strqt == NULL || strqt->ss_params.scheduled,
519 ("last_out_stream %p not scheduled", (void *)strqt));
521 /* Find the next stream to use */
523 strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
525 strqn = TAILQ_NEXT(strqt, ss_params.ss.prio.next_spoke);
527 strqn->ss_params.ss.prio.priority == strqt->ss_params.ss.prio.priority) {
530 strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
533 KASSERT(strq == NULL || strq->ss_params.scheduled,
534 ("strq %p not scheduled", (void *)strq));
537 * If CMT is off, we must validate that the stream in question has
538 * the first item pointed towards are network destination requested
539 * by the caller. Note that if we turn out to be locked to a stream
540 * (assigning TSN's then we must stop, since we cannot look for
541 * another stream with data to send to that destination). In CMT's
542 * case, by skipping this check, we will send one data packet
543 * towards the requested net.
545 if (net != NULL && strq != NULL &&
546 SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
547 if (TAILQ_FIRST(&strq->outqueue) &&
548 TAILQ_FIRST(&strq->outqueue)->net != NULL &&
549 TAILQ_FIRST(&strq->outqueue)->net != net) {
550 if (strq == asoc->ss_data.last_out_stream) {
562 sctp_ss_prio_get_value(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc SCTP_UNUSED,
563 struct sctp_stream_out *strq, uint16_t *value)
565 SCTP_TCB_LOCK_ASSERT(stcb);
570 *value = strq->ss_params.ss.prio.priority;
575 sctp_ss_prio_set_value(struct sctp_tcb *stcb, struct sctp_association *asoc,
576 struct sctp_stream_out *strq, uint16_t value)
578 SCTP_TCB_LOCK_ASSERT(stcb);
583 strq->ss_params.ss.prio.priority = value;
584 sctp_ss_prio_remove(stcb, asoc, strq, NULL);
585 sctp_ss_prio_add(stcb, asoc, strq, NULL);
590 * Fair bandwidth algorithm.
591 * Maintains an equal throughput per stream.
594 sctp_ss_fb_clear(struct sctp_tcb *stcb, struct sctp_association *asoc,
597 SCTP_TCB_LOCK_ASSERT(stcb);
599 while (!TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
600 struct sctp_stream_out *strq;
602 strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
603 KASSERT(strq->ss_params.scheduled, ("strq %p not scheduled", (void *)strq));
605 strq->ss_params.ss.fb.rounds = -1;
607 TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.fb.next_spoke);
608 strq->ss_params.scheduled = false;
610 asoc->ss_data.last_out_stream = NULL;
615 sctp_ss_fb_init_stream(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq)
617 SCTP_TCB_LOCK_ASSERT(stcb);
619 if (with_strq != NULL) {
620 if (stcb->asoc.ss_data.locked_on_sending == with_strq) {
621 stcb->asoc.ss_data.locked_on_sending = strq;
623 if (stcb->asoc.ss_data.last_out_stream == with_strq) {
624 stcb->asoc.ss_data.last_out_stream = strq;
627 strq->ss_params.scheduled = false;
628 if (with_strq != NULL) {
629 strq->ss_params.ss.fb.rounds = with_strq->ss_params.ss.fb.rounds;
631 strq->ss_params.ss.fb.rounds = -1;
637 sctp_ss_fb_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
638 struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp SCTP_UNUSED)
640 SCTP_TCB_LOCK_ASSERT(stcb);
642 if (!TAILQ_EMPTY(&strq->outqueue) && !strq->ss_params.scheduled) {
643 if (strq->ss_params.ss.fb.rounds < 0)
644 strq->ss_params.ss.fb.rounds = TAILQ_FIRST(&strq->outqueue)->length;
645 TAILQ_INSERT_TAIL(&asoc->ss_data.out.wheel, strq, ss_params.ss.fb.next_spoke);
646 strq->ss_params.scheduled = true;
652 sctp_ss_fb_remove(struct sctp_tcb *stcb, struct sctp_association *asoc,
653 struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp SCTP_UNUSED)
655 SCTP_TCB_LOCK_ASSERT(stcb);
658 * Remove from wheel if stream queue is empty and actually is on the
661 if (TAILQ_EMPTY(&strq->outqueue) && strq->ss_params.scheduled) {
662 if (asoc->ss_data.last_out_stream == strq) {
663 asoc->ss_data.last_out_stream = TAILQ_PREV(asoc->ss_data.last_out_stream,
665 ss_params.ss.fb.next_spoke);
666 if (asoc->ss_data.last_out_stream == NULL) {
667 asoc->ss_data.last_out_stream = TAILQ_LAST(&asoc->ss_data.out.wheel,
670 if (asoc->ss_data.last_out_stream == strq) {
671 asoc->ss_data.last_out_stream = NULL;
674 if (asoc->ss_data.locked_on_sending == strq) {
675 asoc->ss_data.locked_on_sending = NULL;
677 TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.fb.next_spoke);
678 strq->ss_params.scheduled = false;
683 static struct sctp_stream_out *
684 sctp_ss_fb_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
685 struct sctp_association *asoc)
687 struct sctp_stream_out *strq = NULL, *strqt;
689 SCTP_TCB_LOCK_ASSERT(stcb);
691 if (asoc->ss_data.locked_on_sending != NULL) {
692 KASSERT(asoc->ss_data.locked_on_sending->ss_params.scheduled,
693 ("locked_on_sending %p not scheduled",
694 (void *)asoc->ss_data.locked_on_sending));
695 return (asoc->ss_data.locked_on_sending);
697 if (asoc->ss_data.last_out_stream == NULL ||
698 TAILQ_FIRST(&asoc->ss_data.out.wheel) == TAILQ_LAST(&asoc->ss_data.out.wheel, sctpwheel_listhead)) {
699 strqt = TAILQ_FIRST(&asoc->ss_data.out.wheel);
701 strqt = TAILQ_NEXT(asoc->ss_data.last_out_stream, ss_params.ss.fb.next_spoke);
704 if ((strqt != NULL) &&
705 ((SCTP_BASE_SYSCTL(sctp_cmt_on_off) > 0) ||
706 (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0 &&
707 (net == NULL || (TAILQ_FIRST(&strqt->outqueue) && TAILQ_FIRST(&strqt->outqueue)->net == NULL) ||
708 (net != NULL && TAILQ_FIRST(&strqt->outqueue) && TAILQ_FIRST(&strqt->outqueue)->net != NULL &&
709 TAILQ_FIRST(&strqt->outqueue)->net == net))))) {
710 if ((strqt->ss_params.ss.fb.rounds >= 0) &&
712 (strqt->ss_params.ss.fb.rounds < strq->ss_params.ss.fb.rounds))) {
717 strqt = TAILQ_NEXT(strqt, ss_params.ss.fb.next_spoke);
719 strqt = TAILQ_FIRST(&asoc->ss_data.out.wheel);
721 } while (strqt != strq);
726 sctp_ss_fb_scheduled(struct sctp_tcb *stcb, struct sctp_nets *net SCTP_UNUSED,
727 struct sctp_association *asoc, struct sctp_stream_out *strq,
728 int moved_how_much SCTP_UNUSED)
730 struct sctp_stream_queue_pending *sp;
731 struct sctp_stream_out *strqt;
734 SCTP_TCB_LOCK_ASSERT(stcb);
736 if (asoc->idata_supported == 0) {
737 sp = TAILQ_FIRST(&strq->outqueue);
738 if ((sp != NULL) && (sp->some_taken == 1)) {
739 asoc->ss_data.locked_on_sending = strq;
741 asoc->ss_data.locked_on_sending = NULL;
744 asoc->ss_data.locked_on_sending = NULL;
746 subtract = strq->ss_params.ss.fb.rounds;
747 TAILQ_FOREACH(strqt, &asoc->ss_data.out.wheel, ss_params.ss.fb.next_spoke) {
748 strqt->ss_params.ss.fb.rounds -= subtract;
749 if (strqt->ss_params.ss.fb.rounds < 0)
750 strqt->ss_params.ss.fb.rounds = 0;
752 if (TAILQ_FIRST(&strq->outqueue)) {
753 strq->ss_params.ss.fb.rounds = TAILQ_FIRST(&strq->outqueue)->length;
755 strq->ss_params.ss.fb.rounds = -1;
757 asoc->ss_data.last_out_stream = strq;
762 * First-come, first-serve algorithm.
763 * Maintains the order provided by the application.
766 sctp_ss_fcfs_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
767 struct sctp_stream_out *strq SCTP_UNUSED,
768 struct sctp_stream_queue_pending *sp);
771 sctp_ss_fcfs_init(struct sctp_tcb *stcb, struct sctp_association *asoc)
773 uint32_t x, n = 0, add_more = 1;
774 struct sctp_stream_queue_pending *sp;
777 SCTP_TCB_LOCK_ASSERT(stcb);
779 TAILQ_INIT(&asoc->ss_data.out.list);
781 * If there is data in the stream queues already, the scheduler of
782 * an existing association has been changed. We can only cycle
783 * through the stream queues and add everything to the FCFS queue.
787 for (i = 0; i < asoc->streamoutcnt; i++) {
788 sp = TAILQ_FIRST(&asoc->strmout[i].outqueue);
790 /* Find n. message in current stream queue */
791 while (sp != NULL && x < n) {
792 sp = TAILQ_NEXT(sp, next);
796 sctp_ss_fcfs_add(stcb, asoc, &asoc->strmout[i], sp);
806 sctp_ss_fcfs_clear(struct sctp_tcb *stcb, struct sctp_association *asoc,
807 bool clear_values SCTP_UNUSED)
809 struct sctp_stream_queue_pending *sp;
811 SCTP_TCB_LOCK_ASSERT(stcb);
813 while (!TAILQ_EMPTY(&asoc->ss_data.out.list)) {
814 sp = TAILQ_FIRST(&asoc->ss_data.out.list);
815 KASSERT(sp->scheduled, ("sp %p not scheduled", (void *)sp));
816 TAILQ_REMOVE(&asoc->ss_data.out.list, sp, ss_next);
817 sp->scheduled = false;
819 asoc->ss_data.last_out_stream = NULL;
824 sctp_ss_fcfs_init_stream(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq)
826 SCTP_TCB_LOCK_ASSERT(stcb);
828 if (with_strq != NULL) {
829 if (stcb->asoc.ss_data.locked_on_sending == with_strq) {
830 stcb->asoc.ss_data.locked_on_sending = strq;
832 if (stcb->asoc.ss_data.last_out_stream == with_strq) {
833 stcb->asoc.ss_data.last_out_stream = strq;
836 strq->ss_params.scheduled = false;
841 sctp_ss_fcfs_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
842 struct sctp_stream_out *strq SCTP_UNUSED, struct sctp_stream_queue_pending *sp)
844 SCTP_TCB_LOCK_ASSERT(stcb);
846 if (!sp->scheduled) {
847 TAILQ_INSERT_TAIL(&asoc->ss_data.out.list, sp, ss_next);
848 sp->scheduled = true;
854 sctp_ss_fcfs_is_empty(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc)
856 SCTP_TCB_LOCK_ASSERT(stcb);
858 return (TAILQ_EMPTY(&asoc->ss_data.out.list));
862 sctp_ss_fcfs_remove(struct sctp_tcb *stcb, struct sctp_association *asoc,
863 struct sctp_stream_out *strq SCTP_UNUSED, struct sctp_stream_queue_pending *sp)
865 SCTP_TCB_LOCK_ASSERT(stcb);
868 TAILQ_REMOVE(&asoc->ss_data.out.list, sp, ss_next);
869 sp->scheduled = false;
874 static struct sctp_stream_out *
875 sctp_ss_fcfs_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
876 struct sctp_association *asoc)
878 struct sctp_stream_out *strq;
879 struct sctp_stream_queue_pending *sp;
881 SCTP_TCB_LOCK_ASSERT(stcb);
883 if (asoc->ss_data.locked_on_sending) {
884 return (asoc->ss_data.locked_on_sending);
886 sp = TAILQ_FIRST(&asoc->ss_data.out.list);
889 strq = &asoc->strmout[sp->sid];
895 * If CMT is off, we must validate that the stream in question has
896 * the first item pointed towards are network destination requested
897 * by the caller. Note that if we turn out to be locked to a stream
898 * (assigning TSN's then we must stop, since we cannot look for
899 * another stream with data to send to that destination). In CMT's
900 * case, by skipping this check, we will send one data packet
901 * towards the requested net.
903 if (net != NULL && strq != NULL &&
904 SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
905 if (TAILQ_FIRST(&strq->outqueue) &&
906 TAILQ_FIRST(&strq->outqueue)->net != NULL &&
907 TAILQ_FIRST(&strq->outqueue)->net != net) {
908 sp = TAILQ_NEXT(sp, ss_next);
916 sctp_ss_fcfs_scheduled(struct sctp_tcb *stcb,
917 struct sctp_nets *net SCTP_UNUSED,
918 struct sctp_association *asoc,
919 struct sctp_stream_out *strq,
920 int moved_how_much SCTP_UNUSED)
922 struct sctp_stream_queue_pending *sp;
924 KASSERT(strq != NULL, ("strq is NULL"));
925 asoc->ss_data.last_out_stream = strq;
926 if (asoc->idata_supported == 0) {
927 sp = TAILQ_FIRST(&strq->outqueue);
928 if ((sp != NULL) && (sp->some_taken == 1)) {
929 asoc->ss_data.locked_on_sending = strq;
931 asoc->ss_data.locked_on_sending = NULL;
934 asoc->ss_data.locked_on_sending = NULL;
939 const struct sctp_ss_functions sctp_ss_functions[] = {
940 /* SCTP_SS_DEFAULT */
942 .sctp_ss_init = sctp_ss_default_init,
943 .sctp_ss_clear = sctp_ss_default_clear,
944 .sctp_ss_init_stream = sctp_ss_default_init_stream,
945 .sctp_ss_add_to_stream = sctp_ss_default_add,
946 .sctp_ss_is_empty = sctp_ss_default_is_empty,
947 .sctp_ss_remove_from_stream = sctp_ss_default_remove,
948 .sctp_ss_select_stream = sctp_ss_default_select,
949 .sctp_ss_scheduled = sctp_ss_default_scheduled,
950 .sctp_ss_packet_done = sctp_ss_default_packet_done,
951 .sctp_ss_get_value = sctp_ss_default_get_value,
952 .sctp_ss_set_value = sctp_ss_default_set_value,
953 .sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
955 /* SCTP_SS_ROUND_ROBIN */
957 .sctp_ss_init = sctp_ss_default_init,
958 .sctp_ss_clear = sctp_ss_default_clear,
959 .sctp_ss_init_stream = sctp_ss_default_init_stream,
960 .sctp_ss_add_to_stream = sctp_ss_rr_add,
961 .sctp_ss_is_empty = sctp_ss_default_is_empty,
962 .sctp_ss_remove_from_stream = sctp_ss_default_remove,
963 .sctp_ss_select_stream = sctp_ss_default_select,
964 .sctp_ss_scheduled = sctp_ss_default_scheduled,
965 .sctp_ss_packet_done = sctp_ss_default_packet_done,
966 .sctp_ss_get_value = sctp_ss_default_get_value,
967 .sctp_ss_set_value = sctp_ss_default_set_value,
968 .sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
970 /* SCTP_SS_ROUND_ROBIN_PACKET */
972 .sctp_ss_init = sctp_ss_default_init,
973 .sctp_ss_clear = sctp_ss_default_clear,
974 .sctp_ss_init_stream = sctp_ss_default_init_stream,
975 .sctp_ss_add_to_stream = sctp_ss_rr_add,
976 .sctp_ss_is_empty = sctp_ss_default_is_empty,
977 .sctp_ss_remove_from_stream = sctp_ss_default_remove,
978 .sctp_ss_select_stream = sctp_ss_rrp_select,
979 .sctp_ss_scheduled = sctp_ss_default_scheduled,
980 .sctp_ss_packet_done = sctp_ss_rrp_packet_done,
981 .sctp_ss_get_value = sctp_ss_default_get_value,
982 .sctp_ss_set_value = sctp_ss_default_set_value,
983 .sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
985 /* SCTP_SS_PRIORITY */
987 .sctp_ss_init = sctp_ss_default_init,
988 .sctp_ss_clear = sctp_ss_prio_clear,
989 .sctp_ss_init_stream = sctp_ss_prio_init_stream,
990 .sctp_ss_add_to_stream = sctp_ss_prio_add,
991 .sctp_ss_is_empty = sctp_ss_default_is_empty,
992 .sctp_ss_remove_from_stream = sctp_ss_prio_remove,
993 .sctp_ss_select_stream = sctp_ss_prio_select,
994 .sctp_ss_scheduled = sctp_ss_default_scheduled,
995 .sctp_ss_packet_done = sctp_ss_default_packet_done,
996 .sctp_ss_get_value = sctp_ss_prio_get_value,
997 .sctp_ss_set_value = sctp_ss_prio_set_value,
998 .sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
1000 /* SCTP_SS_FAIR_BANDWITH */
1002 .sctp_ss_init = sctp_ss_default_init,
1003 .sctp_ss_clear = sctp_ss_fb_clear,
1004 .sctp_ss_init_stream = sctp_ss_fb_init_stream,
1005 .sctp_ss_add_to_stream = sctp_ss_fb_add,
1006 .sctp_ss_is_empty = sctp_ss_default_is_empty,
1007 .sctp_ss_remove_from_stream = sctp_ss_fb_remove,
1008 .sctp_ss_select_stream = sctp_ss_fb_select,
1009 .sctp_ss_scheduled = sctp_ss_fb_scheduled,
1010 .sctp_ss_packet_done = sctp_ss_default_packet_done,
1011 .sctp_ss_get_value = sctp_ss_default_get_value,
1012 .sctp_ss_set_value = sctp_ss_default_set_value,
1013 .sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
1015 /* SCTP_SS_FIRST_COME */
1017 .sctp_ss_init = sctp_ss_fcfs_init,
1018 .sctp_ss_clear = sctp_ss_fcfs_clear,
1019 .sctp_ss_init_stream = sctp_ss_fcfs_init_stream,
1020 .sctp_ss_add_to_stream = sctp_ss_fcfs_add,
1021 .sctp_ss_is_empty = sctp_ss_fcfs_is_empty,
1022 .sctp_ss_remove_from_stream = sctp_ss_fcfs_remove,
1023 .sctp_ss_select_stream = sctp_ss_fcfs_select,
1024 .sctp_ss_scheduled = sctp_ss_fcfs_scheduled,
1025 .sctp_ss_packet_done = sctp_ss_default_packet_done,
1026 .sctp_ss_get_value = sctp_ss_default_get_value,
1027 .sctp_ss_set_value = sctp_ss_default_set_value,
1028 .sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete