2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2010-2012, by Michael Tuexen. All rights reserved.
5 * Copyright (c) 2010-2012, by Randall Stewart. All rights reserved.
6 * Copyright (c) 2010-2012, by Robin Seggelmann. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * a) Redistributions of source code must retain the above copyright notice,
12 * this list of conditions and the following disclaimer.
14 * b) Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
34 #include <netinet/sctp_pcb.h>
37 * Default simple round-robin algorithm.
38 * Just iterates the streams in the order they appear.
42 sctp_ss_default_add(struct sctp_tcb *, struct sctp_association *,
43 struct sctp_stream_out *,
44 struct sctp_stream_queue_pending *);
47 sctp_ss_default_remove(struct sctp_tcb *, struct sctp_association *,
48 struct sctp_stream_out *,
49 struct sctp_stream_queue_pending *);
/*
 * Reset the default (round-robin) scheduler state for an association:
 * clear the lock/last-stream bookkeeping, empty the wheel, then re-add
 * every stream queue (needed when switching schedulers on a live assoc).
 * Caller must hold the TCB send lock.
 */
52 sctp_ss_default_init(struct sctp_tcb *stcb, struct sctp_association *asoc)
56 SCTP_TCB_SEND_LOCK_ASSERT(stcb);
/* Forget any stream we were locked to or last sent from. */
58 asoc->ss_data.locked_on_sending = NULL;
59 asoc->ss_data.last_out_stream = NULL;
60 TAILQ_INIT(&asoc->ss_data.out.wheel);
62 * If there is data in the stream queues already, the scheduler of
63 * an existing association has been changed. We need to add all
64 * stream queues to the wheel.
/* Dispatch through ss_functions so the currently installed add hook runs. */
66 for (i = 0; i < asoc->streamoutcnt; i++) {
67 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc,
/*
 * Unschedule every stream on the round-robin wheel and forget the
 * last-sent stream. clear_values is unused for this scheduler (it has
 * no per-stream values to reset). Caller must hold the TCB send lock.
 */
75 sctp_ss_default_clear(struct sctp_tcb *stcb, struct sctp_association *asoc,
76 bool clear_values SCTP_UNUSED)
78 SCTP_TCB_SEND_LOCK_ASSERT(stcb);
80 while (!TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
81 struct sctp_stream_out *strq;
83 strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
/* Wheel membership and the scheduled flag must agree. */
84 KASSERT(strq->ss_params.scheduled, ("strq %p not scheduled", (void *)strq));
85 TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.rr.next_spoke);
86 strq->ss_params.scheduled = false;
88 asoc->ss_data.last_out_stream = NULL;
/*
 * Initialize scheduler state for a (re)allocated stream. When with_strq
 * is the old incarnation of this stream, migrate any association-level
 * pointers that referenced it so they track the new structure.
 */
93 sctp_ss_default_init_stream(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq)
95 if (with_strq != NULL) {
96 if (stcb->asoc.ss_data.locked_on_sending == with_strq) {
97 stcb->asoc.ss_data.locked_on_sending = strq;
99 if (stcb->asoc.ss_data.last_out_stream == with_strq) {
100 stcb->asoc.ss_data.last_out_stream = strq;
/* New stream starts off the wheel. */
103 strq->ss_params.scheduled = false;
/*
 * Schedule a stream: append it to the tail of the round-robin wheel,
 * but only if it has queued data and is not already on the wheel.
 * The pending chunk (sp) is irrelevant for this scheduler.
 * Caller must hold the TCB send lock.
 */
108 sctp_ss_default_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
109 struct sctp_stream_out *strq,
110 struct sctp_stream_queue_pending *sp SCTP_UNUSED)
112 SCTP_TCB_SEND_LOCK_ASSERT(stcb);
114 /* Add to wheel if not already on it and stream queue not empty */
115 if (!TAILQ_EMPTY(&strq->outqueue) && !strq->ss_params.scheduled) {
116 TAILQ_INSERT_TAIL(&asoc->ss_data.out.wheel,
117 strq, ss_params.ss.rr.next_spoke);
118 strq->ss_params.scheduled = true;
/* Return true when no stream is currently scheduled on the wheel. */
124 sctp_ss_default_is_empty(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc)
126 return (TAILQ_EMPTY(&asoc->ss_data.out.wheel));
/*
 * Unschedule a stream whose queue has drained. Before unlinking it from
 * the wheel, repair last_out_stream so round-robin selection resumes at
 * the stream's predecessor (wrapping to the wheel tail if needed), and
 * drop any send lock held on it. Caller must hold the TCB send lock.
 */
130 sctp_ss_default_remove(struct sctp_tcb *stcb, struct sctp_association *asoc,
131 struct sctp_stream_out *strq,
132 struct sctp_stream_queue_pending *sp SCTP_UNUSED)
134 SCTP_TCB_SEND_LOCK_ASSERT(stcb);
137 * Remove from wheel if stream queue is empty and actually is on the
140 if (TAILQ_EMPTY(&strq->outqueue) && strq->ss_params.scheduled) {
141 if (asoc->ss_data.last_out_stream == strq) {
/* Step back one spoke; wrap to the tail when strq was the head. */
142 asoc->ss_data.last_out_stream = TAILQ_PREV(asoc->ss_data.last_out_stream,
144 ss_params.ss.rr.next_spoke);
145 if (asoc->ss_data.last_out_stream == NULL) {
146 asoc->ss_data.last_out_stream = TAILQ_LAST(&asoc->ss_data.out.wheel,
/* strq was the only stream on the wheel: nothing was sent before it. */
149 if (asoc->ss_data.last_out_stream == strq) {
150 asoc->ss_data.last_out_stream = NULL;
153 if (asoc->ss_data.locked_on_sending == strq) {
154 asoc->ss_data.locked_on_sending = NULL;
156 TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.rr.next_spoke);
157 strq->ss_params.scheduled = false;
/*
 * Pick the next stream to send from. If a message is partially sent
 * (locked_on_sending), that stream must be used. Otherwise advance one
 * spoke past last_out_stream, wrapping to the wheel head. With CMT off,
 * the chosen stream's first chunk must target the requested net.
 */
162 static struct sctp_stream_out *
163 sctp_ss_default_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
164 struct sctp_association *asoc)
166 struct sctp_stream_out *strq, *strqt;
/* A partially-sent message pins us to its stream until it completes. */
168 if (asoc->ss_data.locked_on_sending != NULL) {
169 KASSERT(asoc->ss_data.locked_on_sending->ss_params.scheduled,
170 ("locked_on_sending %p not scheduled",
171 (void *)asoc->ss_data.locked_on_sending));
172 return (asoc->ss_data.locked_on_sending);
174 strqt = asoc->ss_data.last_out_stream;
175 KASSERT(strqt == NULL || strqt->ss_params.scheduled,
176 ("last_out_stream %p not scheduled", (void *)strqt));
178 /* Find the next stream to use */
180 strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
182 strq = TAILQ_NEXT(strqt, ss_params.ss.rr.next_spoke);
184 strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
187 KASSERT(strq == NULL || strq->ss_params.scheduled,
188 ("strq %p not scheduled", (void *)strq));
191 * If CMT is off, we must validate that the stream in question has
192 * the first item pointed towards our network destination requested
193 * by the caller. Note that if we turn out to be locked to a stream
194 * (assigning TSNs), then we must stop, since we cannot look for
195 * another stream with data to send to that destination. In CMT's
196 * case, by skipping this check, we will send one data packet
197 * towards the requested net.
199 if (net != NULL && strq != NULL &&
200 SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
201 if (TAILQ_FIRST(&strq->outqueue) &&
202 TAILQ_FIRST(&strq->outqueue)->net != NULL &&
203 TAILQ_FIRST(&strq->outqueue)->net != net) {
204 if (strq == asoc->ss_data.last_out_stream) {
/*
 * Called after data was moved out of strq. Remember it as the last
 * stream sent from; without I-DATA support, lock onto the stream while
 * its head message is partially taken (so its TSNs stay contiguous).
 */
216 sctp_ss_default_scheduled(struct sctp_tcb *stcb,
217 struct sctp_nets *net SCTP_UNUSED,
218 struct sctp_association *asoc,
219 struct sctp_stream_out *strq,
220 int moved_how_much SCTP_UNUSED)
222 struct sctp_stream_queue_pending *sp;
224 KASSERT(strq != NULL, ("strq is NULL"));
225 KASSERT(strq->ss_params.scheduled, ("strq %p is not scheduled", (void *)strq));
226 asoc->ss_data.last_out_stream = strq;
227 if (asoc->idata_supported == 0) {
228 sp = TAILQ_FIRST(&strq->outqueue);
/* some_taken: head message partially sent -> must finish it first. */
229 if ((sp != NULL) && (sp->some_taken == 1)) {
230 asoc->ss_data.locked_on_sending = strq;
232 asoc->ss_data.locked_on_sending = NULL;
235 asoc->ss_data.locked_on_sending = NULL;
/* Per-packet completion hook: a no-op for the default scheduler. */
241 sctp_ss_default_packet_done(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net SCTP_UNUSED,
242 struct sctp_association *asoc SCTP_UNUSED)
244 /* Nothing to be done here */
/* The default scheduler has no per-stream value to report: a no-op. */
249 sctp_ss_default_get_value(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc SCTP_UNUSED,
250 struct sctp_stream_out *strq SCTP_UNUSED, uint16_t *value SCTP_UNUSED)
252 /* Nothing to be done here */
/* The default scheduler has no per-stream value to set: a no-op. */
257 sctp_ss_default_set_value(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc SCTP_UNUSED,
258 struct sctp_stream_out *strq SCTP_UNUSED, uint16_t value SCTP_UNUSED)
260 /* Nothing to be done here */
/*
 * Report whether exactly one user message is queued and that message is
 * still incomplete (more data expected from the application). Checks the
 * head chunk of the stream we are locked onto.
 */
265 sctp_ss_default_is_user_msgs_incomplete(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc)
267 struct sctp_stream_out *strq;
268 struct sctp_stream_queue_pending *sp;
/* Only meaningful when a single message is outstanding. */
270 if (asoc->stream_queue_cnt != 1) {
273 strq = asoc->ss_data.locked_on_sending;
277 sp = TAILQ_FIRST(&strq->outqueue);
281 return (sp->msg_is_complete == 0);
285 * Real round-robin algorithm.
286 * Always iterates the streams in ascending order.
/*
 * Schedule a stream for the strict round-robin scheduler: insert it
 * into the wheel sorted by stream id (sid) so selection always walks
 * streams in ascending order. Caller must hold the TCB send lock.
 */
289 sctp_ss_rr_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
290 struct sctp_stream_out *strq,
291 struct sctp_stream_queue_pending *sp SCTP_UNUSED)
293 struct sctp_stream_out *strqt;
295 SCTP_TCB_SEND_LOCK_ASSERT(stcb);
297 if (!TAILQ_EMPTY(&strq->outqueue) && !strq->ss_params.scheduled) {
298 if (TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
299 TAILQ_INSERT_HEAD(&asoc->ss_data.out.wheel, strq, ss_params.ss.rr.next_spoke);
/* Walk to the first stream with a sid >= ours and insert before it. */
301 strqt = TAILQ_FIRST(&asoc->ss_data.out.wheel);
302 while (strqt != NULL && (strqt->sid < strq->sid)) {
303 strqt = TAILQ_NEXT(strqt, ss_params.ss.rr.next_spoke);
306 TAILQ_INSERT_BEFORE(strqt, strq, ss_params.ss.rr.next_spoke);
308 TAILQ_INSERT_TAIL(&asoc->ss_data.out.wheel, strq, ss_params.ss.rr.next_spoke);
311 strq->ss_params.scheduled = true;
317 * Real round-robin per packet algorithm.
318 * Always iterates the streams in ascending order and
319 * only fills messages of the same stream in a packet.
/*
 * Per-packet round robin: keep returning the same stream for the whole
 * packet; the wheel is only advanced in sctp_ss_rrp_packet_done().
 */
321 static struct sctp_stream_out *
322 sctp_ss_rrp_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net SCTP_UNUSED,
323 struct sctp_association *asoc)
325 return (asoc->ss_data.last_out_stream);
/*
 * A packet has been finished: advance the per-packet round-robin wheel
 * one spoke (wrapping to the head) and store the result as the stream
 * the next packet will be filled from. With CMT off, the stream's first
 * chunk must target the requested net.
 */
329 sctp_ss_rrp_packet_done(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
330 struct sctp_association *asoc)
332 struct sctp_stream_out *strq, *strqt;
334 strqt = asoc->ss_data.last_out_stream;
335 KASSERT(strqt == NULL || strqt->ss_params.scheduled,
336 ("last_out_stream %p not scheduled", (void *)strqt));
338 /* Find the next stream to use */
340 strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
342 strq = TAILQ_NEXT(strqt, ss_params.ss.rr.next_spoke);
344 strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
347 KASSERT(strq == NULL || strq->ss_params.scheduled,
348 ("strq %p not scheduled", (void *)strq));
351 * If CMT is off, we must validate that the stream in question has
352 * the first item pointed towards our network destination requested
353 * by the caller. Note that if we turn out to be locked to a stream
354 * (assigning TSNs), then we must stop, since we cannot look for
355 * another stream with data to send to that destination. In CMT's
356 * case, by skipping this check, we will send one data packet
357 * towards the requested net.
359 if (net != NULL && strq != NULL &&
360 SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
361 if (TAILQ_FIRST(&strq->outqueue) &&
362 TAILQ_FIRST(&strq->outqueue)->net != NULL &&
363 TAILQ_FIRST(&strq->outqueue)->net != net) {
364 if (strq == asoc->ss_data.last_out_stream) {
/* Commit the chosen stream for the next packet. */
372 asoc->ss_data.last_out_stream = strq;
377 * Priority algorithm.
378 * Always prefers streams based on their priority id.
/*
 * Unschedule every stream from the priority wheel; when clear_values is
 * set (parameter not visible in this extract) also reset each stream's
 * priority to 0. Caller must hold the TCB send lock.
 */
381 sctp_ss_prio_clear(struct sctp_tcb *stcb, struct sctp_association *asoc,
384 SCTP_TCB_SEND_LOCK_ASSERT(stcb);
386 while (!TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
387 struct sctp_stream_out *strq;
389 strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
390 KASSERT(strq->ss_params.scheduled, ("strq %p not scheduled", (void *)strq));
392 strq->ss_params.ss.prio.priority = 0;
394 TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.prio.next_spoke);
395 strq->ss_params.scheduled = false;
397 asoc->ss_data.last_out_stream = NULL;
/*
 * Initialize priority-scheduler state for a (re)allocated stream,
 * migrating association pointers and the priority value from the old
 * incarnation (with_strq) when one exists; otherwise priority is 0.
 */
402 sctp_ss_prio_init_stream(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq)
404 if (with_strq != NULL) {
405 if (stcb->asoc.ss_data.locked_on_sending == with_strq) {
406 stcb->asoc.ss_data.locked_on_sending = strq;
408 if (stcb->asoc.ss_data.last_out_stream == with_strq) {
409 stcb->asoc.ss_data.last_out_stream = strq;
412 strq->ss_params.scheduled = false;
413 if (with_strq != NULL) {
/* Carry the configured priority over to the new structure. */
414 strq->ss_params.ss.prio.priority = with_strq->ss_params.ss.prio.priority;
416 strq->ss_params.ss.prio.priority = 0;
/*
 * Schedule a stream on the priority wheel, keeping the wheel sorted by
 * ascending priority value (lower numeric value = served first; equal
 * priorities stay in insertion order). Caller must hold the TCB send lock.
 */
422 sctp_ss_prio_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
423 struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp SCTP_UNUSED)
425 struct sctp_stream_out *strqt;
427 SCTP_TCB_SEND_LOCK_ASSERT(stcb);
429 /* Add to wheel if not already on it and stream queue not empty */
430 if (!TAILQ_EMPTY(&strq->outqueue) && !strq->ss_params.scheduled) {
431 if (TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
432 TAILQ_INSERT_HEAD(&asoc->ss_data.out.wheel, strq, ss_params.ss.prio.next_spoke);
/* Find the first entry with priority >= ours and insert before it. */
434 strqt = TAILQ_FIRST(&asoc->ss_data.out.wheel);
435 while (strqt != NULL && strqt->ss_params.ss.prio.priority < strq->ss_params.ss.prio.priority) {
436 strqt = TAILQ_NEXT(strqt, ss_params.ss.prio.next_spoke);
439 TAILQ_INSERT_BEFORE(strqt, strq, ss_params.ss.prio.next_spoke);
441 TAILQ_INSERT_TAIL(&asoc->ss_data.out.wheel, strq, ss_params.ss.prio.next_spoke);
444 strq->ss_params.scheduled = true;
/*
 * Unschedule a drained stream from the priority wheel; same
 * last_out_stream repair logic as the default scheduler, but using the
 * priority-wheel linkage. Caller must hold the TCB send lock.
 */
450 sctp_ss_prio_remove(struct sctp_tcb *stcb, struct sctp_association *asoc,
451 struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp SCTP_UNUSED)
453 SCTP_TCB_SEND_LOCK_ASSERT(stcb);
456 * Remove from wheel if stream queue is empty and actually is on the
459 if (TAILQ_EMPTY(&strq->outqueue) && strq->ss_params.scheduled) {
460 if (asoc->ss_data.last_out_stream == strq) {
/* Step back one spoke; wrap to the tail when strq was the head. */
461 asoc->ss_data.last_out_stream = TAILQ_PREV(asoc->ss_data.last_out_stream,
463 ss_params.ss.prio.next_spoke);
464 if (asoc->ss_data.last_out_stream == NULL) {
465 asoc->ss_data.last_out_stream = TAILQ_LAST(&asoc->ss_data.out.wheel,
468 if (asoc->ss_data.last_out_stream == strq) {
469 asoc->ss_data.last_out_stream = NULL;
472 if (asoc->ss_data.locked_on_sending == strq) {
473 asoc->ss_data.locked_on_sending = NULL;
475 TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.prio.next_spoke);
476 strq->ss_params.scheduled = false;
/*
 * Select the next stream under the priority policy: honor
 * locked_on_sending first; otherwise continue after last_out_stream
 * while the next stream shares its priority, else restart from the
 * wheel head (the highest-priority group, since the wheel is sorted).
 */
481 static struct sctp_stream_out *
482 sctp_ss_prio_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
483 struct sctp_association *asoc)
485 struct sctp_stream_out *strq, *strqt, *strqn;
/* A partially-sent message pins us to its stream until it completes. */
487 if (asoc->ss_data.locked_on_sending != NULL) {
488 KASSERT(asoc->ss_data.locked_on_sending->ss_params.scheduled,
489 ("locked_on_sending %p not scheduled",
490 (void *)asoc->ss_data.locked_on_sending));
491 return (asoc->ss_data.locked_on_sending);
493 strqt = asoc->ss_data.last_out_stream;
494 KASSERT(strqt == NULL || strqt->ss_params.scheduled,
495 ("last_out_stream %p not scheduled", (void *)strqt));
497 /* Find the next stream to use */
499 strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
/* Only round-robin within the same priority class as the last stream. */
501 strqn = TAILQ_NEXT(strqt, ss_params.ss.prio.next_spoke);
503 strqn->ss_params.ss.prio.priority == strqt->ss_params.ss.prio.priority) {
506 strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
509 KASSERT(strq == NULL || strq->ss_params.scheduled,
510 ("strq %p not scheduled", (void *)strq));
513 * If CMT is off, we must validate that the stream in question has
514 * the first item pointed towards our network destination requested
515 * by the caller. Note that if we turn out to be locked to a stream
516 * (assigning TSNs), then we must stop, since we cannot look for
517 * another stream with data to send to that destination. In CMT's
518 * case, by skipping this check, we will send one data packet
519 * towards the requested net.
521 if (net != NULL && strq != NULL &&
522 SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
523 if (TAILQ_FIRST(&strq->outqueue) &&
524 TAILQ_FIRST(&strq->outqueue)->net != NULL &&
525 TAILQ_FIRST(&strq->outqueue)->net != net) {
526 if (strq == asoc->ss_data.last_out_stream) {
/* Report the stream's configured priority through *value. */
538 sctp_ss_prio_get_value(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc SCTP_UNUSED,
539 struct sctp_stream_out *strq, uint16_t *value)
544 *value = strq->ss_params.ss.prio.priority;
/*
 * Change a stream's priority. Remove/re-add cycles the stream through
 * the sorted wheel so it lands at the position its new priority demands.
 */
549 sctp_ss_prio_set_value(struct sctp_tcb *stcb, struct sctp_association *asoc,
550 struct sctp_stream_out *strq, uint16_t value)
555 strq->ss_params.ss.prio.priority = value;
556 sctp_ss_prio_remove(stcb, asoc, strq, NULL);
557 sctp_ss_prio_add(stcb, asoc, strq, NULL);
562 * Fair bandwidth algorithm.
563 * Maintains an equal throughput per stream.
/*
 * Unschedule every stream from the fair-bandwidth wheel; when
 * clear_values is set (parameter not visible in this extract) also
 * reset the per-stream byte-credit counter (rounds) to -1, the
 * "unscheduled" sentinel. Caller must hold the TCB send lock.
 */
566 sctp_ss_fb_clear(struct sctp_tcb *stcb, struct sctp_association *asoc,
569 SCTP_TCB_SEND_LOCK_ASSERT(stcb);
571 while (!TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
572 struct sctp_stream_out *strq;
574 strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
575 KASSERT(strq->ss_params.scheduled, ("strq %p not scheduled", (void *)strq));
577 strq->ss_params.ss.fb.rounds = -1;
579 TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.fb.next_spoke);
580 strq->ss_params.scheduled = false;
582 asoc->ss_data.last_out_stream = NULL;
/*
 * Initialize fair-bandwidth state for a (re)allocated stream, migrating
 * association pointers and the rounds counter from the old incarnation
 * (with_strq) when one exists; otherwise rounds starts at -1 (idle).
 */
587 sctp_ss_fb_init_stream(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq)
589 if (with_strq != NULL) {
590 if (stcb->asoc.ss_data.locked_on_sending == with_strq) {
591 stcb->asoc.ss_data.locked_on_sending = strq;
593 if (stcb->asoc.ss_data.last_out_stream == with_strq) {
594 stcb->asoc.ss_data.last_out_stream = strq;
597 strq->ss_params.scheduled = false;
598 if (with_strq != NULL) {
599 strq->ss_params.ss.fb.rounds = with_strq->ss_params.ss.fb.rounds;
601 strq->ss_params.ss.fb.rounds = -1;
/*
 * Schedule a stream for fair-bandwidth: seed its rounds counter with the
 * length of its head message (if not already counting) and append it to
 * the wheel. Caller must hold the TCB send lock.
 */
607 sctp_ss_fb_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
608 struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp SCTP_UNUSED)
610 SCTP_TCB_SEND_LOCK_ASSERT(stcb);
612 if (!TAILQ_EMPTY(&strq->outqueue) && !strq->ss_params.scheduled) {
/* rounds < 0 means idle: start counting from the head message size. */
613 if (strq->ss_params.ss.fb.rounds < 0)
614 strq->ss_params.ss.fb.rounds = TAILQ_FIRST(&strq->outqueue)->length;
615 TAILQ_INSERT_TAIL(&asoc->ss_data.out.wheel, strq, ss_params.ss.fb.next_spoke);
616 strq->ss_params.scheduled = true;
/*
 * Unschedule a drained stream from the fair-bandwidth wheel; same
 * last_out_stream repair logic as the default scheduler, but using the
 * fb-wheel linkage. Caller must hold the TCB send lock.
 */
622 sctp_ss_fb_remove(struct sctp_tcb *stcb, struct sctp_association *asoc,
623 struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp SCTP_UNUSED)
625 SCTP_TCB_SEND_LOCK_ASSERT(stcb);
628 * Remove from wheel if stream queue is empty and actually is on the
631 if (TAILQ_EMPTY(&strq->outqueue) && strq->ss_params.scheduled) {
632 if (asoc->ss_data.last_out_stream == strq) {
/* Step back one spoke; wrap to the tail when strq was the head. */
633 asoc->ss_data.last_out_stream = TAILQ_PREV(asoc->ss_data.last_out_stream,
635 ss_params.ss.fb.next_spoke);
636 if (asoc->ss_data.last_out_stream == NULL) {
637 asoc->ss_data.last_out_stream = TAILQ_LAST(&asoc->ss_data.out.wheel,
640 if (asoc->ss_data.last_out_stream == strq) {
641 asoc->ss_data.last_out_stream = NULL;
644 if (asoc->ss_data.locked_on_sending == strq) {
645 asoc->ss_data.locked_on_sending = NULL;
647 TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.fb.next_spoke);
648 strq->ss_params.scheduled = false;
/*
 * Select the stream with the smallest rounds value (i.e. the one that
 * has sent the least), scanning the whole wheel starting one past
 * last_out_stream. With CMT off, only streams whose head chunk matches
 * the requested net (or has no net preference) are eligible.
 */
653 static struct sctp_stream_out *
654 sctp_ss_fb_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
655 struct sctp_association *asoc)
657 struct sctp_stream_out *strq = NULL, *strqt;
/* A partially-sent message pins us to its stream until it completes. */
659 if (asoc->ss_data.locked_on_sending != NULL) {
660 KASSERT(asoc->ss_data.locked_on_sending->ss_params.scheduled,
661 ("locked_on_sending %p not scheduled",
662 (void *)asoc->ss_data.locked_on_sending));
663 return (asoc->ss_data.locked_on_sending);
/* Choose the scan start: wheel head on first use or single-entry wheel. */
665 if (asoc->ss_data.last_out_stream == NULL ||
666 TAILQ_FIRST(&asoc->ss_data.out.wheel) == TAILQ_LAST(&asoc->ss_data.out.wheel, sctpwheel_listhead)) {
667 strqt = TAILQ_FIRST(&asoc->ss_data.out.wheel);
669 strqt = TAILQ_NEXT(asoc->ss_data.last_out_stream, ss_params.ss.fb.next_spoke);
/* Candidate must pass the CMT/net eligibility test ... */
672 if ((strqt != NULL) &&
673 ((SCTP_BASE_SYSCTL(sctp_cmt_on_off) > 0) ||
674 (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0 &&
675 (net == NULL || (TAILQ_FIRST(&strqt->outqueue) && TAILQ_FIRST(&strqt->outqueue)->net == NULL) ||
676 (net != NULL && TAILQ_FIRST(&strqt->outqueue) && TAILQ_FIRST(&strqt->outqueue)->net != NULL &&
677 TAILQ_FIRST(&strqt->outqueue)->net == net))))) {
/* ... and have the lowest non-negative rounds seen so far. */
678 if ((strqt->ss_params.ss.fb.rounds >= 0) &&
680 (strqt->ss_params.ss.fb.rounds < strq->ss_params.ss.fb.rounds))) {
685 strqt = TAILQ_NEXT(strqt, ss_params.ss.fb.next_spoke);
687 strqt = TAILQ_FIRST(&asoc->ss_data.out.wheel);
689 } while (strqt != strq);
/*
 * After sending from strq: maintain the message lock (non-I-DATA case),
 * then rebase the fairness counters by subtracting strq's rounds from
 * every scheduled stream (clamping at 0), and recharge strq with its
 * next head-message length (or -1 when its queue is now empty).
 */
694 sctp_ss_fb_scheduled(struct sctp_tcb *stcb, struct sctp_nets *net SCTP_UNUSED,
695 struct sctp_association *asoc, struct sctp_stream_out *strq,
696 int moved_how_much SCTP_UNUSED)
698 struct sctp_stream_queue_pending *sp;
699 struct sctp_stream_out *strqt;
702 if (asoc->idata_supported == 0) {
703 sp = TAILQ_FIRST(&strq->outqueue);
/* some_taken: head message partially sent -> must finish it first. */
704 if ((sp != NULL) && (sp->some_taken == 1)) {
705 asoc->ss_data.locked_on_sending = strq;
707 asoc->ss_data.locked_on_sending = NULL;
710 asoc->ss_data.locked_on_sending = NULL;
/* Normalize: the stream just served defines the new zero point. */
712 subtract = strq->ss_params.ss.fb.rounds;
713 TAILQ_FOREACH(strqt, &asoc->ss_data.out.wheel, ss_params.ss.fb.next_spoke) {
714 strqt->ss_params.ss.fb.rounds -= subtract;
715 if (strqt->ss_params.ss.fb.rounds < 0)
716 strqt->ss_params.ss.fb.rounds = 0;
718 if (TAILQ_FIRST(&strq->outqueue)) {
719 strq->ss_params.ss.fb.rounds = TAILQ_FIRST(&strq->outqueue)->length;
721 strq->ss_params.ss.fb.rounds = -1;
723 asoc->ss_data.last_out_stream = strq;
728 * First-come, first-serve algorithm.
729 * Maintains the order provided by the application.
732 sctp_ss_fcfs_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
733 struct sctp_stream_out *strq SCTP_UNUSED,
734 struct sctp_stream_queue_pending *sp);
/*
 * (Re)initialize the first-come-first-serve scheduler. Existing queued
 * chunks are re-enqueued by interleaving across streams (n-th message of
 * each stream in turn) since their original arrival order is lost when
 * switching schedulers. Caller must hold the TCB send lock.
 */
737 sctp_ss_fcfs_init(struct sctp_tcb *stcb, struct sctp_association *asoc)
739 uint32_t x, n = 0, add_more = 1;
740 struct sctp_stream_queue_pending *sp;
743 SCTP_TCB_SEND_LOCK_ASSERT(stcb);
745 TAILQ_INIT(&asoc->ss_data.out.list);
747 * If there is data in the stream queues already, the scheduler of
748 * an existing association has been changed. We can only cycle
749 * through the stream queues and add everything to the FCFS queue.
753 for (i = 0; i < asoc->streamoutcnt; i++) {
754 sp = TAILQ_FIRST(&asoc->strmout[i].outqueue);
756 /* Find n. message in current stream queue */
757 while (sp != NULL && x < n) {
758 sp = TAILQ_NEXT(sp, next);
762 sctp_ss_fcfs_add(stcb, asoc, &asoc->strmout[i], sp);
/*
 * Drain the FCFS list, marking every pending chunk unscheduled, and
 * forget the last-sent stream. clear_values is unused here.
 * Caller must hold the TCB send lock.
 */
772 sctp_ss_fcfs_clear(struct sctp_tcb *stcb, struct sctp_association *asoc,
773 bool clear_values SCTP_UNUSED)
775 struct sctp_stream_queue_pending *sp;
777 SCTP_TCB_SEND_LOCK_ASSERT(stcb);
779 while (!TAILQ_EMPTY(&asoc->ss_data.out.list)) {
780 sp = TAILQ_FIRST(&asoc->ss_data.out.list);
781 KASSERT(sp->scheduled, ("sp %p not scheduled", (void *)sp));
782 TAILQ_REMOVE(&asoc->ss_data.out.list, sp, ss_next);
783 sp->scheduled = false;
785 asoc->ss_data.last_out_stream = NULL;
/*
 * Initialize FCFS state for a (re)allocated stream; only migrates the
 * association-level pointers, since FCFS schedules individual chunks
 * rather than streams.
 */
790 sctp_ss_fcfs_init_stream(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq)
792 if (with_strq != NULL) {
793 if (stcb->asoc.ss_data.locked_on_sending == with_strq) {
794 stcb->asoc.ss_data.locked_on_sending = strq;
796 if (stcb->asoc.ss_data.last_out_stream == with_strq) {
797 stcb->asoc.ss_data.last_out_stream = strq;
800 strq->ss_params.scheduled = false;
/*
 * Append a pending chunk to the tail of the FCFS list (arrival order).
 * Note FCFS schedules chunks (sp), not streams — strq is unused.
 * Caller must hold the TCB send lock.
 */
805 sctp_ss_fcfs_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
806 struct sctp_stream_out *strq SCTP_UNUSED, struct sctp_stream_queue_pending *sp)
808 SCTP_TCB_SEND_LOCK_ASSERT(stcb);
810 if (!sp->scheduled) {
811 TAILQ_INSERT_TAIL(&asoc->ss_data.out.list, sp, ss_next);
812 sp->scheduled = true;
/* Return true when no pending chunk is scheduled on the FCFS list. */
818 sctp_ss_fcfs_is_empty(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc)
820 return (TAILQ_EMPTY(&asoc->ss_data.out.list));
/*
 * Unlink a pending chunk from the FCFS list and mark it unscheduled.
 * Caller must hold the TCB send lock.
 */
824 sctp_ss_fcfs_remove(struct sctp_tcb *stcb, struct sctp_association *asoc,
825 struct sctp_stream_out *strq SCTP_UNUSED, struct sctp_stream_queue_pending *sp)
827 SCTP_TCB_SEND_LOCK_ASSERT(stcb);
830 TAILQ_REMOVE(&asoc->ss_data.out.list, sp, ss_next);
831 sp->scheduled = false;
/*
 * Select the stream owning the oldest schedulable chunk. Honors
 * locked_on_sending first; otherwise maps the FCFS list head's sid back
 * to its stream. With CMT off, chunks bound to a different net than the
 * requested one are skipped (scan advances along ss_next).
 */
836 static struct sctp_stream_out *
837 sctp_ss_fcfs_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
838 struct sctp_association *asoc)
840 struct sctp_stream_out *strq;
841 struct sctp_stream_queue_pending *sp;
843 if (asoc->ss_data.locked_on_sending) {
844 return (asoc->ss_data.locked_on_sending);
846 sp = TAILQ_FIRST(&asoc->ss_data.out.list);
/* Map the chunk back to its owning stream via its stream id. */
849 strq = &asoc->strmout[sp->sid];
855 * If CMT is off, we must validate that the stream in question has
856 * the first item pointed towards our network destination requested
857 * by the caller. Note that if we turn out to be locked to a stream
858 * (assigning TSNs), then we must stop, since we cannot look for
859 * another stream with data to send to that destination. In CMT's
860 * case, by skipping this check, we will send one data packet
861 * towards the requested net.
863 if (net != NULL && strq != NULL &&
864 SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
865 if (TAILQ_FIRST(&strq->outqueue) &&
866 TAILQ_FIRST(&strq->outqueue)->net != NULL &&
867 TAILQ_FIRST(&strq->outqueue)->net != net) {
868 sp = TAILQ_NEXT(sp, ss_next);
/*
 * Called after data was moved out of strq: record it as last sent and,
 * without I-DATA support, lock onto it while its head message is
 * partially taken (mirrors sctp_ss_default_scheduled).
 */
876 sctp_ss_fcfs_scheduled(struct sctp_tcb *stcb,
877 struct sctp_nets *net SCTP_UNUSED,
878 struct sctp_association *asoc,
879 struct sctp_stream_out *strq,
880 int moved_how_much SCTP_UNUSED)
882 struct sctp_stream_queue_pending *sp;
884 KASSERT(strq != NULL, ("strq is NULL"));
885 asoc->ss_data.last_out_stream = strq;
886 if (asoc->idata_supported == 0) {
887 sp = TAILQ_FIRST(&strq->outqueue);
/* some_taken: head message partially sent -> must finish it first. */
888 if ((sp != NULL) && (sp->some_taken == 1)) {
889 asoc->ss_data.locked_on_sending = strq;
891 asoc->ss_data.locked_on_sending = NULL;
894 asoc->ss_data.locked_on_sending = NULL;
/*
 * Scheduler dispatch table: one sctp_ss_functions entry per scheduling
 * policy, indexed by the SCTP_SS_* constants noted before each entry.
 * Schedulers reuse the default hooks wherever their behavior matches.
 */
899 const struct sctp_ss_functions sctp_ss_functions[] = {
900 /* SCTP_SS_DEFAULT */
902 .sctp_ss_init = sctp_ss_default_init,
903 .sctp_ss_clear = sctp_ss_default_clear,
904 .sctp_ss_init_stream = sctp_ss_default_init_stream,
905 .sctp_ss_add_to_stream = sctp_ss_default_add,
906 .sctp_ss_is_empty = sctp_ss_default_is_empty,
907 .sctp_ss_remove_from_stream = sctp_ss_default_remove,
908 .sctp_ss_select_stream = sctp_ss_default_select,
909 .sctp_ss_scheduled = sctp_ss_default_scheduled,
910 .sctp_ss_packet_done = sctp_ss_default_packet_done,
911 .sctp_ss_get_value = sctp_ss_default_get_value,
912 .sctp_ss_set_value = sctp_ss_default_set_value,
913 .sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
915 /* SCTP_SS_ROUND_ROBIN */
/* Differs from default only in sorted (by sid) insertion. */
917 .sctp_ss_init = sctp_ss_default_init,
918 .sctp_ss_clear = sctp_ss_default_clear,
919 .sctp_ss_init_stream = sctp_ss_default_init_stream,
920 .sctp_ss_add_to_stream = sctp_ss_rr_add,
921 .sctp_ss_is_empty = sctp_ss_default_is_empty,
922 .sctp_ss_remove_from_stream = sctp_ss_default_remove,
923 .sctp_ss_select_stream = sctp_ss_default_select,
924 .sctp_ss_scheduled = sctp_ss_default_scheduled,
925 .sctp_ss_packet_done = sctp_ss_default_packet_done,
926 .sctp_ss_get_value = sctp_ss_default_get_value,
927 .sctp_ss_set_value = sctp_ss_default_set_value,
928 .sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
930 /* SCTP_SS_ROUND_ROBIN_PACKET */
/* Advances streams per packet (rrp_select/rrp_packet_done). */
932 .sctp_ss_init = sctp_ss_default_init,
933 .sctp_ss_clear = sctp_ss_default_clear,
934 .sctp_ss_init_stream = sctp_ss_default_init_stream,
935 .sctp_ss_add_to_stream = sctp_ss_rr_add,
936 .sctp_ss_is_empty = sctp_ss_default_is_empty,
937 .sctp_ss_remove_from_stream = sctp_ss_default_remove,
938 .sctp_ss_select_stream = sctp_ss_rrp_select,
939 .sctp_ss_scheduled = sctp_ss_default_scheduled,
940 .sctp_ss_packet_done = sctp_ss_rrp_packet_done,
941 .sctp_ss_get_value = sctp_ss_default_get_value,
942 .sctp_ss_set_value = sctp_ss_default_set_value,
943 .sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
945 /* SCTP_SS_PRIORITY */
947 .sctp_ss_init = sctp_ss_default_init,
948 .sctp_ss_clear = sctp_ss_prio_clear,
949 .sctp_ss_init_stream = sctp_ss_prio_init_stream,
950 .sctp_ss_add_to_stream = sctp_ss_prio_add,
951 .sctp_ss_is_empty = sctp_ss_default_is_empty,
952 .sctp_ss_remove_from_stream = sctp_ss_prio_remove,
953 .sctp_ss_select_stream = sctp_ss_prio_select,
954 .sctp_ss_scheduled = sctp_ss_default_scheduled,
955 .sctp_ss_packet_done = sctp_ss_default_packet_done,
956 .sctp_ss_get_value = sctp_ss_prio_get_value,
957 .sctp_ss_set_value = sctp_ss_prio_set_value,
958 .sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
960 /* SCTP_SS_FAIR_BANDWITH */
962 .sctp_ss_init = sctp_ss_default_init,
963 .sctp_ss_clear = sctp_ss_fb_clear,
964 .sctp_ss_init_stream = sctp_ss_fb_init_stream,
965 .sctp_ss_add_to_stream = sctp_ss_fb_add,
966 .sctp_ss_is_empty = sctp_ss_default_is_empty,
967 .sctp_ss_remove_from_stream = sctp_ss_fb_remove,
968 .sctp_ss_select_stream = sctp_ss_fb_select,
969 .sctp_ss_scheduled = sctp_ss_fb_scheduled,
970 .sctp_ss_packet_done = sctp_ss_default_packet_done,
971 .sctp_ss_get_value = sctp_ss_default_get_value,
972 .sctp_ss_set_value = sctp_ss_default_set_value,
973 .sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
975 /* SCTP_SS_FIRST_COME */
977 .sctp_ss_init = sctp_ss_fcfs_init,
978 .sctp_ss_clear = sctp_ss_fcfs_clear,
979 .sctp_ss_init_stream = sctp_ss_fcfs_init_stream,
980 .sctp_ss_add_to_stream = sctp_ss_fcfs_add,
981 .sctp_ss_is_empty = sctp_ss_fcfs_is_empty,
982 .sctp_ss_remove_from_stream = sctp_ss_fcfs_remove,
983 .sctp_ss_select_stream = sctp_ss_fcfs_select,
984 .sctp_ss_scheduled = sctp_ss_fcfs_scheduled,
985 .sctp_ss_packet_done = sctp_ss_default_packet_done,
986 .sctp_ss_get_value = sctp_ss_default_get_value,
987 .sctp_ss_set_value = sctp_ss_default_set_value,
988 .sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete