2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2010-2012, by Michael Tuexen. All rights reserved.
5 * Copyright (c) 2010-2012, by Randall Stewart. All rights reserved.
6 * Copyright (c) 2010-2012, by Robin Seggelmann. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * a) Redistributions of source code must retain the above copyright notice,
12 * this list of conditions and the following disclaimer.
14 * b) Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
31 #include <netinet/sctp_os.h>
32 #include <netinet/sctp_pcb.h>
35 * Default simple round-robin algorithm.
36 * Just iterates the streams in the order they appear.
/*
 * Forward declarations: the default scheduler's add/remove callbacks are
 * referenced (via asoc->ss_data function pointers) before their
 * definitions below.
 */
40 sctp_ss_default_add(struct sctp_tcb *, struct sctp_association *,
41 struct sctp_stream_out *,
42 struct sctp_stream_queue_pending *);
45 sctp_ss_default_remove(struct sctp_tcb *, struct sctp_association *,
46 struct sctp_stream_out *,
47 struct sctp_stream_queue_pending *);
/*
 * Initialize default (simple round-robin) scheduler state for an
 * association: clears locked_on_sending/last_out_stream and empties the
 * wheel, then re-adds any streams that already have queued data (this
 * happens when the scheduler of an existing association is changed).
 * NOTE(review): fragment — the loop-index declaration and the tail of
 * this function are missing from this extraction.
 */
50 sctp_ss_default_init(struct sctp_tcb *stcb, struct sctp_association *asoc)
54 SCTP_TCB_LOCK_ASSERT(stcb);
56 asoc->ss_data.locked_on_sending = NULL;
57 asoc->ss_data.last_out_stream = NULL;
58 TAILQ_INIT(&asoc->ss_data.out.wheel);
60 * If there is data in the stream queues already, the scheduler of
61 * an existing association has been changed. We need to add all
62 * stream queues to the wheel.
64 for (i = 0; i < asoc->streamoutcnt; i++) {
65 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc,
/*
 * Remove every stream from the round-robin wheel and reset
 * last_out_stream. clear_values is unused for this scheduler (it has no
 * per-stream values to reset).
 */
73 sctp_ss_default_clear(struct sctp_tcb *stcb, struct sctp_association *asoc,
74 bool clear_values SCTP_UNUSED)
76 SCTP_TCB_LOCK_ASSERT(stcb);
78 while (!TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
79 struct sctp_stream_out *strq;
81 strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
/* Invariant: anything on the wheel must be marked scheduled. */
82 KASSERT(strq->ss_params.scheduled, ("strq %p not scheduled", (void *)strq));
83 TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.rr.next_spoke);
84 strq->ss_params.scheduled = false;
86 asoc->ss_data.last_out_stream = NULL;
/*
 * Initialize per-stream scheduler state for strq. When with_strq is
 * non-NULL, strq replaces with_strq (stream-array reallocation): any
 * association pointers to the old stream are redirected to the new one.
 */
91 sctp_ss_default_init_stream(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq)
93 SCTP_TCB_LOCK_ASSERT(stcb);
95 if (with_strq != NULL) {
96 if (stcb->asoc.ss_data.locked_on_sending == with_strq) {
97 stcb->asoc.ss_data.locked_on_sending = strq;
99 if (stcb->asoc.ss_data.last_out_stream == with_strq) {
100 stcb->asoc.ss_data.last_out_stream = strq;
/* New stream starts off the wheel. */
103 strq->ss_params.scheduled = false;
/*
 * Schedule strq: append it to the tail of the round-robin wheel, but
 * only if it has pending data and is not already on the wheel.
 * The pending-chunk argument is unused by this scheduler.
 */
108 sctp_ss_default_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
109 struct sctp_stream_out *strq,
110 struct sctp_stream_queue_pending *sp SCTP_UNUSED)
112 SCTP_TCB_LOCK_ASSERT(stcb);
114 /* Add to wheel if not already on it and stream queue not empty */
115 if (!TAILQ_EMPTY(&strq->outqueue) && !strq->ss_params.scheduled) {
116 TAILQ_INSERT_TAIL(&asoc->ss_data.out.wheel,
117 strq, ss_params.ss.rr.next_spoke);
118 strq->ss_params.scheduled = true;
/* Return true iff no stream is currently scheduled (wheel is empty). */
124 sctp_ss_default_is_empty(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc)
126 SCTP_TCB_LOCK_ASSERT(stcb);
128 return (TAILQ_EMPTY(&asoc->ss_data.out.wheel));
/*
 * Unschedule strq once its queue has drained. Before unlinking, repair
 * the association's cursors: last_out_stream is moved to the previous
 * spoke (wrapping to the wheel's tail, and NULLed if strq was the only
 * entry), and locked_on_sending is cleared if it pointed at strq.
 */
132 sctp_ss_default_remove(struct sctp_tcb *stcb, struct sctp_association *asoc,
133 struct sctp_stream_out *strq,
134 struct sctp_stream_queue_pending *sp SCTP_UNUSED)
136 SCTP_TCB_LOCK_ASSERT(stcb);
139 * Remove from wheel if stream queue is empty and actually is on the
142 if (TAILQ_EMPTY(&strq->outqueue) && strq->ss_params.scheduled) {
143 if (asoc->ss_data.last_out_stream == strq) {
144 asoc->ss_data.last_out_stream = TAILQ_PREV(asoc->ss_data.last_out_stream,
146 ss_params.ss.rr.next_spoke);
147 if (asoc->ss_data.last_out_stream == NULL) {
/* strq was at the head: wrap the cursor to the tail. */
148 asoc->ss_data.last_out_stream = TAILQ_LAST(&asoc->ss_data.out.wheel,
151 if (asoc->ss_data.last_out_stream == strq) {
/* strq was the only entry on the wheel. */
152 asoc->ss_data.last_out_stream = NULL;
155 if (asoc->ss_data.locked_on_sending == strq) {
156 asoc->ss_data.locked_on_sending = NULL;
158 TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.rr.next_spoke);
159 strq->ss_params.scheduled = false;
/*
 * Pick the next stream to send from. If the association is locked onto
 * a stream (a message is partially sent and I-DATA is not in use), that
 * stream must be returned. Otherwise advance round-robin from
 * last_out_stream, wrapping to the wheel head. When CMT is off, the
 * chosen stream's first chunk must be destined for the requested net.
 * NOTE(review): fragment — the retry/skip logic after the net mismatch
 * check and the final return are missing from this extraction.
 */
164 static struct sctp_stream_out *
165 sctp_ss_default_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
166 struct sctp_association *asoc)
168 struct sctp_stream_out *strq, *strqt;
170 SCTP_TCB_LOCK_ASSERT(stcb);
172 if (asoc->ss_data.locked_on_sending != NULL) {
173 KASSERT(asoc->ss_data.locked_on_sending->ss_params.scheduled,
174 ("locked_on_sending %p not scheduled",
175 (void *)asoc->ss_data.locked_on_sending));
176 return (asoc->ss_data.locked_on_sending);
178 strqt = asoc->ss_data.last_out_stream;
179 KASSERT(strqt == NULL || strqt->ss_params.scheduled,
180 ("last_out_stream %p not scheduled", (void *)strqt));
182 /* Find the next stream to use */
184 strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
186 strq = TAILQ_NEXT(strqt, ss_params.ss.rr.next_spoke);
188 strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
191 KASSERT(strq == NULL || strq->ss_params.scheduled,
192 ("strq %p not scheduled", (void *)strq));
195 * If CMT is off, we must validate that the stream in question has
196 * the first item pointed towards are network destination requested
197 * by the caller. Note that if we turn out to be locked to a stream
198 * (assigning TSN's then we must stop, since we cannot look for
199 * another stream with data to send to that destination). In CMT's
200 * case, by skipping this check, we will send one data packet
201 * towards the requested net.
203 if (net != NULL && strq != NULL &&
204 SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
205 if (TAILQ_FIRST(&strq->outqueue) &&
206 TAILQ_FIRST(&strq->outqueue)->net != NULL &&
207 TAILQ_FIRST(&strq->outqueue)->net != net) {
208 if (strq == asoc->ss_data.last_out_stream) {
/*
 * Bookkeeping after data has been moved from strq into a packet:
 * remember strq as last_out_stream and, when I-DATA is not supported,
 * lock the association onto strq while its head message is partially
 * taken (fragments of one message must not interleave with others).
 */
220 sctp_ss_default_scheduled(struct sctp_tcb *stcb,
221 struct sctp_nets *net SCTP_UNUSED,
222 struct sctp_association *asoc,
223 struct sctp_stream_out *strq,
224 int moved_how_much SCTP_UNUSED)
226 struct sctp_stream_queue_pending *sp;
228 KASSERT(strq != NULL, ("strq is NULL"));
229 KASSERT(strq->ss_params.scheduled, ("strq %p is not scheduled", (void *)strq));
230 SCTP_TCB_LOCK_ASSERT(stcb);
232 asoc->ss_data.last_out_stream = strq;
233 if (asoc->idata_supported == 0) {
234 sp = TAILQ_FIRST(&strq->outqueue);
235 if ((sp != NULL) && (sp->some_taken == 1)) {
/* Head message partially sent: stay locked on this stream. */
236 asoc->ss_data.locked_on_sending = strq;
238 asoc->ss_data.locked_on_sending = NULL;
241 asoc->ss_data.locked_on_sending = NULL;
/* Per-packet completion hook: a no-op for the default scheduler. */
247 sctp_ss_default_packet_done(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net SCTP_UNUSED,
248 struct sctp_association *asoc SCTP_UNUSED)
250 SCTP_TCB_LOCK_ASSERT(stcb);
252 /* Nothing to be done here */
/* The default scheduler has no per-stream value to report: no-op. */
257 sctp_ss_default_get_value(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc SCTP_UNUSED,
258 struct sctp_stream_out *strq SCTP_UNUSED, uint16_t *value SCTP_UNUSED)
260 SCTP_TCB_LOCK_ASSERT(stcb);
262 /* Nothing to be done here */
/* The default scheduler has no per-stream value to set: no-op. */
267 sctp_ss_default_set_value(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc SCTP_UNUSED,
268 struct sctp_stream_out *strq SCTP_UNUSED, uint16_t value SCTP_UNUSED)
270 SCTP_TCB_LOCK_ASSERT(stcb);
272 /* Nothing to be done here */
/*
 * Report whether the single remaining queued user message is still
 * incomplete (used e.g. when deciding if a shutdown must wait).
 * Only meaningful when exactly one message is queued and the
 * association is locked onto its stream; otherwise the early exits
 * (missing from this fragment) apply.
 */
277 sctp_ss_default_is_user_msgs_incomplete(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc)
279 struct sctp_stream_out *strq;
280 struct sctp_stream_queue_pending *sp;
282 SCTP_TCB_LOCK_ASSERT(stcb);
284 if (asoc->stream_queue_cnt != 1) {
287 strq = asoc->ss_data.locked_on_sending;
291 sp = TAILQ_FIRST(&strq->outqueue);
295 return (sp->msg_is_complete == 0);
299 * Real round-robin algorithm.
300 * Always iterates the streams in ascending order.
/*
 * Real round-robin add: insert strq into the wheel sorted by ascending
 * stream id (sid), so selection always iterates streams in order.
 * Skipped if the stream has no data or is already scheduled.
 */
303 sctp_ss_rr_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
304 struct sctp_stream_out *strq,
305 struct sctp_stream_queue_pending *sp SCTP_UNUSED)
307 struct sctp_stream_out *strqt;
309 SCTP_TCB_LOCK_ASSERT(stcb);
311 if (!TAILQ_EMPTY(&strq->outqueue) && !strq->ss_params.scheduled) {
312 if (TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
313 TAILQ_INSERT_HEAD(&asoc->ss_data.out.wheel, strq, ss_params.ss.rr.next_spoke);
315 strqt = TAILQ_FIRST(&asoc->ss_data.out.wheel);
/* Walk past all entries with a smaller sid. */
316 while (strqt != NULL && (strqt->sid < strq->sid)) {
317 strqt = TAILQ_NEXT(strqt, ss_params.ss.rr.next_spoke);
320 TAILQ_INSERT_BEFORE(strqt, strq, ss_params.ss.rr.next_spoke);
322 TAILQ_INSERT_TAIL(&asoc->ss_data.out.wheel, strq, ss_params.ss.rr.next_spoke);
325 strq->ss_params.scheduled = true;
331 * Real round-robin per packet algorithm.
332 * Always iterates the streams in ascending order and
333 * only fills messages of the same stream in a packet.
/*
 * Round-robin-per-packet select: keep returning the same stream for the
 * whole packet; the wheel only advances in sctp_ss_rrp_packet_done().
 */
335 static struct sctp_stream_out *
336 sctp_ss_rrp_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net SCTP_UNUSED,
337 struct sctp_association *asoc)
339 SCTP_TCB_LOCK_ASSERT(stcb);
341 return (asoc->ss_data.last_out_stream);
/*
 * Called after a packet is finished: advance the round-robin cursor to
 * the next scheduled stream (wrapping to the wheel head) and store it in
 * last_out_stream for the next packet. When CMT is off the candidate's
 * first chunk must match the requested net.
 * NOTE(review): fragment — the skip logic inside the net-mismatch check
 * is missing from this extraction.
 */
345 sctp_ss_rrp_packet_done(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
346 struct sctp_association *asoc)
348 struct sctp_stream_out *strq, *strqt;
350 SCTP_TCB_LOCK_ASSERT(stcb);
352 strqt = asoc->ss_data.last_out_stream;
353 KASSERT(strqt == NULL || strqt->ss_params.scheduled,
354 ("last_out_stream %p not scheduled", (void *)strqt));
356 /* Find the next stream to use */
358 strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
360 strq = TAILQ_NEXT(strqt, ss_params.ss.rr.next_spoke);
362 strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
365 KASSERT(strq == NULL || strq->ss_params.scheduled,
366 ("strq %p not scheduled", (void *)strq));
369 * If CMT is off, we must validate that the stream in question has
370 * the first item pointed towards are network destination requested
371 * by the caller. Note that if we turn out to be locked to a stream
372 * (assigning TSN's then we must stop, since we cannot look for
373 * another stream with data to send to that destination). In CMT's
374 * case, by skipping this check, we will send one data packet
375 * towards the requested net.
377 if (net != NULL && strq != NULL &&
378 SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
379 if (TAILQ_FIRST(&strq->outqueue) &&
380 TAILQ_FIRST(&strq->outqueue)->net != NULL &&
381 TAILQ_FIRST(&strq->outqueue)->net != net) {
382 if (strq == asoc->ss_data.last_out_stream) {
390 asoc->ss_data.last_out_stream = strq;
395 * Priority algorithm.
396 * Always prefers streams based on their priority id.
/*
 * Drain the priority wheel. When clear_values is set (condition line
 * missing from this fragment), each stream's priority is also reset to
 * the default of 0.
 */
399 sctp_ss_prio_clear(struct sctp_tcb *stcb, struct sctp_association *asoc,
402 SCTP_TCB_LOCK_ASSERT(stcb);
404 while (!TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
405 struct sctp_stream_out *strq;
407 strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
408 KASSERT(strq->ss_params.scheduled, ("strq %p not scheduled", (void *)strq));
410 strq->ss_params.ss.prio.priority = 0;
412 TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.prio.next_spoke);
413 strq->ss_params.scheduled = false;
415 asoc->ss_data.last_out_stream = NULL;
/*
 * Initialize priority-scheduler state for strq. If strq replaces
 * with_strq, redirect association cursors to it and inherit the old
 * stream's priority; otherwise start at the default priority 0.
 */
420 sctp_ss_prio_init_stream(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq)
422 SCTP_TCB_LOCK_ASSERT(stcb);
424 if (with_strq != NULL) {
425 if (stcb->asoc.ss_data.locked_on_sending == with_strq) {
426 stcb->asoc.ss_data.locked_on_sending = strq;
428 if (stcb->asoc.ss_data.last_out_stream == with_strq) {
429 stcb->asoc.ss_data.last_out_stream = strq;
432 strq->ss_params.scheduled = false;
433 if (with_strq != NULL) {
434 strq->ss_params.ss.prio.priority = with_strq->ss_params.ss.prio.priority;
436 strq->ss_params.ss.prio.priority = 0;
/*
 * Schedule strq on the priority wheel, keeping the wheel sorted by
 * ascending priority value (lower value = preferred). Skipped when the
 * stream has no data or is already on the wheel.
 */
442 sctp_ss_prio_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
443 struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp SCTP_UNUSED)
445 struct sctp_stream_out *strqt;
447 SCTP_TCB_LOCK_ASSERT(stcb);
449 /* Add to wheel if not already on it and stream queue not empty */
450 if (!TAILQ_EMPTY(&strq->outqueue) && !strq->ss_params.scheduled) {
451 if (TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
452 TAILQ_INSERT_HEAD(&asoc->ss_data.out.wheel, strq, ss_params.ss.prio.next_spoke);
454 strqt = TAILQ_FIRST(&asoc->ss_data.out.wheel);
/* Find the first entry with priority >= ours and insert before it. */
455 while (strqt != NULL && strqt->ss_params.ss.prio.priority < strq->ss_params.ss.prio.priority) {
456 strqt = TAILQ_NEXT(strqt, ss_params.ss.prio.next_spoke);
459 TAILQ_INSERT_BEFORE(strqt, strq, ss_params.ss.prio.next_spoke);
461 TAILQ_INSERT_TAIL(&asoc->ss_data.out.wheel, strq, ss_params.ss.prio.next_spoke);
464 strq->ss_params.scheduled = true;
/*
 * Unschedule a drained stream from the priority wheel; same cursor
 * repair as the default scheduler (move last_out_stream back, wrap to
 * the tail, NULL when strq was the only entry, clear locked_on_sending).
 */
470 sctp_ss_prio_remove(struct sctp_tcb *stcb, struct sctp_association *asoc,
471 struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp SCTP_UNUSED)
473 SCTP_TCB_LOCK_ASSERT(stcb);
476 * Remove from wheel if stream queue is empty and actually is on the
479 if (TAILQ_EMPTY(&strq->outqueue) && strq->ss_params.scheduled) {
480 if (asoc->ss_data.last_out_stream == strq) {
481 asoc->ss_data.last_out_stream = TAILQ_PREV(asoc->ss_data.last_out_stream,
483 ss_params.ss.prio.next_spoke);
484 if (asoc->ss_data.last_out_stream == NULL) {
485 asoc->ss_data.last_out_stream = TAILQ_LAST(&asoc->ss_data.out.wheel,
488 if (asoc->ss_data.last_out_stream == strq) {
489 asoc->ss_data.last_out_stream = NULL;
492 if (asoc->ss_data.locked_on_sending == strq) {
493 asoc->ss_data.locked_on_sending = NULL;
495 TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.prio.next_spoke);
496 strq->ss_params.scheduled = false;
/*
 * Priority select: honor locked_on_sending first; otherwise continue
 * after last_out_stream only while the next entry shares the same
 * priority (round-robin within a priority class), else restart from the
 * wheel head (which holds the lowest/most-preferred priority value).
 * NOTE(review): fragment — the same-priority continuation branches and
 * the final return are missing from this extraction.
 */
501 static struct sctp_stream_out *
502 sctp_ss_prio_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
503 struct sctp_association *asoc)
505 struct sctp_stream_out *strq, *strqt, *strqn;
507 SCTP_TCB_LOCK_ASSERT(stcb);
509 if (asoc->ss_data.locked_on_sending != NULL) {
510 KASSERT(asoc->ss_data.locked_on_sending->ss_params.scheduled,
511 ("locked_on_sending %p not scheduled",
512 (void *)asoc->ss_data.locked_on_sending));
513 return (asoc->ss_data.locked_on_sending);
515 strqt = asoc->ss_data.last_out_stream;
516 KASSERT(strqt == NULL || strqt->ss_params.scheduled,
517 ("last_out_stream %p not scheduled", (void *)strqt));
519 /* Find the next stream to use */
521 strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
523 strqn = TAILQ_NEXT(strqt, ss_params.ss.prio.next_spoke);
525 strqn->ss_params.ss.prio.priority == strqt->ss_params.ss.prio.priority) {
528 strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
531 KASSERT(strq == NULL || strq->ss_params.scheduled,
532 ("strq %p not scheduled", (void *)strq));
535 * If CMT is off, we must validate that the stream in question has
536 * the first item pointed towards are network destination requested
537 * by the caller. Note that if we turn out to be locked to a stream
538 * (assigning TSN's then we must stop, since we cannot look for
539 * another stream with data to send to that destination). In CMT's
540 * case, by skipping this check, we will send one data packet
541 * towards the requested net.
543 if (net != NULL && strq != NULL &&
544 SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
545 if (TAILQ_FIRST(&strq->outqueue) &&
546 TAILQ_FIRST(&strq->outqueue)->net != NULL &&
547 TAILQ_FIRST(&strq->outqueue)->net != net) {
548 if (strq == asoc->ss_data.last_out_stream) {
/*
 * Report strq's configured priority via *value (SCTP_SS_VALUE sockopt
 * read path). Error/guard lines are missing from this fragment.
 */
560 sctp_ss_prio_get_value(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc SCTP_UNUSED,
561 struct sctp_stream_out *strq, uint16_t *value)
563 SCTP_TCB_LOCK_ASSERT(stcb);
568 *value = strq->ss_params.ss.prio.priority;
/*
 * Set strq's priority (SCTP_SS_VALUE sockopt write path), then remove
 * and re-add it so the wheel is re-sorted at the new priority position.
 */
573 sctp_ss_prio_set_value(struct sctp_tcb *stcb, struct sctp_association *asoc,
574 struct sctp_stream_out *strq, uint16_t value)
576 SCTP_TCB_LOCK_ASSERT(stcb);
581 strq->ss_params.ss.prio.priority = value;
582 sctp_ss_prio_remove(stcb, asoc, strq, NULL);
583 sctp_ss_prio_add(stcb, asoc, strq, NULL);
588 * Fair bandwidth algorithm.
589 * Maintains an equal throughput per stream.
/*
 * Drain the fair-bandwidth wheel. When clear_values is set (condition
 * line missing from this fragment), each stream's byte-rounds counter
 * is also reset to -1 ("no data queued").
 */
592 sctp_ss_fb_clear(struct sctp_tcb *stcb, struct sctp_association *asoc,
595 SCTP_TCB_LOCK_ASSERT(stcb);
597 while (!TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
598 struct sctp_stream_out *strq;
600 strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
601 KASSERT(strq->ss_params.scheduled, ("strq %p not scheduled", (void *)strq));
603 strq->ss_params.ss.fb.rounds = -1;
605 TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.fb.next_spoke);
606 strq->ss_params.scheduled = false;
608 asoc->ss_data.last_out_stream = NULL;
/*
 * Initialize fair-bandwidth state for strq. If strq replaces with_strq,
 * redirect association cursors and inherit the old rounds counter;
 * otherwise start with rounds = -1 (no data queued yet).
 */
613 sctp_ss_fb_init_stream(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq)
615 SCTP_TCB_LOCK_ASSERT(stcb);
617 if (with_strq != NULL) {
618 if (stcb->asoc.ss_data.locked_on_sending == with_strq) {
619 stcb->asoc.ss_data.locked_on_sending = strq;
621 if (stcb->asoc.ss_data.last_out_stream == with_strq) {
622 stcb->asoc.ss_data.last_out_stream = strq;
625 strq->ss_params.scheduled = false;
626 if (with_strq != NULL) {
627 strq->ss_params.ss.fb.rounds = with_strq->ss_params.ss.fb.rounds;
629 strq->ss_params.ss.fb.rounds = -1;
/*
 * Schedule strq on the fair-bandwidth wheel. A fresh stream (rounds < 0)
 * seeds its rounds counter with the length of its first queued message,
 * so selection can prefer the stream with the least outstanding bytes.
 */
635 sctp_ss_fb_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
636 struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp SCTP_UNUSED)
638 SCTP_TCB_LOCK_ASSERT(stcb);
640 if (!TAILQ_EMPTY(&strq->outqueue) && !strq->ss_params.scheduled) {
641 if (strq->ss_params.ss.fb.rounds < 0)
642 strq->ss_params.ss.fb.rounds = TAILQ_FIRST(&strq->outqueue)->length;
643 TAILQ_INSERT_TAIL(&asoc->ss_data.out.wheel, strq, ss_params.ss.fb.next_spoke);
644 strq->ss_params.scheduled = true;
/*
 * Unschedule a drained stream from the fair-bandwidth wheel; identical
 * cursor-repair pattern to the default scheduler's remove.
 */
650 sctp_ss_fb_remove(struct sctp_tcb *stcb, struct sctp_association *asoc,
651 struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp SCTP_UNUSED)
653 SCTP_TCB_LOCK_ASSERT(stcb);
656 * Remove from wheel if stream queue is empty and actually is on the
659 if (TAILQ_EMPTY(&strq->outqueue) && strq->ss_params.scheduled) {
660 if (asoc->ss_data.last_out_stream == strq) {
661 asoc->ss_data.last_out_stream = TAILQ_PREV(asoc->ss_data.last_out_stream,
663 ss_params.ss.fb.next_spoke);
664 if (asoc->ss_data.last_out_stream == NULL) {
665 asoc->ss_data.last_out_stream = TAILQ_LAST(&asoc->ss_data.out.wheel,
668 if (asoc->ss_data.last_out_stream == strq) {
669 asoc->ss_data.last_out_stream = NULL;
672 if (asoc->ss_data.locked_on_sending == strq) {
673 asoc->ss_data.locked_on_sending = NULL;
675 TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.ss.fb.next_spoke);
676 strq->ss_params.scheduled = false;
/*
 * Fair-bandwidth select: scan the whole wheel starting after
 * last_out_stream and pick the eligible stream with the smallest
 * rounds value (least bytes served so far). CMT-off eligibility
 * requires the stream's first chunk to match the requested net (or
 * have no net preference).
 * NOTE(review): fragment — the "strq = strqt" assignment inside the
 * comparison and the final return are missing from this extraction.
 */
681 static struct sctp_stream_out *
682 sctp_ss_fb_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
683 struct sctp_association *asoc)
685 struct sctp_stream_out *strq = NULL, *strqt;
687 SCTP_TCB_LOCK_ASSERT(stcb);
689 if (asoc->ss_data.locked_on_sending != NULL) {
690 KASSERT(asoc->ss_data.locked_on_sending->ss_params.scheduled,
691 ("locked_on_sending %p not scheduled",
692 (void *)asoc->ss_data.locked_on_sending));
693 return (asoc->ss_data.locked_on_sending);
695 if (asoc->ss_data.last_out_stream == NULL ||
696 TAILQ_FIRST(&asoc->ss_data.out.wheel) == TAILQ_LAST(&asoc->ss_data.out.wheel, sctpwheel_listhead)) {
697 strqt = TAILQ_FIRST(&asoc->ss_data.out.wheel);
699 strqt = TAILQ_NEXT(asoc->ss_data.last_out_stream, ss_params.ss.fb.next_spoke);
702 if ((strqt != NULL) &&
703 ((SCTP_BASE_SYSCTL(sctp_cmt_on_off) > 0) ||
704 (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0 &&
705 (net == NULL || (TAILQ_FIRST(&strqt->outqueue) && TAILQ_FIRST(&strqt->outqueue)->net == NULL) ||
706 (net != NULL && TAILQ_FIRST(&strqt->outqueue) && TAILQ_FIRST(&strqt->outqueue)->net != NULL &&
707 TAILQ_FIRST(&strqt->outqueue)->net == net))))) {
708 if ((strqt->ss_params.ss.fb.rounds >= 0) &&
710 (strqt->ss_params.ss.fb.rounds < strq->ss_params.ss.fb.rounds))) {
715 strqt = TAILQ_NEXT(strqt, ss_params.ss.fb.next_spoke);
717 strqt = TAILQ_FIRST(&asoc->ss_data.out.wheel);
719 } while (strqt != strq);
/*
 * Post-move bookkeeping for fair bandwidth: update locked_on_sending as
 * in the default scheduler, then normalize the wheel by subtracting the
 * served stream's rounds from every scheduled stream (clamped at 0) and
 * reseed the served stream's counter from its next queued message (-1 if
 * its queue drained). The `subtract` declaration is missing from this
 * fragment.
 */
724 sctp_ss_fb_scheduled(struct sctp_tcb *stcb, struct sctp_nets *net SCTP_UNUSED,
725 struct sctp_association *asoc, struct sctp_stream_out *strq,
726 int moved_how_much SCTP_UNUSED)
728 struct sctp_stream_queue_pending *sp;
729 struct sctp_stream_out *strqt;
732 SCTP_TCB_LOCK_ASSERT(stcb);
734 if (asoc->idata_supported == 0) {
735 sp = TAILQ_FIRST(&strq->outqueue);
736 if ((sp != NULL) && (sp->some_taken == 1)) {
737 asoc->ss_data.locked_on_sending = strq;
739 asoc->ss_data.locked_on_sending = NULL;
742 asoc->ss_data.locked_on_sending = NULL;
744 subtract = strq->ss_params.ss.fb.rounds;
745 TAILQ_FOREACH(strqt, &asoc->ss_data.out.wheel, ss_params.ss.fb.next_spoke) {
746 strqt->ss_params.ss.fb.rounds -= subtract;
747 if (strqt->ss_params.ss.fb.rounds < 0)
748 strqt->ss_params.ss.fb.rounds = 0;
750 if (TAILQ_FIRST(&strq->outqueue)) {
751 strq->ss_params.ss.fb.rounds = TAILQ_FIRST(&strq->outqueue)->length;
753 strq->ss_params.ss.fb.rounds = -1;
755 asoc->ss_data.last_out_stream = strq;
760 * First-come, first-serve algorithm.
761 * Maintains the order provided by the application.
/* Forward declaration: needed by sctp_ss_fcfs_init() below. */
764 sctp_ss_fcfs_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
765 struct sctp_stream_out *strq SCTP_UNUSED,
766 struct sctp_stream_queue_pending *sp);
/*
 * Initialize the FCFS scheduler: build the single ordered list of
 * pending messages. Pre-existing queued data (scheduler changed on a
 * live association) is merged round-robin: take the n-th message from
 * each stream in turn so the list approximates original arrival order.
 * NOTE(review): fragment — loop-control lines (outer while, add_more
 * updates, x reset) are missing from this extraction.
 */
769 sctp_ss_fcfs_init(struct sctp_tcb *stcb, struct sctp_association *asoc)
771 uint32_t x, n = 0, add_more = 1;
772 struct sctp_stream_queue_pending *sp;
775 SCTP_TCB_LOCK_ASSERT(stcb);
777 TAILQ_INIT(&asoc->ss_data.out.list);
779 * If there is data in the stream queues already, the scheduler of
780 * an existing association has been changed. We can only cycle
781 * through the stream queues and add everything to the FCFS queue.
785 for (i = 0; i < asoc->streamoutcnt; i++) {
786 sp = TAILQ_FIRST(&asoc->strmout[i].outqueue);
788 /* Find n. message in current stream queue */
789 while (sp != NULL && x < n) {
790 sp = TAILQ_NEXT(sp, next);
794 sctp_ss_fcfs_add(stcb, asoc, &asoc->strmout[i], sp);
/*
 * Drain the FCFS message list, unmarking each pending chunk, and reset
 * last_out_stream. Note FCFS schedules individual messages (sp), not
 * streams, so this walks ss_data.out.list rather than a wheel.
 */
804 sctp_ss_fcfs_clear(struct sctp_tcb *stcb, struct sctp_association *asoc,
805 bool clear_values SCTP_UNUSED)
807 struct sctp_stream_queue_pending *sp;
809 SCTP_TCB_LOCK_ASSERT(stcb);
811 while (!TAILQ_EMPTY(&asoc->ss_data.out.list)) {
812 sp = TAILQ_FIRST(&asoc->ss_data.out.list);
813 KASSERT(sp->scheduled, ("sp %p not scheduled", (void *)sp));
814 TAILQ_REMOVE(&asoc->ss_data.out.list, sp, ss_next);
815 sp->scheduled = false;
817 asoc->ss_data.last_out_stream = NULL;
/*
 * Initialize FCFS state for strq; when strq replaces with_strq,
 * redirect the association's cursors to the new stream.
 */
822 sctp_ss_fcfs_init_stream(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq)
824 SCTP_TCB_LOCK_ASSERT(stcb);
826 if (with_strq != NULL) {
827 if (stcb->asoc.ss_data.locked_on_sending == with_strq) {
828 stcb->asoc.ss_data.locked_on_sending = strq;
830 if (stcb->asoc.ss_data.last_out_stream == with_strq) {
831 stcb->asoc.ss_data.last_out_stream = strq;
834 strq->ss_params.scheduled = false;
/*
 * Append a pending message to the tail of the FCFS list, preserving
 * arrival order; the stream argument is unused (FCFS is per-message).
 */
839 sctp_ss_fcfs_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
840 struct sctp_stream_out *strq SCTP_UNUSED, struct sctp_stream_queue_pending *sp)
842 SCTP_TCB_LOCK_ASSERT(stcb);
844 if (!sp->scheduled) {
845 TAILQ_INSERT_TAIL(&asoc->ss_data.out.list, sp, ss_next);
846 sp->scheduled = true;
/* Return true iff no pending message is scheduled (FCFS list empty). */
852 sctp_ss_fcfs_is_empty(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc)
854 SCTP_TCB_LOCK_ASSERT(stcb);
856 return (TAILQ_EMPTY(&asoc->ss_data.out.list));
/*
 * Unlink a message from the FCFS list and clear its scheduled flag.
 * The guard condition (sp scheduled check) is missing from this
 * fragment.
 */
860 sctp_ss_fcfs_remove(struct sctp_tcb *stcb, struct sctp_association *asoc,
861 struct sctp_stream_out *strq SCTP_UNUSED, struct sctp_stream_queue_pending *sp)
863 SCTP_TCB_LOCK_ASSERT(stcb);
866 TAILQ_REMOVE(&asoc->ss_data.out.list, sp, ss_next);
867 sp->scheduled = false;
/*
 * FCFS select: honor locked_on_sending, otherwise take the stream
 * owning the head message of the FCFS list (sp->sid indexes
 * asoc->strmout). When CMT is off, skip forward past messages whose
 * stream's first chunk targets a different net.
 * NOTE(review): fragment — the skip loop's continuation and the final
 * return are missing from this extraction.
 */
872 static struct sctp_stream_out *
873 sctp_ss_fcfs_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
874 struct sctp_association *asoc)
876 struct sctp_stream_out *strq;
877 struct sctp_stream_queue_pending *sp;
879 SCTP_TCB_LOCK_ASSERT(stcb);
881 if (asoc->ss_data.locked_on_sending) {
882 return (asoc->ss_data.locked_on_sending);
884 sp = TAILQ_FIRST(&asoc->ss_data.out.list);
887 strq = &asoc->strmout[sp->sid];
893 * If CMT is off, we must validate that the stream in question has
894 * the first item pointed towards are network destination requested
895 * by the caller. Note that if we turn out to be locked to a stream
896 * (assigning TSN's then we must stop, since we cannot look for
897 * another stream with data to send to that destination). In CMT's
898 * case, by skipping this check, we will send one data packet
899 * towards the requested net.
901 if (net != NULL && strq != NULL &&
902 SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
903 if (TAILQ_FIRST(&strq->outqueue) &&
904 TAILQ_FIRST(&strq->outqueue)->net != NULL &&
905 TAILQ_FIRST(&strq->outqueue)->net != net) {
906 sp = TAILQ_NEXT(sp, ss_next);
/*
 * Post-move bookkeeping for FCFS: record last_out_stream and, without
 * I-DATA support, lock onto strq while its head message is partially
 * taken — same locking rule as the default scheduler.
 */
914 sctp_ss_fcfs_scheduled(struct sctp_tcb *stcb,
915 struct sctp_nets *net SCTP_UNUSED,
916 struct sctp_association *asoc,
917 struct sctp_stream_out *strq,
918 int moved_how_much SCTP_UNUSED)
920 struct sctp_stream_queue_pending *sp;
922 KASSERT(strq != NULL, ("strq is NULL"));
923 asoc->ss_data.last_out_stream = strq;
924 if (asoc->idata_supported == 0) {
925 sp = TAILQ_FIRST(&strq->outqueue);
926 if ((sp != NULL) && (sp->some_taken == 1)) {
927 asoc->ss_data.locked_on_sending = strq;
929 asoc->ss_data.locked_on_sending = NULL;
932 asoc->ss_data.locked_on_sending = NULL;
937 const struct sctp_ss_functions sctp_ss_functions[] = {
938 /* SCTP_SS_DEFAULT */
940 .sctp_ss_init = sctp_ss_default_init,
941 .sctp_ss_clear = sctp_ss_default_clear,
942 .sctp_ss_init_stream = sctp_ss_default_init_stream,
943 .sctp_ss_add_to_stream = sctp_ss_default_add,
944 .sctp_ss_is_empty = sctp_ss_default_is_empty,
945 .sctp_ss_remove_from_stream = sctp_ss_default_remove,
946 .sctp_ss_select_stream = sctp_ss_default_select,
947 .sctp_ss_scheduled = sctp_ss_default_scheduled,
948 .sctp_ss_packet_done = sctp_ss_default_packet_done,
949 .sctp_ss_get_value = sctp_ss_default_get_value,
950 .sctp_ss_set_value = sctp_ss_default_set_value,
951 .sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
955 .sctp_ss_init = sctp_ss_default_init,
956 .sctp_ss_clear = sctp_ss_default_clear,
957 .sctp_ss_init_stream = sctp_ss_default_init_stream,
958 .sctp_ss_add_to_stream = sctp_ss_rr_add,
959 .sctp_ss_is_empty = sctp_ss_default_is_empty,
960 .sctp_ss_remove_from_stream = sctp_ss_default_remove,
961 .sctp_ss_select_stream = sctp_ss_default_select,
962 .sctp_ss_scheduled = sctp_ss_default_scheduled,
963 .sctp_ss_packet_done = sctp_ss_default_packet_done,
964 .sctp_ss_get_value = sctp_ss_default_get_value,
965 .sctp_ss_set_value = sctp_ss_default_set_value,
966 .sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
970 .sctp_ss_init = sctp_ss_default_init,
971 .sctp_ss_clear = sctp_ss_default_clear,
972 .sctp_ss_init_stream = sctp_ss_default_init_stream,
973 .sctp_ss_add_to_stream = sctp_ss_rr_add,
974 .sctp_ss_is_empty = sctp_ss_default_is_empty,
975 .sctp_ss_remove_from_stream = sctp_ss_default_remove,
976 .sctp_ss_select_stream = sctp_ss_rrp_select,
977 .sctp_ss_scheduled = sctp_ss_default_scheduled,
978 .sctp_ss_packet_done = sctp_ss_rrp_packet_done,
979 .sctp_ss_get_value = sctp_ss_default_get_value,
980 .sctp_ss_set_value = sctp_ss_default_set_value,
981 .sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
985 .sctp_ss_init = sctp_ss_default_init,
986 .sctp_ss_clear = sctp_ss_prio_clear,
987 .sctp_ss_init_stream = sctp_ss_prio_init_stream,
988 .sctp_ss_add_to_stream = sctp_ss_prio_add,
989 .sctp_ss_is_empty = sctp_ss_default_is_empty,
990 .sctp_ss_remove_from_stream = sctp_ss_prio_remove,
991 .sctp_ss_select_stream = sctp_ss_prio_select,
992 .sctp_ss_scheduled = sctp_ss_default_scheduled,
993 .sctp_ss_packet_done = sctp_ss_default_packet_done,
994 .sctp_ss_get_value = sctp_ss_prio_get_value,
995 .sctp_ss_set_value = sctp_ss_prio_set_value,
996 .sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
1000 .sctp_ss_init = sctp_ss_default_init,
1001 .sctp_ss_clear = sctp_ss_fb_clear,
1002 .sctp_ss_init_stream = sctp_ss_fb_init_stream,
1003 .sctp_ss_add_to_stream = sctp_ss_fb_add,
1004 .sctp_ss_is_empty = sctp_ss_default_is_empty,
1005 .sctp_ss_remove_from_stream = sctp_ss_fb_remove,
1006 .sctp_ss_select_stream = sctp_ss_fb_select,
1007 .sctp_ss_scheduled = sctp_ss_fb_scheduled,
1008 .sctp_ss_packet_done = sctp_ss_default_packet_done,
1009 .sctp_ss_get_value = sctp_ss_default_get_value,
1010 .sctp_ss_set_value = sctp_ss_default_set_value,
1011 .sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
1015 .sctp_ss_init = sctp_ss_fcfs_init,
1016 .sctp_ss_clear = sctp_ss_fcfs_clear,
1017 .sctp_ss_init_stream = sctp_ss_fcfs_init_stream,
1018 .sctp_ss_add_to_stream = sctp_ss_fcfs_add,
1019 .sctp_ss_is_empty = sctp_ss_fcfs_is_empty,
1020 .sctp_ss_remove_from_stream = sctp_ss_fcfs_remove,
1021 .sctp_ss_select_stream = sctp_ss_fcfs_select,
1022 .sctp_ss_scheduled = sctp_ss_fcfs_scheduled,
1023 .sctp_ss_packet_done = sctp_ss_default_packet_done,
1024 .sctp_ss_get_value = sctp_ss_default_get_value,
1025 .sctp_ss_set_value = sctp_ss_default_set_value,
1026 .sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete