/*-
 * Copyright (c) 2010-2011 Solarflare Communications, Inc.
 * All rights reserved.
 *
 * This software was developed in part by Philip Paeps under contract for
 * Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Theory of operation:
 *
 * Tx queues allocation and mapping
 *
 * One Tx queue with enabled checksum offload is allocated per Rx channel
 * (event queue).  Also 2 Tx queues (one without checksum offload and one
 * with IP checksum offload only) are allocated and bound to event queue 0.
 * sfxge_txq_type is used as the Tx queue label.
 *
 * So the mapping from (event queue, label) to Tx queue index is:
 *	if event queue index is 0, TxQ-index = TxQ-label (in [0..SFXGE_TXQ_NTYPES))
 *	else TxQ-index = SFXGE_TXQ_NTYPES + EvQ-index - 1
 * See sfxge_get_txq_by_label() in sfxge_ev.c.
 */

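/*
 * A minimal sketch of that mapping, written out as a function for
 * clarity.  It is illustrative only and not compiled in: the real
 * lookup is sfxge_get_txq_by_label() in sfxge_ev.c, and the function
 * name here is made up.
 */
#if 0
static unsigned int
sfxge_txq_index_sketch(unsigned int evq_index, enum sfxge_txq_type label)
{
	if (evq_index == 0)
		return ((unsigned int)label);	/* in [0..SFXGE_TXQ_NTYPES) */
	return (SFXGE_TXQ_NTYPES + evq_index - 1);
}
#endif
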
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/mbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>

#include "common/efx.h"

#include "sfxge.h"
#include "sfxge_tx.h"

/*
 * Estimate the maximum number of Tx descriptors required for a TSO
 * packet.  With minimum MSS and maximum mbuf length we might need more
 * (even than a ring-ful of descriptors), but this should not happen in
 * practice except due to deliberate attack.  In that case we will
 * truncate the output at a packet boundary.
 */
#define	SFXGE_TSO_MAX_DESC						\
	(SFXGE_TSO_MAX_SEGS * 2 + SFXGE_TX_MAPPING_MAX_SEG - 1)

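/*
 * Worked example with made-up values (the real constants live in
 * sfxge_tx.h): if SFXGE_TSO_MAX_SEGS were 64 and
 * SFXGE_TX_MAPPING_MAX_SEG were 16, the bound would be
 * 64 * 2 + 16 - 1 = 143 descriptors.  The estimate reads as two
 * descriptors per output segment (header plus payload) plus up to
 * SFXGE_TX_MAPPING_MAX_SEG - 1 extra payload descriptors where input
 * DMA segment boundaries split an output segment.
 */
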
/*
 * Set the block level to ensure there is space to generate a
 * large number of descriptors for TSO.
 */
#define	SFXGE_TXQ_BLOCK_LEVEL(_entries)					\
	(EFX_TXQ_LIMIT(_entries) - SFXGE_TSO_MAX_DESC)

#define	SFXGE_PARAM_TX_DPL_GET_MAX	SFXGE_PARAM(tx_dpl_get_max)
static int sfxge_tx_dpl_get_max = SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT;
TUNABLE_INT(SFXGE_PARAM_TX_DPL_GET_MAX, &sfxge_tx_dpl_get_max);
SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_get_max, CTLFLAG_RDTUN,
	   &sfxge_tx_dpl_get_max, 0,
	   "Maximum number of packets (any type) in deferred packet get-list");

#define	SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX \
	SFXGE_PARAM(tx_dpl_get_non_tcp_max)
static int sfxge_tx_dpl_get_non_tcp_max =
	SFXGE_TX_DPL_GET_NON_TCP_PKT_LIMIT_DEFAULT;
TUNABLE_INT(SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX, &sfxge_tx_dpl_get_non_tcp_max);
SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_get_non_tcp_max, CTLFLAG_RDTUN,
	   &sfxge_tx_dpl_get_non_tcp_max, 0,
	   "Maximum number of non-TCP packets in deferred packet get-list");

#define	SFXGE_PARAM_TX_DPL_PUT_MAX	SFXGE_PARAM(tx_dpl_put_max)
static int sfxge_tx_dpl_put_max = SFXGE_TX_DPL_PUT_PKT_LIMIT_DEFAULT;
TUNABLE_INT(SFXGE_PARAM_TX_DPL_PUT_MAX, &sfxge_tx_dpl_put_max);
SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_put_max, CTLFLAG_RDTUN,
	   &sfxge_tx_dpl_put_max, 0,
	   "Maximum number of packets (any type) in deferred packet put-list");

static const struct {
	const char *name;
	size_t offset;
} sfxge_tx_stats[] = {
#define	SFXGE_TX_STAT(name, member) \
	{ #name, offsetof(struct sfxge_txq, member) }
	SFXGE_TX_STAT(tso_bursts, tso_bursts),
	SFXGE_TX_STAT(tso_packets, tso_packets),
	SFXGE_TX_STAT(tso_long_headers, tso_long_headers),
	SFXGE_TX_STAT(tso_pdrop_too_many, tso_pdrop_too_many),
	SFXGE_TX_STAT(tso_pdrop_no_rsrc, tso_pdrop_no_rsrc),
	SFXGE_TX_STAT(tx_collapses, collapses),
	SFXGE_TX_STAT(tx_drops, drops),
	SFXGE_TX_STAT(tx_get_overflow, get_overflow),
	SFXGE_TX_STAT(tx_get_non_tcp_overflow, get_non_tcp_overflow),
	SFXGE_TX_STAT(tx_put_overflow, put_overflow),
	SFXGE_TX_STAT(tx_netdown_drops, netdown_drops),
};

/* Forward declarations. */
static void sfxge_tx_qdpl_service(struct sfxge_txq *txq);
static void sfxge_tx_qlist_post(struct sfxge_txq *txq);
static void sfxge_tx_qunblock(struct sfxge_txq *txq);
static int sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf,
			      const bus_dma_segment_t *dma_seg, int n_dma_seg);

void
sfxge_tx_qcomplete(struct sfxge_txq *txq, struct sfxge_evq *evq)
{
	unsigned int completed;

	SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);

	completed = txq->completed;
	while (completed != txq->pending) {
		struct sfxge_tx_mapping *stmp;
		unsigned int id;

		id = completed++ & txq->ptr_mask;

		stmp = &txq->stmp[id];
		if (stmp->flags & TX_BUF_UNMAP) {
			bus_dmamap_unload(txq->packet_dma_tag, stmp->map);
			if (stmp->flags & TX_BUF_MBUF) {
				struct mbuf *m = stmp->u.mbuf;
				do
					m = m_free(m);
				while (m != NULL);
			} else {
				free(stmp->u.heap_buf, M_SFXGE);
			}
			stmp->flags = 0;
		}
	}
	txq->completed = completed;

	/* Check whether we need to unblock the queue. */
	mb();
	if (txq->blocked) {
		unsigned int level;

		level = txq->added - txq->completed;
		if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries))
			sfxge_tx_qunblock(txq);
	}
}

static unsigned int
sfxge_is_mbuf_non_tcp(struct mbuf *mbuf)
{
	/* Absence of the TCP checksum flags does not prove that the packet
	 * is non-TCP, but the flags should be set on any TCP traffic that
	 * aims for high throughput.
	 */
	return (!(mbuf->m_pkthdr.csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP)));
}

/*
 * Reorder the put list and append it to the get list.
 */
static void
sfxge_tx_qdpl_swizzle(struct sfxge_txq *txq)
{
	struct sfxge_tx_dpl *stdp;
	struct mbuf *mbuf, *get_next, **get_tailp;
	volatile uintptr_t *putp;
	uintptr_t put;
	unsigned int count;
	unsigned int non_tcp_count;

	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

	stdp = &txq->dpl;

	/* Acquire the put list. */
	putp = &stdp->std_put;
	put = atomic_readandclear_ptr(putp);
	mbuf = (void *)put;

	if (mbuf == NULL)
		return;

	/* Reverse the put list. */
	get_tailp = &mbuf->m_nextpkt;
	get_next = NULL;

	count = 0;
	non_tcp_count = 0;
	do {
		struct mbuf *put_next;

		non_tcp_count += sfxge_is_mbuf_non_tcp(mbuf);
		put_next = mbuf->m_nextpkt;
		mbuf->m_nextpkt = get_next;
		get_next = mbuf;
		mbuf = put_next;

		count++;
	} while (mbuf != NULL);

	if (count > stdp->std_put_hiwat)
		stdp->std_put_hiwat = count;

	/* Append the reversed put list to the get list. */
	KASSERT(*get_tailp == NULL, ("*get_tailp != NULL"));
	*stdp->std_getp = get_next;
	stdp->std_getp = get_tailp;
	stdp->std_get_count += count;
	stdp->std_get_non_tcp_count += non_tcp_count;
}

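/*
 * Illustration of the swizzle: if senders pushed packets A, then B,
 * then C, the lock-free put list reads C->B->A.  The loop above
 * rebuilds A->B->C before splicing it onto the tail of the get list,
 * so packets are drained in arrival order.
 */
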
static void
sfxge_tx_qreap(struct sfxge_txq *txq)
{
	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

	txq->reaped = txq->completed;
}

static void
sfxge_tx_qlist_post(struct sfxge_txq *txq)
{
	unsigned int old_added;
	unsigned int level;
	int rc;

	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

	KASSERT(txq->n_pend_desc != 0, ("txq->n_pend_desc == 0"));
	KASSERT(txq->n_pend_desc <= SFXGE_TSO_MAX_DESC,
		("txq->n_pend_desc too large"));
	KASSERT(!txq->blocked, ("txq->blocked"));

	old_added = txq->added;

	/* Post the fragment list. */
	rc = efx_tx_qpost(txq->common, txq->pend_desc, txq->n_pend_desc,
			  txq->reaped, &txq->added);
	KASSERT(rc == 0, ("efx_tx_qpost() failed"));

	/* If efx_tx_qpost() had to refragment, our information about
	 * buffers to free may be associated with the wrong
	 * descriptors.
	 */
	KASSERT(txq->added - old_added == txq->n_pend_desc,
		("efx_tx_qpost() refragmented descriptors"));

	level = txq->added - txq->reaped;
	KASSERT(level <= txq->entries, ("overfilled TX queue"));

	/* Clear the fragment list. */
	txq->n_pend_desc = 0;

	/* Have we reached the block level? */
	if (level < SFXGE_TXQ_BLOCK_LEVEL(txq->entries))
		return;

	/* Reap, and check again */
	sfxge_tx_qreap(txq);
	level = txq->added - txq->reaped;
	if (level < SFXGE_TXQ_BLOCK_LEVEL(txq->entries))
		return;

	txq->blocked = 1;

	/*
	 * Avoid a race with completion interrupt handling that could leave
	 * the queue blocked.
	 */
	mb();
	sfxge_tx_qreap(txq);
	level = txq->added - txq->reaped;
	if (level < SFXGE_TXQ_BLOCK_LEVEL(txq->entries)) {
		mb();
		txq->blocked = 0;
	}
}

static int sfxge_tx_queue_mbuf(struct sfxge_txq *txq, struct mbuf *mbuf)
{
	bus_dmamap_t *used_map;
	bus_dmamap_t map;
	bus_dma_segment_t dma_seg[SFXGE_TX_MAPPING_MAX_SEG];
	unsigned int id;
	struct sfxge_tx_mapping *stmp;
	efx_buffer_t *desc;
	int n_dma_seg;
	int rc;
	int i;

	KASSERT(!txq->blocked, ("txq->blocked"));

	if (mbuf->m_pkthdr.csum_flags & CSUM_TSO)
		prefetch_read_many(mbuf->m_data);

	if (__predict_false(txq->init_state != SFXGE_TXQ_STARTED)) {
		rc = EINTR;
		goto reject;
	}

	/* Load the packet for DMA. */
	id = txq->added & txq->ptr_mask;
	stmp = &txq->stmp[id];
	rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag, stmp->map,
				     mbuf, dma_seg, &n_dma_seg, 0);
	if (rc == EFBIG) {
		/* Try again with the mbuf chain collapsed. */
		struct mbuf *new_mbuf = m_collapse(mbuf, M_NOWAIT,
						   SFXGE_TX_MAPPING_MAX_SEG);
		if (new_mbuf == NULL)
			goto reject;
		++txq->collapses;
		mbuf = new_mbuf;
		rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag,
					     stmp->map, mbuf,
					     dma_seg, &n_dma_seg, 0);
	}
	if (rc != 0)
		goto reject;

	/* Make the packet visible to the hardware. */
	bus_dmamap_sync(txq->packet_dma_tag, stmp->map, BUS_DMASYNC_PREWRITE);

	used_map = &stmp->map;

	if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) {
		rc = sfxge_tx_queue_tso(txq, mbuf, dma_seg, n_dma_seg);
		if (rc < 0)
			goto reject_mapped;
		stmp = &txq->stmp[rc];
	} else {
		/* Add the mapping to the fragment list, and set flags
		 * for the buffer.
		 */
		i = 0;
		for (;;) {
			desc = &txq->pend_desc[i];
			desc->eb_addr = dma_seg[i].ds_addr;
			desc->eb_size = dma_seg[i].ds_len;
			if (i == n_dma_seg - 1) {
				desc->eb_eop = 1;
				break;
			}
			desc->eb_eop = 0;
			i++;

			stmp->flags = 0;
			if (__predict_false(stmp ==
					    &txq->stmp[txq->ptr_mask]))
				stmp = &txq->stmp[0];
			else
				stmp++;
		}
		txq->n_pend_desc = n_dma_seg;
	}

	/*
	 * If the mapping required more than one descriptor
	 * then we need to associate the DMA map with the last
	 * descriptor, not the first.
	 */
	if (used_map != &stmp->map) {
		map = stmp->map;
		stmp->map = *used_map;
		*used_map = map;
	}

	stmp->u.mbuf = mbuf;
	stmp->flags = TX_BUF_UNMAP | TX_BUF_MBUF;

	/* Post the fragment list. */
	sfxge_tx_qlist_post(txq);

	return (0);

reject_mapped:
	bus_dmamap_unload(txq->packet_dma_tag, *used_map);
reject:
	/* Drop the packet on the floor. */
	m_freem(mbuf);
	++txq->drops;

	return (rc);
}

/*
 * Drain the deferred packet list into the transmit queue.
 */
static void
sfxge_tx_qdpl_drain(struct sfxge_txq *txq)
{
	struct sfxge_softc *sc;
	struct sfxge_tx_dpl *stdp;
	struct mbuf *mbuf, *next;
	unsigned int count;
	unsigned int non_tcp_count;
	unsigned int pushed;
	int rc;

	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

	sc = txq->sc;
	stdp = &txq->dpl;
	pushed = txq->added;

	if (__predict_true(txq->init_state == SFXGE_TXQ_STARTED)) {
		prefetch_read_many(sc->enp);
		prefetch_read_many(txq->common);
	}

	mbuf = stdp->std_get;
	count = stdp->std_get_count;
	non_tcp_count = stdp->std_get_non_tcp_count;

	if (count > stdp->std_get_hiwat)
		stdp->std_get_hiwat = count;

	while (count != 0) {
		KASSERT(mbuf != NULL, ("mbuf == NULL"));

		next = mbuf->m_nextpkt;
		mbuf->m_nextpkt = NULL;

		ETHER_BPF_MTAP(sc->ifnet, mbuf); /* packet capture */

		if (next != NULL)
			prefetch_read_many(next);

		rc = sfxge_tx_queue_mbuf(txq, mbuf);
		--count;
		non_tcp_count -= sfxge_is_mbuf_non_tcp(mbuf);
		mbuf = next;
		if (rc != 0)
			continue;

		if (txq->blocked)
			break;

		/* Push the fragments to the hardware in batches. */
		if (txq->added - pushed >= SFXGE_TX_BATCH) {
			efx_tx_qpush(txq->common, txq->added);
			pushed = txq->added;
		}
	}

	if (count == 0) {
		KASSERT(mbuf == NULL, ("mbuf != NULL"));
		KASSERT(non_tcp_count == 0,
			("inconsistent TCP/non-TCP detection"));
		stdp->std_get = NULL;
		stdp->std_get_count = 0;
		stdp->std_get_non_tcp_count = 0;
		stdp->std_getp = &stdp->std_get;
	} else {
		stdp->std_get = mbuf;
		stdp->std_get_count = count;
		stdp->std_get_non_tcp_count = non_tcp_count;
	}

	if (txq->added != pushed)
		efx_tx_qpush(txq->common, txq->added);

	KASSERT(txq->blocked || stdp->std_get_count == 0,
		("queue unblocked but count is non-zero"));
}

#define	SFXGE_TX_QDPL_PENDING(_txq)					\
	((_txq)->dpl.std_put != 0)

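/*
 * Note that the macro above peeks at std_put without taking the txq
 * lock; a non-zero value only means that at least one packet was on
 * the put list at the instant of the read.
 */
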
/*
 * Service the deferred packet list.
 *
 * NOTE: drops the txq mutex!
 */
static void
sfxge_tx_qdpl_service(struct sfxge_txq *txq)
{
	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

	do {
		if (SFXGE_TX_QDPL_PENDING(txq))
			sfxge_tx_qdpl_swizzle(txq);

		if (!txq->blocked)
			sfxge_tx_qdpl_drain(txq);

		SFXGE_TXQ_UNLOCK(txq);
	} while (SFXGE_TX_QDPL_PENDING(txq) &&
		 SFXGE_TXQ_TRYLOCK(txq));
}

/*
 * Put a packet on the deferred packet list.
 *
 * If we are called with the txq lock held, we put the packet on the "get
 * list", otherwise we atomically push it on the "put list".  The swizzle
 * function takes care of ordering.
 *
 * The length of the put list is bounded by SFXGE_TX_MAX_DEFERRED.  We
 * overload the csum_data field in the mbuf to keep track of this length
 * because there is no cheap alternative to avoid races.
 */
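/*
 * In other words (matching the put path below): the head mbuf of the
 * put list always carries the current list length in
 * m_pkthdr.csum_data.  A push reads the length from the old head,
 * checks it against the limit, stores length + 1 in the new head, and
 * retries the whole sequence if the compare-and-swap loses a race.
 */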
static int
sfxge_tx_qdpl_put(struct sfxge_txq *txq, struct mbuf *mbuf, int locked)
{
	struct sfxge_tx_dpl *stdp;

	stdp = &txq->dpl;

	KASSERT(mbuf->m_nextpkt == NULL, ("mbuf->m_nextpkt != NULL"));

	if (locked) {
		SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

		sfxge_tx_qdpl_swizzle(txq);

		if (stdp->std_get_count >= stdp->std_get_max) {
			txq->get_overflow++;
			return (ENOBUFS);
		}
		if (sfxge_is_mbuf_non_tcp(mbuf)) {
			if (stdp->std_get_non_tcp_count >=
			    stdp->std_get_non_tcp_max) {
				txq->get_non_tcp_overflow++;
				return (ENOBUFS);
			}
			stdp->std_get_non_tcp_count++;
		}

		*(stdp->std_getp) = mbuf;
		stdp->std_getp = &mbuf->m_nextpkt;
		stdp->std_get_count++;
	} else {
		volatile uintptr_t *putp;
		uintptr_t old;
		uintptr_t new;
		unsigned int old_len;

		putp = &stdp->std_put;
		new = (uintptr_t)mbuf;

		do {
			old = *putp;
			if (old != 0) {
				struct mbuf *mp = (struct mbuf *)old;
				old_len = mp->m_pkthdr.csum_data;
			} else
				old_len = 0;
			if (old_len >= stdp->std_put_max) {
				atomic_add_long(&txq->put_overflow, 1);
				return (ENOBUFS);
			}
			mbuf->m_pkthdr.csum_data = old_len + 1;
			mbuf->m_nextpkt = (void *)old;
		} while (atomic_cmpset_ptr(putp, old, new) == 0);
	}

	return (0);
}

/*
 * Called from if_transmit -- will try to grab the txq lock and, if it
 * succeeds, append the packet to the get list; otherwise it atomically
 * pushes the packet onto the put list (if there is space).
 */
int
sfxge_tx_packet_add(struct sfxge_txq *txq, struct mbuf *m)
{
	int locked;
	int rc;

	if (!SFXGE_LINK_UP(txq->sc)) {
		rc = ENETDOWN;
		atomic_add_long(&txq->netdown_drops, 1);
		goto fail;
	}

	/*
	 * Try to grab the txq lock.  If we are able to get the lock,
	 * the packet will be appended to the "get list" of the deferred
	 * packet list.  Otherwise, it will be pushed on the "put list".
	 */
	locked = SFXGE_TXQ_TRYLOCK(txq);

	if (sfxge_tx_qdpl_put(txq, m, locked) != 0) {
		if (locked)
			SFXGE_TXQ_UNLOCK(txq);
		rc = ENOBUFS;
		goto fail;
	}

	/*
	 * Try to grab the lock again.
	 *
	 * If we are able to get the lock, we need to process the deferred
	 * packet list.  If we are not able to get the lock, another thread
	 * is processing the list.
	 */
	if (!locked)
		locked = SFXGE_TXQ_TRYLOCK(txq);

	if (locked) {
		/* Try to service the list. */
		sfxge_tx_qdpl_service(txq);
		/* Lock has been dropped. */
	}

	return (0);

fail:
	m_freem(m);
	return (rc);
}

static void
sfxge_tx_qdpl_flush(struct sfxge_txq *txq)
{
	struct sfxge_tx_dpl *stdp = &txq->dpl;
	struct mbuf *mbuf, *next;

	SFXGE_TXQ_LOCK(txq);

	sfxge_tx_qdpl_swizzle(txq);
	for (mbuf = stdp->std_get; mbuf != NULL; mbuf = next) {
		next = mbuf->m_nextpkt;
		m_freem(mbuf);
	}
	stdp->std_get = NULL;
	stdp->std_get_count = 0;
	stdp->std_get_non_tcp_count = 0;
	stdp->std_getp = &stdp->std_get;

	SFXGE_TXQ_UNLOCK(txq);
}

void
sfxge_if_qflush(struct ifnet *ifp)
{
	struct sfxge_softc *sc;
	unsigned int i;

	sc = ifp->if_softc;

	for (i = 0; i < sc->txq_count; i++)
		sfxge_tx_qdpl_flush(sc->txq[i]);
}

/*
 * TX start -- called by the stack.
 */
int
sfxge_if_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct sfxge_softc *sc;
	struct sfxge_txq *txq;
	int rc;

	sc = (struct sfxge_softc *)ifp->if_softc;

	/*
	 * Transmit may be called when the interface is up from the kernel's
	 * point of view but not yet up (bring-up in progress) from the
	 * driver's point of view, e.g. during link aggregation bring-up.
	 * It may also be called when the interface is up from the driver's
	 * point of view but already down from the kernel's point of view,
	 * e.g. Rx while an interface shutdown is in progress.
	 */
	KASSERT((ifp->if_flags & IFF_UP) || (sc->if_flags & IFF_UP),
		("interface not up"));

	/* Pick the desired transmit queue. */
	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_TSO)) {
		int index = 0;

		/* Check whether the flowid is set. */
		if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
			uint32_t hash = m->m_pkthdr.flowid;

			index = sc->rx_indir_table[hash % SFXGE_RX_SCALE_MAX];
		}
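		/*
		 * Indexing the Rx indirection table here steers the Tx
		 * side of a flow to the Tx queue bound to the same event
		 * queue (and typically the same CPU) as the flow's Rx
		 * side.
		 */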
		txq = sc->txq[SFXGE_TXQ_IP_TCP_UDP_CKSUM + index];
	} else if (m->m_pkthdr.csum_flags & CSUM_DELAY_IP) {
		txq = sc->txq[SFXGE_TXQ_IP_CKSUM];
	} else {
		txq = sc->txq[SFXGE_TXQ_NON_CKSUM];
	}

	rc = sfxge_tx_packet_add(txq, m);

	return (rc);
}

/*
 * Software "TSO".  Not quite as good as doing it in hardware, but
 * still faster than segmenting in the stack.
 */

struct sfxge_tso_state {
	/* Output position */
	unsigned out_len;	/* Remaining length in current segment */
	unsigned seqnum;	/* Current sequence number */
	unsigned packet_space;	/* Remaining space in current packet */

	/* Input position */
	uint64_t dma_addr;	/* DMA address of current position */
	unsigned in_len;	/* Remaining length in current mbuf */

	const struct mbuf *mbuf; /* Input mbuf (head of chain) */
	u_short protocol;	/* Network protocol (after VLAN decap) */
	ssize_t nh_off;		/* Offset of network header */
	ssize_t tcph_off;	/* Offset of TCP header */
	unsigned header_len;	/* Number of bytes of header */
	unsigned seg_size;	/* TCP segment size */
};

static const struct ip *tso_iph(const struct sfxge_tso_state *tso)
{
	KASSERT(tso->protocol == htons(ETHERTYPE_IP),
		("tso_iph() in non-IPv4 state"));
	return (const struct ip *)(tso->mbuf->m_data + tso->nh_off);
}

static __unused const struct ip6_hdr *tso_ip6h(const struct sfxge_tso_state *tso)
{
	KASSERT(tso->protocol == htons(ETHERTYPE_IPV6),
		("tso_ip6h() in non-IPv6 state"));
	return (const struct ip6_hdr *)(tso->mbuf->m_data + tso->nh_off);
}

static const struct tcphdr *tso_tcph(const struct sfxge_tso_state *tso)
{
	return (const struct tcphdr *)(tso->mbuf->m_data + tso->tcph_off);
}

/* Size of preallocated TSO header buffers.  Larger blocks must be
 * allocated from the heap.
 */
#define	TSOH_STD_SIZE	128

/* At most half the descriptors in the queue at any time will refer to
 * a TSO header buffer, since they must always be followed by a
 * payload descriptor referring to an mbuf.
 */
#define	TSOH_COUNT(_txq_entries)	((_txq_entries) / 2u)
#define	TSOH_PER_PAGE	(PAGE_SIZE / TSOH_STD_SIZE)
#define	TSOH_PAGE_COUNT(_txq_entries)	\
	((TSOH_COUNT(_txq_entries) + TSOH_PER_PAGE - 1) / TSOH_PER_PAGE)

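/*
 * Worked example: with 4 KiB pages, TSOH_PER_PAGE is 4096 / 128 = 32,
 * so a 1024-entry Tx queue needs TSOH_COUNT = 512 standard headers,
 * i.e. TSOH_PAGE_COUNT = (512 + 31) / 32 = 16 pages of header buffers.
 */
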
static int tso_init(struct sfxge_txq *txq)
{
	struct sfxge_softc *sc = txq->sc;
	unsigned int tsoh_page_count = TSOH_PAGE_COUNT(sc->txq_entries);
	int i, rc;

	/* Allocate TSO header buffers */
	txq->tsoh_buffer = malloc(tsoh_page_count * sizeof(txq->tsoh_buffer[0]),
				  M_SFXGE, M_WAITOK);

	for (i = 0; i < tsoh_page_count; i++) {
		rc = sfxge_dma_alloc(sc, PAGE_SIZE, &txq->tsoh_buffer[i]);
		if (rc != 0)
			goto fail;
	}

	return (0);

fail:
	while (i-- > 0)
		sfxge_dma_free(&txq->tsoh_buffer[i]);
	free(txq->tsoh_buffer, M_SFXGE);
	txq->tsoh_buffer = NULL;
	return (rc);
}

static void tso_fini(struct sfxge_txq *txq)
{
	int i;

	if (txq->tsoh_buffer != NULL) {
		for (i = 0; i < TSOH_PAGE_COUNT(txq->sc->txq_entries); i++)
			sfxge_dma_free(&txq->tsoh_buffer[i]);
		free(txq->tsoh_buffer, M_SFXGE);
	}
}

static void tso_start(struct sfxge_tso_state *tso, struct mbuf *mbuf)
{
	struct ether_header *eh = mtod(mbuf, struct ether_header *);
	const struct tcphdr *th;
	struct tcphdr th_copy;

	tso->mbuf = mbuf;

	/* Find network protocol and header */
	tso->protocol = eh->ether_type;
	if (tso->protocol == htons(ETHERTYPE_VLAN)) {
		struct ether_vlan_header *veh =
			mtod(mbuf, struct ether_vlan_header *);
		tso->protocol = veh->evl_proto;
		tso->nh_off = sizeof(*veh);
	} else {
		tso->nh_off = sizeof(*eh);
	}

	/* Find TCP header */
	if (tso->protocol == htons(ETHERTYPE_IP)) {
		KASSERT(tso_iph(tso)->ip_p == IPPROTO_TCP,
			("TSO required on non-TCP packet"));
		tso->tcph_off = tso->nh_off + 4 * tso_iph(tso)->ip_hl;
	} else {
		KASSERT(tso->protocol == htons(ETHERTYPE_IPV6),
			("TSO required on non-IP packet"));
		KASSERT(tso_ip6h(tso)->ip6_nxt == IPPROTO_TCP,
			("TSO required on non-TCP packet"));
		tso->tcph_off = tso->nh_off + sizeof(struct ip6_hdr);
	}

	KASSERT(mbuf->m_len >= tso->tcph_off,
		("network header is fragmented in mbuf"));
	/* We need the TCP header up to and including the flags (the window
	 * field comes next).
	 */
	if (mbuf->m_len < tso->tcph_off + offsetof(struct tcphdr, th_win)) {
		m_copydata(tso->mbuf, tso->tcph_off, sizeof(th_copy),
			   (caddr_t)&th_copy);
		th = &th_copy;
	} else {
		th = tso_tcph(tso);
	}

	tso->header_len = tso->tcph_off + 4 * th->th_off;
	tso->seg_size = mbuf->m_pkthdr.tso_segsz;

	tso->seqnum = ntohl(th->th_seq);

	/* These flags must not be duplicated */
	KASSERT(!(th->th_flags & (TH_URG | TH_SYN | TH_RST)),
		("incompatible TCP flag on TSO packet"));

	tso->out_len = mbuf->m_pkthdr.len - tso->header_len;
}

/*
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 *
 * Form descriptors for the current fragment, until we reach the end of
 * the fragment or end-of-packet.
 */
static void tso_fill_packet_with_fragment(struct sfxge_txq *txq,
					  struct sfxge_tso_state *tso)
{
	efx_buffer_t *desc;
	int n;

	if (tso->in_len == 0 || tso->packet_space == 0)
		return;

	KASSERT(tso->in_len > 0, ("TSO input length went negative"));
	KASSERT(tso->packet_space > 0, ("TSO packet space went negative"));

	n = min(tso->in_len, tso->packet_space);

	tso->packet_space -= n;
	tso->out_len -= n;
	tso->in_len -= n;

	desc = &txq->pend_desc[txq->n_pend_desc++];
	desc->eb_addr = tso->dma_addr;
	desc->eb_size = n;
	desc->eb_eop = tso->out_len == 0 || tso->packet_space == 0;

	tso->dma_addr += n;
}

/* Callback from bus_dmamap_load() for long TSO headers. */
static void tso_map_long_header(void *dma_addr_ret,
				bus_dma_segment_t *segs, int nseg,
				int error)
{
	*(uint64_t *)dma_addr_ret = ((__predict_true(error == 0) &&
				      __predict_true(nseg == 1)) ?
				     segs->ds_addr : 0);
}

/*
 * tso_start_new_packet - generate a new header and prepare for the new packet
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or an error code if we failed to allocate a header.
 */
static int tso_start_new_packet(struct sfxge_txq *txq,
				struct sfxge_tso_state *tso,
				unsigned int id)
{
	struct sfxge_tx_mapping *stmp = &txq->stmp[id];
	struct tcphdr *tsoh_th;
	unsigned ip_length;
	caddr_t header;
	uint64_t dma_addr;
	bus_dmamap_t map;
	efx_buffer_t *desc;
	int rc;

	/* Allocate a DMA-mapped header buffer. */
	if (__predict_true(tso->header_len <= TSOH_STD_SIZE)) {
		unsigned int page_index = (id / 2) / TSOH_PER_PAGE;
		unsigned int buf_index = (id / 2) % TSOH_PER_PAGE;
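		/*
		 * Dividing id by 2 reflects the layout noted above
		 * TSOH_COUNT(): at most every other ring entry can hold a
		 * header, since each header descriptor is always followed
		 * by at least one payload descriptor.
		 */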
		header = (txq->tsoh_buffer[page_index].esm_base +
			  buf_index * TSOH_STD_SIZE);
		dma_addr = (txq->tsoh_buffer[page_index].esm_addr +
			    buf_index * TSOH_STD_SIZE);
		map = txq->tsoh_buffer[page_index].esm_map;

		stmp->flags = 0;
	} else {
		/* We cannot use bus_dmamem_alloc() as that may sleep */
		header = malloc(tso->header_len, M_SFXGE, M_NOWAIT);
		if (__predict_false(!header))
			return (ENOMEM);
		rc = bus_dmamap_load(txq->packet_dma_tag, stmp->map,
				     header, tso->header_len,
				     tso_map_long_header, &dma_addr,
				     BUS_DMA_NOWAIT);
		if (__predict_false(dma_addr == 0)) {
			if (rc == 0) {
				/* Succeeded but got >1 segment */
				bus_dmamap_unload(txq->packet_dma_tag,
						  stmp->map);
				rc = EINVAL;
			}
			free(header, M_SFXGE);
			return (rc);
		}
		map = stmp->map;

		txq->tso_long_headers++;
		stmp->u.heap_buf = header;
		stmp->flags = TX_BUF_UNMAP;
	}

	tsoh_th = (struct tcphdr *)(header + tso->tcph_off);

	/* Copy and update the headers. */
	m_copydata(tso->mbuf, 0, tso->header_len, header);

	tsoh_th->th_seq = htonl(tso->seqnum);
	tso->seqnum += tso->seg_size;
	if (tso->out_len > tso->seg_size) {
		/* This packet will not finish the TSO burst. */
		ip_length = tso->header_len - tso->nh_off + tso->seg_size;
		tsoh_th->th_flags &= ~(TH_FIN | TH_PUSH);
	} else {
		/* This packet will be the last in the TSO burst. */
		ip_length = tso->header_len - tso->nh_off + tso->out_len;
	}

	if (tso->protocol == htons(ETHERTYPE_IP)) {
		struct ip *tsoh_iph = (struct ip *)(header + tso->nh_off);
		tsoh_iph->ip_len = htons(ip_length);
		/* XXX We should increment ip_id, but FreeBSD doesn't
		 * currently allocate extra IDs for multiple segments.
		 */
	} else {
		struct ip6_hdr *tsoh_iph =
			(struct ip6_hdr *)(header + tso->nh_off);
		tsoh_iph->ip6_plen = htons(ip_length - sizeof(*tsoh_iph));
	}

	/* Make the header visible to the hardware. */
	bus_dmamap_sync(txq->packet_dma_tag, map, BUS_DMASYNC_PREWRITE);

	tso->packet_space = tso->seg_size;
	txq->tso_packets++;

	/* Form a descriptor for this header. */
	desc = &txq->pend_desc[txq->n_pend_desc++];
	desc->eb_addr = dma_addr;
	desc->eb_size = tso->header_len;
	desc->eb_eop = 0;

	return (0);
}

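/*
 * Illustrative arithmetic for ip_length above (made-up numbers): with
 * a 14-byte Ethernet header (nh_off), header_len of 54 bytes (14 + 20
 * IP + 20 TCP) and seg_size of 1448 bytes, each full segment is
 * written with ip_length = 54 - 14 + 1448 = 1488 bytes, i.e. the
 * IP/TCP headers plus one MSS of payload.
 */
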
static int
sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf,
		   const bus_dma_segment_t *dma_seg, int n_dma_seg)
{
	struct sfxge_tso_state tso;
	unsigned int id, next_id;
	unsigned skipped = 0;

	tso_start(&tso, mbuf);

	while (dma_seg->ds_len + skipped <= tso.header_len) {
		skipped += dma_seg->ds_len;
		--n_dma_seg;
		KASSERT(n_dma_seg, ("no payload found in TSO packet"));
		++dma_seg;
	}
	tso.in_len = dma_seg->ds_len - (tso.header_len - skipped);
	tso.dma_addr = dma_seg->ds_addr + (tso.header_len - skipped);

	id = txq->added & txq->ptr_mask;
	if (__predict_false(tso_start_new_packet(txq, &tso, id)))
		return (-1);

	while (1) {
		id = (id + 1) & txq->ptr_mask;
		tso_fill_packet_with_fragment(txq, &tso);

		/* Move onto the next fragment? */
		if (tso.in_len == 0) {
			--n_dma_seg;
			if (n_dma_seg == 0)
				break;
			++dma_seg;
			tso.in_len = dma_seg->ds_len;
			tso.dma_addr = dma_seg->ds_addr;
		}

		/* End of packet? */
		if (tso.packet_space == 0) {
			/* If the queue is now full due to tiny MSS,
			 * or we can't create another header, discard
			 * the remainder of the input mbuf but do not
			 * roll back the work we have done.
			 */
			if (txq->n_pend_desc + 1 /* header */ + n_dma_seg >
			    SFXGE_TSO_MAX_DESC) {
				txq->tso_pdrop_too_many++;
				break;
			}
			next_id = (id + 1) & txq->ptr_mask;
			if (__predict_false(tso_start_new_packet(txq, &tso,
								 next_id))) {
				txq->tso_pdrop_no_rsrc++;
				break;
			}
			id = next_id;
		}
	}

	txq->tso_bursts++;
	return (id);
}

static void
sfxge_tx_qunblock(struct sfxge_txq *txq)
{
	struct sfxge_softc *sc;
	struct sfxge_evq *evq;

	sc = txq->sc;
	evq = sc->evq[txq->evq_index];

	SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);

	if (__predict_false(txq->init_state != SFXGE_TXQ_STARTED))
		return;

	SFXGE_TXQ_LOCK(txq);

	if (txq->blocked) {
		unsigned int level;

		level = txq->added - txq->completed;
		if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries)) {
			/* reaped must be in sync with blocked */
			sfxge_tx_qreap(txq);
			txq->blocked = 0;
		}
	}

	sfxge_tx_qdpl_service(txq);
	/* note: lock has been dropped */
}

void
sfxge_tx_qflush_done(struct sfxge_txq *txq)
{
	txq->flush_state = SFXGE_FLUSH_DONE;
}

static void
sfxge_tx_qstop(struct sfxge_softc *sc, unsigned int index)
{
	struct sfxge_txq *txq;
	struct sfxge_evq *evq;
	unsigned int count;

	txq = sc->txq[index];
	evq = sc->evq[txq->evq_index];

	SFXGE_TXQ_LOCK(txq);

	KASSERT(txq->init_state == SFXGE_TXQ_STARTED,
		("txq->init_state != SFXGE_TXQ_STARTED"));

	txq->init_state = SFXGE_TXQ_INITIALIZED;
	txq->flush_state = SFXGE_FLUSH_PENDING;

	/* Flush the transmit queue. */
	efx_tx_qflush(txq->common);

	SFXGE_TXQ_UNLOCK(txq);

	count = 0;
	do {
		/* Spin for 100ms. */
		DELAY(100000);

		if (txq->flush_state != SFXGE_FLUSH_PENDING)
			break;
	} while (++count < 20);

	SFXGE_EVQ_LOCK(evq);
	SFXGE_TXQ_LOCK(txq);

	KASSERT(txq->flush_state != SFXGE_FLUSH_FAILED,
		("txq->flush_state == SFXGE_FLUSH_FAILED"));

	txq->flush_state = SFXGE_FLUSH_DONE;

	txq->blocked = 0;
	txq->pending = txq->added;

	sfxge_tx_qcomplete(txq, evq);
	KASSERT(txq->completed == txq->added,
		("txq->completed != txq->added"));

	sfxge_tx_qreap(txq);
	KASSERT(txq->reaped == txq->completed,
		("txq->reaped != txq->completed"));

	txq->added = 0;
	txq->pending = 0;
	txq->completed = 0;
	txq->reaped = 0;

	/* Destroy the common code transmit queue. */
	efx_tx_qdestroy(txq->common);
	txq->common = NULL;

	efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
			       EFX_TXQ_NBUFS(sc->txq_entries));

	SFXGE_EVQ_UNLOCK(evq);
	SFXGE_TXQ_UNLOCK(txq);
}

static int
sfxge_tx_qstart(struct sfxge_softc *sc, unsigned int index)
{
	struct sfxge_txq *txq;
	efsys_mem_t *esmp;
	uint16_t flags;
	struct sfxge_evq *evq;
	int rc;

	txq = sc->txq[index];
	esmp = &txq->mem;
	evq = sc->evq[txq->evq_index];

	KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED,
		("txq->init_state != SFXGE_TXQ_INITIALIZED"));
	KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
		("evq->init_state != SFXGE_EVQ_STARTED"));

	/* Program the buffer table. */
	if ((rc = efx_sram_buf_tbl_set(sc->enp, txq->buf_base_id, esmp,
				       EFX_TXQ_NBUFS(sc->txq_entries))) != 0)
		return (rc);

	/* Determine the kind of queue we are creating. */
	switch (txq->type) {
	case SFXGE_TXQ_NON_CKSUM:
		flags = 0;
		break;
	case SFXGE_TXQ_IP_CKSUM:
		flags = EFX_CKSUM_IPV4;
		break;
	case SFXGE_TXQ_IP_TCP_UDP_CKSUM:
		flags = EFX_CKSUM_IPV4 | EFX_CKSUM_TCPUDP;
		break;
	default:
		KASSERT(0, ("Impossible TX queue"));
		flags = 0;
		break;
	}

	/* Create the common code transmit queue. */
	if ((rc = efx_tx_qcreate(sc->enp, index, txq->type, esmp,
	    sc->txq_entries, txq->buf_base_id, flags, evq->common,
	    &txq->common)) != 0)
		goto fail;

	SFXGE_TXQ_LOCK(txq);

	/* Enable the transmit queue. */
	efx_tx_qenable(txq->common);

	txq->init_state = SFXGE_TXQ_STARTED;

	SFXGE_TXQ_UNLOCK(txq);

	return (0);

fail:
	efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
			       EFX_TXQ_NBUFS(sc->txq_entries));
	return (rc);
}

void
sfxge_tx_stop(struct sfxge_softc *sc)
{
	int index;

	index = sc->txq_count;
	while (--index >= 0)
		sfxge_tx_qstop(sc, index);

	/* Tear down the transmit module */
	efx_tx_fini(sc->enp);
}

int
sfxge_tx_start(struct sfxge_softc *sc)
{
	int index;
	int rc;

	/* Initialize the common code transmit module. */
	if ((rc = efx_tx_init(sc->enp)) != 0)
		return (rc);

	for (index = 0; index < sc->txq_count; index++) {
		if ((rc = sfxge_tx_qstart(sc, index)) != 0)
			goto fail;
	}

	return (0);

fail:
	while (--index >= 0)
		sfxge_tx_qstop(sc, index);

	efx_tx_fini(sc->enp);

	return (rc);
}

static int
sfxge_txq_stat_init(struct sfxge_txq *txq, struct sysctl_oid *txq_node)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(txq->sc->dev);
	struct sysctl_oid *stat_node;
	unsigned int id;

	stat_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(txq_node), OID_AUTO,
				    "stats", CTLFLAG_RD, NULL,
				    "Tx queue statistics");
	if (stat_node == NULL)
		return (ENOMEM);

	for (id = 0; id < nitems(sfxge_tx_stats); id++) {
		SYSCTL_ADD_ULONG(
		    ctx, SYSCTL_CHILDREN(stat_node), OID_AUTO,
		    sfxge_tx_stats[id].name, CTLFLAG_RD | CTLFLAG_STATS,
		    (unsigned long *)((caddr_t)txq + sfxge_tx_stats[id].offset),
		    "");
	}

	return (0);
}

/*
 * Destroy a transmit queue.
 */
static void
sfxge_tx_qfini(struct sfxge_softc *sc, unsigned int index)
{
	struct sfxge_txq *txq;
	unsigned int nmaps;

	txq = sc->txq[index];

	KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED,
		("txq->init_state != SFXGE_TXQ_INITIALIZED"));

	if (txq->type == SFXGE_TXQ_IP_TCP_UDP_CKSUM)
		tso_fini(txq);

	/* Free the context arrays. */
	free(txq->pend_desc, M_SFXGE);
	nmaps = sc->txq_entries;
	while (nmaps-- != 0)
		bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map);
	free(txq->stmp, M_SFXGE);

	/* Release DMA memory mapping. */
	sfxge_dma_free(&txq->mem);

	sc->txq[index] = NULL;

	SFXGE_TXQ_LOCK_DESTROY(txq);

	free(txq, M_SFXGE);
}

static int
sfxge_tx_qinit(struct sfxge_softc *sc, unsigned int txq_index,
	       enum sfxge_txq_type type, unsigned int evq_index)
{
	char name[16];
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
	struct sysctl_oid *txq_node;
	struct sfxge_txq *txq;
	struct sfxge_evq *evq;
	struct sfxge_tx_dpl *stdp;
	struct sysctl_oid *dpl_node;
	efsys_mem_t *esmp;
	unsigned int nmaps;
	int rc;

	txq = malloc(sizeof(struct sfxge_txq), M_SFXGE, M_ZERO | M_WAITOK);
	txq->sc = sc;
	txq->entries = sc->txq_entries;
	txq->ptr_mask = txq->entries - 1;

	sc->txq[txq_index] = txq;
	esmp = &txq->mem;

	evq = sc->evq[evq_index];

	/* Allocate and zero DMA space for the descriptor ring. */
	if ((rc = sfxge_dma_alloc(sc, EFX_TXQ_SIZE(sc->txq_entries), esmp)) != 0)
		return (rc);

	/* Allocate buffer table entries. */
	sfxge_sram_buf_tbl_alloc(sc, EFX_TXQ_NBUFS(sc->txq_entries),
				 &txq->buf_base_id);

	/* Create a DMA tag for packet mappings. */
	if (bus_dma_tag_create(sc->parent_dma_tag, 1, 0x1000,
	    MIN(0x3FFFFFFFFFFFUL, BUS_SPACE_MAXADDR), BUS_SPACE_MAXADDR, NULL,
	    NULL, 0x11000, SFXGE_TX_MAPPING_MAX_SEG, 0x1000, 0, NULL, NULL,
	    &txq->packet_dma_tag) != 0) {
		device_printf(sc->dev, "Couldn't allocate txq DMA tag\n");
		rc = ENOMEM;
		goto fail;
	}

	/* Allocate pending descriptor array for batching writes. */
	txq->pend_desc = malloc(sizeof(efx_buffer_t) * sc->txq_entries,
				M_SFXGE, M_ZERO | M_WAITOK);

	/* Allocate and initialise mbuf DMA mapping array. */
	txq->stmp = malloc(sizeof(struct sfxge_tx_mapping) * sc->txq_entries,
			   M_SFXGE, M_ZERO | M_WAITOK);
	for (nmaps = 0; nmaps < sc->txq_entries; nmaps++) {
		rc = bus_dmamap_create(txq->packet_dma_tag, 0,
				       &txq->stmp[nmaps].map);
		if (rc != 0)
			goto fail2;
	}

	snprintf(name, sizeof(name), "%u", txq_index);
	txq_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(sc->txqs_node),
				   OID_AUTO, name, CTLFLAG_RD, NULL, "");
	if (txq_node == NULL) {
		rc = ENOMEM;
		goto fail_txq_node;
	}

	if (type == SFXGE_TXQ_IP_TCP_UDP_CKSUM &&
	    (rc = tso_init(txq)) != 0)
		goto fail_tso;

	if (sfxge_tx_dpl_get_max <= 0) {
		log(LOG_ERR, "%s=%d must be greater than 0",
		    SFXGE_PARAM_TX_DPL_GET_MAX, sfxge_tx_dpl_get_max);
		rc = EINVAL;
		goto fail_tx_dpl_get_max;
	}
	if (sfxge_tx_dpl_get_non_tcp_max <= 0) {
		log(LOG_ERR, "%s=%d must be greater than 0",
		    SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX,
		    sfxge_tx_dpl_get_non_tcp_max);
		rc = EINVAL;
		goto fail_tx_dpl_get_max;
	}
	if (sfxge_tx_dpl_put_max < 0) {
		log(LOG_ERR, "%s=%d must be greater or equal to 0",
		    SFXGE_PARAM_TX_DPL_PUT_MAX, sfxge_tx_dpl_put_max);
		rc = EINVAL;
		goto fail_tx_dpl_put_max;
	}

	/* Initialize the deferred packet list. */
	stdp = &txq->dpl;
	stdp->std_put_max = sfxge_tx_dpl_put_max;
	stdp->std_get_max = sfxge_tx_dpl_get_max;
	stdp->std_get_non_tcp_max = sfxge_tx_dpl_get_non_tcp_max;
	stdp->std_getp = &stdp->std_get;

	SFXGE_TXQ_LOCK_INIT(txq, device_get_nameunit(sc->dev), txq_index);

	dpl_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(txq_node), OID_AUTO,
				   "dpl", CTLFLAG_RD, NULL,
				   "Deferred packet list statistics");
	if (dpl_node == NULL) {
		rc = ENOMEM;
		goto fail_dpl_node;
	}

	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO,
			"get_count", CTLFLAG_RD | CTLFLAG_STATS,
			&stdp->std_get_count, 0, "");
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO,
			"get_non_tcp_count", CTLFLAG_RD | CTLFLAG_STATS,
			&stdp->std_get_non_tcp_count, 0, "");
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO,
			"get_hiwat", CTLFLAG_RD | CTLFLAG_STATS,
			&stdp->std_get_hiwat, 0, "");
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO,
			"put_hiwat", CTLFLAG_RD | CTLFLAG_STATS,
			&stdp->std_put_hiwat, 0, "");

	rc = sfxge_txq_stat_init(txq, txq_node);
	if (rc != 0)
		goto fail_txq_stat_init;

	txq->type = type;
	txq->evq_index = evq_index;
	txq->txq_index = txq_index;
	txq->init_state = SFXGE_TXQ_INITIALIZED;

	return (0);

fail_txq_stat_init:
fail_dpl_node:
fail_tx_dpl_put_max:
fail_tx_dpl_get_max:
fail_tso:
fail_txq_node:
	free(txq->pend_desc, M_SFXGE);
fail2:
	while (nmaps-- != 0)
		bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map);
	free(txq->stmp, M_SFXGE);
	bus_dma_tag_destroy(txq->packet_dma_tag);

fail:
	sfxge_dma_free(esmp);

	return (rc);
}

static int
sfxge_tx_stat_handler(SYSCTL_HANDLER_ARGS)
{
	struct sfxge_softc *sc = arg1;
	unsigned int id = arg2;
	unsigned long sum;
	unsigned int index;

	/* Sum across all TX queues */
	sum = 0;
	for (index = 0; index < sc->txq_count; index++)
		sum += *(unsigned long *)((caddr_t)sc->txq[index] +
					  sfxge_tx_stats[id].offset);

	return (SYSCTL_OUT(req, &sum, sizeof(sum)));
}

static void
sfxge_tx_stat_init(struct sfxge_softc *sc)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
	struct sysctl_oid_list *stat_list;
	unsigned int id;

	stat_list = SYSCTL_CHILDREN(sc->stats_node);

	for (id = 0; id < nitems(sfxge_tx_stats); id++) {
		SYSCTL_ADD_PROC(
		    ctx, stat_list,
		    OID_AUTO, sfxge_tx_stats[id].name,
		    CTLTYPE_ULONG | CTLFLAG_RD,
		    sc, id, sfxge_tx_stat_handler, "LU",
		    "");
	}
}

unsigned long
sfxge_tx_get_drops(struct sfxge_softc *sc)
{
	unsigned int index;
	unsigned long drops = 0;
	struct sfxge_txq *txq;

	/* Sum across all TX queues */
	for (index = 0; index < sc->txq_count; index++) {
		txq = sc->txq[index];
		/*
		 * In theory, txq->put_overflow and txq->netdown_drops
		 * should be updated with atomic operations and the others
		 * should be read under the txq lock, but these are only
		 * statistics.
		 */
		drops += txq->drops + txq->get_overflow +
			 txq->get_non_tcp_overflow +
			 txq->put_overflow + txq->netdown_drops +
			 txq->tso_pdrop_too_many + txq->tso_pdrop_no_rsrc;
	}
	return (drops);
}

void
sfxge_tx_fini(struct sfxge_softc *sc)
{
	int index;

	index = sc->txq_count;
	while (--index >= 0)
		sfxge_tx_qfini(sc, index);

	sc->txq_count = 0;
}

int
sfxge_tx_init(struct sfxge_softc *sc)
{
	struct sfxge_intr *intr;
	int index;
	int rc;

	intr = &sc->intr;

	KASSERT(intr->state == SFXGE_INTR_INITIALIZED,
		("intr->state != SFXGE_INTR_INITIALIZED"));

	sc->txq_count = SFXGE_TXQ_NTYPES - 1 + sc->intr.n_alloc;
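	/*
	 * For example, with the three queue types (SFXGE_TXQ_NTYPES == 3)
	 * and four interrupts allocated, this gives 3 - 1 + 4 = 6 queues:
	 * the no-checksum and IP-checksum queues shared on event queue 0,
	 * plus one full-offload queue per event queue.
	 */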
	sc->txqs_node = SYSCTL_ADD_NODE(
		device_get_sysctl_ctx(sc->dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)),
		OID_AUTO, "txq", CTLFLAG_RD, NULL, "Tx queues");
	if (sc->txqs_node == NULL) {
		rc = ENOMEM;
		goto fail_txq_node;
	}

	/* Initialize the transmit queues */
	if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_NON_CKSUM,
	    SFXGE_TXQ_NON_CKSUM, 0)) != 0)
		goto fail;

	if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_IP_CKSUM,
	    SFXGE_TXQ_IP_CKSUM, 0)) != 0)
		goto fail2;

	for (index = 0;
	     index < sc->txq_count - SFXGE_TXQ_NTYPES + 1;
	     index++) {
		if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_NTYPES - 1 + index,
		    SFXGE_TXQ_IP_TCP_UDP_CKSUM, index)) != 0)
			goto fail3;
	}

	sfxge_tx_stat_init(sc);

	return (0);

fail3:
	while (--index >= 0)
		sfxge_tx_qfini(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index);

	sfxge_tx_qfini(sc, SFXGE_TXQ_IP_CKSUM);

fail2:
	sfxge_tx_qfini(sc, SFXGE_TXQ_NON_CKSUM);

fail:
fail_txq_node:
	sc->txq_count = 0;
	return (rc);
}