 * Copyright (c) 2010-2015 Solarflare Communications Inc.
 *
 * This software was developed in part by Philip Paeps under contract for
 * Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
/* Theory of operation:
 *
 * Tx queue allocation and mapping
 *
 * One Tx queue with checksum offload enabled is allocated per Rx channel
 * (event queue).  In addition, two Tx queues (one without checksum offload
 * and one with IP checksum offload only) are allocated and bound to event
 * queue 0.  sfxge_txq_type is used as the Tx queue label.
 *
 * So the mapping from (event queue, label) to Tx queue index is:
 * if the event queue index is 0, TxQ-index = TxQ-label (in [0..SFXGE_TXQ_NTYPES))
 * else TxQ-index = SFXGE_TXQ_NTYPES + EvQ-index - 1
 * See sfxge_get_txq_by_label() in sfxge_ev.c
 */
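/*
 * The mapping above, written out as a minimal sketch.  This helper is purely
 * illustrative and is not part of the driver; the authoritative version is
 * sfxge_get_txq_by_label() in sfxge_ev.c.
 */
static inline unsigned int
sfxge_txq_index_from_label_sketch(unsigned int evq_index,
    enum sfxge_txq_type label)
{
	/* Event queue 0 owns one Tx queue of every type, indexed by label. */
	if (evq_index == 0)
		return ((unsigned int)label);

	/* Every other event queue owns a single checksum-offload Tx queue. */
	return (SFXGE_TXQ_NTYPES + evq_index - 1);
}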
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <net/ethernet.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>

#include "common/efx.h"
#define SFXGE_PARAM_TX_DPL_GET_MAX	SFXGE_PARAM(tx_dpl_get_max)
static int sfxge_tx_dpl_get_max = SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT;
TUNABLE_INT(SFXGE_PARAM_TX_DPL_GET_MAX, &sfxge_tx_dpl_get_max);
SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_get_max, CTLFLAG_RDTUN,
	   &sfxge_tx_dpl_get_max, 0,
	   "Maximum number of any packets in deferred packet get-list");

#define SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX \
	SFXGE_PARAM(tx_dpl_get_non_tcp_max)
static int sfxge_tx_dpl_get_non_tcp_max =
	SFXGE_TX_DPL_GET_NON_TCP_PKT_LIMIT_DEFAULT;
TUNABLE_INT(SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX, &sfxge_tx_dpl_get_non_tcp_max);
SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_get_non_tcp_max, CTLFLAG_RDTUN,
	   &sfxge_tx_dpl_get_non_tcp_max, 0,
	   "Maximum number of non-TCP packets in deferred packet get-list");

#define SFXGE_PARAM_TX_DPL_PUT_MAX	SFXGE_PARAM(tx_dpl_put_max)
static int sfxge_tx_dpl_put_max = SFXGE_TX_DPL_PUT_PKT_LIMIT_DEFAULT;
TUNABLE_INT(SFXGE_PARAM_TX_DPL_PUT_MAX, &sfxge_tx_dpl_put_max);
SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_put_max, CTLFLAG_RDTUN,
	   &sfxge_tx_dpl_put_max, 0,
	   "Maximum number of any packets in deferred packet put-list");

#define SFXGE_PARAM_TSO_FW_ASSISTED	SFXGE_PARAM(tso_fw_assisted)
static int sfxge_tso_fw_assisted = 1;
TUNABLE_INT(SFXGE_PARAM_TSO_FW_ASSISTED, &sfxge_tso_fw_assisted);
SYSCTL_INT(_hw_sfxge, OID_AUTO, tso_fw_assisted, CTLFLAG_RDTUN,
	   &sfxge_tso_fw_assisted, 0,
	   "Use FW-assisted TSO if supported by NIC firmware");
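/*
 * All four of the knobs above are boot-time tunables (CTLFLAG_RDTUN) exposed
 * under the hw.sfxge sysctl tree.  A minimal /boot/loader.conf example, with
 * purely illustrative values:
 *
 *	hw.sfxge.tx_dpl_get_max=1024
 *	hw.sfxge.tx_dpl_get_non_tcp_max=1024
 *	hw.sfxge.tx_dpl_put_max=64
 *	hw.sfxge.tso_fw_assisted=0
 */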
static const struct {
} sfxge_tx_stats[] = {
#define SFXGE_TX_STAT(name, member) \
	{ #name, offsetof(struct sfxge_txq, member) }
	SFXGE_TX_STAT(tso_bursts, tso_bursts),
	SFXGE_TX_STAT(tso_packets, tso_packets),
	SFXGE_TX_STAT(tso_long_headers, tso_long_headers),
	SFXGE_TX_STAT(tso_pdrop_too_many, tso_pdrop_too_many),
	SFXGE_TX_STAT(tso_pdrop_no_rsrc, tso_pdrop_no_rsrc),
	SFXGE_TX_STAT(tx_collapses, collapses),
	SFXGE_TX_STAT(tx_drops, drops),
	SFXGE_TX_STAT(tx_get_overflow, get_overflow),
	SFXGE_TX_STAT(tx_get_non_tcp_overflow, get_non_tcp_overflow),
	SFXGE_TX_STAT(tx_put_overflow, put_overflow),
	SFXGE_TX_STAT(tx_netdown_drops, netdown_drops),
/* Forward declarations. */
static void sfxge_tx_qdpl_service(struct sfxge_txq *txq);
static void sfxge_tx_qlist_post(struct sfxge_txq *txq);
static void sfxge_tx_qunblock(struct sfxge_txq *txq);
static int sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf,
			      const bus_dma_segment_t *dma_seg, int n_dma_seg,

sfxge_tx_maybe_insert_tag(struct sfxge_txq *txq, struct mbuf *mbuf)
	uint16_t this_tag = ((mbuf->m_flags & M_VLANTAG) ?
			     mbuf->m_pkthdr.ether_vtag :

	if (this_tag == txq->hw_vlan_tci)

	efx_tx_qdesc_vlantci_create(txq->common,
	txq->n_pend_desc = 1;
	txq->hw_vlan_tci = this_tag;

sfxge_next_stmp(struct sfxge_txq *txq, struct sfxge_tx_mapping **pstmp)
	KASSERT((*pstmp)->flags == 0, ("stmp flags are not 0"));
	if (__predict_false(*pstmp ==
			    &txq->stmp[txq->ptr_mask]))
		*pstmp = &txq->stmp[0];

sfxge_tx_qcomplete(struct sfxge_txq *txq, struct sfxge_evq *evq)
	unsigned int completed;

	SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);

	completed = txq->completed;
	while (completed != txq->pending) {
		struct sfxge_tx_mapping *stmp;

		id = completed++ & txq->ptr_mask;
		stmp = &txq->stmp[id];
		if (stmp->flags & TX_BUF_UNMAP) {
			bus_dmamap_unload(txq->packet_dma_tag, stmp->map);
			if (stmp->flags & TX_BUF_MBUF) {
				struct mbuf *m = stmp->u.mbuf;
				free(stmp->u.heap_buf, M_SFXGE);
	txq->completed = completed;

	/* Check whether we need to unblock the queue. */
	level = txq->added - txq->completed;
	if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries))
		sfxge_tx_qunblock(txq);
sfxge_is_mbuf_non_tcp(struct mbuf *mbuf)
	/*
	 * Absence of the TCP checksum flags does not mean that the packet is
	 * non-TCP, but they should be set if the user wants to achieve high
	 * throughput.
	 */
	return (!(mbuf->m_pkthdr.csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP)));
 * Reorder the put list and append it to the get list.
sfxge_tx_qdpl_swizzle(struct sfxge_txq *txq)
	struct sfxge_tx_dpl *stdp;
	struct mbuf *mbuf, *get_next, **get_tailp;
	volatile uintptr_t *putp;
	unsigned int non_tcp_count;

	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

	/* Acquire the put list. */
	putp = &stdp->std_put;
	put = atomic_readandclear_ptr(putp);

	/* Reverse the put list. */
	get_tailp = &mbuf->m_nextpkt;
		struct mbuf *put_next;

		non_tcp_count += sfxge_is_mbuf_non_tcp(mbuf);
		put_next = mbuf->m_nextpkt;
		mbuf->m_nextpkt = get_next;
	} while (mbuf != NULL);

	if (count > stdp->std_put_hiwat)
		stdp->std_put_hiwat = count;

	/* Append the reversed put list to the get list. */
	KASSERT(*get_tailp == NULL, ("*get_tailp != NULL"));
	*stdp->std_getp = get_next;
	stdp->std_getp = get_tailp;
	stdp->std_get_count += count;
	stdp->std_get_non_tcp_count += non_tcp_count;
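/*
 * Example of the swizzle above: if packets A, B and C are handed to the
 * driver in that order while the txq lock is busy, the lock-free push leaves
 * the put list in LIFO order C -> B -> A.  The reversal loop restores
 * A -> B -> C before the chain is appended to the tail of the get list, so
 * packets are still transmitted in their original order.
 */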
sfxge_tx_qreap(struct sfxge_txq *txq)
	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

	txq->reaped = txq->completed;

sfxge_tx_qlist_post(struct sfxge_txq *txq)
	unsigned int old_added;
	unsigned int block_level;

	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

	KASSERT(txq->n_pend_desc != 0, ("txq->n_pend_desc == 0"));
	KASSERT(txq->n_pend_desc <= txq->max_pkt_desc,
		("txq->n_pend_desc too large"));
	KASSERT(!txq->blocked, ("txq->blocked"));

	old_added = txq->added;

	/* Post the fragment list. */
	rc = efx_tx_qdesc_post(txq->common, txq->pend_desc, txq->n_pend_desc,
			       txq->reaped, &txq->added);
	KASSERT(rc == 0, ("efx_tx_qdesc_post() failed"));

	/* If efx_tx_qdesc_post() had to refragment, our information about
	 * buffers to free may be associated with the wrong
	KASSERT(txq->added - old_added == txq->n_pend_desc,
		("efx_tx_qdesc_post() refragmented descriptors"));

	level = txq->added - txq->reaped;
	KASSERT(level <= txq->entries, ("overfilled TX queue"));

	/* Clear the fragment list. */
	txq->n_pend_desc = 0;

	/*
	 * Set the block level to ensure there is space to generate a
	 * large number of descriptors for TSO.
	 */
	block_level = EFX_TXQ_LIMIT(txq->entries) - txq->max_pkt_desc;

	/* Have we reached the block level? */
	if (level < block_level)

	/* Reap, and check again */
	level = txq->added - txq->reaped;
	if (level < block_level)

	/*
	 * Avoid a race with completion interrupt handling that could leave
	level = txq->added - txq->reaped;
	if (level < block_level) {
static int sfxge_tx_queue_mbuf(struct sfxge_txq *txq, struct mbuf *mbuf)
	bus_dmamap_t *used_map;
	bus_dma_segment_t dma_seg[SFXGE_TX_MAPPING_MAX_SEG];
	struct sfxge_tx_mapping *stmp;

	KASSERT(!txq->blocked, ("txq->blocked"));

	if (mbuf->m_pkthdr.csum_flags & CSUM_TSO)
		prefetch_read_many(mbuf->m_data);

	if (__predict_false(txq->init_state != SFXGE_TXQ_STARTED)) {

	/* Load the packet for DMA. */
	id = txq->added & txq->ptr_mask;
	stmp = &txq->stmp[id];
	rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag, stmp->map,
				     mbuf, dma_seg, &n_dma_seg, 0);
		struct mbuf *new_mbuf = m_collapse(mbuf, M_NOWAIT,
						   SFXGE_TX_MAPPING_MAX_SEG);
		if (new_mbuf == NULL)
		rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag,
					     dma_seg, &n_dma_seg, 0);

	/* Make the packet visible to the hardware. */
	bus_dmamap_sync(txq->packet_dma_tag, stmp->map, BUS_DMASYNC_PREWRITE);

	used_map = &stmp->map;

	vlan_tagged = sfxge_tx_maybe_insert_tag(txq, mbuf);
		sfxge_next_stmp(txq, &stmp);

	if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) {
		rc = sfxge_tx_queue_tso(txq, mbuf, dma_seg, n_dma_seg, vlan_tagged);
		stmp = &txq->stmp[(rc - 1) & txq->ptr_mask];
		/* Add the mapping to the fragment list, and set flags
			desc = &txq->pend_desc[i + vlan_tagged];
			eop = (i == n_dma_seg - 1);
			efx_tx_qdesc_dma_create(txq->common,
			sfxge_next_stmp(txq, &stmp);
		txq->n_pend_desc = n_dma_seg + vlan_tagged;

	/*
	 * If the mapping required more than one descriptor
	 * then we need to associate the DMA map with the last
	 * descriptor, not the first.
	 */
	if (used_map != &stmp->map) {
		stmp->map = *used_map;
	stmp->flags = TX_BUF_UNMAP | TX_BUF_MBUF;

	/* Post the fragment list. */
	sfxge_tx_qlist_post(txq);

	bus_dmamap_unload(txq->packet_dma_tag, *used_map);

	/* Drop the packet on the floor. */
 * Drain the deferred packet list into the transmit queue.
sfxge_tx_qdpl_drain(struct sfxge_txq *txq)
	struct sfxge_softc *sc;
	struct sfxge_tx_dpl *stdp;
	struct mbuf *mbuf, *next;
	unsigned int non_tcp_count;

	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

	if (__predict_true(txq->init_state == SFXGE_TXQ_STARTED)) {
		prefetch_read_many(sc->enp);
		prefetch_read_many(txq->common);

	mbuf = stdp->std_get;
	count = stdp->std_get_count;
	non_tcp_count = stdp->std_get_non_tcp_count;

	if (count > stdp->std_get_hiwat)
		stdp->std_get_hiwat = count;

		KASSERT(mbuf != NULL, ("mbuf == NULL"));

		next = mbuf->m_nextpkt;
		mbuf->m_nextpkt = NULL;

		ETHER_BPF_MTAP(sc->ifnet, mbuf); /* packet capture */

			prefetch_read_many(next);

		rc = sfxge_tx_queue_mbuf(txq, mbuf);
			non_tcp_count -= sfxge_is_mbuf_non_tcp(mbuf);

		/* Push the fragments to the hardware in batches. */
		if (txq->added - pushed >= SFXGE_TX_BATCH) {
			efx_tx_qpush(txq->common, txq->added, pushed);

		KASSERT(mbuf == NULL, ("mbuf != NULL"));
		KASSERT(non_tcp_count == 0,
			("inconsistent TCP/non-TCP detection"));
		stdp->std_get = NULL;
		stdp->std_get_count = 0;
		stdp->std_get_non_tcp_count = 0;
		stdp->std_getp = &stdp->std_get;
		stdp->std_get = mbuf;
		stdp->std_get_count = count;
		stdp->std_get_non_tcp_count = non_tcp_count;

	if (txq->added != pushed)
		efx_tx_qpush(txq->common, txq->added, pushed);

	KASSERT(txq->blocked || stdp->std_get_count == 0,
		("queue unblocked but count is non-zero"));
#define SFXGE_TX_QDPL_PENDING(_txq)	((_txq)->dpl.std_put != 0)

 * Service the deferred packet list.
 *
 * NOTE: drops the txq mutex!
sfxge_tx_qdpl_service(struct sfxge_txq *txq)
	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

		if (SFXGE_TX_QDPL_PENDING(txq))
			sfxge_tx_qdpl_swizzle(txq);

		sfxge_tx_qdpl_drain(txq);

		SFXGE_TXQ_UNLOCK(txq);
	} while (SFXGE_TX_QDPL_PENDING(txq) &&
		 SFXGE_TXQ_TRYLOCK(txq));
 * Put a packet on the deferred packet get-list.
sfxge_tx_qdpl_put_locked(struct sfxge_txq *txq, struct mbuf *mbuf)
	struct sfxge_tx_dpl *stdp;

	KASSERT(mbuf->m_nextpkt == NULL, ("mbuf->m_nextpkt != NULL"));

	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

	if (stdp->std_get_count >= stdp->std_get_max) {
	if (sfxge_is_mbuf_non_tcp(mbuf)) {
		if (stdp->std_get_non_tcp_count >=
		    stdp->std_get_non_tcp_max) {
			txq->get_non_tcp_overflow++;
		stdp->std_get_non_tcp_count++;

	*(stdp->std_getp) = mbuf;
	stdp->std_getp = &mbuf->m_nextpkt;
	stdp->std_get_count++;
 * Put a packet on the deferred packet put-list.
 *
 * We overload the csum_data field in the mbuf to keep track of the put-list
 * length, because there is no cheap alternative that avoids races.
sfxge_tx_qdpl_put_unlocked(struct sfxge_txq *txq, struct mbuf *mbuf)
	struct sfxge_tx_dpl *stdp;
	volatile uintptr_t *putp;

	KASSERT(mbuf->m_nextpkt == NULL, ("mbuf->m_nextpkt != NULL"));

	SFXGE_TXQ_LOCK_ASSERT_NOTOWNED(txq);

	putp = &stdp->std_put;
	new = (uintptr_t)mbuf;
			struct mbuf *mp = (struct mbuf *)old;
			old_len = mp->m_pkthdr.csum_data;
			if (old_len >= stdp->std_put_max) {
				atomic_add_long(&txq->put_overflow, 1);
		mbuf->m_pkthdr.csum_data = old_len + 1;
		mbuf->m_nextpkt = (void *)old;
	} while (atomic_cmpset_ptr(putp, old, new) == 0);
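/*
 * Minimal standalone sketch of the lock-free put-list protocol used above
 * (illustrative only; generic names, and the csum_data length accounting is
 * omitted).  Any number of producers push with a compare-and-swap loop; the
 * single consumer that holds the txq lock detaches the whole list at once,
 * as sfxge_tx_qdpl_swizzle() does, and then reverses it.
 */
#if 0	/* example only, not compiled */
static void
example_put_list_push(volatile uintptr_t *putp, struct mbuf *m)
{
	uintptr_t old, new = (uintptr_t)m;

	do {
		old = *putp;
		/* Link the new head to the previous head before publishing. */
		m->m_nextpkt = (struct mbuf *)old;
	} while (atomic_cmpset_ptr(putp, old, new) == 0);
}

static struct mbuf *
example_put_list_take_all(volatile uintptr_t *putp)
{
	/* Detach the entire list atomically; it is in LIFO order. */
	return ((struct mbuf *)atomic_readandclear_ptr(putp));
}
#endif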
 * Called from if_transmit.  Try to grab the txq lock; if that succeeds,
 * append the packet to the deferred packet get-list and service the list,
 * otherwise push the packet onto the put-list.
sfxge_tx_packet_add(struct sfxge_txq *txq, struct mbuf *m)
	if (!SFXGE_LINK_UP(txq->sc)) {
		atomic_add_long(&txq->netdown_drops, 1);

	/*
	 * Try to grab the txq lock.  If we are able to get the lock,
	 * the packet will be appended to the "get list" of the deferred
	 * packet list.  Otherwise, it will be pushed on the "put list".
	 */
	if (SFXGE_TXQ_TRYLOCK(txq)) {
		/* First swizzle put-list to get-list to keep order */
		sfxge_tx_qdpl_swizzle(txq);

		rc = sfxge_tx_qdpl_put_locked(txq, m);

		/* Try to service the list. */
		sfxge_tx_qdpl_service(txq);
		/* Lock has been dropped. */
		rc = sfxge_tx_qdpl_put_unlocked(txq, m);

		/*
		 * Try to grab the lock again.
		 *
		 * If we are able to get the lock, we need to process
		 * the deferred packet list.  If we are not able to get
		 * the lock, another thread is processing the list.
		 */
		if ((rc == 0) && SFXGE_TXQ_TRYLOCK(txq)) {
			sfxge_tx_qdpl_service(txq);
			/* Lock has been dropped. */

	SFXGE_TXQ_LOCK_ASSERT_NOTOWNED(txq);
sfxge_tx_qdpl_flush(struct sfxge_txq *txq)
	struct sfxge_tx_dpl *stdp = &txq->dpl;
	struct mbuf *mbuf, *next;

	sfxge_tx_qdpl_swizzle(txq);
	for (mbuf = stdp->std_get; mbuf != NULL; mbuf = next) {
		next = mbuf->m_nextpkt;
	stdp->std_get = NULL;
	stdp->std_get_count = 0;
	stdp->std_get_non_tcp_count = 0;
	stdp->std_getp = &stdp->std_get;

	SFXGE_TXQ_UNLOCK(txq);

sfxge_if_qflush(struct ifnet *ifp)
	struct sfxge_softc *sc;

	for (i = 0; i < sc->txq_count; i++)
		sfxge_tx_qdpl_flush(sc->txq[i]);
 * TX start -- called by the stack.
sfxge_if_transmit(struct ifnet *ifp, struct mbuf *m)
	struct sfxge_softc *sc;
	struct sfxge_txq *txq;

	sc = (struct sfxge_softc *)ifp->if_softc;

	/*
	 * Transmit may be called when the interface is up from the kernel's
	 * point of view but not yet up (still in progress) from the driver's
	 * point of view, e.g. during link aggregation bring-up.  It may also
	 * be called when the interface is up from the driver's point of view
	 * but already down from the kernel's point of view, e.g. while an
	 * interface shutdown is in progress.
	 */
	KASSERT((ifp->if_flags & IFF_UP) || (sc->if_flags & IFF_UP),
		("interface not up"));

	/* Pick the desired transmit queue. */
	if (m->m_pkthdr.csum_flags &
	    (CSUM_DELAY_DATA | CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_TSO)) {

		/* Check whether the flowid is set */
		if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
			uint32_t hash = m->m_pkthdr.flowid;

			index = sc->rx_indir_table[hash % SFXGE_RX_SCALE_MAX];
		txq = sc->txq[SFXGE_TXQ_IP_TCP_UDP_CKSUM + index];
	} else if (m->m_pkthdr.csum_flags & CSUM_DELAY_IP) {
		txq = sc->txq[SFXGE_TXQ_IP_CKSUM];
		txq = sc->txq[SFXGE_TXQ_NON_CKSUM];

	rc = sfxge_tx_packet_add(txq, m);
 * Software "TSO".  Not quite as good as doing it in hardware, but
 * still faster than segmenting in the stack.
struct sfxge_tso_state {
	/* Output position */
	unsigned out_len;	/* Remaining length in current segment */
	unsigned seqnum;	/* Current sequence number */
	unsigned packet_space;	/* Remaining space in current packet */

	uint64_t dma_addr;	/* DMA address of current position */
	unsigned in_len;	/* Remaining length in current mbuf */

	const struct mbuf *mbuf; /* Input mbuf (head of chain) */
	u_short protocol;	/* Network protocol (after VLAN decap) */
	ssize_t nh_off;		/* Offset of network header */
	ssize_t tcph_off;	/* Offset of TCP header */
	unsigned header_len;	/* Number of bytes of header */
	unsigned seg_size;	/* TCP segment size */
	int fw_assisted;	/* Use FW-assisted TSO */
	u_short packet_id;	/* IPv4 packet ID from the original packet */
	efx_desc_t header_desc; /* Precomputed header descriptor for

static const struct ip *tso_iph(const struct sfxge_tso_state *tso)
	KASSERT(tso->protocol == htons(ETHERTYPE_IP),
		("tso_iph() in non-IPv4 state"));
	return (const struct ip *)(tso->mbuf->m_data + tso->nh_off);

static __unused const struct ip6_hdr *tso_ip6h(const struct sfxge_tso_state *tso)
	KASSERT(tso->protocol == htons(ETHERTYPE_IPV6),
		("tso_ip6h() in non-IPv6 state"));
	return (const struct ip6_hdr *)(tso->mbuf->m_data + tso->nh_off);

static const struct tcphdr *tso_tcph(const struct sfxge_tso_state *tso)
	return (const struct tcphdr *)(tso->mbuf->m_data + tso->tcph_off);

/* Size of preallocated TSO header buffers.  Larger blocks must be
 * allocated from the heap.
#define TSOH_STD_SIZE	128

/* At most half the descriptors in the queue at any time will refer to
 * a TSO header buffer, since they must always be followed by a
 * payload descriptor referring to an mbuf.
#define TSOH_COUNT(_txq_entries)	((_txq_entries) / 2u)
#define TSOH_PER_PAGE	(PAGE_SIZE / TSOH_STD_SIZE)
#define TSOH_PAGE_COUNT(_txq_entries)	\
	((TSOH_COUNT(_txq_entries) + TSOH_PER_PAGE - 1) / TSOH_PER_PAGE)
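/*
 * Worked example (assuming 4 KiB pages): with a 1024-entry Tx ring,
 * TSOH_COUNT(1024) = 512 header buffers, TSOH_PER_PAGE = 4096 / 128 = 32
 * buffers per page, and TSOH_PAGE_COUNT(1024) = howmany(512, 32) = 16 pages
 * are preallocated by tso_init() below.
 */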
static int tso_init(struct sfxge_txq *txq)
	struct sfxge_softc *sc = txq->sc;
	unsigned int tsoh_page_count = TSOH_PAGE_COUNT(sc->txq_entries);

	/* Allocate TSO header buffers */
	txq->tsoh_buffer = malloc(tsoh_page_count * sizeof(txq->tsoh_buffer[0]),

	for (i = 0; i < tsoh_page_count; i++) {
		rc = sfxge_dma_alloc(sc, PAGE_SIZE, &txq->tsoh_buffer[i]);

		sfxge_dma_free(&txq->tsoh_buffer[i]);
	free(txq->tsoh_buffer, M_SFXGE);
	txq->tsoh_buffer = NULL;

static void tso_fini(struct sfxge_txq *txq)
	if (txq->tsoh_buffer != NULL) {
		for (i = 0; i < TSOH_PAGE_COUNT(txq->sc->txq_entries); i++)
			sfxge_dma_free(&txq->tsoh_buffer[i]);
		free(txq->tsoh_buffer, M_SFXGE);
static void tso_start(struct sfxge_txq *txq, struct sfxge_tso_state *tso,
		      const bus_dma_segment_t *hdr_dma_seg,
	struct ether_header *eh = mtod(mbuf, struct ether_header *);
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(txq->sc->enp);
	const struct tcphdr *th;
	struct tcphdr th_copy;

	tso->fw_assisted = txq->sc->tso_fw_assisted;

	/* Find network protocol and header */
	tso->protocol = eh->ether_type;
	if (tso->protocol == htons(ETHERTYPE_VLAN)) {
		struct ether_vlan_header *veh =
			mtod(mbuf, struct ether_vlan_header *);
		tso->protocol = veh->evl_proto;
		tso->nh_off = sizeof(*veh);
		tso->nh_off = sizeof(*eh);

	/* Find TCP header */
	if (tso->protocol == htons(ETHERTYPE_IP)) {
		KASSERT(tso_iph(tso)->ip_p == IPPROTO_TCP,
			("TSO required on non-TCP packet"));
		tso->tcph_off = tso->nh_off + 4 * tso_iph(tso)->ip_hl;
		tso->packet_id = tso_iph(tso)->ip_id;
		KASSERT(tso->protocol == htons(ETHERTYPE_IPV6),
			("TSO required on non-IP packet"));
		KASSERT(tso_ip6h(tso)->ip6_nxt == IPPROTO_TCP,
			("TSO required on non-TCP packet"));
		tso->tcph_off = tso->nh_off + sizeof(struct ip6_hdr);

	if (tso->fw_assisted &&
	    __predict_false(tso->tcph_off >
			    encp->enc_tx_tso_tcp_header_offset_limit)) {
		tso->fw_assisted = 0;

	KASSERT(mbuf->m_len >= tso->tcph_off,
		("network header is fragmented in mbuf"));
	/* We need the TCP header up to and including the flags field
	 * (th_win is the next field) */
	if (mbuf->m_len < tso->tcph_off + offsetof(struct tcphdr, th_win)) {
		m_copydata(tso->mbuf, tso->tcph_off, sizeof(th_copy),

	tso->header_len = tso->tcph_off + 4 * th->th_off;
	tso->seg_size = mbuf->m_pkthdr.tso_segsz;

	tso->seqnum = ntohl(th->th_seq);

	/*
	 * These flags must not be duplicated.
	 *
	 * RST should not be duplicated either, but the FreeBSD kernel
	 * generates TSO packets with the RST flag set, so do not assert
	 * its absence.
	 */
	KASSERT(!(th->th_flags & (TH_URG | TH_SYN)),
		("incompatible TCP flag 0x%x on TSO packet",
		 th->th_flags & (TH_URG | TH_SYN)));

	tso->out_len = mbuf->m_pkthdr.len - tso->header_len;

	if (tso->fw_assisted) {
		if (hdr_dma_seg->ds_len >= tso->header_len)
			efx_tx_qdesc_dma_create(txq->common,
						hdr_dma_seg->ds_addr,
			tso->fw_assisted = 0;
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 *
 * Form descriptors for the current fragment, until we reach the end of the
 * fragment or end-of-packet.  Nothing is returned; descriptors are appended
 * to txq->pend_desc.
static void tso_fill_packet_with_fragment(struct sfxge_txq *txq,
					  struct sfxge_tso_state *tso)
	if (tso->in_len == 0 || tso->packet_space == 0)

	KASSERT(tso->in_len > 0, ("TSO input length went negative"));
	KASSERT(tso->packet_space > 0, ("TSO packet space went negative"));

	n = min(tso->in_len, tso->packet_space);

	tso->packet_space -= n;

	desc = &txq->pend_desc[txq->n_pend_desc++];
	efx_tx_qdesc_dma_create(txq->common,
				tso->out_len == 0 || tso->packet_space == 0,

/* Callback from bus_dmamap_load() for long TSO headers. */
static void tso_map_long_header(void *dma_addr_ret,
				bus_dma_segment_t *segs, int nseg,
	*(uint64_t *)dma_addr_ret = ((__predict_true(error == 0) &&
				      __predict_true(nseg == 1)) ?
 * tso_start_new_packet - generate a new header and prepare for the new packet
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or an error code if the header could not be allocated.
static int tso_start_new_packet(struct sfxge_txq *txq,
				struct sfxge_tso_state *tso,
	unsigned int id = *idp;
	struct tcphdr *tsoh_th;

	if (tso->fw_assisted) {
		uint8_t tcp_flags = tso_tcph(tso)->th_flags;

		if (tso->out_len > tso->seg_size)
			tcp_flags &= ~(TH_FIN | TH_PUSH);

		/* TSO option descriptor */
		desc = &txq->pend_desc[txq->n_pend_desc++];
		efx_tx_qdesc_tso_create(txq->common,
		KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0"));
		id = (id + 1) & txq->ptr_mask;

		/* Header DMA descriptor */
		*desc = tso->header_desc;
		KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0"));
		id = (id + 1) & txq->ptr_mask;

		tso->seqnum += tso->seg_size;

		/* Allocate a DMA-mapped header buffer. */
		if (__predict_true(tso->header_len <= TSOH_STD_SIZE)) {
			unsigned int page_index = (id / 2) / TSOH_PER_PAGE;
			unsigned int buf_index = (id / 2) % TSOH_PER_PAGE;

			header = (txq->tsoh_buffer[page_index].esm_base +
				  buf_index * TSOH_STD_SIZE);
			dma_addr = (txq->tsoh_buffer[page_index].esm_addr +
				    buf_index * TSOH_STD_SIZE);
			map = txq->tsoh_buffer[page_index].esm_map;

			KASSERT(txq->stmp[id].flags == 0,
				("stmp flags are not 0"));
			struct sfxge_tx_mapping *stmp = &txq->stmp[id];

			/* We cannot use bus_dmamem_alloc() as that may sleep */
			header = malloc(tso->header_len, M_SFXGE, M_NOWAIT);
			if (__predict_false(!header))
			rc = bus_dmamap_load(txq->packet_dma_tag, stmp->map,
					     header, tso->header_len,
					     tso_map_long_header, &dma_addr,
			if (__predict_false(dma_addr == 0)) {
					/* Succeeded but got >1 segment */
					bus_dmamap_unload(txq->packet_dma_tag,
				free(header, M_SFXGE);

			txq->tso_long_headers++;
			stmp->u.heap_buf = header;
			stmp->flags = TX_BUF_UNMAP;

		tsoh_th = (struct tcphdr *)(header + tso->tcph_off);

		/* Copy and update the headers. */
		m_copydata(tso->mbuf, 0, tso->header_len, header);

		tsoh_th->th_seq = htonl(tso->seqnum);
		tso->seqnum += tso->seg_size;
		if (tso->out_len > tso->seg_size) {
			/* This packet will not finish the TSO burst. */
			ip_length = tso->header_len - tso->nh_off + tso->seg_size;
			tsoh_th->th_flags &= ~(TH_FIN | TH_PUSH);
			/* This packet will be the last in the TSO burst. */
			ip_length = tso->header_len - tso->nh_off + tso->out_len;

		if (tso->protocol == htons(ETHERTYPE_IP)) {
			struct ip *tsoh_iph = (struct ip *)(header + tso->nh_off);
			tsoh_iph->ip_len = htons(ip_length);
			/* XXX We should increment ip_id, but FreeBSD doesn't
			 * currently allocate extra IDs for multiple segments.
			struct ip6_hdr *tsoh_iph =
				(struct ip6_hdr *)(header + tso->nh_off);
			tsoh_iph->ip6_plen = htons(ip_length - sizeof(*tsoh_iph));

		/* Make the header visible to the hardware. */
		bus_dmamap_sync(txq->packet_dma_tag, map, BUS_DMASYNC_PREWRITE);

		/* Form a descriptor for this header. */
		desc = &txq->pend_desc[txq->n_pend_desc++];
		efx_tx_qdesc_dma_create(txq->common,
		id = (id + 1) & txq->ptr_mask;

	tso->packet_space = tso->seg_size;
sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf,
		   const bus_dma_segment_t *dma_seg, int n_dma_seg,
	struct sfxge_tso_state tso;
	unsigned skipped = 0;

	tso_start(txq, &tso, dma_seg, mbuf);

	while (dma_seg->ds_len + skipped <= tso.header_len) {
		skipped += dma_seg->ds_len;
		KASSERT(n_dma_seg, ("no payload found in TSO packet"));

	tso.in_len = dma_seg->ds_len - (tso.header_len - skipped);
	tso.dma_addr = dma_seg->ds_addr + (tso.header_len - skipped);

	id = (txq->added + vlan_tagged) & txq->ptr_mask;
	if (__predict_false(tso_start_new_packet(txq, &tso, &id)))

		tso_fill_packet_with_fragment(txq, &tso);
		/* Exactly one DMA descriptor is added */
		KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0"));
		id = (id + 1) & txq->ptr_mask;

		/* Move onto the next fragment? */
		if (tso.in_len == 0) {
			tso.in_len = dma_seg->ds_len;
			tso.dma_addr = dma_seg->ds_addr;

		/* End of packet? */
		if (tso.packet_space == 0) {
			/* If the queue is now full due to tiny MSS,
			 * or we can't create another header, discard
			 * the remainder of the input mbuf but do not
			 * roll back the work we have done.
			if (txq->n_pend_desc + tso.fw_assisted +
			    1 /* header */ + n_dma_seg >
			    txq->max_pkt_desc) {
				txq->tso_pdrop_too_many++;
			if (__predict_false(tso_start_new_packet(txq, &tso,
				txq->tso_pdrop_no_rsrc++;
sfxge_tx_qunblock(struct sfxge_txq *txq)
	struct sfxge_softc *sc;
	struct sfxge_evq *evq;

	evq = sc->evq[txq->evq_index];

	SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);

	if (__predict_false(txq->init_state != SFXGE_TXQ_STARTED))

	SFXGE_TXQ_LOCK(txq);

		level = txq->added - txq->completed;
		if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries)) {
			/* reaped must be in sync with blocked */
			sfxge_tx_qreap(txq);

	sfxge_tx_qdpl_service(txq);
	/* note: lock has been dropped */

sfxge_tx_qflush_done(struct sfxge_txq *txq)
	txq->flush_state = SFXGE_FLUSH_DONE;

sfxge_tx_qstop(struct sfxge_softc *sc, unsigned int index)
	struct sfxge_txq *txq;
	struct sfxge_evq *evq;

	SFXGE_ADAPTER_LOCK_ASSERT_OWNED(sc);

	txq = sc->txq[index];
	evq = sc->evq[txq->evq_index];

	SFXGE_EVQ_LOCK(evq);
	SFXGE_TXQ_LOCK(txq);

	KASSERT(txq->init_state == SFXGE_TXQ_STARTED,
		("txq->init_state != SFXGE_TXQ_STARTED"));

	txq->init_state = SFXGE_TXQ_INITIALIZED;

	if (txq->flush_state != SFXGE_FLUSH_DONE) {
		txq->flush_state = SFXGE_FLUSH_PENDING;

		SFXGE_EVQ_UNLOCK(evq);
		SFXGE_TXQ_UNLOCK(txq);

		/* Flush the transmit queue. */
		if (efx_tx_qflush(txq->common) != 0) {
			log(LOG_ERR, "%s: Flushing Tx queue %u failed\n",
			    device_get_nameunit(sc->dev), index);
			txq->flush_state = SFXGE_FLUSH_DONE;

		/* Spin for 100ms. */
			if (txq->flush_state != SFXGE_FLUSH_PENDING)
		} while (++count < 20);

		SFXGE_EVQ_LOCK(evq);
		SFXGE_TXQ_LOCK(txq);

		KASSERT(txq->flush_state != SFXGE_FLUSH_FAILED,
			("txq->flush_state == SFXGE_FLUSH_FAILED"));

		if (txq->flush_state != SFXGE_FLUSH_DONE) {
			log(LOG_ERR, "%s: Cannot flush Tx queue %u\n",
			    device_get_nameunit(sc->dev), index);
			txq->flush_state = SFXGE_FLUSH_DONE;

	txq->pending = txq->added;

	sfxge_tx_qcomplete(txq, evq);
	KASSERT(txq->completed == txq->added,
		("txq->completed != txq->added"));

	sfxge_tx_qreap(txq);
	KASSERT(txq->reaped == txq->completed,
		("txq->reaped != txq->completed"));

	/* Destroy the common code transmit queue. */
	efx_tx_qdestroy(txq->common);

	efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
			       EFX_TXQ_NBUFS(sc->txq_entries));

	SFXGE_EVQ_UNLOCK(evq);
	SFXGE_TXQ_UNLOCK(txq);
sfxge_tx_qstart(struct sfxge_softc *sc, unsigned int index)
	struct sfxge_txq *txq;
	struct sfxge_evq *evq;
	unsigned int desc_index;

	SFXGE_ADAPTER_LOCK_ASSERT_OWNED(sc);

	txq = sc->txq[index];
	evq = sc->evq[txq->evq_index];

	KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED,
		("txq->init_state != SFXGE_TXQ_INITIALIZED"));
	KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
		("evq->init_state != SFXGE_EVQ_STARTED"));

	/* Program the buffer table. */
	if ((rc = efx_sram_buf_tbl_set(sc->enp, txq->buf_base_id, esmp,
				       EFX_TXQ_NBUFS(sc->txq_entries))) != 0)

	/* Determine the kind of queue we are creating. */
	switch (txq->type) {
	case SFXGE_TXQ_NON_CKSUM:
	case SFXGE_TXQ_IP_CKSUM:
		flags = EFX_CKSUM_IPV4;
	case SFXGE_TXQ_IP_TCP_UDP_CKSUM:
		flags = EFX_CKSUM_IPV4 | EFX_CKSUM_TCPUDP;
		KASSERT(0, ("Impossible TX queue"));

	/* Create the common code transmit queue. */
	if ((rc = efx_tx_qcreate(sc->enp, index, txq->type, esmp,
	    sc->txq_entries, txq->buf_base_id, flags, evq->common,
	    &txq->common, &desc_index)) != 0)

	/* Initialise queue descriptor indexes */
	txq->added = txq->pending = txq->completed = txq->reaped = desc_index;

	SFXGE_TXQ_LOCK(txq);

	/* Enable the transmit queue. */
	efx_tx_qenable(txq->common);

	txq->init_state = SFXGE_TXQ_STARTED;
	txq->flush_state = SFXGE_FLUSH_REQUIRED;

	SFXGE_TXQ_UNLOCK(txq);

	efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
			       EFX_TXQ_NBUFS(sc->txq_entries));

sfxge_tx_stop(struct sfxge_softc *sc)
	index = sc->txq_count;
	while (--index >= 0)
		sfxge_tx_qstop(sc, index);

	/* Tear down the transmit module */
	efx_tx_fini(sc->enp);

sfxge_tx_start(struct sfxge_softc *sc)
	/* Initialize the common code transmit module. */
	if ((rc = efx_tx_init(sc->enp)) != 0)

	for (index = 0; index < sc->txq_count; index++) {
		if ((rc = sfxge_tx_qstart(sc, index)) != 0)

	while (--index >= 0)
		sfxge_tx_qstop(sc, index);

	efx_tx_fini(sc->enp);
sfxge_txq_stat_init(struct sfxge_txq *txq, struct sysctl_oid *txq_node)
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(txq->sc->dev);
	struct sysctl_oid *stat_node;

	stat_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(txq_node), OID_AUTO,
				    "stats", CTLFLAG_RD, NULL,
				    "Tx queue statistics");
	if (stat_node == NULL)

	for (id = 0; id < nitems(sfxge_tx_stats); id++) {
		    ctx, SYSCTL_CHILDREN(stat_node), OID_AUTO,
		    sfxge_tx_stats[id].name, CTLFLAG_RD | CTLFLAG_STATS,
		    (unsigned long *)((caddr_t)txq + sfxge_tx_stats[id].offset),

 * Destroy a transmit queue.
sfxge_tx_qfini(struct sfxge_softc *sc, unsigned int index)
	struct sfxge_txq *txq;

	txq = sc->txq[index];

	KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED,
		("txq->init_state != SFXGE_TXQ_INITIALIZED"));

	if (txq->type == SFXGE_TXQ_IP_TCP_UDP_CKSUM)

	/* Free the context arrays. */
	free(txq->pend_desc, M_SFXGE);
	nmaps = sc->txq_entries;
	while (nmaps-- != 0)
		bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map);
	free(txq->stmp, M_SFXGE);

	/* Release DMA memory mapping. */
	sfxge_dma_free(&txq->mem);

	sc->txq[index] = NULL;

	SFXGE_TXQ_LOCK_DESTROY(txq);
 * Estimate the maximum number of Tx descriptors required for a TSO packet.
 * With a minimum MSS and a maximum mbuf length we might need more (even
 * more than a ring-ful of descriptors), but this should not happen in
 * practice except due to a deliberate attack.  In that case we will
 * truncate the output at a packet boundary.
sfxge_tx_max_pkt_desc(const struct sfxge_softc *sc, enum sfxge_txq_type type)
	/* One descriptor for every input fragment */
	unsigned int max_descs = SFXGE_TX_MAPPING_MAX_SEG;

	/* VLAN tagging Tx option descriptor may be required */
	if (efx_nic_cfg_get(sc->enp)->enc_hw_tx_insert_vlan_enabled)

	if (type == SFXGE_TXQ_IP_TCP_UDP_CKSUM) {
		/*
		 * Plus header and payload descriptor for each output segment.
		 * Minus one since the header fragment is already counted.
		 */
		max_descs += SFXGE_TSO_MAX_SEGS * 2 - 1;

		/* FW assisted TSO requires one more descriptor per segment */
		if (sc->tso_fw_assisted)
			max_descs += SFXGE_TSO_MAX_SEGS;
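/*
 * Putting the terms above together (illustrative only; the macro values are
 * defined in sfxge_tx.h): for a checksum/TSO queue on a NIC with hardware
 * VLAN insertion and FW-assisted TSO enabled, the worst case is
 *
 *	max_descs = SFXGE_TX_MAPPING_MAX_SEG	(one per input fragment)
 *		  + 1				(VLAN option descriptor)
 *		  + 2 * SFXGE_TSO_MAX_SEGS - 1	(header + payload per segment)
 *		  + SFXGE_TSO_MAX_SEGS		(TSO option descriptor per segment)
 *
 * and this is the per-packet headroom that sfxge_tx_qlist_post() keeps free
 * when it computes its block level.
 */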
sfxge_tx_qinit(struct sfxge_softc *sc, unsigned int txq_index,
	       enum sfxge_txq_type type, unsigned int evq_index)
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
	struct sysctl_oid *txq_node;
	struct sfxge_txq *txq;
	struct sfxge_evq *evq;
	struct sfxge_tx_dpl *stdp;
	struct sysctl_oid *dpl_node;

	txq = malloc(sizeof(struct sfxge_txq), M_SFXGE, M_ZERO | M_WAITOK);
	txq->entries = sc->txq_entries;
	txq->ptr_mask = txq->entries - 1;

	sc->txq[txq_index] = txq;
	evq = sc->evq[evq_index];

	/* Allocate and zero DMA space for the descriptor ring. */
	if ((rc = sfxge_dma_alloc(sc, EFX_TXQ_SIZE(sc->txq_entries), esmp)) != 0)

	/* Allocate buffer table entries. */
	sfxge_sram_buf_tbl_alloc(sc, EFX_TXQ_NBUFS(sc->txq_entries),

	/* Create a DMA tag for packet mappings. */
	if (bus_dma_tag_create(sc->parent_dma_tag, 1, 0x1000,
	    MIN(0x3FFFFFFFFFFFUL, BUS_SPACE_MAXADDR), BUS_SPACE_MAXADDR, NULL,
	    NULL, 0x11000, SFXGE_TX_MAPPING_MAX_SEG, 0x1000, 0, NULL, NULL,
	    &txq->packet_dma_tag) != 0) {
		device_printf(sc->dev, "Couldn't allocate txq DMA tag\n");

	/* Allocate pending descriptor array for batching writes. */
	txq->pend_desc = malloc(sizeof(efx_desc_t) * sc->txq_entries,
				M_SFXGE, M_ZERO | M_WAITOK);

	/* Allocate and initialise mbuf DMA mapping array. */
	txq->stmp = malloc(sizeof(struct sfxge_tx_mapping) * sc->txq_entries,
			   M_SFXGE, M_ZERO | M_WAITOK);
	for (nmaps = 0; nmaps < sc->txq_entries; nmaps++) {
		rc = bus_dmamap_create(txq->packet_dma_tag, 0,
				       &txq->stmp[nmaps].map);

	snprintf(name, sizeof(name), "%u", txq_index);
	txq_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(sc->txqs_node),
				   OID_AUTO, name, CTLFLAG_RD, NULL, "");
	if (txq_node == NULL) {

	if (type == SFXGE_TXQ_IP_TCP_UDP_CKSUM &&
	    (rc = tso_init(txq)) != 0)

	if (sfxge_tx_dpl_get_max <= 0) {
		log(LOG_ERR, "%s=%d must be greater than 0",
		    SFXGE_PARAM_TX_DPL_GET_MAX, sfxge_tx_dpl_get_max);
		goto fail_tx_dpl_get_max;
	if (sfxge_tx_dpl_get_non_tcp_max <= 0) {
		log(LOG_ERR, "%s=%d must be greater than 0",
		    SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX,
		    sfxge_tx_dpl_get_non_tcp_max);
		goto fail_tx_dpl_get_max;
	if (sfxge_tx_dpl_put_max < 0) {
		log(LOG_ERR, "%s=%d must be greater than or equal to 0",
		    SFXGE_PARAM_TX_DPL_PUT_MAX, sfxge_tx_dpl_put_max);
		goto fail_tx_dpl_put_max;

	/* Initialize the deferred packet list. */
	stdp->std_put_max = sfxge_tx_dpl_put_max;
	stdp->std_get_max = sfxge_tx_dpl_get_max;
	stdp->std_get_non_tcp_max = sfxge_tx_dpl_get_non_tcp_max;
	stdp->std_getp = &stdp->std_get;

	SFXGE_TXQ_LOCK_INIT(txq, device_get_nameunit(sc->dev), txq_index);

	dpl_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(txq_node), OID_AUTO,
				   "dpl", CTLFLAG_RD, NULL,
				   "Deferred packet list statistics");
	if (dpl_node == NULL) {

	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO,
			"get_count", CTLFLAG_RD | CTLFLAG_STATS,
			&stdp->std_get_count, 0, "");
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO,
			"get_non_tcp_count", CTLFLAG_RD | CTLFLAG_STATS,
			&stdp->std_get_non_tcp_count, 0, "");
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO,
			"get_hiwat", CTLFLAG_RD | CTLFLAG_STATS,
			&stdp->std_get_hiwat, 0, "");
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO,
			"put_hiwat", CTLFLAG_RD | CTLFLAG_STATS,
			&stdp->std_put_hiwat, 0, "");

	rc = sfxge_txq_stat_init(txq, txq_node);
		goto fail_txq_stat_init;

	txq->evq_index = evq_index;
	txq->txq_index = txq_index;
	txq->init_state = SFXGE_TXQ_INITIALIZED;
	txq->hw_vlan_tci = 0;

	txq->max_pkt_desc = sfxge_tx_max_pkt_desc(sc, type);

fail_tx_dpl_put_max:
fail_tx_dpl_get_max:
	free(txq->pend_desc, M_SFXGE);
	while (nmaps-- != 0)
		bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map);
	free(txq->stmp, M_SFXGE);
	bus_dma_tag_destroy(txq->packet_dma_tag);

	sfxge_dma_free(esmp);
sfxge_tx_stat_handler(SYSCTL_HANDLER_ARGS)
	struct sfxge_softc *sc = arg1;
	unsigned int id = arg2;

	/* Sum across all TX queues */
	for (index = 0; index < sc->txq_count; index++)
		sum += *(unsigned long *)((caddr_t)sc->txq[index] +
					  sfxge_tx_stats[id].offset);

	return (SYSCTL_OUT(req, &sum, sizeof(sum)));

sfxge_tx_stat_init(struct sfxge_softc *sc)
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
	struct sysctl_oid_list *stat_list;

	stat_list = SYSCTL_CHILDREN(sc->stats_node);

	for (id = 0; id < nitems(sfxge_tx_stats); id++) {
		    OID_AUTO, sfxge_tx_stats[id].name,
		    CTLTYPE_ULONG|CTLFLAG_RD,
		    sc, id, sfxge_tx_stat_handler, "LU",

sfxge_tx_fini(struct sfxge_softc *sc)
	index = sc->txq_count;
	while (--index >= 0)
		sfxge_tx_qfini(sc, index);

sfxge_tx_init(struct sfxge_softc *sc)
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sc->enp);
	struct sfxge_intr *intr;

	KASSERT(intr->state == SFXGE_INTR_INITIALIZED,
		("intr->state != SFXGE_INTR_INITIALIZED"));

	sc->txq_count = SFXGE_TXQ_NTYPES - 1 + sc->intr.n_alloc;
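	/*
	 * For example (assuming the three queue types listed in the theory
	 * of operation above), four allocated interrupts give
	 * txq_count = (3 - 1) + 4 = 6: the two extra queues bound to event
	 * queue 0 plus one checksum-offload queue per event queue.
	 */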
	sc->tso_fw_assisted = sfxge_tso_fw_assisted;
	if (sc->tso_fw_assisted)
		sc->tso_fw_assisted =
			(encp->enc_features & EFX_FEATURE_FW_ASSISTED_TSO) &&
			(encp->enc_fw_assisted_tso_enabled);

	sc->txqs_node = SYSCTL_ADD_NODE(
		device_get_sysctl_ctx(sc->dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)),
		OID_AUTO, "txq", CTLFLAG_RD, NULL, "Tx queues");
	if (sc->txqs_node == NULL) {

	/* Initialize the transmit queues */
	if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_NON_CKSUM,
	    SFXGE_TXQ_NON_CKSUM, 0)) != 0)

	if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_IP_CKSUM,
	    SFXGE_TXQ_IP_CKSUM, 0)) != 0)

	     index < sc->txq_count - SFXGE_TXQ_NTYPES + 1;
		if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_NTYPES - 1 + index,
		    SFXGE_TXQ_IP_TCP_UDP_CKSUM, index)) != 0)

	sfxge_tx_stat_init(sc);

	while (--index >= 0)
		sfxge_tx_qfini(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index);

	sfxge_tx_qfini(sc, SFXGE_TXQ_IP_CKSUM);

	sfxge_tx_qfini(sc, SFXGE_TXQ_NON_CKSUM);