 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 * Copyright (c) 2010-2016 Solarflare Communications Inc.
 * This software was developed in part by Philip Paeps under contract for
 * Solarflare Communications, Inc.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
/* Theory of operation:
 *
 * Tx queue allocation and mapping on Siena
 *
 * One Tx queue with checksum offload enabled is allocated per Rx channel
 * (event queue).  In addition, two Tx queues (one with no checksum offload
 * and one with IP checksum offload only) are allocated and bound to event
 * queue 0.  sfxge_txq_type is used as the Tx queue label.
 *
 * So the mapping from (event queue, label) to Tx queue index is:
 *	if the event queue index is 0,
 *		TxQ-index = TxQ-label (which lies in [0..SFXGE_TXQ_NTYPES))
 *	else
 *		TxQ-index = SFXGE_TXQ_NTYPES + EvQ-index - 1
 * See sfxge_get_txq_by_label() in sfxge_ev.c
 * Tx queue allocation and mapping on EF10
 *
 * One Tx queue with checksum offload enabled is allocated per Rx
 * channel (event queue).  Checksum offload on all Tx queues is enabled or
 * disabled dynamically by inserting option descriptors, so the additional
 * queues used on Siena are not required.
 *
 * The TxQ label is always set to zero on EF10 hardware.
 * So the event queue to Tx queue mapping is simply:
 *	TxQ-index = EvQ-index
 */
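/*
 * Illustrative sketch only (not driver code): the two mappings described
 * above written out as pure functions.  The function names and the
 * "n_types" parameter (standing in for SFXGE_TXQ_NTYPES) are hypothetical;
 * the real Siena lookup lives in sfxge_get_txq_by_label() in sfxge_ev.c.
 */
static inline unsigned int
example_siena_txq_index(unsigned int evq_index, unsigned int txq_label,
    unsigned int n_types)
{
	if (evq_index == 0)
		return (txq_label);	/* label is in [0..n_types) */
	return (n_types + evq_index - 1);
}

static inline unsigned int
example_ef10_txq_index(unsigned int evq_index)
{
	return (evq_index);	/* label is always zero on EF10 */
}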
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/limits.h>

#include <net/ethernet.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>

#include <net/rss_config.h>

#include "common/efx.h"
#define	SFXGE_PARAM_TX_DPL_GET_MAX	SFXGE_PARAM(tx_dpl_get_max)
static int sfxge_tx_dpl_get_max = SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT;
TUNABLE_INT(SFXGE_PARAM_TX_DPL_GET_MAX, &sfxge_tx_dpl_get_max);
SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_get_max, CTLFLAG_RDTUN,
	   &sfxge_tx_dpl_get_max, 0,
	   "Maximum total number of packets in the deferred packet get-list");

#define	SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX \
	SFXGE_PARAM(tx_dpl_get_non_tcp_max)
static int sfxge_tx_dpl_get_non_tcp_max =
	SFXGE_TX_DPL_GET_NON_TCP_PKT_LIMIT_DEFAULT;
TUNABLE_INT(SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX, &sfxge_tx_dpl_get_non_tcp_max);
SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_get_non_tcp_max, CTLFLAG_RDTUN,
	   &sfxge_tx_dpl_get_non_tcp_max, 0,
	   "Maximum number of non-TCP packets in the deferred packet get-list");

#define	SFXGE_PARAM_TX_DPL_PUT_MAX	SFXGE_PARAM(tx_dpl_put_max)
static int sfxge_tx_dpl_put_max = SFXGE_TX_DPL_PUT_PKT_LIMIT_DEFAULT;
TUNABLE_INT(SFXGE_PARAM_TX_DPL_PUT_MAX, &sfxge_tx_dpl_put_max);
SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_put_max, CTLFLAG_RDTUN,
	   &sfxge_tx_dpl_put_max, 0,
	   "Maximum total number of packets in the deferred packet put-list");

#define	SFXGE_PARAM_TSO_FW_ASSISTED	SFXGE_PARAM(tso_fw_assisted)
static int sfxge_tso_fw_assisted = (SFXGE_FATSOV1 | SFXGE_FATSOV2);
TUNABLE_INT(SFXGE_PARAM_TSO_FW_ASSISTED, &sfxge_tso_fw_assisted);
SYSCTL_INT(_hw_sfxge, OID_AUTO, tso_fw_assisted, CTLFLAG_RDTUN,
	   &sfxge_tso_fw_assisted, 0,
	   "Bitmask of FW-assisted TSO variants allowed if supported by NIC firmware");
static const struct {
	const char *name;
	size_t offset;
} sfxge_tx_stats[] = {
#define	SFXGE_TX_STAT(name, member) \
	{ #name, offsetof(struct sfxge_txq, member) }
	SFXGE_TX_STAT(tso_bursts, tso_bursts),
	SFXGE_TX_STAT(tso_packets, tso_packets),
	SFXGE_TX_STAT(tso_long_headers, tso_long_headers),
	SFXGE_TX_STAT(tso_pdrop_too_many, tso_pdrop_too_many),
	SFXGE_TX_STAT(tso_pdrop_no_rsrc, tso_pdrop_no_rsrc),
	SFXGE_TX_STAT(tx_collapses, collapses),
	SFXGE_TX_STAT(tx_drops, drops),
	SFXGE_TX_STAT(tx_get_overflow, get_overflow),
	SFXGE_TX_STAT(tx_get_non_tcp_overflow, get_non_tcp_overflow),
	SFXGE_TX_STAT(tx_put_overflow, put_overflow),
	SFXGE_TX_STAT(tx_netdown_drops, netdown_drops),

/* Forward declarations. */
static void sfxge_tx_qdpl_service(struct sfxge_txq *txq);
static void sfxge_tx_qlist_post(struct sfxge_txq *txq);
static void sfxge_tx_qunblock(struct sfxge_txq *txq);
static int sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf,
			      const bus_dma_segment_t *dma_seg, int n_dma_seg,
sfxge_next_stmp(struct sfxge_txq *txq, struct sfxge_tx_mapping **pstmp)
	KASSERT((*pstmp)->flags == 0, ("stmp flags are not 0"));
	if (__predict_false(*pstmp ==
			    &txq->stmp[txq->ptr_mask]))
		*pstmp = &txq->stmp[0];
sfxge_tx_maybe_toggle_cksum_offload(struct sfxge_txq *txq, struct mbuf *mbuf,
				    struct sfxge_tx_mapping **pstmp)
	uint16_t new_hw_cksum_flags;

	if (mbuf->m_pkthdr.csum_flags &
	    (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6 | CSUM_TSO)) {
		/*
		 * We always set EFX_TXQ_CKSUM_IPV4 here because this
		 * configuration is the most useful, and it does no harm
		 * in the case of IPv6 traffic anyway.
		 */
		new_hw_cksum_flags = EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP;
	} else if (mbuf->m_pkthdr.csum_flags & CSUM_DELAY_IP) {
		new_hw_cksum_flags = EFX_TXQ_CKSUM_IPV4;
		new_hw_cksum_flags = 0;

	if (new_hw_cksum_flags == txq->hw_cksum_flags)

	desc = &txq->pend_desc[txq->n_pend_desc];
	efx_tx_qdesc_checksum_create(txq->common, new_hw_cksum_flags, desc);
	txq->hw_cksum_flags = new_hw_cksum_flags;

	sfxge_next_stmp(txq, pstmp);
sfxge_tx_maybe_insert_tag(struct sfxge_txq *txq, struct mbuf *mbuf,
			  struct sfxge_tx_mapping **pstmp)
	uint16_t this_tag = ((mbuf->m_flags & M_VLANTAG) ?
			     mbuf->m_pkthdr.ether_vtag :

	if (this_tag == txq->hw_vlan_tci)

	desc = &txq->pend_desc[txq->n_pend_desc];
	efx_tx_qdesc_vlantci_create(txq->common, bswap16(this_tag), desc);
	txq->hw_vlan_tci = this_tag;

	sfxge_next_stmp(txq, pstmp);
sfxge_tx_qcomplete(struct sfxge_txq *txq, struct sfxge_evq *evq)
	unsigned int completed;

	SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);

	completed = txq->completed;
	while (completed != txq->pending) {
		struct sfxge_tx_mapping *stmp;

		id = completed++ & txq->ptr_mask;

		stmp = &txq->stmp[id];
		if (stmp->flags & TX_BUF_UNMAP) {
			bus_dmamap_unload(txq->packet_dma_tag, stmp->map);
			if (stmp->flags & TX_BUF_MBUF) {
				struct mbuf *m = stmp->u.mbuf;
				free(stmp->u.heap_buf, M_SFXGE);

	txq->completed = completed;

	/* Check whether we need to unblock the queue. */
	level = txq->added - txq->completed;
	if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries))
		sfxge_tx_qunblock(txq);
sfxge_is_mbuf_non_tcp(struct mbuf *mbuf)
	/*
	 * The absence of TCP checksum flags does not necessarily mean the
	 * packet is non-TCP, but the flags should be set on TCP traffic if
	 * the user wants to achieve high throughput.
	 */
	return (!(mbuf->m_pkthdr.csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP)));
/*
 * Reorder the put list and append it to the get list.
 */
sfxge_tx_qdpl_swizzle(struct sfxge_txq *txq)
	struct sfxge_tx_dpl *stdp;
	struct mbuf *mbuf, *get_next, **get_tailp;
	volatile uintptr_t *putp;
	unsigned int non_tcp_count;

	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

	/* Acquire the put list. */
	putp = &stdp->std_put;
	put = atomic_readandclear_ptr(putp);

	/* Reverse the put list. */
	get_tailp = &mbuf->m_nextpkt;
		struct mbuf *put_next;

		non_tcp_count += sfxge_is_mbuf_non_tcp(mbuf);
		put_next = mbuf->m_nextpkt;
		mbuf->m_nextpkt = get_next;
	} while (mbuf != NULL);

	if (count > stdp->std_put_hiwat)
		stdp->std_put_hiwat = count;

	/* Append the reversed put list to the get list. */
	KASSERT(*get_tailp == NULL, ("*get_tailp != NULL"));
	*stdp->std_getp = get_next;
	stdp->std_getp = get_tailp;
	stdp->std_get_count += count;
	stdp->std_get_non_tcp_count += non_tcp_count;
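/*
 * Illustrative aside (not driver code): a minimal sketch of the reversal
 * step above.  Producers push at the head of the put list, so it reads
 * newest-first; reversing it restores submission order before it is
 * appended to the tail of the get list.  The helper name is hypothetical.
 */
static inline struct mbuf *
example_reverse_put_list(struct mbuf *put)
{
	struct mbuf *get = NULL;

	while (put != NULL) {
		struct mbuf *next = put->m_nextpkt;

		put->m_nextpkt = get;	/* relink in front of reversed list */
		get = put;
		put = next;
	}
	return (get);			/* oldest packet first */
}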
sfxge_tx_qreap(struct sfxge_txq *txq)
	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

	txq->reaped = txq->completed;

sfxge_tx_qlist_post(struct sfxge_txq *txq)
	unsigned int old_added;
	unsigned int block_level;

	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

	KASSERT(txq->n_pend_desc != 0, ("txq->n_pend_desc == 0"));
	KASSERT(txq->n_pend_desc <= txq->max_pkt_desc,
		("txq->n_pend_desc too large"));
	KASSERT(!txq->blocked, ("txq->blocked"));

	old_added = txq->added;

	/* Post the fragment list. */
	rc = efx_tx_qdesc_post(txq->common, txq->pend_desc, txq->n_pend_desc,
			       txq->reaped, &txq->added);
	KASSERT(rc == 0, ("efx_tx_qdesc_post() failed"));

	/*
	 * If efx_tx_qdesc_post() had to refragment, our information about
	 * which buffers to free may be associated with the wrong
	 * descriptors.
	 */
	KASSERT(txq->added - old_added == txq->n_pend_desc,
		("efx_tx_qdesc_post() refragmented descriptors"));

	level = txq->added - txq->reaped;
	KASSERT(level <= txq->entries, ("overfilled TX queue"));

	/* Clear the fragment list. */
	txq->n_pend_desc = 0;

	/*
	 * Set the block level to ensure there is space to generate a
	 * large number of descriptors for TSO.
	 */
	block_level = EFX_TXQ_LIMIT(txq->entries) - txq->max_pkt_desc;

	/* Have we reached the block level? */
	if (level < block_level)

	/* Reap, and check again. */
	level = txq->added - txq->reaped;
	if (level < block_level)

	/*
	 * Avoid a race with completion interrupt handling that could leave
	 * the queue blocked.
	 */
	level = txq->added - txq->reaped;
	if (level < block_level) {
static int sfxge_tx_queue_mbuf(struct sfxge_txq *txq, struct mbuf *mbuf)
	bus_dmamap_t *used_map;
	bus_dma_segment_t dma_seg[SFXGE_TX_MAPPING_MAX_SEG];
	struct sfxge_tx_mapping *stmp;
	uint16_t hw_cksum_flags_prev;
	uint16_t hw_vlan_tci_prev;

	KASSERT(!txq->blocked, ("txq->blocked"));

#if SFXGE_TX_PARSE_EARLY
	/*
	 * If software TSO is used, we still need to copy the packet header,
	 * even if we have already parsed it early, before enqueue.
	 */
	if ((mbuf->m_pkthdr.csum_flags & CSUM_TSO) &&
	    (txq->tso_fw_assisted == 0))
		prefetch_read_many(mbuf->m_data);
#else
	/*
	 * Prefetch the packet header since we need to parse it and extract
	 * the IP ID, TCP sequence number and flags.
	 */
	if (mbuf->m_pkthdr.csum_flags & CSUM_TSO)
		prefetch_read_many(mbuf->m_data);
#endif
	if (__predict_false(txq->init_state != SFXGE_TXQ_STARTED)) {

	/* Load the packet for DMA. */
	id = txq->added & txq->ptr_mask;
	stmp = &txq->stmp[id];
	rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag, stmp->map,
				     mbuf, dma_seg, &n_dma_seg, 0);
		struct mbuf *new_mbuf = m_collapse(mbuf, M_NOWAIT,
						   SFXGE_TX_MAPPING_MAX_SEG);
		if (new_mbuf == NULL)
		rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag,
					     dma_seg, &n_dma_seg, 0);

	/* Make the packet visible to the hardware. */
	bus_dmamap_sync(txq->packet_dma_tag, stmp->map, BUS_DMASYNC_PREWRITE);

	used_map = &stmp->map;

	hw_cksum_flags_prev = txq->hw_cksum_flags;
	hw_vlan_tci_prev = txq->hw_vlan_tci;

	/*
	 * The order of option descriptors, which are used for the VLAN tag
	 * and checksum offloads, may be important.  Changing checksum offload
	 * between the VLAN option and packet descriptors probably does not
	 * work.
	 */
	n_extra_descs = sfxge_tx_maybe_toggle_cksum_offload(txq, mbuf, &stmp);
	n_extra_descs += sfxge_tx_maybe_insert_tag(txq, mbuf, &stmp);
	if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) {
		rc = sfxge_tx_queue_tso(txq, mbuf, dma_seg, n_dma_seg,
		stmp = &txq->stmp[(rc - 1) & txq->ptr_mask];

		/*
		 * Add the mapping to the fragment list, and set flags
		 * for the buffer.
		 */
		desc = &txq->pend_desc[i + n_extra_descs];
		eop = (i == n_dma_seg - 1);
		efx_tx_qdesc_dma_create(txq->common,
		sfxge_next_stmp(txq, &stmp);

	txq->n_pend_desc = n_dma_seg + n_extra_descs;

	/*
	 * If the mapping required more than one descriptor
	 * then we need to associate the DMA map with the last
	 * descriptor, not the first.
	 */
	if (used_map != &stmp->map) {
		stmp->map = *used_map;

	stmp->flags = TX_BUF_UNMAP | TX_BUF_MBUF;

	/* Post the fragment list. */
	sfxge_tx_qlist_post(txq);

	txq->hw_vlan_tci = hw_vlan_tci_prev;
	txq->hw_cksum_flags = hw_cksum_flags_prev;
	bus_dmamap_unload(txq->packet_dma_tag, *used_map);

	/* Drop the packet on the floor. */
/*
 * Drain the deferred packet list into the transmit queue.
 */
sfxge_tx_qdpl_drain(struct sfxge_txq *txq)
	struct sfxge_softc *sc;
	struct sfxge_tx_dpl *stdp;
	struct mbuf *mbuf, *next;
	unsigned int non_tcp_count;

	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

	if (__predict_true(txq->init_state == SFXGE_TXQ_STARTED)) {
		prefetch_read_many(sc->enp);
		prefetch_read_many(txq->common);

	mbuf = stdp->std_get;
	count = stdp->std_get_count;
	non_tcp_count = stdp->std_get_non_tcp_count;

	if (count > stdp->std_get_hiwat)
		stdp->std_get_hiwat = count;

		KASSERT(mbuf != NULL, ("mbuf == NULL"));

		next = mbuf->m_nextpkt;
		mbuf->m_nextpkt = NULL;

		ETHER_BPF_MTAP(sc->ifnet, mbuf);	/* packet capture */

			prefetch_read_many(next);

		rc = sfxge_tx_queue_mbuf(txq, mbuf);
			non_tcp_count -= sfxge_is_mbuf_non_tcp(mbuf);

		/* Push the fragments to the hardware in batches. */
		if (txq->added - pushed >= SFXGE_TX_BATCH) {
			efx_tx_qpush(txq->common, txq->added, pushed);

		KASSERT(mbuf == NULL, ("mbuf != NULL"));
		KASSERT(non_tcp_count == 0,
			("inconsistent TCP/non-TCP detection"));
		stdp->std_get = NULL;
		stdp->std_get_count = 0;
		stdp->std_get_non_tcp_count = 0;
		stdp->std_getp = &stdp->std_get;

		stdp->std_get = mbuf;
		stdp->std_get_count = count;
		stdp->std_get_non_tcp_count = non_tcp_count;

	if (txq->added != pushed)
		efx_tx_qpush(txq->common, txq->added, pushed);

	KASSERT(txq->blocked || stdp->std_get_count == 0,
		("queue unblocked but count is non-zero"));
#define	SFXGE_TX_QDPL_PENDING(_txq)	((_txq)->dpl.std_put != 0)

/*
 * Service the deferred packet list.
 *
 * NOTE: drops the txq mutex!
 */
sfxge_tx_qdpl_service(struct sfxge_txq *txq)
	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

		if (SFXGE_TX_QDPL_PENDING(txq))
			sfxge_tx_qdpl_swizzle(txq);

		sfxge_tx_qdpl_drain(txq);

		SFXGE_TXQ_UNLOCK(txq);
	} while (SFXGE_TX_QDPL_PENDING(txq) &&
		 SFXGE_TXQ_TRYLOCK(txq));
/*
 * Put a packet on the deferred packet get-list.
 */
sfxge_tx_qdpl_put_locked(struct sfxge_txq *txq, struct mbuf *mbuf)
	struct sfxge_tx_dpl *stdp;

	KASSERT(mbuf->m_nextpkt == NULL, ("mbuf->m_nextpkt != NULL"));

	SFXGE_TXQ_LOCK_ASSERT_OWNED(txq);

	if (stdp->std_get_count >= stdp->std_get_max) {

	if (sfxge_is_mbuf_non_tcp(mbuf)) {
		if (stdp->std_get_non_tcp_count >=
		    stdp->std_get_non_tcp_max) {
			txq->get_non_tcp_overflow++;
		stdp->std_get_non_tcp_count++;

	*(stdp->std_getp) = mbuf;
	stdp->std_getp = &mbuf->m_nextpkt;
	stdp->std_get_count++;
/*
 * Put a packet on the deferred packet put-list.
 *
 * We overload the csum_data field in the mbuf to keep track of the put-list
 * length because there is no cheap alternative that avoids races.
 */
sfxge_tx_qdpl_put_unlocked(struct sfxge_txq *txq, struct mbuf *mbuf)
	struct sfxge_tx_dpl *stdp;
	volatile uintptr_t *putp;
	unsigned int put_count;

	KASSERT(mbuf->m_nextpkt == NULL, ("mbuf->m_nextpkt != NULL"));

	SFXGE_TXQ_LOCK_ASSERT_NOTOWNED(txq);

	putp = &stdp->std_put;
	new = (uintptr_t)mbuf;

			struct mbuf *mp = (struct mbuf *)old;

			put_count = mp->m_pkthdr.csum_data;

		if (put_count >= stdp->std_put_max) {
			atomic_add_long(&txq->put_overflow, 1);
		mbuf->m_pkthdr.csum_data = put_count + 1;
		mbuf->m_nextpkt = (void *)old;
	} while (atomic_cmpset_ptr(putp, old, new) == 0);
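/*
 * Illustrative aside (not driver code): the loop above is a classic
 * lock-free LIFO push.  In stand-alone C11-style pseudo-code, assuming a
 * hypothetical atomic head pointer:
 *
 *	do {
 *		old = atomic_load(&head);
 *		m->m_nextpkt = old;	(link to the current head)
 *	} while (!atomic_compare_exchange_weak(&head, &old, m));
 *
 * The list therefore reads newest-first, and the head mbuf's csum_data
 * carries the current list length so that producers can bound the list
 * without taking a lock.
 */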
/*
 * Called from if_transmit: try to grab the txq lock and, if that succeeds,
 * enqueue the packet on the get-list; otherwise push it onto the put-list
 * if there is space.
 */
sfxge_tx_packet_add(struct sfxge_txq *txq, struct mbuf *m)
	if (!SFXGE_LINK_UP(txq->sc)) {
		atomic_add_long(&txq->netdown_drops, 1);

	/*
	 * Try to grab the txq lock.  If we are able to get the lock,
	 * the packet will be appended to the "get list" of the deferred
	 * packet list.  Otherwise, it will be pushed on the "put list".
	 */
	if (SFXGE_TXQ_TRYLOCK(txq)) {
		/* First swizzle put-list to get-list to keep order. */
		sfxge_tx_qdpl_swizzle(txq);

		rc = sfxge_tx_qdpl_put_locked(txq, m);

		/* Try to service the list. */
		sfxge_tx_qdpl_service(txq);
		/* Lock has been dropped. */
		rc = sfxge_tx_qdpl_put_unlocked(txq, m);

		/*
		 * Try to grab the lock again.
		 *
		 * If we are able to get the lock, we need to process
		 * the deferred packet list.  If we are not able to get
		 * the lock, another thread is processing the list.
		 */
		if ((rc == 0) && SFXGE_TXQ_TRYLOCK(txq)) {
			sfxge_tx_qdpl_service(txq);
			/* Lock has been dropped. */

	SFXGE_TXQ_LOCK_ASSERT_NOTOWNED(txq);
sfxge_tx_qdpl_flush(struct sfxge_txq *txq)
	struct sfxge_tx_dpl *stdp = &txq->dpl;
	struct mbuf *mbuf, *next;

	sfxge_tx_qdpl_swizzle(txq);
	for (mbuf = stdp->std_get; mbuf != NULL; mbuf = next) {
		next = mbuf->m_nextpkt;

	stdp->std_get = NULL;
	stdp->std_get_count = 0;
	stdp->std_get_non_tcp_count = 0;
	stdp->std_getp = &stdp->std_get;

	SFXGE_TXQ_UNLOCK(txq);

sfxge_if_qflush(struct ifnet *ifp)
	struct sfxge_softc *sc;

	for (i = 0; i < sc->txq_count; i++)
		sfxge_tx_qdpl_flush(sc->txq[i]);
#if SFXGE_TX_PARSE_EARLY

/*
 * There is little space for user data in the mbuf pkthdr, so we use the
 * l*hlen fields, which are not otherwise used by the driver, to store
 * header offsets.  The fields are 8-bit, but that is fine: no header may
 * be longer than 255 bytes.
 */

#define	TSO_MBUF_PROTO(_mbuf)		((_mbuf)->m_pkthdr.PH_loc.sixteen[0])
/* We abuse l5hlen here because PH_loc can hold only 64 bits of data */
#define	TSO_MBUF_FLAGS(_mbuf)		((_mbuf)->m_pkthdr.l5hlen)
#define	TSO_MBUF_PACKETID(_mbuf)	((_mbuf)->m_pkthdr.PH_loc.sixteen[1])
#define	TSO_MBUF_SEQNUM(_mbuf)		((_mbuf)->m_pkthdr.PH_loc.thirtytwo[1])
static void sfxge_parse_tx_packet(struct mbuf *mbuf)
	struct ether_header *eh = mtod(mbuf, struct ether_header *);
	const struct tcphdr *th;
	struct tcphdr th_copy;

	/* Find the network protocol and header. */
	TSO_MBUF_PROTO(mbuf) = eh->ether_type;
	if (TSO_MBUF_PROTO(mbuf) == htons(ETHERTYPE_VLAN)) {
		struct ether_vlan_header *veh =
			mtod(mbuf, struct ether_vlan_header *);

		TSO_MBUF_PROTO(mbuf) = veh->evl_proto;
		mbuf->m_pkthdr.l2hlen = sizeof(*veh);
		mbuf->m_pkthdr.l2hlen = sizeof(*eh);

	/* Find the TCP header. */
	if (TSO_MBUF_PROTO(mbuf) == htons(ETHERTYPE_IP)) {
		const struct ip *iph =
			(const struct ip *)mtodo(mbuf, mbuf->m_pkthdr.l2hlen);

		KASSERT(iph->ip_p == IPPROTO_TCP,
			("TSO required on non-TCP packet"));
		mbuf->m_pkthdr.l3hlen = mbuf->m_pkthdr.l2hlen + 4 * iph->ip_hl;
		TSO_MBUF_PACKETID(mbuf) = iph->ip_id;
		KASSERT(TSO_MBUF_PROTO(mbuf) == htons(ETHERTYPE_IPV6),
			("TSO required on non-IP packet"));
		KASSERT(((const struct ip6_hdr *)
			 mtodo(mbuf, mbuf->m_pkthdr.l2hlen))->ip6_nxt ==
			("TSO required on non-TCP packet"));
		mbuf->m_pkthdr.l3hlen = mbuf->m_pkthdr.l2hlen +
					sizeof(struct ip6_hdr);
		TSO_MBUF_PACKETID(mbuf) = 0;

	KASSERT(mbuf->m_len >= mbuf->m_pkthdr.l3hlen,
		("network header is fragmented in mbuf"));

	/*
	 * We need the TCP header up to and including the flags field
	 * (th_win is the next field).
	 */
	if (mbuf->m_len < mbuf->m_pkthdr.l3hlen +
			  offsetof(struct tcphdr, th_win)) {
		m_copydata(mbuf, mbuf->m_pkthdr.l3hlen, sizeof(th_copy),
		th = (const struct tcphdr *)mtodo(mbuf, mbuf->m_pkthdr.l3hlen);

	mbuf->m_pkthdr.l4hlen = mbuf->m_pkthdr.l3hlen + 4 * th->th_off;
	TSO_MBUF_SEQNUM(mbuf) = ntohl(th->th_seq);

	/*
	 * These flags must not be duplicated.  RST should not be duplicated
	 * either, but the FreeBSD kernel generates TSO packets with the RST
	 * flag set, so its absence is not asserted.
	 */
	KASSERT(!(th->th_flags & (TH_URG | TH_SYN)),
		("incompatible TCP flag 0x%x on TSO packet",
		 th->th_flags & (TH_URG | TH_SYN)));
	TSO_MBUF_FLAGS(mbuf) = th->th_flags;
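/*
 * Worked example (illustrative): for an untagged IPv4/TCP frame with
 * minimal headers, the parse above yields l2hlen = 14 (Ethernet),
 * l3hlen = 14 + 20 = 34 (ip_hl = 5, so 4 * 5 = 20 bytes of IP header)
 * and l4hlen = 34 + 20 = 54 (th_off = 5).  All values fit comfortably
 * in the 8-bit l*hlen fields.
 */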
/*
 * TX start -- called by the stack.
 */
sfxge_if_transmit(struct ifnet *ifp, struct mbuf *m)
	struct sfxge_softc *sc;
	struct sfxge_txq *txq;

	sc = (struct sfxge_softc *)ifp->if_softc;

	/*
	 * Transmit may be called when the interface is up from the kernel's
	 * point of view but not yet up (bring-up in progress) from the
	 * driver's point of view, e.g. during link aggregation bring-up.
	 * It may also be called when the interface is up from the driver's
	 * point of view but already down from the kernel's point of view,
	 * e.g. while an interface shutdown is in progress.
	 */
	KASSERT((ifp->if_flags & IFF_UP) || (sc->if_flags & IFF_UP),
		("interface not up"));

	/* Pick the desired transmit queue. */
	if (sc->txq_dynamic_cksum_toggle_supported |
	    (m->m_pkthdr.csum_flags &
	     (CSUM_DELAY_DATA | CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_TSO))) {

		/*
		 * Select a TX queue which matches the corresponding
		 * RX queue for the hash, in order to assign both
		 * TX and RX parts of the flow to the same CPU.
		 */
		if (rss_m2bucket(m, &bucket_id) == 0)
			index = bucket_id %
				(sc->txq_count - (SFXGE_TXQ_NTYPES - 1));

		/* Check whether the flowid is set. */
		if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
			uint32_t hash = m->m_pkthdr.flowid;
			uint32_t idx = hash % nitems(sc->rx_indir_table);

			index = sc->rx_indir_table[idx];

#if SFXGE_TX_PARSE_EARLY
		if (m->m_pkthdr.csum_flags & CSUM_TSO)
			sfxge_parse_tx_packet(m);

		index += (sc->txq_dynamic_cksum_toggle_supported == B_FALSE) ?
			 SFXGE_TXQ_IP_TCP_UDP_CKSUM : 0;
		txq = sc->txq[index];
	} else if (m->m_pkthdr.csum_flags & CSUM_DELAY_IP) {
		txq = sc->txq[SFXGE_TXQ_IP_CKSUM];
		txq = sc->txq[SFXGE_TXQ_NON_CKSUM];

	rc = sfxge_tx_packet_add(txq, m);
/*
 * Software "TSO".  Not quite as good as doing it in hardware, but
 * still faster than segmenting in the stack.
 */
struct sfxge_tso_state {
	/* Output position */
	unsigned out_len;	/* Remaining length in current segment */
	unsigned seqnum;	/* Current sequence number */
	unsigned packet_space;	/* Remaining space in current packet */
	unsigned segs_space;	/* Remaining number of DMA segments
				   for the packet (FATSOv2 only) */

	uint64_t dma_addr;	/* DMA address of current position */
	unsigned in_len;	/* Remaining length in current mbuf */

	const struct mbuf *mbuf; /* Input mbuf (head of chain) */
	u_short protocol;	/* Network protocol (after VLAN decap) */
	ssize_t nh_off;		/* Offset of network header */
	ssize_t tcph_off;	/* Offset of TCP header */
	unsigned header_len;	/* Number of bytes of header */
	unsigned seg_size;	/* TCP segment size */
	int fw_assisted;	/* Use FW-assisted TSO */
	u_short packet_id;	/* IPv4 packet ID from the original packet */
	uint8_t tcp_flags;	/* TCP flags */
	efx_desc_t header_desc;	/* Precomputed header descriptor for
				   FW-assisted TSO */
#if !SFXGE_TX_PARSE_EARLY
static const struct ip *tso_iph(const struct sfxge_tso_state *tso)
	KASSERT(tso->protocol == htons(ETHERTYPE_IP),
		("tso_iph() in non-IPv4 state"));
	return (const struct ip *)(tso->mbuf->m_data + tso->nh_off);

static __unused const struct ip6_hdr *
tso_ip6h(const struct sfxge_tso_state *tso)
	KASSERT(tso->protocol == htons(ETHERTYPE_IPV6),
		("tso_ip6h() in non-IPv6 state"));
	return (const struct ip6_hdr *)(tso->mbuf->m_data + tso->nh_off);

static const struct tcphdr *tso_tcph(const struct sfxge_tso_state *tso)
	return (const struct tcphdr *)(tso->mbuf->m_data + tso->tcph_off);
/*
 * Size of preallocated TSO header buffers.  Larger blocks must be
 * allocated from the heap.
 */
#define	TSOH_STD_SIZE	128

/*
 * At most half the descriptors in the queue at any time will refer to
 * a TSO header buffer, since they must always be followed by a
 * payload descriptor referring to an mbuf.
 */
#define	TSOH_COUNT(_txq_entries)	((_txq_entries) / 2u)
#define	TSOH_PER_PAGE	(PAGE_SIZE / TSOH_STD_SIZE)
#define	TSOH_PAGE_COUNT(_txq_entries)	\
	howmany(TSOH_COUNT(_txq_entries), TSOH_PER_PAGE)
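/*
 * Worked example (illustrative): assuming 4 KiB pages, TSOH_PER_PAGE =
 * 4096 / 128 = 32 header buffers per page.  For a 1024-entry Tx queue,
 * TSOH_COUNT(1024) = 512 buffers, so TSOH_PAGE_COUNT(1024) =
 * howmany(512, 32) = 16 pages are preallocated by tso_init() below.
 */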
static int tso_init(struct sfxge_txq *txq)
	struct sfxge_softc *sc = txq->sc;
	unsigned int tsoh_page_count = TSOH_PAGE_COUNT(sc->txq_entries);

	/* Allocate TSO header buffers. */
	txq->tsoh_buffer = malloc(tsoh_page_count * sizeof(txq->tsoh_buffer[0]),

	for (i = 0; i < tsoh_page_count; i++) {
		rc = sfxge_dma_alloc(sc, PAGE_SIZE, &txq->tsoh_buffer[i]);

		sfxge_dma_free(&txq->tsoh_buffer[i]);
	free(txq->tsoh_buffer, M_SFXGE);
	txq->tsoh_buffer = NULL;

static void tso_fini(struct sfxge_txq *txq)
	if (txq->tsoh_buffer != NULL) {
		for (i = 0; i < TSOH_PAGE_COUNT(txq->sc->txq_entries); i++)
			sfxge_dma_free(&txq->tsoh_buffer[i]);
		free(txq->tsoh_buffer, M_SFXGE);
static void tso_start(struct sfxge_txq *txq, struct sfxge_tso_state *tso,
		      const bus_dma_segment_t *hdr_dma_seg,
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(txq->sc->enp);
#if !SFXGE_TX_PARSE_EARLY
	struct ether_header *eh = mtod(mbuf, struct ether_header *);
	const struct tcphdr *th;
	struct tcphdr th_copy;
#endif

	tso->fw_assisted = txq->tso_fw_assisted;

	/* Find the network protocol and header. */
#if !SFXGE_TX_PARSE_EARLY
	tso->protocol = eh->ether_type;
	if (tso->protocol == htons(ETHERTYPE_VLAN)) {
		struct ether_vlan_header *veh =
			mtod(mbuf, struct ether_vlan_header *);

		tso->protocol = veh->evl_proto;
		tso->nh_off = sizeof(*veh);
		tso->nh_off = sizeof(*eh);
#else
	tso->protocol = TSO_MBUF_PROTO(mbuf);
	tso->nh_off = mbuf->m_pkthdr.l2hlen;
	tso->tcph_off = mbuf->m_pkthdr.l3hlen;
	tso->packet_id = ntohs(TSO_MBUF_PACKETID(mbuf));
#endif

#if !SFXGE_TX_PARSE_EARLY
	/* Find the TCP header. */
	if (tso->protocol == htons(ETHERTYPE_IP)) {
		KASSERT(tso_iph(tso)->ip_p == IPPROTO_TCP,
			("TSO required on non-TCP packet"));
		tso->tcph_off = tso->nh_off + 4 * tso_iph(tso)->ip_hl;
		tso->packet_id = ntohs(tso_iph(tso)->ip_id);
		KASSERT(tso->protocol == htons(ETHERTYPE_IPV6),
			("TSO required on non-IP packet"));
		KASSERT(tso_ip6h(tso)->ip6_nxt == IPPROTO_TCP,
			("TSO required on non-TCP packet"));
		tso->tcph_off = tso->nh_off + sizeof(struct ip6_hdr);
#endif

	if (tso->fw_assisted &&
	    __predict_false(tso->tcph_off >
			    encp->enc_tx_tso_tcp_header_offset_limit)) {
		tso->fw_assisted = 0;

#if !SFXGE_TX_PARSE_EARLY
	KASSERT(mbuf->m_len >= tso->tcph_off,
		("network header is fragmented in mbuf"));
	/*
	 * We need the TCP header up to and including the flags field
	 * (th_win is the next field).
	 */
	if (mbuf->m_len < tso->tcph_off + offsetof(struct tcphdr, th_win)) {
		m_copydata(tso->mbuf, tso->tcph_off, sizeof(th_copy),

	tso->header_len = tso->tcph_off + 4 * th->th_off;
#else
	tso->header_len = mbuf->m_pkthdr.l4hlen;
#endif

	tso->seg_size = mbuf->m_pkthdr.tso_segsz;

#if !SFXGE_TX_PARSE_EARLY
	tso->seqnum = ntohl(th->th_seq);

	/*
	 * These flags must not be duplicated.  RST should not be duplicated
	 * either, but the FreeBSD kernel generates TSO packets with the RST
	 * flag set, so its absence is not asserted.
	 */
	KASSERT(!(th->th_flags & (TH_URG | TH_SYN)),
		("incompatible TCP flag 0x%x on TSO packet",
		 th->th_flags & (TH_URG | TH_SYN)));
	tso->tcp_flags = th->th_flags;
#else
	tso->seqnum = TSO_MBUF_SEQNUM(mbuf);
	tso->tcp_flags = TSO_MBUF_FLAGS(mbuf);
#endif

	tso->out_len = mbuf->m_pkthdr.len - tso->header_len;

	if (tso->fw_assisted) {
		if (hdr_dma_seg->ds_len >= tso->header_len)
			efx_tx_qdesc_dma_create(txq->common,
						hdr_dma_seg->ds_addr,
			tso->fw_assisted = 0;
/*
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 *
 * Form descriptors for the current fragment, until we reach the end of
 * the fragment or end-of-packet.
 */
static void tso_fill_packet_with_fragment(struct sfxge_txq *txq,
					  struct sfxge_tso_state *tso)
	uint64_t dma_addr = tso->dma_addr;

	if (tso->in_len == 0 || tso->packet_space == 0)

	KASSERT(tso->in_len > 0, ("TSO input length went negative"));
	KASSERT(tso->packet_space > 0, ("TSO packet space went negative"));

	if (tso->fw_assisted & SFXGE_FATSOV2) {
		if (n < tso->packet_space) {
			tso->packet_space -= n;
			tso->packet_space = tso->seg_size -
				(n - tso->packet_space) % tso->seg_size;
			EFX_TX_FATSOV2_DMA_SEGS_PER_PKT_MAX - 1 -
			(tso->packet_space != tso->seg_size);
		n = min(tso->in_len, tso->packet_space);
		tso->packet_space -= n;

	/*
	 * It is OK to use binary OR below to avoid extra branching
	 * since all conditions can safely be evaluated.
	 */
	eop = (tso->out_len == 0) | (tso->packet_space == 0) |
	      (tso->segs_space == 0);

	desc = &txq->pend_desc[txq->n_pend_desc++];
	efx_tx_qdesc_dma_create(txq->common, dma_addr, n, eop, desc);
/* Callback from bus_dmamap_load() for long TSO headers. */
static void tso_map_long_header(void *dma_addr_ret,
				bus_dma_segment_t *segs, int nseg,
	*(uint64_t *)dma_addr_ret = ((__predict_true(error == 0) &&
				      __predict_true(nseg == 1)) ?
/*
 * tso_start_new_packet - generate a new header and prepare for the new packet
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or an error code if we failed to allocate the header.
 */
static int tso_start_new_packet(struct sfxge_txq *txq,
				struct sfxge_tso_state *tso,
	unsigned int id = *idp;
	struct tcphdr *tsoh_th;

	if (tso->fw_assisted) {
		if (tso->fw_assisted & SFXGE_FATSOV2) {
			/* Add 2 FATSOv2 option descriptors. */
			desc = &txq->pend_desc[txq->n_pend_desc];
			efx_tx_qdesc_tso2_create(txq->common,
						 EFX_TX_FATSOV2_OPT_NDESCS);
			desc += EFX_TX_FATSOV2_OPT_NDESCS;
			txq->n_pend_desc += EFX_TX_FATSOV2_OPT_NDESCS;
			KASSERT(txq->stmp[id].flags == 0,
				("stmp flags are not 0"));
			id = (id + EFX_TX_FATSOV2_OPT_NDESCS) & txq->ptr_mask;
				EFX_TX_FATSOV2_DMA_SEGS_PER_PKT_MAX - 1;
			uint8_t tcp_flags = tso->tcp_flags;

			if (tso->out_len > tso->seg_size)
				tcp_flags &= ~(TH_FIN | TH_PUSH);

			/* Add FATSOv1 option descriptor. */
			desc = &txq->pend_desc[txq->n_pend_desc++];
			efx_tx_qdesc_tso_create(txq->common,
			KASSERT(txq->stmp[id].flags == 0,
				("stmp flags are not 0"));
			id = (id + 1) & txq->ptr_mask;

			tso->seqnum += tso->seg_size;
			tso->segs_space = UINT_MAX;

		/* Header DMA descriptor */
		*desc = tso->header_desc;
		KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0"));
		id = (id + 1) & txq->ptr_mask;

		/* Allocate a DMA-mapped header buffer. */
		if (__predict_true(tso->header_len <= TSOH_STD_SIZE)) {
			unsigned int page_index = (id / 2) / TSOH_PER_PAGE;
			unsigned int buf_index = (id / 2) % TSOH_PER_PAGE;

			header = (txq->tsoh_buffer[page_index].esm_base +
				  buf_index * TSOH_STD_SIZE);
			dma_addr = (txq->tsoh_buffer[page_index].esm_addr +
				    buf_index * TSOH_STD_SIZE);
			map = txq->tsoh_buffer[page_index].esm_map;

			KASSERT(txq->stmp[id].flags == 0,
				("stmp flags are not 0"));
			struct sfxge_tx_mapping *stmp = &txq->stmp[id];

			/* We cannot use bus_dmamem_alloc() as that may sleep */
			header = malloc(tso->header_len, M_SFXGE, M_NOWAIT);
			if (__predict_false(!header))
			rc = bus_dmamap_load(txq->packet_dma_tag, stmp->map,
					     header, tso->header_len,
					     tso_map_long_header, &dma_addr,
			if (__predict_false(dma_addr == 0)) {
				/* Succeeded but got >1 segment */
				bus_dmamap_unload(txq->packet_dma_tag,
				free(header, M_SFXGE);

			txq->tso_long_headers++;
			stmp->u.heap_buf = header;
			stmp->flags = TX_BUF_UNMAP;

		tsoh_th = (struct tcphdr *)(header + tso->tcph_off);

		/* Copy and update the headers. */
		m_copydata(tso->mbuf, 0, tso->header_len, header);

		tsoh_th->th_seq = htonl(tso->seqnum);
		tso->seqnum += tso->seg_size;
		if (tso->out_len > tso->seg_size) {
			/* This packet will not finish the TSO burst. */
			ip_length = tso->header_len - tso->nh_off + tso->seg_size;
			tsoh_th->th_flags &= ~(TH_FIN | TH_PUSH);
			/* This packet will be the last in the TSO burst. */
			ip_length = tso->header_len - tso->nh_off + tso->out_len;

		if (tso->protocol == htons(ETHERTYPE_IP)) {
			struct ip *tsoh_iph = (struct ip *)(header + tso->nh_off);

			tsoh_iph->ip_len = htons(ip_length);
			/*
			 * XXX We should increment ip_id, but FreeBSD doesn't
			 * currently allocate extra IDs for multiple segments.
			 */
			struct ip6_hdr *tsoh_iph =
				(struct ip6_hdr *)(header + tso->nh_off);

			tsoh_iph->ip6_plen = htons(ip_length - sizeof(*tsoh_iph));

		/* Make the header visible to the hardware. */
		bus_dmamap_sync(txq->packet_dma_tag, map, BUS_DMASYNC_PREWRITE);

		/* Form a descriptor for this header. */
		desc = &txq->pend_desc[txq->n_pend_desc++];
		efx_tx_qdesc_dma_create(txq->common,
		id = (id + 1) & txq->ptr_mask;

		tso->segs_space = UINT_MAX;

	tso->packet_space = tso->seg_size;
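/*
 * Worked example (illustrative): for a 4344-byte TCP payload with
 * seg_size (MSS) = 1448, the loop in sfxge_tx_queue_tso() below emits
 * three packets of 1448 bytes each.  Every packet gets a fresh copy of
 * the headers, with th_seq advanced by seg_size each time, and
 * TH_FIN/TH_PUSH cleared on all but the final packet.
 */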
sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf,
		   const bus_dma_segment_t *dma_seg, int n_dma_seg,
	struct sfxge_tso_state tso;
	unsigned skipped = 0;

	tso_start(txq, &tso, dma_seg, mbuf);

	while (dma_seg->ds_len + skipped <= tso.header_len) {
		skipped += dma_seg->ds_len;
	KASSERT(n_dma_seg, ("no payload found in TSO packet"));

	tso.in_len = dma_seg->ds_len - (tso.header_len - skipped);
	tso.dma_addr = dma_seg->ds_addr + (tso.header_len - skipped);

	id = (txq->added + n_extra_descs) & txq->ptr_mask;
	if (__predict_false(tso_start_new_packet(txq, &tso, &id)))

		tso_fill_packet_with_fragment(txq, &tso);
		/* Exactly one DMA descriptor is added. */
		KASSERT(txq->stmp[id].flags == 0, ("stmp flags are not 0"));
		id = (id + 1) & txq->ptr_mask;

		/* Move onto the next fragment? */
		if (tso.in_len == 0) {
			tso.in_len = dma_seg->ds_len;
			tso.dma_addr = dma_seg->ds_addr;

		/* End of packet? */
		if ((tso.packet_space == 0) | (tso.segs_space == 0)) {
			unsigned int n_fatso_opt_desc =
				(tso.fw_assisted & SFXGE_FATSOV2) ?
				EFX_TX_FATSOV2_OPT_NDESCS :
				(tso.fw_assisted & SFXGE_FATSOV1) ? 1 : 0;

			/*
			 * If the queue is now full due to tiny MSS,
			 * or we can't create another header, discard
			 * the remainder of the input mbuf but do not
			 * roll back the work we have done.
			 */
			if (txq->n_pend_desc + n_fatso_opt_desc +
			    1 /* header */ + n_dma_seg > txq->max_pkt_desc) {
				txq->tso_pdrop_too_many++;
			if (__predict_false(tso_start_new_packet(txq, &tso,
				txq->tso_pdrop_no_rsrc++;
sfxge_tx_qunblock(struct sfxge_txq *txq)
	struct sfxge_softc *sc;
	struct sfxge_evq *evq;

	evq = sc->evq[txq->evq_index];

	SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);

	if (__predict_false(txq->init_state != SFXGE_TXQ_STARTED))

	SFXGE_TXQ_LOCK(txq);

	level = txq->added - txq->completed;
	if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries)) {
		/* reaped must be in sync with blocked */
		sfxge_tx_qreap(txq);

	sfxge_tx_qdpl_service(txq);
	/* Note: lock has been dropped. */

sfxge_tx_qflush_done(struct sfxge_txq *txq)
	txq->flush_state = SFXGE_FLUSH_DONE;
sfxge_tx_qstop(struct sfxge_softc *sc, unsigned int index)
	struct sfxge_txq *txq;
	struct sfxge_evq *evq;

	SFXGE_ADAPTER_LOCK_ASSERT_OWNED(sc);

	txq = sc->txq[index];
	evq = sc->evq[txq->evq_index];

	SFXGE_EVQ_LOCK(evq);
	SFXGE_TXQ_LOCK(txq);

	KASSERT(txq->init_state == SFXGE_TXQ_STARTED,
		("txq->init_state != SFXGE_TXQ_STARTED"));

	txq->init_state = SFXGE_TXQ_INITIALIZED;

	if (txq->flush_state != SFXGE_FLUSH_DONE) {
		txq->flush_state = SFXGE_FLUSH_PENDING;

		SFXGE_EVQ_UNLOCK(evq);
		SFXGE_TXQ_UNLOCK(txq);

		/* Flush the transmit queue. */
		if (efx_tx_qflush(txq->common) != 0) {
			log(LOG_ERR, "%s: Flushing Tx queue %u failed\n",
			    device_get_nameunit(sc->dev), index);
			txq->flush_state = SFXGE_FLUSH_DONE;

			/* Spin for 100ms. */
				if (txq->flush_state != SFXGE_FLUSH_PENDING)
			} while (++count < 20);

		SFXGE_EVQ_LOCK(evq);
		SFXGE_TXQ_LOCK(txq);

		KASSERT(txq->flush_state != SFXGE_FLUSH_FAILED,
			("txq->flush_state == SFXGE_FLUSH_FAILED"));

		if (txq->flush_state != SFXGE_FLUSH_DONE) {
			log(LOG_ERR, "%s: Cannot flush Tx queue %u\n",
			    device_get_nameunit(sc->dev), index);
			txq->flush_state = SFXGE_FLUSH_DONE;

	txq->pending = txq->added;

	sfxge_tx_qcomplete(txq, evq);
	KASSERT(txq->completed == txq->added,
		("txq->completed != txq->added"));

	sfxge_tx_qreap(txq);
	KASSERT(txq->reaped == txq->completed,
		("txq->reaped != txq->completed"));

	/* Destroy the common code transmit queue. */
	efx_tx_qdestroy(txq->common);

	efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
			       EFX_TXQ_NBUFS(sc->txq_entries));

	txq->hw_cksum_flags = 0;

	SFXGE_EVQ_UNLOCK(evq);
	SFXGE_TXQ_UNLOCK(txq);
/*
 * Estimate the maximum number of Tx descriptors required for a TSO packet.
 * With a minimum MSS and maximum mbuf length we might need more (even
 * more than a ring-ful of descriptors), but this should not happen in
 * practice except due to a deliberate attack.  In that case we will
 * truncate the output at a packet boundary.
 */
sfxge_tx_max_pkt_desc(const struct sfxge_softc *sc, enum sfxge_txq_type type,
		      unsigned int tso_fw_assisted)
	/* One descriptor for every input fragment */
	unsigned int max_descs = SFXGE_TX_MAPPING_MAX_SEG;
	unsigned int sw_tso_max_descs;
	unsigned int fa_tso_v1_max_descs = 0;
	unsigned int fa_tso_v2_max_descs = 0;

	/* A checksum offload Tx option descriptor may be required. */
	if (sc->txq_dynamic_cksum_toggle_supported)

	/* A VLAN tagging Tx option descriptor may be required. */
	if (efx_nic_cfg_get(sc->enp)->enc_hw_tx_insert_vlan_enabled)

	if (type == SFXGE_TXQ_IP_TCP_UDP_CKSUM) {
		/*
		 * Plus a header and a payload descriptor for each output
		 * segment, minus one since the header fragment is already
		 * counted.  Even if FATSO is used, we should be ready to
		 * fall back to doing it in the driver.
		 */
		sw_tso_max_descs = SFXGE_TSO_MAX_SEGS * 2 - 1;

		/*
		 * FW-assisted TSOv1 requires one more descriptor per
		 * segment in comparison to SW TSO.
		 */
		if (tso_fw_assisted & SFXGE_FATSOV1)
			fa_tso_v1_max_descs =
				sw_tso_max_descs + SFXGE_TSO_MAX_SEGS;

		/*
		 * FW-assisted TSOv2 requires 3 extra descriptors per
		 * superframe (2 FATSO option descriptors plus a header
		 * descriptor), where a superframe is limited by the number
		 * of DMA fetches allowed per packet.  The first packet
		 * header is already counted.
		 */
		if (tso_fw_assisted & SFXGE_FATSOV2) {
			fa_tso_v2_max_descs =
				howmany(SFXGE_TX_MAPPING_MAX_SEG,
					EFX_TX_FATSOV2_DMA_SEGS_PER_PKT_MAX - 1) *
				(EFX_TX_FATSOV2_OPT_NDESCS + 1) - 1;

		max_descs += MAX(sw_tso_max_descs,
				 MAX(fa_tso_v1_max_descs, fa_tso_v2_max_descs));
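/*
 * Worked example (illustrative, with hypothetical values): if
 * SFXGE_TX_MAPPING_MAX_SEG were 40 and SFXGE_TSO_MAX_SEGS were 26, then
 * sw_tso_max_descs = 26 * 2 - 1 = 51 and fa_tso_v1_max_descs =
 * 51 + 26 = 77, so a TSO-capable queue would reserve 40 + 77 descriptors
 * plus any option descriptors counted above.  The real constants live in
 * the driver and efx headers.
 */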
sfxge_tx_qstart(struct sfxge_softc *sc, unsigned int index)
	struct sfxge_txq *txq;
	unsigned int tso_fw_assisted;
	struct sfxge_evq *evq;
	unsigned int desc_index;

	SFXGE_ADAPTER_LOCK_ASSERT_OWNED(sc);

	txq = sc->txq[index];
	evq = sc->evq[txq->evq_index];

	KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED,
		("txq->init_state != SFXGE_TXQ_INITIALIZED"));
	KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
		("evq->init_state != SFXGE_EVQ_STARTED"));

	/* Program the buffer table. */
	if ((rc = efx_sram_buf_tbl_set(sc->enp, txq->buf_base_id, esmp,
				       EFX_TXQ_NBUFS(sc->txq_entries))) != 0)

	/* Determine the kind of queue we are creating. */
	tso_fw_assisted = 0;
	switch (txq->type) {
	case SFXGE_TXQ_NON_CKSUM:
	case SFXGE_TXQ_IP_CKSUM:
		flags = EFX_TXQ_CKSUM_IPV4;
	case SFXGE_TXQ_IP_TCP_UDP_CKSUM:
		flags = EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP;
		tso_fw_assisted = sc->tso_fw_assisted;
		if (tso_fw_assisted & SFXGE_FATSOV2)
			flags |= EFX_TXQ_FATSOV2;
	default:
		KASSERT(0, ("Impossible TX queue"));

	label = (sc->txq_dynamic_cksum_toggle_supported) ? 0 : txq->type;

	/* Create the common code transmit queue. */
	if ((rc = efx_tx_qcreate(sc->enp, index, label, esmp,
	    sc->txq_entries, txq->buf_base_id, flags, evq->common,
	    &txq->common, &desc_index)) != 0) {
		/* Retry if no FATSOv2 resources, otherwise fail. */
		if ((rc != ENOSPC) || (~flags & EFX_TXQ_FATSOV2))

		/* Looks like all FATSOv2 contexts are used. */
		flags &= ~EFX_TXQ_FATSOV2;
		tso_fw_assisted &= ~SFXGE_FATSOV2;
		if ((rc = efx_tx_qcreate(sc->enp, index, label, esmp,
		    sc->txq_entries, txq->buf_base_id, flags, evq->common,
		    &txq->common, &desc_index)) != 0)

	/* Initialise queue descriptor indexes. */
	txq->added = txq->pending = txq->completed = txq->reaped = desc_index;

	SFXGE_TXQ_LOCK(txq);

	/* Enable the transmit queue. */
	efx_tx_qenable(txq->common);

	txq->init_state = SFXGE_TXQ_STARTED;
	txq->flush_state = SFXGE_FLUSH_REQUIRED;
	txq->tso_fw_assisted = tso_fw_assisted;

	txq->max_pkt_desc = sfxge_tx_max_pkt_desc(sc, txq->type,

	txq->hw_vlan_tci = 0;

	txq->hw_cksum_flags = flags &
			      (EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP);

	SFXGE_TXQ_UNLOCK(txq);

	efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
			       EFX_TXQ_NBUFS(sc->txq_entries));
sfxge_tx_stop(struct sfxge_softc *sc)
	index = sc->txq_count;
	while (--index >= 0)
		sfxge_tx_qstop(sc, index);

	/* Tear down the transmit module. */
	efx_tx_fini(sc->enp);

sfxge_tx_start(struct sfxge_softc *sc)
	/* Initialize the common code transmit module. */
	if ((rc = efx_tx_init(sc->enp)) != 0)

	for (index = 0; index < sc->txq_count; index++) {
		if ((rc = sfxge_tx_qstart(sc, index)) != 0)

	while (--index >= 0)
		sfxge_tx_qstop(sc, index);

	efx_tx_fini(sc->enp);

sfxge_txq_stat_init(struct sfxge_txq *txq, struct sysctl_oid *txq_node)
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(txq->sc->dev);
	struct sysctl_oid *stat_node;

	stat_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(txq_node), OID_AUTO,
				    "stats", CTLFLAG_RD, NULL,
				    "Tx queue statistics");
	if (stat_node == NULL)

	for (id = 0; id < nitems(sfxge_tx_stats); id++) {
		    ctx, SYSCTL_CHILDREN(stat_node), OID_AUTO,
		    sfxge_tx_stats[id].name, CTLFLAG_RD | CTLFLAG_STATS,
		    (unsigned long *)((caddr_t)txq + sfxge_tx_stats[id].offset),
/*
 * Destroy a transmit queue.
 */
sfxge_tx_qfini(struct sfxge_softc *sc, unsigned int index)
	struct sfxge_txq *txq;

	txq = sc->txq[index];

	KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED,
		("txq->init_state != SFXGE_TXQ_INITIALIZED"));

	if (txq->type == SFXGE_TXQ_IP_TCP_UDP_CKSUM)

	/* Free the context arrays. */
	free(txq->pend_desc, M_SFXGE);
	nmaps = sc->txq_entries;
	while (nmaps-- != 0)
		bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map);
	free(txq->stmp, M_SFXGE);

	/* Release the DMA memory mapping. */
	sfxge_dma_free(&txq->mem);

	sc->txq[index] = NULL;

	SFXGE_TXQ_LOCK_DESTROY(txq);
sfxge_tx_qinit(struct sfxge_softc *sc, unsigned int txq_index,
	       enum sfxge_txq_type type, unsigned int evq_index)
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sc->enp);
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
	struct sysctl_oid *txq_node;
	struct sfxge_txq *txq;
	struct sfxge_evq *evq;
	struct sfxge_tx_dpl *stdp;
	struct sysctl_oid *dpl_node;

	txq = malloc(sizeof(struct sfxge_txq), M_SFXGE, M_ZERO | M_WAITOK);
	txq->entries = sc->txq_entries;
	txq->ptr_mask = txq->entries - 1;

	sc->txq[txq_index] = txq;

	evq = sc->evq[evq_index];

	/* Allocate and zero DMA space for the descriptor ring. */
	if ((rc = sfxge_dma_alloc(sc, EFX_TXQ_SIZE(sc->txq_entries), esmp)) != 0)

	/* Allocate buffer table entries. */
	sfxge_sram_buf_tbl_alloc(sc, EFX_TXQ_NBUFS(sc->txq_entries),

	/* Create a DMA tag for packet mappings. */
	if (bus_dma_tag_create(sc->parent_dma_tag, 1,
	    encp->enc_tx_dma_desc_boundary,
	    MIN(0x3FFFFFFFFFFFUL, BUS_SPACE_MAXADDR), BUS_SPACE_MAXADDR, NULL,
	    NULL, 0x11000, SFXGE_TX_MAPPING_MAX_SEG,
	    encp->enc_tx_dma_desc_size_max, 0, NULL, NULL,
	    &txq->packet_dma_tag) != 0) {
		device_printf(sc->dev, "Couldn't allocate txq DMA tag\n");

	/* Allocate pending descriptor array for batching writes. */
	txq->pend_desc = malloc(sizeof(efx_desc_t) * sc->txq_entries,
				M_SFXGE, M_ZERO | M_WAITOK);

	/* Allocate and initialise mbuf DMA mapping array. */
	txq->stmp = malloc(sizeof(struct sfxge_tx_mapping) * sc->txq_entries,
			   M_SFXGE, M_ZERO | M_WAITOK);
	for (nmaps = 0; nmaps < sc->txq_entries; nmaps++) {
		rc = bus_dmamap_create(txq->packet_dma_tag, 0,
				       &txq->stmp[nmaps].map);

	snprintf(name, sizeof(name), "%u", txq_index);
	txq_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(sc->txqs_node),
				   OID_AUTO, name, CTLFLAG_RD, NULL, "");
	if (txq_node == NULL) {

	if (type == SFXGE_TXQ_IP_TCP_UDP_CKSUM &&
	    (rc = tso_init(txq)) != 0)

	/* Initialize the deferred packet list. */
	stdp->std_put_max = sfxge_tx_dpl_put_max;
	stdp->std_get_max = sfxge_tx_dpl_get_max;
	stdp->std_get_non_tcp_max = sfxge_tx_dpl_get_non_tcp_max;
	stdp->std_getp = &stdp->std_get;

	SFXGE_TXQ_LOCK_INIT(txq, device_get_nameunit(sc->dev), txq_index);

	dpl_node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(txq_node), OID_AUTO,
				   "dpl", CTLFLAG_RD, NULL,
				   "Deferred packet list statistics");
	if (dpl_node == NULL) {

	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO,
			"get_count", CTLFLAG_RD | CTLFLAG_STATS,
			&stdp->std_get_count, 0, "");
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO,
			"get_non_tcp_count", CTLFLAG_RD | CTLFLAG_STATS,
			&stdp->std_get_non_tcp_count, 0, "");
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO,
			"get_hiwat", CTLFLAG_RD | CTLFLAG_STATS,
			&stdp->std_get_hiwat, 0, "");
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(dpl_node), OID_AUTO,
			"put_hiwat", CTLFLAG_RD | CTLFLAG_STATS,
			&stdp->std_put_hiwat, 0, "");

	rc = sfxge_txq_stat_init(txq, txq_node);
		goto fail_txq_stat_init;

	txq->evq_index = evq_index;
	txq->init_state = SFXGE_TXQ_INITIALIZED;

	free(txq->pend_desc, M_SFXGE);
	while (nmaps-- != 0)
		bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map);
	free(txq->stmp, M_SFXGE);
	bus_dma_tag_destroy(txq->packet_dma_tag);

	sfxge_dma_free(esmp);
sfxge_tx_stat_handler(SYSCTL_HANDLER_ARGS)
	struct sfxge_softc *sc = arg1;
	unsigned int id = arg2;

	/* Sum across all TX queues. */
	for (index = 0; index < sc->txq_count; index++)
		sum += *(unsigned long *)((caddr_t)sc->txq[index] +
					  sfxge_tx_stats[id].offset);

	return (SYSCTL_OUT(req, &sum, sizeof(sum)));

sfxge_tx_stat_init(struct sfxge_softc *sc)
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
	struct sysctl_oid_list *stat_list;

	stat_list = SYSCTL_CHILDREN(sc->stats_node);

	for (id = 0; id < nitems(sfxge_tx_stats); id++) {
		    OID_AUTO, sfxge_tx_stats[id].name,
		    CTLTYPE_ULONG | CTLFLAG_RD,
		    sc, id, sfxge_tx_stat_handler, "LU",

sfxge_tx_get_drops(struct sfxge_softc *sc)
	struct sfxge_txq *txq;

	/* Sum across all TX queues. */
	for (index = 0; index < sc->txq_count; index++) {
		txq = sc->txq[index];

		/*
		 * In theory, txq->put_overflow and txq->netdown_drops
		 * should be read with atomic operations and the others
		 * should be read under the txq lock, but these are only
		 * statistics.
		 */
		drops += txq->drops + txq->get_overflow +
			 txq->get_non_tcp_overflow +
			 txq->put_overflow + txq->netdown_drops +
			 txq->tso_pdrop_too_many + txq->tso_pdrop_no_rsrc;
sfxge_tx_fini(struct sfxge_softc *sc)
	index = sc->txq_count;
	while (--index >= 0)
		sfxge_tx_qfini(sc, index);

sfxge_tx_init(struct sfxge_softc *sc)
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sc->enp);
	struct sfxge_intr *intr;

	KASSERT(intr->state == SFXGE_INTR_INITIALIZED,
		("intr->state != SFXGE_INTR_INITIALIZED"));

	if (sfxge_tx_dpl_get_max <= 0) {
		log(LOG_ERR, "%s=%d must be greater than 0",
		    SFXGE_PARAM_TX_DPL_GET_MAX, sfxge_tx_dpl_get_max);
		goto fail_tx_dpl_get_max;

	if (sfxge_tx_dpl_get_non_tcp_max <= 0) {
		log(LOG_ERR, "%s=%d must be greater than 0",
		    SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX,
		    sfxge_tx_dpl_get_non_tcp_max);
		goto fail_tx_dpl_get_non_tcp_max;

	if (sfxge_tx_dpl_put_max < 0) {
		log(LOG_ERR, "%s=%d must be greater than or equal to 0",
		    SFXGE_PARAM_TX_DPL_PUT_MAX, sfxge_tx_dpl_put_max);
		goto fail_tx_dpl_put_max;

	sc->txq_count = SFXGE_EVQ0_N_TXQ(sc) - 1 + sc->intr.n_alloc;

	sc->tso_fw_assisted = sfxge_tso_fw_assisted;
	if ((~encp->enc_features & EFX_FEATURE_FW_ASSISTED_TSO) ||
	    (!encp->enc_fw_assisted_tso_enabled))
		sc->tso_fw_assisted &= ~SFXGE_FATSOV1;
	if ((~encp->enc_features & EFX_FEATURE_FW_ASSISTED_TSO_V2) ||
	    (!encp->enc_fw_assisted_tso_v2_enabled))
		sc->tso_fw_assisted &= ~SFXGE_FATSOV2;

	sc->txqs_node = SYSCTL_ADD_NODE(
		device_get_sysctl_ctx(sc->dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)),
		OID_AUTO, "txq", CTLFLAG_RD, NULL, "Tx queues");
	if (sc->txqs_node == NULL) {

	/* Initialize the transmit queues. */
	if (sc->txq_dynamic_cksum_toggle_supported == B_FALSE) {
		if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_NON_CKSUM,
		    SFXGE_TXQ_NON_CKSUM, 0)) != 0)

		if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_IP_CKSUM,
		    SFXGE_TXQ_IP_CKSUM, 0)) != 0)

	     index < sc->txq_count - SFXGE_EVQ0_N_TXQ(sc) + 1;
		if ((rc = sfxge_tx_qinit(sc, SFXGE_EVQ0_N_TXQ(sc) - 1 + index,
		    SFXGE_TXQ_IP_TCP_UDP_CKSUM, index)) != 0)

	sfxge_tx_stat_init(sc);

	while (--index >= 0)
		sfxge_tx_qfini(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index);

	sfxge_tx_qfini(sc, SFXGE_TXQ_IP_CKSUM);

	sfxge_tx_qfini(sc, SFXGE_TXQ_NON_CKSUM);

fail_tx_dpl_put_max:
fail_tx_dpl_get_non_tcp_max:
fail_tx_dpl_get_max: