 * Copyright (c) 2010-2011 Solarflare Communications, Inc.
 * This software was developed in part by Philip Paeps under contract for
 * Solarflare Communications, Inc.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
/* Theory of operation:
 *
 * Tx queues allocation and mapping
 *
 * One Tx queue with checksum offload enabled is allocated per Rx channel
 * (event queue).  In addition, 2 Tx queues (one without checksum offload
 * and one with IP checksum offload only) are allocated and bound to event
 * queue 0.  sfxge_txq_type is used as the Tx queue label.
 *
 * So the mapping from (event queue, label) to Tx queue index is:
 *	if the event queue index is 0, TxQ-index = TxQ-label (in [0..SFXGE_TXQ_NTYPES))
 *	else TxQ-index = SFXGE_TXQ_NTYPES + EvQ-index - 1
 * See sfxge_get_txq_by_label() in sfxge_ev.c
 */
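/*
 * Minimal sketch of the mapping described above, for exposition only (the
 * real lookup is sfxge_get_txq_by_label() in sfxge_ev.c; the helper name
 * below is hypothetical):
 *
 *	static unsigned int
 *	txq_index(unsigned int evq_index, enum sfxge_txq_type label)
 *	{
 *		if (evq_index == 0)
 *			return (label);
 *		return (SFXGE_TXQ_NTYPES + evq_index - 1);
 *	}
 *
 * For example, the three queues bound to event queue 0 occupy indices
 * 0 .. SFXGE_TXQ_NTYPES - 1, while the checksum-offload queue serviced by
 * event queue 3 sits at index SFXGE_TXQ_NTYPES + 2.
 */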
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>

#include "common/efx.h"

#include "sfxge.h"
#include "sfxge_tx.h"
/* Set the block level to ensure there is space to generate a
 * large number of descriptors for TSO.  With minimum MSS and
 * maximum mbuf length we might need more than a ring-ful of
 * descriptors, but this should not happen in practice except
 * due to deliberate attack.  In that case we will truncate
 * the output at a packet boundary.  Allow for a reasonable
 * minimum MSS of 512.
 */
#define SFXGE_TSO_MAX_DESC ((65535 / 512) * 2 + SFXGE_TX_MAPPING_MAX_SEG - 1)
#define SFXGE_TXQ_BLOCK_LEVEL(_entries) ((_entries) - SFXGE_TSO_MAX_DESC)
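/*
 * Rough arithmetic behind SFXGE_TSO_MAX_DESC (informal, derived from the
 * definition above): a maximal 65535-byte TSO payload at the minimum MSS of
 * 512 bytes splits into 65535 / 512 = 127 packets; each packet needs a
 * header descriptor plus at least one payload descriptor (hence the "* 2"),
 * and up to SFXGE_TX_MAPPING_MAX_SEG - 1 further descriptors cover payload
 * fragments that straddle packet boundaries.  The queue is considered
 * blocked once fewer than this many free entries remain.
 */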
#define SFXGE_PARAM_TX_DPL_GET_MAX	SFXGE_PARAM(tx_dpl_get_max)
static int sfxge_tx_dpl_get_max = SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT;
TUNABLE_INT(SFXGE_PARAM_TX_DPL_GET_MAX, &sfxge_tx_dpl_get_max);
SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_get_max, CTLFLAG_RDTUN,
	   &sfxge_tx_dpl_get_max, 0,
	   "Maximum number of packets in deferred packet get-list");

#define SFXGE_PARAM_TX_DPL_PUT_MAX	SFXGE_PARAM(tx_dpl_put_max)
static int sfxge_tx_dpl_put_max = SFXGE_TX_DPL_PUT_PKT_LIMIT_DEFAULT;
TUNABLE_INT(SFXGE_PARAM_TX_DPL_PUT_MAX, &sfxge_tx_dpl_put_max);
SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_put_max, CTLFLAG_RDTUN,
	   &sfxge_tx_dpl_put_max, 0,
	   "Maximum number of packets in deferred packet put-list");
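/*
 * Both limits are boot-time tunables (CTLFLAG_RDTUN).  Assuming SFXGE_PARAM()
 * expands to the usual "hw.sfxge." prefix, they could be raised from
 * /boot/loader.conf, for example:
 *
 *	hw.sfxge.tx_dpl_get_max="2048"
 *	hw.sfxge.tx_dpl_put_max="2048"
 *
 * The values above are only illustrative; the defaults come from
 * SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT and SFXGE_TX_DPL_PUT_PKT_LIMIT_DEFAULT.
 */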
/* Forward declarations. */
static inline void sfxge_tx_qdpl_service(struct sfxge_txq *txq);
static void sfxge_tx_qlist_post(struct sfxge_txq *txq);
static void sfxge_tx_qunblock(struct sfxge_txq *txq);
static int sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf,
			      const bus_dma_segment_t *dma_seg, int n_dma_seg);
sfxge_tx_qcomplete(struct sfxge_txq *txq)
	struct sfxge_softc *sc;
	struct sfxge_evq *evq;
	unsigned int completed;

	evq = sc->evq[txq->evq_index];
	mtx_assert(&evq->lock, MA_OWNED);

	completed = txq->completed;
	while (completed != txq->pending) {
		struct sfxge_tx_mapping *stmp;

		id = completed++ & txq->ptr_mask;

		stmp = &txq->stmp[id];
		if (stmp->flags & TX_BUF_UNMAP) {
			bus_dmamap_unload(txq->packet_dma_tag, stmp->map);
			if (stmp->flags & TX_BUF_MBUF) {
				struct mbuf *m = stmp->u.mbuf;
				free(stmp->u.heap_buf, M_SFXGE);

	txq->completed = completed;

	/* Check whether we need to unblock the queue. */
	level = txq->added - txq->completed;
	if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries))
		sfxge_tx_qunblock(txq);
 * Reorder the put list and append it to the get list.
 */
sfxge_tx_qdpl_swizzle(struct sfxge_txq *txq)
	struct sfxge_tx_dpl *stdp;
	struct mbuf *mbuf, *get_next, **get_tailp;
	volatile uintptr_t *putp;

	mtx_assert(&txq->lock, MA_OWNED);

	/* Acquire the put list. */
	putp = &stdp->std_put;
	put = atomic_readandclear_ptr(putp);

	/* Reverse the put list. */
	get_tailp = &mbuf->m_nextpkt;
		struct mbuf *put_next;

		put_next = mbuf->m_nextpkt;
		mbuf->m_nextpkt = get_next;
	} while (mbuf != NULL);

	/* Append the reversed put list to the get list. */
	KASSERT(*get_tailp == NULL, ("*get_tailp != NULL"));
	*stdp->std_getp = get_next;
	stdp->std_getp = get_tailp;
	stdp->std_get_count += count;

#endif /* SFXGE_HAVE_MQ */
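/*
 * Informal illustration of the swizzle above: the lock-free "put list" is
 * built by pushing at the head, so it holds packets in reverse (LIFO) order,
 * e.g. P3 -> P2 -> P1 after P1, P2 and P3 were enqueued.  Walking it once
 * while re-linking the m_nextpkt pointers yields P1 -> P2 -> P3, which is
 * then spliced onto the tail of the "get list" so packets are still
 * transmitted in the order they were submitted.
 */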
sfxge_tx_qreap(struct sfxge_txq *txq)
	mtx_assert(SFXGE_TXQ_LOCK(txq), MA_OWNED);

	txq->reaped = txq->completed;
sfxge_tx_qlist_post(struct sfxge_txq *txq)
	unsigned int old_added;

	mtx_assert(SFXGE_TXQ_LOCK(txq), MA_OWNED);

	KASSERT(txq->n_pend_desc != 0, ("txq->n_pend_desc == 0"));
	KASSERT(txq->n_pend_desc <= SFXGE_TSO_MAX_DESC,
	    ("txq->n_pend_desc too large"));
	KASSERT(!txq->blocked, ("txq->blocked"));

	old_added = txq->added;

	/* Post the fragment list. */
	rc = efx_tx_qpost(txq->common, txq->pend_desc, txq->n_pend_desc,
			  txq->reaped, &txq->added);
	KASSERT(rc == 0, ("efx_tx_qpost() failed"));

	/* If efx_tx_qpost() had to refragment, our information about
	 * buffers to free may be associated with the wrong
	 * descriptors.
	 */
	KASSERT(txq->added - old_added == txq->n_pend_desc,
	    ("efx_tx_qpost() refragmented descriptors"));

	level = txq->added - txq->reaped;
	KASSERT(level <= txq->entries, ("overfilled TX queue"));

	/* Clear the fragment list. */
	txq->n_pend_desc = 0;

	/* Have we reached the block level? */
	if (level < SFXGE_TXQ_BLOCK_LEVEL(txq->entries))

	/* Reap, and check again. */
	level = txq->added - txq->reaped;
	if (level < SFXGE_TXQ_BLOCK_LEVEL(txq->entries))

	/*
	 * Avoid a race with completion interrupt handling that could leave
	 * the queue blocked.
	 */
	level = txq->added - txq->reaped;
	if (level < SFXGE_TXQ_BLOCK_LEVEL(txq->entries)) {
static int sfxge_tx_queue_mbuf(struct sfxge_txq *txq, struct mbuf *mbuf)
	bus_dmamap_t *used_map;
	bus_dma_segment_t dma_seg[SFXGE_TX_MAPPING_MAX_SEG];
	struct sfxge_tx_mapping *stmp;

	KASSERT(!txq->blocked, ("txq->blocked"));

	if (mbuf->m_pkthdr.csum_flags & CSUM_TSO)
		prefetch_read_many(mbuf->m_data);

	if (txq->init_state != SFXGE_TXQ_STARTED) {

	/* Load the packet for DMA. */
	id = txq->added & txq->ptr_mask;
	stmp = &txq->stmp[id];
	rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag, stmp->map,
				     mbuf, dma_seg, &n_dma_seg, 0);
		struct mbuf *new_mbuf = m_collapse(mbuf, M_NOWAIT,
						   SFXGE_TX_MAPPING_MAX_SEG);
		if (new_mbuf == NULL)
		rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag,
					     dma_seg, &n_dma_seg, 0);

	/* Make the packet visible to the hardware. */
	bus_dmamap_sync(txq->packet_dma_tag, stmp->map, BUS_DMASYNC_PREWRITE);

	used_map = &stmp->map;

	if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) {
		rc = sfxge_tx_queue_tso(txq, mbuf, dma_seg, n_dma_seg);
		stmp = &txq->stmp[rc];
		/* Add the mapping to the fragment list, and set flags
		 * for the buffer.
		 */
			desc = &txq->pend_desc[i];
			desc->eb_addr = dma_seg[i].ds_addr;
			desc->eb_size = dma_seg[i].ds_len;
			if (i == n_dma_seg - 1) {
			if (__predict_false(stmp ==
					    &txq->stmp[txq->ptr_mask]))
				stmp = &txq->stmp[0];

		txq->n_pend_desc = n_dma_seg;

	/*
	 * If the mapping required more than one descriptor
	 * then we need to associate the DMA map with the last
	 * descriptor, not the first.
	 */
	if (used_map != &stmp->map) {
		stmp->map = *used_map;

	stmp->flags = TX_BUF_UNMAP | TX_BUF_MBUF;

	/* Post the fragment list. */
	sfxge_tx_qlist_post(txq);

	bus_dmamap_unload(txq->packet_dma_tag, *used_map);

	/* Drop the packet on the floor. */
 * Drain the deferred packet list into the transmit queue.
 */
sfxge_tx_qdpl_drain(struct sfxge_txq *txq)
	struct sfxge_softc *sc;
	struct sfxge_tx_dpl *stdp;
	struct mbuf *mbuf, *next;

	mtx_assert(&txq->lock, MA_OWNED);

	prefetch_read_many(sc->enp);
	prefetch_read_many(txq->common);

	mbuf = stdp->std_get;
	count = stdp->std_get_count;

		KASSERT(mbuf != NULL, ("mbuf == NULL"));

		next = mbuf->m_nextpkt;
		mbuf->m_nextpkt = NULL;

		ETHER_BPF_MTAP(sc->ifnet, mbuf); /* packet capture */

			prefetch_read_many(next);

		rc = sfxge_tx_queue_mbuf(txq, mbuf);

		/* Push the fragments to the hardware in batches. */
		if (txq->added - pushed >= SFXGE_TX_BATCH) {
			efx_tx_qpush(txq->common, txq->added);

		KASSERT(mbuf == NULL, ("mbuf != NULL"));
		stdp->std_get = NULL;
		stdp->std_get_count = 0;
		stdp->std_getp = &stdp->std_get;
		stdp->std_get = mbuf;
		stdp->std_get_count = count;

	if (txq->added != pushed)
		efx_tx_qpush(txq->common, txq->added);

	KASSERT(txq->blocked || stdp->std_get_count == 0,
	    ("queue unblocked but count is non-zero"));
#define SFXGE_TX_QDPL_PENDING(_txq)	\
	((_txq)->dpl.std_put != 0)
 * Service the deferred packet list.
 *
 * NOTE: drops the txq mutex!
 */
sfxge_tx_qdpl_service(struct sfxge_txq *txq)
	mtx_assert(&txq->lock, MA_OWNED);

		if (SFXGE_TX_QDPL_PENDING(txq))
			sfxge_tx_qdpl_swizzle(txq);

		sfxge_tx_qdpl_drain(txq);

		mtx_unlock(&txq->lock);
	} while (SFXGE_TX_QDPL_PENDING(txq) &&
		 mtx_trylock(&txq->lock));
 * Put a packet on the deferred packet list.
 *
 * If we are called with the txq lock held, we put the packet on the "get
 * list", otherwise we atomically push it on the "put list".  The swizzle
 * function takes care of ordering.
 *
 * The length of the put list is bounded by SFXGE_TX_MAX_DEFFERED.  We
 * overload the csum_data field in the mbuf to keep track of this length
 * because there is no cheap alternative to avoid races.
 */
sfxge_tx_qdpl_put(struct sfxge_txq *txq, struct mbuf *mbuf, int locked)
	struct sfxge_tx_dpl *stdp;

	KASSERT(mbuf->m_nextpkt == NULL, ("mbuf->m_nextpkt != NULL"));

		mtx_assert(&txq->lock, MA_OWNED);

		sfxge_tx_qdpl_swizzle(txq);

		if (stdp->std_get_count >= stdp->std_get_max)

		*(stdp->std_getp) = mbuf;
		stdp->std_getp = &mbuf->m_nextpkt;
		stdp->std_get_count++;
		volatile uintptr_t *putp;

		putp = &stdp->std_put;
		new = (uintptr_t)mbuf;
			struct mbuf *mp = (struct mbuf *)old;

			old_len = mp->m_pkthdr.csum_data;
			if (old_len >= stdp->std_put_max)
			mbuf->m_pkthdr.csum_data = old_len + 1;
			mbuf->m_nextpkt = (void *)old;
		} while (atomic_cmpset_ptr(putp, old, new) == 0);
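		/*
		 * Descriptive note on the loop above: each new mbuf is pushed
		 * onto the head of the lock-free put list by pointing its
		 * m_nextpkt at the previous head and publishing it with a
		 * compare-and-swap; if the CAS fails, the head is re-read and
		 * the push is retried.  The csum_data field of the head mbuf
		 * carries the current list length, so the std_put_max bound
		 * can be enforced without taking the queue lock.
		 */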
 * Called from if_transmit - will try to grab the txq lock; if it succeeds,
 * the packet is appended to the deferred "get list", otherwise it is pushed
 * onto the lock-free "put list".
 */
sfxge_tx_packet_add(struct sfxge_txq *txq, struct mbuf *m)
	if (!SFXGE_LINK_UP(txq->sc)) {

	/*
	 * Try to grab the txq lock.  If we are able to get the lock,
	 * the packet will be appended to the "get list" of the deferred
	 * packet list.  Otherwise, it will be pushed on the "put list".
	 */
	locked = mtx_trylock(&txq->lock);

	if (sfxge_tx_qdpl_put(txq, m, locked) != 0) {
			mtx_unlock(&txq->lock);

	/*
	 * Try to grab the lock again.
	 *
	 * If we are able to get the lock, we need to process the deferred
	 * packet list.  If we are not able to get the lock, another thread
	 * is processing the list.
	 */
		locked = mtx_trylock(&txq->lock);

		/* Try to service the list. */
		sfxge_tx_qdpl_service(txq);
		/* Lock has been dropped. */

	atomic_add_long(&txq->early_drops, 1);
sfxge_tx_qdpl_flush(struct sfxge_txq *txq)
	struct sfxge_tx_dpl *stdp = &txq->dpl;
	struct mbuf *mbuf, *next;

	mtx_lock(&txq->lock);

	sfxge_tx_qdpl_swizzle(txq);
	for (mbuf = stdp->std_get; mbuf != NULL; mbuf = next) {
		next = mbuf->m_nextpkt;
	stdp->std_get = NULL;
	stdp->std_get_count = 0;
	stdp->std_getp = &stdp->std_get;

	mtx_unlock(&txq->lock);
sfxge_if_qflush(struct ifnet *ifp)
	struct sfxge_softc *sc;

	for (i = 0; i < SFXGE_TX_SCALE(sc); i++)
		sfxge_tx_qdpl_flush(sc->txq[i]);
 * TX start -- called by the stack.
 */
sfxge_if_transmit(struct ifnet *ifp, struct mbuf *m)
	struct sfxge_softc *sc;
	struct sfxge_txq *txq;

	sc = (struct sfxge_softc *)ifp->if_softc;

	KASSERT(ifp->if_flags & IFF_UP, ("interface not up"));

	/* Pick the desired transmit queue. */
	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_TSO)) {
		if (m->m_flags & M_FLOWID) {
			uint32_t hash = m->m_pkthdr.flowid;

			index = sc->rx_indir_table[hash % SFXGE_RX_SCALE_MAX];
		txq = sc->txq[SFXGE_TXQ_IP_TCP_UDP_CKSUM + index];
	} else if (m->m_pkthdr.csum_flags & CSUM_DELAY_IP) {
		txq = sc->txq[SFXGE_TXQ_IP_CKSUM];
		txq = sc->txq[SFXGE_TXQ_NON_CKSUM];

	rc = sfxge_tx_packet_add(txq, m);
#else /* !SFXGE_HAVE_MQ */

static void sfxge_if_start_locked(struct ifnet *ifp)
	struct sfxge_softc *sc = ifp->if_softc;
	struct sfxge_txq *txq;
	unsigned int pushed[SFXGE_TXQ_NTYPES];
	unsigned int q_index;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=

	if (!sc->port.link_up)

	for (q_index = 0; q_index < SFXGE_TXQ_NTYPES; q_index++) {
		txq = sc->txq[q_index];
		pushed[q_index] = txq->added;

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, mbuf);

		ETHER_BPF_MTAP(ifp, mbuf); /* packet capture */

		/* Pick the desired transmit queue. */
		if (mbuf->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_TSO))
			q_index = SFXGE_TXQ_IP_TCP_UDP_CKSUM;
		else if (mbuf->m_pkthdr.csum_flags & CSUM_DELAY_IP)
			q_index = SFXGE_TXQ_IP_CKSUM;
			q_index = SFXGE_TXQ_NON_CKSUM;
		txq = sc->txq[q_index];

		if (sfxge_tx_queue_mbuf(txq, mbuf) != 0)
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;

		/* Push the fragments to the hardware in batches. */
		if (txq->added - pushed[q_index] >= SFXGE_TX_BATCH) {
			efx_tx_qpush(txq->common, txq->added);
			pushed[q_index] = txq->added;

	for (q_index = 0; q_index < SFXGE_TXQ_NTYPES; q_index++) {
		txq = sc->txq[q_index];
		if (txq->added != pushed[q_index])
			efx_tx_qpush(txq->common, txq->added);
void sfxge_if_start(struct ifnet *ifp)
	struct sfxge_softc *sc = ifp->if_softc;

	mtx_lock(&sc->tx_lock);
	sfxge_if_start_locked(ifp);
	mtx_unlock(&sc->tx_lock);

sfxge_tx_qdpl_service(struct sfxge_txq *txq)
	struct sfxge_softc *sc = txq->sc;
	struct ifnet *ifp = sc->ifnet;

	mtx_assert(&sc->tx_lock, MA_OWNED);
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sfxge_if_start_locked(ifp);
	mtx_unlock(&sc->tx_lock);

#endif /* SFXGE_HAVE_MQ */
 * Software "TSO".  Not quite as good as doing it in hardware, but
 * still faster than segmenting in the stack.
 */
struct sfxge_tso_state {
	/* Output position */
	unsigned out_len;	/* Remaining length in current segment */
	unsigned seqnum;	/* Current sequence number */
	unsigned packet_space;	/* Remaining space in current packet */

	unsigned dma_seg_i;	/* Current DMA segment number */
	uint64_t dma_addr;	/* DMA address of current position */
	unsigned in_len;	/* Remaining length in current mbuf */

	const struct mbuf *mbuf;	/* Input mbuf (head of chain) */
	u_short protocol;	/* Network protocol (after VLAN decap) */
	ssize_t nh_off;		/* Offset of network header */
	ssize_t tcph_off;	/* Offset of TCP header */
	unsigned header_len;	/* Number of bytes of header */
	int full_packet_size;	/* Number of bytes to put in each outgoing
				 * segment */
static inline const struct ip *tso_iph(const struct sfxge_tso_state *tso)
	KASSERT(tso->protocol == htons(ETHERTYPE_IP),
	    ("tso_iph() in non-IPv4 state"));
	return (const struct ip *)(tso->mbuf->m_data + tso->nh_off);

static inline const struct ip6_hdr *tso_ip6h(const struct sfxge_tso_state *tso)
	KASSERT(tso->protocol == htons(ETHERTYPE_IPV6),
	    ("tso_ip6h() in non-IPv6 state"));
	return (const struct ip6_hdr *)(tso->mbuf->m_data + tso->nh_off);

static inline const struct tcphdr *tso_tcph(const struct sfxge_tso_state *tso)
	return (const struct tcphdr *)(tso->mbuf->m_data + tso->tcph_off);
/* Size of preallocated TSO header buffers.  Larger blocks must be
 * allocated from the heap.
 */
#define TSOH_STD_SIZE	128

/* At most half the descriptors in the queue at any time will refer to
 * a TSO header buffer, since they must always be followed by a
 * payload descriptor referring to an mbuf.
 */
#define TSOH_COUNT(_txq_entries)	((_txq_entries) / 2u)
#define TSOH_PER_PAGE	(PAGE_SIZE / TSOH_STD_SIZE)
#define TSOH_PAGE_COUNT(_txq_entries)	\
	((TSOH_COUNT(_txq_entries) + TSOH_PER_PAGE - 1) / TSOH_PER_PAGE)
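/*
 * Worked example of the header-buffer sizing above (illustrative only;
 * actual values depend on PAGE_SIZE and the configured ring size): with
 * TSOH_STD_SIZE = 128 and PAGE_SIZE = 4096, TSOH_PER_PAGE is 32.  A ring of
 * 1024 entries needs at most TSOH_COUNT(1024) = 512 standard headers, so
 * TSOH_PAGE_COUNT(1024) = (512 + 31) / 32 = 16 DMA-mapped pages.
 */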
static int tso_init(struct sfxge_txq *txq)
	struct sfxge_softc *sc = txq->sc;
	unsigned int tsoh_page_count = TSOH_PAGE_COUNT(sc->txq_entries);

	/* Allocate TSO header buffers */
	txq->tsoh_buffer = malloc(tsoh_page_count * sizeof(txq->tsoh_buffer[0]),

	for (i = 0; i < tsoh_page_count; i++) {
		rc = sfxge_dma_alloc(sc, PAGE_SIZE, &txq->tsoh_buffer[i]);

		sfxge_dma_free(&txq->tsoh_buffer[i]);
	free(txq->tsoh_buffer, M_SFXGE);
	txq->tsoh_buffer = NULL;

static void tso_fini(struct sfxge_txq *txq)
	if (txq->tsoh_buffer != NULL) {
		for (i = 0; i < TSOH_PAGE_COUNT(txq->sc->txq_entries); i++)
			sfxge_dma_free(&txq->tsoh_buffer[i]);
		free(txq->tsoh_buffer, M_SFXGE);
static void tso_start(struct sfxge_tso_state *tso, struct mbuf *mbuf)
	struct ether_header *eh = mtod(mbuf, struct ether_header *);

	/* Find network protocol and header */
	tso->protocol = eh->ether_type;
	if (tso->protocol == htons(ETHERTYPE_VLAN)) {
		struct ether_vlan_header *veh =
		    mtod(mbuf, struct ether_vlan_header *);

		tso->protocol = veh->evl_proto;
		tso->nh_off = sizeof(*veh);
		tso->nh_off = sizeof(*eh);

	/* Find TCP header */
	if (tso->protocol == htons(ETHERTYPE_IP)) {
		KASSERT(tso_iph(tso)->ip_p == IPPROTO_TCP,
		    ("TSO required on non-TCP packet"));
		tso->tcph_off = tso->nh_off + 4 * tso_iph(tso)->ip_hl;
		KASSERT(tso->protocol == htons(ETHERTYPE_IPV6),
		    ("TSO required on non-IP packet"));
		KASSERT(tso_ip6h(tso)->ip6_nxt == IPPROTO_TCP,
		    ("TSO required on non-TCP packet"));
		tso->tcph_off = tso->nh_off + sizeof(struct ip6_hdr);

	/* We assume all headers are linear in the head mbuf */
	tso->header_len = tso->tcph_off + 4 * tso_tcph(tso)->th_off;
	KASSERT(tso->header_len <= mbuf->m_len, ("packet headers fragmented"));
	tso->full_packet_size = tso->header_len + mbuf->m_pkthdr.tso_segsz;

	tso->seqnum = ntohl(tso_tcph(tso)->th_seq);

	/* These flags must not be duplicated */
	KASSERT(!(tso_tcph(tso)->th_flags & (TH_URG | TH_SYN | TH_RST)),
	    ("incompatible TCP flag on TSO packet"));

	tso->out_len = mbuf->m_pkthdr.len - tso->header_len;
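	/*
	 * Illustrative numbers for the parsing above (assuming an untagged
	 * IPv4/TCP packet with no IP or TCP options): nh_off = 14 (Ethernet
	 * header), tcph_off = 14 + 4 * 5 = 34, header_len = 34 + 4 * 5 = 54,
	 * and full_packet_size = 54 + tso_segsz.  A VLAN tag or an IPv6
	 * header simply shifts these offsets as computed above.
	 */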
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 *
 * Form descriptors for the current fragment, until we reach the end of
 * the fragment or the end of the packet.
 */
static void tso_fill_packet_with_fragment(struct sfxge_txq *txq,
					  struct sfxge_tso_state *tso)
	if (tso->in_len == 0 || tso->packet_space == 0)

	KASSERT(tso->in_len > 0, ("TSO input length went negative"));
	KASSERT(tso->packet_space > 0, ("TSO packet space went negative"));

	n = min(tso->in_len, tso->packet_space);

	tso->packet_space -= n;

	desc = &txq->pend_desc[txq->n_pend_desc++];
	desc->eb_addr = tso->dma_addr;
	desc->eb_eop = tso->out_len == 0 || tso->packet_space == 0;
/* Callback from bus_dmamap_load() for long TSO headers. */
static void tso_map_long_header(void *dma_addr_ret,
				bus_dma_segment_t *segs, int nseg,
	*(uint64_t *)dma_addr_ret = ((__predict_true(error == 0) &&
				      __predict_true(nseg == 1)) ?
 * tso_start_new_packet - generate a new header and prepare for the new packet
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or an error code if we failed to allocate a header.
 */
static int tso_start_new_packet(struct sfxge_txq *txq,
				struct sfxge_tso_state *tso,
	struct sfxge_tx_mapping *stmp = &txq->stmp[id];
	struct tcphdr *tsoh_th;

	/* Allocate a DMA-mapped header buffer. */
	if (__predict_true(tso->header_len <= TSOH_STD_SIZE)) {
		unsigned int page_index = (id / 2) / TSOH_PER_PAGE;
		unsigned int buf_index = (id / 2) % TSOH_PER_PAGE;

		header = (txq->tsoh_buffer[page_index].esm_base +
			  buf_index * TSOH_STD_SIZE);
		dma_addr = (txq->tsoh_buffer[page_index].esm_addr +
			    buf_index * TSOH_STD_SIZE);
		map = txq->tsoh_buffer[page_index].esm_map;

		/* We cannot use bus_dmamem_alloc() as that may sleep */
		header = malloc(tso->header_len, M_SFXGE, M_NOWAIT);
		if (__predict_false(!header))
		rc = bus_dmamap_load(txq->packet_dma_tag, stmp->map,
				     header, tso->header_len,
				     tso_map_long_header, &dma_addr,
		if (__predict_false(dma_addr == 0)) {
				/* Succeeded but got >1 segment */
				bus_dmamap_unload(txq->packet_dma_tag,
			free(header, M_SFXGE);

		txq->tso_long_headers++;
		stmp->u.heap_buf = header;
		stmp->flags = TX_BUF_UNMAP;

	tsoh_th = (struct tcphdr *)(header + tso->tcph_off);

	/* Copy and update the headers. */
	memcpy(header, tso->mbuf->m_data, tso->header_len);

	tsoh_th->th_seq = htonl(tso->seqnum);
	tso->seqnum += tso->mbuf->m_pkthdr.tso_segsz;
	if (tso->out_len > tso->mbuf->m_pkthdr.tso_segsz) {
		/* This packet will not finish the TSO burst. */
		ip_length = tso->full_packet_size - tso->nh_off;
		tsoh_th->th_flags &= ~(TH_FIN | TH_PUSH);
		/* This packet will be the last in the TSO burst. */
		ip_length = tso->header_len - tso->nh_off + tso->out_len;

	if (tso->protocol == htons(ETHERTYPE_IP)) {
		struct ip *tsoh_iph = (struct ip *)(header + tso->nh_off);

		tsoh_iph->ip_len = htons(ip_length);
		/* XXX We should increment ip_id, but FreeBSD doesn't
		 * currently allocate extra IDs for multiple segments.
		 */
		struct ip6_hdr *tsoh_iph =
		    (struct ip6_hdr *)(header + tso->nh_off);

		tsoh_iph->ip6_plen = htons(ip_length - sizeof(*tsoh_iph));

	/* Make the header visible to the hardware. */
	bus_dmamap_sync(txq->packet_dma_tag, map, BUS_DMASYNC_PREWRITE);

	tso->packet_space = tso->mbuf->m_pkthdr.tso_segsz;

	/* Form a descriptor for this header. */
	desc = &txq->pend_desc[txq->n_pend_desc++];
	desc->eb_addr = dma_addr;
	desc->eb_size = tso->header_len;
sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf,
		   const bus_dma_segment_t *dma_seg, int n_dma_seg)
	struct sfxge_tso_state tso;
	unsigned int id, next_id;

	tso_start(&tso, mbuf);

	/* Grab the first payload fragment. */
	if (dma_seg->ds_len == tso.header_len) {
		KASSERT(n_dma_seg, ("no payload found in TSO packet"));
		tso.in_len = dma_seg->ds_len;
		tso.dma_addr = dma_seg->ds_addr;
		tso.in_len = dma_seg->ds_len - tso.header_len;
		tso.dma_addr = dma_seg->ds_addr + tso.header_len;

	id = txq->added & txq->ptr_mask;
	if (__predict_false(tso_start_new_packet(txq, &tso, id)))

		id = (id + 1) & txq->ptr_mask;
		tso_fill_packet_with_fragment(txq, &tso);

		/* Move onto the next fragment? */
		if (tso.in_len == 0) {
			tso.in_len = dma_seg->ds_len;
			tso.dma_addr = dma_seg->ds_addr;

		/* End of packet? */
		if (tso.packet_space == 0) {
			/* If the queue is now full due to tiny MSS,
			 * or we can't create another header, discard
			 * the remainder of the input mbuf but do not
			 * roll back the work we have done.
			 */
			if (txq->n_pend_desc >
			    SFXGE_TSO_MAX_DESC - (1 + SFXGE_TX_MAPPING_MAX_SEG))
			next_id = (id + 1) & txq->ptr_mask;
			if (__predict_false(tso_start_new_packet(txq, &tso,
sfxge_tx_qunblock(struct sfxge_txq *txq)
	struct sfxge_softc *sc;
	struct sfxge_evq *evq;

	evq = sc->evq[txq->evq_index];
	mtx_assert(&evq->lock, MA_OWNED);

	if (txq->init_state != SFXGE_TXQ_STARTED)

	mtx_lock(SFXGE_TXQ_LOCK(txq));

	level = txq->added - txq->completed;
	if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries))

	sfxge_tx_qdpl_service(txq);
	/* note: lock has been dropped */
sfxge_tx_qflush_done(struct sfxge_txq *txq)
	txq->flush_state = SFXGE_FLUSH_DONE;
sfxge_tx_qstop(struct sfxge_softc *sc, unsigned int index)
	struct sfxge_txq *txq;
	struct sfxge_evq *evq;

	txq = sc->txq[index];
	evq = sc->evq[txq->evq_index];

	mtx_lock(SFXGE_TXQ_LOCK(txq));

	KASSERT(txq->init_state == SFXGE_TXQ_STARTED,
	    ("txq->init_state != SFXGE_TXQ_STARTED"));

	txq->init_state = SFXGE_TXQ_INITIALIZED;
	txq->flush_state = SFXGE_FLUSH_PENDING;

	/* Flush the transmit queue. */
	efx_tx_qflush(txq->common);

	mtx_unlock(SFXGE_TXQ_LOCK(txq));

	/* Spin for 100ms. */
		if (txq->flush_state != SFXGE_FLUSH_PENDING)
	} while (++count < 20);

	mtx_lock(&evq->lock);
	mtx_lock(SFXGE_TXQ_LOCK(txq));

	KASSERT(txq->flush_state != SFXGE_FLUSH_FAILED,
	    ("txq->flush_state == SFXGE_FLUSH_FAILED"));

	txq->flush_state = SFXGE_FLUSH_DONE;

	txq->pending = txq->added;

	sfxge_tx_qcomplete(txq);
	KASSERT(txq->completed == txq->added,
	    ("txq->completed != txq->added"));

	sfxge_tx_qreap(txq);
	KASSERT(txq->reaped == txq->completed,
	    ("txq->reaped != txq->completed"));

	/* Destroy the common code transmit queue. */
	efx_tx_qdestroy(txq->common);

	efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
	    EFX_TXQ_NBUFS(sc->txq_entries));

	mtx_unlock(&evq->lock);
	mtx_unlock(SFXGE_TXQ_LOCK(txq));
sfxge_tx_qstart(struct sfxge_softc *sc, unsigned int index)
	struct sfxge_txq *txq;
	struct sfxge_evq *evq;

	txq = sc->txq[index];
	evq = sc->evq[txq->evq_index];

	KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED,
	    ("txq->init_state != SFXGE_TXQ_INITIALIZED"));
	KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
	    ("evq->init_state != SFXGE_EVQ_STARTED"));

	/* Program the buffer table. */
	if ((rc = efx_sram_buf_tbl_set(sc->enp, txq->buf_base_id, esmp,
	    EFX_TXQ_NBUFS(sc->txq_entries))) != 0)

	/* Determine the kind of queue we are creating. */
	switch (txq->type) {
	case SFXGE_TXQ_NON_CKSUM:
	case SFXGE_TXQ_IP_CKSUM:
		flags = EFX_CKSUM_IPV4;
	case SFXGE_TXQ_IP_TCP_UDP_CKSUM:
		flags = EFX_CKSUM_IPV4 | EFX_CKSUM_TCPUDP;
		KASSERT(0, ("Impossible TX queue"));

	/* Create the common code transmit queue. */
	if ((rc = efx_tx_qcreate(sc->enp, index, txq->type, esmp,
	    sc->txq_entries, txq->buf_base_id, flags, evq->common,
	    &txq->common)) != 0)

	mtx_lock(SFXGE_TXQ_LOCK(txq));

	/* Enable the transmit queue. */
	efx_tx_qenable(txq->common);

	txq->init_state = SFXGE_TXQ_STARTED;

	mtx_unlock(SFXGE_TXQ_LOCK(txq));

	efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
	    EFX_TXQ_NBUFS(sc->txq_entries));
sfxge_tx_stop(struct sfxge_softc *sc)
	const efx_nic_cfg_t *encp;

	index = SFXGE_TX_SCALE(sc);
	while (--index >= 0)
		sfxge_tx_qstop(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index);

	sfxge_tx_qstop(sc, SFXGE_TXQ_IP_CKSUM);

	encp = efx_nic_cfg_get(sc->enp);
	sfxge_tx_qstop(sc, SFXGE_TXQ_NON_CKSUM);

	/* Tear down the transmit module */
	efx_tx_fini(sc->enp);
sfxge_tx_start(struct sfxge_softc *sc)
	/* Initialize the common code transmit module. */
	if ((rc = efx_tx_init(sc->enp)) != 0)

	if ((rc = sfxge_tx_qstart(sc, SFXGE_TXQ_NON_CKSUM)) != 0)

	if ((rc = sfxge_tx_qstart(sc, SFXGE_TXQ_IP_CKSUM)) != 0)

	for (index = 0; index < SFXGE_TX_SCALE(sc); index++) {
		if ((rc = sfxge_tx_qstart(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM +

	while (--index >= 0)
		sfxge_tx_qstop(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index);

	sfxge_tx_qstop(sc, SFXGE_TXQ_IP_CKSUM);

	sfxge_tx_qstop(sc, SFXGE_TXQ_NON_CKSUM);

	efx_tx_fini(sc->enp);
 * Destroy a transmit queue.
 */
sfxge_tx_qfini(struct sfxge_softc *sc, unsigned int index)
	struct sfxge_txq *txq;

	txq = sc->txq[index];

	KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED,
	    ("txq->init_state != SFXGE_TXQ_INITIALIZED"));

	if (txq->type == SFXGE_TXQ_IP_TCP_UDP_CKSUM)

	/* Free the context arrays. */
	free(txq->pend_desc, M_SFXGE);
	nmaps = sc->txq_entries;
	while (nmaps-- != 0)
		bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map);
	free(txq->stmp, M_SFXGE);

	/* Release DMA memory mapping. */
	sfxge_dma_free(&txq->mem);

	sc->txq[index] = NULL;

#ifdef SFXGE_HAVE_MQ
	mtx_destroy(&txq->lock);
sfxge_tx_qinit(struct sfxge_softc *sc, unsigned int txq_index,
    enum sfxge_txq_type type, unsigned int evq_index)
	struct sysctl_oid *txq_node;
	struct sfxge_txq *txq;
	struct sfxge_evq *evq;
#ifdef SFXGE_HAVE_MQ
	struct sfxge_tx_dpl *stdp;

	txq = malloc(sizeof(struct sfxge_txq), M_SFXGE, M_ZERO | M_WAITOK);
	txq->entries = sc->txq_entries;
	txq->ptr_mask = txq->entries - 1;

	sc->txq[txq_index] = txq;

	evq = sc->evq[evq_index];

	/* Allocate and zero DMA space for the descriptor ring. */
	if ((rc = sfxge_dma_alloc(sc, EFX_TXQ_SIZE(sc->txq_entries), esmp)) != 0)
	(void)memset(esmp->esm_base, 0, EFX_TXQ_SIZE(sc->txq_entries));

	/* Allocate buffer table entries. */
	sfxge_sram_buf_tbl_alloc(sc, EFX_TXQ_NBUFS(sc->txq_entries),

	/* Create a DMA tag for packet mappings. */
	if (bus_dma_tag_create(sc->parent_dma_tag, 1, 0x1000,
	    MIN(0x3FFFFFFFFFFFUL, BUS_SPACE_MAXADDR), BUS_SPACE_MAXADDR, NULL,
	    NULL, 0x11000, SFXGE_TX_MAPPING_MAX_SEG, 0x1000, 0, NULL, NULL,
	    &txq->packet_dma_tag) != 0) {
		device_printf(sc->dev, "Couldn't allocate txq DMA tag\n");

	/* Allocate pending descriptor array for batching writes. */
	txq->pend_desc = malloc(sizeof(efx_buffer_t) * sc->txq_entries,
	    M_SFXGE, M_ZERO | M_WAITOK);

	/* Allocate and initialise mbuf DMA mapping array. */
	txq->stmp = malloc(sizeof(struct sfxge_tx_mapping) * sc->txq_entries,
	    M_SFXGE, M_ZERO | M_WAITOK);
	for (nmaps = 0; nmaps < sc->txq_entries; nmaps++) {
		rc = bus_dmamap_create(txq->packet_dma_tag, 0,
		    &txq->stmp[nmaps].map);

	snprintf(name, sizeof(name), "%u", txq_index);
	txq_node = SYSCTL_ADD_NODE(
	    device_get_sysctl_ctx(sc->dev),
	    SYSCTL_CHILDREN(sc->txqs_node),
	    OID_AUTO, name, CTLFLAG_RD, NULL, "");
	if (txq_node == NULL) {

	if (type == SFXGE_TXQ_IP_TCP_UDP_CKSUM &&
	    (rc = tso_init(txq)) != 0)
#ifdef SFXGE_HAVE_MQ
	if (sfxge_tx_dpl_get_max <= 0) {
		log(LOG_ERR, "%s=%d must be greater than 0",
		    SFXGE_PARAM_TX_DPL_GET_MAX, sfxge_tx_dpl_get_max);
		goto fail_tx_dpl_get_max;
	if (sfxge_tx_dpl_put_max < 0) {
		log(LOG_ERR, "%s=%d must be greater than or equal to 0",
		    SFXGE_PARAM_TX_DPL_PUT_MAX, sfxge_tx_dpl_put_max);
		goto fail_tx_dpl_put_max;
	/* Initialize the deferred packet list. */
	stdp->std_put_max = sfxge_tx_dpl_put_max;
	stdp->std_get_max = sfxge_tx_dpl_get_max;
	stdp->std_getp = &stdp->std_get;

	mtx_init(&txq->lock, "txq", NULL, MTX_DEF);

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(sc->dev),
	    SYSCTL_CHILDREN(txq_node), OID_AUTO,
	    "dpl_get_count", CTLFLAG_RD | CTLFLAG_STATS,
	    &stdp->std_get_count, 0, "");

	txq->evq_index = evq_index;
	txq->txq_index = txq_index;
	txq->init_state = SFXGE_TXQ_INITIALIZED;
fail_tx_dpl_put_max:
fail_tx_dpl_get_max:
	free(txq->pend_desc, M_SFXGE);
	while (nmaps-- != 0)
		bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map);
	free(txq->stmp, M_SFXGE);
	bus_dma_tag_destroy(txq->packet_dma_tag);

	sfxge_dma_free(esmp);
static const struct {
} sfxge_tx_stats[] = {
#define SFXGE_TX_STAT(name, member) \
	{ #name, offsetof(struct sfxge_txq, member) }
	SFXGE_TX_STAT(tso_bursts, tso_bursts),
	SFXGE_TX_STAT(tso_packets, tso_packets),
	SFXGE_TX_STAT(tso_long_headers, tso_long_headers),
	SFXGE_TX_STAT(tx_collapses, collapses),
	SFXGE_TX_STAT(tx_drops, drops),
	SFXGE_TX_STAT(tx_early_drops, early_drops),
sfxge_tx_stat_handler(SYSCTL_HANDLER_ARGS)
	struct sfxge_softc *sc = arg1;
	unsigned int id = arg2;

	/* Sum across all TX queues */
	    index < SFXGE_TXQ_IP_TCP_UDP_CKSUM + SFXGE_TX_SCALE(sc);
		sum += *(unsigned long *)((caddr_t)sc->txq[index] +
		    sfxge_tx_stats[id].offset);

	return (SYSCTL_OUT(req, &sum, sizeof(sum)));
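	/*
	 * Each entry in sfxge_tx_stats thus appears as a read-only sysctl
	 * whose value is recomputed on demand by summing the per-queue
	 * counters.  Assuming the stats node is attached under the device's
	 * sysctl tree (e.g. unit 0), a hypothetical query would look like:
	 *
	 *	sysctl dev.sfxge.0.stats.tso_bursts
	 *
	 * The path shown is illustrative only; the exact location depends on
	 * where sc->stats_node is created.
	 */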
sfxge_tx_stat_init(struct sfxge_softc *sc)
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
	struct sysctl_oid_list *stat_list;

	stat_list = SYSCTL_CHILDREN(sc->stats_node);

	    id < sizeof(sfxge_tx_stats) / sizeof(sfxge_tx_stats[0]);
		    OID_AUTO, sfxge_tx_stats[id].name,
		    CTLTYPE_ULONG | CTLFLAG_RD,
		    sc, id, sfxge_tx_stat_handler, "LU",
sfxge_tx_fini(struct sfxge_softc *sc)
	index = SFXGE_TX_SCALE(sc);
	while (--index >= 0)
		sfxge_tx_qfini(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index);

	sfxge_tx_qfini(sc, SFXGE_TXQ_IP_CKSUM);
	sfxge_tx_qfini(sc, SFXGE_TXQ_NON_CKSUM);
sfxge_tx_init(struct sfxge_softc *sc)
	struct sfxge_intr *intr;

	KASSERT(intr->state == SFXGE_INTR_INITIALIZED,
	    ("intr->state != SFXGE_INTR_INITIALIZED"));

	sc->txqs_node = SYSCTL_ADD_NODE(
	    device_get_sysctl_ctx(sc->dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)),
	    OID_AUTO, "txq", CTLFLAG_RD, NULL, "Tx queues");
	if (sc->txqs_node == NULL) {

	/* Initialize the transmit queues */
	if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_NON_CKSUM,
	    SFXGE_TXQ_NON_CKSUM, 0)) != 0)

	if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_IP_CKSUM,
	    SFXGE_TXQ_IP_CKSUM, 0)) != 0)

	for (index = 0; index < SFXGE_TX_SCALE(sc); index++) {
		if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index,
		    SFXGE_TXQ_IP_TCP_UDP_CKSUM, index)) != 0)

	sfxge_tx_stat_init(sc);

	sfxge_tx_qfini(sc, SFXGE_TXQ_IP_CKSUM);

	while (--index >= 0)
		sfxge_tx_qfini(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index);

	sfxge_tx_qfini(sc, SFXGE_TXQ_NON_CKSUM);