/*-
 * Copyright (c) 2010-2011 Solarflare Communications, Inc.
 * All rights reserved.
 *
 * This software was developed in part by Philip Paeps under contract for
 * Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/mbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>

#include "common/efx.h"

#include "sfxge.h"
#include "sfxge_tx.h"
/* Set the block level to ensure there is space to generate a
 * large number of descriptors for TSO.  With minimum MSS and
 * maximum mbuf length we might need more than a ring-ful of
 * descriptors, but this should not happen in practice except
 * due to deliberate attack.  In that case we will truncate
 * the output at a packet boundary.  Allow for a reasonable
 * minimum MSS of 512.
 */
#define	SFXGE_TSO_MAX_DESC ((65535 / 512) * 2 + SFXGE_TX_MAPPING_MAX_SEG - 1)
#define	SFXGE_TXQ_BLOCK_LEVEL (SFXGE_NDESCS - SFXGE_TSO_MAX_DESC)
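
/*
 * Worked example (editorial sketch of the arithmetic above): at the
 * minimum assumed MSS of 512, a maximal 65535-byte TSO payload yields
 * roughly 65535 / 512 = 127 full output packets, each needing a header
 * descriptor and starting at least one payload descriptor, hence the
 * (65535 / 512) * 2 term.  The trailing SFXGE_TX_MAPPING_MAX_SEG - 1
 * allows for the extra payload descriptors contributed by the DMA
 * segments of the input mbuf chain beyond the first.
 */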
/* Forward declarations. */
static inline void sfxge_tx_qdpl_service(struct sfxge_txq *txq);
static void sfxge_tx_qlist_post(struct sfxge_txq *txq);
static void sfxge_tx_qunblock(struct sfxge_txq *txq);
static int sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf,
			      const bus_dma_segment_t *dma_seg, int n_dma_seg);
void
sfxge_tx_qcomplete(struct sfxge_txq *txq)
{
	struct sfxge_softc *sc;
	struct sfxge_evq *evq;
	unsigned int completed;

	sc = txq->sc;
	evq = sc->evq[txq->evq_index];

	mtx_assert(&evq->lock, MA_OWNED);

	completed = txq->completed;
	while (completed != txq->pending) {
		struct sfxge_tx_mapping *stmp;
		unsigned int id;

		id = completed++ & (SFXGE_NDESCS - 1);

		stmp = &txq->stmp[id];
		if (stmp->flags & TX_BUF_UNMAP) {
			bus_dmamap_unload(txq->packet_dma_tag, stmp->map);
			if (stmp->flags & TX_BUF_MBUF) {
				struct mbuf *m = stmp->u.mbuf;
				do
					m = m_free(m);
				while (m != NULL);
			} else {
				free(stmp->u.heap_buf, M_SFXGE);
			}
			stmp->flags = 0;
		}
	}
	txq->completed = completed;

	/* Check whether we need to unblock the queue. */
	mb();
	if (txq->blocked) {
		unsigned int level;

		level = txq->added - txq->completed;
		if (level <= SFXGE_TXQ_UNBLOCK_LEVEL)
			sfxge_tx_qunblock(txq);
	}
}
#ifdef SFXGE_HAVE_MQ

/*
 * Reorder the put list and append it to the get list.
 */
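/*
 * Editorial illustration: if concurrent senders pushed mbufs A, then
 * B, then C, the put list head is C with links C->B->A.  The reversal
 * below yields A->B->C, which is appended to the tail of the get list
 * so that packets are drained in submission order.
 */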
static void
sfxge_tx_qdpl_swizzle(struct sfxge_txq *txq)
{
	struct sfxge_tx_dpl *stdp;
	struct mbuf *mbuf, *get_next, **get_tailp;
	volatile uintptr_t *putp;
	uintptr_t put;
	unsigned int count;

	mtx_assert(&txq->lock, MA_OWNED);

	stdp = &txq->dpl;

	/* Acquire the put list. */
	putp = &stdp->std_put;
	put = atomic_readandclear_ptr(putp);
	mbuf = (void *)put;

	if (mbuf == NULL)
		return;

	/* Reverse the put list. */
	get_tailp = &mbuf->m_nextpkt;
	get_next = NULL;

	count = 0;
	do {
		struct mbuf *put_next;

		put_next = mbuf->m_nextpkt;
		mbuf->m_nextpkt = get_next;
		get_next = mbuf;
		mbuf = put_next;

		count++;
	} while (mbuf != NULL);

	/* Append the reversed put list to the get list. */
	KASSERT(*get_tailp == NULL, ("*get_tailp != NULL"));
	*stdp->std_getp = get_next;
	stdp->std_getp = get_tailp;
	stdp->std_count += count;
}

#endif /* SFXGE_HAVE_MQ */
static void
sfxge_tx_qreap(struct sfxge_txq *txq)
{
	mtx_assert(SFXGE_TXQ_LOCK(txq), MA_OWNED);

	txq->reaped = txq->completed;
}
static void
sfxge_tx_qlist_post(struct sfxge_txq *txq)
{
	unsigned int old_added;
	unsigned int level;
	int rc;

	mtx_assert(SFXGE_TXQ_LOCK(txq), MA_OWNED);

	KASSERT(txq->n_pend_desc != 0, ("txq->n_pend_desc == 0"));
	KASSERT(txq->n_pend_desc <= SFXGE_TSO_MAX_DESC,
		("txq->n_pend_desc too large"));
	KASSERT(!txq->blocked, ("txq->blocked"));

	old_added = txq->added;

	/* Post the fragment list. */
	rc = efx_tx_qpost(txq->common, txq->pend_desc, txq->n_pend_desc,
			  txq->reaped, &txq->added);
	KASSERT(rc == 0, ("efx_tx_qpost() failed"));

	/* If efx_tx_qpost() had to refragment, our information about
	 * buffers to free may be associated with the wrong
	 * descriptors.
	 */
	KASSERT(txq->added - old_added == txq->n_pend_desc,
		("efx_tx_qpost() refragmented descriptors"));

	level = txq->added - txq->reaped;
	KASSERT(level <= SFXGE_NDESCS, ("overfilled TX queue"));

	/* Clear the fragment list. */
	txq->n_pend_desc = 0;

	/* Have we reached the block level? */
	if (level < SFXGE_TXQ_BLOCK_LEVEL)
		return;

	/* Reap, and check again */
	sfxge_tx_qreap(txq);
	level = txq->added - txq->reaped;
	if (level < SFXGE_TXQ_BLOCK_LEVEL)
		return;

	txq->blocked = 1;

	/*
	 * Avoid a race with completion interrupt handling that could leave
	 * the queue blocked.
	 */
	mb();
	sfxge_tx_qreap(txq);
	level = txq->added - txq->reaped;
	if (level < SFXGE_TXQ_BLOCK_LEVEL) {
		mb();
		txq->blocked = 0;
	}
}
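
/*
 * Editorial note on the race handled above: the queue is marked
 * blocked before the final reap-and-recheck, while the completion
 * path (sfxge_tx_qcomplete) re-reads txq->blocked after its own
 * memory barrier.  Whichever side runs last observes the other's
 * update, so a completion racing with blocking either clears the
 * block here or triggers sfxge_tx_qunblock() from the event path.
 */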
static int sfxge_tx_queue_mbuf(struct sfxge_txq *txq, struct mbuf *mbuf)
{
	bus_dmamap_t *used_map;
	bus_dmamap_t map;
	bus_dma_segment_t dma_seg[SFXGE_TX_MAPPING_MAX_SEG];
	unsigned int id;
	struct sfxge_tx_mapping *stmp;
	efx_buffer_t *desc;
	int n_dma_seg;
	int rc;
	int i;

	KASSERT(!txq->blocked, ("txq->blocked"));

	if (mbuf->m_pkthdr.csum_flags & CSUM_TSO)
		prefetch_read_many(mbuf->m_data);

	if (txq->init_state != SFXGE_TXQ_STARTED) {
		rc = EINTR;
		goto reject;
	}

	/* Load the packet for DMA. */
	id = txq->added & (SFXGE_NDESCS - 1);
	stmp = &txq->stmp[id];
	rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag, stmp->map,
				     mbuf, dma_seg, &n_dma_seg, 0);
	if (rc == EFBIG) {
		/* Try again. */
		struct mbuf *new_mbuf = m_collapse(mbuf, M_NOWAIT,
						   SFXGE_TX_MAPPING_MAX_SEG);
		if (new_mbuf == NULL)
			goto reject;
		++txq->collapses;
		mbuf = new_mbuf;
		rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag,
					     stmp->map, mbuf,
					     dma_seg, &n_dma_seg, 0);
	}
	if (rc != 0)
		goto reject;

	/* Make the packet visible to the hardware. */
	bus_dmamap_sync(txq->packet_dma_tag, stmp->map, BUS_DMASYNC_PREWRITE);

	used_map = &stmp->map;

	if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) {
		rc = sfxge_tx_queue_tso(txq, mbuf, dma_seg, n_dma_seg);
		if (rc < 0)
			goto reject_mapped;
		stmp = &txq->stmp[rc];
	} else {
		/* Add the mapping to the fragment list, and set flags
		 * for the buffer.
		 */
		i = 0;
		for (;;) {
			desc = &txq->pend_desc[i];
			desc->eb_addr = dma_seg[i].ds_addr;
			desc->eb_size = dma_seg[i].ds_len;
			if (i == n_dma_seg - 1) {
				desc->eb_eop = 1;
				break;
			}
			desc->eb_eop = 0;
			i++;

			stmp->flags = 0;
			if (__predict_false(stmp ==
					    &txq->stmp[SFXGE_NDESCS - 1]))
				stmp = &txq->stmp[0];
			else
				stmp++;
		}
		txq->n_pend_desc = n_dma_seg;
	}

	/*
	 * If the mapping required more than one descriptor
	 * then we need to associate the DMA map with the last
	 * descriptor, not the first.
	 */
	if (used_map != &stmp->map) {
		map = stmp->map;
		stmp->map = *used_map;
		*used_map = map;
	}

	stmp->u.mbuf = mbuf;
	stmp->flags = TX_BUF_UNMAP | TX_BUF_MBUF;

	/* Post the fragment list. */
	sfxge_tx_qlist_post(txq);

	return (0);

reject_mapped:
	bus_dmamap_unload(txq->packet_dma_tag, *used_map);
reject:
	/* Drop the packet on the floor. */
	m_freem(mbuf);
	++txq->drops;

	return (rc);
}
#ifdef SFXGE_HAVE_MQ

/*
 * Drain the deferred packet list into the transmit queue.
 */
static void
sfxge_tx_qdpl_drain(struct sfxge_txq *txq)
{
	struct sfxge_softc *sc;
	struct sfxge_tx_dpl *stdp;
	struct mbuf *mbuf, *next;
	unsigned int count;
	unsigned int pushed;
	int rc;

	mtx_assert(&txq->lock, MA_OWNED);

	sc = txq->sc;
	stdp = &txq->dpl;
	pushed = txq->added;

	prefetch_read_many(sc->enp);
	prefetch_read_many(txq->common);

	mbuf = stdp->std_get;
	count = stdp->std_count;

	while (count != 0) {
		KASSERT(mbuf != NULL, ("mbuf == NULL"));

		next = mbuf->m_nextpkt;
		mbuf->m_nextpkt = NULL;

		ETHER_BPF_MTAP(sc->ifnet, mbuf); /* packet capture */

		if (next != NULL)
			prefetch_read_many(next);

		rc = sfxge_tx_queue_mbuf(txq, mbuf);
		--count;
		mbuf = next;
		if (rc != 0)
			continue;

		if (txq->blocked)
			break;

		/* Push the fragments to the hardware in batches. */
		if (txq->added - pushed >= SFXGE_TX_BATCH) {
			efx_tx_qpush(txq->common, txq->added);
			pushed = txq->added;
		}
	}

	if (count == 0) {
		KASSERT(mbuf == NULL, ("mbuf != NULL"));
		stdp->std_get = NULL;
		stdp->std_count = 0;
		stdp->std_getp = &stdp->std_get;
	} else {
		stdp->std_get = mbuf;
		stdp->std_count = count;
	}

	if (txq->added != pushed)
		efx_tx_qpush(txq->common, txq->added);

	KASSERT(txq->blocked || stdp->std_count == 0,
		("queue unblocked but count is non-zero"));
}
#define	SFXGE_TX_QDPL_PENDING(_txq)	\
	((_txq)->dpl.std_put != 0)
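
/*
 * Editorial note: reading std_put without the lock is safe here as a
 * hint.  A non-zero value only means some thread has pushed packets
 * since the last swizzle, and the service loop below re-checks it
 * after dropping the lock, so no deferred packet is left stranded.
 */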
/*
 * Service the deferred packet list.
 *
 * NOTE: drops the txq mutex!
 */
static inline void
sfxge_tx_qdpl_service(struct sfxge_txq *txq)
{
	mtx_assert(&txq->lock, MA_OWNED);

	do {
		if (SFXGE_TX_QDPL_PENDING(txq))
			sfxge_tx_qdpl_swizzle(txq);

		if (!txq->blocked)
			sfxge_tx_qdpl_drain(txq);

		mtx_unlock(&txq->lock);
	} while (SFXGE_TX_QDPL_PENDING(txq) &&
		 mtx_trylock(&txq->lock));
}
/*
 * Put a packet on the deferred packet list.
 *
 * If we are called with the txq lock held, we put the packet on the "get
 * list", otherwise we atomically push it on the "put list".  The swizzle
 * function takes care of ordering.
 *
 * The length of the put list is bounded by SFXGE_TX_MAX_DEFERRED.  We
 * overload the csum_data field in the mbuf to keep track of this length
 * because there is no cheap alternative to avoid races.
 */
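/*
 * Editorial sketch of the bookkeeping: the first mbuf pushed onto an
 * empty put list records a length of 1 in its csum_data; every later
 * push reads the count from the current head and stores old + 1 in
 * the new head.  The head of the put list therefore always carries
 * the current list length, and the compare-and-set retry loop below
 * keeps that count consistent without taking a lock.
 */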
static inline int
sfxge_tx_qdpl_put(struct sfxge_txq *txq, struct mbuf *mbuf, int locked)
{
	struct sfxge_tx_dpl *stdp;

	stdp = &txq->dpl;

	KASSERT(mbuf->m_nextpkt == NULL, ("mbuf->m_nextpkt != NULL"));

	if (locked) {
		mtx_assert(&txq->lock, MA_OWNED);

		sfxge_tx_qdpl_swizzle(txq);

		*(stdp->std_getp) = mbuf;
		stdp->std_getp = &mbuf->m_nextpkt;
		stdp->std_count++;
	} else {
		volatile uintptr_t *putp;
		uintptr_t old;
		uintptr_t new;
		unsigned int old_len;

		putp = &stdp->std_put;
		new = (uintptr_t)mbuf;

		do {
			old = *putp;
			if (old != 0) {
				struct mbuf *mp = (struct mbuf *)old;
				old_len = mp->m_pkthdr.csum_data;
			} else
				old_len = 0;
			if (old_len >= SFXGE_TX_MAX_DEFERRED)
				return (ENOBUFS);
			mbuf->m_pkthdr.csum_data = old_len + 1;
			mbuf->m_nextpkt = (void *)old;
		} while (atomic_cmpset_ptr(putp, old, new) == 0);
	}

	return (0);
}
/*
 * Called from if_transmit - will try to grab the txq lock and enqueue
 * to the "get list" if it succeeds; otherwise it will push onto the
 * "put list".
 */
int
sfxge_tx_packet_add(struct sfxge_txq *txq, struct mbuf *m)
{
	int locked;
	int rc;

	/*
	 * Try to grab the txq lock.  If we are able to get the lock,
	 * the packet will be appended to the "get list" of the deferred
	 * packet list.  Otherwise, it will be pushed on the "put list".
	 */
	locked = mtx_trylock(&txq->lock);

	/*
	 * Can only fail if we weren't able to get the lock.
	 */
	if (sfxge_tx_qdpl_put(txq, m, locked) != 0) {
		KASSERT(!locked,
			("sfxge_tx_qdpl_put() failed locked"));
		rc = ENOBUFS;
		goto fail;
	}

	/*
	 * Try to grab the lock again.
	 *
	 * If we are able to get the lock, we need to process the deferred
	 * packet list.  If we are not able to get the lock, another thread
	 * is processing the list.
	 */
	if (!locked)
		locked = mtx_trylock(&txq->lock);

	if (locked) {
		/* Try to service the list. */
		sfxge_tx_qdpl_service(txq);
		/* Lock has been dropped. */
	}

	return (0);

fail:
	return (rc);
}
static void
sfxge_tx_qdpl_flush(struct sfxge_txq *txq)
{
	struct sfxge_tx_dpl *stdp = &txq->dpl;
	struct mbuf *mbuf, *next;

	mtx_lock(&txq->lock);

	sfxge_tx_qdpl_swizzle(txq);
	for (mbuf = stdp->std_get; mbuf != NULL; mbuf = next) {
		next = mbuf->m_nextpkt;
		m_freem(mbuf);
	}
	stdp->std_get = NULL;
	stdp->std_count = 0;
	stdp->std_getp = &stdp->std_get;

	mtx_unlock(&txq->lock);
}
void
sfxge_if_qflush(struct ifnet *ifp)
{
	struct sfxge_softc *sc;
	int i;

	sc = ifp->if_softc;

	for (i = 0; i < SFXGE_TX_SCALE(sc); i++)
		sfxge_tx_qdpl_flush(sc->txq[i]);
}
/*
 * TX start -- called by the stack.
 */
int
sfxge_if_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct sfxge_softc *sc;
	struct sfxge_txq *txq;
	int rc;

	sc = (struct sfxge_softc *)ifp->if_softc;

	KASSERT(ifp->if_flags & IFF_UP, ("interface not up"));

	if (!SFXGE_LINK_UP(sc)) {
		m_freem(m);
		return (0);
	}

	/* Pick the desired transmit queue. */
	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_TSO)) {
		int index = 0;

		if (m->m_flags & M_FLOWID) {
			uint32_t hash = m->m_pkthdr.flowid;

			index = sc->rx_indir_table[hash % SFXGE_RX_SCALE_MAX];
		}
		txq = sc->txq[SFXGE_TXQ_IP_TCP_UDP_CKSUM + index];
	} else if (m->m_pkthdr.csum_flags & CSUM_DELAY_IP) {
		txq = sc->txq[SFXGE_TXQ_IP_CKSUM];
	} else {
		txq = sc->txq[SFXGE_TXQ_NON_CKSUM];
	}

	rc = sfxge_tx_packet_add(txq, m);

	return (rc);
}
#else /* !SFXGE_HAVE_MQ */

static void sfxge_if_start_locked(struct ifnet *ifp)
{
	struct sfxge_softc *sc = ifp->if_softc;
	struct sfxge_txq *txq;
	struct mbuf *mbuf;
	unsigned int pushed[SFXGE_TXQ_NTYPES];
	unsigned int q_index;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	if (!sc->port.link_up)
		return;

	for (q_index = 0; q_index < SFXGE_TXQ_NTYPES; q_index++) {
		txq = sc->txq[q_index];
		pushed[q_index] = txq->added;
	}

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, mbuf);
		if (mbuf == NULL)
			break;

		ETHER_BPF_MTAP(ifp, mbuf); /* packet capture */

		/* Pick the desired transmit queue. */
		if (mbuf->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_TSO))
			q_index = SFXGE_TXQ_IP_TCP_UDP_CKSUM;
		else if (mbuf->m_pkthdr.csum_flags & CSUM_DELAY_IP)
			q_index = SFXGE_TXQ_IP_CKSUM;
		else
			q_index = SFXGE_TXQ_NON_CKSUM;
		txq = sc->txq[q_index];

		if (sfxge_tx_queue_mbuf(txq, mbuf) != 0)
			continue;

		if (txq->blocked) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		/* Push the fragments to the hardware in batches. */
		if (txq->added - pushed[q_index] >= SFXGE_TX_BATCH) {
			efx_tx_qpush(txq->common, txq->added);
			pushed[q_index] = txq->added;
		}
	}

	for (q_index = 0; q_index < SFXGE_TXQ_NTYPES; q_index++) {
		txq = sc->txq[q_index];
		if (txq->added != pushed[q_index])
			efx_tx_qpush(txq->common, txq->added);
	}
}

void sfxge_if_start(struct ifnet *ifp)
{
	struct sfxge_softc *sc = ifp->if_softc;

	mtx_lock(&sc->tx_lock);
	sfxge_if_start_locked(ifp);
	mtx_unlock(&sc->tx_lock);
}

static inline void
sfxge_tx_qdpl_service(struct sfxge_txq *txq)
{
	struct sfxge_softc *sc = txq->sc;
	struct ifnet *ifp = sc->ifnet;

	mtx_assert(&sc->tx_lock, MA_OWNED);
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sfxge_if_start_locked(ifp);
	mtx_unlock(&sc->tx_lock);
}

#endif /* SFXGE_HAVE_MQ */
/*
 * Software "TSO".  Not quite as good as doing it in hardware, but
 * still faster than segmenting in the stack.
 */
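
/*
 * Editorial sketch of the slicing: each output packet is header_len
 * bytes of copied-and-fixed-up headers followed by tso_segsz bytes of
 * payload (full_packet_size below).  For instance, with 66 bytes of
 * Ethernet + IP + TCP headers and a 1448-byte MSS, a 14480-byte TSO
 * payload becomes ten 1514-byte packets on the wire.
 */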
struct sfxge_tso_state {
	/* Output position */
	unsigned out_len;	/* Remaining length in current segment */
	unsigned seqnum;	/* Current sequence number */
	unsigned packet_space;	/* Remaining space in current packet */

	/* Input position */
	unsigned dma_seg_i;	/* Current DMA segment number */
	uint64_t dma_addr;	/* DMA address of current position */
	unsigned in_len;	/* Remaining length in current mbuf */

	const struct mbuf *mbuf; /* Input mbuf (head of chain) */
	u_short protocol;	/* Network protocol (after VLAN decap) */
	ssize_t nh_off;		/* Offset of network header */
	ssize_t tcph_off;	/* Offset of TCP header */
	unsigned header_len;	/* Number of bytes of header */
	int full_packet_size;	/* Number of bytes to put in each outgoing
				 * segment */
};
static inline const struct ip *tso_iph(const struct sfxge_tso_state *tso)
{
	KASSERT(tso->protocol == htons(ETHERTYPE_IP),
		("tso_iph() in non-IPv4 state"));
	return (const struct ip *)(tso->mbuf->m_data + tso->nh_off);
}
static inline const struct ip6_hdr *tso_ip6h(const struct sfxge_tso_state *tso)
{
	KASSERT(tso->protocol == htons(ETHERTYPE_IPV6),
		("tso_ip6h() in non-IPv6 state"));
	return (const struct ip6_hdr *)(tso->mbuf->m_data + tso->nh_off);
}
static inline const struct tcphdr *tso_tcph(const struct sfxge_tso_state *tso)
{
	return (const struct tcphdr *)(tso->mbuf->m_data + tso->tcph_off);
}
/* Size of preallocated TSO header buffers.  Larger blocks must be
 * allocated from the heap.
 */
#define	TSOH_STD_SIZE	128

/* At most half the descriptors in the queue at any time will refer to
 * a TSO header buffer, since they must always be followed by a
 * payload descriptor referring to an mbuf.
 */
#define	TSOH_COUNT	(SFXGE_NDESCS / 2u)
#define	TSOH_PER_PAGE	(PAGE_SIZE / TSOH_STD_SIZE)
#define	TSOH_PAGE_COUNT	((TSOH_COUNT + TSOH_PER_PAGE - 1) / TSOH_PER_PAGE)
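
/*
 * Worked example (editorial, assuming a 4 KiB PAGE_SIZE and an
 * illustrative SFXGE_NDESCS of 1024): each page holds 4096 / 128 = 32
 * standard header buffers, TSOH_COUNT is 512, and TSOH_PAGE_COUNT
 * rounds up to 16 preallocated pages per transmit queue.
 */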
static int tso_init(struct sfxge_txq *txq)
{
	struct sfxge_softc *sc = txq->sc;
	int i, rc;

	/* Allocate TSO header buffers */
	txq->tsoh_buffer = malloc(TSOH_PAGE_COUNT * sizeof(txq->tsoh_buffer[0]),
				  M_SFXGE, M_WAITOK);

	for (i = 0; i < TSOH_PAGE_COUNT; i++) {
		rc = sfxge_dma_alloc(sc, PAGE_SIZE, &txq->tsoh_buffer[i]);
		if (rc != 0)
			goto fail;
	}

	return (0);

fail:
	while (i-- > 0)
		sfxge_dma_free(&txq->tsoh_buffer[i]);
	free(txq->tsoh_buffer, M_SFXGE);
	txq->tsoh_buffer = NULL;
	return (rc);
}
static void tso_fini(struct sfxge_txq *txq)
{
	int i;

	if (txq->tsoh_buffer != NULL) {
		for (i = 0; i < TSOH_PAGE_COUNT; i++)
			sfxge_dma_free(&txq->tsoh_buffer[i]);
		free(txq->tsoh_buffer, M_SFXGE);
	}
}
static void tso_start(struct sfxge_tso_state *tso, struct mbuf *mbuf)
{
	struct ether_header *eh = mtod(mbuf, struct ether_header *);

	tso->mbuf = mbuf;

	/* Find network protocol and header */
	tso->protocol = eh->ether_type;
	if (tso->protocol == htons(ETHERTYPE_VLAN)) {
		struct ether_vlan_header *veh =
			mtod(mbuf, struct ether_vlan_header *);
		tso->protocol = veh->evl_proto;
		tso->nh_off = sizeof(*veh);
	} else {
		tso->nh_off = sizeof(*eh);
	}

	/* Find TCP header */
	if (tso->protocol == htons(ETHERTYPE_IP)) {
		KASSERT(tso_iph(tso)->ip_p == IPPROTO_TCP,
			("TSO required on non-TCP packet"));
		tso->tcph_off = tso->nh_off + 4 * tso_iph(tso)->ip_hl;
	} else {
		KASSERT(tso->protocol == htons(ETHERTYPE_IPV6),
			("TSO required on non-IP packet"));
		KASSERT(tso_ip6h(tso)->ip6_nxt == IPPROTO_TCP,
			("TSO required on non-TCP packet"));
		tso->tcph_off = tso->nh_off + sizeof(struct ip6_hdr);
	}

	/* We assume all headers are linear in the head mbuf */
	tso->header_len = tso->tcph_off + 4 * tso_tcph(tso)->th_off;
	KASSERT(tso->header_len <= mbuf->m_len, ("packet headers fragmented"));
	tso->full_packet_size = tso->header_len + mbuf->m_pkthdr.tso_segsz;

	tso->seqnum = ntohl(tso_tcph(tso)->th_seq);

	/* These flags must not be duplicated */
	KASSERT(!(tso_tcph(tso)->th_flags & (TH_URG | TH_SYN | TH_RST)),
		("incompatible TCP flag on TSO packet"));

	tso->out_len = mbuf->m_pkthdr.len - tso->header_len;
}
/*
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 *
 * Form descriptors for the current fragment, until we reach the end
 * of the fragment or end-of-packet.
 */
static void tso_fill_packet_with_fragment(struct sfxge_txq *txq,
					  struct sfxge_tso_state *tso)
{
	efx_buffer_t *desc;
	int n;

	if (tso->in_len == 0 || tso->packet_space == 0)
		return;

	KASSERT(tso->in_len > 0, ("TSO input length went negative"));
	KASSERT(tso->packet_space > 0, ("TSO packet space went negative"));

	n = min(tso->in_len, tso->packet_space);

	tso->packet_space -= n;
	tso->out_len -= n;
	tso->in_len -= n;

	desc = &txq->pend_desc[txq->n_pend_desc++];
	desc->eb_addr = tso->dma_addr;
	desc->eb_size = n;
	desc->eb_eop = tso->out_len == 0 || tso->packet_space == 0;

	tso->dma_addr += n;
}
/* Callback from bus_dmamap_load() for long TSO headers. */
static void tso_map_long_header(void *dma_addr_ret,
				bus_dma_segment_t *segs, int nseg,
				int error)
{
	*(uint64_t *)dma_addr_ret = ((__predict_true(error == 0) &&
				      __predict_true(nseg == 1)) ?
				     segs->ds_addr : 0);
}
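
/*
 * Editorial note: a TSO header must be covered by a single descriptor,
 * so the callback above reports a zero DMA address whenever the load
 * failed or produced more than one segment; the caller below treats
 * that as a failure, unloading the map and returning EINVAL when the
 * load itself succeeded.
 */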
/*
 * tso_start_new_packet - generate a new header and prepare for the new packet
 *
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, or an error code if it failed to allocate a header.
 */
static int tso_start_new_packet(struct sfxge_txq *txq,
				struct sfxge_tso_state *tso,
				unsigned int id)
{
	struct sfxge_tx_mapping *stmp = &txq->stmp[id];
	struct tcphdr *tsoh_th;
	unsigned ip_length;
	caddr_t header;
	uint64_t dma_addr;
	bus_dmamap_t map;
	efx_buffer_t *desc;
	int rc;

	/* Allocate a DMA-mapped header buffer. */
	if (__predict_true(tso->header_len <= TSOH_STD_SIZE)) {
		unsigned int page_index = (id / 2) / TSOH_PER_PAGE;
		unsigned int buf_index = (id / 2) % TSOH_PER_PAGE;

		header = (txq->tsoh_buffer[page_index].esm_base +
			  buf_index * TSOH_STD_SIZE);
		dma_addr = (txq->tsoh_buffer[page_index].esm_addr +
			    buf_index * TSOH_STD_SIZE);
		map = txq->tsoh_buffer[page_index].esm_map;

		stmp->flags = 0;
	} else {
		/* We cannot use bus_dmamem_alloc() as that may sleep */
		header = malloc(tso->header_len, M_SFXGE, M_NOWAIT);
		if (__predict_false(!header))
			return (ENOMEM);
		rc = bus_dmamap_load(txq->packet_dma_tag, stmp->map,
				     header, tso->header_len,
				     tso_map_long_header, &dma_addr,
				     BUS_DMA_NOWAIT);
		if (__predict_false(dma_addr == 0)) {
			if (rc == 0) {
				/* Succeeded but got >1 segment */
				bus_dmamap_unload(txq->packet_dma_tag,
						  stmp->map);
				rc = EINVAL;
			}
			free(header, M_SFXGE);
			return (rc);
		}
		map = stmp->map;

		txq->tso_long_headers++;
		stmp->u.heap_buf = header;
		stmp->flags = TX_BUF_UNMAP;
	}

	tsoh_th = (struct tcphdr *)(header + tso->tcph_off);

	/* Copy and update the headers. */
	memcpy(header, tso->mbuf->m_data, tso->header_len);

	tsoh_th->th_seq = htonl(tso->seqnum);
	tso->seqnum += tso->mbuf->m_pkthdr.tso_segsz;
	if (tso->out_len > tso->mbuf->m_pkthdr.tso_segsz) {
		/* This packet will not finish the TSO burst. */
		ip_length = tso->full_packet_size - tso->nh_off;
		tsoh_th->th_flags &= ~(TH_FIN | TH_PUSH);
	} else {
		/* This packet will be the last in the TSO burst. */
		ip_length = tso->header_len - tso->nh_off + tso->out_len;
	}

	if (tso->protocol == htons(ETHERTYPE_IP)) {
		struct ip *tsoh_iph = (struct ip *)(header + tso->nh_off);
		tsoh_iph->ip_len = htons(ip_length);
		/* XXX We should increment ip_id, but FreeBSD doesn't
		 * currently allocate extra IDs for multiple segments.
		 */
	} else {
		struct ip6_hdr *tsoh_iph =
			(struct ip6_hdr *)(header + tso->nh_off);
		tsoh_iph->ip6_plen = htons(ip_length - sizeof(*tsoh_iph));
	}

	/* Make the header visible to the hardware. */
	bus_dmamap_sync(txq->packet_dma_tag, map, BUS_DMASYNC_PREWRITE);

	tso->packet_space = tso->mbuf->m_pkthdr.tso_segsz;
	txq->tso_packets++;

	/* Form a descriptor for this header. */
	desc = &txq->pend_desc[txq->n_pend_desc++];
	desc->eb_addr = dma_addr;
	desc->eb_size = tso->header_len;
	desc->eb_eop = 0;

	return (0);
}
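
/*
 * Editorial summary of the contract below: sfxge_tx_queue_tso() emits
 * header and payload descriptors for every segment of the TSO burst
 * and returns the stmp[] index to associate with the final descriptor
 * (so the caller can attach the mbuf's DMA map there), or -1 if no
 * header buffer could be allocated for the first packet.
 */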
static int
sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf,
		   const bus_dma_segment_t *dma_seg, int n_dma_seg)
{
	struct sfxge_tso_state tso;
	unsigned int id, next_id;

	tso_start(&tso, mbuf);

	/* Grab the first payload fragment. */
	if (dma_seg->ds_len == tso.header_len) {
		--n_dma_seg;
		KASSERT(n_dma_seg, ("no payload found in TSO packet"));
		++dma_seg;
		tso.in_len = dma_seg->ds_len;
		tso.dma_addr = dma_seg->ds_addr;
	} else {
		tso.in_len = dma_seg->ds_len - tso.header_len;
		tso.dma_addr = dma_seg->ds_addr + tso.header_len;
	}

	id = txq->added & (SFXGE_NDESCS - 1);
	if (__predict_false(tso_start_new_packet(txq, &tso, id)))
		return (-1);

	while (1) {
		id = (id + 1) & (SFXGE_NDESCS - 1);
		tso_fill_packet_with_fragment(txq, &tso);

		/* Move onto the next fragment? */
		if (tso.in_len == 0) {
			--n_dma_seg;
			if (n_dma_seg == 0)
				break;
			++dma_seg;
			tso.in_len = dma_seg->ds_len;
			tso.dma_addr = dma_seg->ds_addr;
		}

		/* End of packet? */
		if (tso.packet_space == 0) {
			/* If the queue is now full due to tiny MSS,
			 * or we can't create another header, discard
			 * the remainder of the input mbuf but do not
			 * roll back the work we have done.
			 */
			if (txq->n_pend_desc >
			    SFXGE_TSO_MAX_DESC - (1 + SFXGE_TX_MAPPING_MAX_SEG))
				break;
			next_id = (id + 1) & (SFXGE_NDESCS - 1);
			if (__predict_false(tso_start_new_packet(txq, &tso,
								 next_id)))
				break;
			id = next_id;
		}
	}

	txq->tso_bursts++;
	return (id);
}
static void
sfxge_tx_qunblock(struct sfxge_txq *txq)
{
	struct sfxge_softc *sc;
	struct sfxge_evq *evq;

	sc = txq->sc;
	evq = sc->evq[txq->evq_index];

	mtx_assert(&evq->lock, MA_OWNED);

	if (txq->init_state != SFXGE_TXQ_STARTED)
		return;

	mtx_lock(SFXGE_TXQ_LOCK(txq));

	if (txq->blocked) {
		unsigned int level;

		level = txq->added - txq->completed;
		if (level <= SFXGE_TXQ_UNBLOCK_LEVEL)
			txq->blocked = 0;
	}

	sfxge_tx_qdpl_service(txq);
	/* note: lock has been dropped */
}
void
sfxge_tx_qflush_done(struct sfxge_txq *txq)
{

	txq->flush_state = SFXGE_FLUSH_DONE;
}
static void
sfxge_tx_qstop(struct sfxge_softc *sc, unsigned int index)
{
	struct sfxge_txq *txq;
	struct sfxge_evq *evq;
	unsigned int count;

	txq = sc->txq[index];
	evq = sc->evq[txq->evq_index];

	mtx_lock(SFXGE_TXQ_LOCK(txq));

	KASSERT(txq->init_state == SFXGE_TXQ_STARTED,
	    ("txq->init_state != SFXGE_TXQ_STARTED"));

	txq->init_state = SFXGE_TXQ_INITIALIZED;
	txq->flush_state = SFXGE_FLUSH_PENDING;

	/* Flush the transmit queue. */
	efx_tx_qflush(txq->common);

	mtx_unlock(SFXGE_TXQ_LOCK(txq));

	count = 0;
	do {
		/* Spin for 100ms. */
		DELAY(100000);

		if (txq->flush_state != SFXGE_FLUSH_PENDING)
			break;
	} while (++count < 20);

	mtx_lock(&evq->lock);
	mtx_lock(SFXGE_TXQ_LOCK(txq));

	KASSERT(txq->flush_state != SFXGE_FLUSH_FAILED,
	    ("txq->flush_state == SFXGE_FLUSH_FAILED"));

	txq->flush_state = SFXGE_FLUSH_DONE;

	txq->blocked = 0;
	txq->pending = txq->added;

	sfxge_tx_qcomplete(txq);
	KASSERT(txq->completed == txq->added,
	    ("txq->completed != txq->added"));

	sfxge_tx_qreap(txq);
	KASSERT(txq->reaped == txq->completed,
	    ("txq->reaped != txq->completed"));

	txq->added = 0;
	txq->pending = 0;
	txq->completed = 0;
	txq->reaped = 0;

	/* Destroy the common code transmit queue. */
	efx_tx_qdestroy(txq->common);
	txq->common = NULL;

	efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
	    EFX_TXQ_NBUFS(SFXGE_NDESCS));

	mtx_unlock(&evq->lock);
	mtx_unlock(SFXGE_TXQ_LOCK(txq));
}
static int
sfxge_tx_qstart(struct sfxge_softc *sc, unsigned int index)
{
	struct sfxge_txq *txq;
	efsys_mem_t *esmp;
	uint16_t flags;
	struct sfxge_evq *evq;
	int rc;

	txq = sc->txq[index];
	esmp = &txq->mem;
	evq = sc->evq[txq->evq_index];

	KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED,
	    ("txq->init_state != SFXGE_TXQ_INITIALIZED"));
	KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
	    ("evq->init_state != SFXGE_EVQ_STARTED"));

	/* Program the buffer table. */
	if ((rc = efx_sram_buf_tbl_set(sc->enp, txq->buf_base_id, esmp,
	    EFX_TXQ_NBUFS(SFXGE_NDESCS))) != 0)
		return (rc);

	/* Determine the kind of queue we are creating. */
	switch (txq->type) {
	case SFXGE_TXQ_NON_CKSUM:
		flags = 0;
		break;
	case SFXGE_TXQ_IP_CKSUM:
		flags = EFX_CKSUM_IPV4;
		break;
	case SFXGE_TXQ_IP_TCP_UDP_CKSUM:
		flags = EFX_CKSUM_IPV4 | EFX_CKSUM_TCPUDP;
		break;
	default:
		KASSERT(0, ("Impossible TX queue"));
		flags = 0;
		break;
	}

	/* Create the common code transmit queue. */
	if ((rc = efx_tx_qcreate(sc->enp, index, index, esmp,
	    SFXGE_NDESCS, txq->buf_base_id, flags, evq->common,
	    &txq->common)) != 0)
		goto fail;

	mtx_lock(SFXGE_TXQ_LOCK(txq));

	/* Enable the transmit queue. */
	efx_tx_qenable(txq->common);

	txq->init_state = SFXGE_TXQ_STARTED;

	mtx_unlock(SFXGE_TXQ_LOCK(txq));

	return (0);

fail:
	efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id,
	    EFX_TXQ_NBUFS(SFXGE_NDESCS));
	return (rc);
}
void
sfxge_tx_stop(struct sfxge_softc *sc)
{
	const efx_nic_cfg_t *encp;
	int index;

	index = SFXGE_TX_SCALE(sc);
	while (--index >= 0)
		sfxge_tx_qstop(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index);

	sfxge_tx_qstop(sc, SFXGE_TXQ_IP_CKSUM);

	encp = efx_nic_cfg_get(sc->enp);
	sfxge_tx_qstop(sc, SFXGE_TXQ_NON_CKSUM);

	/* Tear down the transmit module */
	efx_tx_fini(sc->enp);
}
int
sfxge_tx_start(struct sfxge_softc *sc)
{
	int index;
	int rc;

	/* Initialize the common code transmit module. */
	if ((rc = efx_tx_init(sc->enp)) != 0)
		return (rc);

	if ((rc = sfxge_tx_qstart(sc, SFXGE_TXQ_NON_CKSUM)) != 0)
		goto fail;

	if ((rc = sfxge_tx_qstart(sc, SFXGE_TXQ_IP_CKSUM)) != 0)
		goto fail2;

	for (index = 0; index < SFXGE_TX_SCALE(sc); index++) {
		if ((rc = sfxge_tx_qstart(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM +
		    index)) != 0)
			goto fail3;
	}

	return (0);

fail3:
	while (--index >= 0)
		sfxge_tx_qstop(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index);

	sfxge_tx_qstop(sc, SFXGE_TXQ_IP_CKSUM);

fail2:
	sfxge_tx_qstop(sc, SFXGE_TXQ_NON_CKSUM);

fail:
	efx_tx_fini(sc->enp);

	return (rc);
}
/*
 * Destroy a transmit queue.
 */
static void
sfxge_tx_qfini(struct sfxge_softc *sc, unsigned int index)
{
	struct sfxge_txq *txq;
	unsigned int nmaps = SFXGE_NDESCS;

	txq = sc->txq[index];

	KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED,
	    ("txq->init_state != SFXGE_TXQ_INITIALIZED"));

	if (txq->type == SFXGE_TXQ_IP_TCP_UDP_CKSUM)
		tso_fini(txq);

	/* Free the context arrays. */
	free(txq->pend_desc, M_SFXGE);
	while (nmaps-- != 0)
		bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map);
	free(txq->stmp, M_SFXGE);

	/* Release DMA memory mapping. */
	sfxge_dma_free(&txq->mem);

	sc->txq[index] = NULL;

#ifdef SFXGE_HAVE_MQ
	mtx_destroy(&txq->lock);
#endif

	free(txq, M_SFXGE);
}
static int
sfxge_tx_qinit(struct sfxge_softc *sc, unsigned int txq_index,
	       enum sfxge_txq_type type, unsigned int evq_index)
{
	struct sfxge_txq *txq;
	struct sfxge_evq *evq;
#ifdef SFXGE_HAVE_MQ
	struct sfxge_tx_dpl *stdp;
#endif
	efsys_mem_t *esmp;
	unsigned int nmaps;
	int rc;

	txq = malloc(sizeof(struct sfxge_txq), M_SFXGE, M_ZERO | M_WAITOK);
	txq->sc = sc;

	sc->txq[txq_index] = txq;
	esmp = &txq->mem;

	evq = sc->evq[evq_index];

	/* Allocate and zero DMA space for the descriptor ring. */
	if ((rc = sfxge_dma_alloc(sc, EFX_TXQ_SIZE(SFXGE_NDESCS), esmp)) != 0)
		return (rc);
	(void)memset(esmp->esm_base, 0, EFX_TXQ_SIZE(SFXGE_NDESCS));

	/* Allocate buffer table entries. */
	sfxge_sram_buf_tbl_alloc(sc, EFX_TXQ_NBUFS(SFXGE_NDESCS),
				 &txq->buf_base_id);

	/* Create a DMA tag for packet mappings. */
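	/*
	 * Editorial note on the parameters below: mappings are
	 * byte-aligned with a 0x1000-byte boundary, limited to 0x11000
	 * bytes in at most SFXGE_TX_MAPPING_MAX_SEG segments of up to
	 * 0x1000 bytes each; the 0x3FFFFFFFFFFF (46-bit) low address
	 * limit presumably matches the controller's DMA addressing
	 * range.
	 */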
	if (bus_dma_tag_create(sc->parent_dma_tag, 1, 0x1000,
	    MIN(0x3FFFFFFFFFFFUL, BUS_SPACE_MAXADDR), BUS_SPACE_MAXADDR, NULL,
	    NULL, 0x11000, SFXGE_TX_MAPPING_MAX_SEG, 0x1000, 0, NULL, NULL,
	    &txq->packet_dma_tag) != 0) {
		device_printf(sc->dev, "Couldn't allocate txq DMA tag\n");
		rc = ENOMEM;
		goto fail;
	}

	/* Allocate pending descriptor array for batching writes. */
	txq->pend_desc = malloc(sizeof(efx_buffer_t) * SFXGE_NDESCS,
				M_SFXGE, M_ZERO | M_WAITOK);

	/* Allocate and initialise mbuf DMA mapping array. */
	txq->stmp = malloc(sizeof(struct sfxge_tx_mapping) * SFXGE_NDESCS,
			   M_SFXGE, M_ZERO | M_WAITOK);
	for (nmaps = 0; nmaps < SFXGE_NDESCS; nmaps++) {
		rc = bus_dmamap_create(txq->packet_dma_tag, 0,
				       &txq->stmp[nmaps].map);
		if (rc != 0)
			goto fail2;
	}

	if (type == SFXGE_TXQ_IP_TCP_UDP_CKSUM &&
	    (rc = tso_init(txq)) != 0)
		goto fail3;

#ifdef SFXGE_HAVE_MQ
	/* Initialize the deferred packet list. */
	stdp = &txq->dpl;
	stdp->std_getp = &stdp->std_get;

	mtx_init(&txq->lock, "txq", NULL, MTX_DEF);
#endif

	txq->type = type;
	txq->evq_index = evq_index;
	txq->txq_index = txq_index;
	txq->init_state = SFXGE_TXQ_INITIALIZED;

	return (0);

fail3:
	free(txq->pend_desc, M_SFXGE);
fail2:
	while (nmaps-- != 0)
		bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map);
	free(txq->stmp, M_SFXGE);
	bus_dma_tag_destroy(txq->packet_dma_tag);

fail:
	sfxge_dma_free(esmp);

	return (rc);
}
static const struct {
	const char *name;
	size_t offset;
} sfxge_tx_stats[] = {
#define	SFXGE_TX_STAT(name, member) \
	{ #name, offsetof(struct sfxge_txq, member) }
	SFXGE_TX_STAT(tso_bursts, tso_bursts),
	SFXGE_TX_STAT(tso_packets, tso_packets),
	SFXGE_TX_STAT(tso_long_headers, tso_long_headers),
	SFXGE_TX_STAT(tx_collapses, collapses),
	SFXGE_TX_STAT(tx_drops, drops),
};
static int
sfxge_tx_stat_handler(SYSCTL_HANDLER_ARGS)
{
	struct sfxge_softc *sc = arg1;
	unsigned int id = arg2;
	unsigned long sum;
	unsigned int index;

	/* Sum across all TX queues */
	sum = 0;
	for (index = 0;
	     index < SFXGE_TXQ_IP_TCP_UDP_CKSUM + SFXGE_TX_SCALE(sc);
	     index++)
		sum += *(unsigned long *)((caddr_t)sc->txq[index] +
					  sfxge_tx_stats[id].offset);

	return (SYSCTL_OUT(req, &sum, sizeof(sum)));
}
static void
sfxge_tx_stat_init(struct sfxge_softc *sc)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
	struct sysctl_oid_list *stat_list;
	unsigned int id;

	stat_list = SYSCTL_CHILDREN(sc->stats_node);

	for (id = 0;
	     id < sizeof(sfxge_tx_stats) / sizeof(sfxge_tx_stats[0]);
	     id++) {
		SYSCTL_ADD_PROC(
			ctx, stat_list,
			OID_AUTO, sfxge_tx_stats[id].name,
			CTLTYPE_ULONG|CTLFLAG_RD,
			sc, id, sfxge_tx_stat_handler, "LU",
			"");
	}
}
void
sfxge_tx_fini(struct sfxge_softc *sc)
{
	int index;

	index = SFXGE_TX_SCALE(sc);
	while (--index >= 0)
		sfxge_tx_qfini(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index);

	sfxge_tx_qfini(sc, SFXGE_TXQ_IP_CKSUM);
	sfxge_tx_qfini(sc, SFXGE_TXQ_NON_CKSUM);
}
int
sfxge_tx_init(struct sfxge_softc *sc)
{
	struct sfxge_intr *intr;
	int index;
	int rc;

	intr = &sc->intr;

	KASSERT(intr->state == SFXGE_INTR_INITIALIZED,
	    ("intr->state != SFXGE_INTR_INITIALIZED"));

	/* Initialize the transmit queues */
	if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_NON_CKSUM,
	    SFXGE_TXQ_NON_CKSUM, 0)) != 0)
		goto fail;

	if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_IP_CKSUM,
	    SFXGE_TXQ_IP_CKSUM, 0)) != 0)
		goto fail2;

	for (index = 0; index < SFXGE_TX_SCALE(sc); index++) {
		if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index,
		    SFXGE_TXQ_IP_TCP_UDP_CKSUM, index)) != 0)
			goto fail3;
	}

	sfxge_tx_stat_init(sc);

	return (0);

fail3:
	sfxge_tx_qfini(sc, SFXGE_TXQ_IP_CKSUM);

	while (--index >= 0)
		sfxge_tx_qfini(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index);

fail2:
	sfxge_tx_qfini(sc, SFXGE_TXQ_NON_CKSUM);

fail:
	return (rc);
}