/*-
 * Copyright (c) 2010-2011 Solarflare Communications, Inc.
 * All rights reserved.
 *
 * This software was developed in part by Philip Paeps under contract for
 * Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
/* Maximum number of DMA segments needed to map an mbuf chain.  With
 * TSO, the mbuf length may be just over 64K, divided into 2K mbuf
 * clusters.  (The chain could be longer than this initially, but can
 * be shortened with m_collapse().)
 *
 * The "K" units cancel: 64K / 2K == 64 / 2 == 32 clusters, plus one
 * segment for a partial leading cluster.
 */
#define SFXGE_TX_MAPPING_MAX_SEG (64 / 2 + 1)
/* Maximum number of DMA segments needed to map an output packet.  It
 * could overlap all mbufs in the chain and also require an extra
 * segment for a TSO header.
 */
#define SFXGE_TX_PACKET_MAX_SEG (SFXGE_TX_MAPPING_MAX_SEG + 1)
/*
 * Buffer mapping flags.
 *
 * Buffers and DMA mappings must be freed when the last descriptor
 * referring to them is completed.  Set the TX_BUF_UNMAP and
 * TX_BUF_MBUF flags on the last descriptor generated for an mbuf
 * chain.  Set only the TX_BUF_UNMAP flag on a descriptor referring to
 * a heap buffer.
 */
enum sfxge_tx_buf_flags {
	TX_BUF_UNMAP = 1,	/* DMA mapping must be unloaded. */
	TX_BUF_MBUF = 2,	/* mbuf chain must be freed. */
};
67 * Buffer mapping information for descriptors in flight.
69 struct sfxge_tx_mapping {
75 enum sfxge_tx_buf_flags flags;
/* Default limits on the number of packets serviced from the deferred
 * packet list's "get" and "put" sides in one pass.
 * NOTE(review): semantics inferred from names — confirm against sfxge_tx.c.
 */
#define SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT 64
#define SFXGE_TX_DPL_PUT_PKT_LIMIT_DEFAULT 64
/*
 * Deferred packet list.
 */
struct sfxge_tx_dpl {
	uintptr_t	std_put;	/* Head of put list. */
	struct mbuf	*std_get;	/* Head of get list. */
	struct mbuf	**std_getp;	/* Tail of get list. */
	unsigned int	std_count;	/* Count of packets. */
};
/* Sizes used when copying small packets or packet headers into driver
 * buffers.  NOTE(review): exact roles inferred from names — confirm
 * against the TX path implementation.
 */
#define SFXGE_TX_BUFFER_SIZE	0x400
#define SFXGE_TX_HEADER_SIZE	0x100
#define SFXGE_TX_COPY_THRESHOLD	0x200
/* Transmit queue lifecycle state. */
enum sfxge_txq_state {
	SFXGE_TXQ_UNINITIALIZED = 0,
	SFXGE_TXQ_INITIALIZED,
	SFXGE_TXQ_STARTED
};
/* Checksum-offload flavour of a transmit queue; one queue per type. */
enum sfxge_txq_type {
	SFXGE_TXQ_NON_CKSUM = 0,
	SFXGE_TXQ_IP_CKSUM,
	SFXGE_TXQ_IP_TCP_UDP_CKSUM,
	SFXGE_TXQ_NTYPES
};
/* Free-descriptor level at which a blocked queue is restarted. */
#define SFXGE_TXQ_UNBLOCK_LEVEL (EFX_TXQ_LIMIT(SFXGE_NDESCS) / 4)

/* Number of packets processed per doorbell/refill batch. */
#define SFXGE_TX_BATCH	64
/* With multiqueue (SFXGE_HAVE_MQ) each TXQ has its own lock and the TX
 * path scales with the number of interrupts; otherwise a single
 * driver-wide TX lock is used.  Without the conditional these macros
 * would be contradictory redefinitions.
 */
#ifdef SFXGE_HAVE_MQ
#define SFXGE_TXQ_LOCK(txq)	(&(txq)->lock)
#define SFXGE_TX_SCALE(sc)	((sc)->intr.n_alloc)
#else
#define SFXGE_TXQ_LOCK(txq)	(&(txq)->sc->tx_lock)
#define SFXGE_TX_SCALE(sc)	1
#endif
122 /* The following fields should be written very rarely */
123 struct sfxge_softc *sc;
124 enum sfxge_txq_state init_state;
125 enum sfxge_flush_state flush_state;
126 enum sfxge_txq_type type;
127 unsigned int txq_index;
128 unsigned int evq_index;
130 unsigned int buf_base_id;
132 struct sfxge_tx_mapping *stmp; /* Packets in flight. */
133 bus_dma_tag_t packet_dma_tag;
134 efx_buffer_t *pend_desc;
136 struct sfxge_txq *next;
138 efsys_mem_t *tsoh_buffer;
140 /* This field changes more often and is read regularly on both
141 * the initiation and completion paths
143 int blocked __aligned(CACHE_LINE_SIZE);
145 /* The following fields change more often, and are used mostly
146 * on the initiation path
149 struct mtx lock __aligned(CACHE_LINE_SIZE);
150 struct sfxge_tx_dpl dpl; /* Deferred packet list. */
151 unsigned int n_pend_desc;
153 unsigned int n_pend_desc __aligned(CACHE_LINE_SIZE);
158 unsigned long tso_bursts;
159 unsigned long tso_packets;
160 unsigned long tso_long_headers;
161 unsigned long collapses;
163 unsigned long early_drops;
165 /* The following fields change more often, and are used mostly
166 * on the completion path
168 unsigned int pending __aligned(CACHE_LINE_SIZE);
169 unsigned int completed;
/* Queue a packet on a transmit queue. */
extern int sfxge_tx_packet_add(struct sfxge_txq *, struct mbuf *);

/* TX subsystem attach/detach and start/stop. */
extern int sfxge_tx_init(struct sfxge_softc *sc);
extern void sfxge_tx_fini(struct sfxge_softc *sc);
extern int sfxge_tx_start(struct sfxge_softc *sc);
extern void sfxge_tx_stop(struct sfxge_softc *sc);

/* Event-queue callbacks for TX completion and flush-done events. */
extern void sfxge_tx_qcomplete(struct sfxge_txq *txq);
extern void sfxge_tx_qflush_done(struct sfxge_txq *txq);

/* ifnet entry points; the set used depends on multiqueue support. */
#ifdef SFXGE_HAVE_MQ
extern void sfxge_if_qflush(struct ifnet *ifp);
extern int sfxge_if_transmit(struct ifnet *ifp, struct mbuf *m);
#else
extern void sfxge_if_start(struct ifnet *ifp);
#endif