2 * Copyright (c) 2010-2011 Solarflare Communications, Inc.
5 * This software was developed in part by Philip Paeps under contract for
6 * Solarflare Communications, Inc.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 #include <netinet/in.h>
36 #include <netinet/ip.h>
37 #include <netinet/tcp.h>
/*
 * Maximum size of a TSO packet in bytes.  65535 is the largest value
 * representable in the 16-bit IP total-length field.
 */
#define SFXGE_TSO_MAX_SIZE (65535)
/*
 * Maximum number of segments to be created for a TSO packet.
 * Sized by allowing for a reasonable minimum MSS of 512 bytes.
 */
#define SFXGE_TSO_MAX_SEGS howmany(SFXGE_TSO_MAX_SIZE, 512)
/*
 * Maximum number of DMA segments needed to map an mbuf chain.  With
 * TSO, the mbuf length may be just over 64K, divided into 2K mbuf
 * clusters.  (The chain could be longer than this initially, but can
 * be shortened with m_collapse().)  The leading "1 +" accounts for a
 * segment that straddles a cluster boundary.
 */
#define SFXGE_TX_MAPPING_MAX_SEG \
	(1 + howmany(SFXGE_TSO_MAX_SIZE, MCLBYTES))
57 * Buffer mapping flags.
59 * Buffers and DMA mappings must be freed when the last descriptor
60 * referring to them is completed. Set the TX_BUF_UNMAP and
61 * TX_BUF_MBUF flags on the last descriptor generated for an mbuf
62 * chain. Set only the TX_BUF_UNMAP flag on a descriptor referring to
65 enum sfxge_tx_buf_flags {
71 * Buffer mapping information for descriptors in flight.
73 struct sfxge_tx_mapping {
79 enum sfxge_tx_buf_flags flags;
/* Default limit on packets held on the deferred packet "get" list. */
#define SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT (64 * 1024)
/* Default limit on non-TCP packets held on the "get" list. */
#define SFXGE_TX_DPL_GET_NON_TCP_PKT_LIMIT_DEFAULT 1024
/* Default limit on packets held on the "put" list. */
#define SFXGE_TX_DPL_PUT_PKT_LIMIT_DEFAULT 1024
87 * Deferred packet list.
90 unsigned int std_get_max; /* Maximum number of packets
92 unsigned int std_get_non_tcp_max; /* Maximum number
95 unsigned int std_put_max; /* Maximum number of packets
97 uintptr_t std_put; /* Head of put list. */
98 struct mbuf *std_get; /* Head of get list. */
99 struct mbuf **std_getp; /* Tail of get list. */
100 unsigned int std_get_count; /* Packets in get list. */
101 unsigned int std_get_non_tcp_count; /* Non-TCP packets
103 unsigned int std_get_hiwat; /* Packets in get list
105 unsigned int std_put_hiwat; /* Packets in put list
/* Size (bytes) of a TX buffer; presumably the per-descriptor staging buffer — confirm in sfxge_tx.c. */
#define SFXGE_TX_BUFFER_SIZE 0x400
/* Size (bytes) reserved for packet headers (e.g. copied TSO headers) — confirm in sfxge_tx.c. */
#define SFXGE_TX_HEADER_SIZE 0x100
/* Packets no larger than this are copied rather than DMA-mapped — confirm in sfxge_tx.c. */
#define SFXGE_TX_COPY_THRESHOLD 0x200
114 enum sfxge_txq_state {
115 SFXGE_TXQ_UNINITIALIZED = 0,
116 SFXGE_TXQ_INITIALIZED,
120 enum sfxge_txq_type {
121 SFXGE_TXQ_NON_CKSUM = 0,
123 SFXGE_TXQ_IP_TCP_UDP_CKSUM,
/*
 * A blocked TX queue is unblocked once free descriptors reach a
 * quarter of the queue's usable limit.
 */
#define SFXGE_TXQ_UNBLOCK_LEVEL(_entries) (EFX_TXQ_LIMIT(_entries) / 4)

/* Batch size for TX processing; assumed to bound work per pass — TODO confirm against sfxge_tx.c. */
#define SFXGE_TX_BATCH 64
/*
 * Initialise a TX queue's mutex, naming it "<ifname>:txq<index>" so the
 * owner is identifiable in lock diagnostics.  The name is stored in the
 * queue's lock_name field because mtx_init() does not copy it.
 */
#define SFXGE_TXQ_LOCK_INIT(_txq, _ifname, _txq_index) \
	struct sfxge_txq *__txq = (_txq); \
	snprintf((__txq)->lock_name, \
	    sizeof((__txq)->lock_name), \
	    "%s:txq%u", (_ifname), (_txq_index)); \
	mtx_init(&(__txq)->lock, (__txq)->lock_name, \
#define SFXGE_TXQ_LOCK_DESTROY(_txq) \
	mtx_destroy(&(_txq)->lock)
/* Acquire the TX queue lock (sleeps until available). */
#define SFXGE_TXQ_LOCK(_txq) \
	mtx_lock(&(_txq)->lock)
/* Attempt to acquire the TX queue lock without blocking. */
#define SFXGE_TXQ_TRYLOCK(_txq) \
	mtx_trylock(&(_txq)->lock)
/* Release the TX queue lock. */
#define SFXGE_TXQ_UNLOCK(_txq) \
	mtx_unlock(&(_txq)->lock)
/* Assert that the current thread holds the TX queue lock. */
#define SFXGE_TXQ_LOCK_ASSERT_OWNED(_txq) \
	mtx_assert(&(_txq)->lock, MA_OWNED)
154 /* The following fields should be written very rarely */
155 struct sfxge_softc *sc;
156 enum sfxge_txq_state init_state;
157 enum sfxge_flush_state flush_state;
158 enum sfxge_txq_type type;
159 unsigned int txq_index;
160 unsigned int evq_index;
162 unsigned int buf_base_id;
163 unsigned int entries;
164 unsigned int ptr_mask;
166 struct sfxge_tx_mapping *stmp; /* Packets in flight. */
167 bus_dma_tag_t packet_dma_tag;
168 efx_buffer_t *pend_desc;
171 efsys_mem_t *tsoh_buffer;
173 char lock_name[SFXGE_LOCK_NAME_MAX];
175 /* This field changes more often and is read regularly on both
176 * the initiation and completion paths
178 int blocked __aligned(CACHE_LINE_SIZE);
180 /* The following fields change more often, and are used mostly
181 * on the initiation path
183 struct mtx lock __aligned(CACHE_LINE_SIZE);
184 struct sfxge_tx_dpl dpl; /* Deferred packet list. */
185 unsigned int n_pend_desc;
189 unsigned long tso_bursts;
190 unsigned long tso_packets;
191 unsigned long tso_long_headers;
192 unsigned long collapses;
194 unsigned long get_overflow;
195 unsigned long get_non_tcp_overflow;
196 unsigned long put_overflow;
197 unsigned long netdown_drops;
198 unsigned long tso_pdrop_too_many;
199 unsigned long tso_pdrop_no_rsrc;
201 /* The following fields change more often, and are used mostly
202 * on the completion path
204 unsigned int pending __aligned(CACHE_LINE_SIZE);
205 unsigned int completed;
206 struct sfxge_txq *next;
/* Queue a packet for transmission on the given TX queue. */
extern int sfxge_tx_packet_add(struct sfxge_txq *, struct mbuf *);

/* Driver-lifetime TX subsystem setup and teardown. */
extern int sfxge_tx_init(struct sfxge_softc *sc);
extern void sfxge_tx_fini(struct sfxge_softc *sc);

/* Start and stop TX queues when the interface is brought up or down. */
extern int sfxge_tx_start(struct sfxge_softc *sc);
extern void sfxge_tx_stop(struct sfxge_softc *sc);

/*
 * Called from event-queue context: process TX completions and handle
 * queue-flush-done notification respectively — see sfxge_ev.c callers.
 */
extern void sfxge_tx_qcomplete(struct sfxge_txq *txq, struct sfxge_evq *evq);
extern void sfxge_tx_qflush_done(struct sfxge_txq *txq);

/* ifnet entry points backing if_qflush and if_transmit. */
extern void sfxge_if_qflush(struct ifnet *ifp);
extern int sfxge_if_transmit(struct ifnet *ifp, struct mbuf *m);