2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2011 Chelsio Communications, Inc.
6 * Written by: Navdeep Parhar <np@FreeBSD.org>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
34 #include "opt_inet6.h"
35 #include "opt_kern_tls.h"
36 #include "opt_ratelimit.h"
38 #include <sys/types.h>
39 #include <sys/eventhandler.h>
41 #include <sys/socket.h>
42 #include <sys/kernel.h>
44 #include <sys/malloc.h>
45 #include <sys/queue.h>
47 #include <sys/taskqueue.h>
49 #include <sys/sglist.h>
50 #include <sys/sysctl.h>
52 #include <sys/socketvar.h>
53 #include <sys/counter.h>
55 #include <net/ethernet.h>
57 #include <net/if_vlan_var.h>
58 #include <net/if_vxlan.h>
59 #include <netinet/in.h>
60 #include <netinet/ip.h>
61 #include <netinet/ip6.h>
62 #include <netinet/tcp.h>
63 #include <netinet/udp.h>
64 #include <machine/in_cksum.h>
65 #include <machine/md_var.h>
69 #include <machine/bus.h>
70 #include <sys/selinfo.h>
71 #include <net/if_var.h>
72 #include <net/netmap.h>
73 #include <dev/netmap/netmap_kern.h>
76 #include "common/common.h"
77 #include "common/t4_regs.h"
78 #include "common/t4_regs_values.h"
79 #include "common/t4_msg.h"
81 #include "t4_mp_ring.h"
83 #ifdef T4_PKT_TIMESTAMP
84 #define RX_COPY_THRESHOLD (MINCLSIZE - 8)
86 #define RX_COPY_THRESHOLD MINCLSIZE
89 /* Internal mbuf flags stored in PH_loc.eight[4]. */
91 #define MC_RAW_WR 0x02
95 * Ethernet frames are DMA'd at this byte offset into the freelist buffer.
96 * 0-7 are valid values.
98 static int fl_pktshift = 0;
99 SYSCTL_INT(_hw_cxgbe, OID_AUTO, fl_pktshift, CTLFLAG_RDTUN, &fl_pktshift, 0,
100 "payload DMA offset in rx buffer (bytes)");
103 * Pad ethernet payload up to this boundary.
104 * -1: driver should figure out a good value.
105 * 0: disable padding.
106 * Any power of 2 from 32 to 4096 (both inclusive) is also a valid value.
109 SYSCTL_INT(_hw_cxgbe, OID_AUTO, fl_pad, CTLFLAG_RDTUN, &fl_pad, 0,
110 "payload pad boundary (bytes)");
113 * Status page length.
114 * -1: driver should figure out a good value.
115 * 64 or 128 are the only other valid values.
117 static int spg_len = -1;
118 SYSCTL_INT(_hw_cxgbe, OID_AUTO, spg_len, CTLFLAG_RDTUN, &spg_len, 0,
119 "status page size (bytes)");
123 * -1: no congestion feedback (not recommended).
124 * 0: backpressure the channel instead of dropping packets right away.
125 * 1: no backpressure, drop packets for the congested queue immediately.
127 static int cong_drop = 0;
128 SYSCTL_INT(_hw_cxgbe, OID_AUTO, cong_drop, CTLFLAG_RDTUN, &cong_drop, 0,
129 "Congestion control for RX queues (0 = backpressure, 1 = drop");
132 * Deliver multiple frames in the same free list buffer if they fit.
133 * -1: let the driver decide whether to enable buffer packing or not.
134 * 0: disable buffer packing.
135 * 1: enable buffer packing.
137 static int buffer_packing = -1;
138 SYSCTL_INT(_hw_cxgbe, OID_AUTO, buffer_packing, CTLFLAG_RDTUN, &buffer_packing,
139 0, "Enable buffer packing");
142 * Start next frame in a packed buffer at this boundary.
143 * -1: driver should figure out a good value.
144 * T4: driver will ignore this and use the same value as fl_pad above.
145 * T5: 16, or a power of 2 from 64 to 4096 (both inclusive) is a valid value.
147 static int fl_pack = -1;
148 SYSCTL_INT(_hw_cxgbe, OID_AUTO, fl_pack, CTLFLAG_RDTUN, &fl_pack, 0,
149 "payload pack boundary (bytes)");
152 * Largest rx cluster size that the driver is allowed to allocate.
154 static int largest_rx_cluster = MJUM16BYTES;
155 SYSCTL_INT(_hw_cxgbe, OID_AUTO, largest_rx_cluster, CTLFLAG_RDTUN,
156 &largest_rx_cluster, 0, "Largest rx cluster (bytes)");
159 * Size of cluster allocation that's most likely to succeed. The driver will
160 * fall back to this size if it fails to allocate clusters larger than this.
162 static int safest_rx_cluster = PAGE_SIZE;
163 SYSCTL_INT(_hw_cxgbe, OID_AUTO, safest_rx_cluster, CTLFLAG_RDTUN,
164 &safest_rx_cluster, 0, "Safe rx cluster (bytes)");
168 * Knob to control TCP timestamp rewriting, and the granularity of the tick used
169 * for rewriting. -1 and 0-3 are all valid values.
170 * -1: hardware should leave the TCP timestamps alone.
176 static int tsclk = -1;
177 SYSCTL_INT(_hw_cxgbe, OID_AUTO, tsclk, CTLFLAG_RDTUN, &tsclk, 0,
178 "Control TCP timestamp rewriting when using pacing");
180 static int eo_max_backlog = 1024 * 1024;
181 SYSCTL_INT(_hw_cxgbe, OID_AUTO, eo_max_backlog, CTLFLAG_RDTUN, &eo_max_backlog,
182 0, "Maximum backlog of ratelimited data per flow");
186 * The interrupt holdoff timers are multiplied by this value on T6+.
187 * 1 and 3-17 (both inclusive) are legal values.
189 static int tscale = 1;
190 SYSCTL_INT(_hw_cxgbe, OID_AUTO, tscale, CTLFLAG_RDTUN, &tscale, 0,
191 "Interrupt holdoff timer scale on T6+");
194 * Number of LRO entries in the lro_ctrl structure per rx queue.
196 static int lro_entries = TCP_LRO_ENTRIES;
197 SYSCTL_INT(_hw_cxgbe, OID_AUTO, lro_entries, CTLFLAG_RDTUN, &lro_entries, 0,
198 "Number of LRO entries per RX queue");
201 * This enables presorting of frames before they're fed into tcp_lro_rx.
203 static int lro_mbufs = 0;
204 SYSCTL_INT(_hw_cxgbe, OID_AUTO, lro_mbufs, CTLFLAG_RDTUN, &lro_mbufs, 0,
205 "Enable presorting of LRO frames");
207 static counter_u64_t pullups;
208 SYSCTL_COUNTER_U64(_hw_cxgbe, OID_AUTO, pullups, CTLFLAG_RD, &pullups,
209 "Number of mbuf pullups performed");
211 static counter_u64_t defrags;
212 SYSCTL_COUNTER_U64(_hw_cxgbe, OID_AUTO, defrags, CTLFLAG_RD, &defrags,
213 "Number of mbuf defrags performed");
215 static int t4_tx_coalesce = 1;
216 SYSCTL_INT(_hw_cxgbe, OID_AUTO, tx_coalesce, CTLFLAG_RWTUN, &t4_tx_coalesce, 0,
217 "tx coalescing allowed");
220 * The driver will make aggressive attempts at tx coalescing if it sees this
221 * many packets eligible for coalescing in quick succession, with no more than
222 * the specified gap between the eth_tx calls that delivered them.
224 static int t4_tx_coalesce_pkts = 32;
225 SYSCTL_INT(_hw_cxgbe, OID_AUTO, tx_coalesce_pkts, CTLFLAG_RWTUN,
226 &t4_tx_coalesce_pkts, 0,
227 "# of consecutive packets (1 - 255) that will trigger tx coalescing");
228 static int t4_tx_coalesce_gap = 5;
229 SYSCTL_INT(_hw_cxgbe, OID_AUTO, tx_coalesce_gap, CTLFLAG_RWTUN,
230 &t4_tx_coalesce_gap, 0, "tx gap (in microseconds)");
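/*
 * Taken together with tx_coalesce_pkts above, the defaults mean: coalesce
 * aggressively only after roughly 32 eligible packets arrive back to back
 * with no more than about 5 microseconds between the eth_tx calls that
 * delivered them.
 */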
232 static int service_iq(struct sge_iq *, int);
233 static int service_iq_fl(struct sge_iq *, int);
234 static struct mbuf *get_fl_payload(struct adapter *, struct sge_fl *, uint32_t);
235 static int eth_rx(struct adapter *, struct sge_rxq *, const struct iq_desc *,
237 static inline void init_iq(struct sge_iq *, struct adapter *, int, int, int,
239 static inline void init_fl(struct adapter *, struct sge_fl *, int, int, char *);
240 static inline void init_eq(struct adapter *, struct sge_eq *, int, int, uint8_t,
241 struct sge_iq *, char *);
242 static int alloc_iq_fl(struct vi_info *, struct sge_iq *, struct sge_fl *,
243 struct sysctl_ctx_list *, struct sysctl_oid *);
244 static void free_iq_fl(struct adapter *, struct sge_iq *, struct sge_fl *);
245 static void add_iq_sysctls(struct sysctl_ctx_list *, struct sysctl_oid *,
247 static void add_fl_sysctls(struct adapter *, struct sysctl_ctx_list *,
248 struct sysctl_oid *, struct sge_fl *);
249 static int alloc_iq_fl_hwq(struct vi_info *, struct sge_iq *, struct sge_fl *);
250 static int free_iq_fl_hwq(struct adapter *, struct sge_iq *, struct sge_fl *);
251 static int alloc_fwq(struct adapter *);
252 static void free_fwq(struct adapter *);
253 static int alloc_ctrlq(struct adapter *, int);
254 static void free_ctrlq(struct adapter *, int);
255 static int alloc_rxq(struct vi_info *, struct sge_rxq *, int, int, int);
256 static void free_rxq(struct vi_info *, struct sge_rxq *);
257 static void add_rxq_sysctls(struct sysctl_ctx_list *, struct sysctl_oid *,
260 static int alloc_ofld_rxq(struct vi_info *, struct sge_ofld_rxq *, int, int,
262 static void free_ofld_rxq(struct vi_info *, struct sge_ofld_rxq *);
263 static void add_ofld_rxq_sysctls(struct sysctl_ctx_list *, struct sysctl_oid *,
264 struct sge_ofld_rxq *);
266 static int ctrl_eq_alloc(struct adapter *, struct sge_eq *);
267 static int eth_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *);
268 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
269 static int ofld_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *);
271 static int alloc_eq(struct adapter *, struct sge_eq *, struct sysctl_ctx_list *,
272 struct sysctl_oid *);
273 static void free_eq(struct adapter *, struct sge_eq *);
274 static void add_eq_sysctls(struct adapter *, struct sysctl_ctx_list *,
275 struct sysctl_oid *, struct sge_eq *);
276 static int alloc_eq_hwq(struct adapter *, struct vi_info *, struct sge_eq *);
277 static int free_eq_hwq(struct adapter *, struct vi_info *, struct sge_eq *);
278 static int alloc_wrq(struct adapter *, struct vi_info *, struct sge_wrq *,
279 struct sysctl_ctx_list *, struct sysctl_oid *);
280 static void free_wrq(struct adapter *, struct sge_wrq *);
281 static void add_wrq_sysctls(struct sysctl_ctx_list *, struct sysctl_oid *,
283 static int alloc_txq(struct vi_info *, struct sge_txq *, int);
284 static void free_txq(struct vi_info *, struct sge_txq *);
285 static void add_txq_sysctls(struct vi_info *, struct sysctl_ctx_list *,
286 struct sysctl_oid *, struct sge_txq *);
287 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
288 static int alloc_ofld_txq(struct vi_info *, struct sge_ofld_txq *, int);
289 static void free_ofld_txq(struct vi_info *, struct sge_ofld_txq *);
290 static void add_ofld_txq_sysctls(struct sysctl_ctx_list *, struct sysctl_oid *,
291 struct sge_ofld_txq *);
293 static void oneseg_dma_callback(void *, bus_dma_segment_t *, int, int);
294 static inline void ring_fl_db(struct adapter *, struct sge_fl *);
295 static int refill_fl(struct adapter *, struct sge_fl *, int);
296 static void refill_sfl(void *);
297 static int find_refill_source(struct adapter *, int, bool);
298 static void add_fl_to_sfl(struct adapter *, struct sge_fl *);
300 static inline void get_pkt_gl(struct mbuf *, struct sglist *);
301 static inline u_int txpkt_len16(u_int, const u_int);
302 static inline u_int txpkt_vm_len16(u_int, const u_int);
303 static inline void calculate_mbuf_len16(struct mbuf *, bool);
304 static inline u_int txpkts0_len16(u_int);
305 static inline u_int txpkts1_len16(void);
306 static u_int write_raw_wr(struct sge_txq *, void *, struct mbuf *, u_int);
307 static u_int write_txpkt_wr(struct adapter *, struct sge_txq *, struct mbuf *,
309 static u_int write_txpkt_vm_wr(struct adapter *, struct sge_txq *,
311 static int add_to_txpkts_vf(struct adapter *, struct sge_txq *, struct mbuf *,
313 static int add_to_txpkts_pf(struct adapter *, struct sge_txq *, struct mbuf *,
315 static u_int write_txpkts_wr(struct adapter *, struct sge_txq *);
316 static u_int write_txpkts_vm_wr(struct adapter *, struct sge_txq *);
317 static void write_gl_to_txd(struct sge_txq *, struct mbuf *, caddr_t *, int);
318 static inline void copy_to_txd(struct sge_eq *, caddr_t, caddr_t *, int);
319 static inline void ring_eq_db(struct adapter *, struct sge_eq *, u_int);
320 static inline uint16_t read_hw_cidx(struct sge_eq *);
321 static inline u_int reclaimable_tx_desc(struct sge_eq *);
322 static inline u_int total_available_tx_desc(struct sge_eq *);
323 static u_int reclaim_tx_descs(struct sge_txq *, u_int);
324 static void tx_reclaim(void *, int);
325 static __be64 get_flit(struct sglist_seg *, int, int);
326 static int handle_sge_egr_update(struct sge_iq *, const struct rss_header *,
328 static int handle_fw_msg(struct sge_iq *, const struct rss_header *,
330 static int t4_handle_wrerr_rpl(struct adapter *, const __be64 *);
331 static void wrq_tx_drain(void *, int);
332 static void drain_wrq_wr_list(struct adapter *, struct sge_wrq *);
334 static int sysctl_bufsizes(SYSCTL_HANDLER_ARGS);
336 #if defined(INET) || defined(INET6)
337 static inline u_int txpkt_eo_len16(u_int, u_int, u_int);
339 static int ethofld_fw4_ack(struct sge_iq *, const struct rss_header *,
343 static counter_u64_t extfree_refs;
344 static counter_u64_t extfree_rels;
346 an_handler_t t4_an_handler;
347 fw_msg_handler_t t4_fw_msg_handler[NUM_FW6_TYPES];
348 cpl_handler_t t4_cpl_handler[NUM_CPL_CMDS];
349 cpl_handler_t set_tcb_rpl_handlers[NUM_CPL_COOKIES];
350 cpl_handler_t l2t_write_rpl_handlers[NUM_CPL_COOKIES];
351 cpl_handler_t act_open_rpl_handlers[NUM_CPL_COOKIES];
352 cpl_handler_t abort_rpl_rss_handlers[NUM_CPL_COOKIES];
353 cpl_handler_t fw4_ack_handlers[NUM_CPL_COOKIES];
356 t4_register_an_handler(an_handler_t h)
360 MPASS(h == NULL || t4_an_handler == NULL);
362 loc = (uintptr_t *)&t4_an_handler;
363 atomic_store_rel_ptr(loc, (uintptr_t)h);
367 t4_register_fw_msg_handler(int type, fw_msg_handler_t h)
371 MPASS(type < nitems(t4_fw_msg_handler));
372 MPASS(h == NULL || t4_fw_msg_handler[type] == NULL);
374 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
375 * handler dispatch table. Reject any attempt to install a handler for
378 MPASS(type != FW_TYPE_RSSCPL);
379 MPASS(type != FW6_TYPE_RSSCPL);
381 loc = (uintptr_t *)&t4_fw_msg_handler[type];
382 atomic_store_rel_ptr(loc, (uintptr_t)h);
386 t4_register_cpl_handler(int opcode, cpl_handler_t h)
390 MPASS(opcode < nitems(t4_cpl_handler));
391 MPASS(h == NULL || t4_cpl_handler[opcode] == NULL);
393 loc = (uintptr_t *)&t4_cpl_handler[opcode];
394 atomic_store_rel_ptr(loc, (uintptr_t)h);
398 set_tcb_rpl_handler(struct sge_iq *iq, const struct rss_header *rss,
401 const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1);
408 if (is_hpftid(iq->adapter, tid) || is_ftid(iq->adapter, tid)) {
410 * The return code for filter-write is put in the CPL cookie so
411 * we have to rely on the hardware tid (is_ftid) to determine
412 * that this is a response to a filter.
414 cookie = CPL_COOKIE_FILTER;
416 cookie = G_COOKIE(cpl->cookie);
418 MPASS(cookie > CPL_COOKIE_RESERVED);
419 MPASS(cookie < nitems(set_tcb_rpl_handlers));
421 return (set_tcb_rpl_handlers[cookie](iq, rss, m));
425 l2t_write_rpl_handler(struct sge_iq *iq, const struct rss_header *rss,
428 const struct cpl_l2t_write_rpl *rpl = (const void *)(rss + 1);
433 cookie = GET_TID(rpl) & F_SYNC_WR ? CPL_COOKIE_TOM : CPL_COOKIE_FILTER;
434 return (l2t_write_rpl_handlers[cookie](iq, rss, m));
438 act_open_rpl_handler(struct sge_iq *iq, const struct rss_header *rss,
441 const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
442 u_int cookie = G_TID_COOKIE(G_AOPEN_ATID(be32toh(cpl->atid_status)));
445 MPASS(cookie != CPL_COOKIE_RESERVED);
447 return (act_open_rpl_handlers[cookie](iq, rss, m));
451 abort_rpl_rss_handler(struct sge_iq *iq, const struct rss_header *rss,
454 struct adapter *sc = iq->adapter;
458 if (is_hashfilter(sc))
459 cookie = CPL_COOKIE_HASHFILTER;
461 cookie = CPL_COOKIE_TOM;
463 return (abort_rpl_rss_handlers[cookie](iq, rss, m));
467 fw4_ack_handler(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
469 struct adapter *sc = iq->adapter;
470 const struct cpl_fw4_ack *cpl = (const void *)(rss + 1);
471 unsigned int tid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl)));
475 if (is_etid(sc, tid))
476 cookie = CPL_COOKIE_ETHOFLD;
478 cookie = CPL_COOKIE_TOM;
480 return (fw4_ack_handlers[cookie](iq, rss, m));
484 t4_init_shared_cpl_handlers(void)
487 t4_register_cpl_handler(CPL_SET_TCB_RPL, set_tcb_rpl_handler);
488 t4_register_cpl_handler(CPL_L2T_WRITE_RPL, l2t_write_rpl_handler);
489 t4_register_cpl_handler(CPL_ACT_OPEN_RPL, act_open_rpl_handler);
490 t4_register_cpl_handler(CPL_ABORT_RPL_RSS, abort_rpl_rss_handler);
491 t4_register_cpl_handler(CPL_FW4_ACK, fw4_ack_handler);
495 t4_register_shared_cpl_handler(int opcode, cpl_handler_t h, int cookie)
499 MPASS(opcode < nitems(t4_cpl_handler));
500 MPASS(cookie > CPL_COOKIE_RESERVED);
501 MPASS(cookie < NUM_CPL_COOKIES);
502 MPASS(t4_cpl_handler[opcode] != NULL);
505 case CPL_SET_TCB_RPL:
506 loc = (uintptr_t *)&set_tcb_rpl_handlers[cookie];
508 case CPL_L2T_WRITE_RPL:
509 loc = (uintptr_t *)&l2t_write_rpl_handlers[cookie];
511 case CPL_ACT_OPEN_RPL:
512 loc = (uintptr_t *)&act_open_rpl_handlers[cookie];
514 case CPL_ABORT_RPL_RSS:
515 loc = (uintptr_t *)&abort_rpl_rss_handlers[cookie];
518 loc = (uintptr_t *)&fw4_ack_handlers[cookie];
524 MPASS(h == NULL || *loc == (uintptr_t)NULL);
525 atomic_store_rel_ptr(loc, (uintptr_t)h);
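/*
 * Handlers registered here never go into t4_cpl_handler[] directly.  The
 * per-opcode wrappers above (set_tcb_rpl_handler, l2t_write_rpl_handler,
 * act_open_rpl_handler, abort_rpl_rss_handler, fw4_ack_handler) recover a
 * CPL_COOKIE_* value from each reply and use it to index the matching
 * per-cookie table, so the TOE, filter, and ratelimit code can share the
 * same CPL opcodes.
 */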
529 * Called on MOD_LOAD. Validates and calculates the SGE tunables.
535 if (fl_pktshift < 0 || fl_pktshift > 7) {
536 printf("Invalid hw.cxgbe.fl_pktshift value (%d),"
537 " using 0 instead.\n", fl_pktshift);
541 if (spg_len != 64 && spg_len != 128) {
544 #if defined(__i386__) || defined(__amd64__)
545 len = cpu_clflush_line_size > 64 ? 128 : 64;
550 printf("Invalid hw.cxgbe.spg_len value (%d),"
551 " using %d instead.\n", spg_len, len);
556 if (cong_drop < -1 || cong_drop > 1) {
557 printf("Invalid hw.cxgbe.cong_drop value (%d),"
558 " using 0 instead.\n", cong_drop);
562 if (tscale != 1 && (tscale < 3 || tscale > 17)) {
563 printf("Invalid hw.cxgbe.tscale value (%d),"
564 " using 1 instead.\n", tscale);
568 if (largest_rx_cluster != MCLBYTES &&
569 #if MJUMPAGESIZE != MCLBYTES
570 largest_rx_cluster != MJUMPAGESIZE &&
572 largest_rx_cluster != MJUM9BYTES &&
573 largest_rx_cluster != MJUM16BYTES) {
574 printf("Invalid hw.cxgbe.largest_rx_cluster value (%d),"
575 " using %d instead.\n", largest_rx_cluster, MJUM16BYTES);
576 largest_rx_cluster = MJUM16BYTES;
579 if (safest_rx_cluster != MCLBYTES &&
580 #if MJUMPAGESIZE != MCLBYTES
581 safest_rx_cluster != MJUMPAGESIZE &&
583 safest_rx_cluster != MJUM9BYTES &&
584 safest_rx_cluster != MJUM16BYTES) {
585 printf("Invalid hw.cxgbe.safest_rx_cluster value (%d),"
586 " using %d instead.\n", safest_rx_cluster, MJUMPAGESIZE);
587 safest_rx_cluster = MJUMPAGESIZE;
590 extfree_refs = counter_u64_alloc(M_WAITOK);
591 extfree_rels = counter_u64_alloc(M_WAITOK);
592 pullups = counter_u64_alloc(M_WAITOK);
593 defrags = counter_u64_alloc(M_WAITOK);
594 counter_u64_zero(extfree_refs);
595 counter_u64_zero(extfree_rels);
596 counter_u64_zero(pullups);
597 counter_u64_zero(defrags);
599 t4_init_shared_cpl_handlers();
600 t4_register_cpl_handler(CPL_FW4_MSG, handle_fw_msg);
601 t4_register_cpl_handler(CPL_FW6_MSG, handle_fw_msg);
602 t4_register_cpl_handler(CPL_SGE_EGR_UPDATE, handle_sge_egr_update);
604 t4_register_shared_cpl_handler(CPL_FW4_ACK, ethofld_fw4_ack,
607 t4_register_fw_msg_handler(FW6_TYPE_CMD_RPL, t4_handle_fw_rpl);
608 t4_register_fw_msg_handler(FW6_TYPE_WRERR_RPL, t4_handle_wrerr_rpl);
612 t4_sge_modunload(void)
615 counter_u64_free(extfree_refs);
616 counter_u64_free(extfree_rels);
617 counter_u64_free(pullups);
618 counter_u64_free(defrags);
622 t4_sge_extfree_refs(void)
626 rels = counter_u64_fetch(extfree_rels);
627 refs = counter_u64_fetch(extfree_refs);
629 return (refs - rels);
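/*
 * A non-zero difference means rx clusters lent to the stack via
 * m_extaddref() are still outstanding; the module unload path is expected
 * to wait for this to drop to zero before the driver can go away safely.
 */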
633 #define MAX_PACK_BOUNDARY 512
636 setup_pad_and_pack_boundaries(struct adapter *sc)
639 int pad, pack, pad_shift;
641 pad_shift = chip_id(sc) > CHELSIO_T5 ? X_T6_INGPADBOUNDARY_SHIFT :
642 X_INGPADBOUNDARY_SHIFT;
644 if (fl_pad < (1 << pad_shift) ||
645 fl_pad > (1 << (pad_shift + M_INGPADBOUNDARY)) ||
648 * If there is any chance that we might use buffer packing and
649 * the chip is a T4, then pick 64 as the pad/pack boundary. Set
650 * it to the minimum allowed in all other cases.
652 pad = is_t4(sc) && buffer_packing ? 64 : 1 << pad_shift;
655 * For fl_pad = 0 we'll still write a reasonable value to the
656 * register but all the freelists will opt out of padding.
657 * We'll complain here only if the user tried to set it to a
658 * value greater than 0 that was invalid.
661 device_printf(sc->dev, "Invalid hw.cxgbe.fl_pad value"
662 " (%d), using %d instead.\n", fl_pad, pad);
665 m = V_INGPADBOUNDARY(M_INGPADBOUNDARY);
666 v = V_INGPADBOUNDARY(ilog2(pad) - pad_shift);
667 t4_set_reg_field(sc, A_SGE_CONTROL, m, v);
670 if (fl_pack != -1 && fl_pack != pad) {
671 /* Complain but carry on. */
672 device_printf(sc->dev, "hw.cxgbe.fl_pack (%d) ignored,"
673 " using %d instead.\n", fl_pack, pad);
679 if (fl_pack < 16 || fl_pack == 32 || fl_pack > 4096 ||
680 !powerof2(fl_pack)) {
681 if (sc->params.pci.mps > MAX_PACK_BOUNDARY)
682 pack = MAX_PACK_BOUNDARY;
684 pack = max(sc->params.pci.mps, CACHE_LINE_SIZE);
685 MPASS(powerof2(pack));
693 device_printf(sc->dev, "Invalid hw.cxgbe.fl_pack value"
694 " (%d), using %d instead.\n", fl_pack, pack);
697 m = V_INGPACKBOUNDARY(M_INGPACKBOUNDARY);
699 v = V_INGPACKBOUNDARY(0);
701 v = V_INGPACKBOUNDARY(ilog2(pack) - 5);
703 MPASS(!is_t4(sc)); /* T4 doesn't have SGE_CONTROL2 */
704 t4_set_reg_field(sc, A_SGE_CONTROL2, m, v);
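/*
 * To recap the two boundaries programmed above: the pad boundary applies to
 * every freelist buffer (each frame's payload is padded so that the next one
 * starts aligned), while the pack boundary matters only when buffer packing
 * is enabled and controls where the next frame may begin inside a partially
 * filled buffer.
 */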
708 * adap->params.vpd.cclk must be set up before this is called.
711 t4_tweak_chip_settings(struct adapter *sc)
715 int intr_timer[SGE_NTIMERS] = {1, 5, 10, 50, 100, 200};
716 int timer_max = M_TIMERVALUE0 * 1000 / sc->params.vpd.cclk;
717 int intr_pktcount[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */
718 uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE);
719 static int sw_buf_sizes[] = {
721 #if MJUMPAGESIZE != MCLBYTES
728 KASSERT(sc->flags & MASTER_PF,
729 ("%s: trying to change chip settings when not master.", __func__));
731 m = V_PKTSHIFT(M_PKTSHIFT) | F_RXPKTCPLMODE | F_EGRSTATUSPAGESIZE;
732 v = V_PKTSHIFT(fl_pktshift) | F_RXPKTCPLMODE |
733 V_EGRSTATUSPAGESIZE(spg_len == 128);
734 t4_set_reg_field(sc, A_SGE_CONTROL, m, v);
736 setup_pad_and_pack_boundaries(sc);
738 v = V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10) |
739 V_HOSTPAGESIZEPF1(PAGE_SHIFT - 10) |
740 V_HOSTPAGESIZEPF2(PAGE_SHIFT - 10) |
741 V_HOSTPAGESIZEPF3(PAGE_SHIFT - 10) |
742 V_HOSTPAGESIZEPF4(PAGE_SHIFT - 10) |
743 V_HOSTPAGESIZEPF5(PAGE_SHIFT - 10) |
744 V_HOSTPAGESIZEPF6(PAGE_SHIFT - 10) |
745 V_HOSTPAGESIZEPF7(PAGE_SHIFT - 10);
746 t4_write_reg(sc, A_SGE_HOST_PAGE_SIZE, v);
748 t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE0, 4096);
749 t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE1, 65536);
750 reg = A_SGE_FL_BUFFER_SIZE2;
751 for (i = 0; i < nitems(sw_buf_sizes); i++) {
752 MPASS(reg <= A_SGE_FL_BUFFER_SIZE15);
753 t4_write_reg(sc, reg, sw_buf_sizes[i]);
755 MPASS(reg <= A_SGE_FL_BUFFER_SIZE15);
756 t4_write_reg(sc, reg, sw_buf_sizes[i] - CL_METADATA_SIZE);
760 v = V_THRESHOLD_0(intr_pktcount[0]) | V_THRESHOLD_1(intr_pktcount[1]) |
761 V_THRESHOLD_2(intr_pktcount[2]) | V_THRESHOLD_3(intr_pktcount[3]);
762 t4_write_reg(sc, A_SGE_INGRESS_RX_THRESHOLD, v);
764 KASSERT(intr_timer[0] <= timer_max,
765 ("%s: not a single usable timer (%d, %d)", __func__, intr_timer[0],
767 for (i = 1; i < nitems(intr_timer); i++) {
768 KASSERT(intr_timer[i] >= intr_timer[i - 1],
769 ("%s: timers not listed in increasing order (%d)",
772 while (intr_timer[i] > timer_max) {
773 if (i == nitems(intr_timer) - 1) {
774 intr_timer[i] = timer_max;
777 intr_timer[i] += intr_timer[i - 1];
782 v = V_TIMERVALUE0(us_to_core_ticks(sc, intr_timer[0])) |
783 V_TIMERVALUE1(us_to_core_ticks(sc, intr_timer[1]));
784 t4_write_reg(sc, A_SGE_TIMER_VALUE_0_AND_1, v);
785 v = V_TIMERVALUE2(us_to_core_ticks(sc, intr_timer[2])) |
786 V_TIMERVALUE3(us_to_core_ticks(sc, intr_timer[3]));
787 t4_write_reg(sc, A_SGE_TIMER_VALUE_2_AND_3, v);
788 v = V_TIMERVALUE4(us_to_core_ticks(sc, intr_timer[4])) |
789 V_TIMERVALUE5(us_to_core_ticks(sc, intr_timer[5]));
790 t4_write_reg(sc, A_SGE_TIMER_VALUE_4_AND_5, v);
792 if (chip_id(sc) >= CHELSIO_T6) {
793 m = V_TSCALE(M_TSCALE);
797 v = V_TSCALE(tscale - 2);
798 t4_set_reg_field(sc, A_SGE_ITP_CONTROL, m, v);
800 if (sc->debug_flags & DF_DISABLE_TCB_CACHE) {
801 m = V_RDTHRESHOLD(M_RDTHRESHOLD) | F_WRTHRTHRESHEN |
802 V_WRTHRTHRESH(M_WRTHRTHRESH);
803 t4_tp_pio_read(sc, &v, 1, A_TP_CMM_CONFIG, 1);
805 v |= V_RDTHRESHOLD(1) | F_WRTHRTHRESHEN |
807 t4_tp_pio_write(sc, &v, 1, A_TP_CMM_CONFIG, 1);
811 /* 4K, 16K, 64K, 256K DDP "page sizes" for TDDP */
812 v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6);
813 t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, v);
816 * 4K, 8K, 16K, 64K DDP "page sizes" for iSCSI DDP. These have been
817 * chosen with MAXPHYS = 128K in mind. The largest DDP buffer that we
818 * may have to deal with is MAXPHYS + 1 page.
820 v = V_HPZ0(0) | V_HPZ1(1) | V_HPZ2(2) | V_HPZ3(4);
821 t4_write_reg(sc, A_ULP_RX_ISCSI_PSZ, v);
823 /* We use multiple DDP page sizes both in plain-TOE and ISCSI modes. */
824 m = v = F_TDDPTAGTCB | F_ISCSITAGTCB;
825 t4_set_reg_field(sc, A_ULP_RX_CTL, m, v);
827 m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET |
829 v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET;
830 t4_set_reg_field(sc, A_TP_PARA_REG5, m, v);
834 * SGE wants the buffer to be at least 64B and then a multiple of 16. Its
835 * address must be 16B aligned. If padding is in use the buffer's start and end
836 * need to be aligned to the pad boundary as well. We'll just make sure that
837 * the size is a multiple of the pad boundary here, it is up to the buffer
838 * allocation code to make sure the start of the buffer is aligned.
841 hwsz_ok(struct adapter *sc, int hwsz)
843 int mask = fl_pad ? sc->params.sge.pad_boundary - 1 : 16 - 1;
845 return (hwsz >= 64 && (hwsz & mask) == 0);
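/*
 * For example, with a 64B pad boundary a hardware buffer size of 4096
 * passes (>= 64 and a multiple of 64) while 4000 does not; with padding
 * disabled the size only needs to be a multiple of 16.
 */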
849 * Initialize the rx buffer sizes and figure out which zones the buffers will
853 t4_init_rx_buf_info(struct adapter *sc)
855 struct sge *s = &sc->sge;
856 struct sge_params *sp = &sc->params.sge;
858 static int sw_buf_sizes[] = { /* Sorted by size */
860 #if MJUMPAGESIZE != MCLBYTES
866 struct rx_buf_info *rxb;
869 rxb = &s->rx_buf_info[0];
870 for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) {
871 rxb->size1 = sw_buf_sizes[i];
872 rxb->zone = m_getzone(rxb->size1);
873 rxb->type = m_gettype(rxb->size1);
877 for (j = 0; j < SGE_FLBUF_SIZES; j++) {
878 int hwsize = sp->sge_fl_buffer_size[j];
880 if (!hwsz_ok(sc, hwsize))
883 /* hwidx for size1 */
884 if (rxb->hwidx1 == -1 && rxb->size1 == hwsize)
887 /* hwidx for size2 (buffer packing) */
888 if (rxb->size1 - CL_METADATA_SIZE < hwsize)
890 n = rxb->size1 - hwsize - CL_METADATA_SIZE;
894 break; /* stop looking */
896 if (rxb->hwidx2 != -1) {
897 if (n < sp->sge_fl_buffer_size[rxb->hwidx2] -
898 hwsize - CL_METADATA_SIZE) {
902 } else if (n <= 2 * CL_METADATA_SIZE) {
907 if (rxb->hwidx2 != -1)
908 sc->flags |= BUF_PACKING_OK;
909 if (s->safe_zidx == -1 && rxb->size1 == safest_rx_cluster)
915 * Verify some basic SGE settings for the PF and VF driver, and other
916 * miscellaneous settings for the PF driver.
919 t4_verify_chip_settings(struct adapter *sc)
921 struct sge_params *sp = &sc->params.sge;
924 const uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE);
930 device_printf(sc->dev, "invalid SGE_CONTROL(0x%x)\n", r);
935 * If this changes then every single use of PAGE_SHIFT in the driver
936 * needs to be carefully reviewed for PAGE_SHIFT vs sp->page_shift.
938 if (sp->page_shift != PAGE_SHIFT) {
939 device_printf(sc->dev, "invalid SGE_HOST_PAGE_SIZE(0x%x)\n", r);
943 if (sc->flags & IS_VF)
946 v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6);
947 r = t4_read_reg(sc, A_ULP_RX_TDDP_PSZ);
949 device_printf(sc->dev, "invalid ULP_RX_TDDP_PSZ(0x%x)\n", r);
950 if (sc->vres.ddp.size != 0)
954 m = v = F_TDDPTAGTCB;
955 r = t4_read_reg(sc, A_ULP_RX_CTL);
957 device_printf(sc->dev, "invalid ULP_RX_CTL(0x%x)\n", r);
958 if (sc->vres.ddp.size != 0)
962 m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET |
964 v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET;
965 r = t4_read_reg(sc, A_TP_PARA_REG5);
967 device_printf(sc->dev, "invalid TP_PARA_REG5(0x%x)\n", r);
968 if (sc->vres.ddp.size != 0)
976 t4_create_dma_tag(struct adapter *sc)
980 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
981 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE,
982 BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL,
985 device_printf(sc->dev,
986 "failed to create main DMA tag: %d\n", rc);
993 t4_sge_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx,
994 struct sysctl_oid_list *children)
996 struct sge_params *sp = &sc->params.sge;
998 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "buffer_sizes",
999 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
1000 sysctl_bufsizes, "A", "freelist buffer sizes");
1002 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pktshift", CTLFLAG_RD,
1003 NULL, sp->fl_pktshift, "payload DMA offset in rx buffer (bytes)");
1005 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pad", CTLFLAG_RD,
1006 NULL, sp->pad_boundary, "payload pad boundary (bytes)");
1008 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "spg_len", CTLFLAG_RD,
1009 NULL, sp->spg_len, "status page size (bytes)");
1011 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_drop", CTLFLAG_RD,
1012 NULL, cong_drop, "congestion drop setting");
1014 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pack", CTLFLAG_RD,
1015 NULL, sp->pack_boundary, "payload pack boundary (bytes)");
1019 t4_destroy_dma_tag(struct adapter *sc)
1022 bus_dma_tag_destroy(sc->dmat);
1028 * Allocate and initialize the firmware event queue, control queues, and special
1029 * purpose rx queues owned by the adapter.
1031 * Returns errno on failure. Resources allocated up to that point may still be
1032 * allocated. Caller is responsible for cleanup in case this function fails.
1035 t4_setup_adapter_queues(struct adapter *sc)
1039 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1042 * Firmware event queue
1049 * That's all for the VF driver.
1051 if (sc->flags & IS_VF)
1055 * XXX: General purpose rx queues, one per port.
1059 * Control queues, one per port.
1061 for_each_port(sc, i) {
1062 rc = alloc_ctrlq(sc, i);
1074 t4_teardown_adapter_queues(struct adapter *sc)
1078 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
1080 if (!(sc->flags & IS_VF)) {
1081 for_each_port(sc, i)
1089 /* Maximum payload that could arrive with a single iq descriptor. */
1091 max_rx_payload(struct adapter *sc, struct ifnet *ifp, const bool ofld)
1095 /* large enough even when hw VLAN extraction is disabled */
1096 maxp = sc->params.sge.fl_pktshift + ETHER_HDR_LEN +
1097 ETHER_VLAN_ENCAP_LEN + ifp->if_mtu;
1098 if (ofld && sc->tt.tls && sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS &&
1099 maxp < sc->params.tp.max_rx_pdu)
1100 maxp = sc->params.tp.max_rx_pdu;
1105 t4_setup_vi_queues(struct vi_info *vi)
1107 int rc = 0, i, intr_idx;
1108 struct sge_rxq *rxq;
1109 struct sge_txq *txq;
1111 struct sge_ofld_rxq *ofld_rxq;
1113 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1114 struct sge_ofld_txq *ofld_txq;
1117 int saved_idx, iqidx;
1118 struct sge_nm_rxq *nm_rxq;
1119 struct sge_nm_txq *nm_txq;
1121 struct adapter *sc = vi->adapter;
1122 struct ifnet *ifp = vi->ifp;
1125 /* Interrupt vector to start from (when using multiple vectors) */
1126 intr_idx = vi->first_intr;
1129 saved_idx = intr_idx;
1130 if (ifp->if_capabilities & IFCAP_NETMAP) {
1132 /* netmap is supported with direct interrupts only. */
1133 MPASS(!forwarding_intr_to_fwq(sc));
1134 MPASS(vi->first_intr >= 0);
1137 * We don't have buffers to back the netmap rx queues
1138 * right now so we create the queues in a way that
1139 * doesn't set off any congestion signal in the chip.
1141 for_each_nm_rxq(vi, i, nm_rxq) {
1142 rc = alloc_nm_rxq(vi, nm_rxq, intr_idx, i);
1148 for_each_nm_txq(vi, i, nm_txq) {
1149 iqidx = vi->first_nm_rxq + (i % vi->nnmrxq);
1150 rc = alloc_nm_txq(vi, nm_txq, iqidx, i);
1156 /* Normal rx queues and netmap rx queues share the same interrupts. */
1157 intr_idx = saved_idx;
1161 * Allocate rx queues first because a default iqid is required when
1162 * creating a tx queue.
1164 maxp = max_rx_payload(sc, ifp, false);
1165 for_each_rxq(vi, i, rxq) {
1166 rc = alloc_rxq(vi, rxq, i, intr_idx, maxp);
1169 if (!forwarding_intr_to_fwq(sc))
1173 if (ifp->if_capabilities & IFCAP_NETMAP)
1174 intr_idx = saved_idx + max(vi->nrxq, vi->nnmrxq);
1177 maxp = max_rx_payload(sc, ifp, true);
1178 for_each_ofld_rxq(vi, i, ofld_rxq) {
1179 rc = alloc_ofld_rxq(vi, ofld_rxq, i, intr_idx, maxp);
1182 if (!forwarding_intr_to_fwq(sc))
1188 * Now the tx queues.
1190 for_each_txq(vi, i, txq) {
1191 rc = alloc_txq(vi, txq, i);
1195 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1196 for_each_ofld_txq(vi, i, ofld_txq) {
1197 rc = alloc_ofld_txq(vi, ofld_txq, i);
1204 t4_teardown_vi_queues(vi);
1213 t4_teardown_vi_queues(struct vi_info *vi)
1216 struct sge_rxq *rxq;
1217 struct sge_txq *txq;
1218 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1219 struct sge_ofld_txq *ofld_txq;
1222 struct sge_ofld_rxq *ofld_rxq;
1225 struct sge_nm_rxq *nm_rxq;
1226 struct sge_nm_txq *nm_txq;
1230 if (vi->ifp->if_capabilities & IFCAP_NETMAP) {
1231 for_each_nm_txq(vi, i, nm_txq) {
1232 free_nm_txq(vi, nm_txq);
1235 for_each_nm_rxq(vi, i, nm_rxq) {
1236 free_nm_rxq(vi, nm_rxq);
1242 * Take down all the tx queues first, as they reference the rx queues
1243 * (for egress updates, etc.).
1246 for_each_txq(vi, i, txq) {
1249 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
1250 for_each_ofld_txq(vi, i, ofld_txq) {
1251 free_ofld_txq(vi, ofld_txq);
1256 * Then take down the rx queues.
1259 for_each_rxq(vi, i, rxq) {
1263 for_each_ofld_rxq(vi, i, ofld_rxq) {
1264 free_ofld_rxq(vi, ofld_rxq);
1272 * Interrupt handler when the driver is using only 1 interrupt. This is a very
1275 * a) Deals with errors, if any.
1276 * b) Services firmware event queue, which is taking interrupts for all other
1280 t4_intr_all(void *arg)
1282 struct adapter *sc = arg;
1283 struct sge_iq *fwq = &sc->sge.fwq;
1285 MPASS(sc->intr_count == 1);
1287 if (sc->intr_type == INTR_INTX)
1288 t4_write_reg(sc, MYPF_REG(A_PCIE_PF_CLI), 0);
1295 * Interrupt handler for errors (installed directly when multiple interrupts are
1296 * being used, or called by t4_intr_all).
1299 t4_intr_err(void *arg)
1301 struct adapter *sc = arg;
1303 const bool verbose = (sc->debug_flags & DF_VERBOSE_SLOWINTR) != 0;
1305 if (sc->flags & ADAP_ERR)
1308 v = t4_read_reg(sc, MYPF_REG(A_PL_PF_INT_CAUSE));
1311 t4_write_reg(sc, MYPF_REG(A_PL_PF_INT_CAUSE), v);
1314 t4_slow_intr_handler(sc, verbose);
1318 * Interrupt handler for iq-only queues. The firmware event queue is the only
1319 * such queue right now.
1322 t4_intr_evt(void *arg)
1324 struct sge_iq *iq = arg;
1326 if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) {
1328 (void) atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE);
1333 * Interrupt handler for iq+fl queues.
1338 struct sge_iq *iq = arg;
1340 if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) {
1341 service_iq_fl(iq, 0);
1342 (void) atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE);
1348 * Interrupt handler for netmap rx queues.
1351 t4_nm_intr(void *arg)
1353 struct sge_nm_rxq *nm_rxq = arg;
1355 if (atomic_cmpset_int(&nm_rxq->nm_state, NM_ON, NM_BUSY)) {
1356 service_nm_rxq(nm_rxq);
1357 (void) atomic_cmpset_int(&nm_rxq->nm_state, NM_BUSY, NM_ON);
1362 * Interrupt handler for vectors shared between NIC and netmap rx queues.
1365 t4_vi_intr(void *arg)
1367 struct irq *irq = arg;
1369 MPASS(irq->nm_rxq != NULL);
1370 t4_nm_intr(irq->nm_rxq);
1372 MPASS(irq->rxq != NULL);
1378 * Deals with interrupts on an iq-only (no freelist) queue.
1381 service_iq(struct sge_iq *iq, int budget)
1384 struct adapter *sc = iq->adapter;
1385 struct iq_desc *d = &iq->desc[iq->cidx];
1386 int ndescs = 0, limit;
1389 STAILQ_HEAD(, sge_iq) iql = STAILQ_HEAD_INITIALIZER(iql);
1391 KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq));
1392 KASSERT((iq->flags & IQ_HAS_FL) == 0,
1393 ("%s: called for iq %p with fl (iq->flags 0x%x)", __func__, iq,
1395 MPASS((iq->flags & IQ_ADJ_CREDIT) == 0);
1396 MPASS((iq->flags & IQ_LRO_ENABLED) == 0);
1398 limit = budget ? budget : iq->qsize / 16;
1401 * We always come back and check the descriptor ring for new indirect
1402 * interrupts and other responses after running a single handler.
1405 while ((d->rsp.u.type_gen & F_RSPD_GEN) == iq->gen) {
1409 rsp_type = G_RSPD_TYPE(d->rsp.u.type_gen);
1410 lq = be32toh(d->rsp.pldbuflen_qid);
1413 case X_RSPD_TYPE_FLBUF:
1414 panic("%s: data for an iq (%p) with no freelist",
1419 case X_RSPD_TYPE_CPL:
1420 KASSERT(d->rss.opcode < NUM_CPL_CMDS,
1421 ("%s: bad opcode %02x.", __func__,
1423 t4_cpl_handler[d->rss.opcode](iq, &d->rss, NULL);
1426 case X_RSPD_TYPE_INTR:
1428 * There are 1K interrupt-capable queues (qids 0
1429 * through 1023). A response type indicating a
1430 * forwarded interrupt with a qid >= 1K is an
1431 * iWARP async notification.
1433 if (__predict_true(lq >= 1024)) {
1434 t4_an_handler(iq, &d->rsp);
1438 q = sc->sge.iqmap[lq - sc->sge.iq_start -
1440 if (atomic_cmpset_int(&q->state, IQS_IDLE,
1442 if (service_iq_fl(q, q->qsize / 16) == 0) {
1443 (void) atomic_cmpset_int(&q->state,
1444 IQS_BUSY, IQS_IDLE);
1446 STAILQ_INSERT_TAIL(&iql, q,
1454 ("%s: illegal response type %d on iq %p",
1455 __func__, rsp_type, iq));
1457 "%s: illegal response type %d on iq %p",
1458 device_get_nameunit(sc->dev), rsp_type, iq);
1463 if (__predict_false(++iq->cidx == iq->sidx)) {
1465 iq->gen ^= F_RSPD_GEN;
1468 if (__predict_false(++ndescs == limit)) {
1469 t4_write_reg(sc, sc->sge_gts_reg,
1471 V_INGRESSQID(iq->cntxt_id) |
1472 V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
1476 return (EINPROGRESS);
1481 if (STAILQ_EMPTY(&iql))
1485 * Process the head only, and send it to the back of the list if
1486 * it's still not done.
1488 q = STAILQ_FIRST(&iql);
1489 STAILQ_REMOVE_HEAD(&iql, link);
1490 if (service_iq_fl(q, q->qsize / 8) == 0)
1491 (void) atomic_cmpset_int(&q->state, IQS_BUSY, IQS_IDLE);
1493 STAILQ_INSERT_TAIL(&iql, q, link);
1496 t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) |
1497 V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params));
1502 #if defined(INET) || defined(INET6)
1504 sort_before_lro(struct lro_ctrl *lro)
1507 return (lro->lro_mbuf_max != 0);
1511 static inline uint64_t
1512 last_flit_to_ns(struct adapter *sc, uint64_t lf)
1514 uint64_t n = be64toh(lf) & 0xfffffffffffffff; /* 60b, not 64b. */
1516 if (n > UINT64_MAX / 1000000)
1517 return (n / sc->params.vpd.cclk * 1000000);
1519 return (n * 1000000 / sc->params.vpd.cclk);
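/*
 * vpd.cclk is the core clock frequency (kHz), so ticks * 1000000 / cclk
 * yields nanoseconds.  The first branch only reorders the arithmetic
 * (divide before multiply) to avoid 64-bit overflow when the raw 60-bit
 * timestamp is very large.
 */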
1523 move_to_next_rxbuf(struct sge_fl *fl)
1527 if (__predict_false((++fl->cidx & 7) == 0)) {
1528 uint16_t cidx = fl->cidx >> 3;
1530 if (__predict_false(cidx == fl->sidx))
1531 fl->cidx = cidx = 0;
1537 * Deals with interrupts on an iq+fl queue.
1540 service_iq_fl(struct sge_iq *iq, int budget)
1542 struct sge_rxq *rxq = iq_to_rxq(iq);
1544 struct adapter *sc = iq->adapter;
1545 struct iq_desc *d = &iq->desc[iq->cidx];
1547 int rsp_type, starved;
1549 uint16_t fl_hw_cidx;
1551 #if defined(INET) || defined(INET6)
1552 const struct timeval lro_timeout = {0, sc->lro_timeout};
1553 struct lro_ctrl *lro = &rxq->lro;
1556 KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq));
1557 MPASS(iq->flags & IQ_HAS_FL);
1560 #if defined(INET) || defined(INET6)
1561 if (iq->flags & IQ_ADJ_CREDIT) {
1562 MPASS(sort_before_lro(lro));
1563 iq->flags &= ~IQ_ADJ_CREDIT;
1564 if ((d->rsp.u.type_gen & F_RSPD_GEN) != iq->gen) {
1565 tcp_lro_flush_all(lro);
1566 t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(1) |
1567 V_INGRESSQID((u32)iq->cntxt_id) |
1568 V_SEINTARM(iq->intr_params));
1574 MPASS((iq->flags & IQ_ADJ_CREDIT) == 0);
1577 limit = budget ? budget : iq->qsize / 16;
1579 fl_hw_cidx = fl->hw_cidx; /* stable snapshot */
1580 while ((d->rsp.u.type_gen & F_RSPD_GEN) == iq->gen) {
1585 rsp_type = G_RSPD_TYPE(d->rsp.u.type_gen);
1586 lq = be32toh(d->rsp.pldbuflen_qid);
1589 case X_RSPD_TYPE_FLBUF:
1590 if (lq & F_RSPD_NEWBUF) {
1591 if (fl->rx_offset > 0)
1592 move_to_next_rxbuf(fl);
1593 lq = G_RSPD_LEN(lq);
1595 if (IDXDIFF(fl->hw_cidx, fl_hw_cidx, fl->sidx) > 4) {
1597 refill_fl(sc, fl, 64);
1599 fl_hw_cidx = fl->hw_cidx;
1602 if (d->rss.opcode == CPL_RX_PKT) {
1603 if (__predict_true(eth_rx(sc, rxq, d, lq) == 0))
1607 m0 = get_fl_payload(sc, fl, lq);
1608 if (__predict_false(m0 == NULL))
1613 case X_RSPD_TYPE_CPL:
1614 KASSERT(d->rss.opcode < NUM_CPL_CMDS,
1615 ("%s: bad opcode %02x.", __func__, d->rss.opcode));
1616 t4_cpl_handler[d->rss.opcode](iq, &d->rss, m0);
1619 case X_RSPD_TYPE_INTR:
1622 * There are 1K interrupt-capable queues (qids 0
1623 * through 1023). A response type indicating a
1624 * forwarded interrupt with a qid >= 1K is an
1625 * iWARP async notification. That is the only
1626 * acceptable indirect interrupt on this queue.
1628 if (__predict_false(lq < 1024)) {
1629 panic("%s: indirect interrupt on iq_fl %p "
1630 "with qid %u", __func__, iq, lq);
1633 t4_an_handler(iq, &d->rsp);
1637 KASSERT(0, ("%s: illegal response type %d on iq %p",
1638 __func__, rsp_type, iq));
1639 log(LOG_ERR, "%s: illegal response type %d on iq %p",
1640 device_get_nameunit(sc->dev), rsp_type, iq);
1645 if (__predict_false(++iq->cidx == iq->sidx)) {
1647 iq->gen ^= F_RSPD_GEN;
1650 if (__predict_false(++ndescs == limit)) {
1651 t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) |
1652 V_INGRESSQID(iq->cntxt_id) |
1653 V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
1655 #if defined(INET) || defined(INET6)
1656 if (iq->flags & IQ_LRO_ENABLED &&
1657 !sort_before_lro(lro) &&
1658 sc->lro_timeout != 0) {
1659 tcp_lro_flush_inactive(lro, &lro_timeout);
1663 return (EINPROGRESS);
1668 #if defined(INET) || defined(INET6)
1669 if (iq->flags & IQ_LRO_ENABLED) {
1670 if (ndescs > 0 && lro->lro_mbuf_count > 8) {
1671 MPASS(sort_before_lro(lro));
1672 /* hold back one credit and don't flush LRO state */
1673 iq->flags |= IQ_ADJ_CREDIT;
1676 tcp_lro_flush_all(lro);
1681 t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) |
1682 V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params));
1685 starved = refill_fl(sc, fl, 64);
1687 if (__predict_false(starved != 0))
1688 add_fl_to_sfl(sc, fl);
1693 static inline struct cluster_metadata *
1694 cl_metadata(struct fl_sdesc *sd)
1697 return ((void *)(sd->cl + sd->moff));
1701 rxb_free(struct mbuf *m)
1703 struct cluster_metadata *clm = m->m_ext.ext_arg1;
1705 uma_zfree(clm->zone, clm->cl);
1706 counter_u64_add(extfree_rels, 1);
1710 * The mbuf returned comes from zone_mbuf and carries the payload in one of these ways:
1712 * a) complete frame inside the mbuf
1713 * b) m_cljset (for clusters without metadata)
1714 * c) m_extaddref (cluster with metadata)
1716 static struct mbuf *
1717 get_scatter_segment(struct adapter *sc, struct sge_fl *fl, int fr_offset,
1721 struct fl_sdesc *sd = &fl->sdesc[fl->cidx];
1722 struct rx_buf_info *rxb = &sc->sge.rx_buf_info[sd->zidx];
1723 struct cluster_metadata *clm;
1727 if (fl->flags & FL_BUF_PACKING) {
1730 blen = rxb->size2 - fl->rx_offset; /* max possible in this buf */
1731 len = min(remaining, blen);
1732 payload = sd->cl + fl->rx_offset;
1734 l = fr_offset + len;
1735 pad = roundup2(l, fl->buf_boundary) - l;
1736 if (fl->rx_offset + len + pad < rxb->size2)
1738 MPASS(fl->rx_offset + blen <= rxb->size2);
1740 MPASS(fl->rx_offset == 0); /* not packing */
1742 len = min(remaining, blen);
1746 if (fr_offset == 0) {
1747 m = m_gethdr(M_NOWAIT, MT_DATA);
1748 if (__predict_false(m == NULL))
1750 m->m_pkthdr.len = remaining;
1752 m = m_get(M_NOWAIT, MT_DATA);
1753 if (__predict_false(m == NULL))
1758 if (sc->sc_do_rxcopy && len < RX_COPY_THRESHOLD) {
1759 /* copy data to mbuf */
1760 bcopy(payload, mtod(m, caddr_t), len);
1761 if (fl->flags & FL_BUF_PACKING) {
1762 fl->rx_offset += blen;
1763 MPASS(fl->rx_offset <= rxb->size2);
1764 if (fl->rx_offset < rxb->size2)
1765 return (m); /* without advancing the cidx */
1767 } else if (fl->flags & FL_BUF_PACKING) {
1768 clm = cl_metadata(sd);
1769 if (sd->nmbuf++ == 0) {
1771 clm->zone = rxb->zone;
1773 counter_u64_add(extfree_refs, 1);
1775 m_extaddref(m, payload, blen, &clm->refcount, rxb_free, clm,
1778 fl->rx_offset += blen;
1779 MPASS(fl->rx_offset <= rxb->size2);
1780 if (fl->rx_offset < rxb->size2)
1781 return (m); /* without advancing the cidx */
1783 m_cljset(m, sd->cl, rxb->type);
1784 sd->cl = NULL; /* consumed, not a recycle candidate */
1787 move_to_next_rxbuf(fl);
1792 static struct mbuf *
1793 get_fl_payload(struct adapter *sc, struct sge_fl *fl, const u_int plen)
1795 struct mbuf *m0, *m, **pnext;
1798 if (__predict_false(fl->flags & FL_BUF_RESUME)) {
1799 M_ASSERTPKTHDR(fl->m0);
1800 MPASS(fl->m0->m_pkthdr.len == plen);
1801 MPASS(fl->remaining < plen);
1805 remaining = fl->remaining;
1806 fl->flags &= ~FL_BUF_RESUME;
1811 * Payload starts at rx_offset in the current hw buffer. Its length is
1812 * 'len' and it may span multiple hw buffers.
1815 m0 = get_scatter_segment(sc, fl, 0, plen);
1818 remaining = plen - m0->m_len;
1819 pnext = &m0->m_next;
1820 while (remaining > 0) {
1822 MPASS(fl->rx_offset == 0);
1823 m = get_scatter_segment(sc, fl, plen - remaining, remaining);
1824 if (__predict_false(m == NULL)) {
1827 fl->remaining = remaining;
1828 fl->flags |= FL_BUF_RESUME;
1833 remaining -= m->m_len;
1842 skip_scatter_segment(struct adapter *sc, struct sge_fl *fl, int fr_offset,
1845 struct fl_sdesc *sd = &fl->sdesc[fl->cidx];
1846 struct rx_buf_info *rxb = &sc->sge.rx_buf_info[sd->zidx];
1849 if (fl->flags & FL_BUF_PACKING) {
1852 blen = rxb->size2 - fl->rx_offset; /* max possible in this buf */
1853 len = min(remaining, blen);
1855 l = fr_offset + len;
1856 pad = roundup2(l, fl->buf_boundary) - l;
1857 if (fl->rx_offset + len + pad < rxb->size2)
1859 fl->rx_offset += blen;
1860 MPASS(fl->rx_offset <= rxb->size2);
1861 if (fl->rx_offset < rxb->size2)
1862 return (len); /* without advancing the cidx */
1864 MPASS(fl->rx_offset == 0); /* not packing */
1866 len = min(remaining, blen);
1868 move_to_next_rxbuf(fl);
1873 skip_fl_payload(struct adapter *sc, struct sge_fl *fl, int plen)
1875 int remaining, fr_offset, len;
1879 while (remaining > 0) {
1880 len = skip_scatter_segment(sc, fl, fr_offset, remaining);
1887 get_segment_len(struct adapter *sc, struct sge_fl *fl, int plen)
1890 struct fl_sdesc *sd = &fl->sdesc[fl->cidx];
1891 struct rx_buf_info *rxb = &sc->sge.rx_buf_info[sd->zidx];
1893 if (fl->flags & FL_BUF_PACKING)
1894 len = rxb->size2 - fl->rx_offset;
1898 return (min(plen, len));
1902 eth_rx(struct adapter *sc, struct sge_rxq *rxq, const struct iq_desc *d,
1906 struct ifnet *ifp = rxq->ifp;
1907 struct sge_fl *fl = &rxq->fl;
1908 struct vi_info *vi = ifp->if_softc;
1909 const struct cpl_rx_pkt *cpl;
1910 #if defined(INET) || defined(INET6)
1911 struct lro_ctrl *lro = &rxq->lro;
1913 uint16_t err_vec, tnl_type, tnlhdr_len;
1914 static const int sw_hashtype[4][2] = {
1915 {M_HASHTYPE_NONE, M_HASHTYPE_NONE},
1916 {M_HASHTYPE_RSS_IPV4, M_HASHTYPE_RSS_IPV6},
1917 {M_HASHTYPE_RSS_TCP_IPV4, M_HASHTYPE_RSS_TCP_IPV6},
1918 {M_HASHTYPE_RSS_UDP_IPV4, M_HASHTYPE_RSS_UDP_IPV6},
1920 static const int sw_csum_flags[2][2] = {
1924 CSUM_L3_CALC | CSUM_L3_VALID |
1925 CSUM_L4_CALC | CSUM_L4_VALID |
1926 CSUM_INNER_L3_CALC | CSUM_INNER_L3_VALID |
1927 CSUM_INNER_L4_CALC | CSUM_INNER_L4_VALID,
1931 CSUM_L3_CALC | CSUM_L3_VALID |
1932 CSUM_L4_CALC | CSUM_L4_VALID |
1933 CSUM_INNER_L4_CALC | CSUM_INNER_L4_VALID,
1938 CSUM_L4_CALC | CSUM_L4_VALID |
1939 CSUM_INNER_L3_CALC | CSUM_INNER_L3_VALID |
1940 CSUM_INNER_L4_CALC | CSUM_INNER_L4_VALID,
1942 /* IP6, inner IP6 */
1944 CSUM_L4_CALC | CSUM_L4_VALID |
1945 CSUM_INNER_L4_CALC | CSUM_INNER_L4_VALID,
1949 MPASS(plen > sc->params.sge.fl_pktshift);
1950 if (vi->pfil != NULL && PFIL_HOOKED_IN(vi->pfil) &&
1951 __predict_true((fl->flags & FL_BUF_RESUME) == 0)) {
1952 struct fl_sdesc *sd = &fl->sdesc[fl->cidx];
1956 slen = get_segment_len(sc, fl, plen) -
1957 sc->params.sge.fl_pktshift;
1958 frame = sd->cl + fl->rx_offset + sc->params.sge.fl_pktshift;
1959 CURVNET_SET_QUIET(ifp->if_vnet);
1960 rc = pfil_run_hooks(vi->pfil, frame, ifp,
1961 slen | PFIL_MEMPTR | PFIL_IN, NULL);
1963 if (rc == PFIL_DROPPED || rc == PFIL_CONSUMED) {
1964 skip_fl_payload(sc, fl, plen);
1967 if (rc == PFIL_REALLOCED) {
1968 skip_fl_payload(sc, fl, plen);
1969 m0 = pfil_mem2mbuf(frame);
1974 m0 = get_fl_payload(sc, fl, plen);
1975 if (__predict_false(m0 == NULL))
1978 m0->m_pkthdr.len -= sc->params.sge.fl_pktshift;
1979 m0->m_len -= sc->params.sge.fl_pktshift;
1980 m0->m_data += sc->params.sge.fl_pktshift;
1983 m0->m_pkthdr.rcvif = ifp;
1984 M_HASHTYPE_SET(m0, sw_hashtype[d->rss.hash_type][d->rss.ipv6]);
1985 m0->m_pkthdr.flowid = be32toh(d->rss.hash_val);
1987 cpl = (const void *)(&d->rss + 1);
1988 if (sc->params.tp.rx_pkt_encap) {
1989 const uint16_t ev = be16toh(cpl->err_vec);
1991 err_vec = G_T6_COMPR_RXERR_VEC(ev);
1992 tnl_type = G_T6_RX_TNL_TYPE(ev);
1993 tnlhdr_len = G_T6_RX_TNLHDR_LEN(ev);
1995 err_vec = be16toh(cpl->err_vec);
1999 if (cpl->csum_calc && err_vec == 0) {
2000 int ipv6 = !!(cpl->l2info & htobe32(F_RXF_IP6));
2002 /* checksum(s) calculated and found to be correct. */
2004 MPASS((cpl->l2info & htobe32(F_RXF_IP)) ^
2005 (cpl->l2info & htobe32(F_RXF_IP6)));
2006 m0->m_pkthdr.csum_data = be16toh(cpl->csum);
2007 if (tnl_type == 0) {
2008 if (!ipv6 && ifp->if_capenable & IFCAP_RXCSUM) {
2009 m0->m_pkthdr.csum_flags = CSUM_L3_CALC |
2010 CSUM_L3_VALID | CSUM_L4_CALC |
2012 } else if (ipv6 && ifp->if_capenable & IFCAP_RXCSUM_IPV6) {
2013 m0->m_pkthdr.csum_flags = CSUM_L4_CALC |
2018 MPASS(tnl_type == RX_PKT_TNL_TYPE_VXLAN);
2020 M_HASHTYPE_SETINNER(m0);
2021 if (__predict_false(cpl->ip_frag)) {
2023 * csum_data is for the inner frame (which is an
2024 * IP fragment) and is not 0xffff. There is no
2025 * way to pass the inner csum_data to the stack.
2026 * We don't want the stack to use the inner
2027 * csum_data to validate the outer frame or it
2028 * will get rejected. So we fix csum_data here
2029 * and let sw do the checksum of inner IP
2032 * XXX: Need 32b for csum_data2 in an rx mbuf.
2033 * Maybe stuff it into rcv_tstmp?
2035 m0->m_pkthdr.csum_data = 0xffff;
2037 m0->m_pkthdr.csum_flags = CSUM_L4_CALC |
2040 m0->m_pkthdr.csum_flags = CSUM_L3_CALC |
2041 CSUM_L3_VALID | CSUM_L4_CALC |
2047 MPASS(m0->m_pkthdr.csum_data == 0xffff);
2049 outer_ipv6 = tnlhdr_len >=
2050 sizeof(struct ether_header) +
2051 sizeof(struct ip6_hdr);
2052 m0->m_pkthdr.csum_flags =
2053 sw_csum_flags[outer_ipv6][ipv6];
2055 rxq->vxlan_rxcsum++;
2060 m0->m_pkthdr.ether_vtag = be16toh(cpl->vlan);
2061 m0->m_flags |= M_VLANTAG;
2062 rxq->vlan_extraction++;
2065 if (rxq->iq.flags & IQ_RX_TIMESTAMP) {
2067 * Fill up rcv_tstmp but do not set M_TSTMP.
2068 * rcv_tstmp is not in the format that the
2069 * kernel expects and we don't want to mislead
2070 * it. For now this is only for custom code
2071 * that knows how to interpret cxgbe's stamp.
2073 m0->m_pkthdr.rcv_tstmp =
2074 last_flit_to_ns(sc, d->rsp.u.last_flit);
2076 m0->m_flags |= M_TSTMP;
2081 m0->m_pkthdr.numa_domain = ifp->if_numa_domain;
2083 #if defined(INET) || defined(INET6)
2084 if (rxq->iq.flags & IQ_LRO_ENABLED && tnl_type == 0 &&
2085 (M_HASHTYPE_GET(m0) == M_HASHTYPE_RSS_TCP_IPV4 ||
2086 M_HASHTYPE_GET(m0) == M_HASHTYPE_RSS_TCP_IPV6)) {
2087 if (sort_before_lro(lro)) {
2088 tcp_lro_queue_mbuf(lro, m0);
2089 return (0); /* queued for sort, then LRO */
2091 if (tcp_lro_rx(lro, m0, 0) == 0)
2092 return (0); /* queued for LRO */
2095 ifp->if_input(ifp, m0);
2101 * Must drain the wrq or make sure that someone else will.
2104 wrq_tx_drain(void *arg, int n)
2106 struct sge_wrq *wrq = arg;
2107 struct sge_eq *eq = &wrq->eq;
2110 if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list))
2111 drain_wrq_wr_list(wrq->adapter, wrq);
2116 drain_wrq_wr_list(struct adapter *sc, struct sge_wrq *wrq)
2118 struct sge_eq *eq = &wrq->eq;
2119 u_int available, dbdiff; /* # of hardware descriptors */
2122 struct fw_eth_tx_pkt_wr *dst; /* any fw WR struct will do */
2124 EQ_LOCK_ASSERT_OWNED(eq);
2125 MPASS(TAILQ_EMPTY(&wrq->incomplete_wrs));
2126 wr = STAILQ_FIRST(&wrq->wr_list);
2127 MPASS(wr != NULL); /* Must be called with something useful to do */
2128 MPASS(eq->pidx == eq->dbidx);
2132 eq->cidx = read_hw_cidx(eq);
2133 if (eq->pidx == eq->cidx)
2134 available = eq->sidx - 1;
2136 available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1;
2138 MPASS(wr->wrq == wrq);
2139 n = howmany(wr->wr_len, EQ_ESIZE);
2143 dst = (void *)&eq->desc[eq->pidx];
2144 if (__predict_true(eq->sidx - eq->pidx > n)) {
2145 /* Won't wrap, won't end exactly at the status page. */
2146 bcopy(&wr->wr[0], dst, wr->wr_len);
2149 int first_portion = (eq->sidx - eq->pidx) * EQ_ESIZE;
2151 bcopy(&wr->wr[0], dst, first_portion);
2152 if (wr->wr_len > first_portion) {
2153 bcopy(&wr->wr[first_portion], &eq->desc[0],
2154 wr->wr_len - first_portion);
2156 eq->pidx = n - (eq->sidx - eq->pidx);
2158 wrq->tx_wrs_copied++;
2160 if (available < eq->sidx / 4 &&
2161 atomic_cmpset_int(&eq->equiq, 0, 1)) {
2163 * XXX: This is not 100% reliable with some
2164 * types of WRs. But this is a very unusual
2165 * situation for an ofld/ctrl queue anyway.
2167 dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ |
2173 ring_eq_db(sc, eq, dbdiff);
2177 STAILQ_REMOVE_HEAD(&wrq->wr_list, link);
2179 MPASS(wrq->nwr_pending > 0);
2181 MPASS(wrq->ndesc_needed >= n);
2182 wrq->ndesc_needed -= n;
2183 } while ((wr = STAILQ_FIRST(&wrq->wr_list)) != NULL);
2186 ring_eq_db(sc, eq, dbdiff);
2190 * Doesn't fail. Holds on to work requests it can't send right away.
2193 t4_wrq_tx_locked(struct adapter *sc, struct sge_wrq *wrq, struct wrqe *wr)
2196 struct sge_eq *eq = &wrq->eq;
2199 EQ_LOCK_ASSERT_OWNED(eq);
2201 MPASS(wr->wr_len > 0 && wr->wr_len <= SGE_MAX_WR_LEN);
2202 MPASS((wr->wr_len & 0x7) == 0);
2204 STAILQ_INSERT_TAIL(&wrq->wr_list, wr, link);
2206 wrq->ndesc_needed += howmany(wr->wr_len, EQ_ESIZE);
2208 if (!TAILQ_EMPTY(&wrq->incomplete_wrs))
2209 return; /* commit_wrq_wr will drain wr_list as well. */
2211 drain_wrq_wr_list(sc, wrq);
2213 /* Doorbell must have caught up to the pidx. */
2214 MPASS(eq->pidx == eq->dbidx);
2218 t4_update_fl_bufsize(struct ifnet *ifp)
2220 struct vi_info *vi = ifp->if_softc;
2221 struct adapter *sc = vi->adapter;
2222 struct sge_rxq *rxq;
2224 struct sge_ofld_rxq *ofld_rxq;
2229 maxp = max_rx_payload(sc, ifp, false);
2230 for_each_rxq(vi, i, rxq) {
2234 fl->zidx = find_refill_source(sc, maxp,
2235 fl->flags & FL_BUF_PACKING);
2239 maxp = max_rx_payload(sc, ifp, true);
2240 for_each_ofld_rxq(vi, i, ofld_rxq) {
2244 fl->zidx = find_refill_source(sc, maxp,
2245 fl->flags & FL_BUF_PACKING);
2252 mbuf_nsegs(struct mbuf *m)
2256 KASSERT(m->m_pkthdr.inner_l5hlen > 0,
2257 ("%s: mbuf %p missing information on # of segments.", __func__, m));
2259 return (m->m_pkthdr.inner_l5hlen);
2263 set_mbuf_nsegs(struct mbuf *m, uint8_t nsegs)
2267 m->m_pkthdr.inner_l5hlen = nsegs;
2271 mbuf_cflags(struct mbuf *m)
2275 return (m->m_pkthdr.PH_loc.eight[4]);
2279 set_mbuf_cflags(struct mbuf *m, uint8_t flags)
2283 m->m_pkthdr.PH_loc.eight[4] = flags;
2287 mbuf_len16(struct mbuf *m)
2292 n = m->m_pkthdr.PH_loc.eight[0];
2293 if (!(mbuf_cflags(m) & MC_TLS))
2294 MPASS(n > 0 && n <= SGE_MAX_WR_LEN / 16);
2300 set_mbuf_len16(struct mbuf *m, uint8_t len16)
2304 if (!(mbuf_cflags(m) & MC_TLS))
2305 MPASS(len16 > 0 && len16 <= SGE_MAX_WR_LEN / 16);
2306 m->m_pkthdr.PH_loc.eight[0] = len16;
2311 mbuf_eo_nsegs(struct mbuf *m)
2315 return (m->m_pkthdr.PH_loc.eight[1]);
2318 #if defined(INET) || defined(INET6)
2320 set_mbuf_eo_nsegs(struct mbuf *m, uint8_t nsegs)
2324 m->m_pkthdr.PH_loc.eight[1] = nsegs;
2329 mbuf_eo_len16(struct mbuf *m)
2334 n = m->m_pkthdr.PH_loc.eight[2];
2335 MPASS(n > 0 && n <= SGE_MAX_WR_LEN / 16);
2340 #if defined(INET) || defined(INET6)
2342 set_mbuf_eo_len16(struct mbuf *m, uint8_t len16)
2346 m->m_pkthdr.PH_loc.eight[2] = len16;
2351 mbuf_eo_tsclk_tsoff(struct mbuf *m)
2355 return (m->m_pkthdr.PH_loc.eight[3]);
2358 #if defined(INET) || defined(INET6)
2360 set_mbuf_eo_tsclk_tsoff(struct mbuf *m, uint8_t tsclk_tsoff)
2364 m->m_pkthdr.PH_loc.eight[3] = tsclk_tsoff;
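/*
 * Summary of the scratch storage used by these accessors: while an mbuf is
 * inside the driver's tx path, nsegs lives in inner_l5hlen, len16 in
 * PH_loc.eight[0], the EO nsegs/len16/tsclk_tsoff values in
 * eight[1]..eight[3], and the MC_* flags in eight[4].  These fields carry
 * no meaning outside this driver.
 */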
2369 needs_eo(struct m_snd_tag *mst)
2372 return (mst != NULL && mst->type == IF_SND_TAG_TYPE_RATE_LIMIT);
2377 * Try to allocate an mbuf to contain a raw work request. To make it
2378 * easy to construct the work request, don't allocate a chain but a single mbuf.
2382 alloc_wr_mbuf(int len, int how)
2387 m = m_gethdr(how, MT_DATA);
2388 else if (len <= MCLBYTES)
2389 m = m_getcl(how, MT_DATA, M_PKTHDR);
2394 m->m_pkthdr.len = len;
2396 set_mbuf_cflags(m, MC_RAW_WR);
2397 set_mbuf_len16(m, howmany(len, 16));
2402 needs_hwcsum(struct mbuf *m)
2404 const uint32_t csum_flags = CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP |
2405 CSUM_IP_TSO | CSUM_INNER_IP | CSUM_INNER_IP_UDP |
2406 CSUM_INNER_IP_TCP | CSUM_INNER_IP_TSO | CSUM_IP6_UDP |
2407 CSUM_IP6_TCP | CSUM_IP6_TSO | CSUM_INNER_IP6_UDP |
2408 CSUM_INNER_IP6_TCP | CSUM_INNER_IP6_TSO;
2412 return (m->m_pkthdr.csum_flags & csum_flags);
2416 needs_tso(struct mbuf *m)
2418 const uint32_t csum_flags = CSUM_IP_TSO | CSUM_IP6_TSO |
2419 CSUM_INNER_IP_TSO | CSUM_INNER_IP6_TSO;
2423 return (m->m_pkthdr.csum_flags & csum_flags);
2427 needs_vxlan_csum(struct mbuf *m)
2432 return (m->m_pkthdr.csum_flags & CSUM_ENCAP_VXLAN);
2436 needs_vxlan_tso(struct mbuf *m)
2438 const uint32_t csum_flags = CSUM_ENCAP_VXLAN | CSUM_INNER_IP_TSO |
2443 return ((m->m_pkthdr.csum_flags & csum_flags) != 0 &&
2444 (m->m_pkthdr.csum_flags & csum_flags) != CSUM_ENCAP_VXLAN);
2447 #if defined(INET) || defined(INET6)
2449 needs_inner_tcp_csum(struct mbuf *m)
2451 const uint32_t csum_flags = CSUM_INNER_IP_TSO | CSUM_INNER_IP6_TSO;
2455 return (m->m_pkthdr.csum_flags & csum_flags);
2460 needs_l3_csum(struct mbuf *m)
2462 const uint32_t csum_flags = CSUM_IP | CSUM_IP_TSO | CSUM_INNER_IP |
2467 return (m->m_pkthdr.csum_flags & csum_flags);
2471 needs_outer_tcp_csum(struct mbuf *m)
2473 const uint32_t csum_flags = CSUM_IP_TCP | CSUM_IP_TSO | CSUM_IP6_TCP |
2478 return (m->m_pkthdr.csum_flags & csum_flags);
2483 needs_outer_l4_csum(struct mbuf *m)
2485 const uint32_t csum_flags = CSUM_IP_UDP | CSUM_IP_TCP | CSUM_IP_TSO |
2486 CSUM_IP6_UDP | CSUM_IP6_TCP | CSUM_IP6_TSO;
2490 return (m->m_pkthdr.csum_flags & csum_flags);
2494 needs_outer_udp_csum(struct mbuf *m)
2496 const uint32_t csum_flags = CSUM_IP_UDP | CSUM_IP6_UDP;
2500 return (m->m_pkthdr.csum_flags & csum_flags);
2505 needs_vlan_insertion(struct mbuf *m)
2510 return (m->m_flags & M_VLANTAG);
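/*
 * m_advance below walks forward 'len' bytes from the (*pm, *poffset) cursor
 * in an mbuf chain and returns a pointer to the byte at the new location,
 * updating the caller's mbuf/offset cursor as it goes.  parse_pkt() uses it
 * to step from one header to the next when headers may be split across mbufs.
 */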
2514 m_advance(struct mbuf **pm, int *poffset, int len)
2516 struct mbuf *m = *pm;
2517 int offset = *poffset;
2523 if (offset + len < m->m_len) {
2525 p = mtod(m, uintptr_t) + offset;
2528 len -= m->m_len - offset;
2539 count_mbuf_ext_pgs(struct mbuf *m, int skip, vm_paddr_t *nextaddr)
2542 int i, len, off, pglen, pgoff, seglen, segoff;
2546 off = mtod(m, vm_offset_t);
2551 if (m->m_epg_hdrlen != 0) {
2552 if (off >= m->m_epg_hdrlen) {
2553 off -= m->m_epg_hdrlen;
2555 seglen = m->m_epg_hdrlen - off;
2557 seglen = min(seglen, len);
2560 paddr = pmap_kextract(
2561 (vm_offset_t)&m->m_epg_hdr[segoff]);
2562 if (*nextaddr != paddr)
2564 *nextaddr = paddr + seglen;
2567 pgoff = m->m_epg_1st_off;
2568 for (i = 0; i < m->m_epg_npgs && len > 0; i++) {
2569 pglen = m_epg_pagelen(m, i, pgoff);
2575 seglen = pglen - off;
2576 segoff = pgoff + off;
2578 seglen = min(seglen, len);
2580 paddr = m->m_epg_pa[i] + segoff;
2581 if (*nextaddr != paddr)
2583 *nextaddr = paddr + seglen;
2587 seglen = min(len, m->m_epg_trllen - off);
2589 paddr = pmap_kextract((vm_offset_t)&m->m_epg_trail[off]);
2590 if (*nextaddr != paddr)
2592 *nextaddr = paddr + seglen;
2600 * Can deal with empty mbufs in the chain that have m_len = 0, but the chain
2601 * must have at least one mbuf that's not empty. It is possible for this
2602 * routine to return 0 if skip accounts for all the contents of the mbuf chain.
2605 count_mbuf_nsegs(struct mbuf *m, int skip, uint8_t *cflags)
2607 vm_paddr_t nextaddr, paddr;
2612 MPASS(m->m_pkthdr.len > 0);
2613 MPASS(m->m_pkthdr.len >= skip);
2617 for (; m; m = m->m_next) {
2619 if (__predict_false(len == 0))
2625 if ((m->m_flags & M_EXTPG) != 0) {
2626 *cflags |= MC_NOMAP;
2627 nsegs += count_mbuf_ext_pgs(m, skip, &nextaddr);
2631 va = mtod(m, vm_offset_t) + skip;
2634 paddr = pmap_kextract(va);
2635 nsegs += sglist_count((void *)(uintptr_t)va, len);
2636 if (paddr == nextaddr)
2638 nextaddr = pmap_kextract(va + len - 1) + 1;
2645 * The maximum number of segments that can fit in a WR.
2648 max_nsegs_allowed(struct mbuf *m, bool vm_wr)
2653 return (TX_SGL_SEGS_VM_TSO);
2654 return (TX_SGL_SEGS_VM);
2658 if (needs_vxlan_tso(m))
2659 return (TX_SGL_SEGS_VXLAN_TSO);
2661 return (TX_SGL_SEGS_TSO);
2664 return (TX_SGL_SEGS);
2667 static struct timeval txerr_ratecheck = {0};
2668 static const struct timeval txerr_interval = {3, 0};
2671 * Analyze the mbuf to determine its tx needs. The mbuf passed in may change:
2672 * a) caller can assume it's been freed if this function returns with an error.
2673 * b) it may get defragged up if the gather list is too long for the hardware.
2676 parse_pkt(struct mbuf **mp, bool vm_wr)
2678 struct mbuf *m0 = *mp, *m;
2679 int rc, nsegs, defragged = 0, offset;
2680 struct ether_header *eh;
2682 #if defined(INET) || defined(INET6)
2685 #if defined(KERN_TLS) || defined(RATELIMIT)
2686 struct m_snd_tag *mst;
2693 if (__predict_false(m0->m_pkthdr.len < ETHER_HDR_LEN)) {
2702 * First count the number of gather list segments in the payload.
2703 * Defrag the mbuf if nsegs exceeds the hardware limit.
2706 MPASS(m0->m_pkthdr.len > 0);
2707 nsegs = count_mbuf_nsegs(m0, 0, &cflags);
2708 #if defined(KERN_TLS) || defined(RATELIMIT)
2709 if (m0->m_pkthdr.csum_flags & CSUM_SND_TAG)
2710 mst = m0->m_pkthdr.snd_tag;
2715 if (mst != NULL && mst->type == IF_SND_TAG_TYPE_TLS) {
2719 set_mbuf_cflags(m0, cflags);
2720 rc = t6_ktls_parse_pkt(m0, &nsegs, &len16);
2723 set_mbuf_nsegs(m0, nsegs);
2724 set_mbuf_len16(m0, len16);
2728 if (nsegs > max_nsegs_allowed(m0, vm_wr)) {
2729 if (defragged++ > 0) {
2733 counter_u64_add(defrags, 1);
2734 if ((m = m_defrag(m0, M_NOWAIT)) == NULL) {
2738 *mp = m0 = m; /* update caller's copy after defrag */
2742 if (__predict_false(nsegs > 2 && m0->m_pkthdr.len <= MHLEN &&
2743 !(cflags & MC_NOMAP))) {
2744 counter_u64_add(pullups, 1);
2745 m0 = m_pullup(m0, m0->m_pkthdr.len);
2747 /* Should have left well enough alone. */
2751 *mp = m0; /* update caller's copy after pullup */
2754 set_mbuf_nsegs(m0, nsegs);
2755 set_mbuf_cflags(m0, cflags);
2756 calculate_mbuf_len16(m0, vm_wr);
2760 * Ethofld is limited to TCP and UDP for now, and only when L4 hw
2761 * checksumming is enabled. needs_outer_l4_csum happens to check for
2762 * all the right things.
2764 if (__predict_false(needs_eo(mst) && !needs_outer_l4_csum(m0))) {
2765 m_snd_tag_rele(m0->m_pkthdr.snd_tag);
2766 m0->m_pkthdr.snd_tag = NULL;
2767 m0->m_pkthdr.csum_flags &= ~CSUM_SND_TAG;
2772 if (!needs_hwcsum(m0)
2780 eh = mtod(m, struct ether_header *);
2781 eh_type = ntohs(eh->ether_type);
2782 if (eh_type == ETHERTYPE_VLAN) {
2783 struct ether_vlan_header *evh = (void *)eh;
2785 eh_type = ntohs(evh->evl_proto);
2786 m0->m_pkthdr.l2hlen = sizeof(*evh);
2788 m0->m_pkthdr.l2hlen = sizeof(*eh);
2791 l3hdr = m_advance(&m, &offset, m0->m_pkthdr.l2hlen);
2795 case ETHERTYPE_IPV6:
2796 m0->m_pkthdr.l3hlen = sizeof(struct ip6_hdr);
2802 struct ip *ip = l3hdr;
2804 if (needs_vxlan_csum(m0)) {
2805 /* Driver will do the outer IP hdr checksum. */
2807 if (needs_vxlan_tso(m0)) {
2808 const uint16_t ipl = ip->ip_len;
2811 ip->ip_sum = ~in_cksum_hdr(ip);
2814 ip->ip_sum = in_cksum_hdr(ip);
2816 m0->m_pkthdr.l3hlen = ip->ip_hl << 2;
2821 if (ratecheck(&txerr_ratecheck, &txerr_interval)) {
2822 log(LOG_ERR, "%s: ethertype 0x%04x unknown. "
2823 "if_cxgbe must be compiled with the same "
2824 "INET/INET6 options as the kernel.\n", __func__,
2831 if (needs_vxlan_csum(m0)) {
2832 m0->m_pkthdr.l4hlen = sizeof(struct udphdr);
2833 m0->m_pkthdr.l5hlen = sizeof(struct vxlan_header);
2835 /* Inner headers. */
2836 eh = m_advance(&m, &offset, m0->m_pkthdr.l3hlen +
2837 sizeof(struct udphdr) + sizeof(struct vxlan_header));
2838 eh_type = ntohs(eh->ether_type);
2839 if (eh_type == ETHERTYPE_VLAN) {
2840 struct ether_vlan_header *evh = (void *)eh;
2842 eh_type = ntohs(evh->evl_proto);
2843 m0->m_pkthdr.inner_l2hlen = sizeof(*evh);
2845 m0->m_pkthdr.inner_l2hlen = sizeof(*eh);
2846 l3hdr = m_advance(&m, &offset, m0->m_pkthdr.inner_l2hlen);
2850 case ETHERTYPE_IPV6:
2851 m0->m_pkthdr.inner_l3hlen = sizeof(struct ip6_hdr);
2857 struct ip *ip = l3hdr;
2859 m0->m_pkthdr.inner_l3hlen = ip->ip_hl << 2;
2864 if (ratecheck(&txerr_ratecheck, &txerr_interval)) {
2865 log(LOG_ERR, "%s: VXLAN hw offload requested"
2866 "with unknown ethertype 0x%04x. if_cxgbe "
2867 "must be compiled with the same INET/INET6 "
2868 "options as the kernel.\n", __func__,
2874 #if defined(INET) || defined(INET6)
2875 if (needs_inner_tcp_csum(m0)) {
2876 tcp = m_advance(&m, &offset, m0->m_pkthdr.inner_l3hlen);
2877 m0->m_pkthdr.inner_l4hlen = tcp->th_off * 4;
2880 MPASS((m0->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0);
2881 m0->m_pkthdr.csum_flags &= CSUM_INNER_IP6_UDP |
2882 CSUM_INNER_IP6_TCP | CSUM_INNER_IP6_TSO | CSUM_INNER_IP |
2883 CSUM_INNER_IP_UDP | CSUM_INNER_IP_TCP | CSUM_INNER_IP_TSO |
2887 #if defined(INET) || defined(INET6)
2888 if (needs_outer_tcp_csum(m0)) {
2889 tcp = m_advance(&m, &offset, m0->m_pkthdr.l3hlen);
2890 m0->m_pkthdr.l4hlen = tcp->th_off * 4;
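/*
 * 0x0101080a is the NOP, NOP, TIMESTAMP TCP option prefix.  When it
 * immediately follows the TCP header the timestamp's offset within the
 * header is fixed, so it can be handed to the hardware together with the
 * timestamp clock selection for rate-limited (EO) transmission.
 */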
2892 if (tsclk >= 0 && *(uint32_t *)(tcp + 1) == ntohl(0x0101080a)) {
2893 set_mbuf_eo_tsclk_tsoff(m0,
2894 V_FW_ETH_TX_EO_WR_TSCLK(tsclk) |
2895 V_FW_ETH_TX_EO_WR_TSOFF(sizeof(*tcp) / 2 + 1));
2897 set_mbuf_eo_tsclk_tsoff(m0, 0);
2898 } else if (needs_outer_udp_csum(m0)) {
2899 m0->m_pkthdr.l4hlen = sizeof(struct udphdr);
2903 if (needs_eo(mst)) {
2906 /* EO WRs have the headers in the WR and not the GL. */
2907 immhdrs = m0->m_pkthdr.l2hlen + m0->m_pkthdr.l3hlen +
2908 m0->m_pkthdr.l4hlen;
2910 nsegs = count_mbuf_nsegs(m0, immhdrs, &cflags);
2911 MPASS(cflags == mbuf_cflags(m0));
2912 set_mbuf_eo_nsegs(m0, nsegs);
2913 set_mbuf_eo_len16(m0,
2914 txpkt_eo_len16(nsegs, immhdrs, needs_tso(m0)));
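/*
 * start_wrq_wr/commit_wrq_wr below implement a two-step scheme for work
 * requests that are constructed in place: in the descriptor ring itself, in
 * the per-wrq scratch space when the WR would wrap around the ring, or in a
 * wrqe when the queue is backlogged.  start_wrq_wr reserves the space and
 * commit_wrq_wr publishes the finished WR and rings the doorbell once it is
 * safe to do so.  A minimal usage sketch, with a hypothetical fw_foo_wr as
 * the WR type and no error handling:
 *
 *	struct wrq_cookie cookie;
 *	struct fw_foo_wr *wr;
 *
 *	wr = start_wrq_wr(wrq, howmany(sizeof(*wr), 16), &cookie);
 *	if (wr == NULL)
 *		return (ENOMEM);
 *	... fill in *wr ...
 *	commit_wrq_wr(wrq, wr, &cookie);
 */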
2923 start_wrq_wr(struct sge_wrq *wrq, int len16, struct wrq_cookie *cookie)
2925 struct sge_eq *eq = &wrq->eq;
2926 struct adapter *sc = wrq->adapter;
2927 int ndesc, available;
2932 ndesc = tx_len16_to_desc(len16);
2933 MPASS(ndesc > 0 && ndesc <= SGE_MAX_WR_NDESC);
2937 if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list))
2938 drain_wrq_wr_list(sc, wrq);
2940 if (!STAILQ_EMPTY(&wrq->wr_list)) {
2943 wr = alloc_wrqe(len16 * 16, wrq);
2944 if (__predict_false(wr == NULL))
2947 cookie->ndesc = ndesc;
2951 eq->cidx = read_hw_cidx(eq);
2952 if (eq->pidx == eq->cidx)
2953 available = eq->sidx - 1;
2955 available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1;
2956 if (available < ndesc)
2959 cookie->pidx = eq->pidx;
2960 cookie->ndesc = ndesc;
2961 TAILQ_INSERT_TAIL(&wrq->incomplete_wrs, cookie, link);
2963 w = &eq->desc[eq->pidx];
2964 IDXINCR(eq->pidx, ndesc, eq->sidx);
2965 if (__predict_false(cookie->pidx + ndesc > eq->sidx)) {
2967 wrq->ss_pidx = cookie->pidx;
2968 wrq->ss_len = len16 * 16;
2977 commit_wrq_wr(struct sge_wrq *wrq, void *w, struct wrq_cookie *cookie)
2979 struct sge_eq *eq = &wrq->eq;
2980 struct adapter *sc = wrq->adapter;
2982 struct wrq_cookie *prev, *next;
2984 if (cookie->pidx == -1) {
2985 struct wrqe *wr = __containerof(w, struct wrqe, wr);
2991 if (__predict_false(w == &wrq->ss[0])) {
2992 int n = (eq->sidx - wrq->ss_pidx) * EQ_ESIZE;
2994 MPASS(wrq->ss_len > n); /* WR had better wrap around. */
2995 bcopy(&wrq->ss[0], &eq->desc[wrq->ss_pidx], n);
2996 bcopy(&wrq->ss[n], &eq->desc[0], wrq->ss_len - n);
2999 wrq->tx_wrs_direct++;
3002 ndesc = cookie->ndesc; /* Can be more than SGE_MAX_WR_NDESC here. */
3003 pidx = cookie->pidx;
3004 MPASS(pidx >= 0 && pidx < eq->sidx);
3005 prev = TAILQ_PREV(cookie, wrq_incomplete_wrs, link);
3006 next = TAILQ_NEXT(cookie, link);
3008 MPASS(pidx == eq->dbidx);
3009 if (next == NULL || ndesc >= 16) {
3011 struct fw_eth_tx_pkt_wr *dst; /* any fw WR struct will do */
3014 * Note that the WR via which we'll request tx updates
3015 * is at pidx and not eq->pidx, which has moved on already.
3018 dst = (void *)&eq->desc[pidx];
3019 available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1;
3020 if (available < eq->sidx / 4 &&
3021 atomic_cmpset_int(&eq->equiq, 0, 1)) {
3023 * XXX: This is not 100% reliable with some
3024 * types of WRs. But this is a very unusual
3025 * situation for an ofld/ctrl queue anyway.
3027 dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ |
3031 ring_eq_db(wrq->adapter, eq, ndesc);
3033 MPASS(IDXDIFF(next->pidx, pidx, eq->sidx) == ndesc);
3035 next->ndesc += ndesc;
3038 MPASS(IDXDIFF(pidx, prev->pidx, eq->sidx) == prev->ndesc);
3039 prev->ndesc += ndesc;
3041 TAILQ_REMOVE(&wrq->incomplete_wrs, cookie, link);
3043 if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list))
3044 drain_wrq_wr_list(sc, wrq);
3047 if (TAILQ_EMPTY(&wrq->incomplete_wrs)) {
3048 /* Doorbell must have caught up to the pidx. */
3049 MPASS(wrq->eq.pidx == wrq->eq.dbidx);
3056 can_resume_eth_tx(struct mp_ring *r)
3058 struct sge_eq *eq = r->cookie;
3060 return (total_available_tx_desc(eq) > eq->sidx / 8);
3064 cannot_use_txpkts(struct mbuf *m)
3066 /* maybe put a GL limit too, to avoid silliness? */
3068 return (needs_tso(m) || (mbuf_cflags(m) & (MC_RAW_WR | MC_TLS)) != 0);
3072 discard_tx(struct sge_eq *eq)
3075 return ((eq->flags & (EQ_ENABLED | EQ_QFLUSH)) != EQ_ENABLED);
3079 wr_can_update_eq(void *p)
3081 struct fw_eth_tx_pkts_wr *wr = p;
3083 switch (G_FW_WR_OP(be32toh(wr->op_pkd))) {
3085 case FW_ETH_TX_PKT_WR:
3086 case FW_ETH_TX_PKTS_WR:
3087 case FW_ETH_TX_PKTS2_WR:
3088 case FW_ETH_TX_PKT_VM_WR:
3089 case FW_ETH_TX_PKTS_VM_WR:
3097 set_txupdate_flags(struct sge_txq *txq, u_int avail,
3098 struct fw_eth_tx_pkt_wr *wr)
3100 struct sge_eq *eq = &txq->eq;
3101 struct txpkts *txp = &txq->txp;
3103 if ((txp->npkt > 0 || avail < eq->sidx / 2) &&
3104 atomic_cmpset_int(&eq->equiq, 0, 1)) {
3105 wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ | F_FW_WR_EQUIQ);
3106 eq->equeqidx = eq->pidx;
3107 } else if (IDXDIFF(eq->pidx, eq->equeqidx, eq->sidx) >= 32) {
3108 wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ);
3109 eq->equeqidx = eq->pidx;
3113 #if defined(__i386__) || defined(__amd64__)
3114 extern uint64_t tsc_freq;
3118 record_eth_tx_time(struct sge_txq *txq)
3120 const uint64_t cycles = get_cyclecount();
3121 const uint64_t last_tx = txq->last_tx;
3122 #if defined(__i386__) || defined(__amd64__)
3123 const uint64_t itg = tsc_freq * t4_tx_coalesce_gap / 1000000;
3125 const uint64_t itg = 0;
3128 MPASS(cycles >= last_tx);
3129 txq->last_tx = cycles;
3130 return (cycles - last_tx < itg);
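/*
 * record_eth_tx_time above is the tx-coalescing heuristic: on x86, itg is the
 * configured coalesce gap (t4_tx_coalesce_gap, in microseconds) converted to
 * TSC cycles, and the function reports whether the previous call happened
 * within that gap; elsewhere itg is 0 and the check never fires.  eth_tx
 * below treats a recent transmission as a hint that more traffic is likely,
 * making it worth holding packets back briefly to build txpkts work requests.
 */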
3134 * r->items[cidx] to r->items[pidx], with a wraparound at r->size, are ready to
3135 * be consumed. Return the actual number consumed. 0 indicates a stall.
3138 eth_tx(struct mp_ring *r, u_int cidx, u_int pidx, bool *coalescing)
3140 struct sge_txq *txq = r->cookie;
3141 struct ifnet *ifp = txq->ifp;
3142 struct sge_eq *eq = &txq->eq;
3143 struct txpkts *txp = &txq->txp;
3144 struct vi_info *vi = ifp->if_softc;
3145 struct adapter *sc = vi->adapter;
3146 u_int total, remaining; /* # of packets */
3147 u_int n, avail, dbdiff; /* # of hardware descriptors */
3150 bool snd, recent_tx;
3151 void *wr; /* start of the last WR written to the ring */
3153 TXQ_LOCK_ASSERT_OWNED(txq);
3154 recent_tx = record_eth_tx_time(txq);
3156 remaining = IDXDIFF(pidx, cidx, r->size);
3157 if (__predict_false(discard_tx(eq))) {
3158 for (i = 0; i < txp->npkt; i++)
3159 m_freem(txp->mb[i]);
3161 while (cidx != pidx) {
3162 m0 = r->items[cidx];
3164 if (++cidx == r->size)
3167 reclaim_tx_descs(txq, eq->sidx);
3168 *coalescing = false;
3169 return (remaining); /* emptied */
3172 /* How many hardware descriptors do we have readily available? */
3173 if (eq->pidx == eq->cidx)
3174 avail = eq->sidx - 1;
3176 avail = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1;
3179 if (remaining == 0) {
3181 txq->txpkts_flush++;
3186 MPASS(remaining > 0);
3187 while (remaining > 0) {
3188 m0 = r->items[cidx];
3190 MPASS(m0->m_nextpkt == NULL);
3192 if (avail < 2 * SGE_MAX_WR_NDESC)
3193 avail += reclaim_tx_descs(txq, 64);
3195 if (t4_tx_coalesce == 0 && txp->npkt == 0)
3196 goto skip_coalescing;
3197 if (cannot_use_txpkts(m0))
3199 else if (recent_tx) {
3200 if (++txp->score == 0)
3201 txp->score = UINT8_MAX;
3204 if (txp->npkt > 0 || remaining > 1 ||
3205 txp->score >= t4_tx_coalesce_pkts ||
3206 atomic_load_int(&txq->eq.equiq) != 0) {
3207 if (vi->flags & TX_USES_VM_WR)
3208 rc = add_to_txpkts_vf(sc, txq, m0, avail, &snd);
3210 rc = add_to_txpkts_pf(sc, txq, m0, avail, &snd);
3216 MPASS(txp->npkt > 0);
3217 for (i = 0; i < txp->npkt; i++)
3218 ETHER_BPF_MTAP(ifp, txp->mb[i]);
3219 if (txp->npkt > 1) {
3220 MPASS(avail >= tx_len16_to_desc(txp->len16));
3221 if (vi->flags & TX_USES_VM_WR)
3222 n = write_txpkts_vm_wr(sc, txq);
3224 n = write_txpkts_wr(sc, txq);
3227 tx_len16_to_desc(mbuf_len16(txp->mb[0])));
3228 if (vi->flags & TX_USES_VM_WR)
3229 n = write_txpkt_vm_wr(sc, txq,
3232 n = write_txpkt_wr(sc, txq, txp->mb[0],
3235 MPASS(n <= SGE_MAX_WR_NDESC);
3238 wr = &eq->desc[eq->pidx];
3239 IDXINCR(eq->pidx, n, eq->sidx);
3240 txp->npkt = 0; /* emptied */
3243 /* m0 was coalesced into txq->txpkts. */
3248 * m0 is suitable for tx coalescing but could not be
3249 * combined with the existing txq->txpkts, which has now
3250 * been transmitted. Start a new txpkts with m0.
3253 MPASS(txp->npkt == 0);
3257 MPASS(rc != 0 && rc != EAGAIN);
3258 MPASS(txp->npkt == 0);
3260 n = tx_len16_to_desc(mbuf_len16(m0));
3261 if (__predict_false(avail < n)) {
3262 avail += reclaim_tx_descs(txq, min(n, 32));
3264 break; /* out of descriptors */
3267 wr = &eq->desc[eq->pidx];
3268 if (mbuf_cflags(m0) & MC_RAW_WR) {
3269 n = write_raw_wr(txq, wr, m0, avail);
3271 } else if (mbuf_cflags(m0) & MC_TLS) {
3272 ETHER_BPF_MTAP(ifp, m0);
3273 n = t6_ktls_write_wr(txq, wr, m0, mbuf_nsegs(m0),
3277 ETHER_BPF_MTAP(ifp, m0);
3278 if (vi->flags & TX_USES_VM_WR)
3279 n = write_txpkt_vm_wr(sc, txq, m0);
3281 n = write_txpkt_wr(sc, txq, m0, avail);
3283 MPASS(n >= 1 && n <= avail);
3284 if (!(mbuf_cflags(m0) & MC_TLS))
3285 MPASS(n <= SGE_MAX_WR_NDESC);
3289 IDXINCR(eq->pidx, n, eq->sidx);
3291 if (dbdiff >= 512 / EQ_ESIZE) { /* X_FETCHBURSTMAX_512B */
3292 if (wr_can_update_eq(wr))
3293 set_txupdate_flags(txq, avail, wr);
3294 ring_eq_db(sc, eq, dbdiff);
3295 avail += reclaim_tx_descs(txq, 32);
3301 if (__predict_false(++cidx == r->size))
3305 if (wr_can_update_eq(wr))
3306 set_txupdate_flags(txq, avail, wr);
3307 ring_eq_db(sc, eq, dbdiff);
3308 reclaim_tx_descs(txq, 32);
3309 } else if (eq->pidx == eq->cidx && txp->npkt > 0 &&
3310 atomic_load_int(&txq->eq.equiq) == 0) {
3312 * If nothing was submitted to the chip for tx (it was coalesced
3313 * into txpkts instead) and there is no tx update outstanding
3314 * then we need to send txpkts now.
3317 MPASS(txp->npkt > 0);
3318 for (i = 0; i < txp->npkt; i++)
3319 ETHER_BPF_MTAP(ifp, txp->mb[i]);
3320 if (txp->npkt > 1) {
3321 MPASS(avail >= tx_len16_to_desc(txp->len16));
3322 if (vi->flags & TX_USES_VM_WR)
3323 n = write_txpkts_vm_wr(sc, txq);
3325 n = write_txpkts_wr(sc, txq);
3328 tx_len16_to_desc(mbuf_len16(txp->mb[0])));
3329 if (vi->flags & TX_USES_VM_WR)
3330 n = write_txpkt_vm_wr(sc, txq, txp->mb[0]);
3332 n = write_txpkt_wr(sc, txq, txp->mb[0], avail);
3334 MPASS(n <= SGE_MAX_WR_NDESC);
3335 wr = &eq->desc[eq->pidx];
3336 IDXINCR(eq->pidx, n, eq->sidx);
3337 txp->npkt = 0; /* emptied */
3339 MPASS(wr_can_update_eq(wr));
3340 set_txupdate_flags(txq, avail - n, wr);
3341 ring_eq_db(sc, eq, n);
3342 reclaim_tx_descs(txq, 32);
3344 *coalescing = txp->npkt > 0;
3350 init_iq(struct sge_iq *iq, struct adapter *sc, int tmr_idx, int pktc_idx,
3351 int qsize, int intr_idx, int cong)
3354 KASSERT(tmr_idx >= 0 && tmr_idx < SGE_NTIMERS,
3355 ("%s: bad tmr_idx %d", __func__, tmr_idx));
3356 KASSERT(pktc_idx < SGE_NCOUNTERS, /* -ve is ok, means don't use */
3357 ("%s: bad pktc_idx %d", __func__, pktc_idx));
3358 KASSERT(intr_idx >= -1 && intr_idx < sc->intr_count,
3359 ("%s: bad intr_idx %d", __func__, intr_idx));
3362 iq->state = IQS_DISABLED;
3364 iq->intr_params = V_QINTR_TIMER_IDX(tmr_idx);
3365 iq->intr_pktc_idx = SGE_NCOUNTERS - 1;
3366 if (pktc_idx >= 0) {
3367 iq->intr_params |= F_QINTR_CNT_EN;
3368 iq->intr_pktc_idx = pktc_idx;
3370 iq->qsize = roundup2(qsize, 16); /* See FW_IQ_CMD/iqsize */
3371 iq->sidx = iq->qsize - sc->params.sge.spg_len / IQ_ESIZE;
3372 iq->intr_idx = intr_idx;
3377 init_fl(struct adapter *sc, struct sge_fl *fl, int qsize, int maxp, char *name)
3379 struct sge_params *sp = &sc->params.sge;
3382 fl->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE;
3383 strlcpy(fl->lockname, name, sizeof(fl->lockname));
3384 mtx_init(&fl->fl_lock, fl->lockname, NULL, MTX_DEF);
3385 if (sc->flags & BUF_PACKING_OK &&
3386 ((!is_t4(sc) && buffer_packing) || /* T5+: enabled unless 0 */
3387 (is_t4(sc) && buffer_packing == 1)))/* T4: disabled unless 1 */
3388 fl->flags |= FL_BUF_PACKING;
3389 fl->zidx = find_refill_source(sc, maxp, fl->flags & FL_BUF_PACKING);
3390 fl->safe_zidx = sc->sge.safe_zidx;
3391 if (fl->flags & FL_BUF_PACKING) {
3392 fl->lowat = roundup2(sp->fl_starve_threshold2, 8);
3393 fl->buf_boundary = sp->pack_boundary;
3395 fl->lowat = roundup2(sp->fl_starve_threshold, 8);
3396 fl->buf_boundary = 16;
3398 if (fl_pad && fl->buf_boundary < sp->pad_boundary)
3399 fl->buf_boundary = sp->pad_boundary;
3403 init_eq(struct adapter *sc, struct sge_eq *eq, int eqtype, int qsize,
3404 uint8_t tx_chan, struct sge_iq *iq, char *name)
3406 KASSERT(eqtype >= EQ_CTRL && eqtype <= EQ_OFLD,
3407 ("%s: bad qtype %d", __func__, eqtype));
3410 eq->tx_chan = tx_chan;
3412 eq->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE;
3413 strlcpy(eq->lockname, name, sizeof(eq->lockname));
3414 mtx_init(&eq->eq_lock, eq->lockname, NULL, MTX_DEF);
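/*
 * alloc_ring/free_ring below manage descriptor ring memory shared with the
 * hardware: a single physically contiguous, 512 byte aligned bus_dma(9)
 * allocation, zeroed and loaded as one segment, with the bus address handed
 * back to the caller for use in the firmware commands that create the queues.
 */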
3418 alloc_ring(struct adapter *sc, size_t len, bus_dma_tag_t *tag,
3419 bus_dmamap_t *map, bus_addr_t *pa, void **va)
3423 rc = bus_dma_tag_create(sc->dmat, 512, 0, BUS_SPACE_MAXADDR,
3424 BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, 0, NULL, NULL, tag);
3426 CH_ERR(sc, "cannot allocate DMA tag: %d\n", rc);
3430 rc = bus_dmamem_alloc(*tag, va,
3431 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, map);
3433 CH_ERR(sc, "cannot allocate DMA memory: %d\n", rc);
3437 rc = bus_dmamap_load(*tag, *map, *va, len, oneseg_dma_callback, pa, 0);
3439 CH_ERR(sc, "cannot load DMA map: %d\n", rc);
3444 free_ring(sc, *tag, *map, *pa, *va);
3450 free_ring(struct adapter *sc, bus_dma_tag_t tag, bus_dmamap_t map,
3451 bus_addr_t pa, void *va)
3454 bus_dmamap_unload(tag, map);
3456 bus_dmamem_free(tag, va, map);
3458 bus_dma_tag_destroy(tag);
3464 * Allocates the software resources (mainly memory and sysctl nodes) for an
3465 * ingress queue and an optional freelist.
3467 * Sets IQ_SW_ALLOCATED and returns 0 on success.
3470 alloc_iq_fl(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl,
3471 struct sysctl_ctx_list *ctx, struct sysctl_oid *oid)
3475 struct adapter *sc = vi->adapter;
3477 MPASS(!(iq->flags & IQ_SW_ALLOCATED));
3479 len = iq->qsize * IQ_ESIZE;
3480 rc = alloc_ring(sc, len, &iq->desc_tag, &iq->desc_map, &iq->ba,
3481 (void **)&iq->desc);
3486 len = fl->qsize * EQ_ESIZE;
3487 rc = alloc_ring(sc, len, &fl->desc_tag, &fl->desc_map,
3488 &fl->ba, (void **)&fl->desc);
3490 free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba,
3495 /* Allocate space for one software descriptor per buffer. */
3496 fl->sdesc = malloc(fl->sidx * 8 * sizeof(struct fl_sdesc),
3497 M_CXGBE, M_ZERO | M_WAITOK);
3499 add_fl_sysctls(sc, ctx, oid, fl);
3500 iq->flags |= IQ_HAS_FL;
3502 add_iq_sysctls(ctx, oid, iq);
3503 iq->flags |= IQ_SW_ALLOCATED;
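/*
 * The rest of the queue allocation code follows the same split: software
 * resources (rings, locks, sysctls) and the hardware context are created and
 * destroyed independently, tracked by the *_SW_ALLOCATED and *_HW_ALLOCATED
 * flags.  This lets the hardware side be torn down and recreated (e.g.,
 * across an adapter reset) without discarding the software state.
 */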
3509 * Frees all software resources (memory and locks) associated with an ingress
3510 * queue and an optional freelist.
3513 free_iq_fl(struct adapter *sc, struct sge_iq *iq, struct sge_fl *fl)
3515 MPASS(iq->flags & IQ_SW_ALLOCATED);
3518 MPASS(iq->flags & IQ_HAS_FL);
3519 free_ring(sc, fl->desc_tag, fl->desc_map, fl->ba, fl->desc);
3520 free_fl_buffers(sc, fl);
3521 free(fl->sdesc, M_CXGBE);
3522 mtx_destroy(&fl->fl_lock);
3523 bzero(fl, sizeof(*fl));
3525 free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba, iq->desc);
3526 bzero(iq, sizeof(*iq));
3530 * Allocates a hardware ingress queue and an optional freelist that will be
3531 * associated with it.
3533 * Returns errno on failure. Resources allocated up to that point may still be
3534 * allocated. Caller is responsible for cleanup in case this function fails.
3537 alloc_iq_fl_hwq(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl)
3539 int rc, i, cntxt_id;
3541 struct adapter *sc = vi->adapter;
3544 MPASS(!(iq->flags & IQ_HW_ALLOCATED));
3546 bzero(&c, sizeof(c));
3547 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
3548 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) |
3549 V_FW_IQ_CMD_VFN(0));
3551 c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
3554 /* Special handling for firmware event queue */
3555 if (iq == &sc->sge.fwq)
3556 v |= F_FW_IQ_CMD_IQASYNCH;
3558 if (iq->intr_idx < 0) {
3559 /* Forwarded interrupts, all headed to fwq */
3560 v |= F_FW_IQ_CMD_IQANDST;
3561 v |= V_FW_IQ_CMD_IQANDSTINDEX(sc->sge.fwq.cntxt_id);
3563 KASSERT(iq->intr_idx < sc->intr_count,
3564 ("%s: invalid direct intr_idx %d", __func__, iq->intr_idx));
3565 v |= V_FW_IQ_CMD_IQANDSTINDEX(iq->intr_idx);
3568 bzero(iq->desc, iq->qsize * IQ_ESIZE);
3569 c.type_to_iqandstindex = htobe32(v |
3570 V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
3571 V_FW_IQ_CMD_VIID(vi->viid) |
3572 V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
3573 c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(vi->pi->tx_chan) |
3574 F_FW_IQ_CMD_IQGTSMODE |
3575 V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) |
3576 V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4));
3577 c.iqsize = htobe16(iq->qsize);
3578 c.iqaddr = htobe64(iq->ba);
3580 c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN);
3583 bzero(fl->desc, fl->sidx * EQ_ESIZE + sc->params.sge.spg_len);
3584 c.iqns_to_fl0congen |=
3585 htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
3586 F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
3587 (fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) |
3588 (fl->flags & FL_BUF_PACKING ? F_FW_IQ_CMD_FL0PACKEN :
3590 if (iq->cong >= 0) {
3591 c.iqns_to_fl0congen |=
3592 htobe32(V_FW_IQ_CMD_FL0CNGCHMAP(iq->cong) |
3593 F_FW_IQ_CMD_FL0CONGCIF |
3594 F_FW_IQ_CMD_FL0CONGEN);
3596 c.fl0dcaen_to_fl0cidxfthresh =
3597 htobe16(V_FW_IQ_CMD_FL0FBMIN(chip_id(sc) <= CHELSIO_T5 ?
3598 X_FETCHBURSTMIN_128B : X_FETCHBURSTMIN_64B_T6) |
3599 V_FW_IQ_CMD_FL0FBMAX(chip_id(sc) <= CHELSIO_T5 ?
3600 X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B));
3601 c.fl0size = htobe16(fl->qsize);
3602 c.fl0addr = htobe64(fl->ba);
3605 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
3607 CH_ERR(sc, "failed to create hw ingress queue: %d\n", rc);
3612 iq->gen = F_RSPD_GEN;
3613 iq->cntxt_id = be16toh(c.iqid);
3614 iq->abs_id = be16toh(c.physiqid);
3616 cntxt_id = iq->cntxt_id - sc->sge.iq_start;
3617 if (cntxt_id >= sc->sge.iqmap_sz) {
3618 panic ("%s: iq->cntxt_id (%d) more than the max (%d)", __func__,
3619 cntxt_id, sc->sge.iqmap_sz - 1);
3621 sc->sge.iqmap[cntxt_id] = iq;
3626 MPASS(!(fl->flags & FL_BUF_RESUME));
3627 for (i = 0; i < fl->sidx * 8; i++)
3628 MPASS(fl->sdesc[i].cl == NULL);
3630 fl->cntxt_id = be16toh(c.fl0id);
3631 fl->pidx = fl->cidx = fl->hw_cidx = fl->dbidx = 0;
3633 fl->flags &= ~(FL_STARVING | FL_DOOMED);
3635 cntxt_id = fl->cntxt_id - sc->sge.eq_start;
3636 if (cntxt_id >= sc->sge.eqmap_sz) {
3637 panic("%s: fl->cntxt_id (%d) more than the max (%d)",
3638 __func__, cntxt_id, sc->sge.eqmap_sz - 1);
3640 sc->sge.eqmap[cntxt_id] = (void *)fl;
3643 if (isset(&sc->doorbells, DOORBELL_UDB)) {
3644 uint32_t s_qpp = sc->params.sge.eq_s_qpp;
3645 uint32_t mask = (1 << s_qpp) - 1;
3646 volatile uint8_t *udb;
3648 udb = sc->udbs_base + UDBS_DB_OFFSET;
3649 udb += (qid >> s_qpp) << PAGE_SHIFT;
3651 if (qid < PAGE_SIZE / UDBS_SEG_SIZE) {
3652 udb += qid << UDBS_SEG_SHIFT;
3655 fl->udb = (volatile void *)udb;
3657 fl->dbval = V_QID(qid) | sc->chip_params->sge_fl_db;
3660 /* Enough to make sure the SGE doesn't think it's starved */
3661 refill_fl(sc, fl, fl->lowat);
3665 if (chip_id(sc) >= CHELSIO_T5 && !(sc->flags & IS_VF) && iq->cong >= 0) {
3666 uint32_t param, val;
3668 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
3669 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
3670 V_FW_PARAMS_PARAM_YZ(iq->cntxt_id);
3675 for (i = 0; i < 4; i++) {
3676 if (iq->cong & (1 << i))
3677 val |= 1 << (i << 2);
3681 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val);
3683 /* report error but carry on */
3684 CH_ERR(sc, "failed to set congestion manager context "
3685 "for ingress queue %d: %d\n", iq->cntxt_id, rc);
3689 /* Enable IQ interrupts */
3690 atomic_store_rel_int(&iq->state, IQS_IDLE);
3691 t4_write_reg(sc, sc->sge_gts_reg, V_SEINTARM(iq->intr_params) |
3692 V_INGRESSQID(iq->cntxt_id));
3694 iq->flags |= IQ_HW_ALLOCATED;
3700 free_iq_fl_hwq(struct adapter *sc, struct sge_iq *iq, struct sge_fl *fl)
3704 MPASS(iq->flags & IQ_HW_ALLOCATED);
3705 rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
3706 iq->cntxt_id, fl ? fl->cntxt_id : 0xffff, 0xffff);
3708 CH_ERR(sc, "failed to free iq %p: %d\n", iq, rc);
3711 iq->flags &= ~IQ_HW_ALLOCATED;
3717 add_iq_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *oid,
3720 struct sysctl_oid_list *children;
3722 if (ctx == NULL || oid == NULL)
3725 children = SYSCTL_CHILDREN(oid);
3726 SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD, &iq->ba,
3727 "bus address of descriptor ring");
3728 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL,
3729 iq->qsize * IQ_ESIZE, "descriptor ring size in bytes");
3730 SYSCTL_ADD_U16(ctx, children, OID_AUTO, "abs_id", CTLFLAG_RD,
3731 &iq->abs_id, 0, "absolute id of the queue");
3732 SYSCTL_ADD_U16(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
3733 &iq->cntxt_id, 0, "SGE context id of the queue");
3734 SYSCTL_ADD_U16(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, &iq->cidx,
3735 0, "consumer index");
3739 add_fl_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx,
3740 struct sysctl_oid *oid, struct sge_fl *fl)
3742 struct sysctl_oid_list *children;
3744 if (ctx == NULL || oid == NULL)
3747 children = SYSCTL_CHILDREN(oid);
3748 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl",
3749 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "freelist");
3750 children = SYSCTL_CHILDREN(oid);
3752 SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD,
3753 &fl->ba, "bus address of descriptor ring");
3754 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL,
3755 fl->sidx * EQ_ESIZE + sc->params.sge.spg_len,
3756 "desc ring size in bytes");
3757 SYSCTL_ADD_U16(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
3758 &fl->cntxt_id, 0, "SGE context id of the freelist");
3759 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "padding", CTLFLAG_RD, NULL,
3760 fl_pad ? 1 : 0, "padding enabled");
3761 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "packing", CTLFLAG_RD, NULL,
3762 fl->flags & FL_BUF_PACKING ? 1 : 0, "packing enabled");
3763 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, &fl->cidx,
3764 0, "consumer index");
3765 if (fl->flags & FL_BUF_PACKING) {
3766 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_offset",
3767 CTLFLAG_RD, &fl->rx_offset, 0, "packing rx offset");
3769 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, &fl->pidx,
3770 0, "producer index");
3771 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_allocated",
3772 CTLFLAG_RD, &fl->cl_allocated, "# of clusters allocated");
3773 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_recycled",
3774 CTLFLAG_RD, &fl->cl_recycled, "# of clusters recycled");
3775 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_fast_recycled",
3776 CTLFLAG_RD, &fl->cl_fast_recycled, "# of clusters recycled (fast)");
3783 alloc_fwq(struct adapter *sc)
3786 struct sge_iq *fwq = &sc->sge.fwq;
3787 struct vi_info *vi = &sc->port[0]->vi[0];
3789 if (!(fwq->flags & IQ_SW_ALLOCATED)) {
3790 MPASS(!(fwq->flags & IQ_HW_ALLOCATED));
3792 if (sc->flags & IS_VF)
3795 intr_idx = sc->intr_count > 1 ? 1 : 0;
3796 init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE, intr_idx, -1);
3797 rc = alloc_iq_fl(vi, fwq, NULL, &sc->ctx, sc->fwq_oid);
3799 CH_ERR(sc, "failed to allocate fwq: %d\n", rc);
3802 MPASS(fwq->flags & IQ_SW_ALLOCATED);
3805 if (!(fwq->flags & IQ_HW_ALLOCATED)) {
3806 MPASS(fwq->flags & IQ_SW_ALLOCATED);
3808 rc = alloc_iq_fl_hwq(vi, fwq, NULL);
3810 CH_ERR(sc, "failed to create hw fwq: %d\n", rc);
3813 MPASS(fwq->flags & IQ_HW_ALLOCATED);
3823 free_fwq(struct adapter *sc)
3825 struct sge_iq *fwq = &sc->sge.fwq;
3827 if (fwq->flags & IQ_HW_ALLOCATED) {
3828 MPASS(fwq->flags & IQ_SW_ALLOCATED);
3829 free_iq_fl_hwq(sc, fwq, NULL);
3830 MPASS(!(fwq->flags & IQ_HW_ALLOCATED));
3833 if (fwq->flags & IQ_SW_ALLOCATED) {
3834 MPASS(!(fwq->flags & IQ_HW_ALLOCATED));
3835 free_iq_fl(sc, fwq, NULL);
3836 MPASS(!(fwq->flags & IQ_SW_ALLOCATED));
3844 alloc_ctrlq(struct adapter *sc, int idx)
3848 struct sysctl_oid *oid;
3849 struct sge_wrq *ctrlq = &sc->sge.ctrlq[idx];
3851 MPASS(idx < sc->params.nports);
3853 if (!(ctrlq->eq.flags & EQ_SW_ALLOCATED)) {
3854 MPASS(!(ctrlq->eq.flags & EQ_HW_ALLOCATED));
3856 snprintf(name, sizeof(name), "%d", idx);
3857 oid = SYSCTL_ADD_NODE(&sc->ctx, SYSCTL_CHILDREN(sc->ctrlq_oid),
3858 OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
3861 snprintf(name, sizeof(name), "%s ctrlq%d",
3862 device_get_nameunit(sc->dev), idx);
3863 init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE,
3864 sc->port[idx]->tx_chan, &sc->sge.fwq, name);
3865 rc = alloc_wrq(sc, NULL, ctrlq, &sc->ctx, oid);
3867 CH_ERR(sc, "failed to allocate ctrlq%d: %d\n", idx, rc);
3868 sysctl_remove_oid(oid, 1, 1);
3871 MPASS(ctrlq->eq.flags & EQ_SW_ALLOCATED);
3874 if (!(ctrlq->eq.flags & EQ_HW_ALLOCATED)) {
3875 MPASS(ctrlq->eq.flags & EQ_SW_ALLOCATED);
3877 rc = alloc_eq_hwq(sc, NULL, &ctrlq->eq);
3879 CH_ERR(sc, "failed to create hw ctrlq%d: %d\n", idx, rc);
3882 MPASS(ctrlq->eq.flags & EQ_HW_ALLOCATED);
3892 free_ctrlq(struct adapter *sc, int idx)
3894 struct sge_wrq *ctrlq = &sc->sge.ctrlq[idx];
3896 if (ctrlq->eq.flags & EQ_HW_ALLOCATED) {
3897 MPASS(ctrlq->eq.flags & EQ_SW_ALLOCATED);
3898 free_eq_hwq(sc, NULL, &ctrlq->eq);
3899 MPASS(!(ctrlq->eq.flags & EQ_HW_ALLOCATED));
3902 if (ctrlq->eq.flags & EQ_SW_ALLOCATED) {
3903 MPASS(!(ctrlq->eq.flags & EQ_HW_ALLOCATED));
3904 free_wrq(sc, ctrlq);
3905 MPASS(!(ctrlq->eq.flags & EQ_SW_ALLOCATED));
3910 tnl_cong(struct port_info *pi, int drop)
3918 return (pi->rx_e_chan_map);
3925 alloc_rxq(struct vi_info *vi, struct sge_rxq *rxq, int idx, int intr_idx,
3929 struct adapter *sc = vi->adapter;
3930 struct ifnet *ifp = vi->ifp;
3931 struct sysctl_oid *oid;
3934 if (!(rxq->iq.flags & IQ_SW_ALLOCATED)) {
3935 MPASS(!(rxq->iq.flags & IQ_HW_ALLOCATED));
3936 #if defined(INET) || defined(INET6)
3937 rc = tcp_lro_init_args(&rxq->lro, ifp, lro_entries, lro_mbufs);
3940 MPASS(rxq->lro.ifp == ifp); /* also indicates LRO init'ed */
3944 snprintf(name, sizeof(name), "%d", idx);
3945 oid = SYSCTL_ADD_NODE(&vi->ctx, SYSCTL_CHILDREN(vi->rxq_oid),
3946 OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
3949 init_iq(&rxq->iq, sc, vi->tmr_idx, vi->pktc_idx, vi->qsize_rxq,
3950 intr_idx, tnl_cong(vi->pi, cong_drop));
3951 #if defined(INET) || defined(INET6)
3952 if (ifp->if_capenable & IFCAP_LRO)
3953 rxq->iq.flags |= IQ_LRO_ENABLED;
3955 if (ifp->if_capenable & IFCAP_HWRXTSTMP)
3956 rxq->iq.flags |= IQ_RX_TIMESTAMP;
3957 snprintf(name, sizeof(name), "%s rxq%d-fl",
3958 device_get_nameunit(vi->dev), idx);
3959 init_fl(sc, &rxq->fl, vi->qsize_rxq / 8, maxp, name);
3960 rc = alloc_iq_fl(vi, &rxq->iq, &rxq->fl, &vi->ctx, oid);
3962 CH_ERR(vi, "failed to allocate rxq%d: %d\n", idx, rc);
3963 sysctl_remove_oid(oid, 1, 1);
3964 #if defined(INET) || defined(INET6)
3965 tcp_lro_free(&rxq->lro);
3966 rxq->lro.ifp = NULL;
3970 MPASS(rxq->iq.flags & IQ_SW_ALLOCATED);
3971 add_rxq_sysctls(&vi->ctx, oid, rxq);
3974 if (!(rxq->iq.flags & IQ_HW_ALLOCATED)) {
3975 MPASS(rxq->iq.flags & IQ_SW_ALLOCATED);
3976 rc = alloc_iq_fl_hwq(vi, &rxq->iq, &rxq->fl);
3978 CH_ERR(vi, "failed to create hw rxq%d: %d\n", idx, rc);
3981 MPASS(rxq->iq.flags & IQ_HW_ALLOCATED);
3984 sc->sge.iq_base = rxq->iq.abs_id - rxq->iq.cntxt_id;
3986 KASSERT(rxq->iq.cntxt_id + sc->sge.iq_base == rxq->iq.abs_id,
3987 ("iq_base mismatch"));
3988 KASSERT(sc->sge.iq_base == 0 || sc->flags & IS_VF,
3989 ("PF with non-zero iq_base"));
3992 * The freelist is just barely above the starvation threshold
3993 * right now; fill it up a bit more.
3996 refill_fl(sc, &rxq->fl, 128);
3997 FL_UNLOCK(&rxq->fl);
4007 free_rxq(struct vi_info *vi, struct sge_rxq *rxq)
4009 if (rxq->iq.flags & IQ_HW_ALLOCATED) {
4010 MPASS(rxq->iq.flags & IQ_SW_ALLOCATED);
4011 free_iq_fl_hwq(vi->adapter, &rxq->iq, &rxq->fl);
4012 MPASS(!(rxq->iq.flags & IQ_HW_ALLOCATED));
4015 if (rxq->iq.flags & IQ_SW_ALLOCATED) {
4016 MPASS(!(rxq->iq.flags & IQ_HW_ALLOCATED));
4017 #if defined(INET) || defined(INET6)
4018 tcp_lro_free(&rxq->lro);
4020 free_iq_fl(vi->adapter, &rxq->iq, &rxq->fl);
4021 MPASS(!(rxq->iq.flags & IQ_SW_ALLOCATED));
4022 bzero(rxq, sizeof(*rxq));
4027 add_rxq_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *oid,
4028 struct sge_rxq *rxq)
4030 struct sysctl_oid_list *children;
4032 if (ctx == NULL || oid == NULL)
4035 children = SYSCTL_CHILDREN(oid);
4036 #if defined(INET) || defined(INET6)
4037 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "lro_queued", CTLFLAG_RD,
4038 &rxq->lro.lro_queued, 0, NULL);
4039 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "lro_flushed", CTLFLAG_RD,
4040 &rxq->lro.lro_flushed, 0, NULL);
4042 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "rxcsum", CTLFLAG_RD,
4043 &rxq->rxcsum, "# of times hardware assisted with checksum");
4044 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "vlan_extraction", CTLFLAG_RD,
4045 &rxq->vlan_extraction, "# of times hardware extracted 802.1Q tag");
4046 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "vxlan_rxcsum", CTLFLAG_RD,
4048 "# of times hardware assisted with inner checksum (VXLAN)");
4056 alloc_ofld_rxq(struct vi_info *vi, struct sge_ofld_rxq *ofld_rxq, int idx,
4057 int intr_idx, int maxp)
4060 struct adapter *sc = vi->adapter;
4061 struct sysctl_oid *oid;
4064 if (!(ofld_rxq->iq.flags & IQ_SW_ALLOCATED)) {
4065 MPASS(!(ofld_rxq->iq.flags & IQ_HW_ALLOCATED));
4067 snprintf(name, sizeof(name), "%d", idx);
4068 oid = SYSCTL_ADD_NODE(&vi->ctx,
4069 SYSCTL_CHILDREN(vi->ofld_rxq_oid), OID_AUTO, name,
4070 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "offload rx queue");
4072 init_iq(&ofld_rxq->iq, sc, vi->ofld_tmr_idx, vi->ofld_pktc_idx,
4073 vi->qsize_rxq, intr_idx, 0);
4074 snprintf(name, sizeof(name), "%s ofld_rxq%d-fl",
4075 device_get_nameunit(vi->dev), idx);
4076 init_fl(sc, &ofld_rxq->fl, vi->qsize_rxq / 8, maxp, name);
4077 rc = alloc_iq_fl(vi, &ofld_rxq->iq, &ofld_rxq->fl, &vi->ctx,
4080 CH_ERR(vi, "failed to allocate ofld_rxq%d: %d\n", idx,
4082 sysctl_remove_oid(oid, 1, 1);
4085 MPASS(ofld_rxq->iq.flags & IQ_SW_ALLOCATED);
4086 ofld_rxq->rx_iscsi_ddp_setup_ok = counter_u64_alloc(M_WAITOK);
4087 ofld_rxq->rx_iscsi_ddp_setup_error =
4088 counter_u64_alloc(M_WAITOK);
4089 add_ofld_rxq_sysctls(&vi->ctx, oid, ofld_rxq);
4092 if (!(ofld_rxq->iq.flags & IQ_HW_ALLOCATED)) {
4093 MPASS(ofld_rxq->iq.flags & IQ_SW_ALLOCATED);
4094 rc = alloc_iq_fl_hwq(vi, &ofld_rxq->iq, &ofld_rxq->fl);
4096 CH_ERR(vi, "failed to create hw ofld_rxq%d: %d\n", idx,
4100 MPASS(ofld_rxq->iq.flags & IQ_HW_ALLOCATED);
4109 free_ofld_rxq(struct vi_info *vi, struct sge_ofld_rxq *ofld_rxq)
4111 if (ofld_rxq->iq.flags & IQ_HW_ALLOCATED) {
4112 MPASS(ofld_rxq->iq.flags & IQ_SW_ALLOCATED);
4113 free_iq_fl_hwq(vi->adapter, &ofld_rxq->iq, &ofld_rxq->fl);
4114 MPASS(!(ofld_rxq->iq.flags & IQ_HW_ALLOCATED));
4117 if (ofld_rxq->iq.flags & IQ_SW_ALLOCATED) {
4118 MPASS(!(ofld_rxq->iq.flags & IQ_HW_ALLOCATED));
4119 free_iq_fl(vi->adapter, &ofld_rxq->iq, &ofld_rxq->fl);
4120 MPASS(!(ofld_rxq->iq.flags & IQ_SW_ALLOCATED));
4121 counter_u64_free(ofld_rxq->rx_iscsi_ddp_setup_ok);
4122 counter_u64_free(ofld_rxq->rx_iscsi_ddp_setup_error);
4123 bzero(ofld_rxq, sizeof(*ofld_rxq));
4128 add_ofld_rxq_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *oid,
4129 struct sge_ofld_rxq *ofld_rxq)
4131 struct sysctl_oid_list *children;
4133 if (ctx == NULL || oid == NULL)
4136 children = SYSCTL_CHILDREN(oid);
4137 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
4138 "rx_toe_tls_records", CTLFLAG_RD, &ofld_rxq->rx_toe_tls_records,
4139 "# of TOE TLS records received");
4140 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
4141 "rx_toe_tls_octets", CTLFLAG_RD, &ofld_rxq->rx_toe_tls_octets,
4142 "# of payload octets in received TOE TLS records");
4144 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "iscsi",
4145 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TOE iSCSI statistics");
4146 children = SYSCTL_CHILDREN(oid);
4148 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ddp_setup_ok",
4149 CTLFLAG_RD, &ofld_rxq->rx_iscsi_ddp_setup_ok,
4150 "# of times DDP buffer was setup successfully.");
4151 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ddp_setup_error",
4152 CTLFLAG_RD, &ofld_rxq->rx_iscsi_ddp_setup_error,
4153 "# of times DDP buffer setup failed.");
4154 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "ddp_octets",
4155 CTLFLAG_RD, &ofld_rxq->rx_iscsi_ddp_octets, 0,
4156 "# of octets placed directly");
4157 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "ddp_pdus",
4158 CTLFLAG_RD, &ofld_rxq->rx_iscsi_ddp_pdus, 0,
4159 "# of PDUs with data placed directly.");
4160 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "fl_octets",
4161 CTLFLAG_RD, &ofld_rxq->rx_iscsi_fl_octets, 0,
4162 "# of data octets delivered in freelist");
4163 SYSCTL_ADD_U64(ctx, children, OID_AUTO, "fl_pdus",
4164 CTLFLAG_RD, &ofld_rxq->rx_iscsi_fl_pdus, 0,
4165 "# of PDUs with data delivered in freelist");
4170 * Returns a reasonable automatic cidx flush threshold for a given queue size.
4173 qsize_to_fthresh(int qsize)
4177 while (!powerof2(qsize))
4179 fthresh = ilog2(qsize);
4180 if (fthresh > X_CIDXFLUSHTHRESH_128)
4181 fthresh = X_CIDXFLUSHTHRESH_128;
4187 ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq)
4190 struct fw_eq_ctrl_cmd c;
4191 int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
4193 bzero(&c, sizeof(c));
4195 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
4196 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(sc->pf) |
4197 V_FW_EQ_CTRL_CMD_VFN(0));
4198 c.alloc_to_len16 = htobe32(F_FW_EQ_CTRL_CMD_ALLOC |
4199 F_FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
4200 c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid));
4201 c.physeqid_pkd = htobe32(0);
4202 c.fetchszm_to_iqid =
4203 htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
4204 V_FW_EQ_CTRL_CMD_PCIECHN(eq->tx_chan) |
4205 F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(eq->iqid));
4207 htobe32(V_FW_EQ_CTRL_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
4208 X_FETCHBURSTMIN_64B : X_FETCHBURSTMIN_64B_T6) |
4209 V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
4210 V_FW_EQ_CTRL_CMD_CIDXFTHRESH(qsize_to_fthresh(qsize)) |
4211 V_FW_EQ_CTRL_CMD_EQSIZE(qsize));
4212 c.eqaddr = htobe64(eq->ba);
4214 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
4216 CH_ERR(sc, "failed to create hw ctrlq for tx_chan %d: %d\n",
4221 eq->cntxt_id = G_FW_EQ_CTRL_CMD_EQID(be32toh(c.cmpliqid_eqid));
4222 cntxt_id = eq->cntxt_id - sc->sge.eq_start;
4223 if (cntxt_id >= sc->sge.eqmap_sz)
4224 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
4225 cntxt_id, sc->sge.eqmap_sz - 1);
4226 sc->sge.eqmap[cntxt_id] = eq;
4232 eth_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
4235 struct fw_eq_eth_cmd c;
4236 int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
4238 bzero(&c, sizeof(c));
4240 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
4241 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
4242 V_FW_EQ_ETH_CMD_VFN(0));
4243 c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC |
4244 F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
4245 c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE |
4246 F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid));
4247 c.fetchszm_to_iqid =
4248 htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
4249 V_FW_EQ_ETH_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
4250 V_FW_EQ_ETH_CMD_IQID(eq->iqid));
4252 htobe32(V_FW_EQ_ETH_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
4253 X_FETCHBURSTMIN_64B : X_FETCHBURSTMIN_64B_T6) |
4254 V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
4255 V_FW_EQ_ETH_CMD_EQSIZE(qsize));
4256 c.eqaddr = htobe64(eq->ba);
4258 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
4260 device_printf(vi->dev,
4261 "failed to create Ethernet egress queue: %d\n", rc);
4265 eq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd));
4266 eq->abs_id = G_FW_EQ_ETH_CMD_PHYSEQID(be32toh(c.physeqid_pkd));
4267 cntxt_id = eq->cntxt_id - sc->sge.eq_start;
4268 if (cntxt_id >= sc->sge.eqmap_sz)
4269 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
4270 cntxt_id, sc->sge.eqmap_sz - 1);
4271 sc->sge.eqmap[cntxt_id] = eq;
4276 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
4278 ofld_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
4281 struct fw_eq_ofld_cmd c;
4282 int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
4284 bzero(&c, sizeof(c));
4286 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
4287 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(sc->pf) |
4288 V_FW_EQ_OFLD_CMD_VFN(0));
4289 c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_ALLOC |
4290 F_FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
4291 c.fetchszm_to_iqid =
4292 htonl(V_FW_EQ_OFLD_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
4293 V_FW_EQ_OFLD_CMD_PCIECHN(eq->tx_chan) |
4294 F_FW_EQ_OFLD_CMD_FETCHRO | V_FW_EQ_OFLD_CMD_IQID(eq->iqid));
4296 htobe32(V_FW_EQ_OFLD_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
4297 X_FETCHBURSTMIN_64B : X_FETCHBURSTMIN_64B_T6) |
4298 V_FW_EQ_OFLD_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
4299 V_FW_EQ_OFLD_CMD_CIDXFTHRESH(qsize_to_fthresh(qsize)) |
4300 V_FW_EQ_OFLD_CMD_EQSIZE(qsize));
4301 c.eqaddr = htobe64(eq->ba);
4303 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
4305 device_printf(vi->dev,
4306 "failed to create egress queue for TCP offload: %d\n", rc);
4310 eq->cntxt_id = G_FW_EQ_OFLD_CMD_EQID(be32toh(c.eqid_pkd));
4311 cntxt_id = eq->cntxt_id - sc->sge.eq_start;
4312 if (cntxt_id >= sc->sge.eqmap_sz)
4313 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
4314 cntxt_id, sc->sge.eqmap_sz - 1);
4315 sc->sge.eqmap[cntxt_id] = eq;
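/*
 * sc->sge.eqmap[] maps an egress queue's hardware context id (offset by
 * eq_start) back to the driver's software queue structure, mirroring iqmap[]
 * for ingress queues; each *_eq_alloc routine above registers its queue there
 * once the firmware command has succeeded.
 */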
4323 alloc_eq(struct adapter *sc, struct sge_eq *eq, struct sysctl_ctx_list *ctx,
4324 struct sysctl_oid *oid)
4329 MPASS(!(eq->flags & EQ_SW_ALLOCATED));
4331 qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
4332 len = qsize * EQ_ESIZE;
4333 rc = alloc_ring(sc, len, &eq->desc_tag, &eq->desc_map, &eq->ba,
4334 (void **)&eq->desc);
4337 if (ctx != NULL && oid != NULL)
4338 add_eq_sysctls(sc, ctx, oid, eq);
4339 eq->flags |= EQ_SW_ALLOCATED;
4346 free_eq(struct adapter *sc, struct sge_eq *eq)
4348 MPASS(eq->flags & EQ_SW_ALLOCATED);
4349 if (eq->type == EQ_ETH)
4350 MPASS(eq->pidx == eq->cidx);
4352 free_ring(sc, eq->desc_tag, eq->desc_map, eq->ba, eq->desc);
4353 mtx_destroy(&eq->eq_lock);
4354 bzero(eq, sizeof(*eq));
4358 add_eq_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx,
4359 struct sysctl_oid *oid, struct sge_eq *eq)
4361 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
4363 SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD, &eq->ba,
4364 "bus address of descriptor ring");
4365 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL,
4366 eq->sidx * EQ_ESIZE + sc->params.sge.spg_len,
4367 "desc ring size in bytes");
4368 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "abs_id", CTLFLAG_RD,
4369 &eq->abs_id, 0, "absolute id of the queue");
4370 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
4371 &eq->cntxt_id, 0, "SGE context id of the queue");
4372 SYSCTL_ADD_U16(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, &eq->cidx,
4373 0, "consumer index");
4374 SYSCTL_ADD_U16(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, &eq->pidx,
4375 0, "producer index");
4376 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sidx", CTLFLAG_RD, NULL,
4377 eq->sidx, "status page index");
4381 alloc_eq_hwq(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
4385 MPASS(!(eq->flags & EQ_HW_ALLOCATED));
4387 eq->iqid = eq->iq->cntxt_id;
4388 eq->pidx = eq->cidx = eq->dbidx = 0;
4389 /* Note that equeqidx is not used with sge_wrq (OFLD/CTRL) queues. */
4391 eq->doorbells = sc->doorbells;
4392 bzero(eq->desc, eq->sidx * EQ_ESIZE + sc->params.sge.spg_len);
4396 rc = ctrl_eq_alloc(sc, eq);
4400 rc = eth_eq_alloc(sc, vi, eq);
4403 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
4405 rc = ofld_eq_alloc(sc, vi, eq);
4410 panic("%s: invalid eq type %d.", __func__, eq->type);
4413 CH_ERR(sc, "failed to allocate egress queue(%d): %d\n",
4418 if (isset(&eq->doorbells, DOORBELL_UDB) ||
4419 isset(&eq->doorbells, DOORBELL_UDBWC) ||
4420 isset(&eq->doorbells, DOORBELL_WCWR)) {
4421 uint32_t s_qpp = sc->params.sge.eq_s_qpp;
4422 uint32_t mask = (1 << s_qpp) - 1;
4423 volatile uint8_t *udb;
4425 udb = sc->udbs_base + UDBS_DB_OFFSET;
4426 udb += (eq->cntxt_id >> s_qpp) << PAGE_SHIFT; /* pg offset */
4427 eq->udb_qid = eq->cntxt_id & mask; /* id in page */
4428 if (eq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE)
4429 clrbit(&eq->doorbells, DOORBELL_WCWR);
4431 udb += eq->udb_qid << UDBS_SEG_SHIFT; /* seg offset */
4434 eq->udb = (volatile void *)udb;
4437 eq->flags |= EQ_HW_ALLOCATED;
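/*
 * The block above computes this queue's slot in the BAR2 user doorbell region
 * (udbs_base): eq_s_qpp is log2 of the number of queues per doorbell page,
 * the context id selects the page and the segment within it, and WCWR
 * (write-combined WR push) is disabled for queues whose doorbell segment does
 * not fall within the first page.
 */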
4442 free_eq_hwq(struct adapter *sc, struct vi_info *vi __unused, struct sge_eq *eq)
4446 MPASS(eq->flags & EQ_HW_ALLOCATED);
4450 rc = -t4_ctrl_eq_free(sc, sc->mbox, sc->pf, 0, eq->cntxt_id);
4453 rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, eq->cntxt_id);
4455 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
4457 rc = -t4_ofld_eq_free(sc, sc->mbox, sc->pf, 0, eq->cntxt_id);
4461 panic("%s: invalid eq type %d.", __func__, eq->type);
4464 CH_ERR(sc, "failed to free eq (type %d): %d\n", eq->type, rc);
4467 eq->flags &= ~EQ_HW_ALLOCATED;
4473 alloc_wrq(struct adapter *sc, struct vi_info *vi, struct sge_wrq *wrq,
4474 struct sysctl_ctx_list *ctx, struct sysctl_oid *oid)
4476 struct sge_eq *eq = &wrq->eq;
4479 MPASS(!(eq->flags & EQ_SW_ALLOCATED));
4481 rc = alloc_eq(sc, eq, ctx, oid);
4484 MPASS(eq->flags & EQ_SW_ALLOCATED);
4485 /* Can't fail after this. */
4488 TASK_INIT(&wrq->wrq_tx_task, 0, wrq_tx_drain, wrq);
4489 TAILQ_INIT(&wrq->incomplete_wrs);
4490 STAILQ_INIT(&wrq->wr_list);
4491 wrq->nwr_pending = 0;
4492 wrq->ndesc_needed = 0;
4493 add_wrq_sysctls(ctx, oid, wrq);
4499 free_wrq(struct adapter *sc, struct sge_wrq *wrq)
4501 free_eq(sc, &wrq->eq);
4502 MPASS(wrq->nwr_pending == 0);
4503 MPASS(TAILQ_EMPTY(&wrq->incomplete_wrs));
4504 MPASS(STAILQ_EMPTY(&wrq->wr_list));
4505 bzero(wrq, sizeof(*wrq));
4509 add_wrq_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *oid,
4510 struct sge_wrq *wrq)
4512 struct sysctl_oid_list *children;
4514 if (ctx == NULL || oid == NULL)
4517 children = SYSCTL_CHILDREN(oid);
4518 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_direct", CTLFLAG_RD,
4519 &wrq->tx_wrs_direct, "# of work requests (direct)");
4520 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_copied", CTLFLAG_RD,
4521 &wrq->tx_wrs_copied, "# of work requests (copied)");
4522 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_sspace", CTLFLAG_RD,
4523 &wrq->tx_wrs_ss, "# of work requests (copied from scratch space)");
4530 alloc_txq(struct vi_info *vi, struct sge_txq *txq, int idx)
4533 struct port_info *pi = vi->pi;
4534 struct adapter *sc = vi->adapter;
4535 struct sge_eq *eq = &txq->eq;
4538 struct sysctl_oid *oid;
4540 if (!(eq->flags & EQ_SW_ALLOCATED)) {
4541 MPASS(!(eq->flags & EQ_HW_ALLOCATED));
4543 snprintf(name, sizeof(name), "%d", idx);
4544 oid = SYSCTL_ADD_NODE(&vi->ctx, SYSCTL_CHILDREN(vi->txq_oid),
4545 OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
4548 iqidx = vi->first_rxq + (idx % vi->nrxq);
4549 snprintf(name, sizeof(name), "%s txq%d",
4550 device_get_nameunit(vi->dev), idx);
4551 init_eq(sc, &txq->eq, EQ_ETH, vi->qsize_txq, pi->tx_chan,
4552 &sc->sge.rxq[iqidx].iq, name);
4554 rc = mp_ring_alloc(&txq->r, eq->sidx, txq, eth_tx,
4555 can_resume_eth_tx, M_CXGBE, &eq->eq_lock, M_WAITOK);
4557 CH_ERR(vi, "failed to allocate mp_ring for txq%d: %d\n",
4560 sysctl_remove_oid(oid, 1, 1);
4564 rc = alloc_eq(sc, eq, &vi->ctx, oid);
4566 CH_ERR(vi, "failed to allocate txq%d: %d\n", idx, rc);
4567 mp_ring_free(txq->r);
4570 MPASS(eq->flags & EQ_SW_ALLOCATED);
4571 /* Can't fail after this point. */
4573 TASK_INIT(&txq->tx_reclaim_task, 0, tx_reclaim, eq);
4575 txq->gl = sglist_alloc(TX_SGL_SEGS, M_WAITOK);
4576 txq->sdesc = malloc(eq->sidx * sizeof(struct tx_sdesc), M_CXGBE,
4579 add_txq_sysctls(vi, &vi->ctx, oid, txq);
4582 if (!(eq->flags & EQ_HW_ALLOCATED)) {
4583 MPASS(eq->flags & EQ_SW_ALLOCATED);
4584 rc = alloc_eq_hwq(sc, vi, eq);
4586 CH_ERR(vi, "failed to create hw txq%d: %d\n", idx, rc);
4589 MPASS(eq->flags & EQ_HW_ALLOCATED);
4590 /* Can't fail after this point. */
4593 sc->sge.eq_base = eq->abs_id - eq->cntxt_id;
4595 KASSERT(eq->cntxt_id + sc->sge.eq_base == eq->abs_id,
4596 ("eq_base mismatch"));
4597 KASSERT(sc->sge.eq_base == 0 || sc->flags & IS_VF,
4598 ("PF with non-zero eq_base"));
4601 MPASS(nitems(txp->mb) >= sc->params.max_pkts_per_eth_tx_pkts_wr);
4602 txq->txp.max_npkt = min(nitems(txp->mb),
4603 sc->params.max_pkts_per_eth_tx_pkts_wr);
4604 if (vi->flags & TX_USES_VM_WR && !(sc->flags & IS_VF))
4605 txq->txp.max_npkt--;
4607 if (vi->flags & TX_USES_VM_WR)
4608 txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
4609 V_TXPKT_INTF(pi->tx_chan));
4611 txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
4612 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) |
4613 V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld));
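/*
 * cpl_ctrl0 above is the precomputed first word of CPL_TX_PKT_XT used for
 * every frame on this txq: the VM work request flavour only needs the
 * interface, while the PF flavour also carries the PF/VF identity fields.
 */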
4625 free_txq(struct vi_info *vi, struct sge_txq *txq)
4627 struct adapter *sc = vi->adapter;
4628 struct sge_eq *eq = &txq->eq;
4630 if (eq->flags & EQ_HW_ALLOCATED) {
4631 MPASS(eq->flags & EQ_SW_ALLOCATED);
4632 free_eq_hwq(sc, NULL, eq);
4633 MPASS(!(eq->flags & EQ_HW_ALLOCATED));
4636 if (eq->flags & EQ_SW_ALLOCATED) {
4637 MPASS(!(eq->flags & EQ_HW_ALLOCATED));
4638 sglist_free(txq->gl);
4639 free(txq->sdesc, M_CXGBE);
4640 mp_ring_free(txq->r);
4642 MPASS(!(eq->flags & EQ_SW_ALLOCATED));
4643 bzero(txq, sizeof(*txq));
4648 add_txq_sysctls(struct vi_info *vi, struct sysctl_ctx_list *ctx,
4649 struct sysctl_oid *oid, struct sge_txq *txq)
4652 struct sysctl_oid_list *children;
4654 if (ctx == NULL || oid == NULL)
4658 children = SYSCTL_CHILDREN(oid);
4660 mp_ring_sysctls(txq->r, ctx, children);
4662 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tc",
4663 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, vi, txq - sc->sge.txq,
4664 sysctl_tc, "I", "traffic class (-1 means none)");
4666 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "txcsum", CTLFLAG_RD,
4667 &txq->txcsum, "# of times hardware assisted with checksum");
4668 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "vlan_insertion", CTLFLAG_RD,
4669 &txq->vlan_insertion, "# of times hardware inserted 802.1Q tag");
4670 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tso_wrs", CTLFLAG_RD,
4671 &txq->tso_wrs, "# of TSO work requests");
4672 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "imm_wrs", CTLFLAG_RD,
4673 &txq->imm_wrs, "# of work requests with immediate data");
4674 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "sgl_wrs", CTLFLAG_RD,
4675 &txq->sgl_wrs, "# of work requests with direct SGL");
4676 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "txpkt_wrs", CTLFLAG_RD,
4677 &txq->txpkt_wrs, "# of txpkt work requests (one pkt/WR)");
4678 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "txpkts0_wrs", CTLFLAG_RD,
4679 &txq->txpkts0_wrs, "# of txpkts (type 0) work requests");
4680 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "txpkts1_wrs", CTLFLAG_RD,
4681 &txq->txpkts1_wrs, "# of txpkts (type 1) work requests");
4682 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "txpkts0_pkts", CTLFLAG_RD,
4684 "# of frames tx'd using type0 txpkts work requests");
4685 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "txpkts1_pkts", CTLFLAG_RD,
4687 "# of frames tx'd using type1 txpkts work requests");
4688 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "txpkts_flush", CTLFLAG_RD,
4690 "# of times txpkts had to be flushed out by an egress-update");
4691 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "raw_wrs", CTLFLAG_RD,
4692 &txq->raw_wrs, "# of raw work requests (non-packets)");
4693 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "vxlan_tso_wrs", CTLFLAG_RD,
4694 &txq->vxlan_tso_wrs, "# of VXLAN TSO work requests");
4695 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "vxlan_txcsum", CTLFLAG_RD,
4697 "# of times hardware assisted with inner checksums (VXLAN)");
4701 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_records",
4702 CTLFLAG_RD, &txq->kern_tls_records,
4703 "# of NIC TLS records transmitted");
4704 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_short",
4705 CTLFLAG_RD, &txq->kern_tls_short,
4706 "# of short NIC TLS records transmitted");
4707 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_partial",
4708 CTLFLAG_RD, &txq->kern_tls_partial,
4709 "# of partial NIC TLS records transmitted");
4710 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_full",
4711 CTLFLAG_RD, &txq->kern_tls_full,
4712 "# of full NIC TLS records transmitted");
4713 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_octets",
4714 CTLFLAG_RD, &txq->kern_tls_octets,
4715 "# of payload octets in transmitted NIC TLS records");
4716 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_waste",
4717 CTLFLAG_RD, &txq->kern_tls_waste,
4718 "# of octets DMAd but not transmitted in NIC TLS records");
4719 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_options",
4720 CTLFLAG_RD, &txq->kern_tls_options,
4721 "# of NIC TLS options-only packets transmitted");
4722 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_header",
4723 CTLFLAG_RD, &txq->kern_tls_header,
4724 "# of NIC TLS header-only packets transmitted");
4725 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_fin",
4726 CTLFLAG_RD, &txq->kern_tls_fin,
4727 "# of NIC TLS FIN-only packets transmitted");
4728 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_fin_short",
4729 CTLFLAG_RD, &txq->kern_tls_fin_short,
4730 "# of NIC TLS padded FIN packets on short TLS records");
4731 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_cbc",
4732 CTLFLAG_RD, &txq->kern_tls_cbc,
4733 "# of NIC TLS sessions using AES-CBC");
4734 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_gcm",
4735 CTLFLAG_RD, &txq->kern_tls_gcm,
4736 "# of NIC TLS sessions using AES-GCM");
4741 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
4746 alloc_ofld_txq(struct vi_info *vi, struct sge_ofld_txq *ofld_txq, int idx)
4748 struct sysctl_oid *oid;
4749 struct port_info *pi = vi->pi;
4750 struct adapter *sc = vi->adapter;
4751 struct sge_eq *eq = &ofld_txq->wrq.eq;
4756 MPASS(idx < vi->nofldtxq);
4758 if (!(eq->flags & EQ_SW_ALLOCATED)) {
4759 snprintf(name, sizeof(name), "%d", idx);
4760 oid = SYSCTL_ADD_NODE(&vi->ctx,
4761 SYSCTL_CHILDREN(vi->ofld_txq_oid), OID_AUTO, name,
4762 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "offload tx queue");
4764 snprintf(name, sizeof(name), "%s ofld_txq%d",
4765 device_get_nameunit(vi->dev), idx);
4766 if (vi->nofldrxq > 0) {
4767 iqidx = vi->first_ofld_rxq + (idx % vi->nofldrxq);
4768 init_eq(sc, eq, EQ_OFLD, vi->qsize_txq, pi->tx_chan,
4769 &sc->sge.ofld_rxq[iqidx].iq, name);
4771 iqidx = vi->first_rxq + (idx % vi->nrxq);
4772 init_eq(sc, eq, EQ_OFLD, vi->qsize_txq, pi->tx_chan,
4773 &sc->sge.rxq[iqidx].iq, name);
4776 rc = alloc_wrq(sc, vi, &ofld_txq->wrq, &vi->ctx, oid);
4778 CH_ERR(vi, "failed to allocate ofld_txq%d: %d\n", idx,
4780 sysctl_remove_oid(oid, 1, 1);
4783 MPASS(eq->flags & EQ_SW_ALLOCATED);
4784 /* Can't fail after this point. */
4786 ofld_txq->tx_iscsi_pdus = counter_u64_alloc(M_WAITOK);
4787 ofld_txq->tx_iscsi_octets = counter_u64_alloc(M_WAITOK);
4788 ofld_txq->tx_toe_tls_records = counter_u64_alloc(M_WAITOK);
4789 ofld_txq->tx_toe_tls_octets = counter_u64_alloc(M_WAITOK);
4790 add_ofld_txq_sysctls(&vi->ctx, oid, ofld_txq);
4793 if (!(eq->flags & EQ_HW_ALLOCATED)) {
4794 rc = alloc_eq_hwq(sc, vi, eq);
4796 CH_ERR(vi, "failed to create hw ofld_txq%d: %d\n", idx,
4800 MPASS(eq->flags & EQ_HW_ALLOCATED);
4810 free_ofld_txq(struct vi_info *vi, struct sge_ofld_txq *ofld_txq)
4812 struct adapter *sc = vi->adapter;
4813 struct sge_eq *eq = &ofld_txq->wrq.eq;
4815 if (eq->flags & EQ_HW_ALLOCATED) {
4816 MPASS(eq->flags & EQ_SW_ALLOCATED);
4817 free_eq_hwq(sc, NULL, eq);
4818 MPASS(!(eq->flags & EQ_HW_ALLOCATED));
4821 if (eq->flags & EQ_SW_ALLOCATED) {
4822 MPASS(!(eq->flags & EQ_HW_ALLOCATED));
4823 counter_u64_free(ofld_txq->tx_iscsi_pdus);
4824 counter_u64_free(ofld_txq->tx_iscsi_octets);
4825 counter_u64_free(ofld_txq->tx_toe_tls_records);
4826 counter_u64_free(ofld_txq->tx_toe_tls_octets);
4827 free_wrq(sc, &ofld_txq->wrq);
4828 MPASS(!(eq->flags & EQ_SW_ALLOCATED));
4829 bzero(ofld_txq, sizeof(*ofld_txq));
4834 add_ofld_txq_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *oid,
4835 struct sge_ofld_txq *ofld_txq)
4837 struct sysctl_oid_list *children;
4839 if (ctx == NULL || oid == NULL)
4842 children = SYSCTL_CHILDREN(oid);
4843 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "tx_iscsi_pdus",
4844 CTLFLAG_RD, &ofld_txq->tx_iscsi_pdus,
4845 "# of iSCSI PDUs transmitted");
4846 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "tx_iscsi_octets",
4847 CTLFLAG_RD, &ofld_txq->tx_iscsi_octets,
4848 "# of payload octets in transmitted iSCSI PDUs");
4849 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "tx_toe_tls_records",
4850 CTLFLAG_RD, &ofld_txq->tx_toe_tls_records,
4851 "# of TOE TLS records transmitted");
4852 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "tx_toe_tls_octets",
4853 CTLFLAG_RD, &ofld_txq->tx_toe_tls_octets,
4854 "# of payload octets in transmitted TOE TLS records");
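/*
 * busdma load callback for mappings that must fit in a single segment; it
 * stores the segment's bus address (or 0 on error) in the caller's pointer.
 */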
4859 oneseg_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
4861 bus_addr_t *ba = arg;
4864 ("%s meant for single segment mappings only.", __func__));
4866 *ba = error ? 0 : segs->ds_addr;
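/*
 * Tell the chip about the freelist buffers posted since the last doorbell.
 * The hardware is told about buffers in groups of 8 (one descriptor's worth),
 * which is why fl->pidx is shifted right by 3 here.
 */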
4870 ring_fl_db(struct adapter *sc, struct sge_fl *fl)
4874 n = IDXDIFF(fl->pidx >> 3, fl->dbidx, fl->sidx);
4878 v = fl->dbval | V_PIDX(n);
4880 *fl->udb = htole32(v);
4882 t4_write_reg(sc, sc->sge_kdoorbell_reg, v);
4883 IDXINCR(fl->dbidx, n, fl->sidx);
4887 * Fills up the freelist by allocating up to 'n' buffers. Buffers that are
4888 * recycled do not count towards this allocation budget.
4890 * Returns non-zero to indicate that this freelist should be added to the list
4891 * of starving freelists.
4894 refill_fl(struct adapter *sc, struct sge_fl *fl, int n)
4897 struct fl_sdesc *sd;
4900 struct rx_buf_info *rxb;
4901 struct cluster_metadata *clm;
4902 uint16_t max_pidx, zidx = fl->zidx;
4903 uint16_t hw_cidx = fl->hw_cidx; /* stable snapshot */
4905 FL_LOCK_ASSERT_OWNED(fl);
4908 * We always stop at the beginning of the hardware descriptor that's just
4909 * before the one with the hw cidx. This is to avoid hw pidx = hw cidx,
4910 * which would mean an empty freelist to the chip.
4912 max_pidx = __predict_false(hw_cidx == 0) ? fl->sidx - 1 : hw_cidx - 1;
4913 if (fl->pidx == max_pidx * 8)
4916 d = &fl->desc[fl->pidx];
4917 sd = &fl->sdesc[fl->pidx];
4918 rxb = &sc->sge.rx_buf_info[zidx];
4922 if (sd->cl != NULL) {
4924 if (sd->nmbuf == 0) {
4926 * Fast recycle without involving any atomics on
4927 * the cluster's metadata (if the cluster has
4928 * metadata). This happens when all frames
4929 * received in the cluster were small enough to
4930 * fit within a single mbuf each.
4932 fl->cl_fast_recycled++;
4937 * Cluster is guaranteed to have metadata. Clusters
4938 * without metadata always take the fast recycle path
4939 * when they're recycled.
4941 clm = cl_metadata(sd);
4944 if (atomic_fetchadd_int(&clm->refcount, -1) == 1) {
4946 counter_u64_add(extfree_rels, 1);
4949 sd->cl = NULL; /* gave up my reference */
4951 MPASS(sd->cl == NULL);
4952 cl = uma_zalloc(rxb->zone, M_NOWAIT);
4953 if (__predict_false(cl == NULL)) {
4954 if (zidx != fl->safe_zidx) {
4955 zidx = fl->safe_zidx;
4956 rxb = &sc->sge.rx_buf_info[zidx];
4957 cl = uma_zalloc(rxb->zone, M_NOWAIT);
4965 pa = pmap_kextract((vm_offset_t)cl);
4969 if (fl->flags & FL_BUF_PACKING) {
4970 *d = htobe64(pa | rxb->hwidx2);
4971 sd->moff = rxb->size2;
4973 *d = htobe64(pa | rxb->hwidx1);
4980 if (__predict_false((++fl->pidx & 7) == 0)) {
4981 uint16_t pidx = fl->pidx >> 3;
4983 if (__predict_false(pidx == fl->sidx)) {
4989 if (n < 8 || pidx == max_pidx)
4992 if (IDXDIFF(pidx, fl->dbidx, fl->sidx) >= 4)
4997 if ((fl->pidx >> 3) != fl->dbidx)
5000 return (FL_RUNNING_LOW(fl) && !(fl->flags & FL_STARVING));
5004 * Attempt to refill all starving freelists.
5007 refill_sfl(void *arg)
5009 struct adapter *sc = arg;
5010 struct sge_fl *fl, *fl_temp;
5012 mtx_assert(&sc->sfl_lock, MA_OWNED);
5013 TAILQ_FOREACH_SAFE(fl, &sc->sfl, link, fl_temp) {
5015 refill_fl(sc, fl, 64);
5016 if (FL_NOT_RUNNING_LOW(fl) || fl->flags & FL_DOOMED) {
5017 TAILQ_REMOVE(&sc->sfl, fl, link);
5018 fl->flags &= ~FL_STARVING;
5023 if (!TAILQ_EMPTY(&sc->sfl))
5024 callout_schedule(&sc->sfl_callout, hz / 5);
5028 * Release the driver's reference on all buffers in the given freelist. Buffers
5029  * with kernel references cannot be freed and will prevent the driver from being
 * unloaded safely.
 */
5033 free_fl_buffers(struct adapter *sc, struct sge_fl *fl)
5035 struct fl_sdesc *sd;
5036 struct cluster_metadata *clm;
5040 for (i = 0; i < fl->sidx * 8; i++, sd++) {
5045 uma_zfree(sc->sge.rx_buf_info[sd->zidx].zone, sd->cl);
5046 else if (fl->flags & FL_BUF_PACKING) {
5047 clm = cl_metadata(sd);
5048 if (atomic_fetchadd_int(&clm->refcount, -1) == 1) {
5049 uma_zfree(sc->sge.rx_buf_info[sd->zidx].zone,
5051 counter_u64_add(extfree_rels, 1);
5057 if (fl->flags & FL_BUF_RESUME) {
5059 fl->flags &= ~FL_BUF_RESUME;
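/*
 * Build the physical-address scatter/gather list for an mbuf chain that was
 * vetted when it entered the driver; a failure here is therefore a bug.
 */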
5064 get_pkt_gl(struct mbuf *m, struct sglist *gl)
5071 rc = sglist_append_mbuf(gl, m);
5072 if (__predict_false(rc != 0)) {
5073 panic("%s: mbuf %p (%d segs) was vetted earlier but now fails "
5074 "with %d.", __func__, m, mbuf_nsegs(m), rc);
5077 KASSERT(gl->sg_nseg == mbuf_nsegs(m),
5078 ("%s: nsegs changed for mbuf %p from %d to %d", __func__, m,
5079 mbuf_nsegs(m), gl->sg_nseg));
5080 #if 0 /* vm_wr not readily available here. */
5081 KASSERT(gl->sg_nseg > 0 && gl->sg_nseg <= max_nsegs_allowed(m, vm_wr),
5082 ("%s: %d segments, should have been 1 <= nsegs <= %d", __func__,
5083 gl->sg_nseg, max_nsegs_allowed(m, vm_wr)));
5088 * len16 for a txpkt WR with a GL. Includes the firmware work request header.
5091 txpkt_len16(u_int nsegs, const u_int extra)
5097 nsegs--; /* first segment is part of ulptx_sgl */
5098 n = extra + sizeof(struct fw_eth_tx_pkt_wr) +
5099 sizeof(struct cpl_tx_pkt_core) +
5100 sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1));
5102 return (howmany(n, 16));
5106  * len16 for a txpkt_vm WR with a GL. Includes the firmware work
 * request header.
 */
5110 txpkt_vm_len16(u_int nsegs, const u_int extra)
5116 nsegs--; /* first segment is part of ulptx_sgl */
5117 n = extra + sizeof(struct fw_eth_tx_pkt_vm_wr) +
5118 sizeof(struct cpl_tx_pkt_core) +
5119 sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1));
5121 return (howmany(n, 16));
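/*
 * Compute and record (in the mbuf) the length of the tx work request for this
 * packet, in 16-byte units: the VM or regular txpkt flavor, plus the LSO or
 * tunnel-LSO CPL when TSO is needed.
 */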
5125 calculate_mbuf_len16(struct mbuf *m, bool vm_wr)
5127 const int lso = sizeof(struct cpl_tx_pkt_lso_core);
5128 const int tnl_lso = sizeof(struct cpl_tx_tnl_lso);
5132 set_mbuf_len16(m, txpkt_vm_len16(mbuf_nsegs(m), lso));
5134 set_mbuf_len16(m, txpkt_vm_len16(mbuf_nsegs(m), 0));
5139 if (needs_vxlan_tso(m))
5140 set_mbuf_len16(m, txpkt_len16(mbuf_nsegs(m), tnl_lso));
5142 set_mbuf_len16(m, txpkt_len16(mbuf_nsegs(m), lso));
5144 set_mbuf_len16(m, txpkt_len16(mbuf_nsegs(m), 0));
5148  * len16 for a txpkts type 0 WR with a GL. Does not include the firmware work
 * request header.
 */
5152 txpkts0_len16(u_int nsegs)
5158 nsegs--; /* first segment is part of ulptx_sgl */
5159 n = sizeof(struct ulp_txpkt) + sizeof(struct ulptx_idata) +
5160 sizeof(struct cpl_tx_pkt_core) + sizeof(struct ulptx_sgl) +
5161 8 * ((3 * nsegs) / 2 + (nsegs & 1));
5163 return (howmany(n, 16));
5167  * len16 for a txpkts type 1 WR with a GL. Does not include the firmware work
 * request header.
 */
5175 n = sizeof(struct cpl_tx_pkt_core) + sizeof(struct ulptx_sgl);
5177 return (howmany(n, 16));
5181 imm_payload(u_int ndesc)
5185 n = ndesc * EQ_ESIZE - sizeof(struct fw_eth_tx_pkt_wr) -
5186 sizeof(struct cpl_tx_pkt_core);
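/*
 * Checksum-related bits of ctrl1 for CPL_TX_PKT: the csum type, the Ethernet
 * and IP header lengths, or the two disable bits if no hardware checksumming
 * was requested for this mbuf.
 */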
5191 static inline uint64_t
5192 csum_to_ctrl(struct adapter *sc, struct mbuf *m)
5195 int csum_type, l2hlen, l3hlen;
5197 static const int csum_types[3][2] = {
5198 {TX_CSUM_TCPIP, TX_CSUM_TCPIP6},
5199 {TX_CSUM_UDPIP, TX_CSUM_UDPIP6},
5205 if (!needs_hwcsum(m))
5206 return (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS);
5208 MPASS(m->m_pkthdr.l2hlen >= ETHER_HDR_LEN);
5209 MPASS(m->m_pkthdr.l3hlen >= sizeof(struct ip));
5211 if (needs_vxlan_csum(m)) {
5212 MPASS(m->m_pkthdr.l4hlen > 0);
5213 MPASS(m->m_pkthdr.l5hlen > 0);
5214 MPASS(m->m_pkthdr.inner_l2hlen >= ETHER_HDR_LEN);
5215 MPASS(m->m_pkthdr.inner_l3hlen >= sizeof(struct ip));
5217 l2hlen = m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen +
5218 m->m_pkthdr.l4hlen + m->m_pkthdr.l5hlen +
5219 m->m_pkthdr.inner_l2hlen - ETHER_HDR_LEN;
5220 l3hlen = m->m_pkthdr.inner_l3hlen;
5222 l2hlen = m->m_pkthdr.l2hlen - ETHER_HDR_LEN;
5223 l3hlen = m->m_pkthdr.l3hlen;
5227 if (!needs_l3_csum(m))
5228 ctrl |= F_TXPKT_IPCSUM_DIS;
5230 if (m->m_pkthdr.csum_flags & (CSUM_IP_TCP | CSUM_INNER_IP_TCP |
5231 CSUM_IP6_TCP | CSUM_INNER_IP6_TCP))
5233 else if (m->m_pkthdr.csum_flags & (CSUM_IP_UDP | CSUM_INNER_IP_UDP |
5234 CSUM_IP6_UDP | CSUM_INNER_IP6_UDP))
5239 if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_IP_TCP | CSUM_IP_UDP |
5240 CSUM_INNER_IP | CSUM_INNER_IP_TCP | CSUM_INNER_IP_UDP))
5243 MPASS(m->m_pkthdr.csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP |
5244 CSUM_INNER_IP6_TCP | CSUM_INNER_IP6_UDP));
5248 * needs_hwcsum returned true earlier so there must be some kind of
5249 * checksum to calculate.
5251 csum_type = csum_types[x][y];
5252 MPASS(csum_type != 0);
5253 if (csum_type == TX_CSUM_IP)
5254 ctrl |= F_TXPKT_L4CSUM_DIS;
5255 ctrl |= V_TXPKT_CSUM_TYPE(csum_type) | V_TXPKT_IPHDR_LEN(l3hlen);
5256 if (chip_id(sc) <= CHELSIO_T5)
5257 ctrl |= V_TXPKT_ETHHDR_LEN(l2hlen);
5259 ctrl |= V_T6_TXPKT_ETHHDR_LEN(l2hlen);
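/*
 * Write a CPL_TX_PKT_LSO for a regular (non-tunnelled) TSO packet and return
 * a pointer to the byte just past it.
 */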
5264 static inline void *
5265 write_lso_cpl(void *cpl, struct mbuf *m0)
5267 struct cpl_tx_pkt_lso_core *lso;
5270 KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 &&
5271 m0->m_pkthdr.l4hlen > 0,
5272 ("%s: mbuf %p needs TSO but missing header lengths",
5275 ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) |
5276 F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE |
5277 V_LSO_ETHHDR_LEN((m0->m_pkthdr.l2hlen - ETHER_HDR_LEN) >> 2) |
5278 V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) |
5279 V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2);
5280 if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr))
5284 lso->lso_ctrl = htobe32(ctrl);
5285 lso->ipid_ofst = htobe16(0);
5286 lso->mss = htobe16(m0->m_pkthdr.tso_segsz);
5287 lso->seqno_offset = htobe32(0);
5288 lso->len = htobe32(m0->m_pkthdr.len);
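/*
 * VXLAN TSO variant of write_lso_cpl: writes a CPL_TX_TNL_LSO that describes
 * both the outer (Ethernet/IP/UDP/VXLAN) and inner headers.
 */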
5294 write_tnl_lso_cpl(void *cpl, struct mbuf *m0)
5296 struct cpl_tx_tnl_lso *tnl_lso = cpl;
5299 KASSERT(m0->m_pkthdr.inner_l2hlen > 0 &&
5300 m0->m_pkthdr.inner_l3hlen > 0 && m0->m_pkthdr.inner_l4hlen > 0 &&
5301 m0->m_pkthdr.inner_l5hlen > 0,
5302 ("%s: mbuf %p needs VXLAN_TSO but missing inner header lengths",
5304 KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 &&
5305 m0->m_pkthdr.l4hlen > 0 && m0->m_pkthdr.l5hlen > 0,
5306 ("%s: mbuf %p needs VXLAN_TSO but missing outer header lengths",
5309 /* Outer headers. */
5310 ctrl = V_CPL_TX_TNL_LSO_OPCODE(CPL_TX_TNL_LSO) |
5311 F_CPL_TX_TNL_LSO_FIRST | F_CPL_TX_TNL_LSO_LAST |
5312 V_CPL_TX_TNL_LSO_ETHHDRLENOUT(
5313 (m0->m_pkthdr.l2hlen - ETHER_HDR_LEN) >> 2) |
5314 V_CPL_TX_TNL_LSO_IPHDRLENOUT(m0->m_pkthdr.l3hlen >> 2) |
5315 F_CPL_TX_TNL_LSO_IPLENSETOUT;
5316 if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr))
5317 ctrl |= F_CPL_TX_TNL_LSO_IPV6OUT;
5319 ctrl |= F_CPL_TX_TNL_LSO_IPHDRCHKOUT |
5320 F_CPL_TX_TNL_LSO_IPIDINCOUT;
5322 tnl_lso->op_to_IpIdSplitOut = htobe32(ctrl);
5323 tnl_lso->IpIdOffsetOut = 0;
5324 tnl_lso->UdpLenSetOut_to_TnlHdrLen =
5325 htobe16(F_CPL_TX_TNL_LSO_UDPCHKCLROUT |
5326 F_CPL_TX_TNL_LSO_UDPLENSETOUT |
5327 V_CPL_TX_TNL_LSO_TNLHDRLEN(m0->m_pkthdr.l2hlen +
5328 m0->m_pkthdr.l3hlen + m0->m_pkthdr.l4hlen +
5329 m0->m_pkthdr.l5hlen) |
5330 V_CPL_TX_TNL_LSO_TNLTYPE(TX_TNL_TYPE_VXLAN));
5333 /* Inner headers. */
5334 ctrl = V_CPL_TX_TNL_LSO_ETHHDRLEN(
5335 (m0->m_pkthdr.inner_l2hlen - ETHER_HDR_LEN) >> 2) |
5336 V_CPL_TX_TNL_LSO_IPHDRLEN(m0->m_pkthdr.inner_l3hlen >> 2) |
5337 V_CPL_TX_TNL_LSO_TCPHDRLEN(m0->m_pkthdr.inner_l4hlen >> 2);
5338 if (m0->m_pkthdr.inner_l3hlen == sizeof(struct ip6_hdr))
5339 ctrl |= F_CPL_TX_TNL_LSO_IPV6;
5340 tnl_lso->Flow_to_TcpHdrLen = htobe32(ctrl);
5341 tnl_lso->IpIdOffset = 0;
5342 tnl_lso->IpIdSplit_to_Mss =
5343 htobe16(V_CPL_TX_TNL_LSO_MSS(m0->m_pkthdr.tso_segsz));
5344 tnl_lso->TCPSeqOffset = 0;
5345 tnl_lso->EthLenOffset_Size =
5346 htobe32(V_CPL_TX_TNL_LSO_SIZE(m0->m_pkthdr.len));
5348 return (tnl_lso + 1);
5351 #define VM_TX_L2HDR_LEN 16 /* ethmacdst to vlantci */
5354 * Write a VM txpkt WR for this packet to the hardware descriptors, update the
5355 * software descriptor, and advance the pidx. It is guaranteed that enough
5356 * descriptors are available.
5358 * The return value is the # of hardware descriptors used.
5361 write_txpkt_vm_wr(struct adapter *sc, struct sge_txq *txq, struct mbuf *m0)
5364 struct fw_eth_tx_pkt_vm_wr *wr;
5365 struct tx_sdesc *txsd;
5366 struct cpl_tx_pkt_core *cpl;
5367 uint32_t ctrl; /* used in many unrelated places */
5369 int len16, ndesc, pktlen, nsegs;
5372 TXQ_LOCK_ASSERT_OWNED(txq);
5375 len16 = mbuf_len16(m0);
5376 nsegs = mbuf_nsegs(m0);
5377 pktlen = m0->m_pkthdr.len;
5378 ctrl = sizeof(struct cpl_tx_pkt_core);
5380 ctrl += sizeof(struct cpl_tx_pkt_lso_core);
5381 ndesc = tx_len16_to_desc(len16);
5383 /* Firmware work request header */
5385 wr = (void *)&eq->desc[eq->pidx];
5386 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_VM_WR) |
5387 V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl));
5389 ctrl = V_FW_WR_LEN16(len16);
5390 wr->equiq_to_len16 = htobe32(ctrl);
5395 * Copy over ethmacdst, ethmacsrc, ethtype, and vlantci.
5396 * vlantci is ignored unless the ethtype is 0x8100, so it's
5397 * simpler to always copy it rather than making it
5398 * conditional. Also, it seems that we do not have to set
5399 * vlantci or fake the ethtype when doing VLAN tag insertion.
5401 m_copydata(m0, 0, VM_TX_L2HDR_LEN, wr->ethmacdst);
5403 if (needs_tso(m0)) {
5404 cpl = write_lso_cpl(wr + 1, m0);
5407 cpl = (void *)(wr + 1);
5409 /* Checksum offload */
5410 ctrl1 = csum_to_ctrl(sc, m0);
5411 if (ctrl1 != (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS))
5412 txq->txcsum++; /* some hardware assistance provided */
5414 /* VLAN tag insertion */
5415 if (needs_vlan_insertion(m0)) {
5416 ctrl1 |= F_TXPKT_VLAN_VLD |
5417 V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag);
5418 txq->vlan_insertion++;
5422 cpl->ctrl0 = txq->cpl_ctrl0;
5424 cpl->len = htobe16(pktlen);
5425 cpl->ctrl1 = htobe64(ctrl1);
5428 dst = (void *)(cpl + 1);
5431 * A packet using TSO will use up an entire descriptor for the
5432 * firmware work request header, LSO CPL, and TX_PKT_XT CPL.
5433 * If this descriptor is the last descriptor in the ring, wrap
5434  * around to the front of the ring explicitly for the start of
 * the SGL.
 */
5437 if (dst == (void *)&eq->desc[eq->sidx]) {
5438 dst = (void *)&eq->desc[0];
5439 write_gl_to_txd(txq, m0, &dst, 0);
5441 write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx);
5445 txsd = &txq->sdesc[eq->pidx];
5447 txsd->desc_used = ndesc;
5453 * Write a raw WR to the hardware descriptors, update the software
5454 * descriptor, and advance the pidx. It is guaranteed that enough
5455 * descriptors are available.
5457 * The return value is the # of hardware descriptors used.
5460 write_raw_wr(struct sge_txq *txq, void *wr, struct mbuf *m0, u_int available)
5462 struct sge_eq *eq = &txq->eq;
5463 struct tx_sdesc *txsd;
5468 len16 = mbuf_len16(m0);
5469 ndesc = tx_len16_to_desc(len16);
5470 MPASS(ndesc <= available);
5473 for (m = m0; m != NULL; m = m->m_next)
5474 copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len);
5478 txsd = &txq->sdesc[eq->pidx];
5480 txsd->desc_used = ndesc;
5486 * Write a txpkt WR for this packet to the hardware descriptors, update the
5487 * software descriptor, and advance the pidx. It is guaranteed that enough
5488 * descriptors are available.
5490 * The return value is the # of hardware descriptors used.
5493 write_txpkt_wr(struct adapter *sc, struct sge_txq *txq, struct mbuf *m0,
5497 struct fw_eth_tx_pkt_wr *wr;
5498 struct tx_sdesc *txsd;
5499 struct cpl_tx_pkt_core *cpl;
5500 uint32_t ctrl; /* used in many unrelated places */
5502 int len16, ndesc, pktlen, nsegs;
5505 TXQ_LOCK_ASSERT_OWNED(txq);
5508 len16 = mbuf_len16(m0);
5509 nsegs = mbuf_nsegs(m0);
5510 pktlen = m0->m_pkthdr.len;
5511 ctrl = sizeof(struct cpl_tx_pkt_core);
5512 if (needs_tso(m0)) {
5513 if (needs_vxlan_tso(m0))
5514 ctrl += sizeof(struct cpl_tx_tnl_lso);
5516 ctrl += sizeof(struct cpl_tx_pkt_lso_core);
5517 } else if (!(mbuf_cflags(m0) & MC_NOMAP) && pktlen <= imm_payload(2) &&
5519 /* Immediate data. Recalculate len16 and set nsegs to 0. */
5521 len16 = howmany(sizeof(struct fw_eth_tx_pkt_wr) +
5522 sizeof(struct cpl_tx_pkt_core) + pktlen, 16);
5525 ndesc = tx_len16_to_desc(len16);
5526 MPASS(ndesc <= available);
5528 /* Firmware work request header */
5530 wr = (void *)&eq->desc[eq->pidx];
5531 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
5532 V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl));
5534 ctrl = V_FW_WR_LEN16(len16);
5535 wr->equiq_to_len16 = htobe32(ctrl);
5538 if (needs_tso(m0)) {
5539 if (needs_vxlan_tso(m0)) {
5540 cpl = write_tnl_lso_cpl(wr + 1, m0);
5541 txq->vxlan_tso_wrs++;
5543 cpl = write_lso_cpl(wr + 1, m0);
5547 cpl = (void *)(wr + 1);
5549 /* Checksum offload */
5550 ctrl1 = csum_to_ctrl(sc, m0);
5551 if (ctrl1 != (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS)) {
5552 /* some hardware assistance provided */
5553 if (needs_vxlan_csum(m0))
5554 txq->vxlan_txcsum++;
5559 /* VLAN tag insertion */
5560 if (needs_vlan_insertion(m0)) {
5561 ctrl1 |= F_TXPKT_VLAN_VLD |
5562 V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag);
5563 txq->vlan_insertion++;
5567 cpl->ctrl0 = txq->cpl_ctrl0;
5569 cpl->len = htobe16(pktlen);
5570 cpl->ctrl1 = htobe64(ctrl1);
5573 dst = (void *)(cpl + 1);
5574 if (__predict_false((uintptr_t)dst == (uintptr_t)&eq->desc[eq->sidx]))
5575 dst = (caddr_t)&eq->desc[0];
5578 write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx);
5583 for (m = m0; m != NULL; m = m->m_next) {
5584 copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len);
5590 KASSERT(pktlen == 0, ("%s: %d bytes left.", __func__, pktlen));
5597 txsd = &txq->sdesc[eq->pidx];
5599 txsd->desc_used = ndesc;
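/*
 * A txpkts_vm work request carries a single copy of the L2 header for every
 * frame in it, so only frames with identical L2 headers can be coalesced.
 * cmp_l2hdr() reports a mismatch with the saved header and save_l2hdr()
 * records the header of the first frame in the batch.
 */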
5605 cmp_l2hdr(struct txpkts *txp, struct mbuf *m)
5609 MPASS(txp->npkt > 0);
5610 MPASS(m->m_len >= VM_TX_L2HDR_LEN);
5612 if (txp->ethtype == be16toh(ETHERTYPE_VLAN))
5613 len = VM_TX_L2HDR_LEN;
5615 len = sizeof(struct ether_header);
5617 return (memcmp(m->m_data, &txp->ethmacdst[0], len) != 0);
5621 save_l2hdr(struct txpkts *txp, struct mbuf *m)
5623 MPASS(m->m_len >= VM_TX_L2HDR_LEN);
5625 memcpy(&txp->ethmacdst[0], mtod(m, const void *), VM_TX_L2HDR_LEN);
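/*
 * Try to add 'm' to the coalesced work request being built in txq->txp (VF
 * flavor: type 1 only, one SGL segment per frame, matching L2 headers).
 * *send is set when the pending WR should be written out first, either to
 * make room for this mbuf or because it just became full.
 */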
5629 add_to_txpkts_vf(struct adapter *sc, struct sge_txq *txq, struct mbuf *m,
5630 int avail, bool *send)
5632 struct txpkts *txp = &txq->txp;
5634 /* Cannot have TSO and coalesce at the same time. */
5635 if (cannot_use_txpkts(m)) {
5637 *send = txp->npkt > 0;
5641 /* VF allows coalescing of type 1 (1 GL) only */
5642 if (mbuf_nsegs(m) > 1)
5643 goto cannot_coalesce;
5646 if (txp->npkt > 0) {
5647 MPASS(tx_len16_to_desc(txp->len16) <= avail);
5648 MPASS(txp->npkt < txp->max_npkt);
5649 MPASS(txp->wr_type == 1); /* VF supports type 1 only */
5651 if (tx_len16_to_desc(txp->len16 + txpkts1_len16()) > avail) {
5656 if (m->m_pkthdr.len + txp->plen > 65535)
5657 goto retry_after_send;
5658 if (cmp_l2hdr(txp, m))
5659 goto retry_after_send;
5661 txp->len16 += txpkts1_len16();
5662 txp->plen += m->m_pkthdr.len;
5663 txp->mb[txp->npkt++] = m;
5664 if (txp->npkt == txp->max_npkt)
5667 txp->len16 = howmany(sizeof(struct fw_eth_tx_pkts_vm_wr), 16) +
5669 if (tx_len16_to_desc(txp->len16) > avail)
5670 goto cannot_coalesce;
5673 txp->plen = m->m_pkthdr.len;
5681 add_to_txpkts_pf(struct adapter *sc, struct sge_txq *txq, struct mbuf *m,
5682 int avail, bool *send)
5684 struct txpkts *txp = &txq->txp;
5687 MPASS(!(sc->flags & IS_VF));
5689 /* Cannot have TSO and coalesce at the same time. */
5690 if (cannot_use_txpkts(m)) {
5692 *send = txp->npkt > 0;
5697 nsegs = mbuf_nsegs(m);
5698 if (txp->npkt == 0) {
5699 if (m->m_pkthdr.len > 65535)
5700 goto cannot_coalesce;
5704 howmany(sizeof(struct fw_eth_tx_pkts_wr), 16) +
5705 txpkts0_len16(nsegs);
5709 howmany(sizeof(struct fw_eth_tx_pkts_wr), 16) +
5712 if (tx_len16_to_desc(txp->len16) > avail)
5713 goto cannot_coalesce;
5715 txp->plen = m->m_pkthdr.len;
5718 MPASS(tx_len16_to_desc(txp->len16) <= avail);
5719 MPASS(txp->npkt < txp->max_npkt);
5721 if (m->m_pkthdr.len + txp->plen > 65535) {
5727 MPASS(txp->wr_type == 0 || txp->wr_type == 1);
5728 if (txp->wr_type == 0) {
5729 if (tx_len16_to_desc(txp->len16 +
5730 txpkts0_len16(nsegs)) > min(avail, SGE_MAX_WR_NDESC))
5731 goto retry_after_send;
5732 txp->len16 += txpkts0_len16(nsegs);
5735 goto retry_after_send;
5736 if (tx_len16_to_desc(txp->len16 + txpkts1_len16()) >
5738 goto retry_after_send;
5739 txp->len16 += txpkts1_len16();
5742 txp->plen += m->m_pkthdr.len;
5743 txp->mb[txp->npkt++] = m;
5744 if (txp->npkt == txp->max_npkt)
5751 * Write a txpkts WR for the packets in txp to the hardware descriptors, update
5752 * the software descriptor, and advance the pidx. It is guaranteed that enough
5753 * descriptors are available.
5755 * The return value is the # of hardware descriptors used.
5758 write_txpkts_wr(struct adapter *sc, struct sge_txq *txq)
5760 const struct txpkts *txp = &txq->txp;
5761 struct sge_eq *eq = &txq->eq;
5762 struct fw_eth_tx_pkts_wr *wr;
5763 struct tx_sdesc *txsd;
5764 struct cpl_tx_pkt_core *cpl;
5766 int ndesc, i, checkwrap;
5767 struct mbuf *m, *last;
5770 TXQ_LOCK_ASSERT_OWNED(txq);
5771 MPASS(txp->npkt > 0);
5772 MPASS(txp->len16 <= howmany(SGE_MAX_WR_LEN, 16));
5774 wr = (void *)&eq->desc[eq->pidx];
5775 wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));
5776 wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(txp->len16));
5777 wr->plen = htobe16(txp->plen);
5778 wr->npkt = txp->npkt;
5780 wr->type = txp->wr_type;
5784 * At this point we are 16B into a hardware descriptor. If checkwrap is
5785 * set then we know the WR is going to wrap around somewhere. We'll
5786 * check for that at appropriate points.
5788 ndesc = tx_len16_to_desc(txp->len16);
5790 checkwrap = eq->sidx - ndesc < eq->pidx;
5791 for (i = 0; i < txp->npkt; i++) {
5793 if (txp->wr_type == 0) {
5794 struct ulp_txpkt *ulpmc;
5795 struct ulptx_idata *ulpsc;
5797 /* ULP master command */
5799 ulpmc->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
5800 V_ULP_TXPKT_DEST(0) | V_ULP_TXPKT_FID(eq->iqid));
5801 ulpmc->len = htobe32(txpkts0_len16(mbuf_nsegs(m)));
5803 /* ULP subcommand */
5804 ulpsc = (void *)(ulpmc + 1);
5805 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
5807 ulpsc->len = htobe32(sizeof(struct cpl_tx_pkt_core));
5809 cpl = (void *)(ulpsc + 1);
5811 (uintptr_t)cpl == (uintptr_t)&eq->desc[eq->sidx])
5812 cpl = (void *)&eq->desc[0];
5817 /* Checksum offload */
5818 ctrl1 = csum_to_ctrl(sc, m);
5819 if (ctrl1 != (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS)) {
5820 /* some hardware assistance provided */
5821 if (needs_vxlan_csum(m))
5822 txq->vxlan_txcsum++;
5827 /* VLAN tag insertion */
5828 if (needs_vlan_insertion(m)) {
5829 ctrl1 |= F_TXPKT_VLAN_VLD |
5830 V_TXPKT_VLAN(m->m_pkthdr.ether_vtag);
5831 txq->vlan_insertion++;
5835 cpl->ctrl0 = txq->cpl_ctrl0;
5837 cpl->len = htobe16(m->m_pkthdr.len);
5838 cpl->ctrl1 = htobe64(ctrl1);
5842 (uintptr_t)flitp == (uintptr_t)&eq->desc[eq->sidx])
5843 flitp = (void *)&eq->desc[0];
5845 write_gl_to_txd(txq, m, (caddr_t *)(&flitp), checkwrap);
5848 last->m_nextpkt = m;
5853 if (txp->wr_type == 0) {
5854 txq->txpkts0_pkts += txp->npkt;
5857 txq->txpkts1_pkts += txp->npkt;
5861 txsd = &txq->sdesc[eq->pidx];
5862 txsd->m = txp->mb[0];
5863 txsd->desc_used = ndesc;
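/*
 * Like write_txpkts_wr but emits a FW_ETH_TX_PKTS_VM_WR (always type 1) and
 * includes the L2 header that is shared by all the frames in the request.
 */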
5869 write_txpkts_vm_wr(struct adapter *sc, struct sge_txq *txq)
5871 const struct txpkts *txp = &txq->txp;
5872 struct sge_eq *eq = &txq->eq;
5873 struct fw_eth_tx_pkts_vm_wr *wr;
5874 struct tx_sdesc *txsd;
5875 struct cpl_tx_pkt_core *cpl;
5878 struct mbuf *m, *last;
5881 TXQ_LOCK_ASSERT_OWNED(txq);
5882 MPASS(txp->npkt > 0);
5883 MPASS(txp->wr_type == 1); /* VF supports type 1 only */
5884 MPASS(txp->mb[0] != NULL);
5885 MPASS(txp->len16 <= howmany(SGE_MAX_WR_LEN, 16));
5887 wr = (void *)&eq->desc[eq->pidx];
5888 wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_VM_WR));
5889 wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(txp->len16));
5891 wr->plen = htobe16(txp->plen);
5892 wr->npkt = txp->npkt;
5894 memcpy(&wr->ethmacdst[0], &txp->ethmacdst[0], 16);
5898 * At this point we are 32B into a hardware descriptor. Each mbuf in
5899 * the WR will take 32B so we check for the end of the descriptor ring
5900 * before writing odd mbufs (mb[1], 3, 5, ..)
5902 ndesc = tx_len16_to_desc(txp->len16);
5904 for (i = 0; i < txp->npkt; i++) {
5906 if (i & 1 && (uintptr_t)flitp == (uintptr_t)&eq->desc[eq->sidx])
5907 flitp = &eq->desc[0];
5910 /* Checksum offload */
5911 ctrl1 = csum_to_ctrl(sc, m);
5912 if (ctrl1 != (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS))
5913 txq->txcsum++; /* some hardware assistance provided */
5915 /* VLAN tag insertion */
5916 if (needs_vlan_insertion(m)) {
5917 ctrl1 |= F_TXPKT_VLAN_VLD |
5918 V_TXPKT_VLAN(m->m_pkthdr.ether_vtag);
5919 txq->vlan_insertion++;
5923 cpl->ctrl0 = txq->cpl_ctrl0;
5925 cpl->len = htobe16(m->m_pkthdr.len);
5926 cpl->ctrl1 = htobe64(ctrl1);
5929 MPASS(mbuf_nsegs(m) == 1);
5930 write_gl_to_txd(txq, m, (caddr_t *)(&flitp), 0);
5933 last->m_nextpkt = m;
5938 txq->txpkts1_pkts += txp->npkt;
5941 txsd = &txq->sdesc[eq->pidx];
5942 txsd->m = txp->mb[0];
5943 txsd->desc_used = ndesc;
5949 * If the SGL ends on an address that is not 16 byte aligned, this function will
5950 * add a 0 filled flit at the end.
5953 write_gl_to_txd(struct sge_txq *txq, struct mbuf *m, caddr_t *to, int checkwrap)
5955 struct sge_eq *eq = &txq->eq;
5956 struct sglist *gl = txq->gl;
5957 struct sglist_seg *seg;
5958 __be64 *flitp, *wrap;
5959 struct ulptx_sgl *usgl;
5960 int i, nflits, nsegs;
5962 KASSERT(((uintptr_t)(*to) & 0xf) == 0,
5963 ("%s: SGL must start at a 16 byte boundary: %p", __func__, *to));
5964 MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]);
5965 MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]);
5968 nsegs = gl->sg_nseg;
5971 nflits = (3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1) + 2;
5972 flitp = (__be64 *)(*to);
5973 wrap = (__be64 *)(&eq->desc[eq->sidx]);
5974 seg = &gl->sg_segs[0];
5975 usgl = (void *)flitp;
5978 * We start at a 16 byte boundary somewhere inside the tx descriptor
5979 * ring, so we're at least 16 bytes away from the status page. There is
5980 * no chance of a wrap around in the middle of usgl (which is 16 bytes).
5983 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
5984 V_ULPTX_NSGE(nsegs));
5985 usgl->len0 = htobe32(seg->ss_len);
5986 usgl->addr0 = htobe64(seg->ss_paddr);
5989 if (checkwrap == 0 || (uintptr_t)(flitp + nflits) <= (uintptr_t)wrap) {
5991 /* Won't wrap around at all */
5993 for (i = 0; i < nsegs - 1; i++, seg++) {
5994 usgl->sge[i / 2].len[i & 1] = htobe32(seg->ss_len);
5995 usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ss_paddr);
5998 usgl->sge[i / 2].len[1] = htobe32(0);
6002 /* Will wrap somewhere in the rest of the SGL */
6004 /* 2 flits already written, write the rest flit by flit */
6005 flitp = (void *)(usgl + 1);
6006 for (i = 0; i < nflits - 2; i++) {
6008 flitp = (void *)eq->desc;
6009 *flitp++ = get_flit(seg, nsegs - 1, i);
6014 MPASS(((uintptr_t)flitp) & 0xf);
6018 MPASS((((uintptr_t)flitp) & 0xf) == 0);
6019 if (__predict_false(flitp == wrap))
6020 *to = (void *)eq->desc;
6022 *to = (void *)flitp;
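/*
 * Copy 'len' bytes into the tx descriptor ring at *to, wrapping around to the
 * start of the ring if necessary, and advance *to past the copied bytes.
 */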
6026 copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len)
6029 MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]);
6030 MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]);
6032 if (__predict_true((uintptr_t)(*to) + len <=
6033 (uintptr_t)&eq->desc[eq->sidx])) {
6034 bcopy(from, *to, len);
6037 int portion = (uintptr_t)&eq->desc[eq->sidx] - (uintptr_t)(*to);
6039 bcopy(from, *to, portion);
6041 portion = len - portion; /* remaining */
6042 bcopy(from, (void *)eq->desc, portion);
6043 (*to) = (caddr_t)eq->desc + portion;
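/*
 * Ring the doorbell for 'n' newly written tx descriptors using the most
 * efficient mechanism available to this queue: a write-combined copy of the
 * descriptor, the user doorbell, or the kernel doorbell register.
 */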
6048 ring_eq_db(struct adapter *sc, struct sge_eq *eq, u_int n)
6056 clrbit(&db, DOORBELL_WCWR);
6059 switch (ffs(db) - 1) {
6061 *eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n));
6064 case DOORBELL_WCWR: {
6065 volatile uint64_t *dst, *src;
6069 * Queues whose 128B doorbell segment fits in the page do not
6070 * use relative qid (udb_qid is always 0). Only queues with
6071 * doorbell segments can do WCWR.
6073 KASSERT(eq->udb_qid == 0 && n == 1,
6074 ("%s: inappropriate doorbell (0x%x, %d, %d) for eq %p",
6075 __func__, eq->doorbells, n, eq->dbidx, eq));
6077 dst = (volatile void *)((uintptr_t)eq->udb + UDBS_WR_OFFSET -
6080 src = (void *)&eq->desc[i];
6081 while (src != (void *)&eq->desc[i + 1])
6087 case DOORBELL_UDBWC:
6088 *eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n));
6093 t4_write_reg(sc, sc->sge_kdoorbell_reg,
6094 V_QID(eq->cntxt_id) | V_PIDX(n));
6098 IDXINCR(eq->dbidx, n, eq->sidx);
6102 reclaimable_tx_desc(struct sge_eq *eq)
6106 hw_cidx = read_hw_cidx(eq);
6107 return (IDXDIFF(hw_cidx, eq->cidx, eq->sidx));
6111 total_available_tx_desc(struct sge_eq *eq)
6113 uint16_t hw_cidx, pidx;
6115 hw_cidx = read_hw_cidx(eq);
6118 if (pidx == hw_cidx)
6119 return (eq->sidx - 1);
6121 return (IDXDIFF(hw_cidx, pidx, eq->sidx) - 1);
6124 static inline uint16_t
6125 read_hw_cidx(struct sge_eq *eq)
6127 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx];
6128 uint16_t cidx = spg->cidx; /* stable snapshot */
6130 return (be16toh(cidx));
6134 * Reclaim 'n' descriptors approximately.
6137 reclaim_tx_descs(struct sge_txq *txq, u_int n)
6139 struct tx_sdesc *txsd;
6140 struct sge_eq *eq = &txq->eq;
6141 u_int can_reclaim, reclaimed;
6143 TXQ_LOCK_ASSERT_OWNED(txq);
6147 can_reclaim = reclaimable_tx_desc(eq);
6148 while (can_reclaim && reclaimed < n) {
6150 struct mbuf *m, *nextpkt;
6152 txsd = &txq->sdesc[eq->cidx];
6153 ndesc = txsd->desc_used;
6155 /* Firmware doesn't return "partial" credits. */
6156 KASSERT(can_reclaim >= ndesc,
6157 ("%s: unexpected number of credits: %d, %d",
6158 __func__, can_reclaim, ndesc));
6160 ("%s: descriptor with no credits: cidx %d",
6161 __func__, eq->cidx));
6163 for (m = txsd->m; m != NULL; m = nextpkt) {
6164 nextpkt = m->m_nextpkt;
6165 m->m_nextpkt = NULL;
6169 can_reclaim -= ndesc;
6170 IDXINCR(eq->cidx, ndesc, eq->sidx);
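/*
 * Taskqueue handler that reclaims completed tx descriptors, but only if the
 * queue lock can be taken without blocking.
 */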
6177 tx_reclaim(void *arg, int n)
6179 struct sge_txq *txq = arg;
6180 struct sge_eq *eq = &txq->eq;
6183 if (TXQ_TRYLOCK(txq) == 0)
6185 n = reclaim_tx_descs(txq, 32);
6186 if (eq->cidx == eq->pidx)
6187 eq->equeqidx = eq->pidx;
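/*
 * Returns the idx'th flit (64 bits) of the part of a ULPTX SGL that covers
 * 'segs' (the first segment lives in the SGL header itself): segment lengths
 * are packed two per flit, each pair followed by its two address flits.
 */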
6193 get_flit(struct sglist_seg *segs, int nsegs, int idx)
6195 int i = (idx / 3) * 2;
6201 rc = (uint64_t)segs[i].ss_len << 32;
6203 rc |= (uint64_t)(segs[i + 1].ss_len);
6205 return (htobe64(rc));
6208 return (htobe64(segs[i].ss_paddr));
6210 return (htobe64(segs[i + 1].ss_paddr));
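/*
 * Pick the software zone that the freelist should refill from, based on the
 * maximum expected payload size and whether buffer packing is enabled.
 */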
6217 find_refill_source(struct adapter *sc, int maxp, bool packing)
6220 struct rx_buf_info *rxb = &sc->sge.rx_buf_info[0];
6223 for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) {
6224 if (rxb->hwidx2 == -1)
6226 if (rxb->size1 < PAGE_SIZE &&
6227 rxb->size1 < largest_rx_cluster)
6229 if (rxb->size1 > largest_rx_cluster)
6231 MPASS(rxb->size1 - rxb->size2 >= CL_METADATA_SIZE);
6232 if (rxb->size2 >= maxp)
6237 for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) {
6238 if (rxb->hwidx1 == -1)
6240 if (rxb->size1 > largest_rx_cluster)
6242 if (rxb->size1 >= maxp)
6252 add_fl_to_sfl(struct adapter *sc, struct sge_fl *fl)
6254 mtx_lock(&sc->sfl_lock);
6256 if ((fl->flags & FL_DOOMED) == 0) {
6257 fl->flags |= FL_STARVING;
6258 TAILQ_INSERT_TAIL(&sc->sfl, fl, link);
6259 callout_reset(&sc->sfl_callout, hz / 5, refill_sfl, sc);
6262 mtx_unlock(&sc->sfl_lock);
6266 handle_wrq_egr_update(struct adapter *sc, struct sge_eq *eq)
6268 struct sge_wrq *wrq = (void *)eq;
6270 atomic_readandclear_int(&eq->equiq);
6271 taskqueue_enqueue(sc->tq[eq->tx_chan], &wrq->wrq_tx_task);
6275 handle_eth_egr_update(struct adapter *sc, struct sge_eq *eq)
6277 struct sge_txq *txq = (void *)eq;
6279 MPASS(eq->type == EQ_ETH);
6281 atomic_readandclear_int(&eq->equiq);
6282 if (mp_ring_is_idle(txq->r))
6283 taskqueue_enqueue(sc->tq[eq->tx_chan], &txq->tx_reclaim_task);
6285 mp_ring_check_drainage(txq->r, 64);
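/*
 * CPL_SGE_EGR_UPDATE handler: look up the egress queue by its qid and hand
 * the update to the wrq or eth handler above, depending on the eq's type.
 */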
6289 handle_sge_egr_update(struct sge_iq *iq, const struct rss_header *rss,
6292 const struct cpl_sge_egr_update *cpl = (const void *)(rss + 1);
6293 unsigned int qid = G_EGR_QID(ntohl(cpl->opcode_qid));
6294 struct adapter *sc = iq->adapter;
6295 struct sge *s = &sc->sge;
6297 static void (*h[])(struct adapter *, struct sge_eq *) = {NULL,
6298 &handle_wrq_egr_update, &handle_eth_egr_update,
6299 &handle_wrq_egr_update};
6301 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
6304 eq = s->eqmap[qid - s->eq_start - s->eq_base];
6305 (*h[eq->type])(sc, eq);
6310 /* handle_fw_msg works for both fw4_msg and fw6_msg because this is valid */
6311 CTASSERT(offsetof(struct cpl_fw4_msg, data) == \
6312 offsetof(struct cpl_fw6_msg, data));
6315 handle_fw_msg(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
6317 struct adapter *sc = iq->adapter;
6318 const struct cpl_fw6_msg *cpl = (const void *)(rss + 1);
6320 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
6323 if (cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL) {
6324 const struct rss_header *rss2;
6326 rss2 = (const struct rss_header *)&cpl->data[0];
6327 return (t4_cpl_handler[rss2->opcode](iq, rss2, m));
6330 return (t4_fw_msg_handler[cpl->type](sc, &cpl->data[0]));
6334 * t4_handle_wrerr_rpl - process a FW work request error message
6335 * @adap: the adapter
6336 * @rpl: start of the FW message
6339 t4_handle_wrerr_rpl(struct adapter *adap, const __be64 *rpl)
6341 u8 opcode = *(const u8 *)rpl;
6342 const struct fw_error_cmd *e = (const void *)rpl;
6345 if (opcode != FW_ERROR_CMD) {
6347 "%s: Received WRERR_RPL message with opcode %#x\n",
6348 device_get_nameunit(adap->dev), opcode);
6351 log(LOG_ERR, "%s: FW_ERROR (%s) ", device_get_nameunit(adap->dev),
6352 G_FW_ERROR_CMD_FATAL(be32toh(e->op_to_type)) ? "fatal" :
6354 switch (G_FW_ERROR_CMD_TYPE(be32toh(e->op_to_type))) {
6355 case FW_ERROR_TYPE_EXCEPTION:
6356 log(LOG_ERR, "exception info:\n");
6357 for (i = 0; i < nitems(e->u.exception.info); i++)
6358 log(LOG_ERR, "%s%08x", i == 0 ? "\t" : " ",
6359 be32toh(e->u.exception.info[i]));
6362 case FW_ERROR_TYPE_HWMODULE:
6363 log(LOG_ERR, "HW module regaddr %08x regval %08x\n",
6364 be32toh(e->u.hwmodule.regaddr),
6365 be32toh(e->u.hwmodule.regval));
6367 case FW_ERROR_TYPE_WR:
6368 log(LOG_ERR, "WR cidx %d PF %d VF %d eqid %d hdr:\n",
6369 be16toh(e->u.wr.cidx),
6370 G_FW_ERROR_CMD_PFN(be16toh(e->u.wr.pfn_vfn)),
6371 G_FW_ERROR_CMD_VFN(be16toh(e->u.wr.pfn_vfn)),
6372 be32toh(e->u.wr.eqid));
6373 for (i = 0; i < nitems(e->u.wr.wrhdr); i++)
6374 log(LOG_ERR, "%s%02x", i == 0 ? "\t" : " ",
6378 case FW_ERROR_TYPE_ACL:
6379 log(LOG_ERR, "ACL cidx %d PF %d VF %d eqid %d %s",
6380 be16toh(e->u.acl.cidx),
6381 G_FW_ERROR_CMD_PFN(be16toh(e->u.acl.pfn_vfn)),
6382 G_FW_ERROR_CMD_VFN(be16toh(e->u.acl.pfn_vfn)),
6383 be32toh(e->u.acl.eqid),
6384 G_FW_ERROR_CMD_MV(be16toh(e->u.acl.mv_pkd)) ? "vlanid" :
6386 for (i = 0; i < nitems(e->u.acl.val); i++)
6387 log(LOG_ERR, " %02x", e->u.acl.val[i]);
6391 log(LOG_ERR, "type %#x\n",
6392 G_FW_ERROR_CMD_TYPE(be32toh(e->op_to_type)));
6399 bufidx_used(struct adapter *sc, int idx)
6401 struct rx_buf_info *rxb = &sc->sge.rx_buf_info[0];
6404 for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) {
6405 if (rxb->size1 > largest_rx_cluster)
6407 if (rxb->hwidx1 == idx || rxb->hwidx2 == idx)
6415 sysctl_bufsizes(SYSCTL_HANDLER_ARGS)
6417 struct adapter *sc = arg1;
6418 struct sge_params *sp = &sc->params.sge;
6423 sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND);
6424 for (i = 0; i < SGE_FLBUF_SIZES; i++) {
6425 if (bufidx_used(sc, i))
6430 sbuf_printf(&sb, "%u%c ", sp->sge_fl_buffer_size[i], c);
6434 rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
6440 #if defined(INET) || defined(INET6)
6442  * len16 for a txpkt_eo WR with a GL. Includes the firmware work request header.
6445 txpkt_eo_len16(u_int nsegs, u_int immhdrs, u_int tso)
6451 n = roundup2(sizeof(struct fw_eth_tx_eo_wr) +
6452 sizeof(struct cpl_tx_pkt_core) + immhdrs, 16);
6453 if (__predict_false(nsegs == 0))
6456 nsegs--; /* first segment is part of ulptx_sgl */
6457 n += sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1));
6459 n += sizeof(struct cpl_tx_pkt_lso_core);
6462 return (howmany(n, 16));
6466 #define ETID_FLOWC_NPARAMS 6
6467 #define ETID_FLOWC_LEN (roundup2((sizeof(struct fw_flowc_wr) + \
6468 ETID_FLOWC_NPARAMS * sizeof(struct fw_flowc_mnemval)), 16))
6469 #define ETID_FLOWC_LEN16 (howmany(ETID_FLOWC_LEN, 16))
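/*
 * Send the FLOWC work request that initializes a rate-limit (ETID) flow:
 * PF/VF, tx channel, port, response iq, EO state, and scheduling class.
 */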
6472 send_etid_flowc_wr(struct cxgbe_rate_tag *cst, struct port_info *pi,
6475 struct wrq_cookie cookie;
6476 u_int pfvf = pi->adapter->pf << S_FW_VIID_PFN;
6477 struct fw_flowc_wr *flowc;
6479 mtx_assert(&cst->lock, MA_OWNED);
6480 MPASS((cst->flags & (EO_FLOWC_PENDING | EO_FLOWC_RPL_PENDING)) ==
6483 flowc = start_wrq_wr(&cst->eo_txq->wrq, ETID_FLOWC_LEN16, &cookie);
6484 if (__predict_false(flowc == NULL))
6487 bzero(flowc, ETID_FLOWC_LEN);
6488 flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
6489 V_FW_FLOWC_WR_NPARAMS(ETID_FLOWC_NPARAMS) | V_FW_WR_COMPL(0));
6490 flowc->flowid_len16 = htonl(V_FW_WR_LEN16(ETID_FLOWC_LEN16) |
6491 V_FW_WR_FLOWID(cst->etid));
6492 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
6493 flowc->mnemval[0].val = htobe32(pfvf);
6494 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
6495 flowc->mnemval[1].val = htobe32(pi->tx_chan);
6496 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
6497 flowc->mnemval[2].val = htobe32(pi->tx_chan);
6498 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
6499 flowc->mnemval[3].val = htobe32(cst->iqid);
6500 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_EOSTATE;
6501 flowc->mnemval[4].val = htobe32(FW_FLOWC_MNEM_EOSTATE_ESTABLISHED);
6502 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
6503 flowc->mnemval[5].val = htobe32(cst->schedcl);
6505 commit_wrq_wr(&cst->eo_txq->wrq, flowc, &cookie);
6507 cst->flags &= ~EO_FLOWC_PENDING;
6508 cst->flags |= EO_FLOWC_RPL_PENDING;
6509 MPASS(cst->tx_credits >= ETID_FLOWC_LEN16); /* flowc is first WR. */
6510 cst->tx_credits -= ETID_FLOWC_LEN16;
6515 #define ETID_FLUSH_LEN16 (howmany(sizeof (struct fw_flowc_wr), 16))
6518 send_etid_flush_wr(struct cxgbe_rate_tag *cst)
6520 struct fw_flowc_wr *flowc;
6521 struct wrq_cookie cookie;
6523 mtx_assert(&cst->lock, MA_OWNED);
6525 flowc = start_wrq_wr(&cst->eo_txq->wrq, ETID_FLUSH_LEN16, &cookie);
6526 if (__predict_false(flowc == NULL))
6527 CXGBE_UNIMPLEMENTED(__func__);
6529 bzero(flowc, ETID_FLUSH_LEN16 * 16);
6530 flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
6531 V_FW_FLOWC_WR_NPARAMS(0) | F_FW_WR_COMPL);
6532 flowc->flowid_len16 = htobe32(V_FW_WR_LEN16(ETID_FLUSH_LEN16) |
6533 V_FW_WR_FLOWID(cst->etid));
6535 commit_wrq_wr(&cst->eo_txq->wrq, flowc, &cookie);
6537 cst->flags |= EO_FLUSH_RPL_PENDING;
6538 MPASS(cst->tx_credits >= ETID_FLUSH_LEN16);
6539 cst->tx_credits -= ETID_FLUSH_LEN16;
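/*
 * Construct a FW_ETH_TX_EO_WR for the mbuf: the TCP or UDP segmentation
 * header, an LSO CPL if TSO is needed, CPL_TX_PKT, the Ethernet/IP/L4 headers
 * as immediate data, and an SGL covering the rest of the payload.
 */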
6544 write_ethofld_wr(struct cxgbe_rate_tag *cst, struct fw_eth_tx_eo_wr *wr,
6545 struct mbuf *m0, int compl)
6547 struct cpl_tx_pkt_core *cpl;
6549 uint32_t ctrl; /* used in many unrelated places */
6550 int len16, pktlen, nsegs, immhdrs;
6553 struct ulptx_sgl *usgl;
6555 struct sglist_seg segs[38]; /* XXX: find real limit. XXX: get off the stack */
6557 mtx_assert(&cst->lock, MA_OWNED);
6559 KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 &&
6560 m0->m_pkthdr.l4hlen > 0,
6561 ("%s: ethofld mbuf %p is missing header lengths", __func__, m0));
6563 len16 = mbuf_eo_len16(m0);
6564 nsegs = mbuf_eo_nsegs(m0);
6565 pktlen = m0->m_pkthdr.len;
6566 ctrl = sizeof(struct cpl_tx_pkt_core);
6568 ctrl += sizeof(struct cpl_tx_pkt_lso_core);
6569 immhdrs = m0->m_pkthdr.l2hlen + m0->m_pkthdr.l3hlen + m0->m_pkthdr.l4hlen;
6572 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_EO_WR) |
6573 V_FW_ETH_TX_EO_WR_IMMDLEN(ctrl) | V_FW_WR_COMPL(!!compl));
6574 wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(len16) |
6575 V_FW_WR_FLOWID(cst->etid));
6577 if (needs_outer_udp_csum(m0)) {
6578 wr->u.udpseg.type = FW_ETH_TX_EO_TYPE_UDPSEG;
6579 wr->u.udpseg.ethlen = m0->m_pkthdr.l2hlen;
6580 wr->u.udpseg.iplen = htobe16(m0->m_pkthdr.l3hlen);
6581 wr->u.udpseg.udplen = m0->m_pkthdr.l4hlen;
6582 wr->u.udpseg.rtplen = 0;
6583 wr->u.udpseg.r4 = 0;
6584 wr->u.udpseg.mss = htobe16(pktlen - immhdrs);
6585 wr->u.udpseg.schedpktsize = wr->u.udpseg.mss;
6586 wr->u.udpseg.plen = htobe32(pktlen - immhdrs);
6587 cpl = (void *)(wr + 1);
6589 MPASS(needs_outer_tcp_csum(m0));
6590 wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG;
6591 wr->u.tcpseg.ethlen = m0->m_pkthdr.l2hlen;
6592 wr->u.tcpseg.iplen = htobe16(m0->m_pkthdr.l3hlen);
6593 wr->u.tcpseg.tcplen = m0->m_pkthdr.l4hlen;
6594 wr->u.tcpseg.tsclk_tsoff = mbuf_eo_tsclk_tsoff(m0);
6595 wr->u.tcpseg.r4 = 0;
6596 wr->u.tcpseg.r5 = 0;
6597 wr->u.tcpseg.plen = htobe32(pktlen - immhdrs);
6599 if (needs_tso(m0)) {
6600 struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
6602 wr->u.tcpseg.mss = htobe16(m0->m_pkthdr.tso_segsz);
6604 ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) |
6605 F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE |
6606 V_LSO_ETHHDR_LEN((m0->m_pkthdr.l2hlen -
6607 ETHER_HDR_LEN) >> 2) |
6608 V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) |
6609 V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2);
6610 if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr))
6612 lso->lso_ctrl = htobe32(ctrl);
6613 lso->ipid_ofst = htobe16(0);
6614 lso->mss = htobe16(m0->m_pkthdr.tso_segsz);
6615 lso->seqno_offset = htobe32(0);
6616 lso->len = htobe32(pktlen);
6618 cpl = (void *)(lso + 1);
6620 wr->u.tcpseg.mss = htobe16(0xffff);
6621 cpl = (void *)(wr + 1);
6625 /* Checksum offload must be requested for ethofld. */
6626 MPASS(needs_outer_l4_csum(m0));
6627 ctrl1 = csum_to_ctrl(cst->adapter, m0);
6629 /* VLAN tag insertion */
6630 if (needs_vlan_insertion(m0)) {
6631 ctrl1 |= F_TXPKT_VLAN_VLD |
6632 V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag);
6636 cpl->ctrl0 = cst->ctrl0;
6638 cpl->len = htobe16(pktlen);
6639 cpl->ctrl1 = htobe64(ctrl1);
6641 /* Copy Ethernet, IP & TCP/UDP hdrs as immediate data */
6642 p = (uintptr_t)(cpl + 1);
6643 m_copydata(m0, 0, immhdrs, (void *)p);
6646 dst = (void *)(cpl + 1);
6650 /* zero-pad up to the next 16-byte boundary, if not 16-byte aligned */
6652 pad = 16 - (immhdrs & 0xf);
6653 bzero((void *)p, pad);
6655 usgl = (void *)(p + pad);
6656 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
6657 V_ULPTX_NSGE(nsegs));
6659 sglist_init(&sg, nitems(segs), segs);
6660 for (; m0 != NULL; m0 = m0->m_next) {
6661 if (__predict_false(m0->m_len == 0))
6663 if (immhdrs >= m0->m_len) {
6664 immhdrs -= m0->m_len;
6667 if (m0->m_flags & M_EXTPG)
6668 sglist_append_mbuf_epg(&sg, m0,
6669 mtod(m0, vm_offset_t), m0->m_len);
6671 sglist_append(&sg, mtod(m0, char *) + immhdrs,
6672 m0->m_len - immhdrs);
6675 MPASS(sg.sg_nseg == nsegs);
6678  * Zero pad last 8B in case the WR doesn't end on a 16B
 * boundary.
 */
6681 *(uint64_t *)((char *)wr + len16 * 16 - 8) = 0;
6683 usgl->len0 = htobe32(segs[0].ss_len);
6684 usgl->addr0 = htobe64(segs[0].ss_paddr);
6685 for (i = 0; i < nsegs - 1; i++) {
6686 usgl->sge[i / 2].len[i & 1] = htobe32(segs[i + 1].ss_len);
6687 usgl->sge[i / 2].addr[i & 1] = htobe64(segs[i + 1].ss_paddr);
6690 usgl->sge[i / 2].len[1] = htobe32(0);
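/*
 * Write out as many of the tag's pending mbufs as tx credits and WRQ space
 * allow.  Each committed mbuf is moved from pending_tx to pending_fwack and
 * drops its send-tag reference along the way.
 */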
6696 ethofld_tx(struct cxgbe_rate_tag *cst)
6699 struct wrq_cookie cookie;
6700 int next_credits, compl;
6701 struct fw_eth_tx_eo_wr *wr;
6703 mtx_assert(&cst->lock, MA_OWNED);
6705 while ((m = mbufq_first(&cst->pending_tx)) != NULL) {
6708 /* How many len16 credits do we need to send this mbuf? */
6709 next_credits = mbuf_eo_len16(m);
6710 MPASS(next_credits > 0);
6711 if (next_credits > cst->tx_credits) {
6713 * Tx will make progress eventually because there is at
6714 * least one outstanding fw4_ack that will return
6715 * credits and kick the tx.
6717 MPASS(cst->ncompl > 0);
6720 wr = start_wrq_wr(&cst->eo_txq->wrq, next_credits, &cookie);
6721 if (__predict_false(wr == NULL)) {
6722 /* XXX: wishful thinking, not a real assertion. */
6723 MPASS(cst->ncompl > 0);
6726 cst->tx_credits -= next_credits;
6727 cst->tx_nocompl += next_credits;
6728 compl = cst->ncompl == 0 || cst->tx_nocompl >= cst->tx_total / 2;
6729 ETHER_BPF_MTAP(cst->com.ifp, m);
6730 write_ethofld_wr(cst, wr, m, compl);
6731 commit_wrq_wr(&cst->eo_txq->wrq, wr, &cookie);
6734 cst->tx_nocompl = 0;
6736 (void) mbufq_dequeue(&cst->pending_tx);
6739 * Drop the mbuf's reference on the tag now rather
6740 * than waiting until m_freem(). This ensures that
6741 * cxgbe_rate_tag_free gets called when the inp drops
6742 * its reference on the tag and there are no more
6743 * mbufs in the pending_tx queue and can flush any
6744 * pending requests. Otherwise if the last mbuf
6745  * doesn't request a completion the etid will never be
 * released.
 */
6748 m->m_pkthdr.snd_tag = NULL;
6749 m->m_pkthdr.csum_flags &= ~CSUM_SND_TAG;
6750 m_snd_tag_rele(&cst->com);
6752 mbufq_enqueue(&cst->pending_fwack, m);
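/*
 * if_transmit handler for rate-limited traffic.  On first use it binds the
 * tag to an offload txq and response iq (derived from the flow's RSS hash)
 * and sends the FLOWC; after that it queues the mbuf and lets ethofld_tx()
 * push it out as credits allow.
 */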
6757 ethofld_transmit(struct ifnet *ifp, struct mbuf *m0)
6759 struct cxgbe_rate_tag *cst;
6762 MPASS(m0->m_nextpkt == NULL);
6763 MPASS(m0->m_pkthdr.csum_flags & CSUM_SND_TAG);
6764 MPASS(m0->m_pkthdr.snd_tag != NULL);
6765 cst = mst_to_crt(m0->m_pkthdr.snd_tag);
6767 mtx_lock(&cst->lock);
6768 MPASS(cst->flags & EO_SND_TAG_REF);
6770 if (__predict_false(cst->flags & EO_FLOWC_PENDING)) {
6771 struct vi_info *vi = ifp->if_softc;
6772 struct port_info *pi = vi->pi;
6773 struct adapter *sc = pi->adapter;
6774 const uint32_t rss_mask = vi->rss_size - 1;
6777 cst->eo_txq = &sc->sge.ofld_txq[vi->first_ofld_txq];
6778 if (M_HASHTYPE_ISHASH(m0))
6779 rss_hash = m0->m_pkthdr.flowid;
6781 rss_hash = arc4random();
6782 /* We assume RSS hashing */
6783 cst->iqid = vi->rss[rss_hash & rss_mask];
6784 cst->eo_txq += rss_hash % vi->nofldtxq;
6785 rc = send_etid_flowc_wr(cst, pi, vi);
6790 if (__predict_false(cst->plen + m0->m_pkthdr.len > eo_max_backlog)) {
6795 mbufq_enqueue(&cst->pending_tx, m0);
6796 cst->plen += m0->m_pkthdr.len;
6799 * Hold an extra reference on the tag while generating work
6800 * requests to ensure that we don't try to free the tag during
6801 * ethofld_tx() in case we are sending the final mbuf after
6802 * the inp was freed.
6804 m_snd_tag_ref(&cst->com);
6806 mtx_unlock(&cst->lock);
6807 m_snd_tag_rele(&cst->com);
6811 mtx_unlock(&cst->lock);
6812 if (__predict_false(rc != 0))
6818 ethofld_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m0)
6820 struct adapter *sc = iq->adapter;
6821 const struct cpl_fw4_ack *cpl = (const void *)(rss + 1);
6823 u_int etid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl)));
6824 struct cxgbe_rate_tag *cst;
6825 uint8_t credits = cpl->credits;
6827 cst = lookup_etid(sc, etid);
6828 mtx_lock(&cst->lock);
6829 if (__predict_false(cst->flags & EO_FLOWC_RPL_PENDING)) {
6830 MPASS(credits >= ETID_FLOWC_LEN16);
6831 credits -= ETID_FLOWC_LEN16;
6832 cst->flags &= ~EO_FLOWC_RPL_PENDING;
6835 KASSERT(cst->ncompl > 0,
6836 ("%s: etid %u (%p) wasn't expecting completion.",
6837 __func__, etid, cst));
6840 while (credits > 0) {
6841 m = mbufq_dequeue(&cst->pending_fwack);
6842 if (__predict_false(m == NULL)) {
6844 * The remaining credits are for the final flush that
6845 * was issued when the tag was freed by the kernel.
6848 (EO_FLUSH_RPL_PENDING | EO_SND_TAG_REF)) ==
6849 EO_FLUSH_RPL_PENDING);
6850 MPASS(credits == ETID_FLUSH_LEN16);
6851 MPASS(cst->tx_credits + cpl->credits == cst->tx_total);
6852 MPASS(cst->ncompl == 0);
6854 cst->flags &= ~EO_FLUSH_RPL_PENDING;
6855 cst->tx_credits += cpl->credits;
6856 cxgbe_rate_tag_free_locked(cst);
6857 return (0); /* cst is gone. */
6860 ("%s: too many credits (%u, %u)", __func__, cpl->credits,
6862 KASSERT(credits >= mbuf_eo_len16(m),
6863 ("%s: too few credits (%u, %u, %u)", __func__,
6864 cpl->credits, credits, mbuf_eo_len16(m)));
6865 credits -= mbuf_eo_len16(m);
6866 cst->plen -= m->m_pkthdr.len;
6870 cst->tx_credits += cpl->credits;
6871 MPASS(cst->tx_credits <= cst->tx_total);
6873 if (cst->flags & EO_SND_TAG_REF) {
6875 * As with ethofld_transmit(), hold an extra reference
6876  * so that the tag is stable across ethofld_tx().
6878 m_snd_tag_ref(&cst->com);
6879 m = mbufq_first(&cst->pending_tx);
6880 if (m != NULL && cst->tx_credits >= mbuf_eo_len16(m))
6882 mtx_unlock(&cst->lock);
6883 m_snd_tag_rele(&cst->com);
6886 * There shouldn't be any pending packets if the tag
6887 * was freed by the kernel since any pending packet
6888 * should hold a reference to the tag.
6890 MPASS(mbufq_first(&cst->pending_tx) == NULL);
6891 mtx_unlock(&cst->lock);