/*-
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */
#ifndef __T4_ADAPTER_H__
#define __T4_ADAPTER_H__

#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <machine/bus.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>
#include <netinet/in.h>
#include <netinet/tcp_lro.h>

#include "offload.h"
#include "firmware/t4fw_interface.h"
#define T4_CFGNAME "t4fw_cfg"
#define T4_FWNAME "t4fw"

MALLOC_DECLARE(M_CXGBE);
#define CXGBE_UNIMPLEMENTED(s) \
    panic("%s (%s, line %d) not implemented yet.", s, __FILE__, __LINE__)
#if defined(__i386__) || defined(__amd64__)
static __inline void
prefetch(void *x)
{
	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
}
#else
#define prefetch(x)
#endif
#ifndef SYSCTL_ADD_UQUAD
#define SYSCTL_ADD_UQUAD SYSCTL_ADD_QUAD
#define sysctl_handle_64 sysctl_handle_quad
#define CTLTYPE_U64 CTLTYPE_QUAD
#endif
#if (__FreeBSD_version >= 900030) || \
    ((__FreeBSD_version >= 802507) && (__FreeBSD_version < 900000))
#define SBUF_DRAIN 1
#endif
#ifdef __amd64__
/* XXX: need systemwide bus_space_read_8/bus_space_write_8 */
static __inline uint64_t
t4_bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t offset)
{
	KASSERT(tag == X86_BUS_SPACE_MEM,
	    ("%s: can only handle mem space", __func__));

	return (*(volatile uint64_t *)(handle + offset));
}

static __inline void
t4_bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t bsh,
    bus_size_t offset, uint64_t value)
{
	KASSERT(tag == X86_BUS_SPACE_MEM,
	    ("%s: can only handle mem space", __func__));

	*(volatile uint64_t *)(bsh + offset) = value;
}
#else
static __inline uint64_t
t4_bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t offset)
{
	return (uint64_t)bus_space_read_4(tag, handle, offset) +
	    ((uint64_t)bus_space_read_4(tag, handle, offset + 4) << 32);
}

static __inline void
t4_bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t bsh,
    bus_size_t offset, uint64_t value)
{
	bus_space_write_4(tag, bsh, offset, value);
	bus_space_write_4(tag, bsh, offset + 4, value >> 32);
}
#endif
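/*
 * Example (illustrative, not part of the driver API): on amd64 the helpers
 * above compile down to a single 64-bit load/store; elsewhere they are
 * composed from two 32-bit accesses, low word first.  A caller uses them the
 * same way in either case:
 *
 *	uint64_t v;
 *
 *	v = t4_bus_space_read_8(sc->bt, sc->bh, A_SOME_64BIT_REG);
 *
 * where A_SOME_64BIT_REG stands in for a real register offset from the
 * hardware definitions.
 */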
struct adapter;
typedef struct adapter adapter_t;
enum {
	FW_IQ_QSIZE = 256,
	FW_IQ_ESIZE = 64,	/* At least 64 mandated by the firmware spec */

	RX_IQ_QSIZE = 1024,
	RX_IQ_ESIZE = 64,	/* At least 64 so CPL_RX_PKT will fit */

	EQ_ESIZE = 64,		/* All egress queues use this entry size */

	RX_FL_ESIZE = EQ_ESIZE,	/* 8 64bit addresses */
#if MJUMPAGESIZE != MCLBYTES
	FL_BUF_SIZES = 4,	/* cluster, jumbop, jumbo9k, jumbo16k */
#else
	FL_BUF_SIZES = 3,	/* cluster, jumbo9k, jumbo16k */
#endif
	OFLD_BUF_SIZE = MJUM16BYTES,	/* size of fl buffer for TOE rxq */

	CTRL_EQ_QSIZE = 128,

	TX_EQ_QSIZE = 1024,
	TX_SGL_SEGS = 36,
	TX_WR_FLITS = SGE_MAX_WR_LEN / 8
};
#ifdef T4_PKT_TIMESTAMP
#define RX_COPY_THRESHOLD (MINCLSIZE - 8)
#else
#define RX_COPY_THRESHOLD MINCLSIZE
#endif
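/*
 * Illustrative sketch of how a receive path might apply the threshold:
 * frames small enough to fit in a plain mbuf are copied out so the large
 * cluster can stay on the freelist (placeholder logic; the real code is in
 * t4_sge.c):
 *
 *	if (len <= RX_COPY_THRESHOLD) {
 *		m = m_gethdr(M_NOWAIT, MT_DATA);
 *		if (m != NULL)
 *			bcopy(payload, mtod(m, caddr_t), len);
 *	} else {
 *		// hand the rx cluster to the stack directly
 *	}
 */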
enum {
	/* adapter intr_type */
	INTR_INTX	= (1 << 0),
	INTR_MSI	= (1 << 1),
	INTR_MSIX	= (1 << 2)
};
enum {
	/* flags understood by begin_synchronized_op */
	HOLD_LOCK	= (1 << 0),
	SLEEP_OK	= (1 << 1),
	INTR_OK		= (1 << 2),

	/* flags understood by end_synchronized_op */
	LOCK_HELD	= HOLD_LOCK,
};
enum {
	/* adapter flags */
	FULL_INIT_DONE	= (1 << 0),
	FW_OK		= (1 << 1),
	INTR_DIRECT	= (1 << 2),	/* direct interrupts for everything */
	MASTER_PF	= (1 << 3),
	ADAP_SYSCTL_CTX	= (1 << 4),
	TOM_INIT_DONE	= (1 << 5),

	CXGBE_BUSY	= (1 << 9),

	/* port flags */
	DOOMED		= (1 << 0),
	PORT_INIT_DONE	= (1 << 1),
	PORT_SYSCTL_CTX	= (1 << 2),
};
#define IS_DOOMED(pi)	((pi)->flags & DOOMED)
#define SET_DOOMED(pi)	do {(pi)->flags |= DOOMED;} while (0)
#define IS_BUSY(sc)	((sc)->flags & CXGBE_BUSY)
#define SET_BUSY(sc)	do {(sc)->flags |= CXGBE_BUSY;} while (0)
#define CLR_BUSY(sc)	do {(sc)->flags &= ~CXGBE_BUSY;} while (0)
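/*
 * Typical usage (illustrative): the busy bit is manipulated with the adapter
 * lock held, and waiters sleep on the flags word, e.g.:
 *
 *	ADAPTER_LOCK(sc);
 *	while (IS_BUSY(sc))
 *		mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4busy", 0);
 *	SET_BUSY(sc);
 *	ADAPTER_UNLOCK(sc);
 *	// ... perform the operation ...
 *	ADAPTER_LOCK(sc);
 *	CLR_BUSY(sc);
 *	wakeup(&sc->flags);
 *	ADAPTER_UNLOCK(sc);
 *
 * begin_synchronized_op/end_synchronized_op (declared later in this file)
 * wrap this pattern.
 */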
struct port_info {
	device_t dev;
	struct adapter *adapter;

	struct ifnet *ifp;
	struct ifmedia media;

	struct mtx pi_lock;
	char lockname[16];
	unsigned long flags;

	uint16_t viid;
	int16_t  xact_addr_filt;/* index of exact MAC address filter */
	uint16_t rss_size;	/* size of VI's RSS table slice */
	uint8_t  lport;		/* associated offload logical port */
	uint8_t  port_id;
	uint8_t  tx_chan;

	/* These need to be int as they are used in sysctl */
	int ntxq;	/* # of tx queues */
	int first_txq;	/* index of first tx queue */
	int nrxq;	/* # of rx queues */
	int first_rxq;	/* index of first rx queue */
#ifdef TCP_OFFLOAD
	int nofldtxq;		/* # of offload tx queues */
	int first_ofld_txq;	/* index of first offload tx queue */
	int nofldrxq;		/* # of offload rx queues */
	int first_ofld_rxq;	/* index of first offload rx queue */
#endif

	struct link_config link_cfg;
	struct port_stats stats;

	eventhandler_tag vlan_c;

	struct callout tick;
	struct sysctl_ctx_list ctx;	/* from ifconfig up to driver detach */

	uint8_t hw_addr[ETHER_ADDR_LEN]; /* factory MAC address, won't change */
};
struct fl_sdesc {
	struct mbuf *m;
	bus_dmamap_t map;
	caddr_t cl;
	uint8_t tag_idx;	/* the sc->fl_tag this map comes from */
};

struct tx_desc {
	__be64 flit[8];
};

struct tx_map {
	struct mbuf *m;
	bus_dmamap_t map;
};
/* DMA maps used for tx */
struct tx_maps {
	struct tx_map *maps;
	uint32_t map_total;	/* # of DMA maps */
	uint32_t map_pidx;	/* next map to be used */
	uint32_t map_cidx;	/* reclaimed up to this index */
	uint32_t map_avail;	/* # of available maps */
};
struct tx_sdesc {
	uint8_t desc_used;	/* # of hardware descriptors used by the WR */
	uint8_t credits;	/* NIC txq: # of frames sent out in the WR */
};
enum {
	/* iq flags */
	IQ_ALLOCATED	= (1 << 0),	/* firmware resources allocated */
	IQ_HAS_FL	= (1 << 1),	/* iq associated with a freelist */
	IQ_INTR		= (1 << 2),	/* iq takes direct interrupt */
	IQ_LRO_ENABLED	= (1 << 3),	/* iq is an eth rxq with LRO enabled */

	/* iq state */
	IQS_DISABLED	= 0,
	IQS_BUSY	= 1,
	IQS_IDLE	= 2,
};
/*
 * Ingress Queue: T4 is producer, driver is consumer.
 */
struct sge_iq {
	bus_dma_tag_t desc_tag;
	bus_dmamap_t desc_map;
	bus_addr_t ba;		/* bus address of descriptor ring */
	uint32_t flags;
	uint16_t abs_id;	/* absolute SGE id for the iq */
	int8_t   intr_pktc_idx;	/* packet count threshold index */
	__be64  *desc;		/* KVA of descriptor ring */

	volatile int state;
	struct adapter *adapter;
	const __be64 *cdesc;	/* current descriptor */
	uint8_t  gen;		/* generation bit */
	uint8_t  intr_params;	/* interrupt holdoff parameters */
	uint8_t  intr_next;	/* XXX: holdoff for next interrupt */
	uint8_t  esize;		/* size (bytes) of each entry in the queue */
	uint16_t qsize;		/* size (# of entries) of the queue */
	uint16_t cidx;		/* consumer index */
	uint16_t cntxt_id;	/* SGE context id for the iq */

	STAILQ_ENTRY(sge_iq) link;
};
enum {
	EQ_CTRL		= 1,
	EQ_ETH		= 2,
#ifdef TCP_OFFLOAD
	EQ_OFLD		= 3,
#endif

	/* eq flags */
	EQ_TYPEMASK	= 7,		/* 3 lsbits hold the type */
	EQ_ALLOCATED	= (1 << 3),	/* firmware resources allocated */
	EQ_DOOMED	= (1 << 4),	/* about to be destroyed */
	EQ_CRFLUSHED	= (1 << 5),	/* expecting an update from SGE */
	EQ_STALLED	= (1 << 6),	/* out of hw descriptors or dmamaps */
};
/*
 * Egress Queue: driver is producer, T4 is consumer.
 *
 * Note: A free list is an egress queue (driver produces the buffers and T4
 * consumes them) but it's special enough to have its own struct (see sge_fl).
 */
struct sge_eq {
	unsigned int flags;	/* MUST be first */
	unsigned int cntxt_id;	/* SGE context id for the eq */
	bus_dma_tag_t desc_tag;
	bus_dmamap_t desc_map;
	char lockname[16];
	struct mtx eq_lock;

	struct tx_desc *desc;	/* KVA of descriptor ring */
	bus_addr_t ba;		/* bus address of descriptor ring */
	struct sge_qstat *spg;	/* status page, for convenience */
	uint16_t cap;		/* max # of desc, for convenience */
	uint16_t avail;		/* available descriptors, for convenience */
	uint16_t qsize;		/* size (# of entries) of the queue */
	uint16_t cidx;		/* consumer idx (desc idx) */
	uint16_t pidx;		/* producer idx (desc idx) */
	uint16_t pending;	/* # of descriptors used since last doorbell */
	uint16_t iqid;		/* iq that gets egr_update for the eq */
	uint8_t tx_chan;	/* tx channel used by the eq */
	struct task tx_task;
	struct callout tx_callout;

	/* stats */

	uint32_t egr_update;	/* # of SGE_EGR_UPDATE notifications for eq */
	uint32_t unstalled;	/* recovered from stall */
};
enum {
	FL_STARVING	= (1 << 0), /* on the adapter's list of starving fl's */
	FL_DOOMED	= (1 << 1), /* about to be destroyed */
};
#define FL_RUNNING_LOW(fl)	(fl->cap - fl->needed <= fl->lowat)
#define FL_NOT_RUNNING_LOW(fl)	(fl->cap - fl->needed >= 2 * fl->lowat)
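/*
 * Illustrative: a freelist is topped up when it runs low and comes off the
 * starvation list once it has recovered, roughly (refill logic is in
 * t4_sge.c):
 *
 *	if (FL_RUNNING_LOW(fl))
 *		// refill the fl, ring its doorbell
 *	...
 *	if (FL_NOT_RUNNING_LOW(fl))
 *		fl->flags &= ~FL_STARVING;
 *
 * The hysteresis (lowat vs. 2 * lowat) keeps a freelist from bouncing on and
 * off the starving list.
 */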
struct sge_fl {
	bus_dma_tag_t desc_tag;
	bus_dmamap_t desc_map;
	bus_dma_tag_t tag[FL_BUF_SIZES];
	uint8_t tag_idx;
	struct mtx fl_lock;
	char lockname[16];
	int flags;

	__be64 *desc;		/* KVA of descriptor ring, ptr to addresses */
	bus_addr_t ba;		/* bus address of descriptor ring */
	struct fl_sdesc *sdesc;	/* KVA of software descriptor ring */
	uint32_t cap;		/* max # of buffers, for convenience */
	uint16_t qsize;		/* size (# of entries) of the queue */
	uint16_t cntxt_id;	/* SGE context id for the freelist */
	uint32_t cidx;		/* consumer idx (buffer idx, NOT hw desc idx) */
	uint32_t pidx;		/* producer idx (buffer idx, NOT hw desc idx) */
	uint32_t needed;	/* # of buffers needed to fill up fl. */
	uint32_t lowat;		/* # of buffers <= this means fl needs help */
	uint32_t pending;	/* # of bufs allocated since last doorbell */
	unsigned int dmamap_failed;
	TAILQ_ENTRY(sge_fl) link; /* All starving freelists */
};
/* txq: SGE egress queue + what's needed for Ethernet NIC */
struct sge_txq {
	struct sge_eq eq;	/* MUST be first */

	struct ifnet *ifp;	/* the interface this txq belongs to */
	bus_dma_tag_t tx_tag;	/* tag for transmit buffers */
	struct buf_ring *br;	/* tx buffer ring */
	struct tx_sdesc *sdesc;	/* KVA of software descriptor ring */
	struct mbuf *m;		/* held up due to temporary resource shortage */

	struct tx_maps txmaps;

	/* stats for common events first */

	uint64_t txcsum;	/* # of times hardware assisted with checksum */
	uint64_t tso_wrs;	/* # of TSO work requests */
	uint64_t vlan_insertion;/* # of times VLAN tag was inserted */
	uint64_t imm_wrs;	/* # of work requests with immediate data */
	uint64_t sgl_wrs;	/* # of work requests with direct SGL */
	uint64_t txpkt_wrs;	/* # of txpkt work requests (not coalesced) */
	uint64_t txpkts_wrs;	/* # of coalesced tx work requests */
	uint64_t txpkts_pkts;	/* # of frames in coalesced tx work requests */

	/* stats for not-that-common events */

	uint32_t no_dmamap;	/* no DMA map to load the mbuf */
	uint32_t no_desc;	/* out of hardware descriptors */
} __aligned(CACHE_LINE_SIZE);
/* rxq: SGE ingress queue + SGE free list + miscellaneous items */
struct sge_rxq {
	struct sge_iq iq;	/* MUST be first */
	struct sge_fl fl;	/* MUST follow iq */

	struct ifnet *ifp;	/* the interface this rxq belongs to */
#if defined(INET) || defined(INET6)
	struct lro_ctrl lro;	/* LRO state */
#endif

	/* stats for common events first */

	uint64_t rxcsum;	/* # of times hardware assisted with checksum */
	uint64_t vlan_extraction;/* # of times VLAN tag was extracted */

	/* stats for not-that-common events */

} __aligned(CACHE_LINE_SIZE);
static inline struct sge_rxq *
iq_to_rxq(struct sge_iq *iq)
{

	return (__containerof(iq, struct sge_rxq, iq));
}
#ifdef TCP_OFFLOAD
/* ofld_rxq: SGE ingress queue + SGE free list + miscellaneous items */
struct sge_ofld_rxq {
	struct sge_iq iq;	/* MUST be first */
	struct sge_fl fl;	/* MUST follow iq */
} __aligned(CACHE_LINE_SIZE);

static inline struct sge_ofld_rxq *
iq_to_ofld_rxq(struct sge_iq *iq)
{

	return (__containerof(iq, struct sge_ofld_rxq, iq));
}
#endif
struct wrqe {
	STAILQ_ENTRY(wrqe) link;
	struct sge_wrq *wrq;	/* wrq that this wrqe is queued on */
	int wr_len;
	uint64_t wr[] __aligned(16);
};
/*
 * wrq: SGE egress queue that is given prebuilt work requests. Both the control
 * and offload tx queues are of this type.
 */
struct sge_wrq {
	struct sge_eq eq;	/* MUST be first */

	struct adapter *adapter;

	/* List of WRs held up due to lack of tx descriptors */
	STAILQ_HEAD(, wrqe) wr_list;

	/* stats for common events first */

	uint64_t tx_wrs;	/* # of tx work requests */

	/* stats for not-that-common events */

	uint32_t no_desc;	/* out of hardware descriptors */
} __aligned(CACHE_LINE_SIZE);
struct sge {
	int timer_val[SGE_NTIMERS];
	int counter_val[SGE_NCOUNTERS];
	int fl_starve_threshold;

	int nrxq;	/* total # of Ethernet rx queues */
	int ntxq;	/* total # of Ethernet tx queues */
#ifdef TCP_OFFLOAD
	int nofldrxq;	/* total # of TOE rx queues */
	int nofldtxq;	/* total # of TOE tx queues */
#endif
	int niq;	/* total # of ingress queues */
	int neq;	/* total # of egress queues */

	struct sge_iq fwq;	/* Firmware event queue */
	struct sge_wrq mgmtq;	/* Management queue (control queue) */
	struct sge_wrq *ctrlq;	/* Control queues */
	struct sge_txq *txq;	/* NIC tx queues */
	struct sge_rxq *rxq;	/* NIC rx queues */
#ifdef TCP_OFFLOAD
	struct sge_wrq *ofld_txq;	/* TOE tx queues */
	struct sge_ofld_rxq *ofld_rxq;	/* TOE rx queues */
#endif

	uint16_t iq_start;
	int eq_start;
	struct sge_iq **iqmap;	/* iq->cntxt_id to iq mapping */
	struct sge_eq **eqmap;	/* eq->cntxt_id to eq mapping */
};
typedef int (*cpl_handler_t)(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
typedef int (*an_handler_t)(struct sge_iq *, const struct rsp_ctrl *);
typedef int (*fw_msg_handler_t)(struct adapter *, const __be64 *);
struct adapter {
	SLIST_ENTRY(adapter) link;
	device_t dev;

	/* PCIe register resources */
	int regs_rid;
	struct resource *regs_res;
	int msix_rid;
	struct resource *msix_res;
	bus_space_handle_t bh;
	bus_space_tag_t bt;
	bus_size_t mmio_len;

	/* Interrupt information */
	int intr_type;
	int intr_count;
	struct irq {
		struct resource *res;
		int rid;
		void *tag;
	} *irq;

	bus_dma_tag_t dmat;	/* Parent DMA tag */

	struct sge sge;

	struct taskqueue *tq[NCHAN];	/* taskqueues that flush data out */
	struct port_info *port[MAX_NPORTS];
	uint8_t chan_map[NCHAN];
	uint32_t filter_mode;

#ifdef TCP_OFFLOAD
	void *tom_softc;	/* (struct tom_data *) */
	struct tom_tunables tt;
#endif
	struct l2t_data *l2t;	/* L2 table */
	struct tid_info tids;

	int flags;

	struct adapter_params params;
	struct t4_virt_res vres;

	struct sysctl_ctx_list ctx; /* from adapter_full_init to full_uninit */

	struct mtx sc_lock;
	char lockname[16];

	/* Starving free lists */
	struct mtx sfl_lock;	/* same cache-line as sc_lock? but that's ok */
	TAILQ_HEAD(, sge_fl) sfl;
	struct callout sfl_callout;

	an_handler_t an_handler __aligned(CACHE_LINE_SIZE);
	fw_msg_handler_t fw_msg_handler[4];	/* NUM_FW6_TYPES */
	cpl_handler_t cpl_handler[0xef];	/* NUM_CPL_CMDS */

	const char *last_op;
	const void *last_op_thr;
};
#define ADAPTER_LOCK(sc)		mtx_lock(&(sc)->sc_lock)
#define ADAPTER_UNLOCK(sc)		mtx_unlock(&(sc)->sc_lock)
#define ADAPTER_LOCK_ASSERT_OWNED(sc)	mtx_assert(&(sc)->sc_lock, MA_OWNED)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(sc) mtx_assert(&(sc)->sc_lock, MA_NOTOWNED)

/* XXX: not bulletproof, but much better than nothing */
#define ASSERT_SYNCHRONIZED_OP(sc)	\
    KASSERT(IS_BUSY(sc) && \
	(mtx_owned(&(sc)->sc_lock) || sc->last_op_thr == curthread), \
	("%s: operation not synchronized.", __func__))
#define PORT_LOCK(pi)			mtx_lock(&(pi)->pi_lock)
#define PORT_UNLOCK(pi)			mtx_unlock(&(pi)->pi_lock)
#define PORT_LOCK_ASSERT_OWNED(pi)	mtx_assert(&(pi)->pi_lock, MA_OWNED)
#define PORT_LOCK_ASSERT_NOTOWNED(pi)	mtx_assert(&(pi)->pi_lock, MA_NOTOWNED)

#define FL_LOCK(fl)			mtx_lock(&(fl)->fl_lock)
#define FL_TRYLOCK(fl)			mtx_trylock(&(fl)->fl_lock)
#define FL_UNLOCK(fl)			mtx_unlock(&(fl)->fl_lock)
#define FL_LOCK_ASSERT_OWNED(fl)	mtx_assert(&(fl)->fl_lock, MA_OWNED)
#define FL_LOCK_ASSERT_NOTOWNED(fl)	mtx_assert(&(fl)->fl_lock, MA_NOTOWNED)

#define RXQ_FL_LOCK(rxq)		FL_LOCK(&(rxq)->fl)
#define RXQ_FL_UNLOCK(rxq)		FL_UNLOCK(&(rxq)->fl)
#define RXQ_FL_LOCK_ASSERT_OWNED(rxq)	FL_LOCK_ASSERT_OWNED(&(rxq)->fl)
#define RXQ_FL_LOCK_ASSERT_NOTOWNED(rxq) FL_LOCK_ASSERT_NOTOWNED(&(rxq)->fl)

#define EQ_LOCK(eq)			mtx_lock(&(eq)->eq_lock)
#define EQ_TRYLOCK(eq)			mtx_trylock(&(eq)->eq_lock)
#define EQ_UNLOCK(eq)			mtx_unlock(&(eq)->eq_lock)
#define EQ_LOCK_ASSERT_OWNED(eq)	mtx_assert(&(eq)->eq_lock, MA_OWNED)
#define EQ_LOCK_ASSERT_NOTOWNED(eq)	mtx_assert(&(eq)->eq_lock, MA_NOTOWNED)

#define TXQ_LOCK(txq)			EQ_LOCK(&(txq)->eq)
#define TXQ_TRYLOCK(txq)		EQ_TRYLOCK(&(txq)->eq)
#define TXQ_UNLOCK(txq)			EQ_UNLOCK(&(txq)->eq)
#define TXQ_LOCK_ASSERT_OWNED(txq)	EQ_LOCK_ASSERT_OWNED(&(txq)->eq)
#define TXQ_LOCK_ASSERT_NOTOWNED(txq)	EQ_LOCK_ASSERT_NOTOWNED(&(txq)->eq)
#define for_each_txq(pi, iter, q) \
	for (q = &pi->adapter->sge.txq[pi->first_txq], iter = 0; \
	    iter < pi->ntxq; ++iter, ++q)
#define for_each_rxq(pi, iter, q) \
	for (q = &pi->adapter->sge.rxq[pi->first_rxq], iter = 0; \
	    iter < pi->nrxq; ++iter, ++q)
#define for_each_ofld_txq(pi, iter, q) \
	for (q = &pi->adapter->sge.ofld_txq[pi->first_ofld_txq], iter = 0; \
	    iter < pi->nofldtxq; ++iter, ++q)
#define for_each_ofld_rxq(pi, iter, q) \
	for (q = &pi->adapter->sge.ofld_rxq[pi->first_ofld_rxq], iter = 0; \
	    iter < pi->nofldrxq; ++iter, ++q)
/* One for errors, one for firmware events */
#define T4_EXTRA_INTR 2
static inline uint32_t
t4_read_reg(struct adapter *sc, uint32_t reg)
{

	return bus_space_read_4(sc->bt, sc->bh, reg);
}
static inline void
t4_write_reg(struct adapter *sc, uint32_t reg, uint32_t val)
{

	bus_space_write_4(sc->bt, sc->bh, reg, val);
}
static inline uint64_t
t4_read_reg64(struct adapter *sc, uint32_t reg)
{

	return t4_bus_space_read_8(sc->bt, sc->bh, reg);
}
static inline void
t4_write_reg64(struct adapter *sc, uint32_t reg, uint64_t val)
{

	t4_bus_space_write_8(sc->bt, sc->bh, reg, val);
}
static inline void
t4_os_pci_read_cfg1(struct adapter *sc, int reg, uint8_t *val)
{

	*val = pci_read_config(sc->dev, reg, 1);
}
static inline void
t4_os_pci_write_cfg1(struct adapter *sc, int reg, uint8_t val)
{

	pci_write_config(sc->dev, reg, val, 1);
}
static inline void
t4_os_pci_read_cfg2(struct adapter *sc, int reg, uint16_t *val)
{

	*val = pci_read_config(sc->dev, reg, 2);
}
static inline void
t4_os_pci_write_cfg2(struct adapter *sc, int reg, uint16_t val)
{

	pci_write_config(sc->dev, reg, val, 2);
}
static inline void
t4_os_pci_read_cfg4(struct adapter *sc, int reg, uint32_t *val)
{

	*val = pci_read_config(sc->dev, reg, 4);
}
static inline void
t4_os_pci_write_cfg4(struct adapter *sc, int reg, uint32_t val)
{

	pci_write_config(sc->dev, reg, val, 4);
}
static inline struct port_info *
adap2pinfo(struct adapter *sc, int idx)
{

	return (sc->port[idx]);
}
static inline void
t4_os_set_hw_addr(struct adapter *sc, int idx, uint8_t hw_addr[])
{

	bcopy(hw_addr, sc->port[idx]->hw_addr, ETHER_ADDR_LEN);
}
static inline bool is_10G_port(const struct port_info *pi)
{

	return ((pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) != 0);
}
static inline int tx_resume_threshold(struct sge_eq *eq)
{

	return (eq->qsize / 4);
}
/* t4_main.c */
void t4_tx_task(void *, int);
void t4_tx_callout(void *);
int t4_os_find_pci_capability(struct adapter *, int);
int t4_os_pci_save_state(struct adapter *);
int t4_os_pci_restore_state(struct adapter *);
void t4_os_portmod_changed(const struct adapter *, int);
void t4_os_link_changed(struct adapter *, int, int);
void t4_iterate(void (*)(struct adapter *, void *), void *);
int t4_register_cpl_handler(struct adapter *, int, cpl_handler_t);
int t4_register_an_handler(struct adapter *, an_handler_t);
int t4_register_fw_msg_handler(struct adapter *, int, fw_msg_handler_t);
int t4_filter_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *);
int begin_synchronized_op(struct adapter *, struct port_info *, int, char *);
void end_synchronized_op(struct adapter *, int);
/* t4_sge.c */
void t4_sge_modload(void);
int t4_sge_init(struct adapter *);
int t4_create_dma_tag(struct adapter *);
int t4_destroy_dma_tag(struct adapter *);
int t4_setup_adapter_queues(struct adapter *);
int t4_teardown_adapter_queues(struct adapter *);
int t4_setup_port_queues(struct port_info *);
int t4_teardown_port_queues(struct port_info *);
int t4_alloc_tx_maps(struct tx_maps *, bus_dma_tag_t, int, int);
void t4_free_tx_maps(struct tx_maps *, bus_dma_tag_t);
void t4_intr_all(void *);
void t4_intr(void *);
void t4_intr_err(void *);
void t4_intr_evt(void *);
void t4_wrq_tx_locked(struct adapter *, struct sge_wrq *, struct wrqe *);
int t4_eth_tx(struct ifnet *, struct sge_txq *, struct mbuf *);
void t4_update_fl_bufsize(struct ifnet *);
int can_resume_tx(struct sge_eq *);
static inline struct wrqe *
alloc_wrqe(int wr_len, struct sge_wrq *wrq)
{
	int len = offsetof(struct wrqe, wr) + wr_len;
	struct wrqe *wr;

	wr = malloc(len, M_CXGBE, M_NOWAIT);
	if (__predict_false(wr == NULL))
		return (NULL);
	wr->wr_len = wr_len;
	wr->wrq = wrq;
	return (wr);
}
static inline void *
wrtod(struct wrqe *wr)
{
	return (&wr->wr[0]);
}
static inline void
free_wrqe(struct wrqe *wr)
{
	free(wr, M_CXGBE);
}
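/*
 * Illustrative life cycle of a work request entry, using t4_wrq_tx() below.
 * fw_flowc_wr is used here only as an example of a WR layout from
 * firmware/t4fw_interface.h:
 *
 *	struct wrqe *wr;
 *	struct fw_flowc_wr *flowc;
 *
 *	wr = alloc_wrqe(sizeof(*flowc), wrq);
 *	if (wr == NULL)
 *		return (ENOMEM);	// no memory, caller retries later
 *	flowc = wrtod(wr);		// points at wr->wr[], 16-byte aligned
 *	// ... fill in *flowc ...
 *	t4_wrq_tx(sc, wr);		// enqueue; takes the wrq's lock
 */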
static inline void
t4_wrq_tx(struct adapter *sc, struct wrqe *wr)
{
	struct sge_wrq *wrq = wr->wrq;

	TXQ_LOCK(wrq);
	t4_wrq_tx_locked(sc, wrq, wr);
	TXQ_UNLOCK(wrq);
}

#endif