/*-
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef __T4_ADAPTER_H__
#define __T4_ADAPTER_H__

#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <machine/bus.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if_media.h>
#include <netinet/in.h>
#include <netinet/tcp_lro.h>

#include "firmware/t4fw_interface.h"

MALLOC_DECLARE(M_CXGBE);

#define CXGBE_UNIMPLEMENTED(s) \
    panic("%s (%s, line %d) not implemented yet.", s, __FILE__, __LINE__)
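
/*
 * Illustrative note (the example call is hypothetical): a stubbed-out code
 * path would typically invoke CXGBE_UNIMPLEMENTED(__func__), which panics
 * with the offending function name plus the file and line of the stub.
 */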

#if defined(__i386__) || defined(__amd64__)
        __asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));

#ifndef SYSCTL_ADD_UQUAD
#define SYSCTL_ADD_UQUAD SYSCTL_ADD_QUAD
#define sysctl_handle_64 sysctl_handle_quad
#define CTLTYPE_U64 CTLTYPE_QUAD

#if (__FreeBSD_version >= 900030) || \
    ((__FreeBSD_version >= 802507) && (__FreeBSD_version < 900000))

/* XXX: need systemwide bus_space_read_8/bus_space_write_8 */
static __inline uint64_t
t4_bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t offset)
{
        KASSERT(tag == X86_BUS_SPACE_MEM,
            ("%s: can only handle mem space", __func__));

        return (*(volatile uint64_t *)(handle + offset));
}

static __inline void
t4_bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t bsh,
    bus_size_t offset, uint64_t value)
{
        KASSERT(tag == X86_BUS_SPACE_MEM,
            ("%s: can only handle mem space", __func__));

        *(volatile uint64_t *)(bsh + offset) = value;
}

static __inline uint64_t
t4_bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t offset)
{
        return (uint64_t)bus_space_read_4(tag, handle, offset) +
            ((uint64_t)bus_space_read_4(tag, handle, offset + 4) << 32);
}

static __inline void
t4_bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t bsh,
    bus_size_t offset, uint64_t value)
{
        bus_space_write_4(tag, bsh, offset, value);
        bus_space_write_4(tag, bsh, offset + 4, value >> 32);
}
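
/*
 * Note: the fallback above composes a 64-bit value from two 32-bit accesses
 * (low word at 'offset', high word at 'offset + 4'), so the access is not
 * atomic; a register that changes between the two reads can be seen torn.
 */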

typedef struct adapter adapter_t;

        FW_IQ_ESIZE = 64, /* At least 64 mandated by the firmware spec */

        RX_IQ_ESIZE = 64, /* At least 64 so CPL_RX_PKT will fit */

        EQ_ESIZE = 64, /* All egress queues use this entry size */

        RX_FL_ESIZE = EQ_ESIZE, /* 8 64bit addresses */
#if MJUMPAGESIZE != MCLBYTES
        FL_BUF_SIZES = 4, /* cluster, jumbop, jumbo9k, jumbo16k */
#else
        FL_BUF_SIZES = 3, /* cluster, jumbo9k, jumbo16k */
#endif

        TX_WR_FLITS = SGE_MAX_WR_LEN / 8

        /* adapter intr_type */
        INTR_INTX = (1 << 0),

        /* flags understood by begin_synchronized_op */
        HOLD_LOCK = (1 << 0),

        /* flags understood by end_synchronized_op */
        LOCK_HELD = HOLD_LOCK,

        /* adapter flags */
        FULL_INIT_DONE = (1 << 0),
        INTR_DIRECT = (1 << 2), /* direct interrupts for everything */
        MASTER_PF = (1 << 3),
        ADAP_SYSCTL_CTX = (1 << 4),
        TOM_INIT_DONE = (1 << 5),
        CXGBE_BUSY = (1 << 9),

        /* port flags */
        PORT_INIT_DONE = (1 << 1),
        PORT_SYSCTL_CTX = (1 << 2),

#define IS_DOOMED(pi) ((pi)->flags & DOOMED)
#define SET_DOOMED(pi) do {(pi)->flags |= DOOMED;} while (0)
#define IS_BUSY(sc) ((sc)->flags & CXGBE_BUSY)
#define SET_BUSY(sc) do {(sc)->flags |= CXGBE_BUSY;} while (0)
#define CLR_BUSY(sc) do {(sc)->flags &= ~CXGBE_BUSY;} while (0)
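
/*
 * Illustrative sketch (hypothetical pseudo-code, not from the original
 * source): CXGBE_BUSY marks the adapter as owned by an in-progress
 * operation, and DOOMED marks a port that is being detached.  A caller
 * roughly does:
 *
 *	ADAPTER_LOCK(sc);
 *	while (!IS_DOOMED(pi) && IS_BUSY(sc))
 *		sleep until the current operation finishes;
 *	if (IS_DOOMED(pi))
 *		bail out;
 *	SET_BUSY(sc);
 *	ADAPTER_UNLOCK(sc);
 *	... do the work ...
 *	ADAPTER_LOCK(sc);
 *	CLR_BUSY(sc);
 *	ADAPTER_UNLOCK(sc);
 *
 * The driver centralizes this pattern in begin_synchronized_op() and
 * end_synchronized_op(), declared later in this file.
 */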

        struct adapter *adapter;

        struct ifmedia media;

        int16_t xact_addr_filt;/* index of exact MAC address filter */
        uint16_t rss_size; /* size of VI's RSS table slice */
        uint8_t lport; /* associated offload logical port */

        /* These need to be int as they are used in sysctl */
        int ntxq; /* # of tx queues */
        int first_txq; /* index of first tx queue */
        int nrxq; /* # of rx queues */
        int first_rxq; /* index of first rx queue */
        int nofldtxq; /* # of offload tx queues */
        int first_ofld_txq; /* index of first offload tx queue */
        int nofldrxq; /* # of offload rx queues */
        int first_ofld_rxq; /* index of first offload rx queue */

        struct link_config link_cfg;
        struct port_stats stats;

        eventhandler_tag vlan_c;

        struct sysctl_ctx_list ctx; /* from ifconfig up to driver detach */

        uint8_t hw_addr[ETHER_ADDR_LEN]; /* factory MAC address, won't change */

        uint8_t tag_idx; /* the sc->fl_tag this map comes from */

/* DMA maps used for tx */
        uint32_t map_total; /* # of DMA maps */
        uint32_t map_pidx; /* next map to be used */
        uint32_t map_cidx; /* reclaimed up to this index */
        uint32_t map_avail; /* # of available maps */

        uint8_t desc_used; /* # of hardware descriptors used by the WR */
        uint8_t credits; /* NIC txq: # of frames sent out in the WR */

        IQ_ALLOCATED = (1 << 0), /* firmware resources allocated */
        IQ_HAS_FL = (1 << 1), /* iq associated with a freelist */
        IQ_INTR = (1 << 2), /* iq takes direct interrupt */
        IQ_LRO_ENABLED = (1 << 3), /* iq is an eth rxq with LRO enabled */

/*
 * Ingress Queue: T4 is producer, driver is consumer.
 */
        bus_dma_tag_t desc_tag;
        bus_dmamap_t desc_map;
        bus_addr_t ba; /* bus address of descriptor ring */
        uint16_t abs_id; /* absolute SGE id for the iq */
        int8_t intr_pktc_idx; /* packet count threshold index */
        __be64 *desc; /* KVA of descriptor ring */

        struct adapter *adapter;
        const __be64 *cdesc; /* current descriptor */
        uint8_t gen; /* generation bit */
        uint8_t intr_params; /* interrupt holdoff parameters */
        uint8_t intr_next; /* XXX: holdoff for next interrupt */
        uint8_t esize; /* size (bytes) of each entry in the queue */
        uint16_t qsize; /* size (# of entries) of the queue */
        uint16_t cidx; /* consumer index */
        uint16_t cntxt_id; /* SGE context id for the iq */

        STAILQ_ENTRY(sge_iq) link;

        EQ_TYPEMASK = 7, /* 3 lsbits hold the type */
        EQ_ALLOCATED = (1 << 3), /* firmware resources allocated */
        EQ_DOOMED = (1 << 4), /* about to be destroyed */
        EQ_CRFLUSHED = (1 << 5), /* expecting an update from SGE */
        EQ_STALLED = (1 << 6), /* out of hw descriptors or dmamaps */

/* Listed in order of preference. Update t4_sysctls too if you change these */
enum {DOORBELL_UDB, DOORBELL_WCWR, DOORBELL_UDBWC, DOORBELL_KDB};

/*
 * Egress Queue: driver is producer, T4 is consumer.
 *
 * Note: A free list is an egress queue (driver produces the buffers and T4
 * consumes them) but it's special enough to have its own struct (see sge_fl).
 */
        unsigned int flags; /* MUST be first */
        unsigned int cntxt_id; /* SGE context id for the eq */
        bus_dma_tag_t desc_tag;
        bus_dmamap_t desc_map;

        struct tx_desc *desc; /* KVA of descriptor ring */
        bus_addr_t ba; /* bus address of descriptor ring */
        struct sge_qstat *spg; /* status page, for convenience */

        volatile uint32_t *udb; /* KVA of doorbell (lies within BAR2) */
        u_int udb_qid; /* relative qid within the doorbell page */
        uint16_t cap; /* max # of desc, for convenience */
        uint16_t avail; /* available descriptors, for convenience */
        uint16_t qsize; /* size (# of entries) of the queue */
        uint16_t cidx; /* consumer idx (desc idx) */
        uint16_t pidx; /* producer idx (desc idx) */
        uint16_t pending; /* # of descriptors used since last doorbell */
        uint16_t iqid; /* iq that gets egr_update for the eq */
        uint8_t tx_chan; /* tx channel used by the eq */

        struct callout tx_callout;

        uint32_t egr_update; /* # of SGE_EGR_UPDATE notifications for eq */
        uint32_t unstalled; /* recovered from stall */

        FL_STARVING = (1 << 0), /* on the adapter's list of starving fl's */
        FL_DOOMED = (1 << 1), /* about to be destroyed */

#define FL_RUNNING_LOW(fl) (fl->cap - fl->needed <= fl->lowat)
#define FL_NOT_RUNNING_LOW(fl) (fl->cap - fl->needed >= 2 * fl->lowat)
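
/*
 * Illustrative note (the numbers are made-up examples): cap - needed is the
 * count of buffers currently posted to the hardware.  A freelist is "running
 * low" once that count drops to lowat and is not considered recovered until
 * it climbs back to 2 * lowat, which gives the refill logic some hysteresis.
 * E.g. with cap = 1024 and lowat = 128, refill help kicks in at <= 128
 * posted buffers and the freelist is treated as healthy again at >= 256.
 */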

        bus_dma_tag_t desc_tag;
        bus_dmamap_t desc_map;
        bus_dma_tag_t tag[FL_BUF_SIZES];

        __be64 *desc; /* KVA of descriptor ring, ptr to addresses */
        bus_addr_t ba; /* bus address of descriptor ring */
        struct fl_sdesc *sdesc; /* KVA of software descriptor ring */
        uint32_t cap; /* max # of buffers, for convenience */
        uint16_t qsize; /* size (# of entries) of the queue */
        uint16_t cntxt_id; /* SGE context id for the freelist */
        uint32_t cidx; /* consumer idx (buffer idx, NOT hw desc idx) */
        uint32_t pidx; /* producer idx (buffer idx, NOT hw desc idx) */
        uint32_t needed; /* # of buffers needed to fill up fl. */
        uint32_t lowat; /* # of buffers <= this means fl needs help */
        uint32_t pending; /* # of bufs allocated since last doorbell */
        unsigned int dmamap_failed;
        TAILQ_ENTRY(sge_fl) link; /* All starving freelists */

/* txq: SGE egress queue + what's needed for Ethernet NIC */
        struct sge_eq eq; /* MUST be first */

        struct ifnet *ifp; /* the interface this txq belongs to */
        bus_dma_tag_t tx_tag; /* tag for transmit buffers */
        struct buf_ring *br; /* tx buffer ring */
        struct tx_sdesc *sdesc; /* KVA of software descriptor ring */
        struct mbuf *m; /* held up due to temporary resource shortage */

        struct tx_maps txmaps;

        /* stats for common events first */

        uint64_t txcsum; /* # of times hardware assisted with checksum */
        uint64_t tso_wrs; /* # of TSO work requests */
        uint64_t vlan_insertion;/* # of times VLAN tag was inserted */
        uint64_t imm_wrs; /* # of work requests with immediate data */
        uint64_t sgl_wrs; /* # of work requests with direct SGL */
        uint64_t txpkt_wrs; /* # of txpkt work requests (not coalesced) */
        uint64_t txpkts_wrs; /* # of coalesced tx work requests */
        uint64_t txpkts_pkts; /* # of frames in coalesced tx work requests */

        /* stats for not-that-common events */

        uint32_t no_dmamap; /* no DMA map to load the mbuf */
        uint32_t no_desc; /* out of hardware descriptors */
} __aligned(CACHE_LINE_SIZE);

/* rxq: SGE ingress queue + SGE free list + miscellaneous items */
        struct sge_iq iq; /* MUST be first */
        struct sge_fl fl; /* MUST follow iq */

        struct ifnet *ifp; /* the interface this rxq belongs to */
#if defined(INET) || defined(INET6)
        struct lro_ctrl lro; /* LRO state */
#endif

        /* stats for common events first */

        uint64_t rxcsum; /* # of times hardware assisted with checksum */
        uint64_t vlan_extraction;/* # of times VLAN tag was extracted */

        /* stats for not-that-common events */

} __aligned(CACHE_LINE_SIZE);

static inline struct sge_rxq *
iq_to_rxq(struct sge_iq *iq)
{

        return (__containerof(iq, struct sge_rxq, iq));
}

/* ofld_rxq: SGE ingress queue + SGE free list + miscellaneous items */
struct sge_ofld_rxq {
        struct sge_iq iq; /* MUST be first */
        struct sge_fl fl; /* MUST follow iq */
} __aligned(CACHE_LINE_SIZE);

static inline struct sge_ofld_rxq *
iq_to_ofld_rxq(struct sge_iq *iq)
{

        return (__containerof(iq, struct sge_ofld_rxq, iq));
}
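
/*
 * Illustrative note: because iq is the first member of sge_rxq and
 * sge_ofld_rxq, code that is handed a struct sge_iq * (e.g. an rx interrupt
 * path) can recover the enclosing queue.  A typical (hypothetical) use:
 *
 *	struct sge_rxq *rxq = iq_to_rxq(iq);
 *	struct ifnet *ifp = rxq->ifp;
 */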

        STAILQ_ENTRY(wrqe) link;
        uint64_t wr[] __aligned(16);

/*
 * wrq: SGE egress queue that is given prebuilt work requests. Both the control
 * and offload tx queues are of this type.
 */
        struct sge_eq eq; /* MUST be first */

        struct adapter *adapter;

        /* List of WRs held up due to lack of tx descriptors */
        STAILQ_HEAD(, wrqe) wr_list;

        /* stats for common events first */

        uint64_t tx_wrs; /* # of tx work requests */

        /* stats for not-that-common events */

        uint32_t no_desc; /* out of hardware descriptors */
} __aligned(CACHE_LINE_SIZE);

        int timer_val[SGE_NTIMERS];
        int counter_val[SGE_NCOUNTERS];
        int fl_starve_threshold;

        int nrxq; /* total # of Ethernet rx queues */
        int ntxq; /* total # of Ethernet tx queues */

        int nofldrxq; /* total # of TOE rx queues */
        int nofldtxq; /* total # of TOE tx queues */

        int niq; /* total # of ingress queues */
        int neq; /* total # of egress queues */

        struct sge_iq fwq; /* Firmware event queue */
        struct sge_wrq mgmtq; /* Management queue (control queue) */
        struct sge_wrq *ctrlq; /* Control queues */
        struct sge_txq *txq; /* NIC tx queues */
        struct sge_rxq *rxq; /* NIC rx queues */

        struct sge_wrq *ofld_txq; /* TOE tx queues */
        struct sge_ofld_rxq *ofld_rxq; /* TOE rx queues */

        struct sge_iq **iqmap; /* iq->cntxt_id to iq mapping */
        struct sge_eq **eqmap; /* eq->cntxt_id to eq mapping */

typedef int (*cpl_handler_t)(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
typedef int (*an_handler_t)(struct sge_iq *, const struct rsp_ctrl *);
typedef int (*fw_msg_handler_t)(struct adapter *, const __be64 *);

        SLIST_ENTRY(adapter) link;

        /* PCIe register resources */
        struct resource *regs_res;
        struct resource *msix_res;
        bus_space_handle_t bh;

        struct resource *udbs_res;
        volatile uint8_t *udbs_base;

        /* Interrupt information */
        struct resource *res;

        bus_dma_tag_t dmat; /* Parent DMA tag */

        struct taskqueue *tq[NCHAN]; /* taskqueues that flush data out */
        struct port_info *port[MAX_NPORTS];
        uint8_t chan_map[NCHAN];

        void *tom_softc; /* (struct tom_data *) */
        struct tom_tunables tt;
        struct l2t_data *l2t; /* L2 table */
        struct tid_info tids;

        struct adapter_params params;
        struct t4_virt_res vres;

        struct sysctl_ctx_list ctx; /* from adapter_full_init to full_uninit */

        /* Starving free lists */
        struct mtx sfl_lock; /* same cache-line as sc_lock? but that's ok */
        TAILQ_HEAD(, sge_fl) sfl;
        struct callout sfl_callout;

        an_handler_t an_handler __aligned(CACHE_LINE_SIZE);
        fw_msg_handler_t fw_msg_handler[5]; /* NUM_FW6_TYPES */
        cpl_handler_t cpl_handler[0xef]; /* NUM_CPL_CMDS */

        const void *last_op_thr;

#define ADAPTER_LOCK(sc) mtx_lock(&(sc)->sc_lock)
#define ADAPTER_UNLOCK(sc) mtx_unlock(&(sc)->sc_lock)
#define ADAPTER_LOCK_ASSERT_OWNED(sc) mtx_assert(&(sc)->sc_lock, MA_OWNED)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(sc) mtx_assert(&(sc)->sc_lock, MA_NOTOWNED)

/* XXX: not bulletproof, but much better than nothing */
#define ASSERT_SYNCHRONIZED_OP(sc) \
    KASSERT(IS_BUSY(sc) && \
        (mtx_owned(&(sc)->sc_lock) || sc->last_op_thr == curthread), \
        ("%s: operation not synchronized.", __func__))

#define PORT_LOCK(pi) mtx_lock(&(pi)->pi_lock)
#define PORT_UNLOCK(pi) mtx_unlock(&(pi)->pi_lock)
#define PORT_LOCK_ASSERT_OWNED(pi) mtx_assert(&(pi)->pi_lock, MA_OWNED)
#define PORT_LOCK_ASSERT_NOTOWNED(pi) mtx_assert(&(pi)->pi_lock, MA_NOTOWNED)

#define FL_LOCK(fl) mtx_lock(&(fl)->fl_lock)
#define FL_TRYLOCK(fl) mtx_trylock(&(fl)->fl_lock)
#define FL_UNLOCK(fl) mtx_unlock(&(fl)->fl_lock)
#define FL_LOCK_ASSERT_OWNED(fl) mtx_assert(&(fl)->fl_lock, MA_OWNED)
#define FL_LOCK_ASSERT_NOTOWNED(fl) mtx_assert(&(fl)->fl_lock, MA_NOTOWNED)

#define RXQ_FL_LOCK(rxq) FL_LOCK(&(rxq)->fl)
#define RXQ_FL_UNLOCK(rxq) FL_UNLOCK(&(rxq)->fl)
#define RXQ_FL_LOCK_ASSERT_OWNED(rxq) FL_LOCK_ASSERT_OWNED(&(rxq)->fl)
#define RXQ_FL_LOCK_ASSERT_NOTOWNED(rxq) FL_LOCK_ASSERT_NOTOWNED(&(rxq)->fl)

#define EQ_LOCK(eq) mtx_lock(&(eq)->eq_lock)
#define EQ_TRYLOCK(eq) mtx_trylock(&(eq)->eq_lock)
#define EQ_UNLOCK(eq) mtx_unlock(&(eq)->eq_lock)
#define EQ_LOCK_ASSERT_OWNED(eq) mtx_assert(&(eq)->eq_lock, MA_OWNED)
#define EQ_LOCK_ASSERT_NOTOWNED(eq) mtx_assert(&(eq)->eq_lock, MA_NOTOWNED)

#define TXQ_LOCK(txq) EQ_LOCK(&(txq)->eq)
#define TXQ_TRYLOCK(txq) EQ_TRYLOCK(&(txq)->eq)
#define TXQ_UNLOCK(txq) EQ_UNLOCK(&(txq)->eq)
#define TXQ_LOCK_ASSERT_OWNED(txq) EQ_LOCK_ASSERT_OWNED(&(txq)->eq)
#define TXQ_LOCK_ASSERT_NOTOWNED(txq) EQ_LOCK_ASSERT_NOTOWNED(&(txq)->eq)

#define for_each_txq(pi, iter, q) \
    for (q = &pi->adapter->sge.txq[pi->first_txq], iter = 0; \
        iter < pi->ntxq; ++iter, ++q)
#define for_each_rxq(pi, iter, q) \
    for (q = &pi->adapter->sge.rxq[pi->first_rxq], iter = 0; \
        iter < pi->nrxq; ++iter, ++q)
#define for_each_ofld_txq(pi, iter, q) \
    for (q = &pi->adapter->sge.ofld_txq[pi->first_ofld_txq], iter = 0; \
        iter < pi->nofldtxq; ++iter, ++q)
#define for_each_ofld_rxq(pi, iter, q) \
    for (q = &pi->adapter->sge.ofld_rxq[pi->first_ofld_rxq], iter = 0; \
        iter < pi->nofldrxq; ++iter, ++q)

/* One for errors, one for firmware events */
#define T4_EXTRA_INTR 2

static inline uint32_t
t4_read_reg(struct adapter *sc, uint32_t reg)

        return bus_space_read_4(sc->bt, sc->bh, reg);

t4_write_reg(struct adapter *sc, uint32_t reg, uint32_t val)

        bus_space_write_4(sc->bt, sc->bh, reg, val);

static inline uint64_t
t4_read_reg64(struct adapter *sc, uint32_t reg)

        return t4_bus_space_read_8(sc->bt, sc->bh, reg);

t4_write_reg64(struct adapter *sc, uint32_t reg, uint64_t val)

        t4_bus_space_write_8(sc->bt, sc->bh, reg, val);

t4_os_pci_read_cfg1(struct adapter *sc, int reg, uint8_t *val)

        *val = pci_read_config(sc->dev, reg, 1);

t4_os_pci_write_cfg1(struct adapter *sc, int reg, uint8_t val)

        pci_write_config(sc->dev, reg, val, 1);

t4_os_pci_read_cfg2(struct adapter *sc, int reg, uint16_t *val)

        *val = pci_read_config(sc->dev, reg, 2);

t4_os_pci_write_cfg2(struct adapter *sc, int reg, uint16_t val)

        pci_write_config(sc->dev, reg, val, 2);

t4_os_pci_read_cfg4(struct adapter *sc, int reg, uint32_t *val)

        *val = pci_read_config(sc->dev, reg, 4);

t4_os_pci_write_cfg4(struct adapter *sc, int reg, uint32_t val)

        pci_write_config(sc->dev, reg, val, 4);

static inline struct port_info *
adap2pinfo(struct adapter *sc, int idx)

        return (sc->port[idx]);

t4_os_set_hw_addr(struct adapter *sc, int idx, uint8_t hw_addr[])

        bcopy(hw_addr, sc->port[idx]->hw_addr, ETHER_ADDR_LEN);

is_10G_port(const struct port_info *pi)

        return ((pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) != 0);

is_40G_port(const struct port_info *pi)

        return ((pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G) != 0);

tx_resume_threshold(struct sge_eq *eq)

        return (eq->qsize / 4);

void t4_tx_task(void *, int);
void t4_tx_callout(void *);
int t4_os_find_pci_capability(struct adapter *, int);
int t4_os_pci_save_state(struct adapter *);
int t4_os_pci_restore_state(struct adapter *);
void t4_os_portmod_changed(const struct adapter *, int);
void t4_os_link_changed(struct adapter *, int, int, int);
void t4_iterate(void (*)(struct adapter *, void *), void *);
int t4_register_cpl_handler(struct adapter *, int, cpl_handler_t);
int t4_register_an_handler(struct adapter *, an_handler_t);
int t4_register_fw_msg_handler(struct adapter *, int, fw_msg_handler_t);
int t4_filter_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *);
int begin_synchronized_op(struct adapter *, struct port_info *, int, char *);
void end_synchronized_op(struct adapter *, int);

void t4_sge_modload(void);
void t4_init_sge_cpl_handlers(struct adapter *);
void t4_tweak_chip_settings(struct adapter *);
int t4_read_chip_settings(struct adapter *);
int t4_create_dma_tag(struct adapter *);
int t4_destroy_dma_tag(struct adapter *);
int t4_setup_adapter_queues(struct adapter *);
int t4_teardown_adapter_queues(struct adapter *);
int t4_setup_port_queues(struct port_info *);
int t4_teardown_port_queues(struct port_info *);
int t4_alloc_tx_maps(struct tx_maps *, bus_dma_tag_t, int, int);
void t4_free_tx_maps(struct tx_maps *, bus_dma_tag_t);
void t4_intr_all(void *);
void t4_intr(void *);
void t4_intr_err(void *);
void t4_intr_evt(void *);
void t4_wrq_tx_locked(struct adapter *, struct sge_wrq *, struct wrqe *);
int t4_eth_tx(struct ifnet *, struct sge_txq *, struct mbuf *);
void t4_update_fl_bufsize(struct ifnet *);
int can_resume_tx(struct sge_eq *);

static inline struct wrqe *
alloc_wrqe(int wr_len, struct sge_wrq *wrq)
        int len = offsetof(struct wrqe, wr) + wr_len;

        wr = malloc(len, M_CXGBE, M_NOWAIT);
        if (__predict_false(wr == NULL))

wrtod(struct wrqe *wr)

free_wrqe(struct wrqe *wr)

t4_wrq_tx(struct adapter *sc, struct wrqe *wr)
        struct sge_wrq *wrq = wr->wrq;

        t4_wrq_tx_locked(sc, wrq, wr);
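
/*
 * Illustrative example (hypothetical helper, not part of the driver): the
 * usual lifecycle of a work request sent through a wrq is alloc_wrqe() to
 * get a wrqe sized for the WR, wrtod() to obtain the data area to fill in,
 * and t4_wrq_tx() to hand it to the hardware.  'len' is assumed to be a
 * valid work request length for the firmware interface in use.
 */
static inline int
example_send_raw_wr(struct adapter *sc, struct sge_wrq *wrq, const void *buf,
    int len)
{
        struct wrqe *wr;

        wr = alloc_wrqe(len, wrq);
        if (wr == NULL)
                return (ENOMEM);        /* out of memory, try again later */
        bcopy(buf, wrtod(wr), len);     /* build/copy the work request */
        t4_wrq_tx(sc, wr);              /* send it (or queue it on wr_list) */
        return (0);
}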