/*-
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef __T4_ADAPTER_H__
#define __T4_ADAPTER_H__

#include <sys/types.h>
#include <sys/malloc.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <machine/bus.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>
#include <netinet/tcp_lro.h>

#include "common/t4fw_interface.h"

#define T4_FWNAME "t4fw"

MALLOC_DECLARE(M_CXGBE);
#define CXGBE_UNIMPLEMENTED(s) \
	panic("%s (%s, line %d) not implemented yet.", s, __FILE__, __LINE__)
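
/*
 * Illustrative note (added, not from the original): CXGBE_UNIMPLEMENTED()
 * takes a string naming the missing feature, e.g. CXGBE_UNIMPLEMENTED(__func__),
 * and panics with the file and line number of the call site.
 */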

#if defined(__i386__) || defined(__amd64__)
static __inline void
prefetch(void *x)
{
	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
}
#else
#define prefetch(x)
#endif

#ifdef __amd64__
/* XXX: need systemwide bus_space_read_8/bus_space_write_8 */
static __inline uint64_t
t4_bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t offset)
{
	KASSERT(tag == X86_BUS_SPACE_MEM,
	    ("%s: can only handle mem space", __func__));

	return (*(volatile uint64_t *)(handle + offset));
}

static __inline void
t4_bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t bsh,
    bus_size_t offset, uint64_t value)
{
	KASSERT(tag == X86_BUS_SPACE_MEM,
	    ("%s: can only handle mem space", __func__));

	*(volatile uint64_t *)(bsh + offset) = value;
}
#else
static __inline uint64_t
t4_bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t offset)
{
	return (uint64_t)bus_space_read_4(tag, handle, offset) +
	    ((uint64_t)bus_space_read_4(tag, handle, offset + 4) << 32);
}

static __inline void
t4_bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t bsh,
    bus_size_t offset, uint64_t value)
{
	bus_space_write_4(tag, bsh, offset, value);
	bus_space_write_4(tag, bsh, offset + 4, value >> 32);
}
#endif
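
/*
 * Descriptive note (added): on amd64 the helpers above access the 64-bit
 * register directly; the generic fallback composes the value from two
 * 32-bit bus_space accesses (low word at 'offset', high word at
 * 'offset + 4'), so the 64-bit access is not atomic on those platforms.
 */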

typedef struct adapter adapter_t;

enum {
	FW_IQ_ESIZE = 64,	/* At least 64 mandated by the firmware spec */

	INTR_IQ_ESIZE = 64,	/* Handles some CPLs too, do not reduce */

	RX_IQ_ESIZE = 64,	/* At least 64 so CPL_RX_PKT will fit */

	RX_FL_ESIZE = 64,	/* 8 64bit addresses */

#if MJUMPAGESIZE != MCLBYTES
	FL_BUF_SIZES = 4,	/* cluster, jumbop, jumbo9k, jumbo16k */
#else
	FL_BUF_SIZES = 3,	/* cluster, jumbo9k, jumbo16k */
#endif

	TX_WR_FLITS = SGE_MAX_WR_LEN / 8
};

enum {
	/* adapter intr_type */
	INTR_INTX = (1 << 0),
	INTR_MSI = (1 << 1),
	INTR_MSIX = (1 << 2)
};

enum {
	/* adapter flags */
	FULL_INIT_DONE = (1 << 0),
	INTR_SHARED = (1 << 2),	/* one set of intrq's for all ports */
	CXGBE_BUSY = (1 << 9),

	/* port flags */
	DOOMED = (1 << 0),
	VI_ENABLED = (1 << 1),
};

#define IS_DOOMED(pi)	(pi->flags & DOOMED)
#define SET_DOOMED(pi)	do {pi->flags |= DOOMED;} while (0)
#define IS_BUSY(sc)	(sc->flags & CXGBE_BUSY)
#define SET_BUSY(sc)	do {sc->flags |= CXGBE_BUSY;} while (0)
#define CLR_BUSY(sc)	do {sc->flags &= ~CXGBE_BUSY;} while (0)
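
/*
 * Usage note (added, not from the original): these helpers only test or
 * update the flags word; callers are expected to hold the corresponding
 * port/adapter lock (see PORT_LOCK/ADAPTER_LOCK below) around them.
 */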

struct port_info {
	struct adapter *adapter;

	struct ifmedia media;

	struct mtx pi_lock;
	unsigned long flags;

	int16_t  xact_addr_filt;/* index of exact MAC address filter */
	uint16_t rss_size;	/* size of VI's RSS table slice */
	uint8_t  lport;		/* associated offload logical port */

	/* These need to be int as they are used in sysctl */
	int ntxq;	/* # of tx queues */
	int first_txq;	/* index of first tx queue */
	int nrxq;	/* # of rx queues */
	int first_rxq;	/* index of first rx queue */

	struct link_config link_cfg;
	struct port_stats stats;

	struct taskqueue *tq;

	struct sysctl_ctx_list ctx;	/* lives from ifconfig up to down */
	struct sysctl_oid *oid_rxq;
	struct sysctl_oid *oid_txq;

	uint8_t hw_addr[ETHER_ADDR_LEN]; /* factory MAC address, won't change */
};

struct fl_sdesc {
	uint8_t tag_idx;	/* the sc->fl_tag this map comes from */
};

struct tx_sdesc {
	uint8_t desc_used;	/* # of hardware descriptors used by the WR */
	uint8_t credits;	/* NIC txq: # of frames sent out in the WR */
};

typedef void (iq_intr_handler_t)(void *);

enum {
	/* iq flags */
	IQ_ALLOCATED	= (1 << 1),	/* firmware resources allocated */
	IQ_STARTED	= (1 << 2),	/* started */
};

/*
 * Ingress Queue: T4 is producer, driver is consumer.
 */
struct sge_iq {
	bus_dma_tag_t desc_tag;
	bus_dmamap_t desc_map;
	bus_addr_t ba;		/* bus address of descriptor ring */
	uint16_t abs_id;	/* absolute SGE id for the iq */
	int8_t   intr_pktc_idx;	/* packet count threshold index */
	iq_intr_handler_t *handler;
	__be64  *desc;		/* KVA of descriptor ring */

	volatile uint32_t state;
	struct adapter *adapter;
	const __be64 *cdesc;	/* current descriptor */
	uint8_t  gen;		/* generation bit */
	uint8_t  intr_params;	/* interrupt holdoff parameters */
	uint8_t  intr_next;	/* holdoff for next interrupt */
	uint8_t  esize;		/* size (bytes) of each entry in the queue */
	uint16_t qsize;		/* size (# of entries) of the queue */
	uint16_t cidx;		/* consumer index */
	uint16_t cntxt_id;	/* SGE context id for the iq */
};
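
/*
 * Consumption sketch (assumed, for illustration): the rx path walks the
 * ring via cdesc/cidx and compares each descriptor's generation bit with
 * iq->gen to detect new entries, flipping gen whenever cidx wraps past
 * qsize.  intr_params/intr_next hold the SGE interrupt holdoff settings.
 */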

enum {
	/* eq flags */
	EQ_ALLOCATED	= (1 << 1),	/* firmware resources allocated */
	EQ_STARTED	= (1 << 2),	/* started */
	EQ_CRFLUSHED	= (1 << 3),	/* expecting an update from SGE */
};

/*
 * Egress Queue: driver is producer, T4 is consumer.
 *
 * Note: A free list is an egress queue (driver produces the buffers and T4
 * consumes them) but it's special enough to have its own struct (see sge_fl).
 */
struct sge_eq {
	bus_dma_tag_t desc_tag;
	bus_dmamap_t desc_map;
	struct mtx eq_lock;
	unsigned int flags;

	struct tx_desc *desc;	/* KVA of descriptor ring */
	bus_addr_t ba;		/* bus address of descriptor ring */
	struct sge_qstat *spg;	/* status page, for convenience */
	uint16_t cap;		/* max # of desc, for convenience */
	uint16_t avail;		/* available descriptors, for convenience */
	uint16_t qsize;		/* size (# of entries) of the queue */
	uint16_t cidx;		/* consumer idx (desc idx) */
	uint16_t pidx;		/* producer idx (desc idx) */
	uint16_t pending;	/* # of descriptors used since last doorbell */
	uint16_t iqid;		/* iq that gets egr_update for the eq */
	unsigned int cntxt_id;	/* SGE context id for the eq */
};
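
/*
 * Doorbell note (added): producers advance pidx and accumulate the count
 * in 'pending'; a single SGE doorbell write then tells the hardware about
 * all descriptors queued since the previous doorbell.
 */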

struct sge_fl {
	bus_dma_tag_t desc_tag;
	bus_dmamap_t desc_map;
	bus_dma_tag_t tag[FL_BUF_SIZES];
	struct mtx fl_lock;

	__be64 *desc;		/* KVA of descriptor ring, ptr to addresses */
	bus_addr_t ba;		/* bus address of descriptor ring */
	struct fl_sdesc *sdesc;	/* KVA of software descriptor ring */
	uint32_t cap;		/* max # of buffers, for convenience */
	uint16_t qsize;		/* size (# of entries) of the queue */
	uint16_t cntxt_id;	/* SGE context id for the freelist */
	uint32_t cidx;		/* consumer idx (buffer idx, NOT hw desc idx) */
	uint32_t pidx;		/* producer idx (buffer idx, NOT hw desc idx) */
	uint32_t needed;	/* # of buffers needed to fill up fl. */
	uint32_t pending;	/* # of bufs allocated since last doorbell */
	unsigned int dmamap_failed;
};

/* txq: SGE egress queue + what's needed for Ethernet NIC */
struct sge_txq {
	struct sge_eq eq;	/* MUST be first */

	struct ifnet *ifp;	/* the interface this txq belongs to */
	bus_dma_tag_t tx_tag;	/* tag for transmit buffers */
	struct buf_ring *br;	/* tx buffer ring */
	struct tx_sdesc *sdesc;	/* KVA of software descriptor ring */
	struct mbuf *m;		/* held up due to temporary resource shortage */
	struct task resume_tx;

	/* DMA maps used for tx */
	uint32_t map_total;	/* # of DMA maps */
	uint32_t map_pidx;	/* next map to be used */
	uint32_t map_cidx;	/* reclaimed up to this index */
	uint32_t map_avail;	/* # of available maps */

	/* stats for common events first */
	uint64_t txcsum;	/* # of times hardware assisted with checksum */
	uint64_t tso_wrs;	/* # of IPv4 TSO work requests */
	uint64_t vlan_insertion;/* # of times VLAN tag was inserted */
	uint64_t imm_wrs;	/* # of work requests with immediate data */
	uint64_t sgl_wrs;	/* # of work requests with direct SGL */
	uint64_t txpkt_wrs;	/* # of txpkt work requests (not coalesced) */
	uint64_t txpkts_wrs;	/* # of coalesced tx work requests */
	uint64_t txpkts_pkts;	/* # of frames in coalesced tx work requests */

	/* stats for not-that-common events */
	uint32_t no_dmamap;	/* no DMA map to load the mbuf */
	uint32_t no_desc;	/* out of hardware descriptors */
	uint32_t egr_update;	/* # of SGE_EGR_UPDATE notifications for txq */
} __aligned(CACHE_LINE_SIZE);
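
/*
 * Layout note (added): 'eq' must be first so that a struct sge_txq * can be
 * passed around as a struct sge_eq * (the same trick is used for
 * sge_rxq/sge_iq and sge_ctrlq/sge_eq below).
 */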

enum {
	/* rxq flags */
	RXQ_LRO_ENABLED	= (1 << 0)
};

/* rxq: SGE ingress queue + SGE free list + miscellaneous items */
struct sge_rxq {
	struct sge_iq iq;	/* MUST be first */
	struct sge_fl fl;

	struct ifnet *ifp;	/* the interface this rxq belongs to */

	struct lro_ctrl lro;	/* LRO state */

	/* stats for common events first */
	uint64_t rxcsum;	/* # of times hardware assisted with checksum */
	uint64_t vlan_extraction;/* # of times VLAN tag was extracted */

	/* stats for not-that-common events */
} __aligned(CACHE_LINE_SIZE);

/* ctrlq: SGE egress queue + stats for control queue */
struct sge_ctrlq {
	struct sge_eq eq;	/* MUST be first */

	/* stats for common events first */

	/* stats for not-that-common events */
	uint32_t no_desc;	/* out of hardware descriptors */
} __aligned(CACHE_LINE_SIZE);

struct sge {
	uint16_t timer_val[SGE_NTIMERS];
	uint8_t  counter_val[SGE_NCOUNTERS];
	int fl_starve_threshold;

	int nrxq;	/* total rx queues (all ports and the rest) */
	int ntxq;	/* total tx queues (all ports and the rest) */
	int niq;	/* total ingress queues */
	int neq;	/* total egress queues */

	struct sge_iq fwq;	/* Firmware event queue */
	struct sge_ctrlq *ctrlq;/* Control queues */
	struct sge_iq *intrq;	/* Interrupt queues */
	struct sge_txq *txq;	/* NIC tx queues */
	struct sge_rxq *rxq;	/* NIC rx queues */

	struct sge_iq **iqmap;	/* iq->cntxt_id to iq mapping */
	struct sge_eq **eqmap;	/* eq->cntxt_id to eq mapping */
};
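
/*
 * Lookup note (added): iqmap/eqmap are indexed by the SGE context id carried
 * in events from the hardware, translating a cntxt_id back to the driver's
 * queue structure.
 */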

struct adapter {
	device_t dev;

	/* PCIe register resources */
	struct resource *regs_res;
	struct resource *msix_res;
	bus_space_handle_t bh;
	bus_space_tag_t bt;

	/* Interrupt information */
	int intr_type;
	int intr_count;
	struct resource *res;

	bus_dma_tag_t dmat;	/* Parent DMA tag */

	struct sge sge;

	struct port_info *port[MAX_NPORTS];
	uint8_t chan_map[NCHAN];

	struct l2t_data *l2t;	/* L2 table */
	struct tid_info tids;

	int registered_device_map;
	int flags;

	struct adapter_params params;
	struct t4_virt_res vres;

	struct sysctl_ctx_list ctx; /* from first_port_up to last_port_down */
	struct sysctl_oid *oid_fwq;
	struct sysctl_oid *oid_ctrlq;
	struct sysctl_oid *oid_intrq;

	struct mtx sc_lock;
};

#define ADAPTER_LOCK(sc)		mtx_lock(&(sc)->sc_lock)
#define ADAPTER_UNLOCK(sc)		mtx_unlock(&(sc)->sc_lock)
#define ADAPTER_LOCK_ASSERT_OWNED(sc)	mtx_assert(&(sc)->sc_lock, MA_OWNED)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(sc) mtx_assert(&(sc)->sc_lock, MA_NOTOWNED)

#define PORT_LOCK(pi)			mtx_lock(&(pi)->pi_lock)
#define PORT_UNLOCK(pi)			mtx_unlock(&(pi)->pi_lock)
#define PORT_LOCK_ASSERT_OWNED(pi)	mtx_assert(&(pi)->pi_lock, MA_OWNED)
#define PORT_LOCK_ASSERT_NOTOWNED(pi)	mtx_assert(&(pi)->pi_lock, MA_NOTOWNED)

#define FL_LOCK(fl)			mtx_lock(&(fl)->fl_lock)
#define FL_TRYLOCK(fl)			mtx_trylock(&(fl)->fl_lock)
#define FL_UNLOCK(fl)			mtx_unlock(&(fl)->fl_lock)
#define FL_LOCK_ASSERT_OWNED(fl)	mtx_assert(&(fl)->fl_lock, MA_OWNED)
#define FL_LOCK_ASSERT_NOTOWNED(fl)	mtx_assert(&(fl)->fl_lock, MA_NOTOWNED)

#define RXQ_FL_LOCK(rxq)		FL_LOCK(&(rxq)->fl)
#define RXQ_FL_UNLOCK(rxq)		FL_UNLOCK(&(rxq)->fl)
#define RXQ_FL_LOCK_ASSERT_OWNED(rxq)	FL_LOCK_ASSERT_OWNED(&(rxq)->fl)
#define RXQ_FL_LOCK_ASSERT_NOTOWNED(rxq) FL_LOCK_ASSERT_NOTOWNED(&(rxq)->fl)

#define EQ_LOCK(eq)			mtx_lock(&(eq)->eq_lock)
#define EQ_TRYLOCK(eq)			mtx_trylock(&(eq)->eq_lock)
#define EQ_UNLOCK(eq)			mtx_unlock(&(eq)->eq_lock)
#define EQ_LOCK_ASSERT_OWNED(eq)	mtx_assert(&(eq)->eq_lock, MA_OWNED)
#define EQ_LOCK_ASSERT_NOTOWNED(eq)	mtx_assert(&(eq)->eq_lock, MA_NOTOWNED)

#define TXQ_LOCK(txq)			EQ_LOCK(&(txq)->eq)
#define TXQ_TRYLOCK(txq)		EQ_TRYLOCK(&(txq)->eq)
#define TXQ_UNLOCK(txq)			EQ_UNLOCK(&(txq)->eq)
#define TXQ_LOCK_ASSERT_OWNED(txq)	EQ_LOCK_ASSERT_OWNED(&(txq)->eq)
#define TXQ_LOCK_ASSERT_NOTOWNED(txq)	EQ_LOCK_ASSERT_NOTOWNED(&(txq)->eq)
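
/*
 * Illustrative usage (not from the original): queue state is manipulated
 * with the embedded lock held, e.g.
 *
 *	TXQ_LOCK(txq);
 *	... update txq->eq.pidx, write descriptors, ring the doorbell ...
 *	TXQ_UNLOCK(txq);
 *
 * The TXQ_* and RXQ_FL_* macros simply resolve to the eq/fl locks above.
 */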

#define for_each_txq(pi, iter, txq) \
	txq = &pi->adapter->sge.txq[pi->first_txq]; \
	for (iter = 0; iter < pi->ntxq; ++iter, ++txq)
#define for_each_rxq(pi, iter, rxq) \
	rxq = &pi->adapter->sge.rxq[pi->first_rxq]; \
	for (iter = 0; iter < pi->nrxq; ++iter, ++rxq)
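
/*
 * Illustrative usage (assumed): the caller declares the cursor variables,
 * e.g.
 *
 *	struct sge_txq *txq;
 *	int i;
 *
 *	for_each_txq(pi, i, txq) {
 *		... per-queue work ...
 *	}
 *
 * Note that each macro expands to two statements, so it cannot be the sole
 * body of an un-braced if/else.
 */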

/* One for errors, one for firmware events */
#define T4_EXTRA_INTR 2
#define NINTRQ(sc) ((sc)->intr_count > T4_EXTRA_INTR ? \
    (sc)->intr_count - T4_EXTRA_INTR : 1)
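
/*
 * Example (added): with 10 interrupt vectors NINTRQ() yields 8 interrupt
 * queues (two vectors are reserved for errors and firmware events); with a
 * single INTx/MSI vector it falls back to 1.
 */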

static inline uint32_t
t4_read_reg(struct adapter *sc, uint32_t reg)
{
	return bus_space_read_4(sc->bt, sc->bh, reg);
}

static inline void
t4_write_reg(struct adapter *sc, uint32_t reg, uint32_t val)
{
	bus_space_write_4(sc->bt, sc->bh, reg, val);
}

static inline uint64_t
t4_read_reg64(struct adapter *sc, uint32_t reg)
{
	return t4_bus_space_read_8(sc->bt, sc->bh, reg);
}

static inline void
t4_write_reg64(struct adapter *sc, uint32_t reg, uint64_t val)
{
	t4_bus_space_write_8(sc->bt, sc->bh, reg, val);
}

static inline void
t4_os_pci_read_cfg1(struct adapter *sc, int reg, uint8_t *val)
{
	*val = pci_read_config(sc->dev, reg, 1);
}

static inline void
t4_os_pci_write_cfg1(struct adapter *sc, int reg, uint8_t val)
{
	pci_write_config(sc->dev, reg, val, 1);
}

static inline void
t4_os_pci_read_cfg2(struct adapter *sc, int reg, uint16_t *val)
{
	*val = pci_read_config(sc->dev, reg, 2);
}

static inline void
t4_os_pci_write_cfg2(struct adapter *sc, int reg, uint16_t val)
{
	pci_write_config(sc->dev, reg, val, 2);
}

static inline void
t4_os_pci_read_cfg4(struct adapter *sc, int reg, uint32_t *val)
{
	*val = pci_read_config(sc->dev, reg, 4);
}

static inline void
t4_os_pci_write_cfg4(struct adapter *sc, int reg, uint32_t val)
{
	pci_write_config(sc->dev, reg, val, 4);
}

static inline struct port_info *
adap2pinfo(struct adapter *sc, int idx)
{
	return (sc->port[idx]);
}

static inline void
t4_os_set_hw_addr(struct adapter *sc, int idx, uint8_t hw_addr[])
{
	bcopy(hw_addr, sc->port[idx]->hw_addr, ETHER_ADDR_LEN);
}

static inline bool is_10G_port(const struct port_info *pi)
{
	return ((pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) != 0);
}

void cxgbe_txq_start(void *, int);
int t4_os_find_pci_capability(struct adapter *, int);
int t4_os_pci_save_state(struct adapter *);
int t4_os_pci_restore_state(struct adapter *);
void t4_os_portmod_changed(const struct adapter *, int);
void t4_os_link_changed(struct adapter *, int, int);

void t4_sge_modload(void);
void t4_sge_init(struct adapter *);
int t4_create_dma_tag(struct adapter *);
int t4_destroy_dma_tag(struct adapter *);
int t4_setup_adapter_queues(struct adapter *);
int t4_teardown_adapter_queues(struct adapter *);
int t4_setup_eth_queues(struct port_info *);
int t4_teardown_eth_queues(struct port_info *);
void t4_intr_all(void *);
void t4_intr(void *);
void t4_intr_err(void *);
void t4_intr_evt(void *);
int t4_mgmt_tx(struct adapter *, struct mbuf *);
int t4_eth_tx(struct ifnet *, struct sge_txq *, struct mbuf *);
void t4_update_fl_bufsize(struct ifnet *);

#endif /* __T4_ADAPTER_H__ */