1 /**************************************************************************
3 Copyright (c) 2007-2009, Chelsio Inc.
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Neither the name of the Chelsio Corporation nor the names of its
13 contributors may be used to endorse or promote products derived from
14 this software without specific prior written permission.
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
30 ***************************************************************************/
33 #ifndef _CXGB_ADAPTER_H_
34 #define _CXGB_ADAPTER_H_
37 #include <sys/mutex.h>
40 #include <sys/socket.h>
41 #include <sys/sockio.h>
42 #include <sys/condvar.h>
43 #include <sys/buf_ring.h>
45 #include <net/ethernet.h>
47 #include <net/if_media.h>
48 #include <net/if_dl.h>
49 #include <netinet/tcp_lro.h>
51 #include <machine/bus.h>
52 #include <machine/resource.h>
54 #include <sys/bus_dma.h>
55 #include <dev/pci/pcireg.h>
56 #include <dev/pci/pcivar.h>
58 #include <cxgb_osdep.h>
60 #include <sys/mbufq.h>
/* Global debug flag shared across the cxgb driver sources. */
extern int cxgb_debug;
/*
 * Lock-debug wrappers: log each mutex init/destroy with its call site
 * before invoking the mtx(9) primitive.  NOTE(review): this copy of the
 * file appears to be missing the surrounding conditional that selects the
 * debug vs. plain variants and the do { } while (0) statement wrappers,
 * so the trailing backslash-continuations below are dangling.
 */
#define MTX_INIT(lock, lockname, class, flags) \
	printf("initializing %s at %s:%d\n", lockname, __FILE__, __LINE__); \
	mtx_init((lock), lockname, class, flags); \
#define MTX_DESTROY(lock) \
	printf("destroying %s at %s:%d\n", (lock)->lock_object.lo_name, __FILE__, __LINE__); \
	mtx_destroy((lock)); \
/* Non-debug variants map directly onto the mtx(9) primitives. */
#define MTX_INIT mtx_init
#define MTX_DESTROY mtx_destroy
	/* NOTE(review): trailing members of the per-port softc
	 * (struct port_info); the struct header and earlier members are
	 * not visible in this copy of the file. */
	struct adapter *adapter;	/* back pointer to the owning adapter */
	const struct port_type_info *port_type;
	struct link_config link_config;
	uint8_t hw_addr[ETHER_ADDR_LEN];	/* station MAC address */
	struct task timer_reclaim_task;
	struct cdev *port_cdev;		/* per-port character device node */

#define PORT_LOCK_NAME_LEN 32
#define PORT_NAME_LEN 32
	char lockbuf[PORT_LOCK_NAME_LEN];	/* storage for the lock name */
	char namebuf[PORT_NAME_LEN];		/* storage for the port name */
} __aligned(L1_CACHE_BYTES);	/* cache-line align each port_info */
	/*
	 * Adapter-wide state flags: single-bit values OR-ed into a flags
	 * word.  NOTE(review): the opening "enum {" of this flag list is
	 * not visible in this copy of the file.
	 */
	FULL_INIT_DONE = (1 << 0),
	USING_MSI = (1 << 1),
	USING_MSIX = (1 << 2),
	QUEUES_BOUND = (1 << 3),
	FW_UPTODATE = (1 << 4),
	TPS_UPTODATE = (1 << 5),
	CXGB_SHUTDOWN = (1 << 6),
	CXGB_OFLD_INIT = (1 << 7),
	TP_PARITY_INIT = (1 << 8),
	CXGB_BUSY = (1 << 9),
/*
 * Adapter/port state test-and-set helpers.  Arguments are fully
 * parenthesized so the macros expand safely for any argument expression,
 * and the setters use do { } while (0) so they behave as a single
 * statement in if/else bodies.
 */
#define IS_DOOMED(p)	((p)->flags & DOOMED)
#define SET_DOOMED(p)	do { (p)->flags |= DOOMED; } while (0)
#define IS_BUSY(sc)	((sc)->flags & CXGB_BUSY)
#define SET_BUSY(sc)	do { (sc)->flags |= CXGB_BUSY; } while (0)
#define CLR_BUSY(sc)	do { (sc)->flags &= ~CXGB_BUSY; } while (0)
/* Default ring sizes (number of descriptors) for the SGE queues. */
#define FL_Q_SIZE 4096		/* free list */
#define JUMBO_Q_SIZE 1024	/* jumbo free list */
#define RSPQ_Q_SIZE 2048	/* response queue */
#define TX_ETH_Q_SIZE 1024	/* Ethernet transmit queue */
#define TX_OFLD_Q_SIZE 1024	/* offload transmit queue */
#define TX_CTRL_Q_SIZE 256	/* control queue */
/* work request size in bytes */
#define WR_LEN (WR_FLITS * 8)	/* WR_FLITS 8-byte flits per work request */
/* Payload bytes that fit in a WR after the LSO CPL header. */
#define PIO_LEN (WR_LEN - sizeof(struct cpl_tx_pkt_lso))
	/* NOTE(review): tail of the LRO state structure; the struct
	 * header is not visible in this copy of the file. */
	unsigned short enabled;	/* LRO enable state */
	struct lro_ctrl ctrl;	/* tcp_lro control block */

#define RX_BUNDLE_SIZE 8
	/* NOTE(review): members of the SGE response-queue structure; the
	 * struct header and several members are missing from this copy. */
	uint32_t holdoff_tmr;	/* interrupt holdoff timer value */
	uint32_t next_holdoff;
	uint32_t async_notif;
	uint32_t offload_pkts;		/* statistics counters */
	uint32_t offload_bundles;
	uint32_t unhandled_irqs;
	bus_addr_t phys_addr;	/* bus address of the descriptor ring */
	bus_dma_tag_t desc_tag;	/* busdma tag for the ring */
	bus_dmamap_t desc_map;	/* busdma map for the ring */
	struct t3_mbuf_hdr rspq_mh;
	struct rsp_desc *desc;	/* KVA of the response descriptor ring */

#define RSPQ_NAME_LEN 32
	char lockbuf[RSPQ_NAME_LEN];	/* storage for the queue lock's name */
	/* Ring-dump window (start index / count) — presumably driven by
	 * a sysctl elsewhere; confirm against the sge code. */
	uint32_t rspq_dump_start;
	uint32_t rspq_dump_count;
	/* NOTE(review): members of the SGE free-buffer-list structure;
	 * the struct header is not visible in this copy of the file. */
	bus_addr_t phys_addr;	/* bus address of the descriptor ring */
	bus_dma_tag_t desc_tag;	/* busdma tag for the ring */
	bus_dmamap_t desc_map;	/* busdma map for the ring */
	bus_dma_tag_t entry_tag;	/* tag for per-buffer DMA maps */
	struct rx_desc *desc;	/* hardware descriptor ring */
	struct rx_sw_desc *sdesc;	/* software shadow ring */
#define TXQ_TRANSMITTING 0x1	/* queue-state flag: transmit in progress */

	/* NOTE(review): members of the SGE transmit-queue structure; the
	 * struct header and several members are missing from this copy. */
	struct tx_desc *desc;	/* hardware descriptor ring */
	struct tx_sw_desc *sdesc;	/* software shadow ring */
	bus_addr_t phys_addr;	/* bus address of the descriptor ring */
	struct task qresume_task;
	struct task qreclaim_task;
	bus_dma_tag_t desc_tag;	/* busdma tag for the ring */
	bus_dmamap_t desc_map;	/* busdma map for the ring */
	bus_dma_tag_t entry_tag;	/* tag for per-packet DMA maps */
	struct mbuf_head sendq;	/* queue of pending mbufs */
	struct buf_ring *txq_mr;	/* buf_ring of queued packets */
	struct ifaltq *txq_ifq;
	struct callout txq_timer;
	struct callout txq_watchdog;
	uint64_t txq_coalesced;		/* statistics counters */
	uint32_t txq_skipped;
	uint32_t txq_enqueued;
	uint32_t txq_dump_start;	/* ring-dump window (start/count) */
	uint32_t txq_dump_count;
	uint64_t txq_direct_packets;
	uint64_t txq_direct_bytes;
	struct sg_ent txq_sgl[TX_MAX_SEGS / 2 + 1];	/* scatter/gather scratch */
	/* NOTE(review): tail of the per-port statistics enum; the enum
	 * header (and any earlier entries) is not visible in this copy. */
	SGE_PSTAT_TSO,		/* # of TSO requests */
	SGE_PSTAT_RX_CSUM_GOOD,	/* # of successful RX csum offloads */
	SGE_PSTAT_TX_CSUM,	/* # of TX checksum offloads */
	SGE_PSTAT_VLANEX,	/* # of VLAN tag extractions */
	SGE_PSTAT_VLANINS,	/* # of VLAN tag insertions */

/* Number of statistics slots; SGE_PSTAT_VLANINS must remain last. */
#define SGE_PSTAT_MAX (SGE_PSTAT_VLANINS+1)
/* Queue-set run-state flag bits. */
#define QS_EXITING 0x1
#define QS_RUNNING 0x2
#define QS_FLUSHING 0x8
#define QS_TIMEOUT 0x10

	/* NOTE(review): members of the SGE queue-set structure; the
	 * struct header is not visible in this copy of the file. */
	struct sge_rspq rspq;		/* one response queue per set */
	struct sge_fl fl[SGE_RXQ_PER_SET];	/* free lists for this set */
	struct lro_state lro;
	struct sge_txq txq[SGE_TXQ_PER_SET];	/* transmit queues */
	uint32_t txq_stopped;	/* which Tx queues are stopped */
	uint64_t port_stats[SGE_PSTAT_MAX];	/* SGE_PSTAT_* counters */
	struct port_info *port;	/* owning port */
	int idx;		/* qset # */

#define QS_NAME_LEN 32
	char namebuf[QS_NAME_LEN];	/* storage for the qset name */

	/* NOTE(review): member of the enclosing SGE structure holding
	 * all queue sets; that struct's header is not visible here. */
	struct sge_qset qs[SGE_QSETS];
	/* NOTE(review): members of struct adapter (the per-device softc);
	 * the struct header and a number of members are missing from this
	 * copy of the file. */
	TAILQ_ENTRY(adapter) adapter_entry;	/* global adapter-list linkage */

	/* PCI register resources */
	struct resource *regs_res;	/* register BAR */
	struct resource *udbs_res;	/* user doorbell BAR */
	bus_space_handle_t bh;		/* bus-space handle for register access */
	bus_dma_tag_t parent_dmat;	/* parent of the tags below */
	bus_dma_tag_t rx_dmat;
	bus_dma_tag_t rx_jumbo_dmat;
	bus_dma_tag_t tx_dmat;

	/* Interrupt resources */
	struct resource *irq_res;	/* INTx/MSI interrupt */
	uint32_t msix_regs_rid;
	struct resource *msix_regs_res;
	/* One MSI-X vector per queue set. */
	struct resource *msix_irq_res[SGE_QSETS];
	int msix_irq_rid[SGE_QSETS];
	void *msix_intr_tag[SGE_QSETS];

	uint8_t rxpkt_map[8];	/* maps RX_PKT interface values to port ids */
	uint8_t rrss_map[SGE_QSETS];	/* reverse RSS map table */
	uint16_t rspq_map[RSS_TABLE_SIZE];	/* maps 7-bit cookie to qidx */
	/* NOTE(review): "fill" appears to live inside a union "u" (see the
	 * tunq_* aliases below); the union wrapper is not visible here. */
	uint8_t fill[SGE_QSETS];
#define tunq_fill u.fill
#define tunq_coalesce u.coalesce
	struct filter_info *filters;

	/* Deferred work and timers */
	struct task ext_intr_task;
	struct task slow_intr_task;
	struct task tick_task;
	struct taskqueue *tq;
	struct callout cxgb_tick_ch;
	struct callout sge_timer_ch;

	/* Register lock for use by the hardware layer */
	struct mtx mdio_lock;
	struct mtx elmer_lock;

	/* Bookkeeping for the hardware layer */
	struct adapter_params params;
	unsigned int slow_intr_mask;
	unsigned long irq_stats[IRQ_NUM_STATS];

	struct port_info port[MAX_NPORTS];	/* per-port state */
	device_t portdev[MAX_NPORTS];
	char port_types[MAX_NPORTS + 1];
	uint32_t open_device_map;	/* bitmap of open devices */
	uint32_t registered_device_map;
	driver_intr_t *cxgb_intr;	/* active interrupt handler */

#define ADAPTER_LOCK_NAME_LEN 32
	char lockbuf[ADAPTER_LOCK_NAME_LEN];	/* lock-name storage */
	char reglockbuf[ADAPTER_LOCK_NAME_LEN];
	char mdiolockbuf[ADAPTER_LOCK_NAME_LEN];
	char elmerlockbuf[ADAPTER_LOCK_NAME_LEN];
	/* NOTE(review): member of the rx-mode iterator structure used by
	 * t3_get_next_mcaddr(); the rest of the struct is not visible in
	 * this copy of the file. */
	struct port_info *port;
/* Serialize MDIO and ELMER register access. */
#define MDIO_LOCK(adapter)	mtx_lock(&(adapter)->mdio_lock)
#define MDIO_UNLOCK(adapter)	mtx_unlock(&(adapter)->mdio_lock)
#define ELMR_LOCK(adapter)	mtx_lock(&(adapter)->elmer_lock)
#define ELMR_UNLOCK(adapter)	mtx_unlock(&(adapter)->elmer_lock)

/*
 * Per-port lock helpers.  The stray trailing semicolons have been
 * dropped from PORT_LOCK/PORT_UNLOCK so the macros expand to plain
 * expressions and behave correctly inside if/else bodies; callers
 * already terminate the statement themselves.
 */
#define PORT_LOCK(port)		mtx_lock(&(port)->lock)
#define PORT_UNLOCK(port)	mtx_unlock(&(port)->lock)
#define PORT_LOCK_INIT(port, name) mtx_init(&(port)->lock, name, 0, MTX_DEF)
#define PORT_LOCK_DEINIT(port)	mtx_destroy(&(port)->lock)
#define PORT_LOCK_ASSERT_NOTOWNED(port) mtx_assert(&(port)->lock, MA_NOTOWNED)
#define PORT_LOCK_ASSERT_OWNED(port) mtx_assert(&(port)->lock, MA_OWNED)

/* Adapter-wide lock helpers; same trailing-semicolon fix as above. */
#define ADAPTER_LOCK(adap)	mtx_lock(&(adap)->lock)
#define ADAPTER_UNLOCK(adap)	mtx_unlock(&(adap)->lock)
#define ADAPTER_LOCK_INIT(adap, name) mtx_init(&(adap)->lock, name, 0, MTX_DEF)
#define ADAPTER_LOCK_DEINIT(adap) mtx_destroy(&(adap)->lock)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(adap) mtx_assert(&(adap)->lock, MA_NOTOWNED)
#define ADAPTER_LOCK_ASSERT_OWNED(adap) mtx_assert(&(adap)->lock, MA_OWNED)
420 static __inline uint32_t
421 t3_read_reg(adapter_t *adapter, uint32_t reg_addr)
423 return (bus_space_read_4(adapter->bt, adapter->bh, reg_addr));
427 t3_write_reg(adapter_t *adapter, uint32_t reg_addr, uint32_t val)
429 bus_space_write_4(adapter->bt, adapter->bh, reg_addr, val);
433 t3_os_pci_read_config_4(adapter_t *adapter, int reg, uint32_t *val)
435 *val = pci_read_config(adapter->dev, reg, 4);
439 t3_os_pci_write_config_4(adapter_t *adapter, int reg, uint32_t val)
441 pci_write_config(adapter->dev, reg, val, 4);
445 t3_os_pci_read_config_2(adapter_t *adapter, int reg, uint16_t *val)
447 *val = pci_read_config(adapter->dev, reg, 2);
451 t3_os_pci_write_config_2(adapter_t *adapter, int reg, uint16_t val)
453 pci_write_config(adapter->dev, reg, val, 2);
/*
 * Walk the interface's multicast address list and return the link-level
 * address of an AF_LINK entry, or NULL if none is found.
 * NOTE(review): this copy of the file is missing several lines of the
 * body — the opening brace, the rlock matching the if_maddr_runlock()
 * below, the loop-control statements after the family test, and the
 * final return.  Do not edit without consulting the canonical source.
 */
static __inline uint8_t *
t3_get_next_mcaddr(struct t3_rx_mode *rm)
	uint8_t *macaddr = NULL;
	struct ifnet *ifp = rm->port->ifp;
	struct ifmultiaddr *ifma;
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
		macaddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
	if_maddr_runlock(ifp);
/*
 * Initialize an rx-mode iterator for the given port.
 * NOTE(review): the return-type line and the function body are not
 * visible in this copy of the file.
 */
t3_init_rx_mode(struct t3_rx_mode *rm, struct port_info *port)
487 static __inline struct port_info *
488 adap2pinfo(struct adapter *adap, int idx)
490 return &adap->port[idx];
/* PCI helpers. */
int t3_os_find_pci_capability(adapter_t *adapter, int cap);
int t3_os_pci_save_state(struct adapter *adapter);
int t3_os_pci_restore_state(struct adapter *adapter);

/* OS callbacks invoked on hardware events (link, PHY module, errors). */
void t3_os_link_changed(adapter_t *adapter, int port_id, int link_status,
			int speed, int duplex, int fc, int mac_was_reset);
void t3_os_phymod_changed(struct adapter *adap, int port_id);
void t3_sge_err_intr_handler(adapter_t *adapter);
int t3_offload_tx(struct t3cdev *, struct mbuf *);
void t3_os_ext_intr_handler(adapter_t *adapter);
void t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[]);
int t3_mgmt_tx(adapter_t *adap, struct mbuf *m);

/* SGE (scatter-gather engine) setup/teardown and interrupt entry points. */
int t3_sge_alloc(struct adapter *);
int t3_sge_free(struct adapter *);
int t3_sge_alloc_qset(adapter_t *, uint32_t, int, int, const struct qset_params *,
		      int, struct port_info *);
void t3_free_sge_resources(adapter_t *);
void t3_sge_start(adapter_t *);
void t3_sge_stop(adapter_t *);
void t3b_intr(void *data);
void t3_intr_msi(void *data);
void t3_intr_msix(void *data);
int t3_sge_init_adapter(adapter_t *);
int t3_sge_reset_adapter(adapter_t *);
int t3_sge_init_port(struct port_info *);
void t3_free_tx_desc(struct sge_qset *qs, int n, int qid);
void t3_rx_eth(struct adapter *adap, struct sge_rspq *rq, struct mbuf *m, int ethpad);

/* sysctl registration and descriptor-ring introspection. */
void t3_add_attach_sysctls(adapter_t *sc);
void t3_add_configured_sysctls(adapter_t *sc);
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
		unsigned char *data);
void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);

/*
 * Tick interval in ticks: based on linkpoll_period when link polling is
 * enabled (the /10 suggests linkpoll_period is in tenths of a second —
 * confirm), otherwise stats_update_period (seconds) * hz.
 */
#define CXGB_TICKS(a) ((a)->params.linkpoll_period ? \
    (hz * (a)->params.linkpoll_period) / 10 : \
    (a)->params.stats_update_period * hz)
/* XXX: figure out how we can return this to being private to sge */
/*
 * Number of Tx descriptors eligible for reclaim; can go negative while
 * fewer than TX_MAX_DESC completions are outstanding (hence the cast).
 */
#define desc_reclaimable(q) ((int)((q)->processed - (q)->cleaned - TX_MAX_DESC))

/* Recover a pointer to the enclosing structure from a member pointer. */
#define container_of(p, stype, field) ((stype *)(((uint8_t *)(p)) - offsetof(stype, field)))
541 static __inline struct sge_qset *
542 fl_to_qset(struct sge_fl *q, int qidx)
544 return container_of(q, struct sge_qset, fl[qidx]);
547 static __inline struct sge_qset *
548 rspq_to_qset(struct sge_rspq *q)
550 return container_of(q, struct sge_qset, rspq);
553 static __inline struct sge_qset *
554 txq_to_qset(struct sge_txq *q, int qidx)
556 return container_of(q, struct sge_qset, txq[qidx]);
559 static __inline struct adapter *
560 tdev2adap(struct t3cdev *d)
562 return container_of(d, struct adapter, tdev);
567 #define OFFLOAD_DEVMAP_BIT 15
568 static inline int offload_running(adapter_t *adapter)
570 return isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
/* ifnet entry points and the Tx watchdog implemented by this driver. */
void cxgb_tx_watchdog(void *arg);
int cxgb_transmit(struct ifnet *ifp, struct mbuf *m);
void cxgb_qflush(struct ifnet *ifp);
void cxgb_start(struct ifnet *ifp);