1 /**************************************************************************
3 Copyright (c) 2007, Chelsio Inc.
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Neither the name of the Chelsio Corporation nor the names of its
13 contributors may be used to endorse or promote products derived from
14 this software without specific prior written permission.
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
31 ***************************************************************************/
35 #ifndef _CXGB_ADAPTER_H_
36 #define _CXGB_ADAPTER_H_
38 #include <sys/cdefs.h>
39 __FBSDID("$FreeBSD$");
42 #include <sys/mutex.h>
46 #include <sys/socket.h>
47 #include <sys/sockio.h>
49 #include <net/ethernet.h>
51 #include <net/if_media.h>
53 #include <machine/bus.h>
54 #include <machine/resource.h>
55 #include <sys/bus_dma.h>
56 #include <dev/pci/pcireg.h>
57 #include <dev/pci/pcivar.h>
60 #include <cxgb_osdep.h>
61 #include <ulp/toecore/toedev.h>
62 #include <sys/mbufq.h>
64 #include <dev/cxgb/cxgb_osdep.h>
65 #include <dev/cxgb/sys/mbufq.h>
66 #include <dev/cxgb/ulp/toecore/toedev.h>
73 extern int cxgb_debug;
/*
 * Lock-debug variant: log every mutex init so lock lifecycle can be traced.
 * Wrapped in do { } while (0) so the multi-statement macro expands to a
 * single statement and composes safely with if/else.
 */
#define MTX_INIT(lock, lockname, class, flags) do { \
	printf("initializing %s at %s:%d\n", lockname, __FILE__, __LINE__); \
	mtx_init((lock), lockname, class, flags); \
} while (0)
/*
 * Lock-debug variant: log every mutex destroy.  do { } while (0) wrapper
 * makes the two statements behave as one.
 */
#define MTX_DESTROY(lock) do { \
	printf("destroying %s at %s:%d\n", (lock)->lock_object.lo_name, __FILE__, __LINE__); \
	mtx_destroy((lock)); \
} while (0)
/*
 * Lock-debug variant: log every sx lock init.  Single-statement wrapper as
 * for MTX_INIT.
 */
#define SX_INIT(lock, lockname) do { \
	printf("initializing %s at %s:%d\n", lockname, __FILE__, __LINE__); \
	sx_init((lock), lockname); \
} while (0)
/*
 * Lock-debug variant: log and destroy an sx lock.  The visible text only
 * printed; the actual sx_destroy() call is restored here so the debug
 * variant behaves like the non-debug one.
 */
#define SX_DESTROY(lock) do { \
	printf("destroying %s at %s:%d\n", (lock)->lock_object.lo_name, __FILE__, __LINE__); \
	sx_destroy((lock)); \
} while (0)
/* Non-debug variants: map directly onto the kernel lock primitives. */
#define MTX_INIT mtx_init
#define MTX_DESTROY mtx_destroy
#define SX_INIT sx_init
#define SX_DESTROY sx_destroy
	/*
	 * NOTE(review): fields of struct port_info; the struct header and
	 * some members are not visible in this chunk.
	 */
	struct adapter *adapter;	/* back-pointer to the owning adapter */
	const struct port_type_info *port_type;
	struct link_config link_config;
	struct ifmedia media;		/* ifmedia state for this port */
	uint8_t hw_addr[ETHER_ADDR_LEN];	/* port MAC address */
	struct taskqueue *tq;		/* per-port taskqueue */
	struct task start_task;
	struct task timer_reclaim_task;
	struct cdev *port_cdev;		/* per-port character device node */

#define PORT_NAME_LEN 32
#define TASKQ_NAME_LEN 32
	char lockbuf[PORT_NAME_LEN];	/* storage for the port lock name */
	char taskqbuf[TASKQ_NAME_LEN];	/* storage for the taskqueue name */
enum { /* adapter flags, kept in a bitmask */
	FULL_INIT_DONE = (1 << 0),	/* full initialization completed */
	USING_MSI = (1 << 1),		/* MSI interrupts in use */
	USING_MSIX = (1 << 2),		/* MSI-X interrupts in use */
	QUEUES_BOUND = (1 << 3),	/* queue sets bound to interrupts */
	FW_UPTODATE = (1 << 4),		/* firmware version acceptable */
	TPS_UPTODATE = (1 << 5),	/* TP microcode version acceptable */
	/* NOTE(review): enum closing brace not visible in this chunk */

/* Default descriptor ring sizes (entries) */
#define FL_Q_SIZE 4096
#define JUMBO_Q_SIZE 512
#define RSPQ_Q_SIZE 1024
#define TX_ETH_Q_SIZE 1024
/*
 * Types of Tx queues in each queue set. Order here matters, do not change.
 * XXX TOE is not implemented yet, so the extra queues are just placeholders.
 */
enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };

/*
 * careful, the following are set on priv_flags and must not collide with
 * other flag values (NOTE(review): the rest of this comment is not visible
 * in this chunk).
 */
	LRO_ACTIVE = (1 << 8),	/* NOTE(review): enclosing enum not visible */

/* Max concurrent LRO sessions per queue set */
#define MAX_LRO_SES 8

struct t3_lro_session {
	/* NOTE(review): session fields not visible in this chunk */

	/* NOTE(review): the following are fields of struct lro_state */
	unsigned short enabled;		/* LRO enabled for this queue set */
	unsigned short active_idx;	/* index of most recently used session */
	unsigned int nactive;		/* number of active sessions */
	struct t3_lro_session sess[MAX_LRO_SES];	/* session table */
#define RX_BUNDLE_SIZE 8	/* offload packets delivered per bundle */

	/* NOTE(review): fields of struct sge_rspq; header not visible here */
	uint32_t holdoff_tmr;	/* current interrupt holdoff timer setting */
	uint32_t next_holdoff;	/* holdoff value to apply next */
	struct rsp_desc *desc;	/* response descriptor ring */
	struct mbuf *rx_head; /* offload packet receive queue head */
	struct mbuf *rx_tail; /* offload packet receive queue tail */
	uint32_t offload_pkts;		/* statistics: offload packets seen */
	uint32_t offload_bundles;	/* statistics: bundles delivered */
	uint32_t unhandled_irqs;	/* statistics: unclaimed interrupts */
	bus_addr_t phys_addr;	/* bus address of the descriptor ring */
	bus_dma_tag_t desc_tag;	/* DMA tag for the ring */
	bus_dmamap_t desc_map;	/* DMA map for the ring */

#define RSPQ_NAME_LEN 32
	char lockbuf[RSPQ_NAME_LEN];	/* storage for the queue lock name */
	/* NOTE(review): fields of struct sge_fl; header not visible here */
	struct rx_desc *desc;		/* free-list descriptor ring */
	struct rx_sw_desc *sdesc;	/* per-entry software state */
	bus_addr_t phys_addr;		/* bus address of the ring */
	bus_dma_tag_t desc_tag;		/* DMA tag for the ring */
	bus_dmamap_t desc_map;		/* DMA map for the ring */
	bus_dma_tag_t entry_tag;	/* DMA tag for per-entry buffers */
#define TXQ_TRANSMITTING 0x1	/* presumably: transmit in progress flag — confirm */

	/* NOTE(review): fields of struct sge_txq; header not visible here */
	struct tx_desc *desc;		/* Tx descriptor ring */
	struct tx_sw_desc *sdesc;	/* per-entry software state */
	bus_addr_t phys_addr;		/* bus address of the ring */
	struct task qresume_task;	/* deferred work: restart the queue */
	struct task qreclaim_task;	/* deferred work: reclaim descriptors */
	struct port_info *port;		/* owning port */
	bus_dma_tag_t desc_tag;		/* DMA tag for the ring */
	bus_dmamap_t desc_map;		/* DMA map for the ring */
	bus_dma_tag_t entry_tag;	/* DMA tag for per-entry buffers */
	struct mbuf_head sendq;		/* queued mbufs awaiting descriptors */

#define TXQ_NAME_LEN 32
	char lockbuf[TXQ_NAME_LEN];	/* storage for the queue lock name */
	/* NOTE(review): per-port SGE statistics enum; opening not visible */
	SGE_PSTAT_TSO, /* # of TSO requests */
	SGE_PSTAT_RX_CSUM_GOOD, /* # of successful RX csum offloads */
	SGE_PSTAT_TX_CSUM, /* # of TX checksum offloads */
	SGE_PSTAT_VLANEX, /* # of VLAN tag extractions */
	SGE_PSTAT_VLANINS, /* # of VLAN tag insertions */
	SGE_PSTATS_LRO_QUEUED, /* # of LRO appended packets */
	SGE_PSTATS_LRO_FLUSHED, /* # of LRO flushed packets */
	SGE_PSTATS_LRO_X_STREAMS, /* # of exceeded LRO contexts */

/* Number of counters: one past the last enumerator above */
#define SGE_PSTAT_MAX (SGE_PSTATS_LRO_X_STREAMS+1)
	/* NOTE(review): fields of struct sge_qset; header not visible here */
	struct sge_rspq rspq;			/* response queue */
	struct sge_fl fl[SGE_RXQ_PER_SET];	/* free lists */
	struct lro_state lro;			/* LRO bookkeeping */
	struct sge_txq txq[SGE_TXQ_PER_SET];	/* Tx queues (see TXQ_*) */
	uint32_t txq_stopped; /* which Tx queues are stopped */
	uint64_t port_stats[SGE_PSTAT_MAX];	/* SGE_PSTAT_* counters */
	struct port_info *port;			/* owning port */
	int idx; /* qset # */

	/* NOTE(review): the following belongs to an enclosing struct that is
	 * not visible in this chunk */
	struct sge_qset qs[SGE_QSETS];
	/* NOTE(review): fields of struct adapter; header not visible here */
	TAILQ_ENTRY(adapter) adapter_entry;	/* linkage on adapter list */

	/* PCI register resources */
	struct resource *regs_res;
	bus_space_handle_t bh;
	bus_dma_tag_t parent_dmat;	/* parent DMA tag */
	bus_dma_tag_t rx_dmat;		/* Rx buffer DMA tag */
	bus_dma_tag_t rx_jumbo_dmat;	/* jumbo Rx buffer DMA tag */
	bus_dma_tag_t tx_dmat;		/* Tx buffer DMA tag */

	/* Interrupt resources */
	struct resource *irq_res;
	uint32_t msix_regs_rid;
	struct resource *msix_regs_res;
	struct resource *msix_irq_res[SGE_QSETS];
	int msix_irq_rid[SGE_QSETS];
	void *msix_intr_tag[SGE_QSETS];
	uint8_t rxpkt_map[8]; /* maps RX_PKT interface values to port ids */
	uint8_t rrss_map[SGE_QSETS]; /* reverse RSS map table */
	struct filter_info *filters;

	/* Deferred work and timers */
	struct task ext_intr_task;
	struct task slow_intr_task;
	struct task tick_task;
	struct task process_responses_task;
	struct taskqueue *tq;
	struct callout cxgb_tick_ch;
	struct callout sge_timer_ch;

	/* Register lock for use by the hardware layer */
	struct mtx mdio_lock;
	struct mtx elmer_lock;

	/* Bookkeeping for the hardware layer */
	struct adapter_params params;
	unsigned int slow_intr_mask;
	unsigned long irq_stats[IRQ_NUM_STATS];

	struct port_info port[MAX_NPORTS];	/* per-port state */
	device_t portdev[MAX_NPORTS];		/* per-port device handles */
	uint32_t open_device_map;	/* bitmap; tested via isset() below */
	uint32_t registered_device_map;
	driver_intr_t *cxgb_intr;	/* interrupt handler function pointer */

#define ADAPTER_LOCK_NAME_LEN 32
	char lockbuf[ADAPTER_LOCK_NAME_LEN];	/* lock-name storage */
	char reglockbuf[ADAPTER_LOCK_NAME_LEN];
	char mdiolockbuf[ADAPTER_LOCK_NAME_LEN];
	char elmerlockbuf[ADAPTER_LOCK_NAME_LEN];
	/* NOTE(review): field of an enclosing struct (likely t3_rx_mode,
	 * used by t3_get_next_mcaddr below) — header not visible here */
	struct port_info *port;

/* Accessors for the hardware-layer register locks */
#define MDIO_LOCK(adapter) mtx_lock(&(adapter)->mdio_lock)
#define MDIO_UNLOCK(adapter) mtx_unlock(&(adapter)->mdio_lock)
#define ELMR_LOCK(adapter) mtx_lock(&(adapter)->elmer_lock)
#define ELMR_UNLOCK(adapter) mtx_unlock(&(adapter)->elmer_lock)
/*
 * Per-port lock, sx variant.  The macro bodies carry no trailing
 * semicolon: the caller supplies the statement terminator, so the macros
 * compose correctly with if/else.
 */
#define PORT_LOCK(port) sx_xlock(&(port)->lock)
#define PORT_UNLOCK(port) sx_xunlock(&(port)->lock)
#define PORT_LOCK_INIT(port, name) SX_INIT(&(port)->lock, name)
#define PORT_LOCK_DEINIT(port) SX_DESTROY(&(port)->lock)
#define PORT_LOCK_ASSERT_OWNED(port) sx_assert(&(port)->lock, SA_LOCKED)
/*
 * Adapter-wide lock, sx variant.  No trailing semicolons in the macro
 * bodies (caller terminates the statement).
 */
#define ADAPTER_LOCK(adap) sx_xlock(&(adap)->lock)
#define ADAPTER_UNLOCK(adap) sx_xunlock(&(adap)->lock)
#define ADAPTER_LOCK_INIT(adap, name) SX_INIT(&(adap)->lock, name)
#define ADAPTER_LOCK_DEINIT(adap) SX_DESTROY(&(adap)->lock)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(adap) sx_assert(&(adap)->lock, SA_UNLOCKED)
/*
 * Per-port lock, mutex variant.  No trailing semicolons in the macro
 * bodies (caller terminates the statement).
 */
#define PORT_LOCK(port) mtx_lock(&(port)->lock)
#define PORT_UNLOCK(port) mtx_unlock(&(port)->lock)
#define PORT_LOCK_INIT(port, name) mtx_init(&(port)->lock, name, 0, MTX_DEF)
#define PORT_LOCK_DEINIT(port) mtx_destroy(&(port)->lock)
#define PORT_LOCK_ASSERT_OWNED(port) mtx_assert(&(port)->lock, MA_OWNED)
/*
 * Adapter-wide lock, mutex variant.  Fixed: mtx_assert takes MA_NOTOWNED
 * (the MA_* assertion flags), not "MO_NOTOWNED", which is undefined and
 * would fail to compile wherever the assertion macro is expanded.  Also
 * dropped the stray trailing semicolons from the lock/unlock bodies.
 */
#define ADAPTER_LOCK(adap) mtx_lock(&(adap)->lock)
#define ADAPTER_UNLOCK(adap) mtx_unlock(&(adap)->lock)
#define ADAPTER_LOCK_INIT(adap, name) mtx_init(&(adap)->lock, name, 0, MTX_DEF)
#define ADAPTER_LOCK_DEINIT(adap) mtx_destroy(&(adap)->lock)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(adap) mtx_assert(&(adap)->lock, MA_NOTOWNED)
427 static __inline uint32_t
428 t3_read_reg(adapter_t *adapter, uint32_t reg_addr)
430 return (bus_space_read_4(adapter->bt, adapter->bh, reg_addr));
434 t3_write_reg(adapter_t *adapter, uint32_t reg_addr, uint32_t val)
436 bus_space_write_4(adapter->bt, adapter->bh, reg_addr, val);
440 t3_os_pci_read_config_4(adapter_t *adapter, int reg, uint32_t *val)
442 *val = pci_read_config(adapter->dev, reg, 4);
446 t3_os_pci_write_config_4(adapter_t *adapter, int reg, uint32_t val)
448 pci_write_config(adapter->dev, reg, val, 4);
452 t3_os_pci_read_config_2(adapter_t *adapter, int reg, uint16_t *val)
454 *val = pci_read_config(adapter->dev, reg, 2);
458 t3_os_pci_write_config_2(adapter_t *adapter, int reg, uint16_t val)
460 pci_write_config(adapter->dev, reg, val, 2);
/*
 * Multicast-address iterator used by the common hardware code.
 * NOTE(review): the body is only partially visible in this chunk — the
 * condition that selects when the port MAC is returned (and any index
 * advance) is missing.  Do not rely on this fragment for semantics.
 */
static __inline uint8_t *
t3_get_next_mcaddr(struct t3_rx_mode *rm)
	uint8_t *macaddr = NULL;
	macaddr = rm->port->hw_addr;

/*
 * Initialize an rx_mode iterator for 'port'.
 * NOTE(review): return type line and body not visible in this chunk.
 */
t3_init_rx_mode(struct t3_rx_mode *rm, struct port_info *port)
482 static __inline struct port_info *
483 adap2pinfo(struct adapter *adap, int idx)
485 return &adap->port[idx];
/* OS-facing helpers and callbacks invoked by the common hardware code */
int t3_os_find_pci_capability(adapter_t *adapter, int cap);
int t3_os_pci_save_state(struct adapter *adapter);
int t3_os_pci_restore_state(struct adapter *adapter);
void t3_os_link_changed(adapter_t *adapter, int port_id, int link_status,
    int speed, int duplex, int fc);
void t3_sge_err_intr_handler(adapter_t *adapter);
int t3_offload_tx(struct toedev *, struct mbuf *);
void t3_os_ext_intr_handler(adapter_t *adapter);
void t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[]);
int t3_mgmt_tx(adapter_t *adap, struct mbuf *m);

/* SGE (scatter-gather engine) setup and teardown */
int t3_sge_alloc(struct adapter *);
int t3_sge_free(struct adapter *);
int t3_sge_alloc_qset(adapter_t *, uint32_t, int, int, const struct qset_params *,
    int, struct port_info *);
void t3_free_sge_resources(adapter_t *);
void t3_sge_start(adapter_t *);
void t3_sge_stop(adapter_t *);

/* Interrupt handlers (legacy, MSI, MSI-X) */
void t3b_intr(void *data);
void t3_intr_msi(void *data);
void t3_intr_msix(void *data);

/* Datapath: Tx encapsulation and Rx delivery (incl. LRO) */
int t3_encap(struct port_info *, struct mbuf **);
int t3_sge_init_adapter(adapter_t *);
int t3_sge_init_port(struct port_info *);
void t3_sge_deinit_sw(adapter_t *);
void t3_rx_eth_lro(adapter_t *adap, struct sge_rspq *rq, struct mbuf *m,
    int ethpad, uint32_t rss_hash, uint32_t rss_csum, int lro);
void t3_rx_eth(struct port_info *p, struct sge_rspq *rq, struct mbuf *m, int ethpad);
void t3_lro_flush(adapter_t *adap, struct sge_qset *qs, struct lro_state *state);

/* Sysctl and diagnostics */
void t3_add_sysctls(adapter_t *sc);
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
    unsigned char *data);
void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
/*
 * XXX figure out how we can return this to being private to sge
 */
#define desc_reclaimable(q) ((int)((q)->processed - (q)->cleaned - TX_MAX_DESC))

/* Recover a pointer to the enclosing structure from a pointer to member
 * 'field' (same idiom as the Linux/FreeBSD container_of). */
#define container_of(p, stype, field) ((stype *)(((uint8_t *)(p)) - offsetof(stype, field)))
532 static __inline struct sge_qset *
533 fl_to_qset(struct sge_fl *q, int qidx)
535 return container_of(q, struct sge_qset, fl[qidx]);
538 static __inline struct sge_qset *
539 rspq_to_qset(struct sge_rspq *q)
541 return container_of(q, struct sge_qset, rspq);
544 static __inline struct sge_qset *
545 txq_to_qset(struct sge_txq *q, int qidx)
547 return container_of(q, struct sge_qset, txq[qidx]);
550 static __inline struct adapter *
551 tdev2adap(struct toedev *d)
553 return container_of(d, struct adapter, tdev);
558 #define OFFLOAD_DEVMAP_BIT 15
559 static inline int offload_running(adapter_t *adapter)
561 return isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);