2 * Copyright (c) 2017-2018 Cavium, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
35 * Author : David C Somayajulu, Cavium Inc., San Jose, CA 95131.
/* Forward typedef; struct qlnx_ivec is defined elsewhere in the driver. */
typedef struct qlnx_ivec qlnx_ivec_t;
/*
 * Maximum number of RSS (receive-side scaling) rings supported per
 * adapter; sizes the sb/rxq/txq/fp arrays below.
 * (A commented-out alternative value of 30 was removed as dead code.)
 */
#define QLNX_MAX_RSS	16
/* Assemble a 64-bit value from its 32-bit high and low halves. */
#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
68 #define BITS_PER_BYTE 8
69 #endif /* #ifndef BITS_PER_BYTE */
73 * RX ring buffer contains pointer to kmalloc() data only,
	QLNX_AGG_STATE_NONE = 0,	/* no aggregation in progress for this slot */
	QLNX_AGG_STATE_START = 1,	/* TPA_START received; aggregation open */
	QLNX_AGG_STATE_ERROR = 2	/* error observed; aggregation to be dropped */
/* Per-aggregation state for hardware TPA (transparent packet aggregation). */
struct qlnx_agg_info {
	/* rx_buf is a data buffer that can be placed / consumed from rx bd
	 * chain. It has two purposes: We will preallocate the data buffer
	 * for each aggregation when we open the interface and will place this
	 * buffer on the rx-bd-ring when we receive TPA_START. We don't want
	 * to be in a state where allocation fails, as we can't reuse the
	 * consumer buffer in the rx-chain since FW may still be writing to it
	 * (since header needs to be modified for TPA).
	 * The second purpose is to keep a pointer to the bd buffer during
	 * aggregation.
	 */
	struct sw_rx_data	rx_buf;		/* preallocated buffer posted at TPA_START */
	enum qlnx_agg_state	agg_state;	/* current state of this aggregation slot */
	uint16_t		placement_offset; /* payload offset within the buffer */
	struct mbuf		*mpf; /* first mbuf in chain */
	struct mbuf		*mpl; /* last mbuf in chain */
/* Ring sizes are powers of two, as required by the hardware chains. */
#define RX_RING_SIZE_POW	13
#define RX_RING_SIZE		(1 << RX_RING_SIZE_POW)	/* 8192 entries */

#define TX_RING_SIZE_POW	14
#define TX_RING_SIZE		(1 << TX_RING_SIZE_POW)	/* 16384 entries */
/* Per-ring receive queue state. */
struct qlnx_rx_queue {
	volatile __le16		*hw_cons_ptr;	/* consumer index, written by hw */
	struct sw_rx_data	sw_rx_ring[RX_RING_SIZE]; /* host shadow of the rx bd ring */

	struct ecore_chain	rx_bd_ring;	/* rx buffer-descriptor chain */
	struct ecore_chain	rx_comp_ring;	/* rx completion chain */
	void __iomem		*hw_rxq_prod_addr; /* producer doorbell address */

	struct qlnx_agg_info	tpa_info[ETH_TPA_MAX_AGGS_NUM]; /* per-slot TPA state */

	uint32_t		rx_buf_size;	/* size of each posted rx buffer */

	uint16_t		num_rx_buffers;	/* buffers currently posted */
137 struct eth_db_data data;
/* Set on the first BD descriptor when there is a split BD */
#define QLNX_TSO_SPLIT_BD	(1 << 0)

/* Max DMA segments per packet; also sizes the per-txq segs[] scratch array. */
#define QLNX_MAX_SEGMENTS	255
/* Per-ring transmit queue state. */
struct qlnx_tx_queue {
	int			index;		/* Queue index */
	volatile __le16		*hw_cons_ptr;	/* consumer index, written by hw */
	struct sw_tx_bd		sw_tx_ring[TX_RING_SIZE]; /* host shadow of the tx bd ring */

	struct ecore_chain	tx_pbl;		/* tx page-base-list chain */
	void __iomem		*doorbell_addr;	/* tx producer doorbell address */

	bus_dma_segment_t	segs[QLNX_MAX_SEGMENTS]; /* scratch for bus_dmamap_load */

	uint16_t		num_tx_buffers;	/* in-flight tx buffers */
/* Extract the little-endian DMA address / length stored in a tx BD. */
#define BD_UNMAP_ADDR(bd)	HILO_U64(le32toh((bd)->addr.hi), \
					 le32toh((bd)->addr.lo))
#define BD_UNMAP_LEN(bd)	(le16toh((bd)->nbytes))
173 #define BD_SET_UNMAP_ADDR_LEN(bd, maddr, len) \
175 (bd)->addr.hi = htole32(U64_HI(maddr)); \
176 (bd)->addr.lo = htole32(U64_LO(maddr)); \
177 (bd)->nbytes = htole16(len); \
/* Number of buckets in the per-fastpath tx_pkts[] counter array below. */
#define QLNX_FP_MAX_SEGS	24
/* Per-interrupt-vector fastpath: one rx queue, per-TC tx queues, stats. */
struct qlnx_fastpath {
	struct ecore_sb_info	*sb_info;	/* hw status block for this vector */
	struct qlnx_rx_queue	*rxq;		/* receive queue */
	struct qlnx_tx_queue	*txq[MAX_NUM_TC]; /* one tx queue per traffic class */

	char			tx_mtx_name[32];
	struct buf_ring		*tx_br;		/* software transmit buf_ring */
	uint32_t		tx_ring_full;

	struct taskqueue	*fp_taskqueue;

	/* transmit statistics */
	uint64_t		tx_pkts_processed;
	uint64_t		tx_pkts_freed;
	uint64_t		tx_pkts_transmitted;
	uint64_t		tx_pkts_completed;
	uint64_t		tx_lso_wnd_min_len;
	uint64_t		tx_nsegs_gt_elem_left;
	uint32_t		tx_tso_max_nsegs;
	uint32_t		tx_tso_min_nsegs;
	uint32_t		tx_tso_max_pkt_len;
	uint32_t		tx_tso_min_pkt_len;
	/* per-bucket tx counters; presumably indexed by segment count — TODO confirm */
	uint64_t		tx_pkts[QLNX_FP_MAX_SEGS];
	/* transmit error counters */
	uint64_t		err_tx_nsegs_gt_elem_left;
	uint64_t		err_tx_dmamap_create;
	uint64_t		err_tx_defrag_dmamap_load;
	uint64_t		err_tx_non_tso_max_seg;
	uint64_t		err_tx_dmamap_load;
	uint64_t		err_tx_defrag;
	uint64_t		err_tx_free_pkt_null;
	uint64_t		err_tx_cons_idx_conflict;
	/* LRO size-bucket counters */
	uint64_t		lro_cnt_128;
	uint64_t		lro_cnt_256;
	uint64_t		lro_cnt_512;
	uint64_t		lro_cnt_1024;

	/* receive statistics */
	uint64_t		err_m_getcl;	/* mbuf cluster alloc failures */
	uint64_t		err_m_getjcl;	/* jumbo cluster alloc failures */
	uint64_t		err_rx_hw_errors;
	uint64_t		err_rx_alloc_errors;
	uint64_t		err_rx_jumbo_chain_pkts;
	uint64_t		err_rx_mp_null;
	uint64_t		err_rx_tpa_invalid_agg_num;
/* Arguments for a vport-update request to ecore.  The update_*_flg
 * fields appear to gate whether the matching value field is applied —
 * NOTE(review): confirm against ecore_sp_vport_update(). */
struct qlnx_update_vport_params {
	uint8_t		update_vport_active_rx_flg;
	uint8_t		vport_active_rx_flg;
	uint8_t		update_vport_active_tx_flg;
	uint8_t		vport_active_tx_flg;
	uint8_t		update_inner_vlan_removal_flg;
	uint8_t		inner_vlan_removal_flg;
	struct ecore_rss_params		*rss_params;	/* NULL = leave RSS unchanged — TODO confirm */
	struct ecore_sge_tpa_params	*sge_tpa_params;
/* Snapshot of link state reported to the OS. */
struct qlnx_link_output {
	uint32_t	supported_caps;		/* QLNX_LINK_CAP_* bits of local device */
	uint32_t	advertised_caps;	/* QLNX_LINK_CAP_* bits advertised */
	uint32_t	link_partner_caps;	/* QLNX_LINK_CAP_* bits of partner */
	uint32_t	speed;			/* In Mb/s */
typedef struct qlnx_link_output qlnx_link_output_t;

/* Duplex flag — NOTE(review): consumer not visible in this chunk. */
#define QLNX_LINK_DUPLEX			0x0001

/* Capability bits for the *_caps fields of struct qlnx_link_output. */
#define QLNX_LINK_CAP_FIBRE			0x0001
#define QLNX_LINK_CAP_Autoneg			0x0002
#define QLNX_LINK_CAP_Pause			0x0004
#define QLNX_LINK_CAP_Asym_Pause		0x0008
#define QLNX_LINK_CAP_1000baseT_Half		0x0010
#define QLNX_LINK_CAP_1000baseT_Full		0x0020
#define QLNX_LINK_CAP_10000baseKR_Full		0x0040
#define QLNX_LINK_CAP_25000baseKR_Full		0x0080
#define QLNX_LINK_CAP_40000baseLR4_Full		0x0100
#define QLNX_LINK_CAP_50000baseKR2_Full		0x0200
#define QLNX_LINK_CAP_100000baseKR4_Full	0x0400
283 /* Functions definition */
/* Per-packet transmit offload request flags. */
#define XMIT_L4_CSUM	(1 << 0)	/* request L4 checksum offload */
#define XMIT_LSO	(1 << 1)	/* request large-send offload */

/* Union of all IP/L4 header-error bits (plain and tunneled) in the CQE
 * parsing/error flags word. */
#define CQE_FLAGS_ERR	(PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK << \
			 PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT | \
			 PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK << \
			 PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT | \
			 PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK << \
			 PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT | \
			 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK << \
			 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT)
/* Frames up to this size are copied rather than remapped — TODO confirm
 * against the rx path. */
#define RX_COPY_THRESH			92
#define ETH_MAX_PACKET_SIZE		1500	/* default (non-jumbo) MTU */

#define QLNX_MFW_VERSION_LENGTH		32
#define QLNX_STORMFW_VERSION_LENGTH	32

/* tx BD slots held in reserve — NOTE(review): verify use in tx path. */
#define QLNX_TX_ELEM_RESERVE		2

#define QLNX_TPA_MAX_AGG_BUFFERS	(20)

#define QLNX_MAX_NUM_MULTICAST_ADDRS	ECORE_MAX_MC_ADDRS
309 typedef struct _qlnx_mcast {
312 } __packed qlnx_mcast_t;
315 * Adapter structure contains the hardware independent information of the
320 /* interface to ecore */
322 struct ecore_dev cdev;
335 /* interface to o.s */
343 volatile int link_up;
344 struct ifmedia media;
345 uint16_t max_frame_size;
347 struct cdev *ioctl_dev;
350 struct resource *pci_reg;
353 struct resource *pci_dbells;
355 uint64_t dbells_phys_addr;
356 uint32_t dbells_size;
358 struct resource *msix_bar;
368 uint32_t dbg_trace_lro_cnt;
369 uint32_t dbg_trace_tso_pkt_len;
374 uint8_t mfw_ver[QLNX_MFW_VERSION_LENGTH];
375 uint8_t stormfw_ver[QLNX_STORMFW_VERSION_LENGTH];
380 bus_dma_tag_t parent_tag;
381 bus_dma_tag_t tx_tag;
382 bus_dma_tag_t rx_tag;
385 struct ecore_sb_info sb_array[QLNX_MAX_RSS];
386 struct qlnx_rx_queue rxq_array[QLNX_MAX_RSS];
387 struct qlnx_tx_queue txq_array[(QLNX_MAX_RSS * MAX_NUM_TC)];
388 struct qlnx_fastpath fp_array[QLNX_MAX_RSS];
391 struct callout tx_callout;
395 uint32_t rx_pkt_threshold;
396 uint32_t rx_jumbo_buf_eq_mtu;
398 /* slow path related */
399 struct resource *sp_irq[MAX_HWFNS_PER_DEVICE];
400 void *sp_handle[MAX_HWFNS_PER_DEVICE];
401 int sp_irq_rid[MAX_HWFNS_PER_DEVICE];
402 struct task sp_task[MAX_HWFNS_PER_DEVICE];
403 struct taskqueue *sp_taskqueue[MAX_HWFNS_PER_DEVICE];
405 struct callout qlnx_callout;
407 /* fast path related */
411 #define QLNX_MAX_TSS_CNT(ha) ((ha->num_rss) * (ha->num_tc))
413 qlnx_ivec_t irq_vec[QLNX_MAX_RSS];
418 qlnx_mcast_t mcast[QLNX_MAX_NUM_MULTICAST_ADDRS];
419 struct ecore_filter_mcast ecore_mcast;
420 uint8_t primary_mac[ETH_ALEN];
421 uint8_t prio_to_tc[MAX_NUM_PRI];
422 struct ecore_eth_stats hw_stats;
423 struct ecore_rss_params rss_params;
424 uint32_t rx_buf_size;
425 bool rx_csum_offload;
427 uint32_t rx_coalesce_usecs;
428 uint32_t tx_coalesce_usecs;
431 qlnx_link_output_t if_link;
433 /* global counters */
434 uint64_t sp_interrupts;
435 uint64_t err_illegal_intr;
436 uint64_t err_fp_null;
437 uint64_t err_get_proto_invalid_type;
439 /* grcdump related */
441 uint32_t grcdump_taken;
442 uint32_t grcdump_dwords[QLNX_MAX_HW_FUNCS];
443 uint32_t grcdump_size[QLNX_MAX_HW_FUNCS];
444 void *grcdump[QLNX_MAX_HW_FUNCS];
446 uint32_t idle_chk_taken;
447 uint32_t idle_chk_dwords[QLNX_MAX_HW_FUNCS];
448 uint32_t idle_chk_size[QLNX_MAX_HW_FUNCS];
449 void *idle_chk[QLNX_MAX_HW_FUNCS];
451 /* storm stats related */
452 #define QLNX_STORM_STATS_TOTAL \
453 (QLNX_MAX_HW_FUNCS * QLNX_STORM_STATS_SAMPLES_PER_HWFN)
454 qlnx_storm_stats_t storm_stats[QLNX_STORM_STATS_TOTAL];
455 uint32_t storm_stats_index;
456 uint32_t storm_stats_enable;
458 uint32_t personality;
/* Primary per-adapter softc type. */
typedef struct qlnx_host qlnx_host_t;
/*
 * Round "size" up to the next multiple of "align".
 * Note that "align" has to be a power of 2.
 * Fixes two defects in the original definition: a stray trailing ';'
 * that made the macro unusable inside expressions, and unparenthesized
 * arguments / result (CERT PRE01-C / PRE02-C).
 */
#define QL_ALIGN(size, align)	(((size) + ((align) - 1)) & ~((align) - 1))
/*
 * Minimum of two values.  Arguments are now fully parenthesized so
 * expression arguments expand correctly (CERT PRE01-C).  Each argument
 * may be evaluated twice — do not pass expressions with side effects.
 */
#define QL_MIN(x, y)	(((x) < (y)) ? (x) : (y))
467 #define QL_RUNNING(ifp) \
468 ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == \
#define QLNX_MAX_MTU			9000	/* jumbo-frame MTU ceiling */
#define QLNX_MAX_SEGMENTS_NON_TSO	(ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1)
/* 64K-1 TSO payload limit plus header slack (22 bytes — presumably
 * ether + vlan header; TODO confirm). */
#define QLNX_MAX_TSO_FRAME_SIZE		((64 * 1024 - 1) + 22)
/*
 * Compare two 6-byte MAC addresses; evaluates to 0 when equal, 1 when
 * different.  Arguments are now parenthesized so expression arguments
 * (e.g. "buf + off") expand correctly.
 * NOTE(review): the 32-/16-bit loads assume the pointers are suitably
 * aligned for those types — true for the driver's embedded MAC arrays,
 * but verify for any new caller (misaligned access is UB on some
 * platforms).
 */
#define QL_MAC_CMP(mac1, mac2) \
	((((*(uint32_t *)(mac1)) == (*(uint32_t *)(mac2)) && \
	(*(uint16_t *)((mac1) + 4)) == (*(uint16_t *)((mac2) + 4)))) ? 0 : 1)
/* Iterate "i" over the active RSS rings; requires a softc "ha" in scope. */
#define for_each_rss(i) for (i = 0; i < ha->num_rss; i++)
486 #define QL_DPRINT1(ha, x, ...) \
488 if ((ha)->dbg_level & 0x0001) { \
489 device_printf ((ha)->pci_dev, \
491 __func__, __LINE__, \
496 #define QL_DPRINT2(ha, x, ...) \
498 if ((ha)->dbg_level & 0x0002) { \
499 device_printf ((ha)->pci_dev, \
501 __func__, __LINE__, \
506 #define QL_DPRINT3(ha, x, ...) \
508 if ((ha)->dbg_level & 0x0004) { \
509 device_printf ((ha)->pci_dev, \
511 __func__, __LINE__, \
516 #define QL_DPRINT4(ha, x, ...) \
518 if ((ha)->dbg_level & 0x0008) { \
519 device_printf ((ha)->pci_dev, \
521 __func__, __LINE__, \
526 #define QL_DPRINT5(ha, x, ...) \
528 if ((ha)->dbg_level & 0x0010) { \
529 device_printf ((ha)->pci_dev, \
531 __func__, __LINE__, \
536 #define QL_DPRINT6(ha, x, ...) \
538 if ((ha)->dbg_level & 0x0020) { \
539 device_printf ((ha)->pci_dev, \
541 __func__, __LINE__, \
546 #define QL_DPRINT7(ha, x, ...) \
548 if ((ha)->dbg_level & 0x0040) { \
549 device_printf ((ha)->pci_dev, \
551 __func__, __LINE__, \
556 #define QL_DPRINT8(ha, x, ...) \
558 if ((ha)->dbg_level & 0x0080) { \
559 device_printf ((ha)->pci_dev, \
561 __func__, __LINE__, \
566 #define QL_DPRINT9(ha, x, ...) \
568 if ((ha)->dbg_level & 0x0100) { \
569 device_printf ((ha)->pci_dev, \
571 __func__, __LINE__, \
576 #define QL_DPRINT11(ha, x, ...) \
578 if ((ha)->dbg_level & 0x0400) { \
579 device_printf ((ha)->pci_dev, \
581 __func__, __LINE__, \
586 #define QL_DPRINT12(ha, x, ...) \
588 if ((ha)->dbg_level & 0x0800) { \
589 device_printf ((ha)->pci_dev, \
591 __func__, __LINE__, \
596 #define QL_DPRINT13(ha, x, ...) \
598 if ((ha)->dbg_level & 0x1000) { \
599 device_printf ((ha)->pci_dev, \
601 __func__, __LINE__, \
/* Debug printing disabled (!QLNX_DEBUG): all QL_DPRINTx() compile away. */
#define QL_DPRINT1(ha, x, ...)
#define QL_DPRINT2(ha, x, ...)
#define QL_DPRINT3(ha, x, ...)
#define QL_DPRINT4(ha, x, ...)
#define QL_DPRINT5(ha, x, ...)
#define QL_DPRINT6(ha, x, ...)
#define QL_DPRINT7(ha, x, ...)
#define QL_DPRINT8(ha, x, ...)
#define QL_DPRINT9(ha, x, ...)
#define QL_DPRINT11(ha, x, ...)
#define QL_DPRINT12(ha, x, ...)
#define QL_DPRINT13(ha, x, ...)
622 #endif /* #ifdef QLNX_DEBUG */
/*
 * Panic with message tuple "y" when condition "x" is false; "y" must be
 * a parenthesized panic(9) argument list, e.g. ("bad idx %d\n", i).
 * Wrapped in do/while(0) and with "x" parenthesized so the macro
 * composes safely inside if/else and with compound conditions.
 */
#define QL_ASSERT(ha, x, y)	do { if (!(x)) panic y; } while (0)
/*
 * Error-injection test hooks: QL_ERR_INJECT() checks whether a given
 * fault is armed in ha->err_inject; QL_RESET_ERR_INJECT() disarms it.
 * Arguments are now parenthesized and the statement macro uses the
 * do/while(0) idiom (safe under if/else) instead of a bare brace block.
 */
#define QL_ERR_INJECT(ha, val)		((ha)->err_inject == (val))
#define QL_RESET_ERR_INJECT(ha, val)	\
	do { if ((ha)->err_inject == (val)) (ha)->err_inject = 0; } while (0)
#define QL_ERR_INJCT_TX_INT_DIFF	0x0001	/* force tx cons-index mismatch */
#define QL_ERR_INJCT_TX_INT_MBUF_NULL	0x0002	/* force NULL mbuf on tx completion */
635 extern int qlnx_make_cdev(qlnx_host_t *ha);
636 extern void qlnx_del_cdev(qlnx_host_t *ha);
637 extern int qlnx_grc_dump(qlnx_host_t *ha, uint32_t *num_dumped_dwords,
639 extern int qlnx_idle_chk(qlnx_host_t *ha, uint32_t *num_dumped_dwords,
641 extern uint8_t *qlnx_get_mac_addr(qlnx_host_t *ha);
642 extern void qlnx_fill_link(struct ecore_hwfn *hwfn,
643 struct qlnx_link_output *if_link);
646 * Some OS specific stuff
649 #if (defined IFM_100G_SR4)
650 #define QLNX_IFM_100G_SR4 IFM_100G_SR4
651 #define QLNX_IFM_100G_LR4 IFM_100G_LR4
652 #define QLNX_IFM_100G_CR4 IFM_100G_CR4
654 #define QLNX_IFM_100G_SR4 IFM_UNKNOWN
655 #define QLNX_IFM_100G_LR4 IFM_UNKNOWN
658 #if (defined IFM_25G_SR)
659 #define QLNX_IFM_25G_SR IFM_25G_SR
660 #define QLNX_IFM_25G_CR IFM_25G_CR
662 #define QLNX_IFM_25G_SR IFM_UNKNOWN
663 #define QLNX_IFM_25G_CR IFM_UNKNOWN
#if __FreeBSD_version < 1100000

/* Pre-11 FreeBSD: bump the legacy struct ifnet counter fields directly. */
#define QLNX_INC_IERRORS(ifp)		ifp->if_ierrors++
#define QLNX_INC_IQDROPS(ifp)		ifp->if_iqdrops++
#define QLNX_INC_IPACKETS(ifp)		ifp->if_ipackets++
#define QLNX_INC_OPACKETS(ifp)		ifp->if_opackets++
#define QLNX_INC_OBYTES(ifp, len)	ifp->if_obytes += len
#define QLNX_INC_IBYTES(ifp, len)	ifp->if_ibytes += len
/* FreeBSD >= 11: statistics go through if_inc_counter(9). */
#define QLNX_INC_IERRORS(ifp)	if_inc_counter(ifp, IFCOUNTER_IERRORS, 1)
#define QLNX_INC_IQDROPS(ifp)	if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1)
#define QLNX_INC_IPACKETS(ifp)	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1)
#define QLNX_INC_OPACKETS(ifp)	if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1)

#define QLNX_INC_OBYTES(ifp, len)	\
		if_inc_counter(ifp, IFCOUNTER_OBYTES, len)
/*
 * Bug fix: QLNX_INC_IBYTES previously expanded to
 * if_inc_counter(ha->ifp, ...), ignoring its "ifp" argument and
 * silently requiring a variable named "ha" in the caller's scope.
 * It now uses the argument, matching QLNX_INC_OBYTES.
 */
#define QLNX_INC_IBYTES(ifp, len)	\
		if_inc_counter(ifp, IFCOUNTER_IBYTES, len)
688 #endif /* #if __FreeBSD_version < 1100000 */
/* True when the CQE parsing flags identify an IPv4 or IPv6 frame. */
#define CQE_L3_PACKET(flags) \
	((((flags) & PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == e_l3Type_ipv4) || \
	(((flags) & PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == e_l3Type_ipv6))

/* Nonzero when the CQE reports an IP header error. */
#define CQE_IP_HDR_ERR(flags) \
	((flags) & (PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK \
		<< PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT))

/* Nonzero when hardware calculated an L4 checksum for this frame. */
#define CQE_L4_HAS_CSUM(flags) \
	((flags) & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK \
		<< PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT))

/* Nonzero when the frame carried an 802.1Q VLAN tag. */
#define CQE_HAS_VLAN(flags) \
	((flags) & (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK \
		<< PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT))
707 #endif /* #ifndef _QLNX_DEF_H_ */