2 * Copyright (c) 2017-2018 Cavium, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
35 * Author : David C Somayajulu, Cavium Inc., San Jose, CA 95131.
typedef struct qlnx_ivec qlnx_ivec_t;	/* per-interrupt-vector state (defined elsewhere) */
/* Maximum / default number of RSS (fastpath) queues supported. */
//#define QLNX_MAX_RSS 30
#define QLNX_MAX_RSS 36
#define QLNX_DEFAULT_RSS 16
/* Combine 32-bit hi/lo halves (as read from HW descriptors) into a u64. */
#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
#define BITS_PER_BYTE 8
#endif /* #ifndef BITS_PER_BYTE */
 * RX ring buffer contains pointer to kmalloc() data only,
	QLNX_AGG_STATE_NONE = 0,	/* aggregation slot idle */
	QLNX_AGG_STATE_START = 1,	/* TPA_START seen; aggregation in progress */
	QLNX_AGG_STATE_ERROR = 2	/* error observed; discard at TPA_END */
/* Per-aggregation TPA (hardware LRO) tracking state. */
struct qlnx_agg_info {
	/* rx_buf is a data buffer that can be placed /consumed from rx bd
	 * chain. It has two purposes: We will preallocate the data buffer
	 * for each aggregation when we open the interface and will place this
	 * buffer on the rx-bd-ring when we receive TPA_START. We don't want
	 * to be in a state where allocation fails, as we can't reuse the
	 * consumer buffer in the rx-chain since FW may still be writing to it
	 * (since header needs to be modified for TPA).
	 * The second purpose is to keep a pointer to the bd buffer during
	struct sw_rx_data rx_buf;	/* preallocated buffer for this aggregation */
	enum qlnx_agg_state agg_state;	/* current state of this aggregation */
	uint16_t placement_offset;	/* payload offset within rx_buf */
	struct mbuf *mpf; /* first mbuf in chain */
	struct mbuf *mpl; /* last mbuf in chain */
/* Descriptor ring sizes; kept as powers of two. */
#define RX_RING_SIZE_POW 13
#define RX_RING_SIZE (1 << RX_RING_SIZE_POW)	/* 8192 entries */
#define TX_RING_SIZE_POW 14
#define TX_RING_SIZE (1 << TX_RING_SIZE_POW)	/* 16384 entries */
/* Per-fastpath receive queue state. */
struct qlnx_rx_queue {
	volatile __le16 *hw_cons_ptr;	/* HW consumer index (little-endian) */
	struct sw_rx_data sw_rx_ring[RX_RING_SIZE];	/* host-side rx buffer ring */
	struct ecore_chain rx_bd_ring;	/* rx buffer-descriptor chain */
	struct ecore_chain rx_comp_ring;	/* rx completion chain */
	void __iomem *hw_rxq_prod_addr;	/* doorbell address for rx producer */
	struct qlnx_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];	/* TPA/LRO aggregation slots */
	uint32_t rx_buf_size;	/* size of each rx buffer, bytes */
	uint16_t num_rx_buffers;	/* number of rx buffers */
	struct eth_db_data data;	/* doorbell data template (enclosing struct not visible here) */
/* Set on the first BD descriptor when there is a split BD */
#define QLNX_TSO_SPLIT_BD (1<<0)
#define QLNX_MAX_SEGMENTS 255	/* max DMA segments per tx mbuf chain */
/* Per-fastpath transmit queue state. */
struct qlnx_tx_queue {
	int index; /* Queue index */
	volatile __le16 *hw_cons_ptr;	/* HW consumer index (little-endian) */
	struct sw_tx_bd sw_tx_ring[TX_RING_SIZE];	/* host-side tx bookkeeping ring */
	struct ecore_chain tx_pbl;	/* tx descriptor chain (PBL mode) */
	void __iomem *doorbell_addr;	/* tx doorbell register address */
	bus_dma_segment_t segs[QLNX_MAX_SEGMENTS];	/* scratch DMA segment list for mapping */
	uint16_t num_tx_buffers;	/* number of tx buffers */
/* Read back the DMA address / length stored in a tx BD (stored little-endian). */
#define BD_UNMAP_ADDR(bd) HILO_U64(le32toh((bd)->addr.hi), \
			le32toh((bd)->addr.lo))
#define BD_UNMAP_LEN(bd) (le16toh((bd)->nbytes))
/* Program a tx BD with a bus address and length, converting to little-endian. */
#define BD_SET_UNMAP_ADDR_LEN(bd, maddr, len) \
	(bd)->addr.hi = htole32(U64_HI(maddr)); \
	(bd)->addr.lo = htole32(U64_LO(maddr)); \
	(bd)->nbytes = htole16(len); \
#define QLNX_FP_MAX_SEGS 24	/* size of the per-fastpath tx_pkts[] stats arrays below */
/* Per-fastpath (per-RSS-queue) context: one rx queue + per-TC tx queues + stats. */
struct qlnx_fastpath {
	struct ecore_sb_info *sb_info;	/* status block for this fastpath */
	struct qlnx_rx_queue *rxq;	/* receive queue */
	struct qlnx_tx_queue *txq[MAX_NUM_TC];	/* one tx queue per traffic class */
	char tx_mtx_name[32];	/* name buffer for the tx mutex */
	struct buf_ring *tx_br;	/* software transmit buf_ring */
	uint32_t tx_ring_full;	/* tx-ring-full event count */
	struct taskqueue *fp_taskqueue;	/* taskqueue servicing this fastpath */
	/* transmit statistics */
	uint64_t tx_pkts_processed;
	uint64_t tx_pkts_freed;
	uint64_t tx_pkts_transmitted;
	uint64_t tx_pkts_completed;
	uint64_t tx_tso_pkts;
	uint64_t tx_non_tso_pkts;
#ifdef QLNX_TRACE_PERF_DATA
	uint64_t tx_pkts_trans_ctx;
	uint64_t tx_pkts_compl_ctx;
	uint64_t tx_pkts_trans_fp;
	uint64_t tx_pkts_compl_fp;
	uint64_t tx_pkts_compl_intr;
	uint64_t tx_lso_wnd_min_len;
	uint64_t tx_nsegs_gt_elem_left;
	uint32_t tx_tso_max_nsegs;
	uint32_t tx_tso_min_nsegs;
	uint32_t tx_tso_max_pkt_len;
	uint32_t tx_tso_min_pkt_len;
	uint64_t tx_pkts[QLNX_FP_MAX_SEGS];	/* tx counts bucketed by segment count */
#ifdef QLNX_TRACE_PERF_DATA
	uint64_t tx_pkts_hist[QLNX_FP_MAX_SEGS];
	uint64_t tx_comInt[QLNX_FP_MAX_SEGS];
	uint64_t tx_pkts_q[QLNX_FP_MAX_SEGS];
	uint64_t err_tx_nsegs_gt_elem_left;	/* transmit error counters */
	uint64_t err_tx_dmamap_create;
	uint64_t err_tx_defrag_dmamap_load;
	uint64_t err_tx_non_tso_max_seg;
	uint64_t err_tx_dmamap_load;
	uint64_t err_tx_defrag;
	uint64_t err_tx_free_pkt_null;
	uint64_t err_tx_cons_idx_conflict;
	uint64_t lro_cnt_128;
	uint64_t lro_cnt_256;
	uint64_t lro_cnt_512;
	uint64_t lro_cnt_1024;
	/* receive statistics */
	uint64_t err_m_getcl;
	uint64_t err_m_getjcl;
	uint64_t err_rx_hw_errors;
	uint64_t err_rx_alloc_errors;
	uint64_t err_rx_jumbo_chain_pkts;
	uint64_t err_rx_mp_null;
	uint64_t err_rx_tpa_invalid_agg_num;
/* Vport update request.  NOTE(review): the update_*_flg fields presumably
 * gate whether the matching value field is applied (ecore convention) —
 * verify against the ecore vport-update API. */
struct qlnx_update_vport_params {
	uint8_t update_vport_active_rx_flg;
	uint8_t vport_active_rx_flg;
	uint8_t update_vport_active_tx_flg;
	uint8_t vport_active_tx_flg;
	uint8_t update_inner_vlan_removal_flg;
	uint8_t inner_vlan_removal_flg;
	struct ecore_rss_params *rss_params;	/* optional RSS configuration */
	struct ecore_sge_tpa_params *sge_tpa_params;	/* optional TPA configuration */
/* Snapshot of link state as reported to the OS. */
struct qlnx_link_output {
	uint32_t supported_caps;	/* QLNX_LINK_CAP_* bits supported locally */
	uint32_t advertised_caps;	/* QLNX_LINK_CAP_* bits advertised */
	uint32_t link_partner_caps;	/* QLNX_LINK_CAP_* bits from link partner */
	uint32_t speed; /* In Mb/s */
typedef struct qlnx_link_output qlnx_link_output_t;
#define QLNX_LINK_DUPLEX 0x0001
/* Capability bits used in the *_caps fields above. */
#define QLNX_LINK_CAP_FIBRE 0x0001
#define QLNX_LINK_CAP_Autoneg 0x0002
#define QLNX_LINK_CAP_Pause 0x0004
#define QLNX_LINK_CAP_Asym_Pause 0x0008
#define QLNX_LINK_CAP_1000baseT_Half 0x0010
#define QLNX_LINK_CAP_1000baseT_Full 0x0020
#define QLNX_LINK_CAP_10000baseKR_Full 0x0040
#define QLNX_LINK_CAP_25000baseKR_Full 0x0080
#define QLNX_LINK_CAP_40000baseLR4_Full 0x0100
#define QLNX_LINK_CAP_50000baseKR2_Full 0x0200
#define QLNX_LINK_CAP_100000baseKR4_Full 0x0400
/* Functions definition */
#define XMIT_L4_CSUM (1 << 0)	/* request L4 checksum offload for this packet */
#define XMIT_LSO (1 << 1)	/* request TSO/LSO for this packet */
/* Union of all IP / L4 / tunnel header+checksum error bits in a CQE's
 * parsing_and_err_flags word. */
#define CQE_FLAGS_ERR (PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK << \
		PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT | \
		PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK << \
		PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT | \
		PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK << \
		PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT | \
		PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK << \
		PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT)
#define RX_COPY_THRESH 92	/* bytes; threshold for copying small rx frames */
#define ETH_MAX_PACKET_SIZE 1500	/* standard Ethernet MTU, bytes */
#define QLNX_MFW_VERSION_LENGTH 32
#define QLNX_STORMFW_VERSION_LENGTH 32
/* Transmit BD-chain element accounting thresholds. */
#define QLNX_TX_ELEM_RESERVE 2
#define QLNX_TX_ELEM_THRESH 128
#define QLNX_TX_ELEM_MAX_THRESH 512
#define QLNX_TX_ELEM_MIN_THRESH 32
#define QLNX_TX_COMPL_THRESH 32
#define QLNX_TPA_MAX_AGG_BUFFERS (20)	/* max buffers per TPA aggregation */
#define QLNX_MAX_NUM_MULTICAST_ADDRS ECORE_MAX_MC_ADDRS
typedef struct _qlnx_mcast {
} __packed qlnx_mcast_t;
 * Adapter structure contains the hardware independent information of the
	/* interface to ecore */
	struct ecore_dev cdev;	/* ecore device context */
	/* interface to o.s */
	volatile int link_up;	/* nonzero while link is up */
	struct ifmedia media;
	uint16_t max_frame_size;
	struct cdev *ioctl_dev;	/* character device for ioctl access */
	struct resource *pci_reg;	/* register BAR */
	struct resource *pci_dbells;	/* doorbell BAR */
	uint64_t dbells_phys_addr;	/* physical address of doorbell BAR */
	uint32_t dbells_size;	/* size of doorbell BAR, bytes */
	struct resource *msix_bar;
	uint32_t dbg_trace_lro_cnt;
	uint32_t dbg_trace_tso_pkt_len;
	uint8_t mfw_ver[QLNX_MFW_VERSION_LENGTH];	/* management FW version string */
	uint8_t stormfw_ver[QLNX_STORMFW_VERSION_LENGTH];	/* storm FW version string */
	bus_dma_tag_t parent_tag;	/* parent DMA tag for all allocations */
	bus_dma_tag_t tx_tag;
	bus_dma_tag_t rx_tag;
	struct ecore_sb_info sb_array[QLNX_MAX_RSS];	/* per-queue status blocks */
	struct qlnx_rx_queue rxq_array[QLNX_MAX_RSS];
	struct qlnx_tx_queue txq_array[(QLNX_MAX_RSS * MAX_NUM_TC)];
	struct qlnx_fastpath fp_array[QLNX_MAX_RSS];
	struct callout tx_callout;
	uint32_t rx_pkt_threshold;
	uint32_t rx_jumbo_buf_eq_mtu;
	/* slow path related */
	struct resource *sp_irq[MAX_HWFNS_PER_DEVICE];
	void *sp_handle[MAX_HWFNS_PER_DEVICE];
	int sp_irq_rid[MAX_HWFNS_PER_DEVICE];
	struct task sp_task[MAX_HWFNS_PER_DEVICE];
	struct taskqueue *sp_taskqueue[MAX_HWFNS_PER_DEVICE];
	struct callout qlnx_callout;
	/* fast path related */
#define QLNX_MAX_TSS_CNT(ha) ((ha->num_rss) * (ha->num_tc))
	qlnx_ivec_t irq_vec[QLNX_MAX_RSS];	/* per-queue interrupt vectors */
	qlnx_mcast_t mcast[QLNX_MAX_NUM_MULTICAST_ADDRS];	/* multicast filter table */
	struct ecore_filter_mcast ecore_mcast;
	uint8_t primary_mac[ETH_ALEN];
	uint8_t prio_to_tc[MAX_NUM_PRI];	/* priority -> traffic class mapping */
	struct ecore_eth_stats hw_stats;
	struct ecore_rss_params rss_params;
	uint32_t rx_buf_size;
	bool rx_csum_offload;
	uint32_t rx_coalesce_usecs;	/* rx interrupt coalescing interval */
	uint32_t tx_coalesce_usecs;	/* tx interrupt coalescing interval */
	qlnx_link_output_t if_link;	/* last reported link state */
	/* global counters */
	uint64_t sp_interrupts;
	uint64_t err_illegal_intr;
	uint64_t err_fp_null;
	uint64_t err_get_proto_invalid_type;
	/* grcdump related */
	uint32_t grcdump_taken;	/* nonzero once a GRC dump has been captured */
	uint32_t grcdump_dwords[QLNX_MAX_HW_FUNCS];
	uint32_t grcdump_size[QLNX_MAX_HW_FUNCS];
	void *grcdump[QLNX_MAX_HW_FUNCS];
	uint32_t idle_chk_taken;	/* nonzero once an idle-check dump has been captured */
	uint32_t idle_chk_dwords[QLNX_MAX_HW_FUNCS];
	uint32_t idle_chk_size[QLNX_MAX_HW_FUNCS];
	void *idle_chk[QLNX_MAX_HW_FUNCS];
	/* storm stats related */
#define QLNX_STORM_STATS_TOTAL \
		(QLNX_MAX_HW_FUNCS * QLNX_STORM_STATS_SAMPLES_PER_HWFN)
	qlnx_storm_stats_t storm_stats[QLNX_STORM_STATS_TOTAL];
	uint32_t storm_stats_index;
	uint32_t storm_stats_enable;
	uint32_t storm_stats_gather;
	uint32_t personality;
typedef struct qlnx_host qlnx_host_t;
/* Round size up to a multiple of align; align must be a power of 2. */
#define QL_ALIGN(size, align) (((size) + ((align) - 1)) & (~((align) - 1)))
490 #define QL_MIN(x, y) ((x < y) ? x : y)
/* True when the interface is marked running (and not output-blocked). */
#define QL_RUNNING(ifp) \
	((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == \
#define QLNX_MAX_MTU 9000	/* jumbo-frame MTU limit, bytes */
#define QLNX_MAX_SEGMENTS_NON_TSO (ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1)
//#define QLNX_MAX_TSO_FRAME_SIZE ((64 * 1024 - 1) + 22)
#define QLNX_MAX_TSO_FRAME_SIZE 65536
#define QLNX_MAX_TX_MBUF_SIZE 65536 /* bytes - bd_len = 16bits */
/*
 * Compare two 6-byte MAC addresses; evaluates to 0 on match, 1 otherwise
 * (memcmp-style result).  Arguments are parenthesized so that pointer
 * expressions passed as arguments expand safely.  NOTE(review): reads the
 * addresses as a 32-bit + 16-bit word, so both pointers should be suitably
 * aligned for those accesses.
 */
#define QL_MAC_CMP(mac1, mac2) \
	((((*(uint32_t *) (mac1)) == (*(uint32_t *) (mac2)) && \
	(*(uint16_t *)((mac1) + 4)) == (*(uint16_t *)((mac2) + 4)))) ? 0 : 1)

/* Iterate i over all active RSS (fastpath) queues; requires 'ha' in scope. */
#define for_each_rss(i) for (i = 0; i < ha->num_rss; i++)
/*
 * Debug print macros: QL_DPRINTn emits via device_printf() when the
 * corresponding bit of ha->dbg_level is set; with QLNX_DEBUG undefined
 * they all compile to nothing (see the empty definitions below).
 */
#define QL_DPRINT1(ha, x, ...) \
	if ((ha)->dbg_level & 0x0001) { \
		device_printf ((ha)->pci_dev, \
			__func__, __LINE__, \
#define QL_DPRINT2(ha, x, ...) \
	if ((ha)->dbg_level & 0x0002) { \
		device_printf ((ha)->pci_dev, \
			__func__, __LINE__, \
#define QL_DPRINT3(ha, x, ...) \
	if ((ha)->dbg_level & 0x0004) { \
		device_printf ((ha)->pci_dev, \
			__func__, __LINE__, \
#define QL_DPRINT4(ha, x, ...) \
	if ((ha)->dbg_level & 0x0008) { \
		device_printf ((ha)->pci_dev, \
			__func__, __LINE__, \
#define QL_DPRINT5(ha, x, ...) \
	if ((ha)->dbg_level & 0x0010) { \
		device_printf ((ha)->pci_dev, \
			__func__, __LINE__, \
#define QL_DPRINT6(ha, x, ...) \
	if ((ha)->dbg_level & 0x0020) { \
		device_printf ((ha)->pci_dev, \
			__func__, __LINE__, \
#define QL_DPRINT7(ha, x, ...) \
	if ((ha)->dbg_level & 0x0040) { \
		device_printf ((ha)->pci_dev, \
			__func__, __LINE__, \
#define QL_DPRINT8(ha, x, ...) \
	if ((ha)->dbg_level & 0x0080) { \
		device_printf ((ha)->pci_dev, \
			__func__, __LINE__, \
#define QL_DPRINT9(ha, x, ...) \
	if ((ha)->dbg_level & 0x0100) { \
		device_printf ((ha)->pci_dev, \
			__func__, __LINE__, \
#define QL_DPRINT11(ha, x, ...) \
	if ((ha)->dbg_level & 0x0400) { \
		device_printf ((ha)->pci_dev, \
			__func__, __LINE__, \
#define QL_DPRINT12(ha, x, ...) \
	if ((ha)->dbg_level & 0x0800) { \
		device_printf ((ha)->pci_dev, \
			__func__, __LINE__, \
#define QL_DPRINT13(ha, x, ...) \
	if ((ha)->dbg_level & 0x1000) { \
		device_printf ((ha)->pci_dev, \
			__func__, __LINE__, \
/* QLNX_DEBUG not defined: debug prints compile away entirely. */
#define QL_DPRINT1(ha, x, ...)
#define QL_DPRINT2(ha, x, ...)
#define QL_DPRINT3(ha, x, ...)
#define QL_DPRINT4(ha, x, ...)
#define QL_DPRINT5(ha, x, ...)
#define QL_DPRINT6(ha, x, ...)
#define QL_DPRINT7(ha, x, ...)
#define QL_DPRINT8(ha, x, ...)
#define QL_DPRINT9(ha, x, ...)
#define QL_DPRINT11(ha, x, ...)
#define QL_DPRINT12(ha, x, ...)
#define QL_DPRINT13(ha, x, ...)
#endif /* #ifdef QLNX_DEBUG */
/*
 * Kernel assertion: panic with the message tuple y when condition x is
 * false.  The condition is parenthesized (the old `!x` mis-expanded for
 * conditions such as `a == b`) and the body is wrapped in do/while so the
 * macro behaves as a single statement after if/else.
 */
#define QL_ASSERT(ha, x, y) do { if (!(x)) panic y; } while (0)

/* Error-injection hooks: test for / clear an injected fault code. */
#define QL_ERR_INJECT(ha, val) ((ha)->err_inject == (val))
#define QL_RESET_ERR_INJECT(ha, val) \
	do { if ((ha)->err_inject == (val)) (ha)->err_inject = 0; } while (0)
#define QL_ERR_INJCT_TX_INT_DIFF 0x0001
#define QL_ERR_INJCT_TX_INT_MBUF_NULL 0x0002
/* Driver-internal helper prototypes (implemented elsewhere in the driver). */
extern int qlnx_make_cdev(qlnx_host_t *ha);	/* create the ioctl character device */
extern void qlnx_del_cdev(qlnx_host_t *ha);	/* destroy the ioctl character device */
extern int qlnx_grc_dump(qlnx_host_t *ha, uint32_t *num_dumped_dwords,
extern int qlnx_idle_chk(qlnx_host_t *ha, uint32_t *num_dumped_dwords,
extern uint8_t *qlnx_get_mac_addr(qlnx_host_t *ha);
extern void qlnx_fill_link(struct ecore_hwfn *hwfn,
		struct qlnx_link_output *if_link);
 * Some OS specific stuff
#if (defined IFM_100G_SR4)	/* media types absent on older FreeBSD releases */
#define QLNX_IFM_100G_SR4 IFM_100G_SR4
#define QLNX_IFM_100G_LR4 IFM_100G_LR4
#define QLNX_IFM_100G_CR4 IFM_100G_CR4
#define QLNX_IFM_100G_SR4 IFM_UNKNOWN
#define QLNX_IFM_100G_LR4 IFM_UNKNOWN
#if (defined IFM_25G_SR)	/* 25G media types likewise may be absent */
#define QLNX_IFM_25G_SR IFM_25G_SR
#define QLNX_IFM_25G_CR IFM_25G_CR
#define QLNX_IFM_25G_SR IFM_UNKNOWN
#define QLNX_IFM_25G_CR IFM_UNKNOWN
#if __FreeBSD_version < 1100000
/* Pre-FreeBSD-11 ifnet API: bump the legacy counters directly. */
#define QLNX_INC_IERRORS(ifp) ifp->if_ierrors++
#define QLNX_INC_IQDROPS(ifp) ifp->if_iqdrops++
#define QLNX_INC_IPACKETS(ifp) ifp->if_ipackets++
#define QLNX_INC_OPACKETS(ifp) ifp->if_opackets++
#define QLNX_INC_OBYTES(ifp, len) ifp->if_obytes += len
#define QLNX_INC_IBYTES(ifp, len) ifp->if_ibytes += len
/* FreeBSD 11+: use the if_inc_counter() accessor. */
#define QLNX_INC_IERRORS(ifp) if_inc_counter(ifp, IFCOUNTER_IERRORS, 1)
#define QLNX_INC_IQDROPS(ifp) if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1)
#define QLNX_INC_IPACKETS(ifp) if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1)
#define QLNX_INC_OPACKETS(ifp) if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1)
#define QLNX_INC_OBYTES(ifp, len) \
	if_inc_counter(ifp, IFCOUNTER_OBYTES, len)
/*
 * Count received bytes.  Fixed to use the ifp argument: the old body
 * hardcoded ha->ifp, silently requiring a local 'ha' at every expansion
 * site and ignoring the parameter.
 */
#define QLNX_INC_IBYTES(ifp, len) \
	if_inc_counter(ifp, IFCOUNTER_IBYTES, len)
716 #endif /* #if __FreeBSD_version < 1100000 */
/* Nonzero when the CQE parsing flags classify the frame as IPv4 or IPv6. */
#define CQE_L3_PACKET(flags) \
	((((flags) & PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == e_l3_type_ipv4) || \
	(((flags) & PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == e_l3_type_ipv6))
/* Nonzero when the CQE reports an IP header error. */
#define CQE_IP_HDR_ERR(flags) \
	((flags) & (PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK \
	<< PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT))
/* Nonzero when hardware computed an L4 checksum for this frame. */
#define CQE_L4_HAS_CSUM(flags) \
	((flags) & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK \
	<< PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT))
/* Nonzero when the frame carried an 802.1Q VLAN tag. */
#define CQE_HAS_VLAN(flags) \
	((flags) & (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK \
	<< PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT))
#if defined(__i386__) || defined(__amd64__)
/* Prefetch the cache line containing x (x86/amd64 only). */
void prefetch(void *x)
	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));	/* hint into all cache levels */
#endif /* #ifndef _QLNX_DEF_H_ */