2 * Broadcom NetXtreme-C/E network driver.
4 * Copyright (c) 2016 Broadcom, All Rights Reserved.
5 * The term Broadcom refers to Broadcom Limited and/or its subsidiaries
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26 * THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/param.h>
30 #include <sys/socket.h>
31 #include <sys/kernel.h>
33 #include <sys/module.h>
35 #include <sys/endian.h>
36 #include <sys/sockio.h>
39 #include <machine/bus.h>
40 #include <machine/resource.h>
42 #include <dev/pci/pcireg.h>
43 #include <dev/pci/pcivar.h>
46 #include <net/if_dl.h>
47 #include <net/if_media.h>
48 #include <net/if_var.h>
49 #include <net/ethernet.h>
50 #include <net/iflib.h>
53 #include "opt_inet6.h"
59 #include "bnxt_hwrm.h"
60 #include "bnxt_ioctl.h"
61 #include "bnxt_sysctl.h"
62 #include "hsi_struct_def.h"
63 #include "bnxt_mgmt.h"
/*
 * PCI vendor/device ID table consumed by iflib for device probing.
 * Each PVID() entry pairs a Broadcom device ID with its probe string.
 * NOTE(review): the table's opening brace and the terminating PVID_END
 * sentinel are outside this excerpt — confirm against the full file.
 */
69 static const pci_vendor_info_t bnxt_vendor_info_array[] =
71 PVID(BROADCOM_VENDOR_ID, BCM57301,
72 "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet Controller"),
73 PVID(BROADCOM_VENDOR_ID, BCM57302,
74 "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet Controller"),
75 PVID(BROADCOM_VENDOR_ID, BCM57304,
76 "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet Controller"),
77 PVID(BROADCOM_VENDOR_ID, BCM57311,
78 "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet"),
79 PVID(BROADCOM_VENDOR_ID, BCM57312,
80 "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet"),
81 PVID(BROADCOM_VENDOR_ID, BCM57314,
82 "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet"),
83 PVID(BROADCOM_VENDOR_ID, BCM57402,
84 "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet Controller"),
85 PVID(BROADCOM_VENDOR_ID, BCM57402_NPAR,
86 "Broadcom BCM57402 NetXtreme-E Partition"),
87 PVID(BROADCOM_VENDOR_ID, BCM57404,
88 "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet Controller"),
89 PVID(BROADCOM_VENDOR_ID, BCM57404_NPAR,
90 "Broadcom BCM57404 NetXtreme-E Partition"),
91 PVID(BROADCOM_VENDOR_ID, BCM57406,
92 "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet Controller"),
93 PVID(BROADCOM_VENDOR_ID, BCM57406_NPAR,
94 "Broadcom BCM57406 NetXtreme-E Partition"),
95 PVID(BROADCOM_VENDOR_ID, BCM57407,
96 "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet Controller"),
97 PVID(BROADCOM_VENDOR_ID, BCM57407_NPAR,
98 "Broadcom BCM57407 NetXtreme-E Ethernet Partition"),
99 PVID(BROADCOM_VENDOR_ID, BCM57407_SFP,
100 "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet Controller"),
101 PVID(BROADCOM_VENDOR_ID, BCM57412,
102 "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet"),
103 PVID(BROADCOM_VENDOR_ID, BCM57412_NPAR1,
104 "Broadcom BCM57412 NetXtreme-E Ethernet Partition"),
105 PVID(BROADCOM_VENDOR_ID, BCM57412_NPAR2,
106 "Broadcom BCM57412 NetXtreme-E Ethernet Partition"),
107 PVID(BROADCOM_VENDOR_ID, BCM57414,
108 "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet"),
109 PVID(BROADCOM_VENDOR_ID, BCM57414_NPAR1,
110 "Broadcom BCM57414 NetXtreme-E Ethernet Partition"),
111 PVID(BROADCOM_VENDOR_ID, BCM57414_NPAR2,
112 "Broadcom BCM57414 NetXtreme-E Ethernet Partition"),
113 PVID(BROADCOM_VENDOR_ID, BCM57416,
114 "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet"),
115 PVID(BROADCOM_VENDOR_ID, BCM57416_NPAR1,
116 "Broadcom BCM57416 NetXtreme-E Ethernet Partition"),
117 PVID(BROADCOM_VENDOR_ID, BCM57416_NPAR2,
118 "Broadcom BCM57416 NetXtreme-E Ethernet Partition"),
119 PVID(BROADCOM_VENDOR_ID, BCM57416_SFP,
120 "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet"),
121 PVID(BROADCOM_VENDOR_ID, BCM57417,
122 "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet"),
123 PVID(BROADCOM_VENDOR_ID, BCM57417_NPAR1,
124 "Broadcom BCM57417 NetXtreme-E Ethernet Partition"),
125 PVID(BROADCOM_VENDOR_ID, BCM57417_NPAR2,
126 "Broadcom BCM57417 NetXtreme-E Ethernet Partition"),
127 PVID(BROADCOM_VENDOR_ID, BCM57417_SFP,
128 "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet"),
129 PVID(BROADCOM_VENDOR_ID, BCM57454,
130 "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet"),
131 PVID(BROADCOM_VENDOR_ID, BCM58700,
132 "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet"),
/* BCM575xx are the P5 ("Thor") generation handled specially elsewhere
 * in this file via BNXT_CHIP_P5(). */
133 PVID(BROADCOM_VENDOR_ID, BCM57508,
134 "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet"),
135 PVID(BROADCOM_VENDOR_ID, BCM57504,
136 "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet"),
137 PVID(BROADCOM_VENDOR_ID, BCM57502,
138 "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet"),
139 PVID(BROADCOM_VENDOR_ID, NETXTREME_C_VF1,
140 "Broadcom NetXtreme-C Ethernet Virtual Function"),
141 PVID(BROADCOM_VENDOR_ID, NETXTREME_C_VF2,
142 "Broadcom NetXtreme-C Ethernet Virtual Function"),
143 PVID(BROADCOM_VENDOR_ID, NETXTREME_C_VF3,
144 "Broadcom NetXtreme-C Ethernet Virtual Function"),
145 PVID(BROADCOM_VENDOR_ID, NETXTREME_E_VF1,
146 "Broadcom NetXtreme-E Ethernet Virtual Function"),
147 PVID(BROADCOM_VENDOR_ID, NETXTREME_E_VF2,
148 "Broadcom NetXtreme-E Ethernet Virtual Function"),
149 PVID(BROADCOM_VENDOR_ID, NETXTREME_E_VF3,
150 "Broadcom NetXtreme-E Ethernet Virtual Function"),
151 /* required last entry */
157 * Function prototypes
/* List of attached physical-function softc's, and how many there are. */
160 SLIST_HEAD(softc_list, bnxt_softc_list) pf_list;
161 int bnxt_num_pfs = 0;
163 static void *bnxt_register(device_t dev);
165 /* Soft queue setup and teardown */
166 static int bnxt_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
167 uint64_t *paddrs, int ntxqs, int ntxqsets);
168 static int bnxt_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
169 uint64_t *paddrs, int nrxqs, int nrxqsets);
170 static void bnxt_queues_free(if_ctx_t ctx);
172 /* Device setup and teardown */
173 static int bnxt_attach_pre(if_ctx_t ctx);
174 static int bnxt_attach_post(if_ctx_t ctx);
175 static int bnxt_detach(if_ctx_t ctx);
177 /* Device configuration */
178 static void bnxt_init(if_ctx_t ctx);
179 static void bnxt_stop(if_ctx_t ctx);
180 static void bnxt_multi_set(if_ctx_t ctx);
181 static int bnxt_mtu_set(if_ctx_t ctx, uint32_t mtu);
182 static void bnxt_media_status(if_ctx_t ctx, struct ifmediareq * ifmr);
183 static int bnxt_media_change(if_ctx_t ctx);
184 static int bnxt_promisc_set(if_ctx_t ctx, int flags);
185 static uint64_t bnxt_get_counter(if_ctx_t, ift_counter);
186 static void bnxt_update_admin_status(if_ctx_t ctx);
187 static void bnxt_if_timer(if_ctx_t ctx, uint16_t qid);
189 /* Interrupt enable / disable */
190 static void bnxt_intr_enable(if_ctx_t ctx);
191 static int bnxt_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
192 static int bnxt_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
193 static void bnxt_disable_intr(if_ctx_t ctx);
194 static int bnxt_msix_intr_assign(if_ctx_t ctx, int msix);
/* VLAN hardware-filter registration hooks */
197 static void bnxt_vlan_register(if_ctx_t ctx, uint16_t vtag);
198 static void bnxt_vlan_unregister(if_ctx_t ctx, uint16_t vtag);
/* Driver-private ioctl entry point (see bnxt_ioctl.h) */
201 static int bnxt_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
/* Power management / shutdown */
203 static int bnxt_shutdown(if_ctx_t ctx);
204 static int bnxt_suspend(if_ctx_t ctx);
205 static int bnxt_resume(if_ctx_t ctx);
207 /* Internal support functions */
208 static int bnxt_probe_phy(struct bnxt_softc *softc);
209 static void bnxt_add_media_types(struct bnxt_softc *softc);
210 static int bnxt_pci_mapping(struct bnxt_softc *softc);
211 static void bnxt_pci_mapping_free(struct bnxt_softc *softc);
212 static int bnxt_update_link(struct bnxt_softc *softc, bool chng_link_state);
213 static int bnxt_handle_def_cp(void *arg);
214 static int bnxt_handle_isr(void *arg);
215 static void bnxt_clear_ids(struct bnxt_softc *softc);
216 static void inline bnxt_do_enable_intr(struct bnxt_cp_ring *cpr);
217 static void inline bnxt_do_disable_intr(struct bnxt_cp_ring *cpr);
218 static void bnxt_mark_cpr_invalid(struct bnxt_cp_ring *cpr);
219 static void bnxt_def_cp_task(void *context);
220 static void bnxt_handle_async_event(struct bnxt_softc *softc,
221 struct cmpl_base *cmpl);
222 static uint8_t get_phy_type(struct bnxt_softc *softc);
223 static uint64_t bnxt_get_baudrate(struct bnxt_link_info *link);
224 static void bnxt_get_wol_settings(struct bnxt_softc *softc);
225 static int bnxt_wol_config(if_ctx_t ctx);
226 static bool bnxt_if_needs_restart(if_ctx_t, enum iflib_restart_event);
229 * Device Interface Declaration
/*
 * newbus device method table: everything is delegated to the generic
 * iflib implementations except device_register, which hands iflib our
 * shared-context template.  NOTE(review): DEVMETHOD_END and the closing
 * brace are outside this excerpt.
 */
232 static device_method_t bnxt_methods[] = {
233 /* Device interface */
234 DEVMETHOD(device_register, bnxt_register),
235 DEVMETHOD(device_probe, iflib_device_probe),
236 DEVMETHOD(device_attach, iflib_device_attach),
237 DEVMETHOD(device_detach, iflib_device_detach),
238 DEVMETHOD(device_shutdown, iflib_device_shutdown),
239 DEVMETHOD(device_suspend, iflib_device_suspend),
240 DEVMETHOD(device_resume, iflib_device_resume),
/* newbus driver glue: attach "bnxt" on the pci bus with the method
 * table above; depends on pci, ether and iflib modules. */
244 static driver_t bnxt_driver = {
245 "bnxt", bnxt_methods, sizeof(struct bnxt_softc),
248 DRIVER_MODULE(bnxt, pci, bnxt_driver, 0, 0);
250 MODULE_DEPEND(bnxt, pci, 1, 1, 1);
251 MODULE_DEPEND(bnxt, ether, 1, 1, 1);
252 MODULE_DEPEND(bnxt, iflib, 1, 1, 1);
/* Export the vendor table for devmatch(8) plug-and-play loading. */
254 IFLIB_PNP_INFO(pci, bnxt, bnxt_vendor_info_array);
/*
 * iflib driver interface (ifdi) method table: maps the iflib callbacks
 * onto the bnxt_* implementations declared above.
 */
256 static device_method_t bnxt_iflib_methods[] = {
257 DEVMETHOD(ifdi_tx_queues_alloc, bnxt_tx_queues_alloc),
258 DEVMETHOD(ifdi_rx_queues_alloc, bnxt_rx_queues_alloc),
259 DEVMETHOD(ifdi_queues_free, bnxt_queues_free),
261 DEVMETHOD(ifdi_attach_pre, bnxt_attach_pre),
262 DEVMETHOD(ifdi_attach_post, bnxt_attach_post),
263 DEVMETHOD(ifdi_detach, bnxt_detach),
265 DEVMETHOD(ifdi_init, bnxt_init),
266 DEVMETHOD(ifdi_stop, bnxt_stop),
267 DEVMETHOD(ifdi_multi_set, bnxt_multi_set),
268 DEVMETHOD(ifdi_mtu_set, bnxt_mtu_set),
269 DEVMETHOD(ifdi_media_status, bnxt_media_status),
270 DEVMETHOD(ifdi_media_change, bnxt_media_change),
271 DEVMETHOD(ifdi_promisc_set, bnxt_promisc_set),
272 DEVMETHOD(ifdi_get_counter, bnxt_get_counter),
273 DEVMETHOD(ifdi_update_admin_status, bnxt_update_admin_status),
274 DEVMETHOD(ifdi_timer, bnxt_if_timer),
276 DEVMETHOD(ifdi_intr_enable, bnxt_intr_enable),
277 DEVMETHOD(ifdi_tx_queue_intr_enable, bnxt_tx_queue_intr_enable),
278 DEVMETHOD(ifdi_rx_queue_intr_enable, bnxt_rx_queue_intr_enable),
279 DEVMETHOD(ifdi_intr_disable, bnxt_disable_intr),
280 DEVMETHOD(ifdi_msix_intr_assign, bnxt_msix_intr_assign),
282 DEVMETHOD(ifdi_vlan_register, bnxt_vlan_register),
283 DEVMETHOD(ifdi_vlan_unregister, bnxt_vlan_unregister),
285 DEVMETHOD(ifdi_priv_ioctl, bnxt_priv_ioctl),
287 DEVMETHOD(ifdi_suspend, bnxt_suspend),
288 DEVMETHOD(ifdi_shutdown, bnxt_shutdown),
289 DEVMETHOD(ifdi_resume, bnxt_resume),
291 DEVMETHOD(ifdi_needs_restart, bnxt_if_needs_restart),
/* Inner driver_t referenced by the shared context below. */
296 static driver_t bnxt_iflib_driver = {
297 "bnxt", bnxt_iflib_methods, sizeof(struct bnxt_softc)
301 * iflib shared context
304 #define BNXT_DRIVER_VERSION "2.20.0.1"
305 const char bnxt_driver_version[] = BNXT_DRIVER_VERSION;
/* TX/RX fast-path entry points, defined in the bnxt txrx source file. */
306 extern struct if_txrx bnxt_txrx;
/*
 * iflib shared-context template returned from bnxt_register().  Sizes
 * are all bounded by BNXT_TSO_SIZE plus a VLAN Ethernet header; the
 * three per-direction descriptor counts are {completion, ring, second
 * ring (AG/NQ)} and default to whole pages of the respective HW
 * structures.
 */
307 static struct if_shared_ctx bnxt_sctx_init = {
308 .isc_magic = IFLIB_MAGIC,
309 .isc_driver = &bnxt_iflib_driver,
310 .isc_nfl = 2, // Number of Free Lists
311 .isc_flags = IFLIB_HAS_RXCQ | IFLIB_HAS_TXCQ | IFLIB_NEED_ETHER_PAD,
312 .isc_q_align = PAGE_SIZE,
313 .isc_tx_maxsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
314 .isc_tx_maxsegsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
315 .isc_tso_maxsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
316 .isc_tso_maxsegsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
317 .isc_rx_maxsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
318 .isc_rx_maxsegsize = BNXT_TSO_SIZE + sizeof(struct ether_vlan_header),
320 // Only use a single segment to avoid page size constraints
321 .isc_rx_nsegments = 1,
324 .isc_nrxd_min = {16, 16, 16},
325 .isc_nrxd_default = {PAGE_SIZE / sizeof(struct cmpl_base) * 8,
326 PAGE_SIZE / sizeof(struct rx_prod_pkt_bd),
327 PAGE_SIZE / sizeof(struct rx_prod_pkt_bd)},
328 .isc_nrxd_max = {BNXT_MAX_RXD, BNXT_MAX_RXD, BNXT_MAX_RXD},
329 .isc_ntxd_min = {16, 16, 16},
330 .isc_ntxd_default = {PAGE_SIZE / sizeof(struct cmpl_base) * 2,
331 PAGE_SIZE / sizeof(struct tx_bd_short),
332 PAGE_SIZE / sizeof(struct cmpl_base) * 2},
333 .isc_ntxd_max = {BNXT_MAX_TXD, BNXT_MAX_TXD, BNXT_MAX_TXD},
/* One extra vector for the default/async completion ring. */
335 .isc_admin_intrcnt = 1,
336 .isc_vendor_info = bnxt_vendor_info_array,
337 .isc_driver_version = bnxt_driver_version,
/* device_register method: hand iflib our shared-context template. */
345 bnxt_register(device_t dev)
347 return (&bnxt_sctx_init);
/*
 * Allocate the array of notification-queue (NQ) ring descriptors, one
 * per queue set.  Caller checks softc->nq_rings for NULL on failure.
 */
351 bnxt_nq_alloc(struct bnxt_softc *softc, int nqsets)
357 softc->nq_rings = malloc(sizeof(struct bnxt_cp_ring) * nqsets,
358 M_DEVBUF, M_NOWAIT | M_ZERO);
/* Release the NQ ring array and clear the pointer to avoid a stale
 * reference / double free. */
362 bnxt_nq_free(struct bnxt_softc *softc)
366 free(softc->nq_rings, M_DEVBUF);
367 softc->nq_rings = NULL;
371 * Device Dependent Configuration Functions
374 /* Soft queue setup and teardown */
/*
 * iflib callback: allocate software state for all TX queue sets — the
 * TX completion rings, the TX rings themselves, on P5 chips the
 * notification queues, and a DMA'd ctx_hw_stats block per set.  The
 * vaddrs/paddrs arrays supplied by iflib are laid out ntxqs entries per
 * set: [cp ring, tx ring, (P5) NQ].  Errors unwind via the goto labels
 * at the bottom.  NOTE(review): this excerpt is sub-sampled; some
 * declarations, returns and error labels between visible lines are not
 * shown.
 */
376 bnxt_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
377 uint64_t *paddrs, int ntxqs, int ntxqsets)
379 struct bnxt_softc *softc;
383 softc = iflib_get_softc(ctx);
/* P5 (Thor) chips additionally need one NQ per TX queue set. */
385 if (BNXT_CHIP_P5(softc)) {
386 bnxt_nq_alloc(softc, ntxqsets);
387 if (!softc->nq_rings) {
388 device_printf(iflib_get_dev(ctx),
389 "unable to allocate NQ rings\n");
395 softc->tx_cp_rings = malloc(sizeof(struct bnxt_cp_ring) * ntxqsets,
396 M_DEVBUF, M_NOWAIT | M_ZERO);
397 if (!softc->tx_cp_rings) {
398 device_printf(iflib_get_dev(ctx),
399 "unable to allocate TX completion rings\n");
403 softc->tx_rings = malloc(sizeof(struct bnxt_ring) * ntxqsets,
404 M_DEVBUF, M_NOWAIT | M_ZERO);
405 if (!softc->tx_rings) {
406 device_printf(iflib_get_dev(ctx),
407 "unable to allocate TX rings\n");
409 goto ring_alloc_fail;
/* One DMA'd hardware-stats context per TX queue set. */
412 for (i=0; i < ntxqsets; i++) {
413 rc = iflib_dma_alloc(ctx, sizeof(struct ctx_hw_stats),
414 &softc->tx_stats[i], 0);
417 bus_dmamap_sync(softc->tx_stats[i].idi_tag, softc->tx_stats[i].idi_map,
418 BUS_DMASYNC_PREREAD);
421 for (i = 0; i < ntxqsets; i++) {
422 /* Set up the completion ring */
/* phys_id/stats_ctx_id start as HWRM_NA_SIGNATURE until firmware
 * assigns real IDs via HWRM. */
423 softc->tx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
424 softc->tx_cp_rings[i].ring.phys_id =
425 (uint16_t)HWRM_NA_SIGNATURE;
426 softc->tx_cp_rings[i].ring.softc = softc;
427 softc->tx_cp_rings[i].ring.idx = i;
/* Logical ring IDs: TX rings are numbered after the RX cp+ring
 * pairs (nrxqsets * 2) and the default ring (+1). */
428 softc->tx_cp_rings[i].ring.id =
429 (softc->scctx->isc_nrxqsets * 2) + 1 + i;
/* P5 uses a single PF doorbell region; older chips use a
 * 0x80-byte doorbell stride per ring ID. */
430 softc->tx_cp_rings[i].ring.doorbell = (BNXT_CHIP_P5(softc)) ?
431 DB_PF_OFFSET_P5: softc->tx_cp_rings[i].ring.id * 0x80;
432 softc->tx_cp_rings[i].ring.ring_size =
433 softc->scctx->isc_ntxd[0];
434 softc->tx_cp_rings[i].ring.vaddr = vaddrs[i * ntxqs];
435 softc->tx_cp_rings[i].ring.paddr = paddrs[i * ntxqs];
437 /* Set up the TX ring */
438 softc->tx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
439 softc->tx_rings[i].softc = softc;
440 softc->tx_rings[i].idx = i;
441 softc->tx_rings[i].id =
442 (softc->scctx->isc_nrxqsets * 2) + 1 + i;
443 softc->tx_rings[i].doorbell = (BNXT_CHIP_P5(softc)) ?
444 DB_PF_OFFSET_P5 : softc->tx_rings[i].id * 0x80;
445 softc->tx_rings[i].ring_size = softc->scctx->isc_ntxd[1];
446 softc->tx_rings[i].vaddr = vaddrs[i * ntxqs + 1];
447 softc->tx_rings[i].paddr = paddrs[i * ntxqs + 1];
449 bnxt_create_tx_sysctls(softc, i);
451 if (BNXT_CHIP_P5(softc)) {
452 /* Set up the Notification ring (NQ) */
453 softc->nq_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
454 softc->nq_rings[i].ring.phys_id =
455 (uint16_t)HWRM_NA_SIGNATURE;
456 softc->nq_rings[i].ring.softc = softc;
457 softc->nq_rings[i].ring.idx = i;
458 softc->nq_rings[i].ring.id = i;
459 softc->nq_rings[i].ring.doorbell = (BNXT_CHIP_P5(softc)) ?
460 DB_PF_OFFSET_P5 : softc->nq_rings[i].ring.id * 0x80;
461 softc->nq_rings[i].ring.ring_size = softc->scctx->isc_ntxd[2];
462 softc->nq_rings[i].ring.vaddr = vaddrs[i * ntxqs + 2];
463 softc->nq_rings[i].ring.paddr = paddrs[i * ntxqs + 2];
467 softc->ntxqsets = ntxqsets;
/* Error unwind: free the stats blocks allocated so far, then the
 * ring arrays, in reverse order of allocation. */
471 for (i = i - 1; i >= 0; i--)
472 iflib_dma_free(&softc->tx_stats[i]);
473 free(softc->tx_rings, M_DEVBUF);
475 free(softc->tx_cp_rings, M_DEVBUF);
/*
 * iflib callback: tear down everything the two *_queues_alloc callbacks
 * built — per-queue stats DMA, TX/RX/AG ring arrays, port-stats DMA
 * blocks and ring-group metadata.  NOTE(review): sub-sampled excerpt;
 * pointer-clearing for some arrays is not visible here.
 */
483 bnxt_queues_free(if_ctx_t ctx)
485 struct bnxt_softc *softc = iflib_get_softc(ctx);
/* TX side */
489 for (i=0; i<softc->ntxqsets; i++)
490 iflib_dma_free(&softc->tx_stats[i]);
491 free(softc->tx_rings, M_DEVBUF);
492 softc->tx_rings = NULL;
493 free(softc->tx_cp_rings, M_DEVBUF);
494 softc->tx_cp_rings = NULL;
/* RX side, including the DMA'd per-port statistics areas */
498 for (i=0; i<softc->nrxqsets; i++)
499 iflib_dma_free(&softc->rx_stats[i]);
500 iflib_dma_free(&softc->hw_tx_port_stats);
501 iflib_dma_free(&softc->hw_rx_port_stats);
502 iflib_dma_free(&softc->hw_tx_port_stats_ext);
503 iflib_dma_free(&softc->hw_rx_port_stats_ext);
504 free(softc->grp_info, M_DEVBUF);
505 free(softc->ag_rings, M_DEVBUF);
506 free(softc->rx_rings, M_DEVBUF);
507 free(softc->rx_cp_rings, M_DEVBUF);
/*
 * iflib callback: allocate software state for all RX queue sets — RX
 * completion rings, RX rings, aggregation (AG) rings, per-set ring
 * groups, per-set stats DMA, port-statistics DMA areas, and the default
 * VNIC with its multicast list and RSS hash-key/group tables.  The
 * vaddrs/paddrs arrays are laid out nrxqs entries per set:
 * [cp ring, rx ring, ag ring].  Errors unwind through the goto labels
 * at the bottom in reverse allocation order.  NOTE(review):
 * sub-sampled excerpt — some declarations, returns and labels between
 * visible lines are not shown.
 */
512 bnxt_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
513 uint64_t *paddrs, int nrxqs, int nrxqsets)
515 struct bnxt_softc *softc;
519 softc = iflib_get_softc(ctx);
521 softc->rx_cp_rings = malloc(sizeof(struct bnxt_cp_ring) * nrxqsets,
522 M_DEVBUF, M_NOWAIT | M_ZERO);
523 if (!softc->rx_cp_rings) {
524 device_printf(iflib_get_dev(ctx),
525 "unable to allocate RX completion rings\n");
529 softc->rx_rings = malloc(sizeof(struct bnxt_ring) * nrxqsets,
530 M_DEVBUF, M_NOWAIT | M_ZERO);
531 if (!softc->rx_rings) {
532 device_printf(iflib_get_dev(ctx),
533 "unable to allocate RX rings\n");
535 goto ring_alloc_fail;
537 softc->ag_rings = malloc(sizeof(struct bnxt_ring) * nrxqsets,
538 M_DEVBUF, M_NOWAIT | M_ZERO);
539 if (!softc->ag_rings) {
540 device_printf(iflib_get_dev(ctx),
541 "unable to allocate aggregation rings\n");
545 softc->grp_info = malloc(sizeof(struct bnxt_grp_info) * nrxqsets,
546 M_DEVBUF, M_NOWAIT | M_ZERO);
547 if (!softc->grp_info) {
548 device_printf(iflib_get_dev(ctx),
549 "unable to allocate ring groups\n");
/* One DMA'd hardware-stats context per RX queue set. */
554 for (i=0; i < nrxqsets; i++) {
555 rc = iflib_dma_alloc(ctx, sizeof(struct ctx_hw_stats),
556 &softc->rx_stats[i], 0);
558 goto hw_stats_alloc_fail;
559 bus_dmamap_sync(softc->rx_stats[i].idi_tag, softc->rx_stats[i].idi_map,
560 BUS_DMASYNC_PREREAD);
564 * Additional 512 bytes for future expansion.
565 * To prevent corruption when loaded with newer firmwares with added counters.
566 * This can be deleted when there will be no further additions of counters.
568 #define BNXT_PORT_STAT_PADDING 512
570 rc = iflib_dma_alloc(ctx, sizeof(struct rx_port_stats) + BNXT_PORT_STAT_PADDING,
571 &softc->hw_rx_port_stats, 0);
573 goto hw_port_rx_stats_alloc_fail;
575 bus_dmamap_sync(softc->hw_rx_port_stats.idi_tag,
576 softc->hw_rx_port_stats.idi_map, BUS_DMASYNC_PREREAD);
579 rc = iflib_dma_alloc(ctx, sizeof(struct tx_port_stats) + BNXT_PORT_STAT_PADDING,
580 &softc->hw_tx_port_stats, 0);
582 goto hw_port_tx_stats_alloc_fail;
584 bus_dmamap_sync(softc->hw_tx_port_stats.idi_tag,
585 softc->hw_tx_port_stats.idi_map, BUS_DMASYNC_PREREAD);
/* Keep typed aliases to the DMA'd port-statistics areas. */
587 softc->rx_port_stats = (void *) softc->hw_rx_port_stats.idi_vaddr;
588 softc->tx_port_stats = (void *) softc->hw_tx_port_stats.idi_vaddr;
/* Extended port statistics (newer firmware counter blocks). */
591 rc = iflib_dma_alloc(ctx, sizeof(struct rx_port_stats_ext),
592 &softc->hw_rx_port_stats_ext, 0);
594 goto hw_port_rx_stats_ext_alloc_fail;
596 bus_dmamap_sync(softc->hw_rx_port_stats_ext.idi_tag,
597 softc->hw_rx_port_stats_ext.idi_map, BUS_DMASYNC_PREREAD);
599 rc = iflib_dma_alloc(ctx, sizeof(struct tx_port_stats_ext),
600 &softc->hw_tx_port_stats_ext, 0);
602 goto hw_port_tx_stats_ext_alloc_fail;
604 bus_dmamap_sync(softc->hw_tx_port_stats_ext.idi_tag,
605 softc->hw_tx_port_stats_ext.idi_map, BUS_DMASYNC_PREREAD);
607 softc->rx_port_stats_ext = (void *) softc->hw_rx_port_stats_ext.idi_vaddr;
608 softc->tx_port_stats_ext = (void *) softc->hw_tx_port_stats_ext.idi_vaddr;
610 for (i = 0; i < nrxqsets; i++) {
611 /* Allocate the completion ring */
612 softc->rx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
613 softc->rx_cp_rings[i].ring.phys_id =
614 (uint16_t)HWRM_NA_SIGNATURE;
615 softc->rx_cp_rings[i].ring.softc = softc;
616 softc->rx_cp_rings[i].ring.idx = i;
/* Ring ID 0 is the default ring; RX rings start at 1. */
617 softc->rx_cp_rings[i].ring.id = i + 1;
/* P5 uses a single PF doorbell region; older chips use a
 * 0x80-byte doorbell stride per ring ID. */
618 softc->rx_cp_rings[i].ring.doorbell = (BNXT_CHIP_P5(softc)) ?
619 DB_PF_OFFSET_P5 : softc->rx_cp_rings[i].ring.id * 0x80;
621 * If this ring overflows, RX stops working.
623 softc->rx_cp_rings[i].ring.ring_size =
624 softc->scctx->isc_nrxd[0];
625 softc->rx_cp_rings[i].ring.vaddr = vaddrs[i * nrxqs];
626 softc->rx_cp_rings[i].ring.paddr = paddrs[i * nrxqs];
628 /* Allocate the RX ring */
629 softc->rx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
630 softc->rx_rings[i].softc = softc;
631 softc->rx_rings[i].idx = i;
632 softc->rx_rings[i].id = i + 1;
633 softc->rx_rings[i].doorbell = (BNXT_CHIP_P5(softc)) ?
634 DB_PF_OFFSET_P5 : softc->rx_rings[i].id * 0x80;
635 softc->rx_rings[i].ring_size = softc->scctx->isc_nrxd[1];
636 softc->rx_rings[i].vaddr = vaddrs[i * nrxqs + 1];
637 softc->rx_rings[i].paddr = paddrs[i * nrxqs + 1];
639 /* Allocate the TPA start buffer */
/* Sized for the full TPA aggregation-ID space of the start
 * completion record. */
640 softc->rx_rings[i].tpa_start = malloc(sizeof(struct bnxt_full_tpa_start) *
641 (RX_TPA_START_CMPL_AGG_ID_MASK >> RX_TPA_START_CMPL_AGG_ID_SFT),
642 M_DEVBUF, M_NOWAIT | M_ZERO);
643 if (softc->rx_rings[i].tpa_start == NULL) {
645 device_printf(softc->dev,
646 "Unable to allocate space for TPA\n");
650 /* Allocate the AG ring */
/* AG ring IDs follow all RX ring IDs. */
651 softc->ag_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
652 softc->ag_rings[i].softc = softc;
653 softc->ag_rings[i].idx = i;
654 softc->ag_rings[i].id = nrxqsets + i + 1;
655 softc->ag_rings[i].doorbell = (BNXT_CHIP_P5(softc)) ?
656 DB_PF_OFFSET_P5 : softc->ag_rings[i].id * 0x80;
657 softc->ag_rings[i].ring_size = softc->scctx->isc_nrxd[2];
658 softc->ag_rings[i].vaddr = vaddrs[i * nrxqs + 2];
659 softc->ag_rings[i].paddr = paddrs[i * nrxqs + 2];
661 /* Allocate the ring group */
662 softc->grp_info[i].grp_id = (uint16_t)HWRM_NA_SIGNATURE;
663 softc->grp_info[i].stats_ctx =
664 softc->rx_cp_rings[i].stats_ctx_id;
665 softc->grp_info[i].rx_ring_id = softc->rx_rings[i].phys_id;
666 softc->grp_info[i].ag_ring_id = softc->ag_rings[i].phys_id;
667 softc->grp_info[i].cp_ring_id =
668 softc->rx_cp_rings[i].ring.phys_id;
670 bnxt_create_rx_sysctls(softc, i);
674 * When SR-IOV is enabled, avoid each VF sending PORT_QSTATS
675 * HWRM every sec with which firmware timeouts can happen
678 bnxt_create_port_stats_sysctls(softc);
680 /* And finally, the VNIC */
/* Default VNIC: accept broadcast and any-VLAN/non-VLAN traffic
 * until the RX mask is reprogrammed. */
681 softc->vnic_info.id = (uint16_t)HWRM_NA_SIGNATURE;
682 softc->vnic_info.filter_id = -1;
683 softc->vnic_info.def_ring_grp = (uint16_t)HWRM_NA_SIGNATURE;
684 softc->vnic_info.cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
685 softc->vnic_info.lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
686 softc->vnic_info.rx_mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
687 HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN;
688 softc->vnic_info.mc_list_count = 0;
689 softc->vnic_info.flags = BNXT_VNIC_FLAG_DEFAULT;
690 rc = iflib_dma_alloc(ctx, BNXT_MAX_MC_ADDRS * ETHER_ADDR_LEN,
691 &softc->vnic_info.mc_list, 0);
693 goto mc_list_alloc_fail;
695 /* The VNIC RSS Hash Key */
696 rc = iflib_dma_alloc(ctx, HW_HASH_KEY_SIZE,
697 &softc->vnic_info.rss_hash_key_tbl, 0);
699 goto rss_hash_alloc_fail;
700 bus_dmamap_sync(softc->vnic_info.rss_hash_key_tbl.idi_tag,
701 softc->vnic_info.rss_hash_key_tbl.idi_map,
702 BUS_DMASYNC_PREWRITE);
703 memcpy(softc->vnic_info.rss_hash_key_tbl.idi_vaddr,
704 softc->vnic_info.rss_hash_key, HW_HASH_KEY_SIZE);
706 /* Allocate the RSS tables */
707 rc = iflib_dma_alloc(ctx, HW_HASH_INDEX_SIZE * sizeof(uint16_t),
708 &softc->vnic_info.rss_grp_tbl, 0);
710 goto rss_grp_alloc_fail;
711 bus_dmamap_sync(softc->vnic_info.rss_grp_tbl.idi_tag,
712 softc->vnic_info.rss_grp_tbl.idi_map,
713 BUS_DMASYNC_PREWRITE);
/* 0xff marks every RSS table slot as unassigned. */
714 memset(softc->vnic_info.rss_grp_tbl.idi_vaddr, 0xff,
715 softc->vnic_info.rss_grp_tbl.idi_size);
717 softc->nrxqsets = nrxqsets;
/* Error unwind — reverse order of the allocations above. */
721 iflib_dma_free(&softc->vnic_info.rss_hash_key_tbl);
723 iflib_dma_free(&softc->vnic_info.mc_list);
725 for (i = i - 1; i >= 0; i--) {
726 if (softc->rx_rings[i].tpa_start)
727 free(softc->rx_rings[i].tpa_start, M_DEVBUF);
730 iflib_dma_free(&softc->hw_tx_port_stats_ext);
731 hw_port_tx_stats_ext_alloc_fail:
732 iflib_dma_free(&softc->hw_rx_port_stats_ext);
733 hw_port_rx_stats_ext_alloc_fail:
734 iflib_dma_free(&softc->hw_tx_port_stats);
735 hw_port_tx_stats_alloc_fail:
736 iflib_dma_free(&softc->hw_rx_port_stats);
737 hw_port_rx_stats_alloc_fail:
738 for (i=0; i < nrxqsets; i++) {
739 if (softc->rx_stats[i].idi_vaddr)
740 iflib_dma_free(&softc->rx_stats[i]);
743 free(softc->grp_info, M_DEVBUF);
745 free(softc->ag_rings, M_DEVBUF);
747 free(softc->rx_rings, M_DEVBUF);
749 free(softc->rx_cp_rings, M_DEVBUF);
/* Free the DMA buffer used for HWRM short-command requests, if it was
 * allocated; clear the vaddr so a later free is a no-op. */
754 static void bnxt_free_hwrm_short_cmd_req(struct bnxt_softc *softc)
756 if (softc->hwrm_short_cmd_req_addr.idi_vaddr)
757 iflib_dma_free(&softc->hwrm_short_cmd_req_addr);
758 softc->hwrm_short_cmd_req_addr.idi_vaddr = NULL;
/* Allocate the DMA buffer for HWRM short-command requests, sized to the
 * firmware-reported maximum request length. */
761 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt_softc *softc)
765 rc = iflib_dma_alloc(softc->ctx, softc->hwrm_max_req_len,
766 &softc->hwrm_short_cmd_req_addr, BUS_DMA_NOWAIT);
/*
 * Free all backing pages of a ring-memory descriptor, its page table
 * (if present), and any associated vmem allocation.  Safe to call on a
 * partially-allocated rmem: NULL pages are skipped.
 */
771 static void bnxt_free_ring(struct bnxt_softc *bp, struct bnxt_ring_mem_info *rmem)
775 for (i = 0; i < rmem->nr_pages; i++) {
776 if (!rmem->pg_arr[i].idi_vaddr)
779 iflib_dma_free(&rmem->pg_arr[i]);
780 rmem->pg_arr[i].idi_vaddr = NULL;
782 if (rmem->pg_tbl.idi_vaddr) {
783 iflib_dma_free(&rmem->pg_tbl);
784 rmem->pg_tbl.idi_vaddr = NULL;
787 if (rmem->vmem_size && *rmem->vmem) {
788 free(*rmem->vmem, M_DEVBUF);
/*
 * Allocate the backing pages for a ring-memory descriptor.  When more
 * than one page (or depth > 0) is needed, an indirection page table is
 * allocated first and each data page's physical address is written into
 * it as a little-endian PTE, tagged with PTU_PTE_VALID and, for ring
 * PTEs, NEXT_TO_LAST/LAST markers on the final two entries.  Optionally
 * allocates rmem->vmem for per-entry software metadata.
 * NOTE(review): sub-sampled excerpt — some error returns and the final
 * return are not visible.
 */
793 static int bnxt_alloc_ring(struct bnxt_softc *softc, struct bnxt_ring_mem_info *rmem)
795 uint64_t valid_bit = 0;
799 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
800 valid_bit = PTU_PTE_VALID;
802 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl.idi_vaddr) {
/* 8 bytes per 64-bit PTE, unless a full page is requested. */
803 size_t pg_tbl_size = rmem->nr_pages * 8;
805 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
806 pg_tbl_size = rmem->page_size;
808 rc = iflib_dma_alloc(softc->ctx, pg_tbl_size, &rmem->pg_tbl, 0);
813 for (i = 0; i < rmem->nr_pages; i++) {
814 uint64_t extra_bits = valid_bit;
817 rc = iflib_dma_alloc(softc->ctx, rmem->page_size, &rmem->pg_arr[i], 0);
/* Pre-fill with the firmware-specified initializer value. */
822 memset(rmem->pg_arr[i].idi_vaddr, rmem->init_val, rmem->page_size);
824 if (rmem->nr_pages > 1 || rmem->depth > 0) {
825 if (i == rmem->nr_pages - 2 &&
826 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
827 extra_bits |= PTU_PTE_NEXT_TO_LAST;
828 else if (i == rmem->nr_pages - 1 &&
829 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
830 extra_bits |= PTU_PTE_LAST;
832 ptr = (void *) rmem->pg_tbl.idi_vaddr;
833 ptr[i] = htole64(rmem->pg_arr[i].idi_paddr | extra_bits);
837 if (rmem->vmem_size) {
838 *rmem->vmem = malloc(rmem->vmem_size, M_DEVBUF, M_NOWAIT | M_ZERO);
/* Default set of backing-store regions (QP, SRQ, CQ, VNIC, STAT) that
 * are always enabled in the HWRM backing-store configuration request. */
845 #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES \
846 (HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP | \
847 HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ | \
848 HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ | \
849 HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC | \
850 HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT)
/*
 * Allocate one context-memory block: point the ring-memory descriptor
 * at the page array embedded in ctx_pg, mark its PTEs valid (using full
 * page tables when the structure is multi-level), and delegate the
 * actual page allocation to bnxt_alloc_ring().
 */
852 static int bnxt_alloc_ctx_mem_blk(struct bnxt_softc *softc,
853 struct bnxt_ctx_pg_info *ctx_pg)
855 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
857 rmem->page_size = BNXT_PAGE_SIZE;
858 rmem->pg_arr = ctx_pg->ctx_arr;
859 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
860 if (rmem->depth >= 1)
861 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
863 return bnxt_alloc_ring(softc, rmem);
/*
 * Allocate firmware context memory of mem_size bytes as page tables.
 * Small regions (<= MAX_CTX_PAGES, depth <= 1) use a single-level
 * layout; larger ones build a two-level structure: a top-level block
 * whose pages become the page tables of per-chunk bnxt_ctx_pg_info
 * children (MAX_CTX_PAGES pages each, remainder in the last chunk).
 * use_init_val selects pre-filling pages with the firmware's
 * ctx_kind_initializer.  NOTE(review): sub-sampled excerpt — error
 * returns and the final return are not visible.
 */
866 static int bnxt_alloc_ctx_pg_tbls(struct bnxt_softc *softc,
867 struct bnxt_ctx_pg_info *ctx_pg, uint32_t mem_size,
868 uint8_t depth, bool use_init_val)
870 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
876 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
877 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
878 ctx_pg->nr_pages = 0;
881 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
/* Two-level layout: table of child page-table descriptors. */
885 ctx_pg->ctx_pg_tbl = malloc(MAX_CTX_PAGES * sizeof(ctx_pg),
886 M_DEVBUF, M_NOWAIT | M_ZERO);
887 if (!ctx_pg->ctx_pg_tbl)
889 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
890 rmem->nr_pages = nr_tbls;
891 rc = bnxt_alloc_ctx_mem_blk(softc, ctx_pg);
894 for (i = 0; i < nr_tbls; i++) {
895 struct bnxt_ctx_pg_info *pg_tbl;
897 pg_tbl = malloc(sizeof(*pg_tbl), M_DEVBUF, M_NOWAIT | M_ZERO);
900 ctx_pg->ctx_pg_tbl[i] = pg_tbl;
901 rmem = &pg_tbl->ring_mem;
/* Top-level page i serves as this child's page table. */
902 memcpy(&rmem->pg_tbl, &ctx_pg->ctx_arr[i], sizeof(struct iflib_dma_info));
904 rmem->nr_pages = MAX_CTX_PAGES;
906 rmem->init_val = softc->ctx_mem->ctx_kind_initializer;
907 if (i == (nr_tbls - 1)) {
908 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
911 rmem->nr_pages = rem;
913 rc = bnxt_alloc_ctx_mem_blk(softc, pg_tbl);
/* Single-level layout for small regions. */
918 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
919 if (rmem->nr_pages > 1 || depth)
922 rmem->init_val = softc->ctx_mem->ctx_kind_initializer;
923 rc = bnxt_alloc_ctx_mem_blk(softc, ctx_pg);
/*
 * Free context memory allocated by bnxt_alloc_ctx_pg_tbls().  For
 * two-level layouts, free each child page table's pages first, then the
 * child descriptors and their table; finally free the top-level ring
 * memory and reset the page count.
 */
928 static void bnxt_free_ctx_pg_tbls(struct bnxt_softc *softc,
929 struct bnxt_ctx_pg_info *ctx_pg)
931 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
933 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
934 ctx_pg->ctx_pg_tbl) {
935 int i, nr_tbls = rmem->nr_pages;
937 for (i = 0; i < nr_tbls; i++) {
938 struct bnxt_ctx_pg_info *pg_tbl;
939 struct bnxt_ring_mem_info *rmem2;
941 pg_tbl = ctx_pg->ctx_pg_tbl[i];
944 rmem2 = &pg_tbl->ring_mem;
945 bnxt_free_ring(softc, rmem2);
/* The top-level page doubled as this child's page table;
 * clear the alias so it is not freed twice. */
946 ctx_pg->ctx_arr[i].idi_vaddr = NULL;
947 free(pg_tbl , M_DEVBUF);
948 ctx_pg->ctx_pg_tbl[i] = NULL;
950 free(ctx_pg->ctx_pg_tbl , M_DEVBUF);
951 ctx_pg->ctx_pg_tbl = NULL;
953 bnxt_free_ring(softc, rmem);
954 ctx_pg->nr_pages = 0;
/*
 * Release all firmware backing-store context memory: the per-queue TQM
 * rings (allocated as one array hung off tqm_mem[0]), then the TIM,
 * MRAV, STAT, VNIC, CQ, SRQ and QP regions, and finally the ctx_mem
 * descriptor itself.  Clears BNXT_CTX_FLAG_INITED so a later alloc
 * starts fresh.
 */
957 static void bnxt_free_ctx_mem(struct bnxt_softc *softc)
959 struct bnxt_ctx_mem_info *ctx = softc->ctx_mem;
965 if (ctx->tqm_mem[0]) {
966 for (i = 0; i < softc->max_q + 1; i++) {
967 if (!ctx->tqm_mem[i])
969 bnxt_free_ctx_pg_tbls(softc, ctx->tqm_mem[i]);
/* tqm_mem[0] owns the single allocation backing all entries. */
971 free(ctx->tqm_mem[0] , M_DEVBUF);
972 ctx->tqm_mem[0] = NULL;
975 bnxt_free_ctx_pg_tbls(softc, &ctx->tim_mem);
976 bnxt_free_ctx_pg_tbls(softc, &ctx->mrav_mem);
977 bnxt_free_ctx_pg_tbls(softc, &ctx->stat_mem);
978 bnxt_free_ctx_pg_tbls(softc, &ctx->vnic_mem);
979 bnxt_free_ctx_pg_tbls(softc, &ctx->cq_mem);
980 bnxt_free_ctx_pg_tbls(softc, &ctx->srq_mem);
981 bnxt_free_ctx_pg_tbls(softc, &ctx->qp_mem);
982 ctx->flags &= ~BNXT_CTX_FLAG_INITED;
983 free(softc->ctx_mem, M_DEVBUF);
984 softc->ctx_mem = NULL;
/*
 * Query the firmware's backing-store requirements (P5/Thor chips only)
 * and allocate host memory for each context type: QP, SRQ, CQ, VNIC,
 * STAT, MRAV, TIM and the per-queue TQM rings.  Finishes by pushing the
 * layout to firmware via HWRM_FUNC_BACKING_STORE_CFG and marking the
 * context as initialized.  Returns 0 on success or an errno-style rc.
 * NOTE(review): elided listing -- early returns, "if (rc)" error checks
 * and goto targets between the visible lines were dropped by extraction.
 */
987 static int bnxt_alloc_ctx_mem(struct bnxt_softc *softc)
989 struct bnxt_ctx_pg_info *ctx_pg;
990 struct bnxt_ctx_mem_info *ctx;
991 uint32_t mem_size, ena, entries;
/* Backing-store context memory is only required on P5 (Thor) silicon. */
994 if (!BNXT_CHIP_P5(softc))
997 rc = bnxt_hwrm_func_backing_store_qcaps(softc);
999 device_printf(softc->dev, "Failed querying context mem capability, rc = %d.\n",
1003 ctx = softc->ctx_mem;
/* Nothing to do if qcaps gave us no context info or it is already set up. */
1004 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
1007 ctx_pg = &ctx->qp_mem;
1008 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
1009 (1024 * 64); /* FIXME: Enable 64K QPs */
1010 mem_size = ctx->qp_entry_size * ctx_pg->entries;
1011 rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 2, true);
1015 ctx_pg = &ctx->srq_mem;
1016 /* FIXME: Temporarily enable 8K RoCE SRQs */
1017 ctx_pg->entries = ctx->srq_max_l2_entries + (1024 * 8);
1018 mem_size = ctx->srq_entry_size * ctx_pg->entries;
1019 rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 2, true);
1023 ctx_pg = &ctx->cq_mem;
1024 /* FIXME: Temporarily enable 64K RoCE CQ */
1025 ctx_pg->entries = ctx->cq_max_l2_entries + (1024 * 64 * 2);
1026 mem_size = ctx->cq_entry_size * ctx_pg->entries;
1027 rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 2, true);
1031 ctx_pg = &ctx->vnic_mem;
1032 ctx_pg->entries = ctx->vnic_max_vnic_entries +
1033 ctx->vnic_max_ring_table_entries;
1034 mem_size = ctx->vnic_entry_size * ctx_pg->entries;
/* VNIC and STAT tables use a single page-table level (depth 1). */
1035 rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 1, true);
1039 ctx_pg = &ctx->stat_mem;
1040 ctx_pg->entries = ctx->stat_max_entries;
1041 mem_size = ctx->stat_entry_size * ctx_pg->entries;
1042 rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 1, true);
1046 ctx_pg = &ctx->mrav_mem;
1047 /* FIXME: Temporarily enable 256K RoCE MRs */
1048 ctx_pg->entries = 1024 * 256;
1049 mem_size = ctx->mrav_entry_size * ctx_pg->entries;
1050 rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 2, true);
1053 ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_MRAV;
1055 ctx_pg = &ctx->tim_mem;
1056 /* Firmware needs number of TIM entries equal to
1057 * number of Total QP contexts enabled, including
1060 ctx_pg->entries = ctx->qp_min_qp1_entries +
1061 ctx->qp_max_l2_entries + 1024 * 64;
1062 /* FIXME: L2 driver is not able to create queue depth
1063 * worth of 1M 32bit timers. Need a fix when l2-roce
1064 * interface is well designed.
1066 mem_size = ctx->tim_entry_size * ctx_pg->entries;
1067 rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 2, false);
1070 ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TIM;
1072 /* FIXME: Temporarily increase the TQM queue depth
1073 * by 1K for 1K RoCE QPs.
/* TQM ring depth must be a multiple of tqm_entries_multiple and within
 * the min/max bounds firmware reported in qcaps. */
1075 entries = ctx->qp_max_l2_entries + 1024 * 64;
1076 entries = roundup(entries, ctx->tqm_entries_multiple);
1077 entries = clamp_t(uint32_t, entries, ctx->tqm_min_entries_per_ring,
1078 ctx->tqm_max_entries_per_ring);
1079 for (i = 0; i < softc->max_q + 1; i++) {
1080 ctx_pg = ctx->tqm_mem[i];
1081 ctx_pg->entries = entries;
1082 mem_size = ctx->tqm_entry_size * entries;
1083 rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 2, false);
/* Slot 0 is the SP ring; each further slot shifts the enable bit. */
1086 ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i;
1088 ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES;
1089 rc = bnxt_hwrm_func_backing_store_cfg(softc, ena);
1091 device_printf(softc->dev, "Failed configuring context mem, rc = %d.\n",
1094 ctx->flags |= BNXT_CTX_FLAG_INITED;
1099 * If we update the index, a write barrier is needed after the write to ensure
1100 * the completion ring has space before the RX/TX ring does. Since we can't
1101 * make the RX and AG doorbells covered by the same barrier without remapping
1102 * MSI-X vectors, we create the barrier over the entire doorbell bar.
1103 * TODO: Remap the MSI-X vectors to allow a barrier to only cover the doorbells
1104 * for a single ring group.
1106 * A barrier of just the size of the write is used to ensure the ordering
1107 * remains correct and no writes are lost.
/*
 * Legacy (pre-Thor, Cumulus/Whitney) RX doorbell: order prior descriptor
 * writes with a 4-byte write barrier, then ring the doorbell with the RX
 * key and the new producer index (little-endian 32-bit write).
 */
1110 static void bnxt_cuw_db_rx(void *db_ptr, uint16_t idx)
1112 struct bnxt_ring *ring = (struct bnxt_ring *) db_ptr;
1113 struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;
1115 bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 4,
1116 BUS_SPACE_BARRIER_WRITE);
1117 bus_space_write_4(db_bar->tag, db_bar->handle, ring->doorbell,
1118 htole32(RX_DOORBELL_KEY_RX | idx));
/*
 * Legacy (pre-Thor) TX doorbell: write barrier, then a 32-bit
 * little-endian write of the TX key plus the new producer index.
 */
1121 static void bnxt_cuw_db_tx(void *db_ptr, uint16_t idx)
1123 struct bnxt_ring *ring = (struct bnxt_ring *) db_ptr;
1124 struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;
1126 bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 4,
1127 BUS_SPACE_BARRIER_WRITE);
1128 bus_space_write_4(db_bar->tag, db_bar->handle, ring->doorbell,
1129 htole32(TX_DOORBELL_KEY_TX | idx));
/*
 * Legacy (pre-Thor) completion-queue doorbell.  Writes the consumer
 * index (only when valid, i.e. cons != UINT32_MAX) and masks or unmasks
 * the interrupt depending on enable_irq.  The trailing barrier spans the
 * whole doorbell BAR -- see the block comment above bnxt_cuw_db_rx().
 */
1132 static void bnxt_cuw_db_cq(void *db_ptr, bool enable_irq)
1134 struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
1135 struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
1137 bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 4,
1138 BUS_SPACE_BARRIER_WRITE);
1139 bus_space_write_4(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
1140 htole32(CMPL_DOORBELL_KEY_CMPL |
/* UINT32_MAX means "no consumer index yet" -- skip IDX_VALID. */
1141 ((cpr->cons == UINT32_MAX) ? 0 :
1142 (cpr->cons | CMPL_DOORBELL_IDX_VALID)) |
1143 ((enable_irq) ? 0 : CMPL_DOORBELL_MASK)));
1144 bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
1145 BUS_SPACE_BARRIER_WRITE);
/*
 * Thor (P5) RX doorbell: 64-bit write carrying the SRQ doorbell type,
 * the producer index, and the ring's physical id in the XID field.
 */
1148 static void bnxt_thor_db_rx(void *db_ptr, uint16_t idx)
1150 struct bnxt_ring *ring = (struct bnxt_ring *) db_ptr;
1151 struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;
1153 bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 8,
1154 BUS_SPACE_BARRIER_WRITE);
1155 bus_space_write_8(db_bar->tag, db_bar->handle, ring->doorbell,
1156 htole64((DBR_PATH_L2 | DBR_TYPE_SRQ | idx) |
1157 ((uint64_t)ring->phys_id << DBR_XID_SFT)));
/*
 * Thor (P5) TX doorbell: same 64-bit format as the RX variant but with
 * the SQ doorbell type.
 */
1160 static void bnxt_thor_db_tx(void *db_ptr, uint16_t idx)
1162 struct bnxt_ring *ring = (struct bnxt_ring *) db_ptr;
1163 struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;
1165 bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 8,
1166 BUS_SPACE_BARRIER_WRITE);
1167 bus_space_write_8(db_bar->tag, db_bar->handle, ring->doorbell,
1168 htole64((DBR_PATH_L2 | DBR_TYPE_SQ | idx) |
1169 ((uint64_t)ring->phys_id << DBR_XID_SFT)));
/*
 * Thor (P5) RX completion-queue doorbell.  Builds a dbc_dbc_t message
 * with the (advanced) consumer index, the CQ's physical id, and either
 * CQ_ARMALL (re-arm interrupt) or plain CQ depending on enable_irq.
 * NOTE(review): elided listing -- the early-out path taken when cons is
 * UINT32_MAX was partially dropped between the visible lines.
 */
1172 static void bnxt_thor_db_rx_cq(void *db_ptr, bool enable_irq)
1174 struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
1175 struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
1176 dbc_dbc_t db_msg = { 0 };
1177 uint32_t cons = cpr->cons;
/* UINT32_MAX marks a ring that has not consumed anything yet. */
1179 if (cons == UINT32_MAX)
1182 cons = RING_NEXT(&cpr->ring, cons);
1184 db_msg.index = ((cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK);
1186 db_msg.type_path_xid = ((cpr->ring.phys_id << DBC_DBC_XID_SFT) &
1187 DBC_DBC_XID_MASK) | DBC_DBC_PATH_L2 |
1188 ((enable_irq) ? DBC_DBC_TYPE_CQ_ARMALL: DBC_DBC_TYPE_CQ);
1190 bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
1191 BUS_SPACE_BARRIER_WRITE);
1192 bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
1193 htole64(*(uint64_t *)&db_msg));
1194 bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
1195 BUS_SPACE_BARRIER_WRITE);
/*
 * Thor (P5) TX completion-queue doorbell.  Identical message layout to
 * bnxt_thor_db_rx_cq() but uses the raw consumer index without the
 * UINT32_MAX guard or RING_NEXT advance seen on the RX side.
 */
1198 static void bnxt_thor_db_tx_cq(void *db_ptr, bool enable_irq)
1200 struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
1201 struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
1202 dbc_dbc_t db_msg = { 0 };
1203 uint32_t cons = cpr->cons;
1205 db_msg.index = ((cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK);
1207 db_msg.type_path_xid = ((cpr->ring.phys_id << DBC_DBC_XID_SFT) &
1208 DBC_DBC_XID_MASK) | DBC_DBC_PATH_L2 |
1209 ((enable_irq) ? DBC_DBC_TYPE_CQ_ARMALL: DBC_DBC_TYPE_CQ);
1211 bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
1212 BUS_SPACE_BARRIER_WRITE);
1213 bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
1214 htole64(*(uint64_t *)&db_msg));
1215 bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
1216 BUS_SPACE_BARRIER_WRITE);
/*
 * Thor (P5) notification-queue doorbell.  Same dbc_dbc_t layout, with
 * NQ_ARM / NQ types selecting whether the NQ interrupt is re-armed.
 */
1219 static void bnxt_thor_db_nq(void *db_ptr, bool enable_irq)
1221 struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
1222 struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
1223 dbc_dbc_t db_msg = { 0 };
1224 uint32_t cons = cpr->cons;
1226 db_msg.index = ((cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK);
1228 db_msg.type_path_xid = ((cpr->ring.phys_id << DBC_DBC_XID_SFT) &
1229 DBC_DBC_XID_MASK) | DBC_DBC_PATH_L2 |
1230 ((enable_irq) ? DBC_DBC_TYPE_NQ_ARM: DBC_DBC_TYPE_NQ);
1232 bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
1233 BUS_SPACE_BARRIER_WRITE);
1234 bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
1235 htole64(*(uint64_t *)&db_msg));
1236 bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
1237 BUS_SPACE_BARRIER_WRITE);
/*
 * Look up a softc on the global pf_list either by interface name
 * (compared with strncmp up to BNXT_MAX_STR) or, when dev_name is NULL,
 * by PCI domain/bus/dev-fn triple.
 * NOTE(review): elided listing -- the return statements inside the
 * matches and the final "not found" return were dropped by extraction.
 */
1240 struct bnxt_softc *bnxt_find_dev(uint32_t domain, uint32_t bus, uint32_t dev_fn, char *dev_name)
1242 struct bnxt_softc_list *sc = NULL;
1244 SLIST_FOREACH(sc, &pf_list, next) {
1245 /* get the softc reference based on device name */
1246 if (dev_name && !strncmp(dev_name, if_name(iflib_get_ifp(sc->softc->ctx)), BNXT_MAX_STR)) {
1249 /* get the softc reference based on domain,bus,device,function */
1251 (domain == sc->softc->domain) &&
1252 (bus == sc->softc->bus) &&
1253 (dev_fn == sc->softc->dev_fn)) {
1262 /* Device setup and teardown */
/*
 * iflib pre-attach: bring the device far enough up to describe its
 * queues and capabilities to iflib.  Maps BARs, initializes the HWRM
 * channel, queries firmware/NVRAM/function capabilities, selects the
 * doorbell ops for the chip generation, and fills in the iflib
 * softc-context (scctx) limits.  Errors unwind through the goto labels
 * at the bottom in reverse order of acquisition.
 * NOTE(review): elided listing -- "if (rc) goto ..." checks, braces,
 * "break" statements in the switch, and several labels were dropped by
 * extraction; consult the upstream file for the full error unwinding.
 */
1264 bnxt_attach_pre(if_ctx_t ctx)
1266 struct bnxt_softc *softc = iflib_get_softc(ctx);
1267 if_softc_ctx_t scctx;
1271 softc->dev = iflib_get_dev(ctx);
1272 softc->media = iflib_get_media(ctx);
1273 softc->scctx = iflib_get_softc_ctx(ctx);
1274 softc->sctx = iflib_get_sctx(ctx);
1275 scctx = softc->scctx;
1277 /* TODO: Better way of detecting NPAR/VF is needed */
1278 switch (pci_get_device(softc->dev)) {
1283 case BCM57412_NPAR1:
1284 case BCM57412_NPAR2:
1285 case BCM57414_NPAR1:
1286 case BCM57414_NPAR2:
1287 case BCM57416_NPAR1:
1288 case BCM57416_NPAR2:
1289 softc->flags |= BNXT_FLAG_NPAR;
1291 case NETXTREME_C_VF1:
1292 case NETXTREME_C_VF2:
1293 case NETXTREME_C_VF3:
1294 case NETXTREME_E_VF1:
1295 case NETXTREME_E_VF2:
1296 case NETXTREME_E_VF3:
1297 softc->flags |= BNXT_FLAG_VF;
/* Encode slot/function the same way Linux does (5-bit dev, 3-bit fn). */
1301 #define PCI_DEVFN(device, func) ((((device) & 0x1f) << 3) | ((func) & 0x07))
1302 softc->domain = pci_get_domain(softc->dev);
1303 softc->bus = pci_get_bus(softc->dev);
1304 softc->slot = pci_get_slot(softc->dev);
1305 softc->function = pci_get_function(softc->dev);
1306 softc->dev_fn = PCI_DEVFN(softc->slot, softc->function);
/* First PF to attach initializes the global softc list. */
1308 if (bnxt_num_pfs == 0)
1309 SLIST_INIT(&pf_list);
1311 softc->list.softc = softc;
1312 SLIST_INSERT_HEAD(&pf_list, &softc->list, next);
1314 pci_enable_busmaster(softc->dev);
1316 if (bnxt_pci_mapping(softc))
1319 /* HWRM setup/init */
1320 BNXT_HWRM_LOCK_INIT(softc, device_get_nameunit(softc->dev));
1321 rc = bnxt_alloc_hwrm_dma_mem(softc);
1325 /* Get firmware version and compare with driver */
1326 softc->ver_info = malloc(sizeof(struct bnxt_ver_info),
1327 M_DEVBUF, M_NOWAIT | M_ZERO);
1328 if (softc->ver_info == NULL) {
1330 device_printf(softc->dev,
1331 "Unable to allocate space for version info\n");
1332 goto ver_alloc_fail;
1334 /* Default minimum required HWRM version */
1335 softc->ver_info->hwrm_min_major = HWRM_VERSION_MAJOR;
1336 softc->ver_info->hwrm_min_minor = HWRM_VERSION_MINOR;
1337 softc->ver_info->hwrm_min_update = HWRM_VERSION_UPDATE;
1339 rc = bnxt_hwrm_ver_get(softc);
1341 device_printf(softc->dev, "attach: hwrm ver get failed\n");
1345 /* Now perform a function reset */
1346 rc = bnxt_hwrm_func_reset(softc);
/* Firmware may require the short-command format for large requests. */
1348 if ((softc->flags & BNXT_FLAG_SHORT_CMD) ||
1349 softc->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
1350 rc = bnxt_alloc_hwrm_short_cmd_req(softc);
1352 goto hwrm_short_cmd_alloc_fail;
/* BCM5750x family == P5 ("Thor") generation. */
1355 if ((softc->ver_info->chip_num == BCM57508) ||
1356 (softc->ver_info->chip_num == BCM57504) ||
1357 (softc->ver_info->chip_num == BCM57502))
1358 softc->flags |= BNXT_FLAG_CHIP_P5;
1360 softc->flags |= BNXT_FLAG_TPA;
1362 /* No TPA for Thor A0 */
1363 if (BNXT_CHIP_P5(softc) && (!softc->ver_info->chip_rev) &&
1364 (!softc->ver_info->chip_metal))
1365 softc->flags &= ~BNXT_FLAG_TPA;
1367 /* TBD ++ Add TPA support from Thor B1 */
1368 if (BNXT_CHIP_P5(softc))
1369 softc->flags &= ~BNXT_FLAG_TPA;
1371 /* Get NVRAM info */
1372 if (BNXT_PF(softc)) {
1373 softc->nvm_info = malloc(sizeof(struct bnxt_nvram_info),
1374 M_DEVBUF, M_NOWAIT | M_ZERO);
1375 if (softc->nvm_info == NULL) {
1377 device_printf(softc->dev,
1378 "Unable to allocate space for NVRAM info\n");
1379 goto nvm_alloc_fail;
1382 rc = bnxt_hwrm_nvm_get_dev_info(softc, &softc->nvm_info->mfg_id,
1383 &softc->nvm_info->device_id, &softc->nvm_info->sector_size,
1384 &softc->nvm_info->size, &softc->nvm_info->reserved_size,
1385 &softc->nvm_info->available_size);
/* Select doorbell implementations for the detected chip generation. */
1388 if (BNXT_CHIP_P5(softc)) {
1389 softc->db_ops.bnxt_db_tx = bnxt_thor_db_tx;
1390 softc->db_ops.bnxt_db_rx = bnxt_thor_db_rx;
1391 softc->db_ops.bnxt_db_rx_cq = bnxt_thor_db_rx_cq;
1392 softc->db_ops.bnxt_db_tx_cq = bnxt_thor_db_tx_cq;
1393 softc->db_ops.bnxt_db_nq = bnxt_thor_db_nq;
1395 softc->db_ops.bnxt_db_tx = bnxt_cuw_db_tx;
1396 softc->db_ops.bnxt_db_rx = bnxt_cuw_db_rx;
1397 softc->db_ops.bnxt_db_rx_cq = bnxt_cuw_db_cq;
1398 softc->db_ops.bnxt_db_tx_cq = bnxt_cuw_db_cq;
1401 /* Register the driver with the FW */
1402 rc = bnxt_hwrm_func_drv_rgtr(softc);
1404 device_printf(softc->dev, "attach: hwrm drv rgtr failed\n");
1408 rc = bnxt_hwrm_func_rgtr_async_events(softc, NULL, 0);
1410 device_printf(softc->dev, "attach: hwrm rgtr async evts failed\n");
1414 /* Get the queue config */
1415 rc = bnxt_hwrm_queue_qportcfg(softc);
1417 device_printf(softc->dev, "attach: hwrm qportcfg failed\n");
/* 0x10803 == first HWRM spec with backing-store / new resource mgmt. */
1421 if (softc->hwrm_spec_code >= 0x10803) {
1422 rc = bnxt_alloc_ctx_mem(softc);
1424 device_printf(softc->dev, "attach: alloc_ctx_mem failed\n");
1427 rc = bnxt_hwrm_func_resc_qcaps(softc, true);
1429 softc->flags |= BNXT_FLAG_FW_CAP_NEW_RM;
1432 /* Get the HW capabilities */
1433 rc = bnxt_hwrm_func_qcaps(softc);
1437 /* Get the current configuration of this function */
1438 rc = bnxt_hwrm_func_qcfg(softc);
1440 device_printf(softc->dev, "attach: hwrm func qcfg failed\n");
1444 iflib_set_mac(ctx, softc->func.mac_addr);
1446 scctx->isc_txrx = &bnxt_txrx;
1447 scctx->isc_tx_csum_flags = (CSUM_IP | CSUM_TCP | CSUM_UDP |
1448 CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_TSO);
1449 scctx->isc_capabilities = scctx->isc_capenable =
1450 /* These are translated to hwassist bits */
1451 IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 | IFCAP_TSO4 | IFCAP_TSO6 |
1452 /* These are checked by iflib */
1453 IFCAP_LRO | IFCAP_VLAN_HWFILTER |
1454 /* These are part of the iflib mask */
1455 IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_VLAN_MTU |
1456 IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO |
1457 /* These likely get lost... */
1458 IFCAP_VLAN_HWCSUM | IFCAP_JUMBO_MTU;
1460 if (bnxt_wol_supported(softc))
1461 scctx->isc_capabilities |= IFCAP_WOL_MAGIC;
1462 bnxt_get_wol_settings(softc);
1464 scctx->isc_capenable |= IFCAP_WOL_MAGIC;
1466 /* Get the queue config */
1467 bnxt_get_wol_settings(softc);
1468 if (BNXT_CHIP_P5(softc))
1469 bnxt_hwrm_reserve_pf_rings(softc);
1470 rc = bnxt_hwrm_func_qcfg(softc);
1472 device_printf(softc->dev, "attach: hwrm func qcfg failed\n");
1476 bnxt_clear_ids(softc);
1480 /* Now set up iflib sc */
1481 scctx->isc_tx_nsegments = 31,
1482 scctx->isc_tx_tso_segments_max = 31;
1483 scctx->isc_tx_tso_size_max = BNXT_TSO_SIZE;
1484 scctx->isc_tx_tso_segsize_max = BNXT_TSO_SIZE;
1485 scctx->isc_vectors = softc->func.max_cp_rings;
1486 scctx->isc_min_frame_size = BNXT_MIN_FRAME_SIZE;
1487 scctx->isc_txrx = &bnxt_txrx;
/* Sanity-warn if the completion rings cannot absorb the RX/TX rings. */
1489 if (scctx->isc_nrxd[0] <
1490 ((scctx->isc_nrxd[1] * 4) + scctx->isc_nrxd[2]))
1491 device_printf(softc->dev,
1492 "WARNING: nrxd0 (%d) should be at least 4 * nrxd1 (%d) + nrxd2 (%d). Driver may be unstable\n",
1493 scctx->isc_nrxd[0], scctx->isc_nrxd[1], scctx->isc_nrxd[2]);
1494 if (scctx->isc_ntxd[0] < scctx->isc_ntxd[1] * 2)
1495 device_printf(softc->dev,
1496 "WARNING: ntxd0 (%d) should be at least 2 * ntxd1 (%d). Driver may be unstable\n",
1497 scctx->isc_ntxd[0], scctx->isc_ntxd[1]);
1498 scctx->isc_txqsizes[0] = sizeof(struct cmpl_base) * scctx->isc_ntxd[0];
1499 scctx->isc_txqsizes[1] = sizeof(struct tx_bd_short) *
1501 scctx->isc_txqsizes[2] = sizeof(struct cmpl_base) * scctx->isc_ntxd[2];
1502 scctx->isc_rxqsizes[0] = sizeof(struct cmpl_base) * scctx->isc_nrxd[0];
1503 scctx->isc_rxqsizes[1] = sizeof(struct rx_prod_pkt_bd) *
1505 scctx->isc_rxqsizes[2] = sizeof(struct rx_prod_pkt_bd) *
/* Queue-set maxima are bounded by MSI-X vectors and what FW allocated;
 * one completion ring and one vector are reserved for the default/async. */
1508 scctx->isc_nrxqsets_max = min(pci_msix_count(softc->dev)-1,
1509 softc->fn_qcfg.alloc_completion_rings - 1);
1510 scctx->isc_nrxqsets_max = min(scctx->isc_nrxqsets_max,
1511 softc->fn_qcfg.alloc_rx_rings);
1512 scctx->isc_nrxqsets_max = min(scctx->isc_nrxqsets_max,
1513 softc->fn_qcfg.alloc_vnics);
1514 scctx->isc_ntxqsets_max = min(softc->fn_qcfg.alloc_tx_rings,
1515 softc->fn_qcfg.alloc_completion_rings - scctx->isc_nrxqsets_max - 1);
1517 scctx->isc_rss_table_size = HW_HASH_INDEX_SIZE;
1518 scctx->isc_rss_table_mask = scctx->isc_rss_table_size - 1;
1520 /* iflib will map and release this bar */
1521 scctx->isc_msix_bar = pci_msix_table_bar(softc->dev);
1524 * Default settings for HW LRO (TPA):
1525 * Disable HW LRO by default
1526 * Can be enabled after taking care of 'packet forwarding'
1528 if (softc->flags & BNXT_FLAG_TPA) {
1529 softc->hw_lro.enable = 0;
1530 softc->hw_lro.is_mode_gro = 0;
1531 softc->hw_lro.max_agg_segs = 5; /* 2^5 = 32 segs */
1532 softc->hw_lro.max_aggs = HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX;
1533 softc->hw_lro.min_agg_len = 512;
1536 /* Allocate the default completion ring */
1537 softc->def_cp_ring.stats_ctx_id = HWRM_NA_SIGNATURE;
1538 softc->def_cp_ring.ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
1539 softc->def_cp_ring.ring.softc = softc;
1540 softc->def_cp_ring.ring.id = 0;
1541 softc->def_cp_ring.ring.doorbell = (BNXT_CHIP_P5(softc)) ?
1542 DB_PF_OFFSET_P5 : softc->def_cp_ring.ring.id * 0x80;
1543 softc->def_cp_ring.ring.ring_size = PAGE_SIZE /
1544 sizeof(struct cmpl_base);
1545 rc = iflib_dma_alloc(ctx,
1546 sizeof(struct cmpl_base) * softc->def_cp_ring.ring.ring_size,
1547 &softc->def_cp_ring_mem, 0);
1548 softc->def_cp_ring.ring.vaddr = softc->def_cp_ring_mem.idi_vaddr;
1549 softc->def_cp_ring.ring.paddr = softc->def_cp_ring_mem.idi_paddr;
1550 iflib_config_gtask_init(ctx, &softc->def_cp_task, bnxt_def_cp_task,
1553 rc = bnxt_init_sysctl_ctx(softc);
1555 goto init_sysctl_failed;
1556 if (BNXT_PF(softc)) {
1557 rc = bnxt_create_nvram_sysctls(softc->nvm_info);
/* Random RSS key; hash over v4/v6 TCP+UDP and plain IP. */
1562 arc4rand(softc->vnic_info.rss_hash_key, HW_HASH_KEY_SIZE, 0);
1563 softc->vnic_info.rss_hash_type =
1564 HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 |
1565 HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 |
1566 HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4 |
1567 HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 |
1568 HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6 |
1569 HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
1570 rc = bnxt_create_config_sysctls_pre(softc);
1574 rc = bnxt_create_hw_lro_sysctls(softc);
1578 rc = bnxt_create_pause_fc_sysctls(softc);
1582 /* Initialize the vlan list */
1583 SLIST_INIT(&softc->vnic_info.vlan_tags);
1584 softc->vnic_info.vlan_tag_list.idi_vaddr = NULL;
1585 softc->state_bv = bit_alloc(BNXT_STATE_MAX, M_DEVBUF,
/* ---- error unwinding, reverse order of acquisition ---- */
1591 bnxt_free_sysctl_ctx(softc);
1593 bnxt_hwrm_func_drv_unrgtr(softc, false);
1596 free(softc->nvm_info, M_DEVBUF);
1598 bnxt_free_hwrm_short_cmd_req(softc);
1599 hwrm_short_cmd_alloc_fail:
1601 free(softc->ver_info, M_DEVBUF);
1603 bnxt_free_hwrm_dma_mem(softc);
1605 BNXT_HWRM_LOCK_DESTROY(softc);
1606 bnxt_pci_mapping_free(softc);
1607 pci_disable_busmaster(softc->dev);
/*
 * iflib post-attach: probe the PHY, create the version/media sysctls,
 * default the media selection to autoneg, and derive the max frame and
 * RX buffer sizes from the interface MTU.
 * NOTE(review): elided listing -- the error check after bnxt_probe_phy()
 * and the final return were dropped by extraction.
 */
1612 bnxt_attach_post(if_ctx_t ctx)
1614 struct bnxt_softc *softc = iflib_get_softc(ctx);
1615 if_t ifp = iflib_get_ifp(ctx);
1618 bnxt_create_config_sysctls_post(softc);
1620 /* Update link state etc... */
1621 rc = bnxt_probe_phy(softc);
1625 /* Needs to be done after probing the phy */
1626 bnxt_create_ver_sysctls(softc);
1627 bnxt_add_media_types(softc);
1628 ifmedia_set(softc->media, IFM_ETHER | IFM_AUTO);
1630 softc->scctx->isc_max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN +
/* RX buffers never exceed one page, regardless of max frame size. */
1633 softc->rx_buf_size = min(softc->scctx->isc_max_frame_size, BNXT_PAGE_SIZE);
/*
 * iflib detach: unwind everything bnxt_attach_pre()/post() and the data
 * path set up -- WoL config, interrupts, sysctls, firmware registration,
 * DMA memory, VLAN tag list, and PCI resources.
 * NOTE(review): elided listing -- braces and several intermediate lines
 * were dropped by extraction.
 */
1640 bnxt_detach(if_ctx_t ctx)
1642 struct bnxt_softc *softc = iflib_get_softc(ctx);
1643 struct bnxt_vlan_tag *tag;
1644 struct bnxt_vlan_tag *tmp;
1647 SLIST_REMOVE(&pf_list, &softc->list, bnxt_softc_list, next);
1649 bnxt_wol_config(ctx);
1650 bnxt_do_disable_intr(&softc->def_cp_ring);
1651 bnxt_free_sysctl_ctx(softc);
1652 bnxt_hwrm_func_reset(softc);
1653 bnxt_free_ctx_mem(softc);
1654 bnxt_clear_ids(softc);
1655 iflib_irq_free(ctx, &softc->def_cp_ring.irq);
1656 iflib_config_gtask_deinit(&softc->def_cp_task);
1657 /* We need to free() these here... */
/* Per-queue IRQs: P5 uses NQ vectors, older chips the RX CP rings. */
1658 for (i = softc->nrxqsets-1; i>=0; i--) {
1659 if (BNXT_CHIP_P5(softc))
1660 iflib_irq_free(ctx, &softc->nq_rings[i].irq);
1662 iflib_irq_free(ctx, &softc->rx_cp_rings[i].irq);
1665 iflib_dma_free(&softc->vnic_info.mc_list);
1666 iflib_dma_free(&softc->vnic_info.rss_hash_key_tbl);
1667 iflib_dma_free(&softc->vnic_info.rss_grp_tbl);
1668 if (softc->vnic_info.vlan_tag_list.idi_vaddr)
1669 iflib_dma_free(&softc->vnic_info.vlan_tag_list);
/* SAFE variant because each tag is freed while iterating. */
1670 SLIST_FOREACH_SAFE(tag, &softc->vnic_info.vlan_tags, next, tmp)
1671 free(tag, M_DEVBUF);
1672 iflib_dma_free(&softc->def_cp_ring_mem);
1673 for (i = 0; i < softc->nrxqsets; i++)
1674 free(softc->rx_rings[i].tpa_start, M_DEVBUF);
1675 free(softc->ver_info, M_DEVBUF);
1677 free(softc->nvm_info, M_DEVBUF);
1679 bnxt_hwrm_func_drv_unrgtr(softc, false);
1680 bnxt_free_hwrm_dma_mem(softc);
1681 bnxt_free_hwrm_short_cmd_req(softc);
1682 BNXT_HWRM_LOCK_DESTROY(softc);
1684 free(softc->state_bv, M_DEVBUF);
1685 pci_disable_busmaster(softc->dev);
1686 bnxt_pci_mapping_free(softc);
/*
 * Free every firmware-side resource this function owns, in reverse
 * order of allocation: default CP ring, TX rings + their CP rings and
 * stat contexts, L2 filter, VNIC + RSS context, then per-RX-queue ring
 * groups, AG/RX/CP rings, NQ rings (P5 only) and stat contexts.
 * NOTE(review): elided listing -- the "if (rc) goto fail" checks between
 * calls and the final return were dropped by extraction.
 */
1692 bnxt_hwrm_resource_free(struct bnxt_softc *softc)
1696 rc = bnxt_hwrm_ring_free(softc,
1697 HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
1698 &softc->def_cp_ring.ring,
1699 (uint16_t)HWRM_NA_SIGNATURE);
1703 for (i = 0; i < softc->ntxqsets; i++) {
1704 rc = bnxt_hwrm_ring_free(softc,
1705 HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
1706 &softc->tx_rings[i],
1707 softc->tx_cp_rings[i].ring.phys_id);
1711 rc = bnxt_hwrm_ring_free(softc,
1712 HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
1713 &softc->tx_cp_rings[i].ring,
1714 (uint16_t)HWRM_NA_SIGNATURE);
1718 rc = bnxt_hwrm_stat_ctx_free(softc, &softc->tx_cp_rings[i]);
1722 rc = bnxt_hwrm_free_filter(softc);
1726 rc = bnxt_hwrm_vnic_free(softc, &softc->vnic_info);
1730 rc = bnxt_hwrm_vnic_ctx_free(softc, softc->vnic_info.rss_id);
1734 for (i = 0; i < softc->nrxqsets; i++) {
1735 rc = bnxt_hwrm_ring_grp_free(softc, &softc->grp_info[i]);
1739 rc = bnxt_hwrm_ring_free(softc,
1740 HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG,
1741 &softc->ag_rings[i],
1742 (uint16_t)HWRM_NA_SIGNATURE);
1746 rc = bnxt_hwrm_ring_free(softc,
1747 HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
1748 &softc->rx_rings[i],
1749 softc->rx_cp_rings[i].ring.phys_id);
1753 rc = bnxt_hwrm_ring_free(softc,
1754 HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
1755 &softc->rx_cp_rings[i].ring,
1756 (uint16_t)HWRM_NA_SIGNATURE);
/* NQ rings exist only on P5 (Thor) chips. */
1760 if (BNXT_CHIP_P5(softc)) {
1761 rc = bnxt_hwrm_ring_free(softc,
1762 HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ,
1763 &softc->nq_rings[i].ring,
1764 (uint16_t)HWRM_NA_SIGNATURE);
1769 rc = bnxt_hwrm_stat_ctx_free(softc, &softc->rx_cp_rings[i]);
1780 bnxt_func_reset(struct bnxt_softc *softc)
1783 if (!BNXT_CHIP_P5(softc)) {
1784 bnxt_hwrm_func_reset(softc);
1788 bnxt_hwrm_resource_free(softc);
/*
 * Fill the RSS indirection table, round-robining over the RX queues.
 * On P5 each table slot consumes two entries (RX ring id + CP ring id);
 * on older chips one entry per slot holds the ring-group id.
 */
1793 bnxt_rss_grp_tbl_init(struct bnxt_softc *softc)
1795 uint16_t *rgt = (uint16_t *) softc->vnic_info.rss_grp_tbl.idi_vaddr;
1798 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
1799 if (BNXT_CHIP_P5(softc)) {
1800 rgt[i++] = htole16(softc->rx_rings[j].phys_id);
1801 rgt[i] = htole16(softc->rx_cp_rings[j].ring.phys_id);
1803 rgt[i] = htole16(softc->grp_info[j].grp_id);
/* Wrap back to queue 0 after the last RX queue set. */
1805 if (++j == softc->nrxqsets)
1810 /* Device configuration */
/*
 * iflib init: bring the data path up.  Resets the function (pre-P5),
 * allocates the default CP ring (pre-P5 only), then per RX queue the
 * stat context, NQ (P5), CP ring, RX ring, AG ring and ring group;
 * configures the async ring, VNIC + RSS + TPA; then per TX queue the
 * stat context, CP ring and TX ring; finally enables interrupts and
 * programs the RX mask.  Failures unwind through bnxt_func_reset().
 * NOTE(review): elided listing -- "if (rc) goto fail" checks, labels
 * (e.g. skip_def_cp_ring target) and braces were dropped by extraction.
 */
1812 bnxt_init(if_ctx_t ctx)
1814 struct bnxt_softc *softc = iflib_get_softc(ctx);
1815 struct ifmediareq ifmr;
1819 if (!BNXT_CHIP_P5(softc)) {
1820 rc = bnxt_hwrm_func_reset(softc);
/* Re-init on an already-initialized P5 device skips the reset path. */
1823 } else if (softc->is_dev_init) {
1827 softc->is_dev_init = true;
1828 bnxt_clear_ids(softc);
1830 // TBD -- Check if it is needed for Thor as well
1831 if (BNXT_CHIP_P5(softc))
1832 goto skip_def_cp_ring;
1833 /* Allocate the default completion ring */
1834 softc->def_cp_ring.cons = UINT32_MAX;
1835 softc->def_cp_ring.v_bit = 1;
1836 bnxt_mark_cpr_invalid(&softc->def_cp_ring);
1837 rc = bnxt_hwrm_ring_alloc(softc,
1838 HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
1839 &softc->def_cp_ring.ring);
1843 for (i = 0; i < softc->nrxqsets; i++) {
1844 /* Allocate the statistics context */
1845 rc = bnxt_hwrm_stat_ctx_alloc(softc, &softc->rx_cp_rings[i],
1846 softc->rx_stats[i].idi_paddr);
1850 if (BNXT_CHIP_P5(softc)) {
1851 /* Allocate the NQ */
1852 softc->nq_rings[i].cons = 0;
1853 softc->nq_rings[i].v_bit = 1;
1854 softc->nq_rings[i].last_idx = UINT32_MAX;
1855 bnxt_mark_cpr_invalid(&softc->nq_rings[i]);
1856 rc = bnxt_hwrm_ring_alloc(softc,
1857 HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ,
1858 &softc->nq_rings[i].ring);
/* Arm the NQ interrupt immediately after creation. */
1862 softc->db_ops.bnxt_db_nq(&softc->nq_rings[i], 1);
1864 /* Allocate the completion ring */
1865 softc->rx_cp_rings[i].cons = UINT32_MAX;
1866 softc->rx_cp_rings[i].v_bit = 1;
1867 softc->rx_cp_rings[i].last_idx = UINT32_MAX;
1868 bnxt_mark_cpr_invalid(&softc->rx_cp_rings[i]);
1869 rc = bnxt_hwrm_ring_alloc(softc,
1870 HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
1871 &softc->rx_cp_rings[i].ring);
1875 if (BNXT_CHIP_P5(softc))
1876 softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[i], 1);
1878 /* Allocate the RX ring */
1879 rc = bnxt_hwrm_ring_alloc(softc,
1880 HWRM_RING_ALLOC_INPUT_RING_TYPE_RX, &softc->rx_rings[i]);
1883 softc->db_ops.bnxt_db_rx(&softc->rx_rings[i], 0);
1885 /* Allocate the AG ring */
1886 rc = bnxt_hwrm_ring_alloc(softc,
1887 HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG,
1888 &softc->ag_rings[i]);
1891 softc->db_ops.bnxt_db_rx(&softc->ag_rings[i], 0);
1893 /* Allocate the ring group */
1894 softc->grp_info[i].stats_ctx =
1895 softc->rx_cp_rings[i].stats_ctx_id;
1896 softc->grp_info[i].rx_ring_id = softc->rx_rings[i].phys_id;
1897 softc->grp_info[i].ag_ring_id = softc->ag_rings[i].phys_id;
1898 softc->grp_info[i].cp_ring_id =
1899 softc->rx_cp_rings[i].ring.phys_id;
1900 rc = bnxt_hwrm_ring_grp_alloc(softc, &softc->grp_info[i]);
1905 /* And now set the default CP / NQ ring for the async */
1906 rc = bnxt_cfg_async_cr(softc);
1910 /* Allocate the VNIC RSS context */
1911 rc = bnxt_hwrm_vnic_ctx_alloc(softc, &softc->vnic_info.rss_id);
1915 /* Allocate the vnic */
1916 softc->vnic_info.def_ring_grp = softc->grp_info[0].grp_id;
1917 softc->vnic_info.mru = softc->scctx->isc_max_frame_size;
1918 rc = bnxt_hwrm_vnic_alloc(softc, &softc->vnic_info);
1921 rc = bnxt_hwrm_vnic_cfg(softc, &softc->vnic_info);
1924 rc = bnxt_hwrm_vnic_set_hds(softc, &softc->vnic_info);
1927 rc = bnxt_hwrm_set_filter(softc);
1931 bnxt_rss_grp_tbl_init(softc);
1933 rc = bnxt_hwrm_rss_cfg(softc, &softc->vnic_info,
1934 softc->vnic_info.rss_hash_type);
1938 rc = bnxt_hwrm_vnic_tpa_cfg(softc);
1942 for (i = 0; i < softc->ntxqsets; i++) {
1943 /* Allocate the statistics context */
1944 rc = bnxt_hwrm_stat_ctx_alloc(softc, &softc->tx_cp_rings[i],
1945 softc->tx_stats[i].idi_paddr);
1949 /* Allocate the completion ring */
1950 softc->tx_cp_rings[i].cons = UINT32_MAX;
1951 softc->tx_cp_rings[i].v_bit = 1;
1952 bnxt_mark_cpr_invalid(&softc->tx_cp_rings[i]);
1953 rc = bnxt_hwrm_ring_alloc(softc,
1954 HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
1955 &softc->tx_cp_rings[i].ring);
1959 if (BNXT_CHIP_P5(softc))
1960 softc->db_ops.bnxt_db_tx_cq(&softc->tx_cp_rings[i], 1);
1962 /* Allocate the TX ring */
1963 rc = bnxt_hwrm_ring_alloc(softc,
1964 HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
1965 &softc->tx_rings[i]);
1968 softc->db_ops.bnxt_db_tx(&softc->tx_rings[i], 0);
1971 bnxt_do_enable_intr(&softc->def_cp_ring);
1972 bnxt_media_status(softc->ctx, &ifmr);
1973 bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info);
/* Error path: tear down partial FW state and invalidate ring ids. */
1977 bnxt_func_reset(softc);
1978 bnxt_clear_ids(softc);
/*
 * iflib stop: mark the device down, mask the default interrupt, reset
 * the function (which frees firmware resources on P5), and invalidate
 * all cached ring/context ids.
 */
1983 bnxt_stop(if_ctx_t ctx)
1985 struct bnxt_softc *softc = iflib_get_softc(ctx);
1987 softc->is_dev_init = false;
1988 bnxt_do_disable_intr(&softc->def_cp_ring);
1989 bnxt_func_reset(softc);
1990 bnxt_clear_ids(softc);
/*
 * if_foreach_llmaddr() callback: copy one link-level multicast address
 * into the flat mc_list table at slot "cnt"; stop contributing once the
 * table is full (BNXT_MAX_MC_ADDRS).
 * NOTE(review): elided listing -- the arg cast to the table pointer and
 * the return values were dropped by extraction.
 */
1995 bnxt_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
1999 if (cnt == BNXT_MAX_MC_ADDRS)
2002 bcopy(LLADDR(sdl), &mta[cnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
/*
 * iflib multicast update: rebuild the DMA-visible multicast table from
 * the interface's lladdr list.  If the table overflows, fall back to
 * ALL_MCAST; otherwise sync the table and program an exact MCAST list.
 */
2008 bnxt_multi_set(if_ctx_t ctx)
2010 struct bnxt_softc *softc = iflib_get_softc(ctx);
2011 if_t ifp = iflib_get_ifp(ctx);
2015 mta = softc->vnic_info.mc_list.idi_vaddr;
2016 bzero(mta, softc->vnic_info.mc_list.idi_size);
2017 mcnt = if_foreach_llmaddr(ifp, bnxt_copy_maddr, mta);
2019 if (mcnt > BNXT_MAX_MC_ADDRS) {
2020 softc->vnic_info.rx_mask |=
2021 HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
2022 bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info);
2024 softc->vnic_info.rx_mask &=
2025 ~HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
/* Make the table visible to the device before handing it to FW. */
2026 bus_dmamap_sync(softc->vnic_info.mc_list.idi_tag,
2027 softc->vnic_info.mc_list.idi_map, BUS_DMASYNC_PREWRITE);
2028 softc->vnic_info.mc_list_count = mcnt;
2029 softc->vnic_info.rx_mask |=
2030 HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
2031 if (bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info))
2032 device_printf(softc->dev,
2033 "set_multi: rx_mask set failed\n");
/*
 * iflib MTU change: reject MTUs above BNXT_MAX_MTU, then recompute the
 * max frame size (MTU + Ethernet header + CRC) and cap the RX buffer
 * size at one page.
 */
2038 bnxt_mtu_set(if_ctx_t ctx, uint32_t mtu)
2040 struct bnxt_softc *softc = iflib_get_softc(ctx);
2042 if (mtu > BNXT_MAX_MTU)
2045 softc->scctx->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2046 softc->rx_buf_size = min(softc->scctx->isc_max_frame_size, BNXT_PAGE_SIZE);
/*
 * iflib media-status: refresh link state from firmware, report
 * active/duplex/pause flags, and pick the active media entry by
 * matching baudrate against the list built in bnxt_add_media_types().
 */
2051 bnxt_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
2053 struct bnxt_softc *softc = iflib_get_softc(ctx);
2054 struct bnxt_link_info *link_info = &softc->link_info;
2055 struct ifmedia_entry *next;
2056 uint64_t target_baudrate = bnxt_get_baudrate(link_info);
2057 int active_media = IFM_UNKNOWN;
2059 bnxt_update_link(softc, true);
2061 ifmr->ifm_status = IFM_AVALID;
2062 ifmr->ifm_active = IFM_ETHER;
2064 if (link_info->link_up)
2065 ifmr->ifm_status |= IFM_ACTIVE;
2067 ifmr->ifm_status &= ~IFM_ACTIVE;
2069 if (link_info->duplex == HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL)
2070 ifmr->ifm_active |= IFM_FDX;
2072 ifmr->ifm_active |= IFM_HDX;
2075 * Go through the list of supported media which got prepared
2076 * as part of bnxt_add_media_types() using api ifmedia_add().
2078 LIST_FOREACH(next, &(iflib_get_media(ctx)->ifm_list), ifm_list) {
2079 if (ifmedia_baudrate(next->ifm_media) == target_baudrate) {
2080 active_media = next->ifm_media;
2084 ifmr->ifm_active |= active_media;
2086 if (link_info->flow_ctrl.rx)
2087 ifmr->ifm_active |= IFM_ETH_RXPAUSE;
2088 if (link_info->flow_ctrl.tx)
2089 ifmr->ifm_active |= IFM_ETH_TXPAUSE;
2091 bnxt_report_link(softc);
/*
 * iflib media-change: map the requested ifmedia subtype to a forced
 * HWRM link speed (clearing autoneg), or re-enable autoneg for
 * unsupported/auto selections, then push the setting to firmware and
 * refresh status.
 * NOTE(review): elided listing -- "break" statements and several case
 * labels between the visible lines were dropped by extraction.
 */
2096 bnxt_media_change(if_ctx_t ctx)
2098 struct bnxt_softc *softc = iflib_get_softc(ctx);
2099 struct ifmedia *ifm = iflib_get_media(ctx);
2100 struct ifmediareq ifmr;
2103 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2106 switch (IFM_SUBTYPE(ifm->ifm_media)) {
2108 softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
2109 softc->link_info.req_link_speed =
2110 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100MB;
2114 case IFM_1000_SGMII:
2118 softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
2119 softc->link_info.req_link_speed =
2120 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_1GB;
2124 softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
2125 softc->link_info.req_link_speed =
2126 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2_5GB;
2133 softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
2134 softc->link_info.req_link_speed =
2135 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2138 softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
2139 softc->link_info.req_link_speed =
2140 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_20GB;
2145 softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
2146 softc->link_info.req_link_speed =
2147 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_25GB;
2154 case IFM_40G_XLAUI_AC:
2155 softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
2156 softc->link_info.req_link_speed =
2157 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2161 softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
2162 softc->link_info.req_link_speed =
2163 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2169 softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
2170 softc->link_info.req_link_speed =
2171 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
/* default: fall back to autonegotiation. */
2174 device_printf(softc->dev,
2175 "Unsupported media type! Using auto\n");
2179 softc->link_info.autoneg |= BNXT_AUTONEG_SPEED;
2182 rc = bnxt_hwrm_set_link_setting(softc, true, true, true);
2183 bnxt_media_status(softc->ctx, &ifmr);
/*
 * iflib promiscuous-mode update: set ALL_MCAST when the interface asks
 * for allmulti (or the mc list would overflow), set PROMISCUOUS plus
 * ANYVLAN_NONVLAN when IFF_PROMISC is on, then push the combined RX
 * mask to firmware.
 */
2188 bnxt_promisc_set(if_ctx_t ctx, int flags)
2190 struct bnxt_softc *softc = iflib_get_softc(ctx);
2191 if_t ifp = iflib_get_ifp(ctx);
2194 if (if_getflags(ifp) & IFF_ALLMULTI ||
2195 if_llmaddr_count(ifp) > BNXT_MAX_MC_ADDRS)
2196 softc->vnic_info.rx_mask |=
2197 HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
2199 softc->vnic_info.rx_mask &=
2200 ~HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
2202 if (if_getflags(ifp) & IFF_PROMISC)
2203 softc->vnic_info.rx_mask |=
2204 HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS |
2205 HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN;
2207 softc->vnic_info.rx_mask &=
2208 ~(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS);
2210 rc = bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info);
/*
 * iflib .ifdi_get_counter method: standard ifnet counters are served by
 * the iflib default implementation.  (Extraction gaps; comments only.)
 */
2216 bnxt_get_counter(if_ctx_t ctx, ift_counter cnt)
2218 if_t ifp = iflib_get_ifp(ctx);
2220 if (cnt < IFCOUNTERS)
2221 return if_get_counter_default(ifp, cnt);
/*
 * Periodic admin work (scheduled from bnxt_if_timer): refresh port
 * statistics via HWRM and, on P5 chips, service a deferred link-change
 * notification.  (Extraction gaps; comments only.)
 */
2227 bnxt_update_admin_status(if_ctx_t ctx)
2229 struct bnxt_softc *softc = iflib_get_softc(ctx);
2232 * When SR-IOV is enabled, avoid each VF sending this HWRM
2233 * request every sec with which firmware timeouts can happen
2235 if (!BNXT_PF(softc))
2238 bnxt_hwrm_port_qstats(softc);
/* Extended stats are only available when firmware advertised the cap. */
2240 if (BNXT_CHIP_P5(softc) &&
2241 (softc->flags & BNXT_FLAG_FW_CAP_EXT_STATS))
2242 bnxt_hwrm_port_qstats_ext(softc);
2244 if (BNXT_CHIP_P5(softc)) {
2245 struct ifmediareq ifmr;
/* Link change was flagged from the async event path; clear and handle. */
2247 if (bit_test(softc->state_bv, BNXT_STATE_LINK_CHANGE)) {
2248 bit_clear(softc->state_bv, BNXT_STATE_LINK_CHANGE);
2249 bnxt_media_status(softc->ctx, &ifmr);
/*
 * iflib .ifdi_timer method: rate-limits the admin task to once per
 * second using the global 'ticks' counter.  (Extraction gaps; comments only.)
 */
2257 bnxt_if_timer(if_ctx_t ctx, uint16_t qid)
2260 struct bnxt_softc *softc = iflib_get_softc(ctx);
2261 uint64_t ticks_now = ticks;
2263 /* Schedule bnxt_update_admin_status() once per sec */
2264 if (ticks_now - softc->admin_ticks >= hz) {
2265 softc->admin_ticks = ticks_now;
2266 iflib_admin_intr_deferred(ctx);
/*
 * Re-arm interrupts on one completion ring: NQ doorbell on P5 chips,
 * RX CQ doorbell otherwise.  Skips rings not yet allocated in firmware
 * (phys_id == HWRM_NA_SIGNATURE).  (Extraction gaps; comments only.)
 */
2273 bnxt_do_enable_intr(struct bnxt_cp_ring *cpr)
2275 struct bnxt_softc *softc = cpr->ring.softc;
2277 if (cpr->ring.phys_id == (uint16_t)HWRM_NA_SIGNATURE)
2280 if (BNXT_CHIP_P5(softc))
2281 softc->db_ops.bnxt_db_nq(cpr, 1);
2283 softc->db_ops.bnxt_db_rx_cq(cpr, 1);
/*
 * Mirror of bnxt_do_enable_intr() with the doorbell's irq-enable
 * argument set to 0: masks interrupts on one completion ring.
 * (Extraction gaps; comments only.)
 */
2287 bnxt_do_disable_intr(struct bnxt_cp_ring *cpr)
2289 struct bnxt_softc *softc = cpr->ring.softc;
2291 if (cpr->ring.phys_id == (uint16_t)HWRM_NA_SIGNATURE)
2294 if (BNXT_CHIP_P5(softc))
2295 softc->db_ops.bnxt_db_nq(cpr, 0);
2297 softc->db_ops.bnxt_db_rx_cq(cpr, 0);
2300 /* Enable all interrupts */
/*
 * iflib .ifdi_intr_enable method: arm the default completion ring and
 * every RX queue's NQ (P5) or RX CQ (older chips) doorbell.
 * (Extraction gaps; comments only.)
 */
2302 bnxt_intr_enable(if_ctx_t ctx)
2304 struct bnxt_softc *softc = iflib_get_softc(ctx);
2307 bnxt_do_enable_intr(&softc->def_cp_ring);
2308 for (i = 0; i < softc->nrxqsets; i++)
2309 if (BNXT_CHIP_P5(softc))
2310 softc->db_ops.bnxt_db_nq(&softc->nq_rings[i], 1);
2312 softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[i], 1);
2317 /* Enable interrupt for a single queue */
/*
 * iflib .ifdi_tx_queue_intr_enable method.  On P5 the TX CQ shares the
 * queue's NQ, so the NQ doorbell is armed; otherwise the TX CQ doorbell.
 * (Extraction gaps; comments only.)
 */
2319 bnxt_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
2321 struct bnxt_softc *softc = iflib_get_softc(ctx);
2323 if (BNXT_CHIP_P5(softc))
2324 softc->db_ops.bnxt_db_nq(&softc->nq_rings[qid], 1);
2326 softc->db_ops.bnxt_db_rx_cq(&softc->tx_cp_rings[qid], 1);
/*
 * Log the sequence id of an HWRM_DONE completion seen on an NQ.
 * (Extraction gaps; comments only.)
 */
2332 bnxt_process_cmd_cmpl(struct bnxt_softc *softc, hwrm_cmpl_t *cmd_cmpl)
2334 device_printf(softc->dev, "cmd sequence number %d\n",
2335 cmd_cmpl->sequence_id);
/*
 * Dispatch a non-CQ-notification NQ entry: HWRM command completions and
 * firmware async events; anything else is logged as unhandled.
 * (Extraction gaps; comments only.)
 */
2340 bnxt_process_async_msg(struct bnxt_cp_ring *cpr, tx_cmpl_t *cmpl)
2342 struct bnxt_softc *softc = cpr->ring.softc;
2343 uint16_t type = cmpl->flags_type & TX_CMPL_TYPE_MASK;
2346 case HWRM_CMPL_TYPE_HWRM_DONE:
2347 bnxt_process_cmd_cmpl(softc, (hwrm_cmpl_t *)cmpl);
2349 case HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT:
2350 bnxt_handle_async_event(softc, (cmpl_base_t *) cmpl);
2353 device_printf(softc->dev, "%s:%d Unhandled async message %x\n",
2354 __FUNCTION__, __LINE__, type);
/*
 * Drain valid entries from a P5 notification queue, handing every entry
 * that is not a CQ notification to bnxt_process_async_msg().  Uses the
 * ring's valid-bit protocol (v_bit flips on wrap) to detect new entries.
 * (Extraction gaps; comments only.)
 */
2360 process_nq(struct bnxt_softc *softc, uint16_t nqid)
2362 struct bnxt_cp_ring *cpr = &softc->nq_rings[nqid];
2363 nq_cn_t *cmp = (nq_cn_t *) cpr->ring.vaddr;
2364 bool v_bit = cpr->v_bit;
2365 uint32_t cons = cpr->cons;
2366 uint16_t nq_type, nqe_cnt = 0;
/* Stop when the next entry has not been written by hardware yet. */
2369 if (!NQ_VALID(&cmp[cons], v_bit))
2372 nq_type = NQ_CN_TYPE_MASK & cmp[cons].type;
2374 if (nq_type != NQ_CN_TYPE_CQ_NOTIFICATION)
2375 bnxt_process_async_msg(cpr, (tx_cmpl_t *)&cmp[cons]);
/* Advance consumer index, toggling v_bit on ring wrap. */
2377 NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
/*
 * iflib .ifdi_rx_queue_intr_enable method: on P5, drain the queue's NQ
 * first and then re-arm its doorbell; otherwise re-arm the RX CQ.
 * (Extraction gaps; comments only.)
 */
2388 bnxt_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
2390 struct bnxt_softc *softc = iflib_get_softc(ctx);
2392 if (BNXT_CHIP_P5(softc)) {
2393 process_nq(softc, qid);
2394 softc->db_ops.bnxt_db_nq(&softc->nq_rings[qid], 1);
2396 softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[qid], 1);
2400 /* Disable all interrupts */
/*
 * iflib .ifdi_intr_disable method: mask every RX queue's NQ (P5) or
 * RX CQ doorbell.  TX rings are deliberately untouched (see note below).
 * (Extraction gaps; comments only.)
 */
2402 bnxt_disable_intr(if_ctx_t ctx)
2404 struct bnxt_softc *softc = iflib_get_softc(ctx);
2408 * NOTE: These TX interrupts should never get enabled, so don't
2411 for (i = 0; i < softc->nrxqsets; i++)
2412 if (BNXT_CHIP_P5(softc))
2413 softc->db_ops.bnxt_db_nq(&softc->nq_rings[i], 0);
2415 softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[i], 0);
/*
 * iflib .ifdi_msix_intr_assign method: register the admin (default CP)
 * interrupt on pre-P5 chips, one RX interrupt per queue (NQ on P5,
 * RX CQ otherwise), and softirq handlers for TX.  On failure, frees the
 * IRQs registered so far.  (Extraction gaps; comments only.)
 */
2422 bnxt_msix_intr_assign(if_ctx_t ctx, int msix)
2424 struct bnxt_softc *softc = iflib_get_softc(ctx);
2425 struct bnxt_cp_ring *ring;
/* P5 handles async/default-CP events through the NQs; no separate IRQ. */
2432 if (BNXT_CHIP_P5(softc))
2433 goto skip_default_cp;
2435 rc = iflib_irq_alloc_generic(ctx, &softc->def_cp_ring.irq,
2436 softc->def_cp_ring.ring.id + 1, IFLIB_INTR_ADMIN,
2437 bnxt_handle_def_cp, softc, 0, "def_cp");
2439 device_printf(iflib_get_dev(ctx),
2440 "Failed to register default completion ring handler\n");
/* One MSI-X vector per RX queue set. */
2445 for (i=0; i<softc->scctx->isc_nrxqsets; i++) {
2446 if (BNXT_CHIP_P5(softc)) {
2447 irq = &softc->nq_rings[i].irq;
2448 id = softc->nq_rings[i].ring.id;
2449 ring = &softc->nq_rings[i];
2451 irq = &softc->rx_cp_rings[i].irq;
2452 id = softc->rx_cp_rings[i].ring.id ;
2453 ring = &softc->rx_cp_rings[i];
2455 snprintf(irq_name, sizeof(irq_name), "rxq%d", i);
2456 rc = iflib_irq_alloc_generic(ctx, irq, id + 1, IFLIB_INTR_RX,
2457 bnxt_handle_isr, ring, i, irq_name);
2459 device_printf(iflib_get_dev(ctx),
2460 "Failed to register RX completion ring handler\n");
/* TX completions are serviced via softirq; no dedicated vectors. */
2466 for (i=0; i<softc->scctx->isc_ntxqsets; i++)
2467 iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_TX, NULL, i, "tx_cp");
/* Error unwind: release IRQs allocated before the failure. */
2473 iflib_irq_free(ctx, &softc->rx_cp_rings[i].irq);
2474 iflib_irq_free(ctx, &softc->def_cp_ring.irq);
2479 * We're explicitly allowing duplicates here. They will need to be
2480 * removed as many times as they are added.
/*
 * iflib .ifdi_vlan_register method: record a VLAN tag on the VNIC's tag
 * list; filter_id is set to -1 (not yet programmed in hardware).
 * M_NOWAIT allocation — registration is silently dropped on failure.
 * (Extraction gaps; comments only.)
 */
2483 bnxt_vlan_register(if_ctx_t ctx, uint16_t vtag)
2485 struct bnxt_softc *softc = iflib_get_softc(ctx);
2486 struct bnxt_vlan_tag *new_tag;
2488 new_tag = malloc(sizeof(struct bnxt_vlan_tag), M_DEVBUF, M_NOWAIT);
2489 if (new_tag == NULL)
2491 new_tag->tag = vtag;
2492 new_tag->filter_id = -1;
2493 SLIST_INSERT_HEAD(&softc->vnic_info.vlan_tags, new_tag, next);
/*
 * iflib .ifdi_vlan_unregister method: remove one matching entry for the
 * tag from the VNIC list (one removal per registration, matching the
 * duplicate-allowing register path).  (Extraction gaps; comments only.)
 */
2497 bnxt_vlan_unregister(if_ctx_t ctx, uint16_t vtag)
2499 struct bnxt_softc *softc = iflib_get_softc(ctx);
2500 struct bnxt_vlan_tag *vlan_tag;
2502 SLIST_FOREACH(vlan_tag, &softc->vnic_info.vlan_tags, next) {
2503 if (vlan_tag->tag == vtag) {
2504 SLIST_REMOVE(&softc->vnic_info.vlan_tags, vlan_tag,
2505 bnxt_vlan_tag, next);
2506 free(vlan_tag, M_DEVBUF);
/*
 * Sync the firmware Wake-on-LAN filter with the interface's
 * IFCAP_WOL_MAGIC capability: allocate the magic-packet filter when the
 * capability is enabled, free it otherwise.  No-op when the device does
 * not support WoL.  (Extraction gaps; comments only.)
 */
2513 bnxt_wol_config(if_ctx_t ctx)
2515 struct bnxt_softc *softc = iflib_get_softc(ctx);
2516 if_t ifp = iflib_get_ifp(ctx);
2521 if (!bnxt_wol_supported(softc))
2524 if (if_getcapenable(ifp) & IFCAP_WOL_MAGIC) {
2526 if (bnxt_hwrm_alloc_wol_fltr(softc))
2532 if (bnxt_hwrm_free_wol_fltr(softc))
/*
 * iflib .ifdi_needs_restart method: tells iflib whether a full
 * reinit is required for the given restart event.
 * (Extraction gaps; comments only.)
 */
2542 bnxt_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
2545 case IFLIB_RESTART_VLAN_CONFIG:
/*
 * iflib .ifdi_shutdown method: arm/disarm WoL per current capability
 * before the system powers down.  (Extraction gaps; comments only.)
 */
2552 bnxt_shutdown(if_ctx_t ctx)
2554 bnxt_wol_config(ctx);
/*
 * iflib .ifdi_suspend method: same WoL programming as shutdown.
 * (Extraction gaps; comments only.)
 */
2559 bnxt_suspend(if_ctx_t ctx)
2561 bnxt_wol_config(ctx);
/*
 * iflib .ifdi_resume method: re-read the WoL filter state from firmware
 * after a suspend/resume cycle.  (Extraction gaps; comments only.)
 */
2566 bnxt_resume(if_ctx_t ctx)
2568 struct bnxt_softc *softc = iflib_get_softc(ctx);
2570 bnxt_get_wol_settings(softc);
/*
 * iflib .ifdi_priv_ioctl method (SIOCGPRIVATE_0): privileged driver
 * ioctl multiplexer for NVM/firmware management.  The user buffer is
 * copyin()'d into on-stack storage (bounded by sizeof(iod_storage)),
 * dispatched on iod->hdr.type to the matching bnxt_hwrm_* helper, and
 * results (header rc on failure, full struct on success) are
 * copyout()'d back.  Requires PRIV_DRIVER.
 * (Review note: this extraction is missing interleaved source lines and
 * shows HTML-mangled text in the FW_GET_TIME case; comments only added.)
 */
2575 bnxt_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
2577 struct bnxt_softc *softc = iflib_get_softc(ctx);
2578 struct ifreq *ifr = (struct ifreq *)data;
2579 struct bnxt_ioctl_header *ioh;
2582 struct bnxt_ioctl_data iod_storage, *iod = &iod_storage;
2585 case SIOCGPRIVATE_0:
/* Privileged operation: only PRIV_DRIVER holders may proceed. */
2586 if ((rc = priv_check(curthread, PRIV_DRIVER)) != 0)
2589 ioh = ifr_buffer_get_buffer(ifr);
2590 iol = ifr_buffer_get_length(ifr);
/* Reject requests larger than the on-stack ioctl buffer. */
2591 if (iol > sizeof(iod_storage))
2594 if ((rc = copyin(ioh, iod, iol)) != 0)
2597 switch (iod->hdr.type) {
2598 case BNXT_HWRM_NVM_FIND_DIR_ENTRY:
2600 struct bnxt_ioctl_hwrm_nvm_find_dir_entry *find =
2603 rc = bnxt_hwrm_nvm_find_dir_entry(softc, find->type,
2604 &find->ordinal, find->ext, &find->index,
2605 find->use_index, find->search_opt,
2606 &find->data_length, &find->item_length,
2610 rc = copyout(&iod->hdr.rc, &ioh->rc,
2614 rc = copyout(iod, ioh, iol);
2619 case BNXT_HWRM_NVM_READ:
2621 struct bnxt_ioctl_hwrm_nvm_read *rd = &iod->read;
2622 struct iflib_dma_info dma_data;
2628 * Some HWRM versions can't read more than 0x8000 bytes
2630 rc = iflib_dma_alloc(softc->ctx,
2631 min(rd->length, 0x8000), &dma_data, BUS_DMA_NOWAIT);
/* Read in 0x8000-byte chunks, copying each chunk out to userland. */
2634 for (remain = rd->length, offset = 0;
2635 remain && offset < rd->length; offset += 0x8000) {
2636 csize = min(remain, 0x8000);
2637 rc = bnxt_hwrm_nvm_read(softc, rd->index,
2638 rd->offset + offset, csize, &dma_data);
2641 rc = copyout(&iod->hdr.rc, &ioh->rc,
2645 rc = copyout(dma_data.idi_vaddr,
2646 rd->data + offset, csize);
2652 rc = copyout(iod, ioh, iol);
2654 iflib_dma_free(&dma_data);
2657 case BNXT_HWRM_FW_RESET:
2659 struct bnxt_ioctl_hwrm_fw_reset *rst =
2662 rc = bnxt_hwrm_fw_reset(softc, rst->processor,
2666 rc = copyout(&iod->hdr.rc, &ioh->rc,
2670 rc = copyout(iod, ioh, iol);
2675 case BNXT_HWRM_FW_QSTATUS:
2677 struct bnxt_ioctl_hwrm_fw_qstatus *qstat =
2680 rc = bnxt_hwrm_fw_qstatus(softc, qstat->processor,
2684 rc = copyout(&iod->hdr.rc, &ioh->rc,
2688 rc = copyout(iod, ioh, iol);
2693 case BNXT_HWRM_NVM_WRITE:
2695 struct bnxt_ioctl_hwrm_nvm_write *wr =
2698 rc = bnxt_hwrm_nvm_write(softc, wr->data, true,
2699 wr->type, wr->ordinal, wr->ext, wr->attr,
2700 wr->option, wr->data_length, wr->keep,
2701 &wr->item_length, &wr->index);
2704 rc = copyout(&iod->hdr.rc, &ioh->rc,
2709 rc = copyout(iod, ioh, iol);
2714 case BNXT_HWRM_NVM_ERASE_DIR_ENTRY:
2716 struct bnxt_ioctl_hwrm_nvm_erase_dir_entry *erase =
2719 rc = bnxt_hwrm_nvm_erase_dir_entry(softc, erase->index);
2722 rc = copyout(&iod->hdr.rc, &ioh->rc,
2726 rc = copyout(iod, ioh, iol);
2731 case BNXT_HWRM_NVM_GET_DIR_INFO:
2733 struct bnxt_ioctl_hwrm_nvm_get_dir_info *info =
2736 rc = bnxt_hwrm_nvm_get_dir_info(softc, &info->entries,
2737 &info->entry_length);
2740 rc = copyout(&iod->hdr.rc, &ioh->rc,
2744 rc = copyout(iod, ioh, iol);
2749 case BNXT_HWRM_NVM_GET_DIR_ENTRIES:
2751 struct bnxt_ioctl_hwrm_nvm_get_dir_entries *get =
2753 struct iflib_dma_info dma_data;
2755 rc = iflib_dma_alloc(softc->ctx, get->max_size,
2756 &dma_data, BUS_DMA_NOWAIT);
2759 rc = bnxt_hwrm_nvm_get_dir_entries(softc, &get->entries,
2760 &get->entry_length, &dma_data);
2763 rc = copyout(&iod->hdr.rc, &ioh->rc,
2766 rc = copyout(dma_data.idi_vaddr, get->data,
2767 get->entry_length * get->entries);
2770 rc = copyout(iod, ioh, iol);
2772 iflib_dma_free(&dma_data);
2776 case BNXT_HWRM_NVM_VERIFY_UPDATE:
2778 struct bnxt_ioctl_hwrm_nvm_verify_update *vrfy =
2781 rc = bnxt_hwrm_nvm_verify_update(softc, vrfy->type,
2782 vrfy->ordinal, vrfy->ext);
2785 rc = copyout(&iod->hdr.rc, &ioh->rc,
2789 rc = copyout(iod, ioh, iol);
2794 case BNXT_HWRM_NVM_INSTALL_UPDATE:
2796 struct bnxt_ioctl_hwrm_nvm_install_update *inst =
2799 rc = bnxt_hwrm_nvm_install_update(softc,
2800 inst->install_type, &inst->installed_items,
2801 &inst->result, &inst->problem_item,
2802 &inst->reset_required);
2805 rc = copyout(&iod->hdr.rc, &ioh->rc,
2809 rc = copyout(iod, ioh, iol);
2814 case BNXT_HWRM_NVM_MODIFY:
2816 struct bnxt_ioctl_hwrm_nvm_modify *mod = &iod->modify;
2818 rc = bnxt_hwrm_nvm_modify(softc, mod->index,
2819 mod->offset, mod->data, true, mod->length);
2822 rc = copyout(&iod->hdr.rc, &ioh->rc,
2826 rc = copyout(iod, ioh, iol);
2831 case BNXT_HWRM_FW_GET_TIME:
2833 struct bnxt_ioctl_hwrm_fw_get_time *gtm =
/*
 * NOTE(review): the three lines below appear HTML-mangled in this
 * extraction — "&gtm->..." was rendered as ">m->...".  Verify against
 * the upstream source before relying on this text.
 */
2836 rc = bnxt_hwrm_fw_get_time(softc, >m->year,
2837 >m->month, >m->day, >m->hour, >m->minute,
2838 >m->second, >m->millisecond, >m->zone);
2841 rc = copyout(&iod->hdr.rc, &ioh->rc,
2845 rc = copyout(iod, ioh, iol);
2850 case BNXT_HWRM_FW_SET_TIME:
2852 struct bnxt_ioctl_hwrm_fw_set_time *stm =
2855 rc = bnxt_hwrm_fw_set_time(softc, stm->year,
2856 stm->month, stm->day, stm->hour, stm->minute,
2857 stm->second, stm->millisecond, stm->zone);
2860 rc = copyout(&iod->hdr.rc, &ioh->rc,
2864 rc = copyout(iod, ioh, iol);
/*
 * Query initial PHY/link state from firmware and seed the driver's
 * requested-link settings (autoneg flag, duplex, speed) from the NVM
 * configuration.  (Extraction gaps; comments only.)
 */
2881 bnxt_probe_phy(struct bnxt_softc *softc)
2883 struct bnxt_link_info *link_info = &softc->link_info;
2886 rc = bnxt_update_link(softc, false);
2888 device_printf(softc->dev,
2889 "Probe phy can't update link (rc: %x)\n", rc);
2893 /*initialize the ethool setting copy with NVM settings */
2894 if (link_info->auto_mode != HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE)
2895 link_info->autoneg |= BNXT_AUTONEG_SPEED;
2897 link_info->req_duplex = link_info->duplex_setting;
/* Requested speed tracks autoneg speed or the forced speed. */
2898 if (link_info->autoneg & BNXT_AUTONEG_SPEED)
2899 link_info->req_link_speed = link_info->auto_link_speed;
2901 link_info->req_link_speed = link_info->force_link_speed;
/*
 * Populate the ifmedia list from the PHY type and the firmware-reported
 * supported-speeds bitmap.  Auto is always added; NPAR functions expose
 * only Auto.  BNXT_IFMEDIA_ADD adds an entry when the corresponding
 * speed bit is set in 'supported'.  (Extraction gaps; comments only.)
 */
2906 bnxt_add_media_types(struct bnxt_softc *softc)
2908 struct bnxt_link_info *link_info = &softc->link_info;
2910 uint8_t phy_type = get_phy_type(softc);
2912 supported = link_info->support_speeds;
2914 /* Auto is always supported */
2915 ifmedia_add(softc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2917 if (softc->flags & BNXT_FLAG_NPAR)
/* Copper/direct-attach (CR) variants. */
2921 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR4:
2922 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASECR4:
2923 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_L:
2924 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_S:
2925 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_N:
2926 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR:
2927 BNXT_IFMEDIA_ADD(supported, SPEEDS_100GB, IFM_100G_CR4);
2928 BNXT_IFMEDIA_ADD(supported, SPEEDS_50GB, IFM_50G_CR2);
2929 BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_CR4);
2930 BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_CR);
2931 BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_CR1);
2932 BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_CX);
/* Long-reach fiber (LR) variants. */
2935 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR4:
2936 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASELR4:
2937 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASELR:
2938 BNXT_IFMEDIA_ADD(supported, SPEEDS_100GB, IFM_100G_LR4);
2939 BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_LR4);
2940 BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_LR);
2941 BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_LR);
2942 BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_LX);
/* Short-reach fiber (SR/ER) variants. */
2945 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR10:
2946 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR4:
2947 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASESR4:
2948 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR:
2949 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASEER4:
2950 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER4:
2951 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASESR:
2952 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASESX:
2953 BNXT_IFMEDIA_ADD(supported, SPEEDS_100GB, IFM_100G_SR4);
2954 BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_SR4);
2955 BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_SR);
2956 BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_SR);
2957 BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_SX);
/* Backplane (KR/KX) variants. */
2960 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR4:
2961 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR2:
2962 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR:
2963 BNXT_IFMEDIA_ADD(supported, SPEEDS_100GB, IFM_100G_KR4);
2964 BNXT_IFMEDIA_ADD(supported, SPEEDS_50GB, IFM_50G_KR2);
2965 BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_KR4);
2966 BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_KR);
2967 BNXT_IFMEDIA_ADD(supported, SPEEDS_20GB, IFM_20G_KR2);
2968 BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_KR);
2969 BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_KX);
/* Active cable / AOC. */
2972 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_ACTIVE_CABLE:
2973 BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_ACC);
2974 BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_AOC);
2975 BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_XLAUI);
2976 BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_XLAUI_AC);
2979 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASECX:
2980 BNXT_IFMEDIA_ADD(supported, SPEEDS_1GBHD, IFM_1000_CX);
/* Twisted-pair (BASE-T) variants. */
2983 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASET:
2984 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET:
2985 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE:
2986 BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_T);
2987 BNXT_IFMEDIA_ADD(supported, SPEEDS_2_5GB, IFM_2500_T);
2988 BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_T);
2989 BNXT_IFMEDIA_ADD(supported, SPEEDS_100MB, IFM_100_T);
2990 BNXT_IFMEDIA_ADD(supported, SPEEDS_10MB, IFM_10_T);
2993 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX:
2994 BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_KR);
2995 BNXT_IFMEDIA_ADD(supported, SPEEDS_2_5GB, IFM_2500_KX);
2996 BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_KX);
2999 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_SGMIIEXTPHY:
3000 BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_SGMII);
3003 case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN:
3004 /* Only Autoneg is supported for TYPE_UNKNOWN */
3005 device_printf(softc->dev, "Unknown phy type\n");
3009 /* Only Autoneg is supported for new phy type values */
3010 device_printf(softc->dev, "phy type %d not supported by driver\n", phy_type);
/*
 * Map one PCI memory BAR and cache its bus tag/handle/size in 'bar'.
 * Refuses to map a BAR twice; RF_SHAREABLE is added when 'shareable'.
 * (Extraction gaps; comments only.)
 */
3018 bnxt_map_bar(struct bnxt_softc *softc, struct bnxt_bar_info *bar, int bar_num, bool shareable)
3022 if (bar->res != NULL) {
3023 device_printf(softc->dev, "Bar %d already mapped\n", bar_num);
3027 bar->rid = PCIR_BAR(bar_num);
3030 flag |= RF_SHAREABLE;
3033 bus_alloc_resource_any(softc->dev,
3037 device_printf(softc->dev,
3038 "PCI BAR%d mapping failure\n", bar_num);
/* Cache accessors used by the register read/write paths. */
3041 bar->tag = rman_get_bustag(bar->res);
3042 bar->handle = rman_get_bushandle(bar->res);
3043 bar->size = rman_get_size(bar->res);
/*
 * Map the BARs the driver uses: BAR0 (HWRM, shareable) and BAR2
 * (doorbells, exclusive).  (Extraction gaps; comments only.)
 */
3049 bnxt_pci_mapping(struct bnxt_softc *softc)
3053 rc = bnxt_map_bar(softc, &softc->hwrm_bar, 0, true);
3057 rc = bnxt_map_bar(softc, &softc->doorbell_bar, 2, false);
/*
 * Release both mapped BARs and NULL the resource pointers so a later
 * bnxt_map_bar() does not see them as already mapped.
 * (Extraction gaps; comments only.)
 */
3063 bnxt_pci_mapping_free(struct bnxt_softc *softc)
3065 if (softc->hwrm_bar.res != NULL)
3066 bus_release_resource(softc->dev, SYS_RES_MEMORY,
3067 softc->hwrm_bar.rid, softc->hwrm_bar.res);
3068 softc->hwrm_bar.res = NULL;
3070 if (softc->doorbell_bar.res != NULL)
3071 bus_release_resource(softc->dev, SYS_RES_MEMORY,
3072 softc->doorbell_bar.rid, softc->doorbell_bar.res);
3073 softc->doorbell_bar.res = NULL;
/*
 * Refresh link state from firmware (HWRM_PORT_PHY_QCFG).  When
 * 'chng_link_state' is set, update link_info->link_up and report a
 * transition via bnxt_report_link(); otherwise the link is recorded as
 * down without reporting.  (Extraction gaps; comments only.)
 */
3077 bnxt_update_link(struct bnxt_softc *softc, bool chng_link_state)
3079 struct bnxt_link_info *link_info = &softc->link_info;
3080 uint8_t link_up = link_info->link_up;
3083 rc = bnxt_hwrm_port_phy_qcfg(softc);
3087 /* TODO: need to add more logic to report VF link */
3088 if (chng_link_state) {
3089 if (link_info->phy_link_status ==
3090 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK)
3091 link_info->link_up = 1;
3093 link_info->link_up = 0;
/* Only report when the up/down state actually changed. */
3094 if (link_up != link_info->link_up)
3095 bnxt_report_link(softc);
3097 /* always link down if not require to update link state */
3098 link_info->link_up = 0;
/*
 * Report a link state change to iflib and the console, remember the new
 * state as "last", and rebuild the ifmedia list (the PHY module may
 * have changed).  Suppresses duplicate reports when nothing relevant
 * (up/down, duplex, phy type, flow control) changed.
 * (Extraction gaps; comments only.)
 */
3106 bnxt_report_link(struct bnxt_softc *softc)
3108 struct bnxt_link_info *link_info = &softc->link_info;
3109 const char *duplex = NULL, *flow_ctrl = NULL;
3111 if (link_info->link_up == link_info->last_link_up) {
3112 if (!link_info->link_up)
3114 if ((link_info->duplex == link_info->last_duplex) &&
3115 (link_info->phy_type == link_info->last_phy_type) &&
3116 (!(BNXT_IS_FLOW_CTRL_CHANGED(link_info))))
3120 if (link_info->link_up) {
3121 if (link_info->duplex ==
3122 HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL)
3123 duplex = "full duplex";
3125 duplex = "half duplex";
3126 if (link_info->flow_ctrl.tx & link_info->flow_ctrl.rx)
3127 flow_ctrl = "FC - receive & transmit";
3128 else if (link_info->flow_ctrl.tx)
3129 flow_ctrl = "FC - transmit";
3130 else if (link_info->flow_ctrl.rx)
3131 flow_ctrl = "FC - receive";
3133 flow_ctrl = "FC - none";
3134 iflib_link_state_change(softc->ctx, LINK_STATE_UP,
/* link_speed is in units of 100 Mbps per HWRM convention. */
3136 device_printf(softc->dev, "Link is UP %s, %s - %d Mbps \n", duplex,
3137 flow_ctrl, (link_info->link_speed * 100));
3139 iflib_link_state_change(softc->ctx, LINK_STATE_DOWN,
3140 bnxt_get_baudrate(&softc->link_info));
3141 device_printf(softc->dev, "Link is Down\n");
/* Snapshot current state for the next change-detection pass. */
3144 link_info->last_link_up = link_info->link_up;
3145 link_info->last_duplex = link_info->duplex;
3146 link_info->last_phy_type = link_info->phy_type;
3147 link_info->last_flow_ctrl.tx = link_info->flow_ctrl.tx;
3148 link_info->last_flow_ctrl.rx = link_info->flow_ctrl.rx;
3149 link_info->last_flow_ctrl.autoneg = link_info->flow_ctrl.autoneg;
3150 /* update media types */
3151 ifmedia_removeall(softc->media);
3152 bnxt_add_media_types(softc);
3153 ifmedia_set(softc->media, IFM_ETHER | IFM_AUTO);
/*
 * Per-queue interrupt filter: mask the ring's CQ doorbell on pre-P5
 * chips and defer processing to the iflib taskqueue thread.
 * (Extraction gaps; comments only.)
 */
3157 bnxt_handle_isr(void *arg)
3159 struct bnxt_cp_ring *cpr = arg;
3160 struct bnxt_softc *softc = cpr->ring.softc;
3163 /* Disable further interrupts for this queue */
3164 if (!BNXT_CHIP_P5(softc))
3165 softc->db_ops.bnxt_db_rx_cq(cpr, 0);
3167 return FILTER_SCHEDULE_THREAD;
/*
 * Default completion ring interrupt filter: mask the ring and enqueue
 * bnxt_def_cp_task() to drain it.  (Extraction gaps; comments only.)
 */
3171 bnxt_handle_def_cp(void *arg)
3173 struct bnxt_softc *softc = arg;
3175 softc->db_ops.bnxt_db_rx_cq(&softc->def_cp_ring, 0);
3176 GROUPTASK_ENQUEUE(&softc->def_cp_task);
3177 return FILTER_HANDLED;
/*
 * Reset every firmware object id (rings, stat contexts, ring groups,
 * VNIC/RSS, filters) to HWRM_NA_SIGNATURE so subsequent HWRM allocation
 * calls see them as unallocated, and invalidate the RSS group table.
 * (Extraction gaps; comments only.)
 */
3181 bnxt_clear_ids(struct bnxt_softc *softc)
3185 softc->def_cp_ring.stats_ctx_id = HWRM_NA_SIGNATURE;
3186 softc->def_cp_ring.ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
3187 softc->def_nq_ring.stats_ctx_id = HWRM_NA_SIGNATURE;
3188 softc->def_nq_ring.ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
3189 for (i = 0; i < softc->ntxqsets; i++) {
3190 softc->tx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
3191 softc->tx_cp_rings[i].ring.phys_id =
3192 (uint16_t)HWRM_NA_SIGNATURE;
3193 softc->tx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
/* NQ rings only exist on P5; skip when not allocated. */
3195 if (!softc->nq_rings)
3197 softc->nq_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
3198 softc->nq_rings[i].ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
3200 for (i = 0; i < softc->nrxqsets; i++) {
3201 softc->rx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
3202 softc->rx_cp_rings[i].ring.phys_id =
3203 (uint16_t)HWRM_NA_SIGNATURE;
3204 softc->rx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
3205 softc->ag_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
3206 softc->grp_info[i].grp_id = (uint16_t)HWRM_NA_SIGNATURE;
3208 softc->vnic_info.filter_id = -1;
3209 softc->vnic_info.id = (uint16_t)HWRM_NA_SIGNATURE;
3210 softc->vnic_info.rss_id = (uint16_t)HWRM_NA_SIGNATURE;
3211 memset(softc->vnic_info.rss_grp_tbl.idi_vaddr, 0xff,
3212 softc->vnic_info.rss_grp_tbl.idi_size);
/*
 * Write the inverted valid bit into every descriptor of a completion
 * ring so all entries read as "not yet written by hardware".
 * (Extraction gaps; comments only.)
 */
3216 bnxt_mark_cpr_invalid(struct bnxt_cp_ring *cpr)
3218 struct cmpl_base *cmp = (void *)cpr->ring.vaddr;
3221 for (i = 0; i < cpr->ring.ring_size; i++)
3222 cmp[i].info3_v = !cpr->v_bit;
/*
 * Handle a firmware async event completion.  Link-related events either
 * set BNXT_STATE_LINK_CHANGE for deferred handling (P5) or refresh
 * media status immediately; all other recognized event ids are logged
 * as unhandled.  (Extraction gaps; comments only.)
 */
3226 bnxt_handle_async_event(struct bnxt_softc *softc, struct cmpl_base *cmpl)
3228 struct hwrm_async_event_cmpl *ae = (void *)cmpl;
3229 uint16_t async_id = le16toh(ae->event_id);
3230 struct ifmediareq ifmr;
3233 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
3234 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
3235 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
/* On P5 this runs in NQ context; defer to the admin task instead. */
3236 if (BNXT_CHIP_P5(softc))
3237 bit_set(softc->state_bv, BNXT_STATE_LINK_CHANGE)
3239 bnxt_media_status(softc->ctx, &ifmr);
3241 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE:
3242 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE:
3243 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED:
3244 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED:
3245 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD:
3246 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD:
3247 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
3248 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD:
3249 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR:
3250 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE:
3251 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE:
3252 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
3253 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR:
3254 device_printf(softc->dev,
3255 "Unhandled async completion type %u\n", async_id);
3258 device_printf(softc->dev,
3259 "Unknown async completion type %u\n", async_id);
/*
 * Taskqueue handler for the default completion ring: walk valid
 * completions (valid-bit protocol), dispatch async events, log all
 * other types, then commit the consumed position and re-arm the ring's
 * doorbell.  (Extraction gaps; comments only.)
 */
3265 bnxt_def_cp_task(void *context)
3267 if_ctx_t ctx = context;
3268 struct bnxt_softc *softc = iflib_get_softc(ctx);
3269 struct bnxt_cp_ring *cpr = &softc->def_cp_ring;
3271 /* Handle completions on the default completion ring */
3272 struct cmpl_base *cmpl;
3273 uint32_t cons = cpr->cons;
3274 bool v_bit = cpr->v_bit;
/* Advance to the next descriptor and stop once it is not yet valid. */
3282 NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
3283 cmpl = &((struct cmpl_base *)cpr->ring.vaddr)[cons];
3285 if (!CMP_VALID(cmpl, v_bit))
3288 type = le16toh(cmpl->type) & CMPL_BASE_TYPE_MASK;
3290 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
3291 bnxt_handle_async_event(softc, cmpl);
3293 case CMPL_BASE_TYPE_TX_L2:
3294 case CMPL_BASE_TYPE_RX_L2:
3295 case CMPL_BASE_TYPE_RX_AGG:
3296 case CMPL_BASE_TYPE_RX_TPA_START:
3297 case CMPL_BASE_TYPE_RX_TPA_END:
3298 case CMPL_BASE_TYPE_STAT_EJECT:
3299 case CMPL_BASE_TYPE_HWRM_DONE:
3300 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
3301 case CMPL_BASE_TYPE_HWRM_FWD_RESP:
3302 case CMPL_BASE_TYPE_CQ_NOTIFICATION:
3303 case CMPL_BASE_TYPE_SRQ_EVENT:
3304 case CMPL_BASE_TYPE_DBQ_EVENT:
3305 case CMPL_BASE_TYPE_QP_EVENT:
3306 case CMPL_BASE_TYPE_FUNC_EVENT:
3307 device_printf(softc->dev,
3308 "Unhandled completion type %u\n", type);
3311 device_printf(softc->dev,
3312 "Unknown completion type %u\n", type);
/* Record how far we consumed and re-enable the doorbell/interrupt. */
3317 cpr->cons = last_cons;
3318 cpr->v_bit = last_v_bit;
3319 softc->db_ops.bnxt_db_rx_cq(cpr, 1);
/*
 * Return the firmware-reported PHY type, or, when firmware reports
 * TYPE_UNKNOWN, deduce a reasonable type from the media type and the
 * supported-speeds bitmap.  (Extraction gaps; comments only.)
 */
3323 get_phy_type(struct bnxt_softc *softc)
3325 struct bnxt_link_info *link_info = &softc->link_info;
3326 uint8_t phy_type = link_info->phy_type;
3329 if (phy_type != HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN)
3332 /* Deduce the phy type from the media type and supported speeds */
3333 supported = link_info->support_speeds;
3335 if (link_info->media_type ==
3336 HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP)
3337 return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET;
3338 if (link_info->media_type ==
3339 HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_DAC) {
3340 if (supported & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB)
3341 return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX;
3342 if (supported & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB)
3343 return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR;
3344 return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR;
3346 if (link_info->media_type ==
3347 HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_FIBRE)
3348 return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR;
/*
 * Compare the firmware's HWRM interface version against the driver's
 * minimum (major.minor.update, lexicographic) and warn when the
 * firmware is older.  (Extraction gaps; comments only.)
 */
3354 bnxt_check_hwrm_version(struct bnxt_softc *softc)
3358 sprintf(buf, "%hhu.%hhu.%hhu", softc->ver_info->hwrm_min_major,
3359 softc->ver_info->hwrm_min_minor, softc->ver_info->hwrm_min_update);
3360 if (softc->ver_info->hwrm_min_major > softc->ver_info->hwrm_if_major) {
3361 device_printf(softc->dev,
3362 "WARNING: HWRM version %s is too old (older than %s)\n",
3363 softc->ver_info->hwrm_if_ver, buf);
3366 else if(softc->ver_info->hwrm_min_major ==
3367 softc->ver_info->hwrm_if_major) {
3368 if (softc->ver_info->hwrm_min_minor >
3369 softc->ver_info->hwrm_if_minor) {
3370 device_printf(softc->dev,
3371 "WARNING: HWRM version %s is too old (older than %s)\n",
3372 softc->ver_info->hwrm_if_ver, buf);
3375 else if (softc->ver_info->hwrm_min_minor ==
3376 softc->ver_info->hwrm_if_minor) {
3377 if (softc->ver_info->hwrm_min_update >
3378 softc->ver_info->hwrm_if_update) {
3379 device_printf(softc->dev,
3380 "WARNING: HWRM version %s is too old (older than %s)\n",
3381 softc->ver_info->hwrm_if_ver, buf);
/*
 * Convert the HWRM link-speed code into an ifnet baudrate (bits/sec).
 * (Review note: several return statements fall in lines dropped by this
 * extraction; code left untouched, comments only.)
 */
3390 bnxt_get_baudrate(struct bnxt_link_info *link)
3392 switch (link->link_speed) {
3393 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
3394 return IF_Mbps(100);
3395 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
3397 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
3399 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
3400 return IF_Mbps(2500);
3401 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
3403 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
3405 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
3407 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
3409 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
3411 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
3412 return IF_Gbps(100);
3413 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB:
3416 return IF_Gbps(100);
3420 bnxt_get_wol_settings(struct bnxt_softc *softc)
3422 uint16_t wol_handle = 0;
3424 if (!bnxt_wol_supported(softc))
3428 wol_handle = bnxt_hwrm_get_wol_fltrs(softc, wol_handle);
3429 } while (wol_handle && wol_handle != BNXT_NO_MORE_WOL_FILTERS);