2 * Broadcom NetXtreme-C/E network driver.
4 * Copyright (c) 2016 Broadcom, All Rights Reserved.
5 * The term Broadcom refers to Broadcom Limited and/or its subsidiaries
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26 * THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include <sys/endian.h>
33 #include <sys/bitstring.h>
36 #include "bnxt_hwrm.h"
37 #include "hsi_struct_def.h"
39 static int bnxt_hwrm_err_map(uint16_t err);
40 static inline int _is_valid_ether_addr(uint8_t *);
41 static inline void get_random_ether_addr(uint8_t *);
42 static void bnxt_hwrm_set_link_common(struct bnxt_softc *softc,
43 struct hwrm_port_phy_cfg_input *req);
44 static void bnxt_hwrm_set_pause_common(struct bnxt_softc *softc,
45 struct hwrm_port_phy_cfg_input *req);
46 static void bnxt_hwrm_set_eee(struct bnxt_softc *softc,
47 struct hwrm_port_phy_cfg_input *req);
48 static int _hwrm_send_message(struct bnxt_softc *, void *, uint32_t);
49 static int hwrm_send_message(struct bnxt_softc *, void *, uint32_t);
50 static void bnxt_hwrm_cmd_hdr_init(struct bnxt_softc *, void *, uint16_t);
52 /* NVRam stuff has a five minute timeout */
53 #define BNXT_NVM_TIMEO (5 * 60 * 1000)
56 bnxt_hwrm_err_map(uint16_t err)
61 case HWRM_ERR_CODE_SUCCESS:
63 case HWRM_ERR_CODE_INVALID_PARAMS:
64 case HWRM_ERR_CODE_INVALID_FLAGS:
65 case HWRM_ERR_CODE_INVALID_ENABLES:
67 case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
69 case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
71 case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
73 case HWRM_ERR_CODE_FAIL:
75 case HWRM_ERR_CODE_HWRM_ERROR:
76 case HWRM_ERR_CODE_UNKNOWN_ERR:
85 bnxt_alloc_hwrm_dma_mem(struct bnxt_softc *softc)
89 rc = iflib_dma_alloc(softc->ctx, PAGE_SIZE, &softc->hwrm_cmd_resp,
95 bnxt_free_hwrm_dma_mem(struct bnxt_softc *softc)
97 if (softc->hwrm_cmd_resp.idi_vaddr)
98 iflib_dma_free(&softc->hwrm_cmd_resp);
99 softc->hwrm_cmd_resp.idi_vaddr = NULL;
104 bnxt_hwrm_cmd_hdr_init(struct bnxt_softc *softc, void *request,
107 struct input *req = request;
109 req->req_type = htole16(req_type);
110 req->cmpl_ring = 0xffff;
111 req->target_id = 0xffff;
112 req->resp_addr = htole64(softc->hwrm_cmd_resp.idi_paddr);
116 _hwrm_send_message(struct bnxt_softc *softc, void *msg, uint32_t msg_len)
118 struct input *req = msg;
119 struct hwrm_err_output *resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
120 uint32_t *data = msg;
126 /* TODO: DMASYNC in here. */
127 req->seq_id = htole16(softc->hwrm_cmd_seq++);
128 memset(resp, 0, PAGE_SIZE);
129 cp_ring_id = le16toh(req->cmpl_ring);
131 /* Write request msg to hwrm channel */
132 for (i = 0; i < msg_len; i += 4) {
133 bus_space_write_4(softc->hwrm_bar.tag,
134 softc->hwrm_bar.handle,
139 /* Clear to the end of the request buffer */
140 for (i = msg_len; i < HWRM_MAX_REQ_LEN; i += 4)
141 bus_space_write_4(softc->hwrm_bar.tag, softc->hwrm_bar.handle,
144 /* Ring channel doorbell */
145 bus_space_write_4(softc->hwrm_bar.tag,
146 softc->hwrm_bar.handle,
149 /* Check if response len is updated */
150 for (i = 0; i < softc->hwrm_cmd_timeo; i++) {
151 if (resp->resp_len && resp->resp_len <= 4096)
155 if (i >= softc->hwrm_cmd_timeo) {
156 device_printf(softc->dev,
157 "Timeout sending %s: (timeout: %u) seq: %d\n",
158 GET_HWRM_REQ_TYPE(req->req_type), softc->hwrm_cmd_timeo,
159 le16toh(req->seq_id));
162 /* Last byte of resp contains the valid key */
163 valid = (uint8_t *)resp + resp->resp_len - 1;
164 for (i = 0; i < softc->hwrm_cmd_timeo; i++) {
165 if (*valid == HWRM_RESP_VALID_KEY)
169 if (i >= softc->hwrm_cmd_timeo) {
170 device_printf(softc->dev, "Timeout sending %s: "
171 "(timeout: %u) msg {0x%x 0x%x} len:%d v: %d\n",
172 GET_HWRM_REQ_TYPE(req->req_type),
173 softc->hwrm_cmd_timeo, le16toh(req->req_type),
174 le16toh(req->seq_id), msg_len,
179 err = le16toh(resp->error_code);
181 /* HWRM_ERR_CODE_FAIL is a "normal" error, don't log */
182 if (err != HWRM_ERR_CODE_FAIL) {
183 device_printf(softc->dev,
184 "%s command returned %s error.\n",
185 GET_HWRM_REQ_TYPE(req->req_type),
186 GET_HWRM_ERROR_CODE(err));
188 return bnxt_hwrm_err_map(err);
195 hwrm_send_message(struct bnxt_softc *softc, void *msg, uint32_t msg_len)
199 BNXT_HWRM_LOCK(softc);
200 rc = _hwrm_send_message(softc, msg, msg_len);
201 BNXT_HWRM_UNLOCK(softc);
206 bnxt_hwrm_queue_qportcfg(struct bnxt_softc *softc)
208 struct hwrm_queue_qportcfg_input req = {0};
209 struct hwrm_queue_qportcfg_output *resp =
210 (void *)softc->hwrm_cmd_resp.idi_vaddr;
215 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_QUEUE_QPORTCFG);
217 BNXT_HWRM_LOCK(softc);
218 rc = _hwrm_send_message(softc, &req, sizeof(req));
222 if (!resp->max_configurable_queues) {
226 softc->max_tc = resp->max_configurable_queues;
227 if (softc->max_tc > BNXT_MAX_QUEUE)
228 softc->max_tc = BNXT_MAX_QUEUE;
230 qptr = &resp->queue_id0;
231 for (int i = 0; i < softc->max_tc; i++) {
232 softc->q_info[i].id = *qptr++;
233 softc->q_info[i].profile = *qptr++;
237 BNXT_HWRM_UNLOCK(softc);
243 bnxt_hwrm_ver_get(struct bnxt_softc *softc)
245 struct hwrm_ver_get_input req = {0};
246 struct hwrm_ver_get_output *resp =
247 (void *)softc->hwrm_cmd_resp.idi_vaddr;
249 const char nastr[] = "<not installed>";
250 const char naver[] = "<N/A>";
252 softc->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
253 softc->hwrm_cmd_timeo = 1000;
254 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VER_GET);
256 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
257 req.hwrm_intf_min = HWRM_VERSION_MINOR;
258 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
260 BNXT_HWRM_LOCK(softc);
261 rc = _hwrm_send_message(softc, &req, sizeof(req));
265 snprintf(softc->ver_info->hwrm_if_ver, BNXT_VERSTR_SIZE, "%d.%d.%d",
266 resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
267 softc->ver_info->hwrm_if_major = resp->hwrm_intf_maj;
268 softc->ver_info->hwrm_if_minor = resp->hwrm_intf_min;
269 softc->ver_info->hwrm_if_update = resp->hwrm_intf_upd;
270 snprintf(softc->ver_info->hwrm_fw_ver, BNXT_VERSTR_SIZE, "%d.%d.%d",
271 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
272 strlcpy(softc->ver_info->driver_hwrm_if_ver, HWRM_VERSION_STR,
274 strlcpy(softc->ver_info->hwrm_fw_name, resp->hwrm_fw_name,
277 if (resp->mgmt_fw_maj == 0 && resp->mgmt_fw_min == 0 &&
278 resp->mgmt_fw_bld == 0) {
279 strlcpy(softc->ver_info->mgmt_fw_ver, naver, BNXT_VERSTR_SIZE);
280 strlcpy(softc->ver_info->mgmt_fw_name, nastr, BNXT_NAME_SIZE);
283 snprintf(softc->ver_info->mgmt_fw_ver, BNXT_VERSTR_SIZE,
284 "%d.%d.%d", resp->mgmt_fw_maj, resp->mgmt_fw_min,
286 strlcpy(softc->ver_info->mgmt_fw_name, resp->mgmt_fw_name,
289 if (resp->netctrl_fw_maj == 0 && resp->netctrl_fw_min == 0 &&
290 resp->netctrl_fw_bld == 0) {
291 strlcpy(softc->ver_info->netctrl_fw_ver, naver,
293 strlcpy(softc->ver_info->netctrl_fw_name, nastr,
297 snprintf(softc->ver_info->netctrl_fw_ver, BNXT_VERSTR_SIZE,
298 "%d.%d.%d", resp->netctrl_fw_maj, resp->netctrl_fw_min,
299 resp->netctrl_fw_bld);
300 strlcpy(softc->ver_info->netctrl_fw_name, resp->netctrl_fw_name,
303 if (resp->roce_fw_maj == 0 && resp->roce_fw_min == 0 &&
304 resp->roce_fw_bld == 0) {
305 strlcpy(softc->ver_info->roce_fw_ver, naver, BNXT_VERSTR_SIZE);
306 strlcpy(softc->ver_info->roce_fw_name, nastr, BNXT_NAME_SIZE);
309 snprintf(softc->ver_info->roce_fw_ver, BNXT_VERSTR_SIZE,
310 "%d.%d.%d", resp->roce_fw_maj, resp->roce_fw_min,
312 strlcpy(softc->ver_info->roce_fw_name, resp->roce_fw_name,
315 softc->ver_info->chip_num = le16toh(resp->chip_num);
316 softc->ver_info->chip_rev = resp->chip_rev;
317 softc->ver_info->chip_metal = resp->chip_metal;
318 softc->ver_info->chip_bond_id = resp->chip_bond_id;
319 softc->ver_info->chip_type = resp->chip_platform_type;
321 if (resp->max_req_win_len)
322 softc->hwrm_max_req_len = le16toh(resp->max_req_win_len);
323 if (resp->def_req_timeout)
324 softc->hwrm_cmd_timeo = le16toh(resp->def_req_timeout);
327 BNXT_HWRM_UNLOCK(softc);
332 bnxt_hwrm_func_drv_rgtr(struct bnxt_softc *softc)
334 struct hwrm_func_drv_rgtr_input req = {0};
336 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_DRV_RGTR);
338 req.enables = htole32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
339 HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_OS_TYPE);
340 req.os_type = htole16(HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_FREEBSD);
342 req.ver_maj = __FreeBSD_version / 100000;
343 req.ver_min = (__FreeBSD_version / 1000) % 100;
344 req.ver_upd = (__FreeBSD_version / 100) % 10;
346 return hwrm_send_message(softc, &req, sizeof(req));
351 bnxt_hwrm_func_drv_unrgtr(struct bnxt_softc *softc, bool shutdown)
353 struct hwrm_func_drv_unrgtr_input req = {0};
355 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_DRV_UNRGTR);
356 if (shutdown == true)
358 HWRM_FUNC_DRV_UNRGTR_INPUT_FLAGS_PREPARE_FOR_SHUTDOWN;
359 return hwrm_send_message(softc, &req, sizeof(req));
364 _is_valid_ether_addr(uint8_t *addr)
366 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
368 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN)))
375 get_random_ether_addr(uint8_t *addr)
377 uint8_t temp[ETHER_ADDR_LEN];
379 arc4rand(&temp, sizeof(temp), 0);
382 bcopy(temp, addr, sizeof(temp));
386 bnxt_hwrm_func_qcaps(struct bnxt_softc *softc)
389 struct hwrm_func_qcaps_input req = {0};
390 struct hwrm_func_qcaps_output *resp =
391 (void *)softc->hwrm_cmd_resp.idi_vaddr;
392 struct bnxt_func_info *func = &softc->func;
394 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_QCAPS);
395 req.fid = htole16(0xffff);
397 BNXT_HWRM_LOCK(softc);
398 rc = _hwrm_send_message(softc, &req, sizeof(req));
403 htole32(HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_MAGICPKT_SUPPORTED))
404 softc->flags |= BNXT_FLAG_WOL_CAP;
406 func->fw_fid = le16toh(resp->fid);
407 memcpy(func->mac_addr, resp->mac_address, ETHER_ADDR_LEN);
408 func->max_rsscos_ctxs = le16toh(resp->max_rsscos_ctx);
409 func->max_cp_rings = le16toh(resp->max_cmpl_rings);
410 func->max_tx_rings = le16toh(resp->max_tx_rings);
411 func->max_rx_rings = le16toh(resp->max_rx_rings);
412 func->max_hw_ring_grps = le32toh(resp->max_hw_ring_grps);
413 if (!func->max_hw_ring_grps)
414 func->max_hw_ring_grps = func->max_tx_rings;
415 func->max_l2_ctxs = le16toh(resp->max_l2_ctxs);
416 func->max_vnics = le16toh(resp->max_vnics);
417 func->max_stat_ctxs = le16toh(resp->max_stat_ctx);
418 if (BNXT_PF(softc)) {
419 struct bnxt_pf_info *pf = &softc->pf;
421 pf->port_id = le16toh(resp->port_id);
422 pf->first_vf_id = le16toh(resp->first_vf_id);
423 pf->max_vfs = le16toh(resp->max_vfs);
424 pf->max_encap_records = le32toh(resp->max_encap_records);
425 pf->max_decap_records = le32toh(resp->max_decap_records);
426 pf->max_tx_em_flows = le32toh(resp->max_tx_em_flows);
427 pf->max_tx_wm_flows = le32toh(resp->max_tx_wm_flows);
428 pf->max_rx_em_flows = le32toh(resp->max_rx_em_flows);
429 pf->max_rx_wm_flows = le32toh(resp->max_rx_wm_flows);
431 if (!_is_valid_ether_addr(func->mac_addr)) {
432 device_printf(softc->dev, "Invalid ethernet address, generating random locally administered address\n");
433 get_random_ether_addr(func->mac_addr);
437 BNXT_HWRM_UNLOCK(softc);
442 bnxt_hwrm_func_qcfg(struct bnxt_softc *softc)
444 struct hwrm_func_qcfg_input req = {0};
445 struct hwrm_func_qcfg_output *resp =
446 (void *)softc->hwrm_cmd_resp.idi_vaddr;
447 struct bnxt_func_qcfg *fn_qcfg = &softc->fn_qcfg;
450 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_QCFG);
451 req.fid = htole16(0xffff);
452 BNXT_HWRM_LOCK(softc);
453 rc = _hwrm_send_message(softc, &req, sizeof(req));
457 fn_qcfg->alloc_completion_rings = le16toh(resp->alloc_cmpl_rings);
458 fn_qcfg->alloc_tx_rings = le16toh(resp->alloc_tx_rings);
459 fn_qcfg->alloc_rx_rings = le16toh(resp->alloc_rx_rings);
460 fn_qcfg->alloc_vnics = le16toh(resp->alloc_vnics);
462 BNXT_HWRM_UNLOCK(softc);
467 bnxt_hwrm_func_reset(struct bnxt_softc *softc)
469 struct hwrm_func_reset_input req = {0};
471 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_RESET);
474 return hwrm_send_message(softc, &req, sizeof(req));
478 bnxt_hwrm_set_link_common(struct bnxt_softc *softc,
479 struct hwrm_port_phy_cfg_input *req)
481 uint8_t autoneg = softc->link_info.autoneg;
482 uint16_t fw_link_speed = softc->link_info.req_link_speed;
484 if (autoneg & BNXT_AUTONEG_SPEED) {
486 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
489 htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE);
491 htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG);
493 req->force_link_speed = htole16(fw_link_speed);
494 req->flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE);
497 /* tell chimp that the setting takes effect immediately */
498 req->flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY);
503 bnxt_hwrm_set_pause_common(struct bnxt_softc *softc,
504 struct hwrm_port_phy_cfg_input *req)
506 if (softc->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
508 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_AUTONEG_PAUSE;
509 if (softc->link_info.req_flow_ctrl &
510 HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX)
512 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
513 if (softc->link_info.req_flow_ctrl &
514 HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX)
516 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
518 htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE);
520 if (softc->link_info.req_flow_ctrl &
521 HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX)
523 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
524 if (softc->link_info.req_flow_ctrl &
525 HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX)
527 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
529 htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE);
530 req->auto_pause = req->force_pause;
531 req->enables |= htole32(
532 HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE);
537 /* JFV this needs interface connection */
539 bnxt_hwrm_set_eee(struct bnxt_softc *softc, struct hwrm_port_phy_cfg_input *req)
541 /* struct ethtool_eee *eee = &softc->eee; */
542 bool eee_enabled = false;
547 uint32_t flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_ENABLE;
549 if (eee->tx_lpi_enabled)
550 flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI;
552 req->flags |= htole32(flags);
553 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
554 req->eee_link_speed_mask = htole16(eee_speeds);
555 req->tx_lpi_timer = htole32(eee->tx_lpi_timer);
559 htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_DISABLE);
565 bnxt_hwrm_set_link_setting(struct bnxt_softc *softc, bool set_pause,
568 struct hwrm_port_phy_cfg_input req = {0};
570 if (softc->flags & BNXT_FLAG_NPAR)
573 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_PHY_CFG);
575 bnxt_hwrm_set_pause_common(softc, &req);
577 bnxt_hwrm_set_link_common(softc, &req);
579 bnxt_hwrm_set_eee(softc, &req);
580 return hwrm_send_message(softc, &req, sizeof(req));
585 bnxt_hwrm_set_pause(struct bnxt_softc *softc)
587 struct hwrm_port_phy_cfg_input req = {0};
590 if (softc->flags & BNXT_FLAG_NPAR)
593 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_PHY_CFG);
594 bnxt_hwrm_set_pause_common(softc, &req);
596 if (softc->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)
597 bnxt_hwrm_set_link_common(softc, &req);
599 BNXT_HWRM_LOCK(softc);
600 rc = _hwrm_send_message(softc, &req, sizeof(req));
601 if (!rc && !(softc->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
602 /* since changing of pause setting doesn't trigger any link
603 * change event, the driver needs to update the current pause
604 * result upon successfully return of the phy_cfg command */
605 softc->link_info.pause =
606 softc->link_info.force_pause = softc->link_info.req_flow_ctrl;
607 softc->link_info.auto_pause = 0;
608 bnxt_report_link(softc);
610 BNXT_HWRM_UNLOCK(softc);
615 bnxt_hwrm_vnic_cfg(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
617 struct hwrm_vnic_cfg_input req = {0};
618 struct hwrm_vnic_cfg_output *resp;
620 resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
621 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_CFG);
623 if (vnic->flags & BNXT_VNIC_FLAG_DEFAULT)
624 req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
625 if (vnic->flags & BNXT_VNIC_FLAG_BD_STALL)
626 req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
627 if (vnic->flags & BNXT_VNIC_FLAG_VLAN_STRIP)
628 req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
629 req.enables = htole32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
630 HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE |
631 HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
632 req.vnic_id = htole16(vnic->id);
633 req.dflt_ring_grp = htole16(vnic->def_ring_grp);
634 req.rss_rule = htole16(vnic->rss_id);
635 req.cos_rule = htole16(vnic->cos_rule);
636 req.lb_rule = htole16(vnic->lb_rule);
637 req.mru = htole16(vnic->mru);
639 return hwrm_send_message(softc, &req, sizeof(req));
643 bnxt_hwrm_vnic_alloc(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
645 struct hwrm_vnic_alloc_input req = {0};
646 struct hwrm_vnic_alloc_output *resp =
647 (void *)softc->hwrm_cmd_resp.idi_vaddr;
650 if (vnic->id != (uint16_t)HWRM_NA_SIGNATURE) {
651 device_printf(softc->dev,
652 "Attempt to re-allocate vnic %04x\n", vnic->id);
656 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_ALLOC);
658 if (vnic->flags & BNXT_VNIC_FLAG_DEFAULT)
659 req.flags = htole32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
661 BNXT_HWRM_LOCK(softc);
662 rc = _hwrm_send_message(softc, &req, sizeof(req));
666 vnic->id = le32toh(resp->vnic_id);
669 BNXT_HWRM_UNLOCK(softc);
674 bnxt_hwrm_vnic_ctx_alloc(struct bnxt_softc *softc, uint16_t *ctx_id)
676 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
677 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
678 (void *)softc->hwrm_cmd_resp.idi_vaddr;
681 if (*ctx_id != (uint16_t)HWRM_NA_SIGNATURE) {
682 device_printf(softc->dev,
683 "Attempt to re-allocate vnic ctx %04x\n", *ctx_id);
687 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
689 BNXT_HWRM_LOCK(softc);
690 rc = _hwrm_send_message(softc, &req, sizeof(req));
694 *ctx_id = le32toh(resp->rss_cos_lb_ctx_id);
697 BNXT_HWRM_UNLOCK(softc);
702 bnxt_hwrm_ring_grp_alloc(struct bnxt_softc *softc, struct bnxt_grp_info *grp)
704 struct hwrm_ring_grp_alloc_input req = {0};
705 struct hwrm_ring_grp_alloc_output *resp;
708 if (grp->grp_id != (uint16_t)HWRM_NA_SIGNATURE) {
709 device_printf(softc->dev,
710 "Attempt to re-allocate ring group %04x\n", grp->grp_id);
714 resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
715 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_GRP_ALLOC);
716 req.cr = htole16(grp->cp_ring_id);
717 req.rr = htole16(grp->rx_ring_id);
718 req.ar = htole16(grp->ag_ring_id);
719 req.sc = htole16(grp->stats_ctx);
721 BNXT_HWRM_LOCK(softc);
722 rc = _hwrm_send_message(softc, &req, sizeof(req));
726 grp->grp_id = le32toh(resp->ring_group_id);
729 BNXT_HWRM_UNLOCK(softc);
734 * Ring allocation message to the firmware
737 bnxt_hwrm_ring_alloc(struct bnxt_softc *softc, uint8_t type,
738 struct bnxt_ring *ring, uint16_t cmpl_ring_id, uint32_t stat_ctx_id,
741 struct hwrm_ring_alloc_input req = {0};
742 struct hwrm_ring_alloc_output *resp;
745 if (ring->phys_id != (uint16_t)HWRM_NA_SIGNATURE) {
746 device_printf(softc->dev,
747 "Attempt to re-allocate ring %04x\n", ring->phys_id);
751 resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
752 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_ALLOC);
753 req.enables = htole32(0);
754 req.fbo = htole32(0);
756 if (stat_ctx_id != HWRM_NA_SIGNATURE) {
757 req.enables |= htole32(
758 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
759 req.stat_ctx_id = htole32(stat_ctx_id);
761 req.ring_type = type;
762 req.page_tbl_addr = htole64(ring->paddr);
763 req.length = htole32(ring->ring_size);
764 req.logical_id = htole16(ring->id);
765 req.cmpl_ring_id = htole16(cmpl_ring_id);
766 req.queue_id = htole16(softc->q_info[0].id);
768 /* MODE_POLL appears to crash the firmware */
770 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
772 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_POLL;
774 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
776 BNXT_HWRM_LOCK(softc);
777 rc = _hwrm_send_message(softc, &req, sizeof(req));
781 ring->phys_id = le16toh(resp->ring_id);
784 BNXT_HWRM_UNLOCK(softc);
789 bnxt_hwrm_stat_ctx_alloc(struct bnxt_softc *softc, struct bnxt_cp_ring *cpr,
792 struct hwrm_stat_ctx_alloc_input req = {0};
793 struct hwrm_stat_ctx_alloc_output *resp;
796 if (cpr->stats_ctx_id != HWRM_NA_SIGNATURE) {
797 device_printf(softc->dev,
798 "Attempt to re-allocate stats ctx %08x\n",
803 resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
804 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_STAT_CTX_ALLOC);
806 req.update_period_ms = htole32(1000);
807 req.stats_dma_addr = htole64(paddr);
809 BNXT_HWRM_LOCK(softc);
810 rc = _hwrm_send_message(softc, &req, sizeof(req));
814 cpr->stats_ctx_id = le32toh(resp->stat_ctx_id);
817 BNXT_HWRM_UNLOCK(softc);
823 bnxt_hwrm_port_qstats(struct bnxt_softc *softc)
825 struct hwrm_port_qstats_input req = {0};
828 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_QSTATS);
830 req.port_id = htole16(softc->pf.port_id);
831 req.rx_stat_host_addr = htole64(softc->hw_rx_port_stats.idi_paddr);
832 req.tx_stat_host_addr = htole64(softc->hw_tx_port_stats.idi_paddr);
834 BNXT_HWRM_LOCK(softc);
835 rc = _hwrm_send_message(softc, &req, sizeof(req));
836 BNXT_HWRM_UNLOCK(softc);
842 bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt_softc *softc,
843 struct bnxt_vnic_info *vnic)
845 struct hwrm_cfa_l2_set_rx_mask_input req = {0};
846 struct bnxt_vlan_tag *tag;
848 uint32_t num_vlan_tags = 0;;
850 uint32_t mask = vnic->rx_mask;
853 SLIST_FOREACH(tag, &vnic->vlan_tags, next)
858 HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN)) {
859 if (!vnic->vlan_only)
860 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
863 HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
865 if (vnic->vlan_tag_list.idi_vaddr) {
866 iflib_dma_free(&vnic->vlan_tag_list);
867 vnic->vlan_tag_list.idi_vaddr = NULL;
869 rc = iflib_dma_alloc(softc->ctx, 4 * num_vlan_tags,
870 &vnic->vlan_tag_list, BUS_DMA_NOWAIT);
873 tags = (uint32_t *)vnic->vlan_tag_list.idi_vaddr;
876 SLIST_FOREACH(tag, &vnic->vlan_tags, next) {
877 tags[i] = htole32((tag->tpid << 16) | tag->tag);
881 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_SET_RX_MASK);
883 req.vnic_id = htole32(vnic->id);
884 req.mask = htole32(mask);
885 req.mc_tbl_addr = htole64(vnic->mc_list.idi_paddr);
886 req.num_mc_entries = htole32(vnic->mc_list_count);
887 req.vlan_tag_tbl_addr = htole64(vnic->vlan_tag_list.idi_paddr);
888 req.num_vlan_tags = htole32(num_vlan_tags);
889 return hwrm_send_message(softc, &req, sizeof(req));
894 bnxt_hwrm_set_filter(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
896 struct hwrm_cfa_l2_filter_alloc_input req = {0};
897 struct hwrm_cfa_l2_filter_alloc_output *resp;
898 uint32_t enables = 0;
901 if (vnic->filter_id != -1) {
902 device_printf(softc->dev,
903 "Attempt to re-allocate l2 ctx filter\n");
907 resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
908 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_FILTER_ALLOC);
910 req.flags = htole32(HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX);
911 enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR
912 | HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK
913 | HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
914 req.enables = htole32(enables);
915 req.dst_id = htole16(vnic->id);
916 memcpy(req.l2_addr, if_getlladdr(iflib_get_ifp(softc->ctx)),
918 memset(&req.l2_addr_mask, 0xff, sizeof(req.l2_addr_mask));
920 BNXT_HWRM_LOCK(softc);
921 rc = _hwrm_send_message(softc, &req, sizeof(req));
925 vnic->filter_id = le64toh(resp->l2_filter_id);
926 vnic->flow_id = le64toh(resp->flow_id);
929 BNXT_HWRM_UNLOCK(softc);
934 bnxt_hwrm_rss_cfg(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic,
937 struct hwrm_vnic_rss_cfg_input req = {0};
938 struct hwrm_vnic_rss_cfg_output *resp;
940 resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
941 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_CFG);
943 req.hash_type = htole32(hash_type);
944 req.ring_grp_tbl_addr = htole64(vnic->rss_grp_tbl.idi_paddr);
945 req.hash_key_tbl_addr = htole64(vnic->rss_hash_key_tbl.idi_paddr);
946 req.rss_ctx_idx = htole16(vnic->rss_id);
948 return hwrm_send_message(softc, &req, sizeof(req));
952 bnxt_cfg_async_cr(struct bnxt_softc *softc)
956 if (BNXT_PF(softc)) {
957 struct hwrm_func_cfg_input req = {0};
959 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_CFG);
962 req.enables = htole32(HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
963 req.async_event_cr = softc->def_cp_ring.ring.phys_id;
965 rc = hwrm_send_message(softc, &req, sizeof(req));
968 struct hwrm_func_vf_cfg_input req = {0};
970 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_VF_CFG);
972 req.enables = htole32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
973 req.async_event_cr = softc->def_cp_ring.ring.phys_id;
975 rc = hwrm_send_message(softc, &req, sizeof(req));
981 bnxt_validate_hw_lro_settings(struct bnxt_softc *softc)
983 softc->hw_lro.enable = min(softc->hw_lro.enable, 1);
985 softc->hw_lro.is_mode_gro = min(softc->hw_lro.is_mode_gro, 1);
987 softc->hw_lro.max_agg_segs = min(softc->hw_lro.max_agg_segs,
988 HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX);
990 softc->hw_lro.max_aggs = min(softc->hw_lro.max_aggs,
991 HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
993 softc->hw_lro.min_agg_len = min(softc->hw_lro.min_agg_len, BNXT_MAX_MTU);
997 bnxt_hwrm_vnic_tpa_cfg(struct bnxt_softc *softc)
999 struct hwrm_vnic_tpa_cfg_input req = {0};
1002 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_TPA_CFG);
1004 if (softc->hw_lro.enable) {
1005 flags = HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1006 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1007 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1008 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ;
1010 if (softc->hw_lro.is_mode_gro)
1011 flags |= HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO;
1013 flags |= HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE;
1015 req.flags = htole32(flags);
1017 req.enables = htole32(HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1018 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1019 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1021 req.max_agg_segs = htole16(softc->hw_lro.max_agg_segs);
1022 req.max_aggs = htole16(softc->hw_lro.max_aggs);
1023 req.min_agg_len = htole32(softc->hw_lro.min_agg_len);
1026 req.vnic_id = htole16(softc->vnic_info.id);
1028 return hwrm_send_message(softc, &req, sizeof(req));
1032 bnxt_hwrm_nvm_find_dir_entry(struct bnxt_softc *softc, uint16_t type,
1033 uint16_t *ordinal, uint16_t ext, uint16_t *index, bool use_index,
1034 uint8_t search_opt, uint32_t *data_length, uint32_t *item_length,
1037 struct hwrm_nvm_find_dir_entry_input req = {0};
1038 struct hwrm_nvm_find_dir_entry_output *resp =
1039 (void *)softc->hwrm_cmd_resp.idi_vaddr;
1045 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_FIND_DIR_ENTRY);
1047 req.enables = htole32(
1048 HWRM_NVM_FIND_DIR_ENTRY_INPUT_ENABLES_DIR_IDX_VALID);
1049 req.dir_idx = htole16(*index);
1051 req.dir_type = htole16(type);
1052 req.dir_ordinal = htole16(*ordinal);
1053 req.dir_ext = htole16(ext);
1054 req.opt_ordinal = search_opt;
1056 BNXT_HWRM_LOCK(softc);
1057 old_timeo = softc->hwrm_cmd_timeo;
1058 softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
1059 rc = _hwrm_send_message(softc, &req, sizeof(req));
1060 softc->hwrm_cmd_timeo = old_timeo;
1065 *item_length = le32toh(resp->dir_item_length);
1067 *data_length = le32toh(resp->dir_data_length);
1069 *fw_ver = le32toh(resp->fw_ver);
1070 *ordinal = le16toh(resp->dir_ordinal);
1072 *index = le16toh(resp->dir_idx);
1075 BNXT_HWRM_UNLOCK(softc);
1080 bnxt_hwrm_nvm_read(struct bnxt_softc *softc, uint16_t index, uint32_t offset,
1081 uint32_t length, struct iflib_dma_info *data)
1083 struct hwrm_nvm_read_input req = {0};
1087 if (length > data->idi_size) {
1091 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_READ);
1092 req.host_dest_addr = htole64(data->idi_paddr);
1093 req.dir_idx = htole16(index);
1094 req.offset = htole32(offset);
1095 req.len = htole32(length);
1096 BNXT_HWRM_LOCK(softc);
1097 old_timeo = softc->hwrm_cmd_timeo;
1098 softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
1099 rc = _hwrm_send_message(softc, &req, sizeof(req));
1100 softc->hwrm_cmd_timeo = old_timeo;
1101 BNXT_HWRM_UNLOCK(softc);
1104 bus_dmamap_sync(data->idi_tag, data->idi_map, BUS_DMASYNC_POSTREAD);
1113 bnxt_hwrm_nvm_modify(struct bnxt_softc *softc, uint16_t index, uint32_t offset,
1114 void *data, bool cpyin, uint32_t length)
1116 struct hwrm_nvm_modify_input req = {0};
1117 struct iflib_dma_info dma_data;
1121 if (length == 0 || !data)
1123 rc = iflib_dma_alloc(softc->ctx, length, &dma_data,
1128 rc = copyin(data, dma_data.idi_vaddr, length);
1133 memcpy(dma_data.idi_vaddr, data, length);
1134 bus_dmamap_sync(dma_data.idi_tag, dma_data.idi_map,
1135 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1137 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_MODIFY);
1138 req.host_src_addr = htole64(dma_data.idi_paddr);
1139 req.dir_idx = htole16(index);
1140 req.offset = htole32(offset);
1141 req.len = htole32(length);
1142 BNXT_HWRM_LOCK(softc);
1143 old_timeo = softc->hwrm_cmd_timeo;
1144 softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
1145 rc = _hwrm_send_message(softc, &req, sizeof(req));
1146 softc->hwrm_cmd_timeo = old_timeo;
1147 BNXT_HWRM_UNLOCK(softc);
1150 iflib_dma_free(&dma_data);
1155 bnxt_hwrm_fw_reset(struct bnxt_softc *softc, uint8_t processor,
1158 struct hwrm_fw_reset_input req = {0};
1159 struct hwrm_fw_reset_output *resp =
1160 (void *)softc->hwrm_cmd_resp.idi_vaddr;
1165 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_RESET);
1166 req.embedded_proc_type = processor;
1167 req.selfrst_status = *selfreset;
1169 BNXT_HWRM_LOCK(softc);
1170 rc = _hwrm_send_message(softc, &req, sizeof(req));
1173 *selfreset = resp->selfrst_status;
1176 BNXT_HWRM_UNLOCK(softc);
1181 bnxt_hwrm_fw_qstatus(struct bnxt_softc *softc, uint8_t type, uint8_t *selfreset)
1183 struct hwrm_fw_qstatus_input req = {0};
1184 struct hwrm_fw_qstatus_output *resp =
1185 (void *)softc->hwrm_cmd_resp.idi_vaddr;
1190 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_QSTATUS);
1191 req.embedded_proc_type = type;
1193 BNXT_HWRM_LOCK(softc);
1194 rc = _hwrm_send_message(softc, &req, sizeof(req));
1197 *selfreset = resp->selfrst_status;
1200 BNXT_HWRM_UNLOCK(softc);
/*
 * Write a directory entry into the device NVRAM.
 *
 * data/cpyin:    source buffer; copyin() from userspace when cpyin is true,
 *                otherwise a plain kernel memcpy.
 * type/ordinal/ext/attr/option: NVM directory entry coordinates/attributes.
 * keep:          when set, ask firmware to keep the original active image.
 * item_length:   in/out — requested item length in, firmware's actual
 *                dir_item_length out.
 *
 * Uses a temporary host DMA buffer as the firmware's source; runs with the
 * longer BNXT_NVM_TIMEO since flash writes are slow.
 */
1205 bnxt_hwrm_nvm_write(struct bnxt_softc *softc, void *data, bool cpyin,
1206 uint16_t type, uint16_t ordinal, uint16_t ext, uint16_t attr,
1207 uint16_t option, uint32_t data_length, bool keep, uint32_t *item_length,
1210 struct hwrm_nvm_write_input req = {0};
1211 struct hwrm_nvm_write_output *resp =
1212 (void *)softc->hwrm_cmd_resp.idi_vaddr;
1213 struct iflib_dma_info dma_data;
/* Stage the payload in a DMA-able buffer the firmware can read. */
1218 rc = iflib_dma_alloc(softc->ctx, data_length, &dma_data,
1223 rc = copyin(data, dma_data.idi_vaddr, data_length);
1228 memcpy(dma_data.idi_vaddr, data, data_length);
/* Make the staged bytes visible to the device before issuing the command. */
1229 bus_dmamap_sync(dma_data.idi_tag, dma_data.idi_map,
1230 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/*
 * NOTE(review): elided lines around here — in context this zeroing
 * presumably belongs to the data_length == 0 branch (no payload), since
 * idi_paddr is passed to the firmware just below; confirm against the
 * full source.
 */
1233 dma_data.idi_paddr = 0;
1235 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_WRITE);
1237 req.host_src_addr = htole64(dma_data.idi_paddr);
1238 req.dir_type = htole16(type);
1239 req.dir_ordinal = htole16(ordinal);
1240 req.dir_ext = htole16(ext);
1241 req.dir_attr = htole16(attr);
1242 req.dir_data_length = htole32(data_length);
1243 req.option = htole16(option);
1246 htole16(HWRM_NVM_WRITE_INPUT_FLAGS_KEEP_ORIG_ACTIVE_IMG);
1249 req.dir_item_length = htole32(*item_length);
1251 BNXT_HWRM_LOCK(softc);
/* Flash writes are slow: temporarily widen the HWRM timeout. */
1252 old_timeo = softc->hwrm_cmd_timeo;
1253 softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
1254 rc = _hwrm_send_message(softc, &req, sizeof(req));
1255 softc->hwrm_cmd_timeo = old_timeo;
/* Return the firmware's view of the item length and directory index. */
1259 *item_length = le32toh(resp->dir_item_length);
1261 *index = le16toh(resp->dir_idx);
1264 BNXT_HWRM_UNLOCK(softc);
1267 iflib_dma_free(&dma_data);
/*
 * Erase one NVRAM directory entry by index.
 * Uses the extended BNXT_NVM_TIMEO because flash erase is slow.
 */
1272 bnxt_hwrm_nvm_erase_dir_entry(struct bnxt_softc *softc, uint16_t index)
1274 struct hwrm_nvm_erase_dir_entry_input req = {0};
1278 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_ERASE_DIR_ENTRY);
1279 req.dir_idx = htole16(index);
1280 BNXT_HWRM_LOCK(softc);
/* Save/restore the command timeout around the slow NVM operation. */
1281 old_timeo = softc->hwrm_cmd_timeo;
1282 softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
1283 rc = _hwrm_send_message(softc, &req, sizeof(req));
1284 softc->hwrm_cmd_timeo = old_timeo;
1285 BNXT_HWRM_UNLOCK(softc);
/*
 * Fetch NVRAM directory geometry: number of entries and per-entry size.
 *
 * entries/entry_length: out parameters filled from the firmware reply.
 *
 * NOTE(review): elided lines — confirm rc is checked before resp is read.
 */
1290 bnxt_hwrm_nvm_get_dir_info(struct bnxt_softc *softc, uint32_t *entries,
1291 uint32_t *entry_length)
1293 struct hwrm_nvm_get_dir_info_input req = {0};
1294 struct hwrm_nvm_get_dir_info_output *resp =
1295 (void *)softc->hwrm_cmd_resp.idi_vaddr;
1299 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_GET_DIR_INFO);
1301 BNXT_HWRM_LOCK(softc);
/* NVM queries also use the extended timeout. */
1302 old_timeo = softc->hwrm_cmd_timeo;
1303 softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
1304 rc = _hwrm_send_message(softc, &req, sizeof(req));
1305 softc->hwrm_cmd_timeo = old_timeo;
1310 *entries = le32toh(resp->entries);
1312 *entry_length = le32toh(resp->entry_length);
1315 BNXT_HWRM_UNLOCK(softc);
/*
 * Read the whole NVRAM directory into caller-supplied DMA memory.
 *
 * entries/entry_length may be NULL; local storage is substituted (the
 * elided lines presumably NULL-check and point them at locals like
 * ent_len below — confirm against the full source).
 * dma_data: destination buffer; must be at least entries * entry_length
 * bytes, which is validated below.
 */
1320 bnxt_hwrm_nvm_get_dir_entries(struct bnxt_softc *softc, uint32_t *entries,
1321 uint32_t *entry_length, struct iflib_dma_info *dma_data)
1323 struct hwrm_nvm_get_dir_entries_input req = {0};
/* Fall back to a local when the caller didn't ask for entry_length. */
1332 entry_length = &ent_len;
/* First learn the directory geometry so we can validate the buffer. */
1334 rc = bnxt_hwrm_nvm_get_dir_info(softc, entries, entry_length);
1337 if (*entries * *entry_length > dma_data->idi_size) {
1343 * TODO: There's a race condition here that could blow up DMA memory...
1344 * we need to allocate the max size, not the currently in use
1345 * size. The command should totally have a max size here.
1347 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_GET_DIR_ENTRIES);
1348 req.host_dest_addr = htole64(dma_data->idi_paddr);
1349 BNXT_HWRM_LOCK(softc);
1350 old_timeo = softc->hwrm_cmd_timeo;
1351 softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
1352 rc = _hwrm_send_message(softc, &req, sizeof(req));
1353 softc->hwrm_cmd_timeo = old_timeo;
1354 BNXT_HWRM_UNLOCK(softc);
/* The device wrote into host memory: sync before the CPU reads it. */
1357 bus_dmamap_sync(dma_data->idi_tag, dma_data->idi_map,
1358 BUS_DMASYNC_POSTWRITE);
/*
 * Query NVRAM device identity and capacity information.
 *
 * All out parameters are filled from the firmware reply (the elided lines
 * presumably NULL-check each pointer before storing — each store below is
 * on its own original line, consistent with per-field "if (ptr)" guards;
 * confirm against the full source).
 */
1365 bnxt_hwrm_nvm_get_dev_info(struct bnxt_softc *softc, uint16_t *mfg_id,
1366 uint16_t *device_id, uint32_t *sector_size, uint32_t *nvram_size,
1367 uint32_t *reserved_size, uint32_t *available_size)
1369 struct hwrm_nvm_get_dev_info_input req = {0};
1370 struct hwrm_nvm_get_dev_info_output *resp =
1371 (void *)softc->hwrm_cmd_resp.idi_vaddr;
1375 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_GET_DEV_INFO);
1377 BNXT_HWRM_LOCK(softc);
1378 old_timeo = softc->hwrm_cmd_timeo;
1379 softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
1380 rc = _hwrm_send_message(softc, &req, sizeof(req));
1381 softc->hwrm_cmd_timeo = old_timeo;
1386 *mfg_id = le16toh(resp->manufacturer_id);
1388 *device_id = le16toh(resp->device_id);
1390 *sector_size = le32toh(resp->sector_size);
1392 *nvram_size = le32toh(resp->nvram_size);
1394 *reserved_size = le32toh(resp->reserved_size);
1396 *available_size = le32toh(resp->available_size);
1399 BNXT_HWRM_UNLOCK(softc);
/*
 * Ask firmware to install a previously written update package.
 *
 * install_type:    which package(s) to install.
 * installed_items / result / problem_item / reset_required: optional out
 *                  parameters (installed_items is explicitly NULL-checked
 *                  below; the others are on elided guard lines —
 *                  presumably guarded the same way, confirm).
 */
1404 bnxt_hwrm_nvm_install_update(struct bnxt_softc *softc,
1405 uint32_t install_type, uint64_t *installed_items, uint8_t *result,
1406 uint8_t *problem_item, uint8_t *reset_required)
1408 struct hwrm_nvm_install_update_input req = {0};
1409 struct hwrm_nvm_install_update_output *resp =
1410 (void *)softc->hwrm_cmd_resp.idi_vaddr;
1414 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_INSTALL_UPDATE);
1415 req.install_type = htole32(install_type);
1417 BNXT_HWRM_LOCK(softc);
1418 old_timeo = softc->hwrm_cmd_timeo;
1419 softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
1420 rc = _hwrm_send_message(softc, &req, sizeof(req));
1421 softc->hwrm_cmd_timeo = old_timeo;
1425 if (installed_items)
/*
 * NOTE(review): stored through a uint64_t * but converted with
 * le32toh — if resp->installed_items is a 64-bit field this drops
 * the upper half; should presumably be le64toh. Verify against
 * hsi_struct_def.h.
 */
1426 *installed_items = le32toh(resp->installed_items);
1428 *result = resp->result;
1430 *problem_item = resp->problem_item;
1432 *reset_required = resp->reset_required;
1435 BNXT_HWRM_UNLOCK(softc);
/*
 * Ask firmware to verify an NVM update identified by directory
 * type/ordinal/ext. Slow operation: runs under BNXT_NVM_TIMEO.
 */
1440 bnxt_hwrm_nvm_verify_update(struct bnxt_softc *softc, uint16_t type,
1441 uint16_t ordinal, uint16_t ext)
1443 struct hwrm_nvm_verify_update_input req = {0};
1447 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_VERIFY_UPDATE);
1449 req.dir_type = htole16(type);
1450 req.dir_ordinal = htole16(ordinal);
1451 req.dir_ext = htole16(ext);
1453 BNXT_HWRM_LOCK(softc);
1454 old_timeo = softc->hwrm_cmd_timeo;
1455 softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
1456 rc = _hwrm_send_message(softc, &req, sizeof(req));
1457 softc->hwrm_cmd_timeo = old_timeo;
1458 BNXT_HWRM_UNLOCK(softc);
/*
 * Read the firmware's real-time clock.
 *
 * All out parameters are filled from the reply; 16-bit fields are
 * byte-swapped from little-endian. (The day/hour stores sit on elided
 * lines between month and minute — presumably guarded per-pointer like
 * the visible stores; confirm against the full source.)
 */
1463 bnxt_hwrm_fw_get_time(struct bnxt_softc *softc, uint16_t *year, uint8_t *month,
1464 uint8_t *day, uint8_t *hour, uint8_t *minute, uint8_t *second,
1465 uint16_t *millisecond, uint16_t *zone)
1467 struct hwrm_fw_get_time_input req = {0};
1468 struct hwrm_fw_get_time_output *resp =
1469 (void *)softc->hwrm_cmd_resp.idi_vaddr;
1472 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_GET_TIME);
1474 BNXT_HWRM_LOCK(softc);
1475 rc = _hwrm_send_message(softc, &req, sizeof(req));
1480 *year = le16toh(resp->year);
1482 *month = resp->month;
1488 *minute = resp->minute;
1490 *second = resp->second;
1492 *millisecond = le16toh(resp->millisecond);
1494 *zone = le16toh(resp->zone);
1497 BNXT_HWRM_UNLOCK(softc);
/*
 * Set the firmware's real-time clock. Multi-byte fields are converted to
 * little-endian; single-byte fields are assigned directly. (The
 * month/day/hour assignments sit on elided lines between year and minute.)
 * Returns the hwrm_send_message() status directly.
 */
1502 bnxt_hwrm_fw_set_time(struct bnxt_softc *softc, uint16_t year, uint8_t month,
1503 uint8_t day, uint8_t hour, uint8_t minute, uint8_t second,
1504 uint16_t millisecond, uint16_t zone)
1506 struct hwrm_fw_set_time_input req = {0};
1508 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_SET_TIME);
1510 req.year = htole16(year);
1514 req.minute = minute;
1515 req.second = second;
1516 req.millisecond = htole16(millisecond);
1517 req.zone = htole16(zone);
1518 return hwrm_send_message(softc, &req, sizeof(req));
/*
 * Query the port PHY configuration and cache it in softc->link_info,
 * plus PHY version/vendor strings in softc->ver_info.
 *
 * NOTE(review): elided lines — confirm rc is checked before the resp
 * fields are consumed below.
 */
1522 bnxt_hwrm_port_phy_qcfg(struct bnxt_softc *softc)
1524 struct bnxt_link_info *link_info = &softc->link_info;
1525 struct hwrm_port_phy_qcfg_input req = {0};
1526 struct hwrm_port_phy_qcfg_output *resp =
1527 (void *)softc->hwrm_cmd_resp.idi_vaddr;
1530 BNXT_HWRM_LOCK(softc);
1531 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_PHY_QCFG);
1533 rc = _hwrm_send_message(softc, &req, sizeof(req));
/* Copy the raw link state out of the shared reply buffer. */
1537 link_info->phy_link_status = resp->link;
1538 link_info->duplex = resp->duplex_cfg;
1539 link_info->pause = resp->pause;
1540 link_info->auto_mode = resp->auto_mode;
1541 link_info->auto_pause = resp->auto_pause;
1542 link_info->force_pause = resp->force_pause;
1543 link_info->duplex_setting = resp->duplex_cfg;
/* Speed is only meaningful when the link is actually up. */
1544 if (link_info->phy_link_status == HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK)
1545 link_info->link_speed = le16toh(resp->link_speed);
1547 link_info->link_speed = 0;
1548 link_info->force_link_speed = le16toh(resp->force_link_speed);
1549 link_info->auto_link_speed = le16toh(resp->auto_link_speed);
1550 link_info->support_speeds = le16toh(resp->support_speeds);
1551 link_info->auto_link_speeds = le16toh(resp->auto_link_speed_mask);
1552 link_info->preemphasis = le32toh(resp->preemphasis);
/* PHY firmware version triple, also rendered as a display string. */
1553 link_info->phy_ver[0] = resp->phy_maj;
1554 link_info->phy_ver[1] = resp->phy_min;
1555 link_info->phy_ver[2] = resp->phy_bld;
1556 snprintf(softc->ver_info->phy_ver, sizeof(softc->ver_info->phy_ver),
1557 "%d.%d.%d", link_info->phy_ver[0], link_info->phy_ver[1],
1558 link_info->phy_ver[2]);
1559 strlcpy(softc->ver_info->phy_vendor, resp->phy_vendor_name,
1561 strlcpy(softc->ver_info->phy_partnumber, resp->phy_vendor_partnumber,
1563 link_info->media_type = resp->media_type;
1564 link_info->phy_type = resp->phy_type;
1565 link_info->transceiver = resp->xcvr_pkg_type;
/* The PHY address shares a byte with EEE config bits; mask them off. */
1566 link_info->phy_addr = resp->eee_config_phy_addr &
1567 HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_MASK;
1570 BNXT_HWRM_UNLOCK(softc);
/*
 * Walk the firmware's Wake-on-LAN filter list starting at 'handle'.
 * Caches the magic-packet filter id in softc->wol_filter_id when found.
 * Returns the next handle in the chain (0 terminates the walk — the
 * elided lines presumably return next_handle; confirm).
 */
1575 bnxt_hwrm_get_wol_fltrs(struct bnxt_softc *softc, uint16_t handle)
1577 struct hwrm_wol_filter_qcfg_input req = {0};
1578 struct hwrm_wol_filter_qcfg_output *resp =
1579 (void *)softc->hwrm_cmd_resp.idi_vaddr;
1580 uint16_t next_handle = 0;
1583 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_WOL_FILTER_QCFG);
1584 req.port_id = htole16(softc->pf.port_id);
1585 req.handle = htole16(handle);
1586 rc = hwrm_send_message(softc, &req, sizeof(req));
1588 next_handle = le16toh(resp->next_handle);
1589 if (next_handle != 0) {
/* Only the magic-packet filter type is of interest here. */
1590 if (resp->wol_type ==
1591 HWRM_WOL_FILTER_ALLOC_INPUT_WOL_TYPE_MAGICPKT) {
1593 softc->wol_filter_id = resp->wol_filter_id;
/*
 * Allocate a magic-packet Wake-on-LAN filter for this port's MAC address
 * and cache the returned filter id in softc->wol_filter_id.
 */
1601 bnxt_hwrm_alloc_wol_fltr(struct bnxt_softc *softc)
1603 struct hwrm_wol_filter_alloc_input req = {0};
1604 struct hwrm_wol_filter_alloc_output *resp =
1605 (void *)softc->hwrm_cmd_resp.idi_vaddr;
1608 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_WOL_FILTER_ALLOC);
1609 req.port_id = htole16(softc->pf.port_id);
1610 req.wol_type = HWRM_WOL_FILTER_ALLOC_INPUT_WOL_TYPE_MAGICPKT;
/* Tell firmware the mac_address field below is valid. */
1612 htole32(HWRM_WOL_FILTER_ALLOC_INPUT_ENABLES_MAC_ADDRESS);
1613 memcpy(req.mac_address, softc->func.mac_addr, ETHER_ADDR_LEN);
1614 rc = hwrm_send_message(softc, &req, sizeof(req));
1616 softc->wol_filter_id = resp->wol_filter_id;
/*
 * Free the Wake-on-LAN filter previously cached in softc->wol_filter_id.
 * Returns the hwrm_send_message() status directly.
 */
1622 bnxt_hwrm_free_wol_fltr(struct bnxt_softc *softc)
1624 struct hwrm_wol_filter_free_input req = {0};
1626 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_WOL_FILTER_FREE);
1627 req.port_id = htole16(softc->pf.port_id);
/* Mark the wol_filter_id field as valid for this request. */
1629 htole32(HWRM_WOL_FILTER_FREE_INPUT_ENABLES_WOL_FILTER_ID);
1630 req.wol_filter_id = softc->wol_filter_id;
1631 return hwrm_send_message(softc, &req, sizeof(req));
/*
 * Fill an interrupt-coalescing request from packed parameters.
 * max_frames and buf_tmrs each pack two 16-bit values: the low half is
 * the normal value, the high half the "during interrupt" value.
 */
1634 static void bnxt_hwrm_set_coal_params(struct bnxt_softc *softc, uint32_t max_frames,
1635 uint32_t buf_tmrs, uint16_t flags,
1636 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
1638 req->flags = htole16(flags);
/* Low 16 bits: normal path; high 16 bits: during-interrupt path. */
1639 req->num_cmpl_dma_aggr = htole16((uint16_t)max_frames);
1640 req->num_cmpl_dma_aggr_during_int = htole16(max_frames >> 16);
1641 req->cmpl_aggr_dma_tmr = htole16((uint16_t)buf_tmrs);
1642 req->cmpl_aggr_dma_tmr_during_int = htole16(buf_tmrs >> 16);
1643 /* Minimum time between 2 interrupts set to buf_tmr x 2 */
1644 req->int_lat_tmr_min = htole16((uint16_t)buf_tmrs * 2);
1645 req->int_lat_tmr_max = htole16((uint16_t)buf_tmrs * 4);
1646 req->num_cmpl_aggr_int = htole16((uint16_t)max_frames * 4);
/*
 * Program interrupt-coalescing parameters for every completion ring.
 * Builds one RX-tuned and one TX-tuned aggint-params request from the
 * softc->{rx,tx}_coal_* knobs, then sends one request per rxq's
 * completion ring.
 */
1650 int bnxt_hwrm_set_coal(struct bnxt_softc *softc)
1653 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
1655 uint16_t max_buf, max_buf_irq;
1656 uint16_t buf_tmr, buf_tmr_irq;
1659 bnxt_hwrm_cmd_hdr_init(softc, &req_rx,
1660 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
1661 bnxt_hwrm_cmd_hdr_init(softc, &req_tx,
1662 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
1664 /* Each rx completion (2 records) should be DMAed immediately.
1665 * DMA 1/4 of the completion buffers at a time.
1667 max_buf = min_t(uint16_t, softc->rx_coal_frames / 4, 2);
1668 /* max_buf must not be zero */
1669 max_buf = clamp_t(uint16_t, max_buf, 1, 63);
1670 max_buf_irq = clamp_t(uint16_t, softc->rx_coal_frames_irq, 1, 63);
1671 buf_tmr = BNXT_USEC_TO_COAL_TIMER(softc->rx_coal_usecs);
1672 /* buf timer set to 1/4 of interrupt timer */
1673 buf_tmr = max_t(uint16_t, buf_tmr / 4, 1);
1674 buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(softc->rx_coal_usecs_irq);
1675 buf_tmr_irq = max_t(uint16_t, buf_tmr_irq, 1);
1677 flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET;
1679 /* RING_IDLE generates more IRQs for lower latency. Enable it only
1680 * if coal_usecs is less than 25 us.
1682 if (softc->rx_coal_usecs < 25)
1683 flags |= HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
/* Pack (irq << 16 | normal) pairs as bnxt_hwrm_set_coal_params expects. */
1685 bnxt_hwrm_set_coal_params(softc, max_buf_irq << 16 | max_buf,
1686 buf_tmr_irq << 16 | buf_tmr, flags, &req_rx);
1688 /* max_buf must not be zero */
1689 max_buf = clamp_t(uint16_t, softc->tx_coal_frames, 1, 63);
1690 max_buf_irq = clamp_t(uint16_t, softc->tx_coal_frames_irq, 1, 63);
1691 buf_tmr = BNXT_USEC_TO_COAL_TIMER(softc->tx_coal_usecs);
1692 /* buf timer set to 1/4 of interrupt timer */
1693 buf_tmr = max_t(uint16_t, buf_tmr / 4, 1);
1694 buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(softc->tx_coal_usecs_irq);
1695 buf_tmr_irq = max_t(uint16_t, buf_tmr_irq, 1);
1696 flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET;
1697 bnxt_hwrm_set_coal_params(softc, max_buf_irq << 16 | max_buf,
1698 buf_tmr_irq << 16 | buf_tmr, flags, &req_tx);
/* One request per rx queue's completion ring (elided lines presumably
 * select req = &req_rx or &req_tx per ring; confirm). */
1700 for (i = 0; i < softc->nrxqsets; i++) {
1706 * Check if Tx also needs to be done
1707 * So far, Tx processing has been done in softirq contest
1711 req->ring_id = htole16(softc->grp_info[i].cp_ring_id);
1713 rc = hwrm_send_message(softc, req, sizeof(*req));
/*
 * Register this driver with the firmware for async event forwarding.
 *
 * bmap/bmap_size: optional caller-supplied extra event bitmap, OR'd into
 * the default set of events below.
 *
 * A 256-bit bitstring is built, then copied into the request's
 * async_event_fwd[] words.
 */
1722 int bnxt_hwrm_func_rgtr_async_events(struct bnxt_softc *softc, unsigned long *bmap,
1725 struct hwrm_func_drv_rgtr_input req = {0};
1726 bitstr_t *async_events_bmap;
1730 async_events_bmap = bit_alloc(256, M_DEVBUF, M_WAITOK|M_ZERO);
/* View the bitstring as an array of 32-bit words for the copy below. */
1731 events = (uint32_t *)async_events_bmap;
1733 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_DRV_RGTR);
1736 htole32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
/*
 * NOTE(review): sizeof(256 / 8) is sizeof(int) — this zeroes only 4 of
 * the 32 bitmap bytes; the intent is clearly (256 / 8). Harmless only
 * because bit_alloc() above already used M_ZERO, which makes this
 * memset redundant as well as wrong-sized. Fix to (256 / 8) or drop it.
 */
1738 memset(async_events_bmap, 0, sizeof(256 / 8));
/* Default events every driver instance wants forwarded. */
1740 bit_set(async_events_bmap, HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
1741 bit_set(async_events_bmap, HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
1742 bit_set(async_events_bmap, HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED);
1743 bit_set(async_events_bmap, HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE);
1744 bit_set(async_events_bmap, HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
/* Merge any caller-requested extra events. */
1746 if (bmap && bmap_size) {
1747 for (i = 0; i < bmap_size; i++) {
1748 if (bit_test(bmap, i))
1749 bit_set(async_events_bmap, i);
/* Copy the 8 x 32-bit words of the bitmap into the request. */
1753 for (i = 0; i < 8; i++)
1754 req.async_event_fwd[i] |= htole32(events[i]);
1756 free(async_events_bmap, M_DEVBUF);
1758 return hwrm_send_message(softc, &req, sizeof(req));