2 * Broadcom NetXtreme-C/E network driver.
4 * Copyright (c) 2016 Broadcom, All Rights Reserved.
5 * The term Broadcom refers to Broadcom Limited and/or its subsidiaries
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26 * THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include <sys/endian.h>
33 #include <sys/bitstring.h>
36 #include "bnxt_hwrm.h"
37 #include "hsi_struct_def.h"
/*
 * File-local forward declarations for the HWRM command helpers below.
 * All are static: they are internal to this translation unit.
 */
39 static int bnxt_hwrm_err_map(uint16_t err);
40 static inline int _is_valid_ether_addr(uint8_t *);
41 static inline void get_random_ether_addr(uint8_t *);
42 static void bnxt_hwrm_set_link_common(struct bnxt_softc *softc,
43 struct hwrm_port_phy_cfg_input *req);
44 static void bnxt_hwrm_set_pause_common(struct bnxt_softc *softc,
45 struct hwrm_port_phy_cfg_input *req);
46 static void bnxt_hwrm_set_eee(struct bnxt_softc *softc,
47 struct hwrm_port_phy_cfg_input *req);
48 static int _hwrm_send_message(struct bnxt_softc *, void *, uint32_t);
49 static int hwrm_send_message(struct bnxt_softc *, void *, uint32_t);
50 static void bnxt_hwrm_cmd_hdr_init(struct bnxt_softc *, void *, uint16_t);
/* NVRAM operations can be slow; use a five-minute command timeout (in ms). */
52 /* NVRam stuff has a five minute timeout */
53 #define BNXT_NVM_TIMEO (5 * 60 * 1000)
/*
 * Map an HWRM firmware error code to a driver return value.
 * NOTE(review): this extraction has gaps (embedded line numbers jump);
 * the switch statement, return values and default case are not visible here.
 */
56 bnxt_hwrm_err_map(uint16_t err)
61 case HWRM_ERR_CODE_SUCCESS:
63 case HWRM_ERR_CODE_INVALID_PARAMS:
64 case HWRM_ERR_CODE_INVALID_FLAGS:
65 case HWRM_ERR_CODE_INVALID_ENABLES:
67 case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
69 case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
71 case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
73 case HWRM_ERR_CODE_FAIL:
75 case HWRM_ERR_CODE_HWRM_ERROR:
76 case HWRM_ERR_CODE_UNKNOWN_ERR:
/*
 * Allocate the one-page DMA buffer used to receive HWRM command responses
 * (softc->hwrm_cmd_resp). NOTE(review): extraction gaps — return handling
 * not visible here.
 */
85 bnxt_alloc_hwrm_dma_mem(struct bnxt_softc *softc)
89 rc = iflib_dma_alloc(softc->ctx, PAGE_SIZE, &softc->hwrm_cmd_resp,
/*
 * Free the HWRM response DMA buffer if it was allocated, then clear the
 * vaddr so a double free is harmless.
 */
95 bnxt_free_hwrm_dma_mem(struct bnxt_softc *softc)
97 if (softc->hwrm_cmd_resp.idi_vaddr)
98 iflib_dma_free(&softc->hwrm_cmd_resp);
99 softc->hwrm_cmd_resp.idi_vaddr = NULL;
/*
 * Initialize the common HWRM request header: request type, no completion
 * ring (0xffff), broadcast target id (0xffff), and the DMA address where
 * firmware should write the response.
 */
104 bnxt_hwrm_cmd_hdr_init(struct bnxt_softc *softc, void *request,
107 struct input *req = request;
109 req->req_type = htole16(req_type);
110 req->cmpl_ring = 0xffff;
111 req->target_id = 0xffff;
112 req->resp_addr = htole64(softc->hwrm_cmd_resp.idi_paddr);
/*
 * Core HWRM transmit path (caller must hold the HWRM lock — see
 * hwrm_send_message() wrapper).  Assigns a sequence number, writes the
 * request into the HWRM BAR word-by-word, zero-fills the remainder of the
 * request window, rings the doorbell, then polls for a response length and
 * for the "valid" key in the response's last byte.  Returns a mapped error
 * from the firmware's error_code on completion.
 * NOTE(review): extraction gaps — delay calls, early returns and some
 * offsets are not visible here.
 */
116 _hwrm_send_message(struct bnxt_softc *softc, void *msg, uint32_t msg_len)
118 struct input *req = msg;
119 struct hwrm_err_output *resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
120 uint32_t *data = msg;
126 /* TODO: DMASYNC in here. */
127 req->seq_id = htole16(softc->hwrm_cmd_seq++);
128 memset(resp, 0, PAGE_SIZE);
129 cp_ring_id = le16toh(req->cmpl_ring);
131 /* Write request msg to hwrm channel */
132 for (i = 0; i < msg_len; i += 4) {
133 bus_space_write_4(softc->hwrm_bar.tag,
134 softc->hwrm_bar.handle,
139 /* Clear to the end of the request buffer */
140 for (i = msg_len; i < HWRM_MAX_REQ_LEN; i += 4)
141 bus_space_write_4(softc->hwrm_bar.tag, softc->hwrm_bar.handle,
144 /* Ring channel doorbell */
145 bus_space_write_4(softc->hwrm_bar.tag,
146 softc->hwrm_bar.handle,
/* First poll: firmware sets resp_len when the response DMA has landed. */
149 /* Check if response len is updated */
150 for (i = 0; i < softc->hwrm_cmd_timeo; i++) {
151 if (resp->resp_len && resp->resp_len <= 4096)
155 if (i >= softc->hwrm_cmd_timeo) {
156 device_printf(softc->dev,
157 "Timeout sending %s: (timeout: %u) seq: %d\n",
158 GET_HWRM_REQ_TYPE(req->req_type), softc->hwrm_cmd_timeo,
159 le16toh(req->seq_id));
/* Second poll: the valid key in the final byte guards against torn DMA. */
162 /* Last byte of resp contains the valid key */
163 valid = (uint8_t *)resp + resp->resp_len - 1;
164 for (i = 0; i < softc->hwrm_cmd_timeo; i++) {
165 if (*valid == HWRM_RESP_VALID_KEY)
169 if (i >= softc->hwrm_cmd_timeo) {
170 device_printf(softc->dev, "Timeout sending %s: "
171 "(timeout: %u) msg {0x%x 0x%x} len:%d v: %d\n",
172 GET_HWRM_REQ_TYPE(req->req_type),
173 softc->hwrm_cmd_timeo, le16toh(req->req_type),
174 le16toh(req->seq_id), msg_len,
179 err = le16toh(resp->error_code);
181 /* HWRM_ERR_CODE_FAIL is a "normal" error, don't log */
182 if (err != HWRM_ERR_CODE_FAIL) {
183 device_printf(softc->dev,
184 "%s command returned %s error.\n",
185 GET_HWRM_REQ_TYPE(req->req_type),
186 GET_HWRM_ERROR_CODE(err));
188 return bnxt_hwrm_err_map(err);
/*
 * Locked wrapper around _hwrm_send_message(): serializes HWRM commands
 * via the softc HWRM mutex.
 */
195 hwrm_send_message(struct bnxt_softc *softc, void *msg, uint32_t msg_len)
199 BNXT_HWRM_LOCK(softc);
200 rc = _hwrm_send_message(softc, msg, msg_len);
201 BNXT_HWRM_UNLOCK(softc);
/*
 * Query queue/port configuration (HWRM_QUEUE_QPORTCFG) and populate
 * softc->max_tc (clamped to BNXT_MAX_QUEUE) and the q_info[] id/profile
 * pairs from the response.
 */
206 bnxt_hwrm_queue_qportcfg(struct bnxt_softc *softc)
208 struct hwrm_queue_qportcfg_input req = {0};
209 struct hwrm_queue_qportcfg_output *resp =
210 (void *)softc->hwrm_cmd_resp.idi_vaddr;
215 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_QUEUE_QPORTCFG);
217 BNXT_HWRM_LOCK(softc);
218 rc = _hwrm_send_message(softc, &req, sizeof(req));
222 if (!resp->max_configurable_queues) {
226 softc->max_tc = resp->max_configurable_queues;
227 if (softc->max_tc > BNXT_MAX_QUEUE)
228 softc->max_tc = BNXT_MAX_QUEUE;
/* queue_id0 starts an interleaved (id, profile) byte array in the resp. */
230 qptr = &resp->queue_id0;
231 for (int i = 0; i < softc->max_tc; i++) {
232 softc->q_info[i].id = *qptr++;
233 softc->q_info[i].profile = *qptr++;
237 BNXT_HWRM_UNLOCK(softc);
/*
 * HWRM_VER_GET: exchange interface versions with firmware and record all
 * version strings (HWRM interface, HWRM fw, mgmt fw, netctrl fw, RoCE fw)
 * plus chip identification into softc->ver_info.  Firmware components
 * reporting 0.0.0 are shown as "<N/A>"/"<not installed>".  Also adopts the
 * firmware-supplied max request length and default command timeout.
 */
243 bnxt_hwrm_ver_get(struct bnxt_softc *softc)
245 struct hwrm_ver_get_input req = {0};
246 struct hwrm_ver_get_output *resp =
247 (void *)softc->hwrm_cmd_resp.idi_vaddr;
249 const char nastr[] = "<not installed>";
250 const char naver[] = "<N/A>";
/* Conservative defaults until firmware tells us otherwise (see below). */
252 softc->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
253 softc->hwrm_cmd_timeo = 1000;
254 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VER_GET);
256 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
257 req.hwrm_intf_min = HWRM_VERSION_MINOR;
258 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
260 BNXT_HWRM_LOCK(softc);
261 rc = _hwrm_send_message(softc, &req, sizeof(req));
265 snprintf(softc->ver_info->hwrm_if_ver, BNXT_VERSTR_SIZE, "%d.%d.%d",
266 resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
267 softc->ver_info->hwrm_if_major = resp->hwrm_intf_maj;
268 softc->ver_info->hwrm_if_minor = resp->hwrm_intf_min;
269 softc->ver_info->hwrm_if_update = resp->hwrm_intf_upd;
270 snprintf(softc->ver_info->hwrm_fw_ver, BNXT_VERSTR_SIZE, "%d.%d.%d",
271 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
272 strlcpy(softc->ver_info->driver_hwrm_if_ver, HWRM_VERSION_STR,
274 strlcpy(softc->ver_info->hwrm_fw_name, resp->hwrm_fw_name,
277 if (resp->mgmt_fw_maj == 0 && resp->mgmt_fw_min == 0 &&
278 resp->mgmt_fw_bld == 0) {
279 strlcpy(softc->ver_info->mgmt_fw_ver, naver, BNXT_VERSTR_SIZE);
280 strlcpy(softc->ver_info->mgmt_fw_name, nastr, BNXT_NAME_SIZE);
283 snprintf(softc->ver_info->mgmt_fw_ver, BNXT_VERSTR_SIZE,
284 "%d.%d.%d", resp->mgmt_fw_maj, resp->mgmt_fw_min,
286 strlcpy(softc->ver_info->mgmt_fw_name, resp->mgmt_fw_name,
289 if (resp->netctrl_fw_maj == 0 && resp->netctrl_fw_min == 0 &&
290 resp->netctrl_fw_bld == 0) {
291 strlcpy(softc->ver_info->netctrl_fw_ver, naver,
293 strlcpy(softc->ver_info->netctrl_fw_name, nastr,
297 snprintf(softc->ver_info->netctrl_fw_ver, BNXT_VERSTR_SIZE,
298 "%d.%d.%d", resp->netctrl_fw_maj, resp->netctrl_fw_min,
299 resp->netctrl_fw_bld);
300 strlcpy(softc->ver_info->netctrl_fw_name, resp->netctrl_fw_name,
303 if (resp->roce_fw_maj == 0 && resp->roce_fw_min == 0 &&
304 resp->roce_fw_bld == 0) {
305 strlcpy(softc->ver_info->roce_fw_ver, naver, BNXT_VERSTR_SIZE);
306 strlcpy(softc->ver_info->roce_fw_name, nastr, BNXT_NAME_SIZE);
309 snprintf(softc->ver_info->roce_fw_ver, BNXT_VERSTR_SIZE,
310 "%d.%d.%d", resp->roce_fw_maj, resp->roce_fw_min,
312 strlcpy(softc->ver_info->roce_fw_name, resp->roce_fw_name,
315 softc->ver_info->chip_num = le16toh(resp->chip_num);
316 softc->ver_info->chip_rev = resp->chip_rev;
317 softc->ver_info->chip_metal = resp->chip_metal;
318 softc->ver_info->chip_bond_id = resp->chip_bond_id;
319 softc->ver_info->chip_type = resp->chip_platform_type;
/* Prefer firmware-advertised limits over the compiled-in defaults. */
321 if (resp->max_req_win_len)
322 softc->hwrm_max_req_len = le16toh(resp->max_req_win_len);
323 if (resp->def_req_timeout)
324 softc->hwrm_cmd_timeo = le16toh(resp->def_req_timeout);
327 BNXT_HWRM_UNLOCK(softc);
/*
 * Register this driver with firmware (HWRM_FUNC_DRV_RGTR), reporting
 * OS type FreeBSD and a version derived from __FreeBSD_version
 * (major / minor / update digits).
 */
332 bnxt_hwrm_func_drv_rgtr(struct bnxt_softc *softc)
334 struct hwrm_func_drv_rgtr_input req = {0};
336 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_DRV_RGTR);
338 req.enables = htole32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
339 HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_OS_TYPE);
340 req.os_type = htole16(HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_FREEBSD);
342 req.ver_maj = __FreeBSD_version / 100000;
343 req.ver_min = (__FreeBSD_version / 1000) % 100;
344 req.ver_upd = (__FreeBSD_version / 100) % 10;
346 return hwrm_send_message(softc, &req, sizeof(req));
/*
 * Unregister the driver (HWRM_FUNC_DRV_UNRGTR); when 'shutdown' is set,
 * the prepare-for-shutdown flag is added to the request.
 */
351 bnxt_hwrm_func_drv_unrgtr(struct bnxt_softc *softc, bool shutdown)
353 struct hwrm_func_drv_unrgtr_input req = {0};
355 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_DRV_UNRGTR);
356 if (shutdown == true)
358 HWRM_FUNC_DRV_UNRGTR_INPUT_FLAGS_PREPARE_FOR_SHUTDOWN;
359 return hwrm_send_message(softc, &req, sizeof(req));
/*
 * An Ethernet address is invalid if it is multicast (bit 0 of the first
 * octet set) or all-zero.  NOTE(review): return statements are not visible
 * in this extraction.
 */
364 _is_valid_ether_addr(uint8_t *addr)
366 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
368 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN)))
/*
 * Fill 'addr' with a random Ethernet address from arc4rand().
 * NOTE(review): the lines forcing the locally-administered/unicast bits
 * are not visible in this extraction.
 */
375 get_random_ether_addr(uint8_t *addr)
377 uint8_t temp[ETHER_ADDR_LEN];
379 arc4rand(&temp, sizeof(temp), 0);
382 bcopy(temp, addr, sizeof(temp));
/*
 * HWRM_FUNC_QCAPS: query function capabilities and resource maxima into
 * softc->func (rings, contexts, vnics), plus PF-only limits into
 * softc->pf.  Sets BNXT_FLAG_WOL_CAP when magic-packet WoL is supported.
 * If firmware returns an invalid MAC, a random one is generated.
 */
386 bnxt_hwrm_func_qcaps(struct bnxt_softc *softc)
389 struct hwrm_func_qcaps_input req = {0};
390 struct hwrm_func_qcaps_output *resp =
391 (void *)softc->hwrm_cmd_resp.idi_vaddr;
392 struct bnxt_func_info *func = &softc->func;
394 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_QCAPS);
/* fid 0xffff means "this function". */
395 req.fid = htole16(0xffff);
397 BNXT_HWRM_LOCK(softc);
398 rc = _hwrm_send_message(softc, &req, sizeof(req));
403 htole32(HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_MAGICPKT_SUPPORTED))
404 softc->flags |= BNXT_FLAG_WOL_CAP;
406 func->fw_fid = le16toh(resp->fid);
407 memcpy(func->mac_addr, resp->mac_address, ETHER_ADDR_LEN);
408 func->max_rsscos_ctxs = le16toh(resp->max_rsscos_ctx);
409 func->max_cp_rings = le16toh(resp->max_cmpl_rings);
410 func->max_tx_rings = le16toh(resp->max_tx_rings);
411 func->max_rx_rings = le16toh(resp->max_rx_rings);
412 func->max_hw_ring_grps = le32toh(resp->max_hw_ring_grps);
413 if (!func->max_hw_ring_grps)
414 func->max_hw_ring_grps = func->max_tx_rings;
415 func->max_l2_ctxs = le16toh(resp->max_l2_ctxs);
416 func->max_vnics = le16toh(resp->max_vnics);
417 func->max_stat_ctxs = le16toh(resp->max_stat_ctx);
418 if (BNXT_PF(softc)) {
419 struct bnxt_pf_info *pf = &softc->pf;
421 pf->port_id = le16toh(resp->port_id);
422 pf->first_vf_id = le16toh(resp->first_vf_id);
423 pf->max_vfs = le16toh(resp->max_vfs);
424 pf->max_encap_records = le32toh(resp->max_encap_records);
425 pf->max_decap_records = le32toh(resp->max_decap_records);
426 pf->max_tx_em_flows = le32toh(resp->max_tx_em_flows);
427 pf->max_tx_wm_flows = le32toh(resp->max_tx_wm_flows);
428 pf->max_rx_em_flows = le32toh(resp->max_rx_em_flows);
429 pf->max_rx_wm_flows = le32toh(resp->max_rx_wm_flows);
431 if (!_is_valid_ether_addr(func->mac_addr)) {
432 device_printf(softc->dev, "Invalid ethernet address, generating random locally administered address\n");
433 get_random_ether_addr(func->mac_addr);
437 BNXT_HWRM_UNLOCK(softc);
/*
 * HWRM_FUNC_QCFG: read the currently allocated resource counts
 * (completion/tx/rx rings, vnics) for this function into softc->fn_qcfg.
 */
442 bnxt_hwrm_func_qcfg(struct bnxt_softc *softc)
444 struct hwrm_func_qcfg_input req = {0};
445 struct hwrm_func_qcfg_output *resp =
446 (void *)softc->hwrm_cmd_resp.idi_vaddr;
447 struct bnxt_func_qcfg *fn_qcfg = &softc->fn_qcfg;
450 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_QCFG);
451 req.fid = htole16(0xffff);
452 BNXT_HWRM_LOCK(softc);
453 rc = _hwrm_send_message(softc, &req, sizeof(req));
457 fn_qcfg->alloc_completion_rings = le16toh(resp->alloc_cmpl_rings);
458 fn_qcfg->alloc_tx_rings = le16toh(resp->alloc_tx_rings);
459 fn_qcfg->alloc_rx_rings = le16toh(resp->alloc_rx_rings);
460 fn_qcfg->alloc_vnics = le16toh(resp->alloc_vnics);
462 BNXT_HWRM_UNLOCK(softc);
/*
 * Issue HWRM_FUNC_RESET to reset this function's firmware state.
 */
467 bnxt_hwrm_func_reset(struct bnxt_softc *softc)
469 struct hwrm_func_reset_input req = {0};
471 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_RESET);
474 return hwrm_send_message(softc, &req, sizeof(req));
/*
 * Fill the speed/autoneg portion of a PORT_PHY_CFG request from
 * softc->link_info: either enable all-speeds autoneg (with restart) or
 * force the requested link speed.  Always requests a PHY reset so the
 * setting takes effect immediately.
 */
478 bnxt_hwrm_set_link_common(struct bnxt_softc *softc,
479 struct hwrm_port_phy_cfg_input *req)
481 uint8_t autoneg = softc->link_info.autoneg;
482 uint16_t fw_link_speed = softc->link_info.req_link_speed;
484 if (autoneg & BNXT_AUTONEG_SPEED) {
486 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
489 htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE);
491 htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG);
493 req->force_link_speed = htole16(fw_link_speed);
494 req->flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE);
497 /* tell chimp that the setting takes effect immediately */
498 req->flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY);
/*
 * Fill the flow-control portion of a PORT_PHY_CFG request: either
 * autonegotiated pause (advertising RX/TX as configured) or forced
 * RX/TX pause bits, setting the matching 'enables' bit in each case.
 */
503 bnxt_hwrm_set_pause_common(struct bnxt_softc *softc,
504 struct hwrm_port_phy_cfg_input *req)
506 struct bnxt_link_info *link_info = &softc->link_info;
508 if (link_info->flow_ctrl.autoneg) {
510 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_AUTONEG_PAUSE;
511 if (link_info->flow_ctrl.rx)
513 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
514 if (link_info->flow_ctrl.tx)
516 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
518 htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE);
520 if (link_info->flow_ctrl.rx)
522 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
523 if (link_info->flow_ctrl.tx)
525 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
527 htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE);
/*
 * Fill the Energy-Efficient-Ethernet portion of a PORT_PHY_CFG request.
 * NOTE(review): EEE support is stubbed — eee_enabled is hard-coded false
 * and the ethtool_eee source struct is commented out, so the enable path
 * appears to be dead code pending interface plumbing (see JFV note).
 */
532 /* JFV this needs interface connection */
534 bnxt_hwrm_set_eee(struct bnxt_softc *softc, struct hwrm_port_phy_cfg_input *req)
536 /* struct ethtool_eee *eee = &softc->eee; */
537 bool eee_enabled = false;
542 uint32_t flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_ENABLE;
544 if (eee->tx_lpi_enabled)
545 flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI;
547 req->flags |= htole32(flags);
548 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
549 req->eee_link_speed_mask = htole16(eee_speeds);
550 req->tx_lpi_timer = htole32(eee->tx_lpi_timer);
554 htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_DISABLE);
/*
 * Compose and send a PORT_PHY_CFG request combining pause, link-speed and
 * EEE settings as selected by the boolean flags.  Skipped entirely for
 * NPAR functions.  When forced (non-autoneg) pause was changed, reports
 * link state manually since no link-change event will fire.
 */
560 bnxt_hwrm_set_link_setting(struct bnxt_softc *softc, bool set_pause,
561 bool set_eee, bool set_link)
563 struct hwrm_port_phy_cfg_input req = {0};
566 if (softc->flags & BNXT_FLAG_NPAR)
569 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_PHY_CFG);
572 bnxt_hwrm_set_pause_common(softc, &req);
574 if (softc->link_info.flow_ctrl.autoneg)
579 bnxt_hwrm_set_link_common(softc, &req);
582 bnxt_hwrm_set_eee(softc, &req);
584 BNXT_HWRM_LOCK(softc);
585 rc = _hwrm_send_message(softc, &req, sizeof(req));
589 /* since changing of 'force pause' setting doesn't
590 * trigger any link change event, the driver needs to
591 * update the current pause result upon successfully i
592 * return of the phy_cfg command */
593 if (!softc->link_info.flow_ctrl.autoneg)
594 bnxt_report_link(softc);
597 BNXT_HWRM_UNLOCK(softc);
/*
 * HWRM_VNIC_CFG: configure an already-allocated VNIC — default/BD-stall/
 * VLAN-strip flags from vnic->flags, plus ring group, RSS/CoS/LB rules
 * and MRU.
 */
602 bnxt_hwrm_vnic_cfg(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
604 struct hwrm_vnic_cfg_input req = {0};
605 struct hwrm_vnic_cfg_output *resp;
607 resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
608 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_CFG);
610 if (vnic->flags & BNXT_VNIC_FLAG_DEFAULT)
611 req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
612 if (vnic->flags & BNXT_VNIC_FLAG_BD_STALL)
613 req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
614 if (vnic->flags & BNXT_VNIC_FLAG_VLAN_STRIP)
615 req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
616 req.enables = htole32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
617 HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE |
618 HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
619 req.vnic_id = htole16(vnic->id);
620 req.dflt_ring_grp = htole16(vnic->def_ring_grp);
621 req.rss_rule = htole16(vnic->rss_id);
622 req.cos_rule = htole16(vnic->cos_rule);
623 req.lb_rule = htole16(vnic->lb_rule);
624 req.mru = htole16(vnic->mru);
626 return hwrm_send_message(softc, &req, sizeof(req));
/*
 * HWRM_VNIC_ALLOC: allocate a VNIC and store the firmware-assigned id in
 * vnic->id.  Refuses to run if the VNIC already has a valid id
 * (id != HWRM_NA_SIGNATURE) to catch double allocation.
 */
630 bnxt_hwrm_vnic_alloc(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
632 struct hwrm_vnic_alloc_input req = {0};
633 struct hwrm_vnic_alloc_output *resp =
634 (void *)softc->hwrm_cmd_resp.idi_vaddr;
637 if (vnic->id != (uint16_t)HWRM_NA_SIGNATURE) {
638 device_printf(softc->dev,
639 "Attempt to re-allocate vnic %04x\n", vnic->id);
643 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_ALLOC);
645 if (vnic->flags & BNXT_VNIC_FLAG_DEFAULT)
646 req.flags = htole32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
648 BNXT_HWRM_LOCK(softc);
649 rc = _hwrm_send_message(softc, &req, sizeof(req));
653 vnic->id = le32toh(resp->vnic_id);
656 BNXT_HWRM_UNLOCK(softc);
/*
 * Allocate an RSS/CoS/LB context (HWRM_VNIC_RSS_COS_LB_CTX_ALLOC) and
 * return its id via *ctx_id.  Guards against re-allocation like
 * bnxt_hwrm_vnic_alloc().
 */
661 bnxt_hwrm_vnic_ctx_alloc(struct bnxt_softc *softc, uint16_t *ctx_id)
663 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
664 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
665 (void *)softc->hwrm_cmd_resp.idi_vaddr;
668 if (*ctx_id != (uint16_t)HWRM_NA_SIGNATURE) {
669 device_printf(softc->dev,
670 "Attempt to re-allocate vnic ctx %04x\n", *ctx_id);
674 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
676 BNXT_HWRM_LOCK(softc);
677 rc = _hwrm_send_message(softc, &req, sizeof(req));
681 *ctx_id = le32toh(resp->rss_cos_lb_ctx_id);
684 BNXT_HWRM_UNLOCK(softc);
/*
 * HWRM_RING_GRP_ALLOC: allocate a ring group binding the completion, RX,
 * aggregation rings and stats context in 'grp'; stores the returned
 * group id.  Guards against re-allocation.
 */
689 bnxt_hwrm_ring_grp_alloc(struct bnxt_softc *softc, struct bnxt_grp_info *grp)
691 struct hwrm_ring_grp_alloc_input req = {0};
692 struct hwrm_ring_grp_alloc_output *resp;
695 if (grp->grp_id != (uint16_t)HWRM_NA_SIGNATURE) {
696 device_printf(softc->dev,
697 "Attempt to re-allocate ring group %04x\n", grp->grp_id);
701 resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
702 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_GRP_ALLOC);
703 req.cr = htole16(grp->cp_ring_id);
704 req.rr = htole16(grp->rx_ring_id);
705 req.ar = htole16(grp->ag_ring_id);
706 req.sc = htole16(grp->stats_ctx);
708 BNXT_HWRM_LOCK(softc);
709 rc = _hwrm_send_message(softc, &req, sizeof(req));
713 grp->grp_id = le32toh(resp->ring_group_id);
716 BNXT_HWRM_UNLOCK(softc);
/*
 * HWRM_RING_ALLOC: allocate a hardware ring of the given type, wiring up
 * its DMA page table, size, logical id, completion ring, queue id and
 * optional stats context.  Stores the firmware ring id in ring->phys_id.
 * Guards against re-allocation.  NOTE(review): interrupt-mode selection
 * lines are partially missing; a comment warns MODE_POLL crashes firmware.
 */
721 * Ring allocation message to the firmware
724 bnxt_hwrm_ring_alloc(struct bnxt_softc *softc, uint8_t type,
725 struct bnxt_ring *ring, uint16_t cmpl_ring_id, uint32_t stat_ctx_id,
728 struct hwrm_ring_alloc_input req = {0};
729 struct hwrm_ring_alloc_output *resp;
732 if (ring->phys_id != (uint16_t)HWRM_NA_SIGNATURE) {
733 device_printf(softc->dev,
734 "Attempt to re-allocate ring %04x\n", ring->phys_id);
738 resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
739 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_ALLOC);
740 req.enables = htole32(0);
741 req.fbo = htole32(0);
743 if (stat_ctx_id != HWRM_NA_SIGNATURE) {
744 req.enables |= htole32(
745 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
746 req.stat_ctx_id = htole32(stat_ctx_id);
748 req.ring_type = type;
749 req.page_tbl_addr = htole64(ring->paddr);
750 req.length = htole32(ring->ring_size);
751 req.logical_id = htole16(ring->id);
752 req.cmpl_ring_id = htole16(cmpl_ring_id);
753 req.queue_id = htole16(softc->q_info[0].id);
755 /* MODE_POLL appears to crash the firmware */
757 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
759 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_POLL;
761 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
763 BNXT_HWRM_LOCK(softc);
764 rc = _hwrm_send_message(softc, &req, sizeof(req));
768 ring->phys_id = le16toh(resp->ring_id);
771 BNXT_HWRM_UNLOCK(softc);
/*
 * HWRM_STAT_CTX_ALLOC: allocate a statistics context DMA'd to 'paddr'
 * with a 1000 ms update period; stores the returned id in
 * cpr->stats_ctx_id.  Guards against re-allocation.
 */
776 bnxt_hwrm_stat_ctx_alloc(struct bnxt_softc *softc, struct bnxt_cp_ring *cpr,
779 struct hwrm_stat_ctx_alloc_input req = {0};
780 struct hwrm_stat_ctx_alloc_output *resp;
783 if (cpr->stats_ctx_id != HWRM_NA_SIGNATURE) {
784 device_printf(softc->dev,
785 "Attempt to re-allocate stats ctx %08x\n",
790 resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
791 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_STAT_CTX_ALLOC);
793 req.update_period_ms = htole32(1000);
794 req.stats_dma_addr = htole64(paddr);
796 BNXT_HWRM_LOCK(softc);
797 rc = _hwrm_send_message(softc, &req, sizeof(req));
801 cpr->stats_ctx_id = le32toh(resp->stat_ctx_id);
804 BNXT_HWRM_UNLOCK(softc);
/*
 * HWRM_PORT_QSTATS: ask firmware to DMA the port RX/TX statistics into
 * the preallocated hw_rx_port_stats / hw_tx_port_stats buffers.
 */
810 bnxt_hwrm_port_qstats(struct bnxt_softc *softc)
812 struct hwrm_port_qstats_input req = {0};
815 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_QSTATS);
817 req.port_id = htole16(softc->pf.port_id);
818 req.rx_stat_host_addr = htole64(softc->hw_rx_port_stats.idi_paddr);
819 req.tx_stat_host_addr = htole64(softc->hw_tx_port_stats.idi_paddr);
821 BNXT_HWRM_LOCK(softc);
822 rc = _hwrm_send_message(softc, &req, sizeof(req));
823 BNXT_HWRM_UNLOCK(softc);
/*
 * HWRM_CFA_L2_SET_RX_MASK: program the VNIC's receive filter mask.
 * Counts the VLAN tags on vnic->vlan_tags, (re)allocates a DMA table of
 * packed 32-bit (tpid << 16 | tag) entries for firmware, adjusts the mask
 * for vlan-only vs vlan+nonvlan operation, and sends the request along
 * with the multicast table address/count.
 * NOTE(review): extraction gaps — some conditional lines are missing.
 */
829 bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt_softc *softc,
830 struct bnxt_vnic_info *vnic)
832 struct hwrm_cfa_l2_set_rx_mask_input req = {0};
833 struct bnxt_vlan_tag *tag;
/* Fixed: stray double semicolon (empty statement) after initializer. */
835 uint32_t num_vlan_tags = 0;
837 uint32_t mask = vnic->rx_mask;
840 SLIST_FOREACH(tag, &vnic->vlan_tags, next)
845 HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN)) {
846 if (!vnic->vlan_only)
847 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
850 HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
/* Re-create the VLAN tag DMA table sized to the current tag count. */
852 if (vnic->vlan_tag_list.idi_vaddr) {
853 iflib_dma_free(&vnic->vlan_tag_list);
854 vnic->vlan_tag_list.idi_vaddr = NULL;
856 rc = iflib_dma_alloc(softc->ctx, 4 * num_vlan_tags,
857 &vnic->vlan_tag_list, BUS_DMA_NOWAIT);
860 tags = (uint32_t *)vnic->vlan_tag_list.idi_vaddr;
863 SLIST_FOREACH(tag, &vnic->vlan_tags, next) {
864 tags[i] = htole32((tag->tpid << 16) | tag->tag);
868 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_SET_RX_MASK);
870 req.vnic_id = htole32(vnic->id);
871 req.mask = htole32(mask);
872 req.mc_tbl_addr = htole64(vnic->mc_list.idi_paddr);
873 req.num_mc_entries = htole32(vnic->mc_list_count);
874 req.vlan_tag_tbl_addr = htole64(vnic->vlan_tag_list.idi_paddr);
875 req.num_vlan_tags = htole32(num_vlan_tags);
876 return hwrm_send_message(softc, &req, sizeof(req));
/*
 * HWRM_CFA_L2_FILTER_ALLOC: install an RX L2 unicast filter for the
 * interface's current MAC (full 0xff address mask) targeting this VNIC.
 * Saves the returned filter and flow ids.  Guards against re-allocation
 * (filter_id == -1 means unallocated).
 */
881 bnxt_hwrm_set_filter(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
883 struct hwrm_cfa_l2_filter_alloc_input req = {0};
884 struct hwrm_cfa_l2_filter_alloc_output *resp;
885 uint32_t enables = 0;
888 if (vnic->filter_id != -1) {
889 device_printf(softc->dev,
890 "Attempt to re-allocate l2 ctx filter\n");
894 resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
895 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_FILTER_ALLOC);
897 req.flags = htole32(HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX);
898 enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR
899 | HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK
900 | HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
901 req.enables = htole32(enables);
902 req.dst_id = htole16(vnic->id);
903 memcpy(req.l2_addr, if_getlladdr(iflib_get_ifp(softc->ctx)),
905 memset(&req.l2_addr_mask, 0xff, sizeof(req.l2_addr_mask));
907 BNXT_HWRM_LOCK(softc);
908 rc = _hwrm_send_message(softc, &req, sizeof(req));
912 vnic->filter_id = le64toh(resp->l2_filter_id);
913 vnic->flow_id = le64toh(resp->flow_id);
916 BNXT_HWRM_UNLOCK(softc);
/*
 * HWRM_VNIC_RSS_CFG: program RSS for the VNIC — hash type plus the DMA
 * addresses of the ring-group indirection table and hash-key table,
 * targeting vnic->rss_id.
 */
921 bnxt_hwrm_rss_cfg(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic,
924 struct hwrm_vnic_rss_cfg_input req = {0};
925 struct hwrm_vnic_rss_cfg_output *resp;
927 resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
928 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_CFG);
930 req.hash_type = htole32(hash_type);
931 req.ring_grp_tbl_addr = htole64(vnic->rss_grp_tbl.idi_paddr);
932 req.hash_key_tbl_addr = htole64(vnic->rss_hash_key_tbl.idi_paddr);
933 req.rss_ctx_idx = htole16(vnic->rss_id);
935 return hwrm_send_message(softc, &req, sizeof(req));
/*
 * Point firmware's async event notifications at the default completion
 * ring, using HWRM_FUNC_CFG for a PF and HWRM_FUNC_VF_CFG for a VF.
 */
939 bnxt_cfg_async_cr(struct bnxt_softc *softc)
943 if (BNXT_PF(softc)) {
944 struct hwrm_func_cfg_input req = {0};
946 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_CFG);
949 req.enables = htole32(HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
950 req.async_event_cr = softc->def_cp_ring.ring.phys_id;
952 rc = hwrm_send_message(softc, &req, sizeof(req));
955 struct hwrm_func_vf_cfg_input req = {0};
957 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_VF_CFG);
959 req.enables = htole32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
960 req.async_event_cr = softc->def_cp_ring.ring.phys_id;
962 rc = hwrm_send_message(softc, &req, sizeof(req));
/*
 * Clamp the user-tunable hardware LRO settings into hardware-supported
 * ranges: enable/is_mode_gro to 0..1, agg segs/aggs to their HWRM maxima,
 * and min_agg_len to at most the maximum MTU.
 */
968 bnxt_validate_hw_lro_settings(struct bnxt_softc *softc)
970 softc->hw_lro.enable = min(softc->hw_lro.enable, 1);
972 softc->hw_lro.is_mode_gro = min(softc->hw_lro.is_mode_gro, 1);
974 softc->hw_lro.max_agg_segs = min(softc->hw_lro.max_agg_segs,
975 HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX);
977 softc->hw_lro.max_aggs = min(softc->hw_lro.max_aggs,
978 HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
980 softc->hw_lro.min_agg_len = min(softc->hw_lro.min_agg_len, BNXT_MAX_MTU);
/*
 * HWRM_VNIC_TPA_CFG: enable/disable hardware LRO (TPA) on the default
 * VNIC per softc->hw_lro — TPA/encap/ECN/GRE-seq flags, optional GRO
 * mode, and the aggregation limits.
 */
984 bnxt_hwrm_vnic_tpa_cfg(struct bnxt_softc *softc)
986 struct hwrm_vnic_tpa_cfg_input req = {0};
989 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_TPA_CFG);
991 if (softc->hw_lro.enable) {
992 flags = HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
993 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
994 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
995 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ;
997 if (softc->hw_lro.is_mode_gro)
998 flags |= HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO;
1000 flags |= HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE;
1002 req.flags = htole32(flags);
1004 req.enables = htole32(HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1005 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1006 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1008 req.max_agg_segs = htole16(softc->hw_lro.max_agg_segs);
1009 req.max_aggs = htole16(softc->hw_lro.max_aggs);
1010 req.min_agg_len = htole32(softc->hw_lro.min_agg_len);
1013 req.vnic_id = htole16(softc->vnic_info.id);
1015 return hwrm_send_message(softc, &req, sizeof(req));
/*
 * HWRM_NVM_FIND_DIR_ENTRY: look up an NVRAM directory entry by
 * type/ordinal/ext (or by index when use_index), returning its item and
 * data lengths, firmware version, ordinal and index through the out
 * pointers.  Runs with the extended BNXT_NVM_TIMEO, restoring the normal
 * command timeout afterward.
 */
1019 bnxt_hwrm_nvm_find_dir_entry(struct bnxt_softc *softc, uint16_t type,
1020 uint16_t *ordinal, uint16_t ext, uint16_t *index, bool use_index,
1021 uint8_t search_opt, uint32_t *data_length, uint32_t *item_length,
1024 struct hwrm_nvm_find_dir_entry_input req = {0};
1025 struct hwrm_nvm_find_dir_entry_output *resp =
1026 (void *)softc->hwrm_cmd_resp.idi_vaddr;
1032 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_FIND_DIR_ENTRY);
1034 req.enables = htole32(
1035 HWRM_NVM_FIND_DIR_ENTRY_INPUT_ENABLES_DIR_IDX_VALID);
1036 req.dir_idx = htole16(*index);
1038 req.dir_type = htole16(type);
1039 req.dir_ordinal = htole16(*ordinal);
1040 req.dir_ext = htole16(ext);
1041 req.opt_ordinal = search_opt;
1043 BNXT_HWRM_LOCK(softc);
1044 old_timeo = softc->hwrm_cmd_timeo;
1045 softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
1046 rc = _hwrm_send_message(softc, &req, sizeof(req));
1047 softc->hwrm_cmd_timeo = old_timeo;
1052 *item_length = le32toh(resp->dir_item_length);
1054 *data_length = le32toh(resp->dir_data_length);
1056 *fw_ver = le32toh(resp->fw_ver);
1057 *ordinal = le16toh(resp->dir_ordinal);
1059 *index = le16toh(resp->dir_idx);
1062 BNXT_HWRM_UNLOCK(softc);
/*
 * HWRM_NVM_READ: read 'length' bytes at 'offset' of NVRAM directory
 * entry 'index' into the caller-supplied DMA buffer.  Rejects reads
 * larger than the buffer, uses the NVM timeout, and syncs the buffer
 * for CPU reads afterward.
 */
1067 bnxt_hwrm_nvm_read(struct bnxt_softc *softc, uint16_t index, uint32_t offset,
1068 uint32_t length, struct iflib_dma_info *data)
1070 struct hwrm_nvm_read_input req = {0};
1074 if (length > data->idi_size) {
1078 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_READ);
1079 req.host_dest_addr = htole64(data->idi_paddr);
1080 req.dir_idx = htole16(index);
1081 req.offset = htole32(offset);
1082 req.len = htole32(length);
1083 BNXT_HWRM_LOCK(softc);
1084 old_timeo = softc->hwrm_cmd_timeo;
1085 softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
1086 rc = _hwrm_send_message(softc, &req, sizeof(req));
1087 softc->hwrm_cmd_timeo = old_timeo;
1088 BNXT_HWRM_UNLOCK(softc);
1091 bus_dmamap_sync(data->idi_tag, data->idi_map, BUS_DMASYNC_POSTREAD);
/*
 * HWRM_NVM_MODIFY: overwrite 'length' bytes at 'offset' of directory
 * entry 'index'.  Stages the data in a temporary DMA buffer — copyin()
 * from userspace when 'cpyin', otherwise memcpy from kernel memory —
 * then sends with the NVM timeout and frees the staging buffer.
 */
1100 bnxt_hwrm_nvm_modify(struct bnxt_softc *softc, uint16_t index, uint32_t offset,
1101 void *data, bool cpyin, uint32_t length)
1103 struct hwrm_nvm_modify_input req = {0};
1104 struct iflib_dma_info dma_data;
/* Zero-length or NULL data is rejected up front. */
1108 if (length == 0 || !data)
1110 rc = iflib_dma_alloc(softc->ctx, length, &dma_data,
1115 rc = copyin(data, dma_data.idi_vaddr, length);
1120 memcpy(dma_data.idi_vaddr, data, length);
1121 bus_dmamap_sync(dma_data.idi_tag, dma_data.idi_map,
1122 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1124 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_MODIFY);
1125 req.host_src_addr = htole64(dma_data.idi_paddr);
1126 req.dir_idx = htole16(index);
1127 req.offset = htole32(offset);
1128 req.len = htole32(length);
1129 BNXT_HWRM_LOCK(softc);
1130 old_timeo = softc->hwrm_cmd_timeo;
1131 softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
1132 rc = _hwrm_send_message(softc, &req, sizeof(req));
1133 softc->hwrm_cmd_timeo = old_timeo;
1134 BNXT_HWRM_UNLOCK(softc);
1137 iflib_dma_free(&dma_data);
/*
 * HWRM_FW_RESET: reset the given embedded processor.  *selfreset carries
 * the requested self-reset level in and the firmware-reported status out.
 */
1142 bnxt_hwrm_fw_reset(struct bnxt_softc *softc, uint8_t processor,
1145 struct hwrm_fw_reset_input req = {0};
1146 struct hwrm_fw_reset_output *resp =
1147 (void *)softc->hwrm_cmd_resp.idi_vaddr;
1152 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_RESET);
1153 req.embedded_proc_type = processor;
1154 req.selfrst_status = *selfreset;
1156 BNXT_HWRM_LOCK(softc);
1157 rc = _hwrm_send_message(softc, &req, sizeof(req));
1160 *selfreset = resp->selfrst_status;
1163 BNXT_HWRM_UNLOCK(softc);
/*
 * HWRM_FW_QSTATUS: query the self-reset status of the given embedded
 * processor type into *selfreset.
 */
1168 bnxt_hwrm_fw_qstatus(struct bnxt_softc *softc, uint8_t type, uint8_t *selfreset)
1170 struct hwrm_fw_qstatus_input req = {0};
1171 struct hwrm_fw_qstatus_output *resp =
1172 (void *)softc->hwrm_cmd_resp.idi_vaddr;
1177 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_QSTATUS);
1178 req.embedded_proc_type = type;
1180 BNXT_HWRM_LOCK(softc);
1181 rc = _hwrm_send_message(softc, &req, sizeof(req));
1184 *selfreset = resp->selfrst_status;
1187 BNXT_HWRM_UNLOCK(softc);
/*
 * HWRM_NVM_WRITE: create/write an NVRAM directory entry.  Data is staged
 * through a DMA buffer (copyin for user pointers, memcpy otherwise); the
 * 'keep' flag preserves the original active image; *item_length is both
 * an input hint and an output, and *index returns the new entry's index.
 * Uses the extended NVM timeout; frees the staging buffer on exit.
 * NOTE(review): extraction gaps — the zero-length path around the
 * paddr=0 assignment is only partially visible.
 */
1192 bnxt_hwrm_nvm_write(struct bnxt_softc *softc, void *data, bool cpyin,
1193 uint16_t type, uint16_t ordinal, uint16_t ext, uint16_t attr,
1194 uint16_t option, uint32_t data_length, bool keep, uint32_t *item_length,
1197 struct hwrm_nvm_write_input req = {0};
1198 struct hwrm_nvm_write_output *resp =
1199 (void *)softc->hwrm_cmd_resp.idi_vaddr;
1200 struct iflib_dma_info dma_data;
1205 rc = iflib_dma_alloc(softc->ctx, data_length, &dma_data,
1210 rc = copyin(data, dma_data.idi_vaddr, data_length);
1215 memcpy(dma_data.idi_vaddr, data, data_length);
1216 bus_dmamap_sync(dma_data.idi_tag, dma_data.idi_map,
1217 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1220 dma_data.idi_paddr = 0;
1222 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_WRITE);
1224 req.host_src_addr = htole64(dma_data.idi_paddr);
1225 req.dir_type = htole16(type);
1226 req.dir_ordinal = htole16(ordinal);
1227 req.dir_ext = htole16(ext);
1228 req.dir_attr = htole16(attr);
1229 req.dir_data_length = htole32(data_length);
1230 req.option = htole16(option);
1233 htole16(HWRM_NVM_WRITE_INPUT_FLAGS_KEEP_ORIG_ACTIVE_IMG);
1236 req.dir_item_length = htole32(*item_length);
1238 BNXT_HWRM_LOCK(softc);
1239 old_timeo = softc->hwrm_cmd_timeo;
1240 softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
1241 rc = _hwrm_send_message(softc, &req, sizeof(req));
1242 softc->hwrm_cmd_timeo = old_timeo;
1246 *item_length = le32toh(resp->dir_item_length);
1248 *index = le16toh(resp->dir_idx);
1251 BNXT_HWRM_UNLOCK(softc);
1254 iflib_dma_free(&dma_data);
/*
 * HWRM_NVM_ERASE_DIR_ENTRY: erase the NVRAM directory entry at 'index',
 * using the extended NVM timeout.
 */
1259 bnxt_hwrm_nvm_erase_dir_entry(struct bnxt_softc *softc, uint16_t index)
1261 struct hwrm_nvm_erase_dir_entry_input req = {0};
1265 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_ERASE_DIR_ENTRY);
1266 req.dir_idx = htole16(index);
1267 BNXT_HWRM_LOCK(softc);
1268 old_timeo = softc->hwrm_cmd_timeo;
1269 softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
1270 rc = _hwrm_send_message(softc, &req, sizeof(req));
1271 softc->hwrm_cmd_timeo = old_timeo;
1272 BNXT_HWRM_UNLOCK(softc);
/*
 * bnxt_hwrm_nvm_get_dir_info: query the NVM directory geometry via
 * HWRM_NVM_GET_DIR_INFO, returning the number of directory entries
 * and the size of each entry through the out-parameters.
 */
1277 bnxt_hwrm_nvm_get_dir_info(struct bnxt_softc *softc, uint32_t *entries,
1278 uint32_t *entry_length)
1280 struct hwrm_nvm_get_dir_info_input req = {0};
1281 struct hwrm_nvm_get_dir_info_output *resp =
1282 (void *)softc->hwrm_cmd_resp.idi_vaddr;
1286 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_GET_DIR_INFO);
1288 BNXT_HWRM_LOCK(softc);
/* NVM commands are slow; temporarily raise the HWRM timeout. */
1289 old_timeo = softc->hwrm_cmd_timeo;
1290 softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
1291 rc = _hwrm_send_message(softc, &req, sizeof(req));
1292 softc->hwrm_cmd_timeo = old_timeo;
/* Copy results back under the lock (resp aliases the shared reply buffer). */
1297 *entries = le32toh(resp->entries);
1299 *entry_length = le32toh(resp->entry_length);
1302 BNXT_HWRM_UNLOCK(softc);
/*
 * bnxt_hwrm_nvm_get_dir_entries: DMA the full NVM directory listing
 * into the caller-supplied 'dma_data' buffer via
 * HWRM_NVM_GET_DIR_ENTRIES.  The entry count/size are first fetched
 * with bnxt_hwrm_nvm_get_dir_info() and validated against the buffer
 * size.  'entries'/'entry_length' appear to be optional out-parameters
 * defaulted to locals when NULL (the fallback assignment is visible
 * below) -- NOTE(review): confirm against the full function.
 */
1307 bnxt_hwrm_nvm_get_dir_entries(struct bnxt_softc *softc, uint32_t *entries,
1308 uint32_t *entry_length, struct iflib_dma_info *dma_data)
1310 struct hwrm_nvm_get_dir_entries_input req = {0};
/* Caller passed NULL: use a local so the size check below still works. */
1319 entry_length = &ent_len;
1321 rc = bnxt_hwrm_nvm_get_dir_info(softc, entries, entry_length);
/* Reject buffers too small for the directory the firmware reported. */
1324 if (*entries * *entry_length > dma_data->idi_size) {
1330 * TODO: There's a race condition here that could blow up DMA memory...
1331 * we need to allocate the max size, not the currently in use
1332 * size. The command should totally have a max size here.
1334 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_GET_DIR_ENTRIES);
1335 req.host_dest_addr = htole64(dma_data->idi_paddr);
1336 BNXT_HWRM_LOCK(softc);
/* NVM commands are slow; temporarily raise the HWRM timeout. */
1337 old_timeo = softc->hwrm_cmd_timeo;
1338 softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
1339 rc = _hwrm_send_message(softc, &req, sizeof(req));
1340 softc->hwrm_cmd_timeo = old_timeo;
1341 BNXT_HWRM_UNLOCK(softc);
/* Make the firmware's DMA writes visible to the CPU. */
1344 bus_dmamap_sync(dma_data->idi_tag, dma_data->idi_map,
1345 BUS_DMASYNC_POSTWRITE);
/*
 * bnxt_hwrm_nvm_get_dev_info: query NVM device properties via
 * HWRM_NVM_GET_DEV_INFO (manufacturer/device IDs, sector size and the
 * total/reserved/available NVRAM sizes), copying each response field
 * through the corresponding out-parameter.
 */
1352 bnxt_hwrm_nvm_get_dev_info(struct bnxt_softc *softc, uint16_t *mfg_id,
1353 uint16_t *device_id, uint32_t *sector_size, uint32_t *nvram_size,
1354 uint32_t *reserved_size, uint32_t *available_size)
1356 struct hwrm_nvm_get_dev_info_input req = {0};
1357 struct hwrm_nvm_get_dev_info_output *resp =
1358 (void *)softc->hwrm_cmd_resp.idi_vaddr;
1362 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_GET_DEV_INFO);
1364 BNXT_HWRM_LOCK(softc);
/* NVM commands are slow; temporarily raise the HWRM timeout. */
1365 old_timeo = softc->hwrm_cmd_timeo;
1366 softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
1367 rc = _hwrm_send_message(softc, &req, sizeof(req));
1368 softc->hwrm_cmd_timeo = old_timeo;
/* Copy results back under the lock (resp aliases the shared reply buffer). */
1373 *mfg_id = le16toh(resp->manufacturer_id);
1375 *device_id = le16toh(resp->device_id);
1377 *sector_size = le32toh(resp->sector_size);
1379 *nvram_size = le32toh(resp->nvram_size);
1381 *reserved_size = le32toh(resp->reserved_size);
1383 *available_size = le32toh(resp->available_size);
1386 BNXT_HWRM_UNLOCK(softc);
/*
 * bnxt_hwrm_nvm_install_update: ask the firmware to install a staged
 * NVM update of 'install_type' via HWRM_NVM_INSTALL_UPDATE.  The
 * out-parameters (installed_items, result, problem_item,
 * reset_required) are optional; each is filled from the response only
 * when non-NULL.  Uses the longer NVM command timeout.
 */
1391 bnxt_hwrm_nvm_install_update(struct bnxt_softc *softc,
1392 uint32_t install_type, uint64_t *installed_items, uint8_t *result,
1393 uint8_t *problem_item, uint8_t *reset_required)
1395 struct hwrm_nvm_install_update_input req = {0};
1396 struct hwrm_nvm_install_update_output *resp =
1397 (void *)softc->hwrm_cmd_resp.idi_vaddr;
1401 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_INSTALL_UPDATE);
1402 req.install_type = htole32(install_type);
1404 BNXT_HWRM_LOCK(softc);
/* NVM commands are slow; temporarily raise the HWRM timeout. */
1405 old_timeo = softc->hwrm_cmd_timeo;
1406 softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
1407 rc = _hwrm_send_message(softc, &req, sizeof(req));
1408 softc->hwrm_cmd_timeo = old_timeo;
1412 if (installed_items)
/*
 * NOTE(review): installed_items is a uint64_t out-parameter but the
 * response field is converted with le32toh(); if
 * hwrm_nvm_install_update_output.installed_items is 64-bit this
 * truncates/mis-swaps on big-endian hosts -- confirm the field width
 * in hsi_struct_def.h (le64toh would be expected).
 */
1413 *installed_items = le32toh(resp->installed_items);
1415 *result = resp->result;
1417 *problem_item = resp->problem_item;
1419 *reset_required = resp->reset_required;
1422 BNXT_HWRM_UNLOCK(softc);
/*
 * bnxt_hwrm_nvm_verify_update: ask the firmware to verify the NVM
 * directory entry identified by (type, ordinal, ext) via
 * HWRM_NVM_VERIFY_UPDATE, using the longer NVM command timeout.
 */
1427 bnxt_hwrm_nvm_verify_update(struct bnxt_softc *softc, uint16_t type,
1428 uint16_t ordinal, uint16_t ext)
1430 struct hwrm_nvm_verify_update_input req = {0};
1434 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_VERIFY_UPDATE);
1436 req.dir_type = htole16(type);
1437 req.dir_ordinal = htole16(ordinal);
1438 req.dir_ext = htole16(ext);
1440 BNXT_HWRM_LOCK(softc);
/* NVM commands are slow; temporarily raise the HWRM timeout. */
1441 old_timeo = softc->hwrm_cmd_timeo;
1442 softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
1443 rc = _hwrm_send_message(softc, &req, sizeof(req));
1444 softc->hwrm_cmd_timeo = old_timeo;
1445 BNXT_HWRM_UNLOCK(softc);
/*
 * bnxt_hwrm_fw_get_time: read the firmware's current wall-clock time
 * via HWRM_FW_GET_TIME.  Each out-parameter is optional and filled
 * from the response when non-NULL; multi-byte fields are converted
 * from little-endian.
 */
1450 bnxt_hwrm_fw_get_time(struct bnxt_softc *softc, uint16_t *year, uint8_t *month,
1451 uint8_t *day, uint8_t *hour, uint8_t *minute, uint8_t *second,
1452 uint16_t *millisecond, uint16_t *zone)
1454 struct hwrm_fw_get_time_input req = {0};
1455 struct hwrm_fw_get_time_output *resp =
1456 (void *)softc->hwrm_cmd_resp.idi_vaddr;
1459 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_GET_TIME);
1461 BNXT_HWRM_LOCK(softc);
1462 rc = _hwrm_send_message(softc, &req, sizeof(req));
/* Copy results back under the lock (resp aliases the shared reply buffer). */
1467 *year = le16toh(resp->year);
1469 *month = resp->month;
1475 *minute = resp->minute;
1477 *second = resp->second;
1479 *millisecond = le16toh(resp->millisecond);
1481 *zone = le16toh(resp->zone);
1484 BNXT_HWRM_UNLOCK(softc);
/*
 * bnxt_hwrm_fw_set_time: set the firmware's wall-clock time via
 * HWRM_FW_SET_TIME.  Multi-byte fields are converted to little-endian;
 * returns the HWRM send status.
 */
1489 bnxt_hwrm_fw_set_time(struct bnxt_softc *softc, uint16_t year, uint8_t month,
1490 uint8_t day, uint8_t hour, uint8_t minute, uint8_t second,
1491 uint16_t millisecond, uint16_t zone)
1493 struct hwrm_fw_set_time_input req = {0};
1495 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_SET_TIME);
1497 req.year = htole16(year);
1501 req.minute = minute;
1502 req.second = second;
1503 req.millisecond = htole16(millisecond);
1504 req.zone = htole16(zone);
1505 return hwrm_send_message(softc, &req, sizeof(req));
/*
 * bnxt_hwrm_port_phy_qcfg: query the current PHY/link configuration
 * via HWRM_PORT_PHY_QCFG and cache the results in softc->link_info
 * (link state, duplex, autoneg/pause, speeds, PHY identity) and in
 * softc->ver_info (PHY firmware version and vendor strings).  All
 * response parsing happens under the HWRM lock since 'resp' aliases
 * the shared reply buffer.
 */
1509 bnxt_hwrm_port_phy_qcfg(struct bnxt_softc *softc)
1511 struct bnxt_link_info *link_info = &softc->link_info;
1512 struct hwrm_port_phy_qcfg_input req = {0};
1513 struct hwrm_port_phy_qcfg_output *resp =
1514 (void *)softc->hwrm_cmd_resp.idi_vaddr;
1517 BNXT_HWRM_LOCK(softc);
1518 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_PHY_QCFG);
1520 rc = _hwrm_send_message(softc, &req, sizeof(req));
/* Basic link state. */
1524 link_info->phy_link_status = resp->link;
1525 link_info->duplex = resp->duplex_cfg;
1526 link_info->auto_mode = resp->auto_mode;
1529 * When AUTO_PAUSE_AUTONEG_PAUSE bit is set to 1,
1530 * the advertisement of pause is enabled.
1531 * 1. When the auto_mode is not set to none and this flag is set to 1,
1532 * then the auto_pause bits on this port are being advertised and
1533 * autoneg pause results are being interpreted.
1534 * 2. When the auto_mode is not set to none and this flag is set to 0,
1535 * the pause is forced as indicated in force_pause, and also
1536 * advertised as auto_pause bits, but the autoneg results are not
1537 * interpreted since the pause configuration is being forced.
1538 * 3. When the auto_mode is set to none and this flag is set to 1,
1539 * auto_pause bits should be ignored and should be set to 0.
/* Recompute flow control from scratch on every query. */
1542 link_info->flow_ctrl.autoneg = false;
1543 link_info->flow_ctrl.tx = false;
1544 link_info->flow_ctrl.rx = false;
1546 if ((resp->auto_mode) &&
1547 (resp->auto_pause & BNXT_AUTO_PAUSE_AUTONEG_PAUSE)) {
1548 link_info->flow_ctrl.autoneg = true;
/* Autoneg on: pause state comes from auto_pause; else from force_pause. */
1551 if (link_info->flow_ctrl.autoneg) {
1552 if (resp->auto_pause & BNXT_PAUSE_TX)
1553 link_info->flow_ctrl.tx = true;
1554 if (resp->auto_pause & BNXT_PAUSE_RX)
1555 link_info->flow_ctrl.rx = true;
1557 if (resp->force_pause & BNXT_PAUSE_TX)
1558 link_info->flow_ctrl.tx = true;
1559 if (resp->force_pause & BNXT_PAUSE_RX)
1560 link_info->flow_ctrl.rx = true;
1563 link_info->duplex_setting = resp->duplex_cfg;
/* Speed is only meaningful while the link is up. */
1564 if (link_info->phy_link_status == HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK)
1565 link_info->link_speed = le16toh(resp->link_speed);
1567 link_info->link_speed = 0;
1568 link_info->force_link_speed = le16toh(resp->force_link_speed);
1569 link_info->auto_link_speed = le16toh(resp->auto_link_speed);
1570 link_info->support_speeds = le16toh(resp->support_speeds);
1571 link_info->auto_link_speeds = le16toh(resp->auto_link_speed_mask);
1572 link_info->preemphasis = le32toh(resp->preemphasis);
/* PHY firmware version, rendered as "maj.min.bld". */
1573 link_info->phy_ver[0] = resp->phy_maj;
1574 link_info->phy_ver[1] = resp->phy_min;
1575 link_info->phy_ver[2] = resp->phy_bld;
1576 snprintf(softc->ver_info->phy_ver, sizeof(softc->ver_info->phy_ver),
1577 "%d.%d.%d", link_info->phy_ver[0], link_info->phy_ver[1],
1578 link_info->phy_ver[2]);
1579 strlcpy(softc->ver_info->phy_vendor, resp->phy_vendor_name,
1581 strlcpy(softc->ver_info->phy_partnumber, resp->phy_vendor_partnumber,
/* PHY identity and addressing. */
1583 link_info->media_type = resp->media_type;
1584 link_info->phy_type = resp->phy_type;
1585 link_info->transceiver = resp->xcvr_pkg_type;
1586 link_info->phy_addr = resp->eee_config_phy_addr &
1587 HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_MASK;
1590 BNXT_HWRM_UNLOCK(softc);
/*
 * bnxt_hwrm_get_wol_fltrs: query the Wake-on-LAN filter at 'handle'
 * via HWRM_WOL_FILTER_QCFG.  When a magic-packet filter is found its
 * id is cached in softc->wol_filter_id.  Presumably returns
 * next_handle so callers can walk the whole filter list -- the return
 * statement is outside this view; confirm against the full function.
 */
1595 bnxt_hwrm_get_wol_fltrs(struct bnxt_softc *softc, uint16_t handle)
1597 struct hwrm_wol_filter_qcfg_input req = {0};
1598 struct hwrm_wol_filter_qcfg_output *resp =
1599 (void *)softc->hwrm_cmd_resp.idi_vaddr;
1600 uint16_t next_handle = 0;
1603 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_WOL_FILTER_QCFG);
1604 req.port_id = htole16(softc->pf.port_id);
1605 req.handle = htole16(handle);
1606 rc = hwrm_send_message(softc, &req, sizeof(req));
1608 next_handle = le16toh(resp->next_handle);
/* next_handle == 0 terminates the firmware's filter list. */
1609 if (next_handle != 0) {
1610 if (resp->wol_type ==
1611 HWRM_WOL_FILTER_ALLOC_INPUT_WOL_TYPE_MAGICPKT) {
/* Remember the magic-packet filter so it can be freed later. */
1613 softc->wol_filter_id = resp->wol_filter_id;
/*
 * bnxt_hwrm_alloc_wol_fltr: allocate a magic-packet Wake-on-LAN
 * filter for this port's MAC address via HWRM_WOL_FILTER_ALLOC and
 * cache the returned filter id in softc->wol_filter_id.
 */
1621 bnxt_hwrm_alloc_wol_fltr(struct bnxt_softc *softc)
1623 struct hwrm_wol_filter_alloc_input req = {0};
1624 struct hwrm_wol_filter_alloc_output *resp =
1625 (void *)softc->hwrm_cmd_resp.idi_vaddr;
1628 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_WOL_FILTER_ALLOC);
1629 req.port_id = htole16(softc->pf.port_id);
1630 req.wol_type = HWRM_WOL_FILTER_ALLOC_INPUT_WOL_TYPE_MAGICPKT;
/* Only the MAC address field of the request is valid. */
1632 htole32(HWRM_WOL_FILTER_ALLOC_INPUT_ENABLES_MAC_ADDRESS);
1633 memcpy(req.mac_address, softc->func.mac_addr, ETHER_ADDR_LEN);
1634 rc = hwrm_send_message(softc, &req, sizeof(req));
1636 softc->wol_filter_id = resp->wol_filter_id;
/*
 * bnxt_hwrm_free_wol_fltr: release the cached Wake-on-LAN filter
 * (softc->wol_filter_id) via HWRM_WOL_FILTER_FREE; returns the HWRM
 * send status.
 */
1642 bnxt_hwrm_free_wol_fltr(struct bnxt_softc *softc)
1644 struct hwrm_wol_filter_free_input req = {0};
1646 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_WOL_FILTER_FREE);
1647 req.port_id = htole16(softc->pf.port_id);
/* Only the wol_filter_id field of the request is valid. */
1649 htole32(HWRM_WOL_FILTER_FREE_INPUT_ENABLES_WOL_FILTER_ID);
1650 req.wol_filter_id = softc->wol_filter_id;
1651 return hwrm_send_message(softc, &req, sizeof(req));
/*
 * bnxt_hwrm_set_coal_params: populate an interrupt-coalescing request.
 * 'max_frames' and 'buf_tmrs' each pack two 16-bit values: the low
 * half is the normal (non-interrupt) setting, the high half the
 * during-interrupt setting.  The min/max inter-interrupt latencies and
 * the aggregation interrupt count are derived from the low halves.
 */
1654 static void bnxt_hwrm_set_coal_params(struct bnxt_softc *softc, uint32_t max_frames,
1655 uint32_t buf_tmrs, uint16_t flags,
1656 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
1658 req->flags = htole16(flags);
/* Low 16 bits: normal setting; high 16 bits: during-interrupt setting. */
1659 req->num_cmpl_dma_aggr = htole16((uint16_t)max_frames);
1660 req->num_cmpl_dma_aggr_during_int = htole16(max_frames >> 16);
1661 req->cmpl_aggr_dma_tmr = htole16((uint16_t)buf_tmrs);
1662 req->cmpl_aggr_dma_tmr_during_int = htole16(buf_tmrs >> 16);
1663 /* Minimum time between 2 interrupts set to buf_tmr x 2 */
1664 req->int_lat_tmr_min = htole16((uint16_t)buf_tmrs * 2);
1665 req->int_lat_tmr_max = htole16((uint16_t)buf_tmrs * 4);
1666 req->num_cmpl_aggr_int = htole16((uint16_t)max_frames * 4);
/*
 * bnxt_hwrm_set_coal: program interrupt coalescing for every RX
 * completion ring from the softc's rx/tx_coal_{frames,usecs}[_irq]
 * tunables.  Two HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS requests are
 * prepared (RX and TX flavors) via bnxt_hwrm_set_coal_params(), then
 * one is sent per ring group's completion ring.
 */
1670 int bnxt_hwrm_set_coal(struct bnxt_softc *softc)
1673 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
1675 uint16_t max_buf, max_buf_irq;
1676 uint16_t buf_tmr, buf_tmr_irq;
1679 bnxt_hwrm_cmd_hdr_init(softc, &req_rx,
1680 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
1681 bnxt_hwrm_cmd_hdr_init(softc, &req_tx,
1682 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
1684 /* Each rx completion (2 records) should be DMAed immediately.
1685 * DMA 1/4 of the completion buffers at a time.
1687 max_buf = min_t(uint16_t, softc->rx_coal_frames / 4, 2);
1688 /* max_buf must not be zero */
1689 max_buf = clamp_t(uint16_t, max_buf, 1, 63);
1690 max_buf_irq = clamp_t(uint16_t, softc->rx_coal_frames_irq, 1, 63);
/* Convert microsecond tunables into firmware coalescing-timer ticks. */
1691 buf_tmr = BNXT_USEC_TO_COAL_TIMER(softc->rx_coal_usecs);
1692 /* buf timer set to 1/4 of interrupt timer */
1693 buf_tmr = max_t(uint16_t, buf_tmr / 4, 1);
1694 buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(softc->rx_coal_usecs_irq);
1695 buf_tmr_irq = max_t(uint16_t, buf_tmr_irq, 1);
1697 flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET;
1699 /* RING_IDLE generates more IRQs for lower latency. Enable it only
1700 * if coal_usecs is less than 25 us.
1702 if (softc->rx_coal_usecs < 25)
1703 flags |= HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
/* Pack (irq << 16 | normal) pairs as bnxt_hwrm_set_coal_params() expects. */
1705 bnxt_hwrm_set_coal_params(softc, max_buf_irq << 16 | max_buf,
1706 buf_tmr_irq << 16 | buf_tmr, flags, &req_rx);
1708 /* max_buf must not be zero */
1709 max_buf = clamp_t(uint16_t, softc->tx_coal_frames, 1, 63);
1710 max_buf_irq = clamp_t(uint16_t, softc->tx_coal_frames_irq, 1, 63);
1711 buf_tmr = BNXT_USEC_TO_COAL_TIMER(softc->tx_coal_usecs);
1712 /* buf timer set to 1/4 of interrupt timer */
1713 buf_tmr = max_t(uint16_t, buf_tmr / 4, 1);
1714 buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(softc->tx_coal_usecs_irq);
1715 buf_tmr_irq = max_t(uint16_t, buf_tmr_irq, 1);
1716 flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET;
1717 bnxt_hwrm_set_coal_params(softc, max_buf_irq << 16 | max_buf,
1718 buf_tmr_irq << 16 | buf_tmr, flags, &req_tx);
/* Program each RX ring group's completion ring. */
1720 for (i = 0; i < softc->nrxqsets; i++) {
1726 * Check if Tx also needs to be done
1727 * So far, Tx processing has been done in softirq contest
1731 req->ring_id = htole16(softc->grp_info[i].cp_ring_id);
1733 rc = hwrm_send_message(softc, req, sizeof(*req));
/*
 * bnxt_hwrm_func_rgtr_async_events: register with the firmware for
 * async event notifications via HWRM_FUNC_DRV_RGTR.  A 256-bit bitmap
 * of event IDs is built from a default set of link/config events plus
 * any caller-supplied bits in 'bmap' (bmap_size bits, optional), then
 * copied into the request's async_event_fwd words.  Returns the HWRM
 * send status.
 */
1742 int bnxt_hwrm_func_rgtr_async_events(struct bnxt_softc *softc, unsigned long *bmap,
1745 struct hwrm_func_drv_rgtr_input req = {0};
1746 bitstr_t *async_events_bmap;
/* 256 event-ID bits; M_ZERO hands it back already cleared. */
1750 async_events_bmap = bit_alloc(256, M_DEVBUF, M_WAITOK|M_ZERO);
1751 events = (uint32_t *)async_events_bmap;
1753 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_DRV_RGTR);
/* Only the async_event_fwd fields of the request are valid. */
1756 htole32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
/*
 * Clear the full 256-bit (32-byte) bitmap.  The previous code passed
 * sizeof(256 / 8) -- i.e. sizeof(int), only 4 bytes -- which was
 * masked solely by the M_ZERO allocation above.
 */
1758 memset(async_events_bmap, 0, 256 / 8);
/* Events the driver always registers for. */
1760 bit_set(async_events_bmap, HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
1761 bit_set(async_events_bmap, HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
1762 bit_set(async_events_bmap, HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED);
1763 bit_set(async_events_bmap, HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE);
1764 bit_set(async_events_bmap, HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
/* Merge in any extra events the caller asked for. */
1766 if (bmap && bmap_size) {
1767 for (i = 0; i < bmap_size; i++) {
1768 if (bit_test(bmap, i))
1769 bit_set(async_events_bmap, i);
/* Copy the bitmap into the request as 8 little-endian 32-bit words. */
1773 for (i = 0; i < 8; i++)
1774 req.async_event_fwd[i] |= htole32(events[i]);
1776 free(async_events_bmap, M_DEVBUF);
1778 return hwrm_send_message(softc, &req, sizeof(req));