2 * Broadcom NetXtreme-C/E network driver.
4 * Copyright (c) 2016 Broadcom, All Rights Reserved.
5 * The term Broadcom refers to Broadcom Limited and/or its subsidiaries
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26 * THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 #include <sys/endian.h>
33 #include "bnxt_hwrm.h"
34 #include "hsi_struct_def.h"
36 static int bnxt_hwrm_err_map(uint16_t err);
37 static inline int _is_valid_ether_addr(uint8_t *);
38 static inline void get_random_ether_addr(uint8_t *);
39 static void bnxt_hwrm_set_link_common(struct bnxt_softc *softc,
40 struct hwrm_port_phy_cfg_input *req);
41 static void bnxt_hwrm_set_pause_common(struct bnxt_softc *softc,
42 struct hwrm_port_phy_cfg_input *req);
43 static void bnxt_hwrm_set_eee(struct bnxt_softc *softc,
44 struct hwrm_port_phy_cfg_input *req);
45 static int _hwrm_send_message(struct bnxt_softc *, void *, uint32_t);
46 static int hwrm_send_message(struct bnxt_softc *, void *, uint32_t);
47 static void bnxt_hwrm_cmd_hdr_init(struct bnxt_softc *, void *, uint16_t);
49 /* NVRam stuff has a five minute timeout */
50 #define BNXT_NVM_TIMEO (5 * 60 * 1000)
/*
 * Translate a HWRM firmware error code into a driver errno-style value.
 * NOTE(review): this extract appears truncated -- the return type line,
 * opening brace, switch header, and the return statement for each case
 * group are not visible here; only the case labels survive.
 */
53 bnxt_hwrm_err_map(uint16_t err)
58 case HWRM_ERR_CODE_SUCCESS:
60 case HWRM_ERR_CODE_INVALID_PARAMS:
61 case HWRM_ERR_CODE_INVALID_FLAGS:
62 case HWRM_ERR_CODE_INVALID_ENABLES:
64 case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
66 case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
68 case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
70 case HWRM_ERR_CODE_FAIL:
72 case HWRM_ERR_CODE_HWRM_ERROR:
73 case HWRM_ERR_CODE_UNKNOWN_ERR:
/*
 * Allocate the PAGE_SIZE DMA buffer used to receive HWRM command responses
 * (softc->hwrm_cmd_resp).
 * NOTE(review): truncated extract -- return type, braces, local `rc`
 * declaration, trailing allocation arguments, and the return are missing.
 */
82 bnxt_alloc_hwrm_dma_mem(struct bnxt_softc *softc)
86 rc = iflib_dma_alloc(softc->ctx, PAGE_SIZE, &softc->hwrm_cmd_resp,
/*
 * Release the HWRM command-response DMA buffer, if it was allocated,
 * and clear the stale vaddr so a double free / use-after-free is avoided.
 * NOTE(review): return type line and braces are not visible in this extract.
 */
92 bnxt_free_hwrm_dma_mem(struct bnxt_softc *softc)
94 if (softc->hwrm_cmd_resp.idi_vaddr)
95 iflib_dma_free(&softc->hwrm_cmd_resp);
96 softc->hwrm_cmd_resp.idi_vaddr = NULL;
/*
 * Fill in the common HWRM request header: request type (little-endian),
 * "no completion ring" / "no target" sentinels (0xffff), and the DMA
 * address where firmware should write the response.
 * NOTE(review): the `req_type` parameter line and braces are missing
 * from this extract.
 */
101 bnxt_hwrm_cmd_hdr_init(struct bnxt_softc *softc, void *request,
104 struct input *req = request;
106 req->req_type = htole16(req_type);
107 req->cmpl_ring = 0xffff;
108 req->target_id = 0xffff;
109 req->resp_addr = htole64(softc->hwrm_cmd_resp.idi_paddr);
/*
 * Core HWRM transmit path (caller must hold the HWRM lock -- see the
 * hwrm_send_message() wrapper).  Stamps a sequence id on the request,
 * writes it word-by-word into the HWRM communication BAR, rings the
 * doorbell, then polls the DMA response buffer: first for a non-zero
 * response length, then for the "valid" key in the response's last byte.
 * Firmware error codes are mapped to errno values via bnxt_hwrm_err_map().
 *
 * Requests longer than BNXT_HWRM_MAX_REQ_LEN (or any request when the
 * device demands it via BNXT_FLAG_SHORT_CMD) are sent indirectly: the
 * full message is copied into a separate DMA buffer and a small
 * hwrm_short_input descriptor pointing at it is written to the BAR instead.
 *
 * NOTE(review): this extract is truncated -- local declarations (i, err,
 * valid, rc), several closing braces, the DELAY/poll bodies, timeout
 * return paths, and the success return are not visible here.
 */
113 _hwrm_send_message(struct bnxt_softc *softc, void *msg, uint32_t msg_len)
115 struct input *req = msg;
116 struct hwrm_err_output *resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
117 uint32_t *data = msg;
121 uint16_t max_req_len = BNXT_HWRM_MAX_REQ_LEN;
122 struct hwrm_short_input short_input = {0};
124 /* TODO: DMASYNC in here. */
125 req->seq_id = htole16(softc->hwrm_cmd_seq++);
126 memset(resp, 0, PAGE_SIZE);
/* Indirect "short command" path for oversized requests. */
128 if ((softc->flags & BNXT_FLAG_SHORT_CMD) ||
129 msg_len > BNXT_HWRM_MAX_REQ_LEN) {
130 void *short_cmd_req = softc->hwrm_short_cmd_req_addr.idi_vaddr;
131 uint16_t max_msg_len;
133 /* Set boundary for maximum extended request length for short
134 * cmd format. If passed up from device use the max supported
135 * internal req length.
138 max_msg_len = softc->hwrm_max_ext_req_len;
/* Copy the full request into its DMA buffer, zero-padding the tail. */
141 memcpy(short_cmd_req, req, msg_len);
142 if (msg_len < max_msg_len)
143 memset((uint8_t *) short_cmd_req + msg_len, 0,
144 max_msg_len - msg_len);
146 short_input.req_type = req->req_type;
147 short_input.signature =
148 htole16(HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
149 short_input.size = htole16(msg_len);
150 short_input.req_addr =
151 htole64(softc->hwrm_short_cmd_req_addr.idi_paddr);
/* From here on, transmit the short descriptor instead of the message. */
153 data = (uint32_t *)&short_input;
154 msg_len = sizeof(short_input);
156 /* Sync memory write before updating doorbell */
159 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
162 /* Write request msg to hwrm channel */
163 for (i = 0; i < msg_len; i += 4) {
164 bus_space_write_4(softc->hwrm_bar.tag,
165 softc->hwrm_bar.handle,
170 /* Clear to the end of the request buffer */
171 for (i = msg_len; i < max_req_len; i += 4)
172 bus_space_write_4(softc->hwrm_bar.tag, softc->hwrm_bar.handle,
175 /* Ring channel doorbell */
176 bus_space_write_4(softc->hwrm_bar.tag,
177 softc->hwrm_bar.handle,
180 /* Check if response len is updated */
181 for (i = 0; i < softc->hwrm_cmd_timeo; i++) {
182 if (resp->resp_len && resp->resp_len <= 4096)
186 if (i >= softc->hwrm_cmd_timeo) {
187 device_printf(softc->dev,
188 "Timeout sending %s: (timeout: %u) seq: %d\n",
189 GET_HWRM_REQ_TYPE(req->req_type), softc->hwrm_cmd_timeo,
190 le16toh(req->seq_id));
193 /* Last byte of resp contains the valid key */
194 valid = (uint8_t *)resp + resp->resp_len - 1;
195 for (i = 0; i < softc->hwrm_cmd_timeo; i++) {
196 if (*valid == HWRM_RESP_VALID_KEY)
200 if (i >= softc->hwrm_cmd_timeo) {
201 device_printf(softc->dev, "Timeout sending %s: "
202 "(timeout: %u) msg {0x%x 0x%x} len:%d v: %d\n",
203 GET_HWRM_REQ_TYPE(req->req_type),
204 softc->hwrm_cmd_timeo, le16toh(req->req_type),
205 le16toh(req->seq_id), msg_len,
210 err = le16toh(resp->error_code);
212 /* HWRM_ERR_CODE_FAIL is a "normal" error, don't log */
213 if (err != HWRM_ERR_CODE_FAIL) {
214 device_printf(softc->dev,
215 "%s command returned %s error.\n",
216 GET_HWRM_REQ_TYPE(req->req_type),
217 GET_HWRM_ERROR_CODE(err));
219 return bnxt_hwrm_err_map(err);
/*
 * Locked wrapper around _hwrm_send_message(): acquires the HWRM mutex,
 * sends one command, releases the mutex, and (per the missing tail)
 * presumably returns the result -- the return type line, braces, `rc`
 * declaration, and return statement are not visible in this extract.
 */
226 hwrm_send_message(struct bnxt_softc *softc, void *msg, uint32_t msg_len)
230 BNXT_HWRM_LOCK(softc);
231 rc = _hwrm_send_message(softc, msg, msg_len);
232 BNXT_HWRM_UNLOCK(softc);
/*
 * Issue HWRM_QUEUE_QPORTCFG and cache the CoS queue configuration:
 * per-queue id/profile pairs, traffic-class limits (capped at
 * BNXT_MAX_COS_QUEUE), lossless-queue count, and the asymmetric-config
 * flag from queue_cfg_info.
 * NOTE(review): truncated extract -- return type, braces, local
 * declarations (rc, i, j, qptr, no_rdma), error-path goto/labels, loop
 * body continuation, and the final return are not visible.
 */
237 bnxt_hwrm_queue_qportcfg(struct bnxt_softc *softc)
240 struct hwrm_queue_qportcfg_input req = {0};
241 struct hwrm_queue_qportcfg_output *resp =
242 (void *)softc->hwrm_cmd_resp.idi_vaddr;
246 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_QUEUE_QPORTCFG);
248 BNXT_HWRM_LOCK(softc);
249 rc = _hwrm_send_message(softc, &req, sizeof(req));
/* A device reporting zero configurable queues is treated as an error. */
253 if (!resp->max_configurable_queues) {
257 softc->max_tc = resp->max_configurable_queues;
258 softc->max_lltc = resp->max_configurable_lossless_queues;
259 if (softc->max_tc > BNXT_MAX_COS_QUEUE)
260 softc->max_tc = BNXT_MAX_COS_QUEUE;
262 /* Currently no RDMA support */
/* Walk the packed id/profile byte pairs starting at queue_id0. */
265 qptr = &resp->queue_id0;
266 for (i = 0, j = 0; i < softc->max_tc; i++) {
267 softc->q_info[j].id = *qptr;
268 softc->q_ids[i] = *qptr++;
269 softc->q_info[j].profile = *qptr++;
270 softc->tc_to_qidx[j] = j;
271 if (!BNXT_CNPQ(softc->q_info[j].profile) ||
272 (no_rdma && BNXT_PF(softc)))
275 softc->max_q = softc->max_tc;
276 softc->max_tc = max_t(uint32_t, j, 1);
278 if (resp->queue_cfg_info & HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_CFG_INFO_ASYM_CFG)
281 if (softc->max_lltc > softc->max_tc)
282 softc->max_lltc = softc->max_tc;
285 BNXT_HWRM_UNLOCK(softc);
/*
 * Query backing-store capabilities (HWRM_FUNC_BACKING_STORE_QCAPS) and,
 * on success, allocate and populate softc->ctx_mem with the firmware's
 * context-memory limits (QP/SRQ/CQ/VNIC/STAT/TQM/MRAV/TIM entry counts
 * and sizes).  Skipped for pre-1.9.2 firmware, VFs, or when ctx_mem is
 * already set.
 * NOTE(review): truncated extract -- braces, allocation-failure error
 * paths, the `i` declaration, exit label, and final return are not
 * visible here.
 */
289 int bnxt_hwrm_func_backing_store_qcaps(struct bnxt_softc *softc)
291 struct hwrm_func_backing_store_qcaps_input req = {0};
292 struct hwrm_func_backing_store_qcaps_output *resp =
293 (void *)softc->hwrm_cmd_resp.idi_vaddr;
296 if (softc->hwrm_spec_code < 0x10902 || BNXT_VF(softc) || softc->ctx_mem)
299 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_BACKING_STORE_QCAPS);
300 BNXT_HWRM_LOCK(softc);
301 rc = _hwrm_send_message(softc, &req, sizeof(req));
303 struct bnxt_ctx_pg_info *ctx_pg;
304 struct bnxt_ctx_mem_info *ctx;
307 ctx = malloc(sizeof(*ctx), M_DEVBUF, M_NOWAIT | M_ZERO);
/* One TQM page-info slot per queue plus one for the slow path. */
312 ctx_pg = malloc(sizeof(*ctx_pg) * (softc->max_q + 1),
313 M_DEVBUF, M_NOWAIT | M_ZERO);
319 for (i = 0; i < softc->max_q + 1; i++, ctx_pg++)
320 ctx->tqm_mem[i] = ctx_pg;
322 softc->ctx_mem = ctx;
/* Copy every limit out of the (little-endian) response. */
323 ctx->qp_max_entries = le32toh(resp->qp_max_entries);
324 ctx->qp_min_qp1_entries = le16toh(resp->qp_min_qp1_entries);
325 ctx->qp_max_l2_entries = le16toh(resp->qp_max_l2_entries);
326 ctx->qp_entry_size = le16toh(resp->qp_entry_size);
327 ctx->srq_max_l2_entries = le16toh(resp->srq_max_l2_entries);
328 ctx->srq_max_entries = le32toh(resp->srq_max_entries);
329 ctx->srq_entry_size = le16toh(resp->srq_entry_size);
330 ctx->cq_max_l2_entries = le16toh(resp->cq_max_l2_entries);
331 ctx->cq_max_entries = le32toh(resp->cq_max_entries);
332 ctx->cq_entry_size = le16toh(resp->cq_entry_size);
333 ctx->vnic_max_vnic_entries =
334 le16toh(resp->vnic_max_vnic_entries);
335 ctx->vnic_max_ring_table_entries =
336 le16toh(resp->vnic_max_ring_table_entries);
337 ctx->vnic_entry_size = le16toh(resp->vnic_entry_size);
338 ctx->stat_max_entries = le32toh(resp->stat_max_entries);
339 ctx->stat_entry_size = le16toh(resp->stat_entry_size);
340 ctx->tqm_entry_size = le16toh(resp->tqm_entry_size);
341 ctx->tqm_min_entries_per_ring =
342 le32toh(resp->tqm_min_entries_per_ring);
343 ctx->tqm_max_entries_per_ring =
344 le32toh(resp->tqm_max_entries_per_ring);
345 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
346 if (!ctx->tqm_entries_multiple)
347 ctx->tqm_entries_multiple = 1;
348 ctx->mrav_max_entries = le32toh(resp->mrav_max_entries);
349 ctx->mrav_entry_size = le16toh(resp->mrav_entry_size);
350 ctx->tim_entry_size = le16toh(resp->tim_entry_size);
351 ctx->tim_max_entries = le32toh(resp->tim_max_entries);
352 ctx->ctx_kind_initializer = resp->ctx_kind_initializer;
357 BNXT_HWRM_UNLOCK(softc);
361 #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES \
362 (HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP | \
363 HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ | \
364 HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ | \
365 HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC | \
366 HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT)
/*
 * Encode a ring-memory region's page size and indirection level into the
 * pg_attr byte and point pg_dir at either the page-table (depth >= 1)
 * or the first data page (flat layout).
 * NOTE(review): truncated extract -- the pg_dir parameter line, braces,
 * the page-size attribute assignments for the BNXT_PAGE_SHIFT /
 * BNXT_PAGE_SIZE branches, and the else arms are not visible.
 */
368 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, uint8_t *pg_attr,
373 if (BNXT_PAGE_SHIFT == 13)
375 else if (BNXT_PAGE_SIZE == 16)
379 if (rmem->depth >= 1) {
380 if (rmem->depth == 2)
381 *pg_attr |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_2;
383 *pg_attr |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_1;
384 *pg_dir = htole64(rmem->pg_tbl.idi_paddr);
386 *pg_dir = htole64(rmem->pg_arr[0].idi_paddr);
/*
 * Program the firmware's backing-store configuration
 * (HWRM_FUNC_BACKING_STORE_CFG) from softc->ctx_mem.  For each region
 * selected in `enables` (QP, SRQ, CQ, MRAV, TIM, VNIC, STAT, and the
 * nine TQM rings) it fills the corresponding entry counts/sizes and
 * page attributes via bnxt_hwrm_set_pg_attr().  Falls back to the
 * legacy (shorter) request length when the extended size exceeds what
 * the device supports.
 * NOTE(review): truncated extract -- braces, declarations of i/pg_attr/
 * pg_dir/ena/rc, the early-return when ctx is NULL, the pg_dir arguments
 * of several bnxt_hwrm_set_pg_attr() calls, and the final return are
 * not visible.
 */
390 int bnxt_hwrm_func_backing_store_cfg(struct bnxt_softc *softc, uint32_t enables)
392 struct hwrm_func_backing_store_cfg_input req = {0};
393 struct bnxt_ctx_mem_info *ctx = softc->ctx_mem;
394 struct bnxt_ctx_pg_info *ctx_pg;
395 uint32_t *num_entries, req_len = sizeof(req);
404 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_BACKING_STORE_CFG);
405 req.enables = htole32(enables);
407 if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP) {
408 ctx_pg = &ctx->qp_mem;
409 req.qp_num_entries = htole32(ctx_pg->entries);
410 req.qp_num_qp1_entries = htole16(ctx->qp_min_qp1_entries);
411 req.qp_num_l2_entries = htole16(ctx->qp_max_l2_entries);
412 req.qp_entry_size = htole16(ctx->qp_entry_size);
413 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
414 &req.qpc_pg_size_qpc_lvl,
417 if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ) {
418 ctx_pg = &ctx->srq_mem;
419 req.srq_num_entries = htole32(ctx_pg->entries);
420 req.srq_num_l2_entries = htole16(ctx->srq_max_l2_entries);
421 req.srq_entry_size = htole16(ctx->srq_entry_size);
422 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
423 &req.srq_pg_size_srq_lvl,
426 if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ) {
427 ctx_pg = &ctx->cq_mem;
428 req.cq_num_entries = htole32(ctx_pg->entries);
429 req.cq_num_l2_entries = htole16(ctx->cq_max_l2_entries);
430 req.cq_entry_size = htole16(ctx->cq_entry_size);
431 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
434 if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_MRAV) {
435 ctx_pg = &ctx->mrav_mem;
436 req.mrav_num_entries = htole32(ctx_pg->entries);
437 req.mrav_entry_size = htole16(ctx->mrav_entry_size);
438 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
439 &req.mrav_pg_size_mrav_lvl,
442 if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TIM) {
443 ctx_pg = &ctx->tim_mem;
444 req.tim_num_entries = htole32(ctx_pg->entries);
445 req.tim_entry_size = htole16(ctx->tim_entry_size);
446 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
447 &req.tim_pg_size_tim_lvl,
450 if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC) {
451 ctx_pg = &ctx->vnic_mem;
452 req.vnic_num_vnic_entries =
453 htole16(ctx->vnic_max_vnic_entries);
454 req.vnic_num_ring_table_entries =
455 htole16(ctx->vnic_max_ring_table_entries);
456 req.vnic_entry_size = htole16(ctx->vnic_entry_size);
457 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
458 &req.vnic_pg_size_vnic_lvl,
461 if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT) {
462 ctx_pg = &ctx->stat_mem;
463 req.stat_num_entries = htole32(ctx->stat_max_entries);
464 req.stat_entry_size = htole16(ctx->stat_entry_size);
465 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
466 &req.stat_pg_size_stat_lvl,
/* The 9 TQM rings share consecutive request fields; walk them with
 * parallel pointers, shifting the enable bit each iteration. */
469 for (i = 0, num_entries = &req.tqm_sp_num_entries,
470 pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
471 pg_dir = &req.tqm_sp_page_dir,
472 ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP;
473 i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
474 if (!(enables & ena))
477 req.tqm_entry_size = htole16(ctx->tqm_entry_size);
478 ctx_pg = ctx->tqm_mem[i];
479 *num_entries = htole32(ctx_pg->entries);
480 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
483 if (req_len > softc->hwrm_max_ext_req_len)
484 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
486 rc = hwrm_send_message(softc, &req, req_len);
/*
 * Query per-function resource capabilities (HWRM_FUNC_RESOURCE_QCAPS)
 * into softc->hw_resc.  max_tx_sch_inputs is always captured; the full
 * min/max ring, context, vnic and stat limits are copied only when
 * `all` is set (the intervening `if (!all)` guard is among the missing
 * lines).  On P5 chips, NQ count comes from max_msix and ring groups
 * track RX rings.
 * NOTE(review): truncated extract -- braces, `int rc`, the error check
 * after send, and the final return are not visible.
 */
492 int bnxt_hwrm_func_resc_qcaps(struct bnxt_softc *softc, bool all)
494 struct hwrm_func_resource_qcaps_output *resp =
495 (void *)softc->hwrm_cmd_resp.idi_vaddr;
496 struct hwrm_func_resource_qcaps_input req = {0};
497 struct bnxt_hw_resc *hw_resc = &softc->hw_resc;
500 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_RESOURCE_QCAPS);
501 req.fid = htole16(0xffff);
503 BNXT_HWRM_LOCK(softc);
504 rc = _hwrm_send_message(softc, &req, sizeof(req));
507 goto hwrm_func_resc_qcaps_exit;
510 hw_resc->max_tx_sch_inputs = le16toh(resp->max_tx_scheduler_inputs);
512 goto hwrm_func_resc_qcaps_exit;
514 hw_resc->min_rsscos_ctxs = le16toh(resp->min_rsscos_ctx);
515 hw_resc->max_rsscos_ctxs = le16toh(resp->max_rsscos_ctx);
516 hw_resc->min_cp_rings = le16toh(resp->min_cmpl_rings);
517 hw_resc->max_cp_rings = le16toh(resp->max_cmpl_rings);
518 hw_resc->min_tx_rings = le16toh(resp->min_tx_rings);
519 hw_resc->max_tx_rings = le16toh(resp->max_tx_rings);
520 hw_resc->min_rx_rings = le16toh(resp->min_rx_rings);
521 hw_resc->max_rx_rings = le16toh(resp->max_rx_rings);
522 hw_resc->min_hw_ring_grps = le16toh(resp->min_hw_ring_grps);
523 hw_resc->max_hw_ring_grps = le16toh(resp->max_hw_ring_grps);
524 hw_resc->min_l2_ctxs = le16toh(resp->min_l2_ctxs);
525 hw_resc->max_l2_ctxs = le16toh(resp->max_l2_ctxs);
526 hw_resc->min_vnics = le16toh(resp->min_vnics);
527 hw_resc->max_vnics = le16toh(resp->max_vnics);
528 hw_resc->min_stat_ctxs = le16toh(resp->min_stat_ctx);
529 hw_resc->max_stat_ctxs = le16toh(resp->max_stat_ctx);
531 if (BNXT_CHIP_P5(softc)) {
532 hw_resc->max_nqs = le16toh(resp->max_msix);
533 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
536 hwrm_func_resc_qcaps_exit:
537 BNXT_HWRM_UNLOCK(softc);
/*
 * Send an arbitrary caller-built HWRM request (e.g. from a management
 * ioctl) and copy the raw response back to the caller's buffer.  The
 * command timeout is temporarily widened: NVM_INSTALL_UPDATE gets the
 * five-minute NVM timeout, everything else max(app_timeout, current).
 * NOTE(review): truncated extract -- return type, braces, rc/old_timeo
 * declarations, the else keyword before line 556, the error-path return,
 * and the final return are not visible.
 */
542 bnxt_hwrm_passthrough(struct bnxt_softc *softc, void *req, uint32_t req_len,
543 void *resp, uint32_t resp_len, uint32_t app_timeout)
546 void *output = (void *)softc->hwrm_cmd_resp.idi_vaddr;
547 struct input *input = req;
550 input->resp_addr = htole64(softc->hwrm_cmd_resp.idi_paddr);
551 BNXT_HWRM_LOCK(softc);
552 old_timeo = softc->hwrm_cmd_timeo;
553 if (input->req_type == HWRM_NVM_INSTALL_UPDATE)
554 softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
556 softc->hwrm_cmd_timeo = max(app_timeout, softc->hwrm_cmd_timeo);
557 rc = _hwrm_send_message(softc, req, req_len);
558 softc->hwrm_cmd_timeo = old_timeo;
560 device_printf(softc->dev, "%s: %s command failed with rc: 0x%x\n",
561 __FUNCTION__, GET_HWRM_REQ_TYPE(input->req_type), rc);
565 memcpy(resp, output, resp_len);
567 BNXT_HWRM_UNLOCK(softc);
/*
 * Issue HWRM_VER_GET (the first command sent to the device) and record:
 * interface/firmware/management/netctrl/RoCE version strings, the
 * combined hwrm_spec_code, chip identification, the firmware-advertised
 * max request lengths and default command timeout, and whether the
 * short-command format is required (BNXT_FLAG_SHORT_CMD).  Components
 * reporting an all-zero version are shown as "<N/A>"/"<not installed>".
 * NOTE(review): truncated extract -- return type, braces, `int rc`,
 * error-path goto, several else keywords and closing braces between the
 * version branches, and the final return are not visible.
 */
573 bnxt_hwrm_ver_get(struct bnxt_softc *softc)
575 struct hwrm_ver_get_input req = {0};
576 struct hwrm_ver_get_output *resp =
577 (void *)softc->hwrm_cmd_resp.idi_vaddr;
579 const char nastr[] = "<not installed>";
580 const char naver[] = "<N/A>";
581 uint32_t dev_caps_cfg;
582 uint16_t fw_maj, fw_min, fw_bld, fw_rsv, len;
/* Conservative defaults until the device tells us otherwise. */
584 softc->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
585 softc->hwrm_cmd_timeo = 1000;
586 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VER_GET);
588 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
589 req.hwrm_intf_min = HWRM_VERSION_MINOR;
590 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
592 BNXT_HWRM_LOCK(softc);
593 rc = _hwrm_send_message(softc, &req, sizeof(req));
597 snprintf(softc->ver_info->hwrm_if_ver, BNXT_VERSTR_SIZE, "%d.%d.%d",
598 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, resp->hwrm_intf_upd_8b);
599 softc->ver_info->hwrm_if_major = resp->hwrm_intf_maj_8b;
600 softc->ver_info->hwrm_if_minor = resp->hwrm_intf_min_8b;
601 softc->ver_info->hwrm_if_update = resp->hwrm_intf_upd_8b;
602 snprintf(softc->ver_info->hwrm_fw_ver, BNXT_VERSTR_SIZE, "%d.%d.%d",
603 resp->hwrm_fw_major, resp->hwrm_fw_minor, resp->hwrm_fw_build);
604 strlcpy(softc->ver_info->driver_hwrm_if_ver, HWRM_VERSION_STR,
606 strlcpy(softc->ver_info->hwrm_fw_name, resp->hwrm_fw_name,
609 softc->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
610 resp->hwrm_intf_min_8b << 8 |
611 resp->hwrm_intf_upd_8b;
612 if (resp->hwrm_intf_maj_8b < 1) {
613 device_printf(softc->dev, "HWRM interface %d.%d.%d is older "
614 "than 1.0.0.\n", resp->hwrm_intf_maj_8b,
615 resp->hwrm_intf_min_8b, resp->hwrm_intf_upd_8b);
616 device_printf(softc->dev, "Please update firmware with HWRM "
617 "interface 1.0.0 or newer.\n");
619 if (resp->mgmt_fw_major == 0 && resp->mgmt_fw_minor == 0 &&
620 resp->mgmt_fw_build == 0) {
621 strlcpy(softc->ver_info->mgmt_fw_ver, naver, BNXT_VERSTR_SIZE);
622 strlcpy(softc->ver_info->mgmt_fw_name, nastr, BNXT_NAME_SIZE);
625 snprintf(softc->ver_info->mgmt_fw_ver, BNXT_VERSTR_SIZE,
626 "%d.%d.%d", resp->mgmt_fw_major, resp->mgmt_fw_minor,
627 resp->mgmt_fw_build);
628 strlcpy(softc->ver_info->mgmt_fw_name, resp->mgmt_fw_name,
631 if (resp->netctrl_fw_major == 0 && resp->netctrl_fw_minor == 0 &&
632 resp->netctrl_fw_build == 0) {
633 strlcpy(softc->ver_info->netctrl_fw_ver, naver,
635 strlcpy(softc->ver_info->netctrl_fw_name, nastr,
639 snprintf(softc->ver_info->netctrl_fw_ver, BNXT_VERSTR_SIZE,
640 "%d.%d.%d", resp->netctrl_fw_major, resp->netctrl_fw_minor,
641 resp->netctrl_fw_build);
642 strlcpy(softc->ver_info->netctrl_fw_name, resp->netctrl_fw_name,
645 if (resp->roce_fw_major == 0 && resp->roce_fw_minor == 0 &&
646 resp->roce_fw_build == 0) {
647 strlcpy(softc->ver_info->roce_fw_ver, naver, BNXT_VERSTR_SIZE);
648 strlcpy(softc->ver_info->roce_fw_name, nastr, BNXT_NAME_SIZE);
651 snprintf(softc->ver_info->roce_fw_ver, BNXT_VERSTR_SIZE,
652 "%d.%d.%d", resp->roce_fw_major, resp->roce_fw_minor,
653 resp->roce_fw_build);
654 strlcpy(softc->ver_info->roce_fw_name, resp->roce_fw_name,
/* Newer firmware (spec > 1.8.3) reports wide version fields; older
 * firmware uses the legacy 8-bit fields. */
658 fw_maj = le32toh(resp->hwrm_fw_major);
659 if (softc->hwrm_spec_code > 0x10803 && fw_maj) {
660 fw_min = le16toh(resp->hwrm_fw_minor);
661 fw_bld = le16toh(resp->hwrm_fw_build);
662 fw_rsv = le16toh(resp->hwrm_fw_patch);
663 len = FW_VER_STR_LEN;
665 fw_maj = resp->hwrm_fw_maj_8b;
666 fw_min = resp->hwrm_fw_min_8b;
667 fw_bld = resp->hwrm_fw_bld_8b;
668 fw_rsv = resp->hwrm_fw_rsvd_8b;
669 len = BC_HWRM_STR_LEN;
672 snprintf (softc->ver_info->fw_ver_str, len, "%d.%d.%d.%d",
673 fw_maj, fw_min, fw_bld, fw_rsv);
675 if (strlen(resp->active_pkg_name)) {
676 int fw_ver_len = strlen (softc->ver_info->fw_ver_str);
678 snprintf(softc->ver_info->fw_ver_str + fw_ver_len,
679 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
680 resp->active_pkg_name);
683 softc->ver_info->chip_num = le16toh(resp->chip_num);
684 softc->ver_info->chip_rev = resp->chip_rev;
685 softc->ver_info->chip_metal = resp->chip_metal;
686 softc->ver_info->chip_bond_id = resp->chip_bond_id;
687 softc->ver_info->chip_type = resp->chip_platform_type;
689 if (resp->hwrm_intf_maj_8b >= 1) {
690 softc->hwrm_max_req_len = le16toh(resp->max_req_win_len);
691 softc->hwrm_max_ext_req_len = le16toh(resp->max_ext_req_len);
693 #define DFLT_HWRM_CMD_TIMEOUT 500
694 softc->hwrm_cmd_timeo = le16toh(resp->def_req_timeout);
695 if (!softc->hwrm_cmd_timeo)
696 softc->hwrm_cmd_timeo = DFLT_HWRM_CMD_TIMEOUT;
698 dev_caps_cfg = le32toh(resp->dev_caps_cfg);
699 if ((dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
700 (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
701 softc->flags |= BNXT_FLAG_SHORT_CMD;
704 BNXT_HWRM_UNLOCK(softc);
/*
 * Register this driver with the firmware (HWRM_FUNC_DRV_RGTR),
 * advertising OS type FreeBSD and the driver version derived from
 * __FreeBSD_version (major / minor / update digits).
 * NOTE(review): the return type line and braces are not visible in this
 * extract.
 */
709 bnxt_hwrm_func_drv_rgtr(struct bnxt_softc *softc)
711 struct hwrm_func_drv_rgtr_input req = {0};
713 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_DRV_RGTR);
715 req.enables = htole32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
716 HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_OS_TYPE);
717 req.os_type = htole16(HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_FREEBSD);
719 req.ver_maj = __FreeBSD_version / 100000;
720 req.ver_min = (__FreeBSD_version / 1000) % 100;
721 req.ver_upd = (__FreeBSD_version / 100) % 10;
723 return hwrm_send_message(softc, &req, sizeof(req));
/*
 * Unregister the driver from firmware (HWRM_FUNC_DRV_UNRGTR); when
 * `shutdown` is set, also flag PREPARE_FOR_SHUTDOWN so firmware quiesces.
 * NOTE(review): truncated extract -- the `req.flags =` line preceding the
 * shutdown-flag constant (line 734) is not visible.
 */
727 bnxt_hwrm_func_drv_unrgtr(struct bnxt_softc *softc, bool shutdown)
729 struct hwrm_func_drv_unrgtr_input req = {0};
731 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_DRV_UNRGTR);
732 if (shutdown == true)
734 HWRM_FUNC_DRV_UNRGTR_INPUT_FLAGS_PREPARE_FOR_SHUTDOWN;
735 return hwrm_send_message(softc, &req, sizeof(req));
/*
 * Return false for a multicast (low bit of first octet set) or all-zero
 * MAC address; otherwise the address is considered valid.
 * NOTE(review): truncated extract -- return type, braces, and both
 * return statements are missing; only the rejection test is visible.
 */
739 _is_valid_ether_addr(uint8_t *addr)
741 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
743 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN)))
/*
 * Generate a random MAC address with arc4rand() and copy it to `addr`.
 * NOTE(review): truncated extract -- the lines between the arc4rand()
 * call and the bcopy() (presumably clearing the multicast bit and/or
 * setting the locally-administered bit -- confirm against the original
 * source) are missing, as are the return type and braces.
 */
750 get_random_ether_addr(uint8_t *addr)
752 uint8_t temp[ETHER_ADDR_LEN];
754 arc4rand(&temp, sizeof(temp), 0);
757 bcopy(temp, addr, sizeof(temp));
/*
 * Query function capabilities (HWRM_FUNC_QCAPS): WoL magic-packet
 * support, permanent MAC, and max ring/context/vnic counts into
 * softc->func; PF-only fields (port id, VF range, flow/record limits)
 * into softc->pf.  An invalid firmware MAC is replaced with a random
 * locally administered one.
 * NOTE(review): truncated extract -- return type, braces, `int rc`,
 * error-path goto, the `if (resp->flags & ...)` line preceding line 778,
 * and the final return are not visible.
 */
761 bnxt_hwrm_func_qcaps(struct bnxt_softc *softc)
764 struct hwrm_func_qcaps_input req = {0};
765 struct hwrm_func_qcaps_output *resp =
766 (void *)softc->hwrm_cmd_resp.idi_vaddr;
767 struct bnxt_func_info *func = &softc->func;
769 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_QCAPS);
770 req.fid = htole16(0xffff);
772 BNXT_HWRM_LOCK(softc);
773 rc = _hwrm_send_message(softc, &req, sizeof(req));
778 htole32(HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_MAGICPKT_SUPPORTED))
779 softc->flags |= BNXT_FLAG_WOL_CAP;
781 func->fw_fid = le16toh(resp->fid);
782 memcpy(func->mac_addr, resp->mac_address, ETHER_ADDR_LEN);
783 func->max_rsscos_ctxs = le16toh(resp->max_rsscos_ctx);
784 func->max_cp_rings = le16toh(resp->max_cmpl_rings);
785 func->max_tx_rings = le16toh(resp->max_tx_rings);
786 func->max_rx_rings = le16toh(resp->max_rx_rings);
787 func->max_hw_ring_grps = le32toh(resp->max_hw_ring_grps);
788 if (!func->max_hw_ring_grps)
789 func->max_hw_ring_grps = func->max_tx_rings;
790 func->max_l2_ctxs = le16toh(resp->max_l2_ctxs);
791 func->max_vnics = le16toh(resp->max_vnics);
792 func->max_stat_ctxs = le16toh(resp->max_stat_ctx);
793 if (BNXT_PF(softc)) {
794 struct bnxt_pf_info *pf = &softc->pf;
796 pf->port_id = le16toh(resp->port_id);
797 pf->first_vf_id = le16toh(resp->first_vf_id);
798 pf->max_vfs = le16toh(resp->max_vfs);
799 pf->max_encap_records = le32toh(resp->max_encap_records);
800 pf->max_decap_records = le32toh(resp->max_decap_records);
801 pf->max_tx_em_flows = le32toh(resp->max_tx_em_flows);
802 pf->max_tx_wm_flows = le32toh(resp->max_tx_wm_flows);
803 pf->max_rx_em_flows = le32toh(resp->max_rx_em_flows);
804 pf->max_rx_wm_flows = le32toh(resp->max_rx_wm_flows);
806 if (!_is_valid_ether_addr(func->mac_addr)) {
807 device_printf(softc->dev, "Invalid ethernet address, generating random locally administered address\n");
808 get_random_ether_addr(func->mac_addr);
812 BNXT_HWRM_UNLOCK(softc);
/*
 * Query the function's current resource allocation (HWRM_FUNC_QCFG)
 * and record the allocated completion/TX/RX ring and vnic counts in
 * softc->fn_qcfg.
 * NOTE(review): truncated extract -- return type, braces, `int rc`,
 * error-path goto, and the final return are not visible.
 */
817 bnxt_hwrm_func_qcfg(struct bnxt_softc *softc)
819 struct hwrm_func_qcfg_input req = {0};
820 struct hwrm_func_qcfg_output *resp =
821 (void *)softc->hwrm_cmd_resp.idi_vaddr;
822 struct bnxt_func_qcfg *fn_qcfg = &softc->fn_qcfg;
825 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_QCFG);
826 req.fid = htole16(0xffff);
827 BNXT_HWRM_LOCK(softc);
828 rc = _hwrm_send_message(softc, &req, sizeof(req));
832 fn_qcfg->alloc_completion_rings = le16toh(resp->alloc_cmpl_rings);
833 fn_qcfg->alloc_tx_rings = le16toh(resp->alloc_tx_rings);
834 fn_qcfg->alloc_rx_rings = le16toh(resp->alloc_rx_rings);
835 fn_qcfg->alloc_vnics = le16toh(resp->alloc_vnics);
837 BNXT_HWRM_UNLOCK(softc);
/*
 * Ask firmware to reset this function (HWRM_FUNC_RESET).
 * NOTE(review): return type line, braces, and a line between header
 * init and send (original line 847/848) are not visible in this extract.
 */
842 bnxt_hwrm_func_reset(struct bnxt_softc *softc)
844 struct hwrm_func_reset_input req = {0};
846 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_RESET);
849 return hwrm_send_message(softc, &req, sizeof(req));
/*
 * Populate the speed/autoneg portion of a PHY config request: either
 * advertise all speeds with autoneg restart, or force the requested
 * link speed.  Always sets RESET_PHY so the change applies immediately.
 * NOTE(review): truncated extract -- the `req->auto_mode =` /
 * `req->enables |=` / `req->flags |=` left-hand sides for several of
 * the constant lines, the else keyword, and braces are not visible.
 */
853 bnxt_hwrm_set_link_common(struct bnxt_softc *softc,
854 struct hwrm_port_phy_cfg_input *req)
856 uint8_t autoneg = softc->link_info.autoneg;
857 uint16_t fw_link_speed = softc->link_info.req_link_speed;
859 if (autoneg & BNXT_AUTONEG_SPEED) {
861 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
864 htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE);
866 htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG);
868 req->force_link_speed = htole16(fw_link_speed);
869 req->flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE);
872 /* tell chimp that the setting takes effect immediately */
873 req->flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY);
/*
 * Populate the flow-control portion of a PHY config request.  With
 * pause autoneg, advertise RX/TX pause via auto_pause; otherwise force
 * the configured RX/TX pause bits and (for pre-1.5.4 firmware, per the
 * surrounding constants) mirror force_pause into auto_pause.
 * NOTE(review): truncated extract -- the `req->auto_pause |=` /
 * `req->force_pause |=` / `req->enables |=` left-hand sides, the else
 * keyword, and braces are not visible.
 */
877 bnxt_hwrm_set_pause_common(struct bnxt_softc *softc,
878 struct hwrm_port_phy_cfg_input *req)
880 struct bnxt_link_info *link_info = &softc->link_info;
882 if (link_info->flow_ctrl.autoneg) {
884 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_AUTONEG_PAUSE;
885 if (link_info->flow_ctrl.rx)
887 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX;
888 if (link_info->flow_ctrl.tx)
890 HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX;
892 htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE);
894 if (link_info->flow_ctrl.rx)
896 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX;
897 if (link_info->flow_ctrl.tx)
899 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX;
901 htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE);
902 req->auto_pause = req->force_pause;
904 htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE);
908 /* JFV this needs interface connection */
/*
 * Populate the Energy-Efficient-Ethernet portion of a PHY config
 * request.  EEE support is currently stubbed out (eee_enabled is
 * hard-coded false), so in practice only the EEE_DISABLE flag path
 * runs; the enable path (speeds mask, LPI timer) is dead code kept for
 * when the `eee` state is wired up.
 * NOTE(review): truncated extract -- braces, the `if (eee_enabled)` /
 * else structure, local eee_speeds declaration, and the `req->flags |=`
 * left-hand side of line 930 are not visible.
 */
910 bnxt_hwrm_set_eee(struct bnxt_softc *softc, struct hwrm_port_phy_cfg_input *req)
912 /* struct ethtool_eee *eee = &softc->eee; */
913 bool eee_enabled = false;
918 uint32_t flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_ENABLE;
920 if (eee->tx_lpi_enabled)
921 flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI;
923 req->flags |= htole32(flags);
924 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
925 req->eee_link_speed_mask = htole16(eee_speeds);
926 req->tx_lpi_timer = htole32(eee->tx_lpi_timer);
930 htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_DISABLE);
/*
 * Build and send a combined HWRM_PORT_PHY_CFG request covering any of
 * pause, link speed/autoneg, and EEE settings, as selected by the three
 * flags.  NPAR functions skip PHY configuration entirely.  After a
 * successful forced-pause change, re-report link state since no async
 * link event will fire.
 * NOTE(review): truncated extract -- return type, braces, `int rc`, the
 * `if (set_pause)` / `if (set_link)` / `if (set_eee)` guards around the
 * helper calls, and the final return are not visible.
 */
935 bnxt_hwrm_set_link_setting(struct bnxt_softc *softc, bool set_pause,
936 bool set_eee, bool set_link)
938 struct hwrm_port_phy_cfg_input req = {0};
941 if (softc->flags & BNXT_FLAG_NPAR)
944 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_PHY_CFG);
947 bnxt_hwrm_set_pause_common(softc, &req);
949 if (softc->link_info.flow_ctrl.autoneg)
954 bnxt_hwrm_set_link_common(softc, &req);
957 bnxt_hwrm_set_eee(softc, &req);
959 BNXT_HWRM_LOCK(softc);
960 rc = _hwrm_send_message(softc, &req, sizeof(req));
964 /* since changing of 'force pause' setting doesn't
965 * trigger any link change event, the driver needs to
966 * update the current pause result upon successfully i
967 * return of the phy_cfg command */
968 if (!softc->link_info.flow_ctrl.autoneg)
969 bnxt_report_link(softc);
972 BNXT_HWRM_UNLOCK(softc);
/*
 * Configure VNIC buffer placement (HWRM_VNIC_PLCMODES_CFG) with jumbo
 * placement enabled.  Only issued on P5-class chips; earlier chips
 * return without sending anything.
 * NOTE(review): return type line, braces, and the early-return body of
 * the !P5 check are not visible in this extract.
 */
977 bnxt_hwrm_vnic_set_hds(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
979 struct hwrm_vnic_plcmodes_cfg_input req = {0};
981 if (!BNXT_CHIP_P5(softc))
984 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_PLCMODES_CFG);
987 * TBD -- Explore these flags
988 * 1. VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4
989 * 2. VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6
990 * 3. req.jumbo_thresh
991 * 4. req.hds_threshold
993 req.flags = htole32(HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
994 req.vnic_id = htole16(vnic->id);
996 return hwrm_send_message(softc, &req, sizeof(req));
/*
 * Configure a VNIC (HWRM_VNIC_CFG): default/BD-stall/VLAN-strip flags
 * from the vnic's flag bits, then chip-specific defaults -- P5 chips
 * get explicit default RX ring and completion ring ids, older chips get
 * a default ring group plus RSS rule.  Common fields (rss/cos/lb rules,
 * MRU) follow.
 * NOTE(review): truncated extract -- return type, braces, the
 * `req.enables =` left-hand side for the P5 branch (line 1018), and the
 * else keyword between the two chip branches are not visible.
 */
1000 bnxt_hwrm_vnic_cfg(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
1002 struct hwrm_vnic_cfg_input req = {0};
1004 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_CFG);
1006 if (vnic->flags & BNXT_VNIC_FLAG_DEFAULT)
1007 req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1008 if (vnic->flags & BNXT_VNIC_FLAG_BD_STALL)
1009 req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1010 if (vnic->flags & BNXT_VNIC_FLAG_VLAN_STRIP)
1011 req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1012 if (BNXT_CHIP_P5 (softc)) {
1013 req.default_rx_ring_id =
1014 htole16(softc->rx_rings[0].phys_id);
1015 req.default_cmpl_ring_id =
1016 htole16(softc->rx_cp_rings[0].ring.phys_id);
1018 htole32(HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID |
1019 HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID);
1020 req.vnic_id = htole16(vnic->id);
1022 req.enables = htole32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
1023 HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE);
1024 req.vnic_id = htole16(vnic->id);
1025 req.dflt_ring_grp = htole16(vnic->def_ring_grp);
1027 req.rss_rule = htole16(vnic->rss_id);
1028 req.cos_rule = htole16(vnic->cos_rule);
1029 req.lb_rule = htole16(vnic->lb_rule);
1030 req.enables |= htole32(HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
1031 req.mru = htole16(vnic->mru);
1033 return hwrm_send_message(softc, &req, sizeof(req));
/*
 * Free a firmware VNIC (HWRM_VNIC_FREE).  A vnic whose id is the
 * HWRM_NA_SIGNATURE sentinel was never allocated and is skipped.
 * NOTE(review): return type line, braces, `int rc`, the error-path
 * lines after send, and the final return are not visible in this extract.
 */
1037 bnxt_hwrm_vnic_free(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
1039 struct hwrm_vnic_free_input req = {0};
1042 if (vnic->id == (uint16_t)HWRM_NA_SIGNATURE)
1045 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_FREE);
1047 req.vnic_id = htole32(vnic->id);
1049 BNXT_HWRM_LOCK(softc);
1050 rc = _hwrm_send_message(softc, &req, sizeof(req));
1055 BNXT_HWRM_UNLOCK(softc);
/*
 * Allocate a firmware VNIC (HWRM_VNIC_ALLOC) and store the returned id
 * in vnic->id.  Refuses to re-allocate a vnic that already has a valid
 * (non-HWRM_NA_SIGNATURE) id.
 * NOTE(review): return type line, braces, `int rc`, the error return
 * after the re-allocation check, and the final return are not visible.
 */
1060 bnxt_hwrm_vnic_alloc(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
1062 struct hwrm_vnic_alloc_input req = {0};
1063 struct hwrm_vnic_alloc_output *resp =
1064 (void *)softc->hwrm_cmd_resp.idi_vaddr;
1067 if (vnic->id != (uint16_t)HWRM_NA_SIGNATURE) {
1068 device_printf(softc->dev,
1069 "Attempt to re-allocate vnic %04x\n", vnic->id);
1073 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_ALLOC);
1075 if (vnic->flags & BNXT_VNIC_FLAG_DEFAULT)
1076 req.flags = htole32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
1078 BNXT_HWRM_LOCK(softc);
1079 rc = _hwrm_send_message(softc, &req, sizeof(req));
1083 vnic->id = le32toh(resp->vnic_id);
1086 BNXT_HWRM_UNLOCK(softc);
/*
 * Free an RSS/CoS/LB context (HWRM_VNIC_RSS_COS_LB_CTX_FREE).  The
 * HWRM_NA_SIGNATURE sentinel means "never allocated" and is skipped.
 * NOTE(review): return type line, braces, `int rc`, the error-path
 * lines after send, and the final return are not visible in this extract.
 */
1091 bnxt_hwrm_vnic_ctx_free(struct bnxt_softc *softc, uint16_t ctx_id)
1093 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
1096 if (ctx_id == (uint16_t)HWRM_NA_SIGNATURE)
1099 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE);
1100 req.rss_cos_lb_ctx_id = htole16(ctx_id);
1101 BNXT_HWRM_LOCK(softc);
1102 rc = _hwrm_send_message(softc, &req, sizeof(req));
1107 BNXT_HWRM_UNLOCK(softc);
/*
 * Allocate an RSS/CoS/LB context (HWRM_VNIC_RSS_COS_LB_CTX_ALLOC) and
 * return its id through *ctx_id.  Refuses to re-allocate a context that
 * already holds a valid (non-HWRM_NA_SIGNATURE) id.
 * NOTE(review): return type line, braces, `int rc`, the error return
 * after the re-allocation check, and the final return are not visible.
 */
1112 bnxt_hwrm_vnic_ctx_alloc(struct bnxt_softc *softc, uint16_t *ctx_id)
1114 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
1115 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1116 (void *)softc->hwrm_cmd_resp.idi_vaddr;
1119 if (*ctx_id != (uint16_t)HWRM_NA_SIGNATURE) {
1120 device_printf(softc->dev,
1121 "Attempt to re-allocate vnic ctx %04x\n", *ctx_id);
1125 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
1127 BNXT_HWRM_LOCK(softc);
1128 rc = _hwrm_send_message(softc, &req, sizeof(req));
1132 *ctx_id = le32toh(resp->rss_cos_lb_ctx_id);
1135 BNXT_HWRM_UNLOCK(softc);
1140 bnxt_hwrm_ring_grp_alloc(struct bnxt_softc *softc, struct bnxt_grp_info *grp)
1142 struct hwrm_ring_grp_alloc_input req = {0};
1143 struct hwrm_ring_grp_alloc_output *resp;
1146 if (grp->grp_id != (uint16_t)HWRM_NA_SIGNATURE) {
1147 device_printf(softc->dev,
1148 "Attempt to re-allocate ring group %04x\n", grp->grp_id);
1152 if (BNXT_CHIP_P5 (softc))
1155 resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
1156 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_GRP_ALLOC);
1157 req.cr = htole16(grp->cp_ring_id);
1158 req.rr = htole16(grp->rx_ring_id);
1159 req.ar = htole16(grp->ag_ring_id);
1160 req.sc = htole16(grp->stats_ctx);
1162 BNXT_HWRM_LOCK(softc);
1163 rc = _hwrm_send_message(softc, &req, sizeof(req));
1167 grp->grp_id = le32toh(resp->ring_group_id);
1170 BNXT_HWRM_UNLOCK(softc);
1175 bnxt_hwrm_ring_grp_free(struct bnxt_softc *softc, struct bnxt_grp_info *grp)
1177 struct hwrm_ring_grp_free_input req = {0};
1180 if (grp->grp_id == (uint16_t)HWRM_NA_SIGNATURE)
1183 if (BNXT_CHIP_P5 (softc))
1186 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_GRP_FREE);
1188 req.ring_group_id = htole32(grp->grp_id);
1190 BNXT_HWRM_LOCK(softc);
1191 rc = _hwrm_send_message(softc, &req, sizeof(req));
1196 BNXT_HWRM_UNLOCK(softc);
1200 int bnxt_hwrm_ring_free(struct bnxt_softc *softc, uint32_t ring_type,
1201 struct bnxt_ring *ring, int cmpl_ring_id)
1203 struct hwrm_ring_free_input req = {0};
1204 struct hwrm_ring_free_output *resp;
1206 uint16_t error_code;
1208 if (ring->phys_id == (uint16_t)HWRM_NA_SIGNATURE)
1211 resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
1212 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_FREE);
1213 req.cmpl_ring = htole16(cmpl_ring_id);
1214 req.ring_type = ring_type;
1215 req.ring_id = htole16(ring->phys_id);
1217 BNXT_HWRM_LOCK(softc);
1218 rc = _hwrm_send_message(softc, &req, sizeof(req));
1219 error_code = le16toh(resp->error_code);
1221 if (rc || error_code) {
1222 device_printf(softc->dev, "hwrm_ring_free type %d failed. "
1223 "rc:%x err:%x\n", ring_type, rc, error_code);
1228 BNXT_HWRM_UNLOCK(softc);
1233 * Ring allocation message to the firmware
1236 bnxt_hwrm_ring_alloc(struct bnxt_softc *softc, uint8_t type,
1237 struct bnxt_ring *ring)
1239 struct hwrm_ring_alloc_input req = {0};
1240 struct hwrm_ring_alloc_output *resp;
1241 uint16_t idx = ring->idx;
1242 struct bnxt_cp_ring *cp_ring;
1245 if (ring->phys_id != (uint16_t)HWRM_NA_SIGNATURE) {
1246 device_printf(softc->dev,
1247 "Attempt to re-allocate ring %04x\n", ring->phys_id);
1251 resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
1252 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_ALLOC);
1253 req.enables = htole32(0);
1254 req.fbo = htole32(0);
1255 req.ring_type = type;
1256 req.page_tbl_addr = htole64(ring->paddr);
1257 req.logical_id = htole16(ring->id);
1258 req.length = htole32(ring->ring_size);
1261 case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1262 cp_ring = &softc->tx_cp_rings[idx];
1264 req.cmpl_ring_id = htole16(cp_ring->ring.phys_id);
1265 /* queue_id - what CoS queue the TX ring is associated with */
1266 req.queue_id = htole16(softc->q_info[0].id);
1268 req.stat_ctx_id = htole32(cp_ring->stats_ctx_id);
1269 req.enables |= htole32(
1270 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
1272 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1273 if (!BNXT_CHIP_P5(softc))
1276 cp_ring = &softc->rx_cp_rings[idx];
1278 req.stat_ctx_id = htole32(cp_ring->stats_ctx_id);
1279 req.rx_buf_size = htole16(softc->rx_buf_size);
1280 req.enables |= htole32(
1281 HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID |
1282 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
1284 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
1285 if (!BNXT_CHIP_P5(softc)) {
1286 req.ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX;
1290 cp_ring = &softc->rx_cp_rings[idx];
1292 req.rx_ring_id = htole16(softc->rx_rings[idx].phys_id);
1293 req.stat_ctx_id = htole32(cp_ring->stats_ctx_id);
1294 req.rx_buf_size = htole16(softc->rx_buf_size);
1295 req.enables |= htole32(
1296 HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID |
1297 HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID |
1298 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
1300 case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1301 if (!BNXT_CHIP_P5(softc)) {
1302 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1306 req.cq_handle = htole64(ring->id);
1307 req.nq_ring_id = htole16(softc->nq_rings[idx].ring.phys_id);
1308 req.enables |= htole32(
1309 HWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID);
1311 case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
1312 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1315 printf("hwrm alloc invalid ring type %d\n", type);
1319 BNXT_HWRM_LOCK(softc);
1320 rc = _hwrm_send_message(softc, &req, sizeof(req));
1324 ring->phys_id = le16toh(resp->ring_id);
1327 BNXT_HWRM_UNLOCK(softc);
1332 bnxt_hwrm_stat_ctx_free(struct bnxt_softc *softc, struct bnxt_cp_ring *cpr)
1334 struct hwrm_stat_ctx_free_input req = {0};
1337 if (cpr->stats_ctx_id == HWRM_NA_SIGNATURE)
1340 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_STAT_CTX_FREE);
1342 req.stat_ctx_id = htole16(cpr->stats_ctx_id);
1343 BNXT_HWRM_LOCK(softc);
1344 rc = _hwrm_send_message(softc, &req, sizeof(req));
1349 BNXT_HWRM_UNLOCK(softc);
1355 bnxt_hwrm_stat_ctx_alloc(struct bnxt_softc *softc, struct bnxt_cp_ring *cpr,
1358 struct hwrm_stat_ctx_alloc_input req = {0};
1359 struct hwrm_stat_ctx_alloc_output *resp;
1362 if (cpr->stats_ctx_id != HWRM_NA_SIGNATURE) {
1363 device_printf(softc->dev,
1364 "Attempt to re-allocate stats ctx %08x\n",
1369 resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
1370 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_STAT_CTX_ALLOC);
1372 req.update_period_ms = htole32(1000);
1373 req.stats_dma_addr = htole64(paddr);
1374 if (BNXT_CHIP_P5(softc))
1375 req.stats_dma_length = htole16(sizeof(struct ctx_hw_stats_ext) - 8);
1377 req.stats_dma_length = htole16(sizeof(struct ctx_hw_stats));
1379 BNXT_HWRM_LOCK(softc);
1380 rc = _hwrm_send_message(softc, &req, sizeof(req));
1384 cpr->stats_ctx_id = le32toh(resp->stat_ctx_id);
1387 BNXT_HWRM_UNLOCK(softc);
1393 bnxt_hwrm_port_qstats(struct bnxt_softc *softc)
1395 struct hwrm_port_qstats_input req = {0};
1398 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_QSTATS);
1400 req.port_id = htole16(softc->pf.port_id);
1401 req.rx_stat_host_addr = htole64(softc->hw_rx_port_stats.idi_paddr);
1402 req.tx_stat_host_addr = htole64(softc->hw_tx_port_stats.idi_paddr);
1404 BNXT_HWRM_LOCK(softc);
1405 rc = _hwrm_send_message(softc, &req, sizeof(req));
1406 BNXT_HWRM_UNLOCK(softc);
1412 bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt_softc *softc,
1413 struct bnxt_vnic_info *vnic)
1415 struct hwrm_cfa_l2_set_rx_mask_input req = {0};
1416 uint32_t mask = vnic->rx_mask;
1418 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_SET_RX_MASK);
1420 req.vnic_id = htole32(vnic->id);
1421 req.mask = htole32(mask);
1422 req.mc_tbl_addr = htole64(vnic->mc_list.idi_paddr);
1423 req.num_mc_entries = htole32(vnic->mc_list_count);
1424 return hwrm_send_message(softc, &req, sizeof(req));
1428 bnxt_hwrm_l2_filter_free(struct bnxt_softc *softc, uint64_t filter_id)
1430 struct hwrm_cfa_l2_filter_free_input req = {0};
1433 if (filter_id == -1)
1436 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_FILTER_FREE);
1438 req.l2_filter_id = htole64(filter_id);
1440 BNXT_HWRM_LOCK(softc);
1441 rc = _hwrm_send_message(softc, &req, sizeof(req));
1446 BNXT_HWRM_UNLOCK(softc);
1451 bnxt_hwrm_free_filter(struct bnxt_softc *softc)
1453 struct bnxt_vnic_info *vnic = &softc->vnic_info;
1454 struct bnxt_vlan_tag *tag;
1457 rc = bnxt_hwrm_l2_filter_free(softc, softc->vnic_info.filter_id);
1461 SLIST_FOREACH(tag, &vnic->vlan_tags, next) {
1462 rc = bnxt_hwrm_l2_filter_free(softc, tag->filter_id);
1465 tag->filter_id = -1;
1473 bnxt_hwrm_l2_filter_alloc(struct bnxt_softc *softc, uint16_t vlan_tag,
1474 uint64_t *filter_id)
1476 struct hwrm_cfa_l2_filter_alloc_input req = {0};
1477 struct hwrm_cfa_l2_filter_alloc_output *resp;
1478 struct bnxt_vnic_info *vnic = &softc->vnic_info;
1479 uint32_t enables = 0;
1482 if (*filter_id != -1) {
1483 device_printf(softc->dev, "Attempt to re-allocate l2 ctx "
1484 "filter (fid: 0x%jx)\n", (uintmax_t)*filter_id);
1488 resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
1489 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_FILTER_ALLOC);
1491 req.flags = htole32(HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX);
1492 enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR
1493 | HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK
1494 | HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
1496 if (vlan_tag != 0xffff) {
1498 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
1499 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK |
1500 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_NUM_VLANS;
1501 req.l2_ivlan_mask = 0xffff;
1502 req.l2_ivlan = vlan_tag;
1506 req.enables = htole32(enables);
1507 req.dst_id = htole16(vnic->id);
1508 memcpy(req.l2_addr, if_getlladdr(iflib_get_ifp(softc->ctx)),
1510 memset(&req.l2_addr_mask, 0xff, sizeof(req.l2_addr_mask));
1512 BNXT_HWRM_LOCK(softc);
1513 rc = _hwrm_send_message(softc, &req, sizeof(req));
1517 *filter_id = le64toh(resp->l2_filter_id);
1519 BNXT_HWRM_UNLOCK(softc);
1524 bnxt_hwrm_set_filter(struct bnxt_softc *softc)
1526 struct bnxt_vnic_info *vnic = &softc->vnic_info;
1527 struct bnxt_vlan_tag *tag;
1530 rc = bnxt_hwrm_l2_filter_alloc(softc, 0xffff, &vnic->filter_id);
1534 SLIST_FOREACH(tag, &vnic->vlan_tags, next) {
1535 rc = bnxt_hwrm_l2_filter_alloc(softc, tag->tag,
1546 bnxt_hwrm_rss_cfg(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic,
1549 struct hwrm_vnic_rss_cfg_input req = {0};
1551 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_CFG);
1553 req.hash_type = htole32(hash_type);
1554 req.ring_grp_tbl_addr = htole64(vnic->rss_grp_tbl.idi_paddr);
1555 req.hash_key_tbl_addr = htole64(vnic->rss_hash_key_tbl.idi_paddr);
1556 req.rss_ctx_idx = htole16(vnic->rss_id);
1557 req.hash_mode_flags = HWRM_FUNC_SPD_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT;
1558 if (BNXT_CHIP_P5(softc)) {
1559 req.vnic_id = htole16(vnic->id);
1560 req.ring_table_pair_index = 0x0;
1563 return hwrm_send_message(softc, &req, sizeof(req));
1567 bnxt_hwrm_reserve_pf_rings(struct bnxt_softc *softc)
1569 struct hwrm_func_cfg_input req = {0};
1571 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_CFG);
1573 req.fid = htole16(0xffff);
1574 req.enables |= htole32(HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS);
1575 req.enables |= htole32(HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS);
1576 req.enables |= htole32(HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS);
1577 req.enables |= htole32(HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS);
1578 req.enables |= htole32(HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS);
1579 req.enables |= htole32(HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX);
1580 req.enables |= htole32(HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS);
1581 req.num_msix = htole16(BNXT_MAX_NUM_QUEUES);
1582 req.num_rsscos_ctxs = htole16(0x8);
1583 req.num_cmpl_rings = htole16(BNXT_MAX_NUM_QUEUES * 2);
1584 req.num_tx_rings = htole16(BNXT_MAX_NUM_QUEUES);
1585 req.num_rx_rings = htole16(BNXT_MAX_NUM_QUEUES);
1586 req.num_vnics = htole16(BNXT_MAX_NUM_QUEUES);
1587 req.num_stat_ctxs = htole16(BNXT_MAX_NUM_QUEUES * 2);
1589 return hwrm_send_message(softc, &req, sizeof(req));
1593 bnxt_cfg_async_cr(struct bnxt_softc *softc)
1596 struct hwrm_func_cfg_input req = {0};
1598 if (!BNXT_PF(softc))
1601 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_CFG);
1603 req.fid = htole16(0xffff);
1604 req.enables = htole32(HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
1605 if (BNXT_CHIP_P5(softc))
1606 req.async_event_cr = htole16(softc->nq_rings[0].ring.phys_id);
1608 req.async_event_cr = htole16(softc->def_cp_ring.ring.phys_id);
1610 rc = hwrm_send_message(softc, &req, sizeof(req));
1616 bnxt_validate_hw_lro_settings(struct bnxt_softc *softc)
1618 softc->hw_lro.enable = min(softc->hw_lro.enable, 1);
1620 softc->hw_lro.is_mode_gro = min(softc->hw_lro.is_mode_gro, 1);
1622 softc->hw_lro.max_agg_segs = min(softc->hw_lro.max_agg_segs,
1623 HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX);
1625 softc->hw_lro.max_aggs = min(softc->hw_lro.max_aggs,
1626 HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1628 softc->hw_lro.min_agg_len = min(softc->hw_lro.min_agg_len, BNXT_MAX_MTU);
1632 bnxt_hwrm_vnic_tpa_cfg(struct bnxt_softc *softc)
1634 struct hwrm_vnic_tpa_cfg_input req = {0};
1637 if (softc->vnic_info.id == (uint16_t) HWRM_NA_SIGNATURE) {
1641 if (!(softc->flags & BNXT_FLAG_TPA))
1644 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_TPA_CFG);
1646 if (softc->hw_lro.enable) {
1647 flags = HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1648 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1649 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1650 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ;
1652 if (softc->hw_lro.is_mode_gro)
1653 flags |= HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO;
1655 flags |= HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE;
1657 req.flags = htole32(flags);
1659 req.enables = htole32(HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1660 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1661 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1663 req.max_agg_segs = htole16(softc->hw_lro.max_agg_segs);
1664 req.max_aggs = htole16(softc->hw_lro.max_aggs);
1665 req.min_agg_len = htole32(softc->hw_lro.min_agg_len);
1668 req.vnic_id = htole16(softc->vnic_info.id);
1670 return hwrm_send_message(softc, &req, sizeof(req));
1674 bnxt_hwrm_nvm_find_dir_entry(struct bnxt_softc *softc, uint16_t type,
1675 uint16_t *ordinal, uint16_t ext, uint16_t *index, bool use_index,
1676 uint8_t search_opt, uint32_t *data_length, uint32_t *item_length,
1679 struct hwrm_nvm_find_dir_entry_input req = {0};
1680 struct hwrm_nvm_find_dir_entry_output *resp =
1681 (void *)softc->hwrm_cmd_resp.idi_vaddr;
1687 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_FIND_DIR_ENTRY);
1689 req.enables = htole32(
1690 HWRM_NVM_FIND_DIR_ENTRY_INPUT_ENABLES_DIR_IDX_VALID);
1691 req.dir_idx = htole16(*index);
1693 req.dir_type = htole16(type);
1694 req.dir_ordinal = htole16(*ordinal);
1695 req.dir_ext = htole16(ext);
1696 req.opt_ordinal = search_opt;
1698 BNXT_HWRM_LOCK(softc);
1699 old_timeo = softc->hwrm_cmd_timeo;
1700 softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
1701 rc = _hwrm_send_message(softc, &req, sizeof(req));
1702 softc->hwrm_cmd_timeo = old_timeo;
1707 *item_length = le32toh(resp->dir_item_length);
1709 *data_length = le32toh(resp->dir_data_length);
1711 *fw_ver = le32toh(resp->fw_ver);
1712 *ordinal = le16toh(resp->dir_ordinal);
1714 *index = le16toh(resp->dir_idx);
1717 BNXT_HWRM_UNLOCK(softc);
1722 bnxt_hwrm_nvm_read(struct bnxt_softc *softc, uint16_t index, uint32_t offset,
1723 uint32_t length, struct iflib_dma_info *data)
1725 struct hwrm_nvm_read_input req = {0};
1729 if (length > data->idi_size) {
1733 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_READ);
1734 req.host_dest_addr = htole64(data->idi_paddr);
1735 req.dir_idx = htole16(index);
1736 req.offset = htole32(offset);
1737 req.len = htole32(length);
1738 BNXT_HWRM_LOCK(softc);
1739 old_timeo = softc->hwrm_cmd_timeo;
1740 softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
1741 rc = _hwrm_send_message(softc, &req, sizeof(req));
1742 softc->hwrm_cmd_timeo = old_timeo;
1743 BNXT_HWRM_UNLOCK(softc);
1746 bus_dmamap_sync(data->idi_tag, data->idi_map, BUS_DMASYNC_POSTREAD);
1755 bnxt_hwrm_nvm_modify(struct bnxt_softc *softc, uint16_t index, uint32_t offset,
1756 void *data, bool cpyin, uint32_t length)
1758 struct hwrm_nvm_modify_input req = {0};
1759 struct iflib_dma_info dma_data;
1763 if (length == 0 || !data)
1765 rc = iflib_dma_alloc(softc->ctx, length, &dma_data,
1770 rc = copyin(data, dma_data.idi_vaddr, length);
1775 memcpy(dma_data.idi_vaddr, data, length);
1776 bus_dmamap_sync(dma_data.idi_tag, dma_data.idi_map,
1777 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1779 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_MODIFY);
1780 req.host_src_addr = htole64(dma_data.idi_paddr);
1781 req.dir_idx = htole16(index);
1782 req.offset = htole32(offset);
1783 req.len = htole32(length);
1784 BNXT_HWRM_LOCK(softc);
1785 old_timeo = softc->hwrm_cmd_timeo;
1786 softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
1787 rc = _hwrm_send_message(softc, &req, sizeof(req));
1788 softc->hwrm_cmd_timeo = old_timeo;
1789 BNXT_HWRM_UNLOCK(softc);
1792 iflib_dma_free(&dma_data);
1797 bnxt_hwrm_fw_reset(struct bnxt_softc *softc, uint8_t processor,
1800 struct hwrm_fw_reset_input req = {0};
1801 struct hwrm_fw_reset_output *resp =
1802 (void *)softc->hwrm_cmd_resp.idi_vaddr;
1807 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_RESET);
1808 req.embedded_proc_type = processor;
1809 req.selfrst_status = *selfreset;
1811 BNXT_HWRM_LOCK(softc);
1812 rc = _hwrm_send_message(softc, &req, sizeof(req));
1815 *selfreset = resp->selfrst_status;
1818 BNXT_HWRM_UNLOCK(softc);
1823 bnxt_hwrm_fw_qstatus(struct bnxt_softc *softc, uint8_t type, uint8_t *selfreset)
1825 struct hwrm_fw_qstatus_input req = {0};
1826 struct hwrm_fw_qstatus_output *resp =
1827 (void *)softc->hwrm_cmd_resp.idi_vaddr;
1832 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_QSTATUS);
1833 req.embedded_proc_type = type;
1835 BNXT_HWRM_LOCK(softc);
1836 rc = _hwrm_send_message(softc, &req, sizeof(req));
1839 *selfreset = resp->selfrst_status;
1842 BNXT_HWRM_UNLOCK(softc);
1847 bnxt_hwrm_nvm_write(struct bnxt_softc *softc, void *data, bool cpyin,
1848 uint16_t type, uint16_t ordinal, uint16_t ext, uint16_t attr,
1849 uint16_t option, uint32_t data_length, bool keep, uint32_t *item_length,
1852 struct hwrm_nvm_write_input req = {0};
1853 struct hwrm_nvm_write_output *resp =
1854 (void *)softc->hwrm_cmd_resp.idi_vaddr;
1855 struct iflib_dma_info dma_data;
1860 rc = iflib_dma_alloc(softc->ctx, data_length, &dma_data,
1865 rc = copyin(data, dma_data.idi_vaddr, data_length);
1870 memcpy(dma_data.idi_vaddr, data, data_length);
1871 bus_dmamap_sync(dma_data.idi_tag, dma_data.idi_map,
1872 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1875 dma_data.idi_paddr = 0;
1877 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_WRITE);
1879 req.host_src_addr = htole64(dma_data.idi_paddr);
1880 req.dir_type = htole16(type);
1881 req.dir_ordinal = htole16(ordinal);
1882 req.dir_ext = htole16(ext);
1883 req.dir_attr = htole16(attr);
1884 req.dir_data_length = htole32(data_length);
1885 req.option = htole16(option);
1888 htole16(HWRM_NVM_WRITE_INPUT_FLAGS_KEEP_ORIG_ACTIVE_IMG);
1891 req.dir_item_length = htole32(*item_length);
1893 BNXT_HWRM_LOCK(softc);
1894 old_timeo = softc->hwrm_cmd_timeo;
1895 softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
1896 rc = _hwrm_send_message(softc, &req, sizeof(req));
1897 softc->hwrm_cmd_timeo = old_timeo;
1901 *item_length = le32toh(resp->dir_item_length);
1903 *index = le16toh(resp->dir_idx);
1906 BNXT_HWRM_UNLOCK(softc);
1909 iflib_dma_free(&dma_data);
1914 bnxt_hwrm_nvm_erase_dir_entry(struct bnxt_softc *softc, uint16_t index)
1916 struct hwrm_nvm_erase_dir_entry_input req = {0};
1920 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_ERASE_DIR_ENTRY);
1921 req.dir_idx = htole16(index);
1922 BNXT_HWRM_LOCK(softc);
1923 old_timeo = softc->hwrm_cmd_timeo;
1924 softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
1925 rc = _hwrm_send_message(softc, &req, sizeof(req));
1926 softc->hwrm_cmd_timeo = old_timeo;
1927 BNXT_HWRM_UNLOCK(softc);
1932 bnxt_hwrm_nvm_get_dir_info(struct bnxt_softc *softc, uint32_t *entries,
1933 uint32_t *entry_length)
1935 struct hwrm_nvm_get_dir_info_input req = {0};
1936 struct hwrm_nvm_get_dir_info_output *resp =
1937 (void *)softc->hwrm_cmd_resp.idi_vaddr;
1941 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_GET_DIR_INFO);
1943 BNXT_HWRM_LOCK(softc);
1944 old_timeo = softc->hwrm_cmd_timeo;
1945 softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
1946 rc = _hwrm_send_message(softc, &req, sizeof(req));
1947 softc->hwrm_cmd_timeo = old_timeo;
1952 *entries = le32toh(resp->entries);
1954 *entry_length = le32toh(resp->entry_length);
1957 BNXT_HWRM_UNLOCK(softc);
1962 bnxt_hwrm_nvm_get_dir_entries(struct bnxt_softc *softc, uint32_t *entries,
1963 uint32_t *entry_length, struct iflib_dma_info *dma_data)
1965 struct hwrm_nvm_get_dir_entries_input req = {0};
1974 entry_length = &ent_len;
1976 rc = bnxt_hwrm_nvm_get_dir_info(softc, entries, entry_length);
1979 if (*entries * *entry_length > dma_data->idi_size) {
1985 * TODO: There's a race condition here that could blow up DMA memory...
1986 * we need to allocate the max size, not the currently in use
1987 * size. The command should totally have a max size here.
1989 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_GET_DIR_ENTRIES);
1990 req.host_dest_addr = htole64(dma_data->idi_paddr);
1991 BNXT_HWRM_LOCK(softc);
1992 old_timeo = softc->hwrm_cmd_timeo;
1993 softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
1994 rc = _hwrm_send_message(softc, &req, sizeof(req));
1995 softc->hwrm_cmd_timeo = old_timeo;
1996 BNXT_HWRM_UNLOCK(softc);
1999 bus_dmamap_sync(dma_data->idi_tag, dma_data->idi_map,
2000 BUS_DMASYNC_POSTWRITE);
2007 bnxt_hwrm_nvm_get_dev_info(struct bnxt_softc *softc, uint16_t *mfg_id,
2008 uint16_t *device_id, uint32_t *sector_size, uint32_t *nvram_size,
2009 uint32_t *reserved_size, uint32_t *available_size)
2011 struct hwrm_nvm_get_dev_info_input req = {0};
2012 struct hwrm_nvm_get_dev_info_output *resp =
2013 (void *)softc->hwrm_cmd_resp.idi_vaddr;
2017 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_GET_DEV_INFO);
2019 BNXT_HWRM_LOCK(softc);
2020 old_timeo = softc->hwrm_cmd_timeo;
2021 softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
2022 rc = _hwrm_send_message(softc, &req, sizeof(req));
2023 softc->hwrm_cmd_timeo = old_timeo;
2028 *mfg_id = le16toh(resp->manufacturer_id);
2030 *device_id = le16toh(resp->device_id);
2032 *sector_size = le32toh(resp->sector_size);
2034 *nvram_size = le32toh(resp->nvram_size);
2036 *reserved_size = le32toh(resp->reserved_size);
2038 *available_size = le32toh(resp->available_size);
2041 BNXT_HWRM_UNLOCK(softc);
2046 bnxt_hwrm_nvm_install_update(struct bnxt_softc *softc,
2047 uint32_t install_type, uint64_t *installed_items, uint8_t *result,
2048 uint8_t *problem_item, uint8_t *reset_required)
2050 struct hwrm_nvm_install_update_input req = {0};
2051 struct hwrm_nvm_install_update_output *resp =
2052 (void *)softc->hwrm_cmd_resp.idi_vaddr;
2056 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_INSTALL_UPDATE);
2057 req.install_type = htole32(install_type);
2059 BNXT_HWRM_LOCK(softc);
2060 old_timeo = softc->hwrm_cmd_timeo;
2061 softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
2062 rc = _hwrm_send_message(softc, &req, sizeof(req));
2063 softc->hwrm_cmd_timeo = old_timeo;
2067 if (installed_items)
2068 *installed_items = le32toh(resp->installed_items);
2070 *result = resp->result;
2072 *problem_item = resp->problem_item;
2074 *reset_required = resp->reset_required;
2077 BNXT_HWRM_UNLOCK(softc);
2082 bnxt_hwrm_nvm_verify_update(struct bnxt_softc *softc, uint16_t type,
2083 uint16_t ordinal, uint16_t ext)
2085 struct hwrm_nvm_verify_update_input req = {0};
2089 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_VERIFY_UPDATE);
2091 req.dir_type = htole16(type);
2092 req.dir_ordinal = htole16(ordinal);
2093 req.dir_ext = htole16(ext);
2095 BNXT_HWRM_LOCK(softc);
2096 old_timeo = softc->hwrm_cmd_timeo;
2097 softc->hwrm_cmd_timeo = BNXT_NVM_TIMEO;
2098 rc = _hwrm_send_message(softc, &req, sizeof(req));
2099 softc->hwrm_cmd_timeo = old_timeo;
2100 BNXT_HWRM_UNLOCK(softc);
2105 bnxt_hwrm_fw_get_time(struct bnxt_softc *softc, uint16_t *year, uint8_t *month,
2106 uint8_t *day, uint8_t *hour, uint8_t *minute, uint8_t *second,
2107 uint16_t *millisecond, uint16_t *zone)
2109 struct hwrm_fw_get_time_input req = {0};
2110 struct hwrm_fw_get_time_output *resp =
2111 (void *)softc->hwrm_cmd_resp.idi_vaddr;
2114 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_GET_TIME);
2116 BNXT_HWRM_LOCK(softc);
2117 rc = _hwrm_send_message(softc, &req, sizeof(req));
2122 *year = le16toh(resp->year);
2124 *month = resp->month;
2130 *minute = resp->minute;
2132 *second = resp->second;
2134 *millisecond = le16toh(resp->millisecond);
2136 *zone = le16toh(resp->zone);
2139 BNXT_HWRM_UNLOCK(softc);
2144 bnxt_hwrm_fw_set_time(struct bnxt_softc *softc, uint16_t year, uint8_t month,
2145 uint8_t day, uint8_t hour, uint8_t minute, uint8_t second,
2146 uint16_t millisecond, uint16_t zone)
2148 struct hwrm_fw_set_time_input req = {0};
2150 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_SET_TIME);
2152 req.year = htole16(year);
2156 req.minute = minute;
2157 req.second = second;
2158 req.millisecond = htole16(millisecond);
2159 req.zone = htole16(zone);
2160 return hwrm_send_message(softc, &req, sizeof(req));
2164 bnxt_hwrm_port_phy_qcfg(struct bnxt_softc *softc)
2166 struct bnxt_link_info *link_info = &softc->link_info;
2167 struct hwrm_port_phy_qcfg_input req = {0};
2168 struct hwrm_port_phy_qcfg_output *resp =
2169 (void *)softc->hwrm_cmd_resp.idi_vaddr;
2172 BNXT_HWRM_LOCK(softc);
2173 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_PHY_QCFG);
2175 rc = _hwrm_send_message(softc, &req, sizeof(req));
2179 link_info->phy_link_status = resp->link;
2180 link_info->duplex = resp->duplex_cfg;
2181 link_info->auto_mode = resp->auto_mode;
2184 * When AUTO_PAUSE_AUTONEG_PAUSE bit is set to 1,
2185 * the advertisement of pause is enabled.
2186 * 1. When the auto_mode is not set to none and this flag is set to 1,
2187 * then the auto_pause bits on this port are being advertised and
2188 * autoneg pause results are being interpreted.
2189 * 2. When the auto_mode is not set to none and this flag is set to 0,
2190 * the pause is forced as indicated in force_pause, and also
2191 * advertised as auto_pause bits, but the autoneg results are not
2192 * interpreted since the pause configuration is being forced.
2193 * 3. When the auto_mode is set to none and this flag is set to 1,
2194 * auto_pause bits should be ignored and should be set to 0.
2197 link_info->flow_ctrl.autoneg = false;
2198 link_info->flow_ctrl.tx = false;
2199 link_info->flow_ctrl.rx = false;
2201 if ((resp->auto_mode) &&
2202 (resp->auto_pause & BNXT_AUTO_PAUSE_AUTONEG_PAUSE)) {
2203 link_info->flow_ctrl.autoneg = true;
2206 if (link_info->flow_ctrl.autoneg) {
2207 if (resp->auto_pause & BNXT_PAUSE_TX)
2208 link_info->flow_ctrl.tx = true;
2209 if (resp->auto_pause & BNXT_PAUSE_RX)
2210 link_info->flow_ctrl.rx = true;
2212 if (resp->force_pause & BNXT_PAUSE_TX)
2213 link_info->flow_ctrl.tx = true;
2214 if (resp->force_pause & BNXT_PAUSE_RX)
2215 link_info->flow_ctrl.rx = true;
2218 link_info->duplex_setting = resp->duplex_cfg;
2219 if (link_info->phy_link_status == HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK)
2220 link_info->link_speed = le16toh(resp->link_speed);
2222 link_info->link_speed = 0;
2223 link_info->force_link_speed = le16toh(resp->force_link_speed);
2224 link_info->auto_link_speed = le16toh(resp->auto_link_speed);
2225 link_info->support_speeds = le16toh(resp->support_speeds);
2226 link_info->auto_link_speeds = le16toh(resp->auto_link_speed_mask);
2227 link_info->preemphasis = le32toh(resp->preemphasis);
2228 link_info->phy_ver[0] = resp->phy_maj;
2229 link_info->phy_ver[1] = resp->phy_min;
2230 link_info->phy_ver[2] = resp->phy_bld;
2231 snprintf(softc->ver_info->phy_ver, sizeof(softc->ver_info->phy_ver),
2232 "%d.%d.%d", link_info->phy_ver[0], link_info->phy_ver[1],
2233 link_info->phy_ver[2]);
2234 strlcpy(softc->ver_info->phy_vendor, resp->phy_vendor_name,
2236 strlcpy(softc->ver_info->phy_partnumber, resp->phy_vendor_partnumber,
2238 link_info->media_type = resp->media_type;
2239 link_info->phy_type = resp->phy_type;
2240 link_info->transceiver = resp->xcvr_pkg_type;
2241 link_info->phy_addr = resp->eee_config_phy_addr &
2242 HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_MASK;
2245 BNXT_HWRM_UNLOCK(softc);
2250 bnxt_hwrm_get_wol_fltrs(struct bnxt_softc *softc, uint16_t handle)
2252 struct hwrm_wol_filter_qcfg_input req = {0};
2253 struct hwrm_wol_filter_qcfg_output *resp =
2254 (void *)softc->hwrm_cmd_resp.idi_vaddr;
2255 uint16_t next_handle = 0;
2258 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_WOL_FILTER_QCFG);
2259 req.port_id = htole16(softc->pf.port_id);
2260 req.handle = htole16(handle);
2261 rc = hwrm_send_message(softc, &req, sizeof(req));
2263 next_handle = le16toh(resp->next_handle);
2264 if (next_handle != 0) {
2265 if (resp->wol_type ==
2266 HWRM_WOL_FILTER_ALLOC_INPUT_WOL_TYPE_MAGICPKT) {
2268 softc->wol_filter_id = resp->wol_filter_id;
2276 bnxt_hwrm_alloc_wol_fltr(struct bnxt_softc *softc)
2278 struct hwrm_wol_filter_alloc_input req = {0};
2279 struct hwrm_wol_filter_alloc_output *resp =
2280 (void *)softc->hwrm_cmd_resp.idi_vaddr;
2283 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_WOL_FILTER_ALLOC);
2284 req.port_id = htole16(softc->pf.port_id);
2285 req.wol_type = HWRM_WOL_FILTER_ALLOC_INPUT_WOL_TYPE_MAGICPKT;
2287 htole32(HWRM_WOL_FILTER_ALLOC_INPUT_ENABLES_MAC_ADDRESS);
2288 memcpy(req.mac_address, softc->func.mac_addr, ETHER_ADDR_LEN);
2289 rc = hwrm_send_message(softc, &req, sizeof(req));
2291 softc->wol_filter_id = resp->wol_filter_id;
2297 bnxt_hwrm_free_wol_fltr(struct bnxt_softc *softc)
2299 struct hwrm_wol_filter_free_input req = {0};
2301 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_WOL_FILTER_FREE);
2302 req.port_id = htole16(softc->pf.port_id);
2304 htole32(HWRM_WOL_FILTER_FREE_INPUT_ENABLES_WOL_FILTER_ID);
2305 req.wol_filter_id = softc->wol_filter_id;
2306 return hwrm_send_message(softc, &req, sizeof(req));
2309 static void bnxt_hwrm_set_coal_params(struct bnxt_softc *softc, uint32_t max_frames,
2310 uint32_t buf_tmrs, uint16_t flags,
2311 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
2313 req->flags = htole16(flags);
2314 req->num_cmpl_dma_aggr = htole16((uint16_t)max_frames);
2315 req->num_cmpl_dma_aggr_during_int = htole16(max_frames >> 16);
2316 req->cmpl_aggr_dma_tmr = htole16((uint16_t)buf_tmrs);
2317 req->cmpl_aggr_dma_tmr_during_int = htole16(buf_tmrs >> 16);
2318 /* Minimum time between 2 interrupts set to buf_tmr x 2 */
2319 req->int_lat_tmr_min = htole16((uint16_t)buf_tmrs * 2);
2320 req->int_lat_tmr_max = htole16((uint16_t)buf_tmrs * 4);
2321 req->num_cmpl_aggr_int = htole16((uint16_t)max_frames * 4);
2324 int bnxt_hwrm_set_coal(struct bnxt_softc *softc)
2327 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
2329 uint16_t max_buf, max_buf_irq;
2330 uint16_t buf_tmr, buf_tmr_irq;
2333 bnxt_hwrm_cmd_hdr_init(softc, &req_rx,
2334 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
2335 bnxt_hwrm_cmd_hdr_init(softc, &req_tx,
2336 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
2338 /* Each rx completion (2 records) should be DMAed immediately.
2339 * DMA 1/4 of the completion buffers at a time.
2341 max_buf = min_t(uint16_t, softc->rx_coal_frames / 4, 2);
2342 /* max_buf must not be zero */
2343 max_buf = clamp_t(uint16_t, max_buf, 1, 63);
2344 max_buf_irq = clamp_t(uint16_t, softc->rx_coal_frames_irq, 1, 63);
2345 buf_tmr = BNXT_USEC_TO_COAL_TIMER(softc->rx_coal_usecs);
2346 /* buf timer set to 1/4 of interrupt timer */
2347 buf_tmr = max_t(uint16_t, buf_tmr / 4, 1);
2348 buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(softc->rx_coal_usecs_irq);
2349 buf_tmr_irq = max_t(uint16_t, buf_tmr_irq, 1);
2351 flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET;
2353 /* RING_IDLE generates more IRQs for lower latency. Enable it only
2354 * if coal_usecs is less than 25 us.
2356 if (softc->rx_coal_usecs < 25)
2357 flags |= HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
2359 bnxt_hwrm_set_coal_params(softc, max_buf_irq << 16 | max_buf,
2360 buf_tmr_irq << 16 | buf_tmr, flags, &req_rx);
2362 /* max_buf must not be zero */
2363 max_buf = clamp_t(uint16_t, softc->tx_coal_frames, 1, 63);
2364 max_buf_irq = clamp_t(uint16_t, softc->tx_coal_frames_irq, 1, 63);
2365 buf_tmr = BNXT_USEC_TO_COAL_TIMER(softc->tx_coal_usecs);
2366 /* buf timer set to 1/4 of interrupt timer */
2367 buf_tmr = max_t(uint16_t, buf_tmr / 4, 1);
2368 buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(softc->tx_coal_usecs_irq);
2369 buf_tmr_irq = max_t(uint16_t, buf_tmr_irq, 1);
2370 flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET;
2371 bnxt_hwrm_set_coal_params(softc, max_buf_irq << 16 | max_buf,
2372 buf_tmr_irq << 16 | buf_tmr, flags, &req_tx);
2374 for (i = 0; i < softc->nrxqsets; i++) {
2379 * Check if Tx also needs to be done
2380 * So far, Tx processing has been done in softirq contest
2384 req->ring_id = htole16(softc->grp_info[i].cp_ring_id);
2386 rc = hwrm_send_message(softc, req, sizeof(*req));
2393 int bnxt_hwrm_func_rgtr_async_events(struct bnxt_softc *softc, unsigned long *bmap,
2396 struct hwrm_func_drv_rgtr_input req = {0};
2397 bitstr_t *async_events_bmap;
2401 #define BNXT_MAX_NUM_ASYNC_EVENTS 256
2402 async_events_bmap = bit_alloc(BNXT_MAX_NUM_ASYNC_EVENTS, M_DEVBUF,
2404 events = (uint32_t *)async_events_bmap;
2406 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_DRV_RGTR);
2409 htole32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
2411 memset(async_events_bmap, 0, sizeof(BNXT_MAX_NUM_ASYNC_EVENTS / 8));
2413 bit_set(async_events_bmap, HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
2414 bit_set(async_events_bmap, HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
2415 bit_set(async_events_bmap, HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED);
2416 bit_set(async_events_bmap, HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE);
2417 bit_set(async_events_bmap, HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
2419 if (bmap && bmap_size) {
2420 for (i = 0; i < bmap_size; i++) {
2421 if (bit_test(bmap, i))
2422 bit_set(async_events_bmap, i);
2426 for (i = 0; i < 8; i++)
2427 req.async_event_fwd[i] |= htole32(events[i]);
2429 free(async_events_bmap, M_DEVBUF);
2431 return hwrm_send_message(softc, &req, sizeof(req));
2434 void bnxt_hwrm_ring_info_get(struct bnxt_softc *softc, uint8_t ring_type,
2435 uint32_t ring_id, uint32_t *prod, uint32_t *cons)
2437 hwrm_dbg_ring_info_get_input_t req = {0};
2438 hwrm_dbg_ring_info_get_output_t *resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
2441 *prod = *cons = 0xffffffff;
2442 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_DBG_RING_INFO_GET);
2443 req.ring_type = le32toh(ring_type);
2444 req.fw_ring_id = le32toh(ring_id);
2445 rc = hwrm_send_message(softc, &req, sizeof(req));
2447 *prod = resp->producer_index;
2448 *cons = resp->consumer_index;