2 * Copyright (c) 2012-2016 Solarflare Communications Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
16 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
21 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
22 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
23 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
24 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 * The views and conclusions contained in the software and documentation are
27 * those of the authors and should not be interpreted as representing official
28 * policies, either expressed or implied, of the FreeBSD Project.
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
36 #if EFSYS_OPT_MON_MCDI
40 #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
42 #include "ef10_tlv_layout.h"
/*
 * efx_mcdi_get_port_assignment: ask the MC (MC_CMD_GET_PORT_ASSIGNMENT) which
 * port this PCI function is assigned to; result written to *portp.
 * EF10 family only (Huntington/Medford/Medford2, per the assert below).
 * NOTE(review): this extract is missing interleaved lines (locals, fail
 * labels, returns) -- confirm against the complete file before editing.
 */
44 __checkReturn efx_rc_t
45 efx_mcdi_get_port_assignment(
47 __out uint32_t *portp)
50 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN,
51 MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN);
54 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
55 enp->en_family == EFX_FAMILY_MEDFORD ||
56 enp->en_family == EFX_FAMILY_MEDFORD2);
/* Request and response share the single payload buffer. */
58 req.emr_cmd = MC_CMD_GET_PORT_ASSIGNMENT;
59 req.emr_in_buf = payload;
60 req.emr_in_length = MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN;
61 req.emr_out_buf = payload;
62 req.emr_out_length = MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN;
64 efx_mcdi_execute(enp, &req);
/* Fail on MCDI error or on a short (truncated) response. */
66 if (req.emr_rc != 0) {
71 if (req.emr_out_length_used < MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN) {
76 *portp = MCDI_OUT_DWORD(req, GET_PORT_ASSIGNMENT_OUT_PORT);
83 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * efx_mcdi_get_port_modes: fetch the bitmask of supported port modes
 * (*modesp), and optionally the current (*current_modep) and default
 * (*default_modep) modes, via MC_CMD_GET_PORT_MODES.
 * CurrentMode is validated only when the caller asked for it, since the
 * field was added later (Medford) and old firmware omits it.
 * NOTE(review): extract is missing interleaved lines -- verify full source.
 */
88 __checkReturn efx_rc_t
89 efx_mcdi_get_port_modes(
91 __out uint32_t *modesp,
92 __out_opt uint32_t *current_modep,
93 __out_opt uint32_t *default_modep)
96 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PORT_MODES_IN_LEN,
97 MC_CMD_GET_PORT_MODES_OUT_LEN);
100 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
101 enp->en_family == EFX_FAMILY_MEDFORD ||
102 enp->en_family == EFX_FAMILY_MEDFORD2);
104 req.emr_cmd = MC_CMD_GET_PORT_MODES;
105 req.emr_in_buf = payload;
106 req.emr_in_length = MC_CMD_GET_PORT_MODES_IN_LEN;
107 req.emr_out_buf = payload;
108 req.emr_out_length = MC_CMD_GET_PORT_MODES_OUT_LEN;
110 efx_mcdi_execute(enp, &req);
112 if (req.emr_rc != 0) {
118 * Require only Modes and DefaultMode fields, unless the current mode
119 * was requested (CurrentMode field was added for Medford).
121 if (req.emr_out_length_used <
122 MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST) {
126 if ((current_modep != NULL) && (req.emr_out_length_used <
127 MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST + 4)) {
132 *modesp = MCDI_OUT_DWORD(req, GET_PORT_MODES_OUT_MODES);
134 if (current_modep != NULL) {
135 *current_modep = MCDI_OUT_DWORD(req,
136 GET_PORT_MODES_OUT_CURRENT_MODE);
139 if (default_modep != NULL) {
140 *default_modep = MCDI_OUT_DWORD(req,
141 GET_PORT_MODES_OUT_DEFAULT_MODE);
151 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * ef10_nic_get_port_mode_bandwidth: derive the total bandwidth (Mbps) for
 * the current TLV port mode and report it via *bandwidth_mbpsp.
 * Per-lane speeds (single_lane/dual_lane/quad_lane) are chosen from the PHY
 * capability mask; the switch then sums lanes per the current mode's lane
 * layout (e.g. 2x1_2x1 = two cages of two single lanes each).
 * NOTE(review): extract is missing interleaved lines (lane-speed defaults,
 * break statements, default case) -- verify against the complete file.
 */
156 __checkReturn efx_rc_t
157 ef10_nic_get_port_mode_bandwidth(
159 __out uint32_t *bandwidth_mbpsp)
162 uint32_t current_mode;
163 efx_port_t *epp = &(enp->en_port);
165 uint32_t single_lane;
171 if ((rc = efx_mcdi_get_port_modes(enp, &port_modes,
172 ¤t_mode, NULL)) != 0) {
173 /* No port mode info available. */
/* Pick the highest per-lane speed the PHY advertises. */
177 if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_25000FDX))
182 if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_50000FDX))
187 if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_100000FDX))
192 switch (current_mode) {
193 case TLV_PORT_MODE_1x1_NA: /* mode 0 */
194 bandwidth = single_lane;
196 case TLV_PORT_MODE_1x2_NA: /* mode 10 */
197 case TLV_PORT_MODE_NA_1x2: /* mode 11 */
198 bandwidth = dual_lane;
200 case TLV_PORT_MODE_1x1_1x1: /* mode 2 */
201 bandwidth = single_lane + single_lane;
203 case TLV_PORT_MODE_4x1_NA: /* mode 4 */
204 case TLV_PORT_MODE_NA_4x1: /* mode 8 */
205 bandwidth = 4 * single_lane;
207 case TLV_PORT_MODE_2x1_2x1: /* mode 5 */
208 bandwidth = (2 * single_lane) + (2 * single_lane);
210 case TLV_PORT_MODE_1x2_1x2: /* mode 12 */
211 bandwidth = dual_lane + dual_lane;
213 case TLV_PORT_MODE_1x2_2x1: /* mode 17 */
214 case TLV_PORT_MODE_2x1_1x2: /* mode 18 */
215 bandwidth = dual_lane + (2 * single_lane);
217 /* Legacy Medford-only mode. Do not use (see bug63270) */
218 case TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2: /* mode 9 */
219 bandwidth = 4 * single_lane;
221 case TLV_PORT_MODE_1x4_NA: /* mode 1 */
222 case TLV_PORT_MODE_NA_1x4: /* mode 22 */
223 bandwidth = quad_lane;
225 case TLV_PORT_MODE_2x2_NA: /* mode 13 */
226 case TLV_PORT_MODE_NA_2x2: /* mode 14 */
227 bandwidth = 2 * dual_lane;
229 case TLV_PORT_MODE_1x4_2x1: /* mode 6 */
230 case TLV_PORT_MODE_2x1_1x4: /* mode 7 */
231 bandwidth = quad_lane + (2 * single_lane);
233 case TLV_PORT_MODE_1x4_1x2: /* mode 15 */
234 case TLV_PORT_MODE_1x2_1x4: /* mode 16 */
235 bandwidth = quad_lane + dual_lane;
237 case TLV_PORT_MODE_1x4_1x4: /* mode 3 */
238 bandwidth = quad_lane + quad_lane;
245 *bandwidth_mbpsp = bandwidth;
252 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * efx_mcdi_vadaptor_alloc: allocate a virtual adaptor on the given upstream
 * EVB port (MC_CMD_VADAPTOR_ALLOC). The PERMIT_SET_MAC flag is propagated
 * from the NIC config so the MAC can be changed with filters installed,
 * when the firmware supports it.
 * Precondition: no vport already bound (en_vport_id == EVB_PORT_ID_NULL).
 * NOTE(review): extract is missing interleaved lines -- verify full source.
 */
257 static __checkReturn efx_rc_t
258 efx_mcdi_vadaptor_alloc(
260 __in uint32_t port_id)
263 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VADAPTOR_ALLOC_IN_LEN,
264 MC_CMD_VADAPTOR_ALLOC_OUT_LEN);
267 EFSYS_ASSERT3U(enp->en_vport_id, ==, EVB_PORT_ID_NULL);
269 req.emr_cmd = MC_CMD_VADAPTOR_ALLOC;
270 req.emr_in_buf = payload;
271 req.emr_in_length = MC_CMD_VADAPTOR_ALLOC_IN_LEN;
272 req.emr_out_buf = payload;
273 req.emr_out_length = MC_CMD_VADAPTOR_ALLOC_OUT_LEN;
275 MCDI_IN_SET_DWORD(req, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
276 MCDI_IN_POPULATE_DWORD_1(req, VADAPTOR_ALLOC_IN_FLAGS,
277 VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED,
278 enp->en_nic_cfg.enc_allow_set_mac_with_installed_filters ? 1 : 0);
280 efx_mcdi_execute(enp, &req);
282 if (req.emr_rc != 0) {
290 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * efx_mcdi_vadaptor_free: release the virtual adaptor bound to the given
 * upstream EVB port (MC_CMD_VADAPTOR_FREE). Counterpart to
 * efx_mcdi_vadaptor_alloc().
 * NOTE(review): extract is missing interleaved lines -- verify full source.
 */
295 static __checkReturn efx_rc_t
296 efx_mcdi_vadaptor_free(
298 __in uint32_t port_id)
301 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VADAPTOR_FREE_IN_LEN,
302 MC_CMD_VADAPTOR_FREE_OUT_LEN);
305 req.emr_cmd = MC_CMD_VADAPTOR_FREE;
306 req.emr_in_buf = payload;
307 req.emr_in_length = MC_CMD_VADAPTOR_FREE_IN_LEN;
308 req.emr_out_buf = payload;
309 req.emr_out_length = MC_CMD_VADAPTOR_FREE_OUT_LEN;
311 MCDI_IN_SET_DWORD(req, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
313 efx_mcdi_execute(enp, &req);
315 if (req.emr_rc != 0) {
323 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * efx_mcdi_get_mac_address_pf: read the PF's base MAC address via
 * MC_CMD_GET_MAC_ADDRESSES, copying it to mac_addrp when non-NULL.
 * Fails if the firmware reports fewer than one address.
 * NOTE(review): extract is missing interleaved lines -- verify full source.
 */
328 __checkReturn efx_rc_t
329 efx_mcdi_get_mac_address_pf(
331 __out_ecount_opt(6) uint8_t mac_addrp[6])
334 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_MAC_ADDRESSES_IN_LEN,
335 MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
338 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
339 enp->en_family == EFX_FAMILY_MEDFORD ||
340 enp->en_family == EFX_FAMILY_MEDFORD2);
342 req.emr_cmd = MC_CMD_GET_MAC_ADDRESSES;
343 req.emr_in_buf = payload;
344 req.emr_in_length = MC_CMD_GET_MAC_ADDRESSES_IN_LEN;
345 req.emr_out_buf = payload;
346 req.emr_out_length = MC_CMD_GET_MAC_ADDRESSES_OUT_LEN;
348 efx_mcdi_execute(enp, &req);
350 if (req.emr_rc != 0) {
355 if (req.emr_out_length_used < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) {
/* At least one MAC address must be present in the response. */
360 if (MCDI_OUT_DWORD(req, GET_MAC_ADDRESSES_OUT_MAC_COUNT) < 1) {
365 if (mac_addrp != NULL) {
368 addrp = MCDI_OUT2(req, uint8_t,
369 GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE);
371 EFX_MAC_ADDR_COPY(mac_addrp, addrp);
381 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * efx_mcdi_get_mac_address_vf: read a VF's MAC address from its assigned
 * vport (MC_CMD_VPORT_GET_MAC_ADDRESSES with EVB_PORT_ID_ASSIGNED),
 * copying the first address to mac_addrp when non-NULL.
 * Uses LENMAX/LENMIN bounds since the response is variable-length.
 * NOTE(review): extract is missing interleaved lines -- verify full source.
 */
386 __checkReturn efx_rc_t
387 efx_mcdi_get_mac_address_vf(
389 __out_ecount_opt(6) uint8_t mac_addrp[6])
392 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN,
393 MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);
396 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
397 enp->en_family == EFX_FAMILY_MEDFORD ||
398 enp->en_family == EFX_FAMILY_MEDFORD2);
400 req.emr_cmd = MC_CMD_VPORT_GET_MAC_ADDRESSES;
401 req.emr_in_buf = payload;
402 req.emr_in_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN;
403 req.emr_out_buf = payload;
404 req.emr_out_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX;
406 MCDI_IN_SET_DWORD(req, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
407 EVB_PORT_ID_ASSIGNED);
409 efx_mcdi_execute(enp, &req);
411 if (req.emr_rc != 0) {
416 if (req.emr_out_length_used <
417 MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN) {
422 if (MCDI_OUT_DWORD(req,
423 VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT) < 1) {
428 if (mac_addrp != NULL) {
431 addrp = MCDI_OUT2(req, uint8_t,
432 VPORT_GET_MAC_ADDRESSES_OUT_MACADDR);
434 EFX_MAC_ADDR_COPY(mac_addrp, addrp);
444 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Query system and DPCPU clock frequencies via MC_CMD_GET_CLOCK.
 * Presumably efx_mcdi_get_clock -- the function-name line is missing from
 * this extract; confirm against the complete file.
 * A zero frequency in either field is treated as a failure (firmware must
 * report valid, non-zero clocks).
 */
449 __checkReturn efx_rc_t
452 __out uint32_t *sys_freqp,
453 __out uint32_t *dpcpu_freqp)
456 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_CLOCK_IN_LEN,
457 MC_CMD_GET_CLOCK_OUT_LEN);
460 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
461 enp->en_family == EFX_FAMILY_MEDFORD ||
462 enp->en_family == EFX_FAMILY_MEDFORD2);
464 req.emr_cmd = MC_CMD_GET_CLOCK;
465 req.emr_in_buf = payload;
466 req.emr_in_length = MC_CMD_GET_CLOCK_IN_LEN;
467 req.emr_out_buf = payload;
468 req.emr_out_length = MC_CMD_GET_CLOCK_OUT_LEN;
470 efx_mcdi_execute(enp, &req);
472 if (req.emr_rc != 0) {
477 if (req.emr_out_length_used < MC_CMD_GET_CLOCK_OUT_LEN) {
482 *sys_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_SYS_FREQ);
483 if (*sys_freqp == 0) {
487 *dpcpu_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_DPCPU_FREQ);
488 if (*dpcpu_freqp == 0) {
502 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * efx_mcdi_get_rxdp_config: query the RX datapath configuration
 * (MC_CMD_GET_RXDP_CONFIG) and report the RX DMA end padding in bytes via
 * *end_paddingp. If PAD_HOST_DMA is clear, padding is disabled; otherwise
 * PAD_HOST_LEN selects 64/128/256 bytes.
 * NOTE(review): extract is missing interleaved lines (the padding-disabled
 * value, per-case assignments, default case) -- verify full source.
 */
507 __checkReturn efx_rc_t
508 efx_mcdi_get_rxdp_config(
510 __out uint32_t *end_paddingp)
513 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_RXDP_CONFIG_IN_LEN,
514 MC_CMD_GET_RXDP_CONFIG_OUT_LEN);
515 uint32_t end_padding;
518 req.emr_cmd = MC_CMD_GET_RXDP_CONFIG;
519 req.emr_in_buf = payload;
520 req.emr_in_length = MC_CMD_GET_RXDP_CONFIG_IN_LEN;
521 req.emr_out_buf = payload;
522 req.emr_out_length = MC_CMD_GET_RXDP_CONFIG_OUT_LEN;
524 efx_mcdi_execute(enp, &req);
525 if (req.emr_rc != 0) {
530 if (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
531 GET_RXDP_CONFIG_OUT_PAD_HOST_DMA) == 0) {
532 /* RX DMA end padding is disabled */
535 switch (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
536 GET_RXDP_CONFIG_OUT_PAD_HOST_LEN)) {
537 case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64:
540 case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128:
543 case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256:
552 *end_paddingp = end_padding;
559 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * efx_mcdi_get_vector_cfg: query interrupt vector layout via
 * MC_CMD_GET_VECTOR_CFG -- base vector, vectors per PF, vectors per VF.
 * Each output pointer is optional and filled only when non-NULL.
 * NOTE(review): extract is missing interleaved lines -- verify full source.
 */
564 __checkReturn efx_rc_t
565 efx_mcdi_get_vector_cfg(
567 __out_opt uint32_t *vec_basep,
568 __out_opt uint32_t *pf_nvecp,
569 __out_opt uint32_t *vf_nvecp)
572 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_VECTOR_CFG_IN_LEN,
573 MC_CMD_GET_VECTOR_CFG_OUT_LEN);
576 req.emr_cmd = MC_CMD_GET_VECTOR_CFG;
577 req.emr_in_buf = payload;
578 req.emr_in_length = MC_CMD_GET_VECTOR_CFG_IN_LEN;
579 req.emr_out_buf = payload;
580 req.emr_out_length = MC_CMD_GET_VECTOR_CFG_OUT_LEN;
582 efx_mcdi_execute(enp, &req);
584 if (req.emr_rc != 0) {
589 if (req.emr_out_length_used < MC_CMD_GET_VECTOR_CFG_OUT_LEN) {
594 if (vec_basep != NULL)
595 *vec_basep = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VEC_BASE);
596 if (pf_nvecp != NULL)
597 *pf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_PF);
598 if (vf_nvecp != NULL)
599 *vf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_VF);
606 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Allocate virtual interfaces (MC_CMD_ALLOC_VIS) -- presumably
 * efx_mcdi_alloc_vis; the function-name line is missing from this extract.
 * Requests between min_vi_count and max_vi_count VIs; returns the VI base,
 * the count actually granted, and the VI shift when the firmware provides
 * the extended (EXT) response (shift is always zero on Huntington).
 * NOTE(review): extract is missing interleaved lines -- verify full source.
 */
611 static __checkReturn efx_rc_t
614 __in uint32_t min_vi_count,
615 __in uint32_t max_vi_count,
616 __out uint32_t *vi_basep,
617 __out uint32_t *vi_countp,
618 __out uint32_t *vi_shiftp)
621 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_ALLOC_VIS_IN_LEN,
622 MC_CMD_ALLOC_VIS_EXT_OUT_LEN);
625 if (vi_countp == NULL) {
630 req.emr_cmd = MC_CMD_ALLOC_VIS;
631 req.emr_in_buf = payload;
632 req.emr_in_length = MC_CMD_ALLOC_VIS_IN_LEN;
633 req.emr_out_buf = payload;
634 req.emr_out_length = MC_CMD_ALLOC_VIS_EXT_OUT_LEN;
636 MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MIN_VI_COUNT, min_vi_count);
637 MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MAX_VI_COUNT, max_vi_count);
639 efx_mcdi_execute(enp, &req);
641 if (req.emr_rc != 0) {
646 if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_OUT_LEN) {
651 *vi_basep = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_BASE);
652 *vi_countp = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_COUNT);
654 /* Report VI_SHIFT if available (always zero for Huntington) */
655 if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_EXT_OUT_LEN)
658 *vi_shiftp = MCDI_OUT_DWORD(req, ALLOC_VIS_EXT_OUT_VI_SHIFT);
667 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Free all VIs allocated to this function (MC_CMD_FREE_VIS) -- presumably
 * efx_mcdi_free_vis; the function-name line is missing from this extract.
 * The command carries no payload (static asserts below), and EALREADY is
 * tolerated since it just means no VIs were allocated.
 */
673 static __checkReturn efx_rc_t
680 EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_IN_LEN == 0);
681 EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_OUT_LEN == 0);
683 req.emr_cmd = MC_CMD_FREE_VIS;
684 req.emr_in_buf = NULL;
685 req.emr_in_length = 0;
686 req.emr_out_buf = NULL;
687 req.emr_out_length = 0;
689 efx_mcdi_execute_quiet(enp, &req);
691 /* Ignore ELREADY (no allocated VIs, so nothing to free) */
692 if ((req.emr_rc != 0) && (req.emr_rc != EALREADY)) {
700 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * efx_mcdi_alloc_piobuf: allocate one PIO buffer (MC_CMD_ALLOC_PIOBUF) and
 * return its firmware handle via *handlep. Executed "quiet" because
 * running out of PIO buffers is an expected condition.
 * NOTE(review): extract is missing interleaved lines -- verify full source.
 */
706 static __checkReturn efx_rc_t
707 efx_mcdi_alloc_piobuf(
709 __out efx_piobuf_handle_t *handlep)
712 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_ALLOC_PIOBUF_IN_LEN,
713 MC_CMD_ALLOC_PIOBUF_OUT_LEN);
716 if (handlep == NULL) {
721 req.emr_cmd = MC_CMD_ALLOC_PIOBUF;
722 req.emr_in_buf = payload;
723 req.emr_in_length = MC_CMD_ALLOC_PIOBUF_IN_LEN;
724 req.emr_out_buf = payload;
725 req.emr_out_length = MC_CMD_ALLOC_PIOBUF_OUT_LEN;
727 efx_mcdi_execute_quiet(enp, &req);
729 if (req.emr_rc != 0) {
734 if (req.emr_out_length_used < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
739 *handlep = MCDI_OUT_DWORD(req, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
748 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * efx_mcdi_free_piobuf: release a PIO buffer by firmware handle
 * (MC_CMD_FREE_PIOBUF). Counterpart to efx_mcdi_alloc_piobuf().
 * NOTE(review): extract is missing interleaved lines -- verify full source.
 */
753 static __checkReturn efx_rc_t
754 efx_mcdi_free_piobuf(
756 __in efx_piobuf_handle_t handle)
759 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FREE_PIOBUF_IN_LEN,
760 MC_CMD_FREE_PIOBUF_OUT_LEN);
763 req.emr_cmd = MC_CMD_FREE_PIOBUF;
764 req.emr_in_buf = payload;
765 req.emr_in_length = MC_CMD_FREE_PIOBUF_IN_LEN;
766 req.emr_out_buf = payload;
767 req.emr_out_length = MC_CMD_FREE_PIOBUF_OUT_LEN;
769 MCDI_IN_SET_DWORD(req, FREE_PIOBUF_IN_PIOBUF_HANDLE, handle);
771 efx_mcdi_execute_quiet(enp, &req);
773 if (req.emr_rc != 0) {
781 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * efx_mcdi_link_piobuf: bind a PIO buffer to a TxQ instance (VI index)
 * via MC_CMD_LINK_PIOBUF, enabling PIO writes from that queue.
 * NOTE(review): extract is missing interleaved lines -- verify full source.
 */
786 static __checkReturn efx_rc_t
787 efx_mcdi_link_piobuf(
789 __in uint32_t vi_index,
790 __in efx_piobuf_handle_t handle)
793 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_LINK_PIOBUF_IN_LEN,
794 MC_CMD_LINK_PIOBUF_OUT_LEN);
797 req.emr_cmd = MC_CMD_LINK_PIOBUF;
798 req.emr_in_buf = payload;
799 req.emr_in_length = MC_CMD_LINK_PIOBUF_IN_LEN;
800 req.emr_out_buf = payload;
801 req.emr_out_length = MC_CMD_LINK_PIOBUF_OUT_LEN;
803 MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_PIOBUF_HANDLE, handle);
804 MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);
806 efx_mcdi_execute(enp, &req);
808 if (req.emr_rc != 0) {
816 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * efx_mcdi_unlink_piobuf: unbind whatever PIO buffer is linked to the given
 * TxQ instance (MC_CMD_UNLINK_PIOBUF). Executed "quiet" -- an unlink on a
 * queue with nothing linked is not worth logging.
 * NOTE(review): extract is missing interleaved lines -- verify full source.
 */
821 static __checkReturn efx_rc_t
822 efx_mcdi_unlink_piobuf(
824 __in uint32_t vi_index)
827 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_UNLINK_PIOBUF_IN_LEN,
828 MC_CMD_UNLINK_PIOBUF_OUT_LEN);
831 req.emr_cmd = MC_CMD_UNLINK_PIOBUF;
832 req.emr_in_buf = payload;
833 req.emr_in_length = MC_CMD_UNLINK_PIOBUF_IN_LEN;
834 req.emr_out_buf = payload;
835 req.emr_out_length = MC_CMD_UNLINK_PIOBUF_OUT_LEN;
837 MCDI_IN_SET_DWORD(req, UNLINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);
839 efx_mcdi_execute_quiet(enp, &req);
841 if (req.emr_rc != 0) {
849 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * ef10_nic_alloc_piobufs: best-effort allocation of up to max_piobuf_count
 * PIO buffers, recording handles and clearing each buffer's sub-allocation
 * map; ena_piobuf_count tracks how many were obtained. On the (partially
 * visible) failure path, any buffers already obtained are freed and handles
 * invalidated, leaving the count at zero.
 * NOTE(review): extract is missing interleaved lines (return type line,
 * loop exits) -- verify against the complete file.
 */
855 ef10_nic_alloc_piobufs(
857 __in uint32_t max_piobuf_count)
859 efx_piobuf_handle_t *handlep;
862 EFSYS_ASSERT3U(max_piobuf_count, <=,
863 EFX_ARRAY_SIZE(enp->en_arch.ef10.ena_piobuf_handle));
865 enp->en_arch.ef10.ena_piobuf_count = 0;
867 for (i = 0; i < max_piobuf_count; i++) {
868 handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
870 if (efx_mcdi_alloc_piobuf(enp, handlep) != 0)
873 enp->en_arch.ef10.ena_pio_alloc_map[i] = 0;
874 enp->en_arch.ef10.ena_piobuf_count++;
880 for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
881 handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
883 (void) efx_mcdi_free_piobuf(enp, *handlep);
884 *handlep = EFX_PIOBUF_HANDLE_INVALID;
886 enp->en_arch.ef10.ena_piobuf_count = 0;
/*
 * ef10_nic_free_piobufs: free every allocated PIO buffer, invalidate the
 * stored handles, and reset the count. free_piobuf errors are ignored.
 * NOTE(review): extract is missing interleaved lines -- verify full source.
 */
891 ef10_nic_free_piobufs(
894 efx_piobuf_handle_t *handlep;
897 for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
898 handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
900 (void) efx_mcdi_free_piobuf(enp, *handlep);
901 *handlep = EFX_PIOBUF_HANDLE_INVALID;
903 enp->en_arch.ef10.ena_piobuf_count = 0;
906 /* Sub-allocate a block from a piobuf */
/*
 * Presumably ef10_nic_pio_alloc -- the function-name line is missing from
 * this extract; confirm against the complete file.
 * Scans each allocated piobuf's bitmap (ena_pio_alloc_map) for a free
 * block of edc_pio_alloc_size bytes; on success reports the buffer number,
 * firmware handle, block number, byte offset and size. Fails when PIO
 * sub-allocation is unconfigured or no piobufs exist.
 */
907 __checkReturn efx_rc_t
909 __inout efx_nic_t *enp,
910 __out uint32_t *bufnump,
911 __out efx_piobuf_handle_t *handlep,
912 __out uint32_t *blknump,
913 __out uint32_t *offsetp,
916 efx_nic_cfg_t *encp = &enp->en_nic_cfg;
917 efx_drv_cfg_t *edcp = &enp->en_drv_cfg;
918 uint32_t blk_per_buf;
922 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
923 enp->en_family == EFX_FAMILY_MEDFORD ||
924 enp->en_family == EFX_FAMILY_MEDFORD2);
925 EFSYS_ASSERT(bufnump);
926 EFSYS_ASSERT(handlep);
927 EFSYS_ASSERT(blknump);
928 EFSYS_ASSERT(offsetp);
931 if ((edcp->edc_pio_alloc_size == 0) ||
932 (enp->en_arch.ef10.ena_piobuf_count == 0)) {
936 blk_per_buf = encp->enc_piobuf_size / edcp->edc_pio_alloc_size;
938 for (buf = 0; buf < enp->en_arch.ef10.ena_piobuf_count; buf++) {
939 uint32_t *map = &enp->en_arch.ef10.ena_pio_alloc_map[buf];
/* One bitmap word per piobuf limits blocks to the word width. */
944 EFSYS_ASSERT3U(blk_per_buf, <=, (8 * sizeof (*map)));
945 for (blk = 0; blk < blk_per_buf; blk++) {
946 if ((*map & (1u << blk)) == 0) {
956 *handlep = enp->en_arch.ef10.ena_piobuf_handle[buf];
959 *sizep = edcp->edc_pio_alloc_size;
960 *offsetp = blk * (*sizep);
967 EFSYS_PROBE1(fail1, efx_rc_t, rc);
972 /* Free a piobuf sub-allocated block */
/*
 * Presumably ef10_nic_pio_free -- the function-name line is missing from
 * this extract; confirm against the complete file.
 * Validates bufnum/blknum, requires the block to be currently allocated in
 * the bitmap, then clears its bit to release it.
 */
973 __checkReturn efx_rc_t
975 __inout efx_nic_t *enp,
976 __in uint32_t bufnum,
977 __in uint32_t blknum)
982 if ((bufnum >= enp->en_arch.ef10.ena_piobuf_count) ||
983 (blknum >= (8 * sizeof (*map)))) {
988 map = &enp->en_arch.ef10.ena_pio_alloc_map[bufnum];
989 if ((*map & (1u << blknum)) == 0) {
993 *map &= ~(1u << blknum);
1000 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Thin wrapper linking a PIO buffer to a VI's TxQ -- presumably
 * ef10_nic_pio_link (name line missing from this extract); delegates
 * directly to efx_mcdi_link_piobuf().
 */
1005 __checkReturn efx_rc_t
1007 __inout efx_nic_t *enp,
1008 __in uint32_t vi_index,
1009 __in efx_piobuf_handle_t handle)
1011 return (efx_mcdi_link_piobuf(enp, vi_index, handle));
/*
 * ef10_nic_pio_unlink: thin wrapper delegating to efx_mcdi_unlink_piobuf()
 * to detach any PIO buffer from the given VI's TxQ.
 */
1014 __checkReturn efx_rc_t
1015 ef10_nic_pio_unlink(
1016 __inout efx_nic_t *enp,
1017 __in uint32_t vi_index)
1019 return (efx_mcdi_unlink_piobuf(enp, vi_index));
/*
 * ef10_mcdi_get_pf_count: query the number of PFs on the controller
 * (MC_CMD_GET_PF_COUNT). The response field is a single byte, read via
 * MCDI_OUT with an explicit offset. A zero count is asserted impossible.
 * NOTE(review): extract is missing interleaved lines -- verify full source.
 */
1022 static __checkReturn efx_rc_t
1023 ef10_mcdi_get_pf_count(
1024 __in efx_nic_t *enp,
1025 __out uint32_t *pf_countp)
1028 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PF_COUNT_IN_LEN,
1029 MC_CMD_GET_PF_COUNT_OUT_LEN);
1032 req.emr_cmd = MC_CMD_GET_PF_COUNT;
1033 req.emr_in_buf = payload;
1034 req.emr_in_length = MC_CMD_GET_PF_COUNT_IN_LEN;
1035 req.emr_out_buf = payload;
1036 req.emr_out_length = MC_CMD_GET_PF_COUNT_OUT_LEN;
1038 efx_mcdi_execute(enp, &req);
1040 if (req.emr_rc != 0) {
1045 if (req.emr_out_length_used < MC_CMD_GET_PF_COUNT_OUT_LEN) {
1050 *pf_countp = *MCDI_OUT(req, uint8_t,
1051 MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST);
1053 EFSYS_ASSERT(*pf_countp != 0);
1060 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * ef10_get_datapath_caps: populate the NIC config (encp) from the firmware
 * capability flags (MC_CMD_GET_CAPABILITIES, requesting the V5 response but
 * accepting any length >= the V1 response).
 * CAP_FLAGS1 tests a bit in FLAGS1; CAP_FLAGS2 tests FLAGS2 and is false on
 * firmware too old to report it (response shorter than the V2 length).
 * Later V3/V4/V5 fields (VI window mode, MAC stats count, filter MARK max)
 * are likewise gated on the response length, with family-based fallbacks.
 * NOTE(review): extract is missing interleaved lines (locals, fail labels,
 * returns, some break statements) -- verify against the complete file.
 */
1065 static __checkReturn efx_rc_t
1066 ef10_get_datapath_caps(
1067 __in efx_nic_t *enp)
1069 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1071 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_CAPABILITIES_IN_LEN,
1072 MC_CMD_GET_CAPABILITIES_V5_OUT_LEN);
1075 if ((rc = ef10_mcdi_get_pf_count(enp, &encp->enc_hw_pf_count)) != 0)
1079 req.emr_cmd = MC_CMD_GET_CAPABILITIES;
1080 req.emr_in_buf = payload;
1081 req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN;
1082 req.emr_out_buf = payload;
1083 req.emr_out_length = MC_CMD_GET_CAPABILITIES_V5_OUT_LEN;
1085 efx_mcdi_execute_quiet(enp, &req);
1087 if (req.emr_rc != 0) {
1092 if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
1097 #define CAP_FLAGS1(_req, _flag) \
1098 (MCDI_OUT_DWORD((_req), GET_CAPABILITIES_OUT_FLAGS1) & \
1099 (1u << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## _flag ## _LBN)))
1101 #define CAP_FLAGS2(_req, _flag) \
1102 (((_req).emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) && \
1103 (MCDI_OUT_DWORD((_req), GET_CAPABILITIES_V2_OUT_FLAGS2) & \
1104 (1u << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## _flag ## _LBN))))
1107 * Huntington RXDP firmware inserts a 0 or 14 byte prefix.
1108 * We only support the 14 byte prefix here.
1110 if (CAP_FLAGS1(req, RX_PREFIX_LEN_14) == 0) {
1114 encp->enc_rx_prefix_size = 14;
1116 #if EFSYS_OPT_RX_SCALE
1117 /* Check if the firmware supports additional RSS modes */
1118 if (CAP_FLAGS1(req, ADDITIONAL_RSS_MODES))
1119 encp->enc_rx_scale_additional_modes_supported = B_TRUE;
1121 encp->enc_rx_scale_additional_modes_supported = B_FALSE;
1122 #endif /* EFSYS_OPT_RX_SCALE */
1124 /* Check if the firmware supports TSO */
1125 if (CAP_FLAGS1(req, TX_TSO))
1126 encp->enc_fw_assisted_tso_enabled = B_TRUE;
1128 encp->enc_fw_assisted_tso_enabled = B_FALSE;
1130 /* Check if the firmware supports FATSOv2 */
1131 if (CAP_FLAGS2(req, TX_TSO_V2)) {
1132 encp->enc_fw_assisted_tso_v2_enabled = B_TRUE;
1133 encp->enc_fw_assisted_tso_v2_n_contexts = MCDI_OUT_WORD(req,
1134 GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS);
1136 encp->enc_fw_assisted_tso_v2_enabled = B_FALSE;
1137 encp->enc_fw_assisted_tso_v2_n_contexts = 0;
1140 /* Check if the firmware supports FATSOv2 encap */
1141 if (CAP_FLAGS2(req, TX_TSO_V2_ENCAP))
1142 encp->enc_fw_assisted_tso_v2_encap_enabled = B_TRUE;
1144 encp->enc_fw_assisted_tso_v2_encap_enabled = B_FALSE;
1146 /* Check if the firmware has vadapter/vport/vswitch support */
1147 if (CAP_FLAGS1(req, EVB))
1148 encp->enc_datapath_cap_evb = B_TRUE;
1150 encp->enc_datapath_cap_evb = B_FALSE;
1152 /* Check if the firmware supports VLAN insertion */
1153 if (CAP_FLAGS1(req, TX_VLAN_INSERTION))
1154 encp->enc_hw_tx_insert_vlan_enabled = B_TRUE;
1156 encp->enc_hw_tx_insert_vlan_enabled = B_FALSE;
1158 /* Check if the firmware supports RX event batching */
1159 if (CAP_FLAGS1(req, RX_BATCHING))
1160 encp->enc_rx_batching_enabled = B_TRUE;
1162 encp->enc_rx_batching_enabled = B_FALSE;
1165 * Even if batching isn't reported as supported, we may still get
1168 encp->enc_rx_batch_max = 16;
1170 /* Check if the firmware supports disabling scatter on RXQs */
1171 if (CAP_FLAGS1(req, RX_DISABLE_SCATTER))
1172 encp->enc_rx_disable_scatter_supported = B_TRUE;
1174 encp->enc_rx_disable_scatter_supported = B_FALSE;
1176 /* Check if the firmware supports packed stream mode */
1177 if (CAP_FLAGS1(req, RX_PACKED_STREAM))
1178 encp->enc_rx_packed_stream_supported = B_TRUE;
1180 encp->enc_rx_packed_stream_supported = B_FALSE;
1183 * Check if the firmware supports configurable buffer sizes
1184 * for packed stream mode (otherwise buffer size is 1Mbyte)
1186 if (CAP_FLAGS1(req, RX_PACKED_STREAM_VAR_BUFFERS))
1187 encp->enc_rx_var_packed_stream_supported = B_TRUE;
1189 encp->enc_rx_var_packed_stream_supported = B_FALSE;
1191 /* Check if the firmware supports equal stride super-buffer mode */
1192 if (CAP_FLAGS2(req, EQUAL_STRIDE_SUPER_BUFFER))
1193 encp->enc_rx_es_super_buffer_supported = B_TRUE;
1195 encp->enc_rx_es_super_buffer_supported = B_FALSE;
1197 /* Check if the firmware supports FW subvariant w/o Tx checksumming */
1198 if (CAP_FLAGS2(req, FW_SUBVARIANT_NO_TX_CSUM))
1199 encp->enc_fw_subvariant_no_tx_csum_supported = B_TRUE;
1201 encp->enc_fw_subvariant_no_tx_csum_supported = B_FALSE;
1203 /* Check if the firmware supports set mac with running filters */
1204 if (CAP_FLAGS1(req, VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED))
1205 encp->enc_allow_set_mac_with_installed_filters = B_TRUE;
1207 encp->enc_allow_set_mac_with_installed_filters = B_FALSE;
1210 * Check if firmware supports the extended MC_CMD_SET_MAC, which allows
1211 * specifying which parameters to configure.
1213 if (CAP_FLAGS1(req, SET_MAC_ENHANCED))
1214 encp->enc_enhanced_set_mac_supported = B_TRUE;
1216 encp->enc_enhanced_set_mac_supported = B_FALSE;
1219 * Check if firmware supports version 2 of MC_CMD_INIT_EVQ, which allows
1220 * us to let the firmware choose the settings to use on an EVQ.
1222 if (CAP_FLAGS2(req, INIT_EVQ_V2))
1223 encp->enc_init_evq_v2_supported = B_TRUE;
1225 encp->enc_init_evq_v2_supported = B_FALSE;
1228 * Check if firmware-verified NVRAM updates must be used.
1230 * The firmware trusted installer requires all NVRAM updates to use
1231 * version 2 of MC_CMD_NVRAM_UPDATE_START (to enable verified update)
1232 * and version 2 of MC_CMD_NVRAM_UPDATE_FINISH (to verify the updated
1233 * partition and report the result).
1235 if (CAP_FLAGS2(req, NVRAM_UPDATE_REPORT_VERIFY_RESULT))
1236 encp->enc_nvram_update_verify_result_supported = B_TRUE;
1238 encp->enc_nvram_update_verify_result_supported = B_FALSE;
1241 * Check if firmware provides packet memory and Rx datapath
1244 if (CAP_FLAGS1(req, PM_AND_RXDP_COUNTERS))
1245 encp->enc_pm_and_rxdp_counters = B_TRUE;
1247 encp->enc_pm_and_rxdp_counters = B_FALSE;
1250 * Check if the 40G MAC hardware is capable of reporting
1251 * statistics for Tx size bins.
1253 if (CAP_FLAGS2(req, MAC_STATS_40G_TX_SIZE_BINS))
1254 encp->enc_mac_stats_40g_tx_size_bins = B_TRUE;
1256 encp->enc_mac_stats_40g_tx_size_bins = B_FALSE;
1259 * Check if firmware supports VXLAN and NVGRE tunnels.
1260 * The capability indicates Geneve protocol support as well.
1262 if (CAP_FLAGS1(req, VXLAN_NVGRE)) {
1263 encp->enc_tunnel_encapsulations_supported =
1264 (1u << EFX_TUNNEL_PROTOCOL_VXLAN) |
1265 (1u << EFX_TUNNEL_PROTOCOL_GENEVE) |
1266 (1u << EFX_TUNNEL_PROTOCOL_NVGRE);
1268 EFX_STATIC_ASSERT(EFX_TUNNEL_MAXNENTRIES ==
1269 MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM);
1270 encp->enc_tunnel_config_udp_entries_max =
1271 EFX_TUNNEL_MAXNENTRIES;
1273 encp->enc_tunnel_config_udp_entries_max = 0;
1277 * Check if firmware reports the VI window mode.
1278 * Medford2 has a variable VI window size (8K, 16K or 64K).
1279 * Medford and Huntington have a fixed 8K VI window size.
1281 if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) {
1283 MCDI_OUT_BYTE(req, GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);
1286 case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
1287 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K;
1289 case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
1290 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_16K;
1292 case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
1293 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_64K;
1296 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_INVALID;
1299 } else if ((enp->en_family == EFX_FAMILY_HUNTINGTON) ||
1300 (enp->en_family == EFX_FAMILY_MEDFORD)) {
1301 /* Huntington and Medford have fixed 8K window size */
1302 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K;
1304 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_INVALID;
1307 /* Check if firmware supports extended MAC stats. */
1308 if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) {
1309 /* Extended stats buffer supported */
1310 encp->enc_mac_stats_nstats = MCDI_OUT_WORD(req,
1311 GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
1313 /* Use Siena-compatible legacy MAC stats */
1314 encp->enc_mac_stats_nstats = MC_CMD_MAC_NSTATS;
1317 if (encp->enc_mac_stats_nstats >= MC_CMD_MAC_NSTATS_V2)
1318 encp->enc_fec_counters = B_TRUE;
1320 encp->enc_fec_counters = B_FALSE;
1322 /* Check if the firmware provides head-of-line blocking counters */
1323 if (CAP_FLAGS2(req, RXDP_HLB_IDLE))
1324 encp->enc_hlb_counters = B_TRUE;
1326 encp->enc_hlb_counters = B_FALSE;
1328 #if EFSYS_OPT_RX_SCALE
1329 if (CAP_FLAGS1(req, RX_RSS_LIMITED)) {
1330 /* Only one exclusive RSS context is available per port. */
1331 encp->enc_rx_scale_max_exclusive_contexts = 1;
1333 switch (enp->en_family) {
1334 case EFX_FAMILY_MEDFORD2:
1335 encp->enc_rx_scale_hash_alg_mask =
1336 (1U << EFX_RX_HASHALG_TOEPLITZ);
1339 case EFX_FAMILY_MEDFORD:
1340 case EFX_FAMILY_HUNTINGTON:
1342 * Packed stream firmware variant maintains a
1343 * non-standard algorithm for hash computation.
1344 * It implies explicit XORing together
1345 * source + destination IP addresses (or last
1346 * four bytes in the case of IPv6) and using the
1347 * resulting value as the input to a Toeplitz hash.
1349 encp->enc_rx_scale_hash_alg_mask =
1350 (1U << EFX_RX_HASHALG_PACKED_STREAM);
1358 /* Port numbers cannot contribute to the hash value */
1359 encp->enc_rx_scale_l4_hash_supported = B_FALSE;
1362 * Maximum number of exclusive RSS contexts.
1363 * EF10 hardware supports 64 in total, but 6 are reserved
1364 * for shared contexts. They are a global resource so
1365 * not all may be available.
1367 encp->enc_rx_scale_max_exclusive_contexts = 64 - 6;
1369 encp->enc_rx_scale_hash_alg_mask =
1370 (1U << EFX_RX_HASHALG_TOEPLITZ);
1373 * It is possible to use port numbers as
1374 * the input data for hash computation.
1376 encp->enc_rx_scale_l4_hash_supported = B_TRUE;
1378 #endif /* EFSYS_OPT_RX_SCALE */
1380 /* Check if the firmware supports "FLAG" and "MARK" filter actions */
1381 if (CAP_FLAGS2(req, FILTER_ACTION_FLAG))
1382 encp->enc_filter_action_flag_supported = B_TRUE;
1384 encp->enc_filter_action_flag_supported = B_FALSE;
1386 if (CAP_FLAGS2(req, FILTER_ACTION_MARK))
1387 encp->enc_filter_action_mark_supported = B_TRUE;
1389 encp->enc_filter_action_mark_supported = B_FALSE;
1391 /* Get maximum supported value for "MARK" filter action */
1392 if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V5_OUT_LEN)
1393 encp->enc_filter_action_mark_max = MCDI_OUT_DWORD(req,
1394 GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX);
1396 encp->enc_filter_action_mark_max = 0;
1403 #if EFSYS_OPT_RX_SCALE
1406 #endif /* EFSYS_OPT_RX_SCALE */
1414 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Fallback privilege masks for firmware that does not implement
 * MC_CMD_PRIVILEGE_MASK: a legacy PF is assumed to hold every privilege
 * group listed below; a legacy VF is assumed to hold none.
 */
1420 #define EF10_LEGACY_PF_PRIVILEGE_MASK \
1421 (MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN | \
1422 MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK | \
1423 MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD | \
1424 MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP | \
1425 MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS | \
1426 MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING | \
1427 MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST | \
1428 MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST | \
1429 MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST | \
1430 MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST | \
1431 MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS)
1433 #define EF10_LEGACY_VF_PRIVILEGE_MASK 0
/*
 * ef10_get_privilege_mask: obtain this function's privilege mask via
 * efx_mcdi_privilege_mask(); if the firmware is too old to support that
 * command, fall back to the legacy assumption that a PF has the full admin
 * mask and a VF has none.
 * NOTE(review): extract is missing interleaved lines (error classification,
 * *maskp assignment, return) -- verify against the complete file.
 */
1436 __checkReturn efx_rc_t
1437 ef10_get_privilege_mask(
1438 __in efx_nic_t *enp,
1439 __out uint32_t *maskp)
1441 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1445 if ((rc = efx_mcdi_privilege_mask(enp, encp->enc_pf, encp->enc_vf,
1450 /* Fallback for old firmware without privilege mask support */
1451 if (EFX_PCI_FUNCTION_IS_PF(encp)) {
1452 /* Assume PF has admin privilege */
1453 mask = EF10_LEGACY_PF_PRIVILEGE_MASK;
1455 /* VF is always unprivileged by default */
1456 mask = EF10_LEGACY_VF_PRIVILEGE_MASK;
1465 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1471 #define EFX_EXT_PORT_MAX 4
1472 #define EFX_EXT_PORT_NA 0xFF
1475 * Table of mapping schemes from port number to external number.
1477 * Each port number ultimately corresponds to a connector: either as part of
1478 * a cable assembly attached to a module inserted in an SFP+/QSFP+ cage on
1479 * the board, or fixed to the board (e.g. 10GBASE-T magjack on SFN5121T
1480 * "Salina"). In general:
1482 * Port number (0-based)
1484 * port mapping (n:1)
1487 * External port number (1-based)
1489 * fixed (1:1) or cable assembly (1:m)
1494 * The external numbering refers to the cages or magjacks on the board,
1495 * as visibly annotated on the board or back panel. This table describes
1496 * how to determine which external cage/magjack corresponds to the port
1497 * numbers used by the driver.
1499 * The count of consecutive port numbers that map to each external number,
1500 * is determined by the chip family and the current port mode.
1502 * For the Huntington family, the current port mode cannot be discovered,
1503 * but a single mapping is used by all modes for a given chip variant,
1504 * so the mapping used is instead the last match in the table to the full
1505 * set of port modes to which the NIC can be configured. Therefore the
1506 * ordering of entries in the mapping table is significant.
/*
 * Mapping table from (chip family, port mode mask) to the zero-based
 * base port number of each external cage/magjack. base_port[] is
 * indexed by external port (cage) slot; EFX_EXT_PORT_NA marks a slot
 * with no ports assigned. Matching and ordering semantics are
 * described in the block comment above this table.
 */
1508 static struct ef10_external_port_map_s {
1509 efx_family_t family;
1510 uint32_t modes_mask;
1511 uint8_t base_port[EFX_EXT_PORT_MAX];
1512 } __ef10_external_port_mappings[] = {
1514 * Modes used by Huntington family controllers where each port
1515 * number maps to a separate cage.
1516 * SFN7x22F (Torino):
1526 EFX_FAMILY_HUNTINGTON,
1527 (1U << TLV_PORT_MODE_10G) | /* mode 0 */
1528 (1U << TLV_PORT_MODE_10G_10G) | /* mode 2 */
1529 (1U << TLV_PORT_MODE_10G_10G_10G_10G), /* mode 4 */
1533 * Modes which for Huntington identify a chip variant where 2
1534 * adjacent port numbers map to each cage.
1542 EFX_FAMILY_HUNTINGTON,
1543 (1U << TLV_PORT_MODE_40G) | /* mode 1 */
1544 (1U << TLV_PORT_MODE_40G_40G) | /* mode 3 */
1545 (1U << TLV_PORT_MODE_40G_10G_10G) | /* mode 6 */
1546 (1U << TLV_PORT_MODE_10G_10G_40G), /* mode 7 */
1547 { 0, 2, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1550 * Modes that on Medford allocate each port number to a separate
1559 (1U << TLV_PORT_MODE_1x1_NA) | /* mode 0 */
1560 (1U << TLV_PORT_MODE_1x1_1x1), /* mode 2 */
1564 * Modes that on Medford allocate 2 adjacent port numbers to each
1573 (1U << TLV_PORT_MODE_1x4_NA) | /* mode 1 */
1574 (1U << TLV_PORT_MODE_1x4_1x4) | /* mode 3 */
1575 (1U << TLV_PORT_MODE_1x4_2x1) | /* mode 6 */
1576 (1U << TLV_PORT_MODE_2x1_1x4) | /* mode 7 */
1577 /* Do not use 10G_10G_10G_10G_Q1_Q2 (see bug63270) */
1578 (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2), /* mode 9 */
1579 { 0, 2, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1582 * Modes that on Medford allocate 4 adjacent port numbers to each
1583 * connector, starting on cage 1.
1591 (1U << TLV_PORT_MODE_2x1_2x1) | /* mode 5 */
1592 /* Do not use 10G_10G_10G_10G_Q1 (see bug63270) */
1593 (1U << TLV_PORT_MODE_4x1_NA), /* mode 4 */
1594 { 0, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1597 * Modes that on Medford allocate 4 adjacent port numbers to each
1598 * connector, starting on cage 2.
1606 (1U << TLV_PORT_MODE_NA_4x1), /* mode 8 */
1607 { EFX_EXT_PORT_NA, 0, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1610 * Modes that on Medford2 allocate each port number to a separate
1618 EFX_FAMILY_MEDFORD2,
1619 (1U << TLV_PORT_MODE_1x1_NA) | /* mode 0 */
1620 (1U << TLV_PORT_MODE_1x4_NA) | /* mode 1 */
1621 (1U << TLV_PORT_MODE_1x1_1x1) | /* mode 2 */
1622 (1U << TLV_PORT_MODE_1x2_NA) | /* mode 10 */
1623 (1U << TLV_PORT_MODE_1x2_1x2) | /* mode 12 */
1624 (1U << TLV_PORT_MODE_1x4_1x2) | /* mode 15 */
1625 (1U << TLV_PORT_MODE_1x2_1x4), /* mode 16 */
1629 * Modes that on Medford2 allocate 1 port to cage 1 and the rest
1636 EFX_FAMILY_MEDFORD2,
1637 (1U << TLV_PORT_MODE_1x2_2x1) | /* mode 17 */
1638 (1U << TLV_PORT_MODE_1x4_2x1), /* mode 6 */
1639 { 0, 1, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1642 * Modes that on Medford2 allocate 2 adjacent port numbers to each
1643 * cage, starting on cage 1.
1650 EFX_FAMILY_MEDFORD2,
1651 (1U << TLV_PORT_MODE_1x4_1x4) | /* mode 3 */
1652 (1U << TLV_PORT_MODE_2x1_2x1) | /* mode 4 */
1653 (1U << TLV_PORT_MODE_1x4_2x1) | /* mode 6 */
1654 (1U << TLV_PORT_MODE_2x1_1x4) | /* mode 7 */
1655 (1U << TLV_PORT_MODE_2x2_NA) | /* mode 13 */
1656 (1U << TLV_PORT_MODE_2x1_1x2), /* mode 18 */
1657 { 0, 2, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1660 * Modes that on Medford2 allocate 2 adjacent port numbers to each
1661 * cage, starting on cage 2.
1666 EFX_FAMILY_MEDFORD2,
1667 (1U << TLV_PORT_MODE_NA_2x2), /* mode 14 */
1668 { EFX_EXT_PORT_NA, 0, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1671 * Modes that on Medford2 allocate 4 adjacent port numbers to each
1672 * connector, starting on cage 1.
1679 EFX_FAMILY_MEDFORD2,
1680 (1U << TLV_PORT_MODE_4x1_NA), /* mode 5 */
1681 { 0, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1684 * Modes that on Medford2 allocate 4 adjacent port numbers to each
1685 * connector, starting on cage 2.
1692 EFX_FAMILY_MEDFORD2,
1693 (1U << TLV_PORT_MODE_NA_4x1) | /* mode 8 */
1694 (1U << TLV_PORT_MODE_NA_1x2), /* mode 11 */
1695 { EFX_EXT_PORT_NA, 0, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
/*
 * ef10_external_port_mapping - translate a zero-based MCDI port number
 * into the one-based external cage/magjack number reported in
 * *external_portp.
 *
 * Prefers the current port mode reported by firmware; when that is
 * unavailable (Huntington), infers the mapping from the full set of
 * available modes via __ef10_external_port_mappings; when no mode
 * information exists at all, falls back to a 1:1 mapping.
 */
1699 static __checkReturn efx_rc_t
1700 ef10_external_port_mapping(
1701 __in efx_nic_t *enp,
1703 __out uint8_t *external_portp)
1707 uint32_t port_modes;
1710 struct ef10_external_port_map_s *mapp = NULL;
1711 int ext_index = port; /* Default 1-1 mapping */
/*
 * NOTE(review): "¤t" below is a mis-encoded "&current"
 * ("&curren;" HTML entity corruption) — restore the original byte
 * sequence when fixing the file's encoding.
 */
1713 if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, ¤t,
1716 * No current port mode information (i.e. Huntington)
1717 * - infer mapping from available modes
1719 if ((rc = efx_mcdi_get_port_modes(enp,
1720 &port_modes, NULL, NULL)) != 0) {
1722 * No port mode information available
1723 * - use default mapping
1728 /* Only need to scan the current mode */
1729 port_modes = 1 << current;
1733 * Infer the internal port -> external number mapping from
1734 * the possible port modes for this NIC.
1736 for (i = 0; i < EFX_ARRAY_SIZE(__ef10_external_port_mappings); ++i) {
1737 struct ef10_external_port_map_s *eepmp =
1738 &__ef10_external_port_mappings[i];
1739 if (eepmp->family != enp->en_family)
1741 matches = (eepmp->modes_mask & port_modes);
1744 * Some modes match. For some Huntington boards
1745 * there will be multiple matches. The mapping on the
1746 * last match is used.
1749 port_modes &= ~matches;
1753 if (port_modes != 0) {
1754 /* Some advertised modes are not supported */
1762 * External ports are assigned a sequence of consecutive
1763 * port numbers, so find the one with the closest base_port.
1765 uint32_t delta = EFX_EXT_PORT_NA;
1767 for (i = 0; i < EFX_EXT_PORT_MAX; i++) {
1768 uint32_t base = mapp->base_port[i];
1769 if ((base != EFX_EXT_PORT_NA) && (base <= port)) {
1770 if ((port - base) < delta) {
1771 delta = (port - base);
/* External port numbers are 1-based; cage index is 0-based. */
1777 *external_portp = (uint8_t)(ext_index + 1);
1782 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Probe-time board configuration. Fills fields of enp->en_port and
 * enp->en_nic_cfg from firmware:
 *  - MCDI port assignment (zero-based) and derived one-based MCDI port
 *    and external port number;
 *  - PCIe PF/VF numbers (for per-function privilege/config lookups);
 *  - MAC address (PF or VF path, with a sanity check that a PF address
 *    is not locally administered unless unconfigured NICs are allowed);
 *  - legacy board type (best effort — unprivileged functions may fail);
 *  - PHY capabilities, adding *_FEC_REQUESTED bits implied by the
 *    corresponding *_FEC bits, and the default advertised capabilities;
 *  - datapath capabilities, DMA/TSO limits and VI resource limits;
 *  - interrupt vector range (errors ignored on a VF);
 *  - the current privilege mask (informational only — see comment);
 *  - finally the controller-specific eno_board_cfg hook.
 */
1787 static __checkReturn efx_rc_t
1789 __in efx_nic_t *enp)
1791 const efx_nic_ops_t *enop = enp->en_enop;
1792 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
1793 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1794 ef10_link_state_t els;
1795 efx_port_t *epp = &(enp->en_port);
1796 uint32_t board_type = 0;
1797 uint32_t base, nvec;
1802 uint8_t mac_addr[6] = { 0 };
1805 /* Get the (zero-based) MCDI port number */
1806 if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0)
1809 /* EFX MCDI interface uses one-based port numbers */
1810 emip->emi_port = port + 1;
1812 if ((rc = ef10_external_port_mapping(enp, port,
1813 &encp->enc_external_port)) != 0)
1817 * Get PCIe function number from firmware (used for
1818 * per-function privilege and dynamic config info).
1819 * - PCIe PF: pf = PF number, vf = 0xffff.
1820 * - PCIe VF: pf = parent PF, vf = VF number.
1822 if ((rc = efx_mcdi_get_function_info(enp, &pf, &vf)) != 0)
1828 /* MAC address for this function */
1829 if (EFX_PCI_FUNCTION_IS_PF(encp)) {
1830 rc = efx_mcdi_get_mac_address_pf(enp, mac_addr);
1831 #if EFSYS_OPT_ALLOW_UNCONFIGURED_NIC
1833 * Disable static config checking, ONLY for manufacturing test
1834 * and setup at the factory, to allow the static config to be
1837 #else /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
/* 0x02 in the first octet marks a locally administered address */
1838 if ((rc == 0) && (mac_addr[0] & 0x02)) {
1840 * If the static config does not include a global MAC
1841 * address pool then the board may return a locally
1842 * administered MAC address (this should only happen on
1843 * incorrectly programmed boards).
1847 #endif /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
1849 rc = efx_mcdi_get_mac_address_vf(enp, mac_addr);
1854 EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr);
1856 /* Board configuration (legacy) */
1857 rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL);
1859 /* Unprivileged functions may not be able to read board cfg */
1866 encp->enc_board_type = board_type;
1867 encp->enc_clk_mult = 1; /* not used for EF10 */
1869 /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */
1870 if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0)
1874 * Firmware with support for *_FEC capability bits does not
1875 * report that the corresponding *_FEC_REQUESTED bits are supported.
1876 * Add them here so that drivers understand that they are supported.
1878 if (epp->ep_phy_cap_mask & (1u << EFX_PHY_CAP_BASER_FEC))
1879 epp->ep_phy_cap_mask |=
1880 (1u << EFX_PHY_CAP_BASER_FEC_REQUESTED);
1881 if (epp->ep_phy_cap_mask & (1u << EFX_PHY_CAP_RS_FEC))
1882 epp->ep_phy_cap_mask |=
1883 (1u << EFX_PHY_CAP_RS_FEC_REQUESTED);
1884 if (epp->ep_phy_cap_mask & (1u << EFX_PHY_CAP_25G_BASER_FEC))
1885 epp->ep_phy_cap_mask |=
1886 (1u << EFX_PHY_CAP_25G_BASER_FEC_REQUESTED);
1888 /* Obtain the default PHY advertised capabilities */
1889 if ((rc = ef10_phy_get_link(enp, &els)) != 0)
1891 epp->ep_default_adv_cap_mask = els.epls.epls_adv_cap_mask;
1892 epp->ep_adv_cap_mask = els.epls.epls_adv_cap_mask;
1894 /* Check capabilities of running datapath firmware */
1895 if ((rc = ef10_get_datapath_caps(enp)) != 0)
1898 /* Alignment for WPTR updates */
1899 encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN;
/*
 * NOTE(review): an RX field mask (ESF_DZ_RX_KER_BYTE_CNT) is used to
 * size the TX DMA descriptor limit — presumably the RX and TX byte
 * count fields are the same width; confirm against the register spec.
 */
1901 encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_RX_KER_BYTE_CNT);
1902 /* No boundary crossing limits */
1903 encp->enc_tx_dma_desc_boundary = 0;
1906 * Maximum number of bytes into the frame the TCP header can start for
1907 * firmware assisted TSO to work.
1909 encp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT;
1912 * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use
1913 * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available
1914 * resources (allocated to this PCIe function), which is zero until
1915 * after we have allocated VIs.
1917 encp->enc_evq_limit = 1024;
1918 encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET;
1919 encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET;
1921 encp->enc_buftbl_limit = 0xFFFFFFFF;
1923 /* Get interrupt vector limits */
1924 if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) {
1925 if (EFX_PCI_FUNCTION_IS_PF(encp))
1928 /* Ignore error (cannot query vector limits from a VF). */
1932 encp->enc_intr_vec_base = base;
1933 encp->enc_intr_limit = nvec;
1936 * Get the current privilege mask. Note that this may be modified
1937 * dynamically, so this value is informational only. DO NOT use
1938 * the privilege mask to check for sufficient privileges, as that
1939 * can result in time-of-check/time-of-use bugs.
1941 if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0)
1943 encp->enc_privilege_mask = mask;
1945 /* Get remaining controller-specific board config */
1946 if ((rc = enop->eno_board_cfg(enp)) != 0)
1953 EFSYS_PROBE(fail11);
1955 EFSYS_PROBE(fail10);
1973 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * NIC probe. Clears any pending firmware assertion state, attaches the
 * driver to firmware, runs board configuration (ef10_nic_board_cfg),
 * then sets default driver limits: a fixed VI count (capped at 128),
 * with PIO buffers disabled until the client driver configures them.
 * Optionally wipes MAC statistics, fetches loopback modes and builds
 * monitor (sensor) config — sensor access may fail for unprivileged
 * functions.
 */
1978 __checkReturn efx_rc_t
1980 __in efx_nic_t *enp)
1982 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1983 efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
1986 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
1987 enp->en_family == EFX_FAMILY_MEDFORD ||
1988 enp->en_family == EFX_FAMILY_MEDFORD2);
1990 /* Read and clear any assertion state */
1991 if ((rc = efx_mcdi_read_assertion(enp)) != 0)
1994 /* Exit the assertion handler */
1995 if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
1999 if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0)
2002 if ((rc = ef10_nic_board_cfg(enp)) != 0)
2006 * Set default driver config limits (based on board config).
2008 * FIXME: For now allocate a fixed number of VIs which is likely to be
2009 * sufficient and small enough to allow multiple functions on the same
2012 edcp->edc_min_vi_count = edcp->edc_max_vi_count =
2013 MIN(128, MAX(encp->enc_rxq_limit, encp->enc_txq_limit));
2015 /* The client driver must configure and enable PIO buffer support */
2016 edcp->edc_max_piobuf_count = 0;
2017 edcp->edc_pio_alloc_size = 0;
2019 #if EFSYS_OPT_MAC_STATS
2020 /* Wipe the MAC statistics */
2021 if ((rc = efx_mcdi_mac_stats_clear(enp)) != 0)
2025 #if EFSYS_OPT_LOOPBACK
2026 if ((rc = efx_mcdi_get_loopback_modes(enp)) != 0)
2030 #if EFSYS_OPT_MON_STATS
2031 if ((rc = mcdi_mon_cfg_build(enp)) != 0) {
2032 /* Unprivileged functions do not have access to sensors */
2038 encp->enc_features = enp->en_features;
2042 #if EFSYS_OPT_MON_STATS
2046 #if EFSYS_OPT_LOOPBACK
2050 #if EFSYS_OPT_MAC_STATS
2061 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * ef10_nic_set_drv_limits - derive driver VI and PIO limits from the
 * client-supplied limits (edlp), clamped to the hardware limits in the
 * NIC config.
 *
 * The min/max VI counts are the largest of the clamped EVQ/RXQ/TXQ
 * counts (one VI provides one of each). PIO is optional: if any PIO
 * limit is zero or inconsistent, PIO support is simply disabled rather
 * than failing.
 */
2066 __checkReturn efx_rc_t
2067 ef10_nic_set_drv_limits(
2068 __inout efx_nic_t *enp,
2069 __in efx_drv_limits_t *edlp)
2071 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
2072 efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
2073 uint32_t min_evq_count, max_evq_count;
2074 uint32_t min_rxq_count, max_rxq_count;
2075 uint32_t min_txq_count, max_txq_count;
2083 /* Get minimum required and maximum usable VI limits */
2084 min_evq_count = MIN(edlp->edl_min_evq_count, encp->enc_evq_limit);
2085 min_rxq_count = MIN(edlp->edl_min_rxq_count, encp->enc_rxq_limit);
2086 min_txq_count = MIN(edlp->edl_min_txq_count, encp->enc_txq_limit);
2088 edcp->edc_min_vi_count =
2089 MAX(min_evq_count, MAX(min_rxq_count, min_txq_count));
2091 max_evq_count = MIN(edlp->edl_max_evq_count, encp->enc_evq_limit);
2092 max_rxq_count = MIN(edlp->edl_max_rxq_count, encp->enc_rxq_limit);
2093 max_txq_count = MIN(edlp->edl_max_txq_count, encp->enc_txq_limit);
2095 edcp->edc_max_vi_count =
2096 MAX(max_evq_count, MAX(max_rxq_count, max_txq_count));
2099 * Check limits for sub-allocated piobuf blocks.
2100 * PIO is optional, so don't fail if the limits are incorrect.
2102 if ((encp->enc_piobuf_size == 0) ||
2103 (encp->enc_piobuf_limit == 0) ||
2104 (edlp->edl_min_pio_alloc_size == 0) ||
2105 (edlp->edl_min_pio_alloc_size > encp->enc_piobuf_size)) {
2107 edcp->edc_max_piobuf_count = 0;
2108 edcp->edc_pio_alloc_size = 0;
2110 uint32_t blk_size, blk_count, blks_per_piobuf;
/* Block size: at least the hardware minimum allocation size */
2113 MAX(edlp->edl_min_pio_alloc_size,
2114 encp->enc_piobuf_min_alloc_size);
2116 blks_per_piobuf = encp->enc_piobuf_size / blk_size;
2117 EFSYS_ASSERT3U(blks_per_piobuf, <=, 32);
2119 blk_count = (encp->enc_piobuf_limit * blks_per_piobuf);
2121 /* A zero max pio alloc count means unlimited */
2122 if ((edlp->edl_max_pio_alloc_count > 0) &&
2123 (edlp->edl_max_pio_alloc_count < blk_count)) {
2124 blk_count = edlp->edl_max_pio_alloc_count;
2127 edcp->edc_pio_alloc_size = blk_size;
/* Round up: enough whole piobufs to cover blk_count blocks */
2128 edcp->edc_max_piobuf_count =
2129 (blk_count + (blks_per_piobuf - 1)) / blks_per_piobuf;
2135 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * NIC reset via MC_CMD_ENTITY_RESET with the function-level resource
 * reset flag set. Firmware assertion state is read and cleared first so
 * this path can recover from BADASSERT failures. On success the RX/TX
 * DMA queue error flags are cleared from en_reset_flags.
 */
2141 __checkReturn efx_rc_t
2143 __in efx_nic_t *enp)
2146 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_ENTITY_RESET_IN_LEN,
2147 MC_CMD_ENTITY_RESET_OUT_LEN);
2150 /* ef10_nic_reset() is called to recover from BADASSERT failures. */
2151 if ((rc = efx_mcdi_read_assertion(enp)) != 0)
2153 if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
2156 req.emr_cmd = MC_CMD_ENTITY_RESET;
2157 req.emr_in_buf = payload;
2158 req.emr_in_length = MC_CMD_ENTITY_RESET_IN_LEN;
2159 req.emr_out_buf = payload;
2160 req.emr_out_length = MC_CMD_ENTITY_RESET_OUT_LEN;
2162 MCDI_IN_POPULATE_DWORD_1(req, ENTITY_RESET_IN_FLAG,
2163 ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);
2165 efx_mcdi_execute(enp, &req);
2167 if (req.emr_rc != 0) {
2172 /* Clear RX/TX DMA queue errors */
2173 enp->en_reset_flags &= ~(EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR);
2182 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * NIC init. Enables MCDI event logging, allocates optional on-chip PIO
 * buffers, frees any VIs left over from a previous driver, and reserves
 * this function's VI span (EVQ+RXQ+TXQ) with extra VIs for PIO writes.
 * Computes the UC (VI register) and WC (PIO write) BAR mapping extents,
 * links each piobuf to one of the extra VIs, and finally allocates a
 * vAdaptor on the assigned EVB port — retrying with exponential backoff
 * on a VF whose parent PF has not yet brought up the EVB port.
 */
2187 __checkReturn efx_rc_t
2189 __in efx_nic_t *enp)
2191 efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
2192 uint32_t min_vi_count, max_vi_count;
2193 uint32_t vi_count, vi_base, vi_shift;
2197 uint32_t vi_window_size;
2200 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
2201 enp->en_family == EFX_FAMILY_MEDFORD ||
2202 enp->en_family == EFX_FAMILY_MEDFORD2);
2204 /* Enable reporting of some events (e.g. link change) */
2205 if ((rc = efx_mcdi_log_ctrl(enp)) != 0)
2208 /* Allocate (optional) on-chip PIO buffers */
2209 ef10_nic_alloc_piobufs(enp, edcp->edc_max_piobuf_count)
2212 * For best performance, PIO writes should use a write-combined
2213 * (WC) memory mapping. Using a separate WC mapping for the PIO
2214 * aperture of each VI would be a burden to drivers (and not
2215 * possible if the host page size is >4Kbyte).
2217 * To avoid this we use a single uncached (UC) mapping for VI
2218 * register access, and a single WC mapping for extra VIs used
2221 * Each piobuf must be linked to a VI in the WC mapping, and to
2222 * each VI that is using a sub-allocated block from the piobuf.
2224 min_vi_count = edcp->edc_min_vi_count;
2226 edcp->edc_max_vi_count + enp->en_arch.ef10.ena_piobuf_count;
2228 /* Ensure that the previously attached driver's VIs are freed */
2229 if ((rc = efx_mcdi_free_vis(enp)) != 0)
2233 * Reserve VI resources (EVQ+RXQ+TXQ) for this PCIe function. If this
2234 * fails then retrying the request for fewer VI resources may succeed.
2237 if ((rc = efx_mcdi_alloc_vis(enp, min_vi_count, max_vi_count,
2238 &vi_base, &vi_count, &vi_shift)) != 0)
2241 EFSYS_PROBE2(vi_alloc, uint32_t, vi_base, uint32_t, vi_count);
2243 if (vi_count < min_vi_count) {
2248 enp->en_arch.ef10.ena_vi_base = vi_base;
2249 enp->en_arch.ef10.ena_vi_count = vi_count;
2250 enp->en_arch.ef10.ena_vi_shift = vi_shift;
2252 if (vi_count < min_vi_count + enp->en_arch.ef10.ena_piobuf_count) {
2253 /* Not enough extra VIs to map piobufs */
2254 ef10_nic_free_piobufs(enp);
/* Extra VIs at the top of the span are dedicated to PIO writes */
2257 enp->en_arch.ef10.ena_pio_write_vi_base =
2258 vi_count - enp->en_arch.ef10.ena_piobuf_count;
2260 EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, !=,
2261 EFX_VI_WINDOW_SHIFT_INVALID);
2262 EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, <=,
2263 EFX_VI_WINDOW_SHIFT_64K);
2264 vi_window_size = 1U << enp->en_nic_cfg.enc_vi_window_shift;
2266 /* Save UC memory mapping details */
2267 enp->en_arch.ef10.ena_uc_mem_map_offset = 0;
2268 if (enp->en_arch.ef10.ena_piobuf_count > 0) {
2269 enp->en_arch.ef10.ena_uc_mem_map_size =
2271 enp->en_arch.ef10.ena_pio_write_vi_base);
2273 enp->en_arch.ef10.ena_uc_mem_map_size =
2275 enp->en_arch.ef10.ena_vi_count);
2278 /* Save WC memory mapping details */
2279 enp->en_arch.ef10.ena_wc_mem_map_offset =
2280 enp->en_arch.ef10.ena_uc_mem_map_offset +
2281 enp->en_arch.ef10.ena_uc_mem_map_size;
2283 enp->en_arch.ef10.ena_wc_mem_map_size =
2285 enp->en_arch.ef10.ena_piobuf_count);
2287 /* Link piobufs to extra VIs in WC mapping */
2288 if (enp->en_arch.ef10.ena_piobuf_count > 0) {
2289 for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
2290 rc = efx_mcdi_link_piobuf(enp,
2291 enp->en_arch.ef10.ena_pio_write_vi_base + i,
2292 enp->en_arch.ef10.ena_piobuf_handle[i]);
2299 * Allocate a vAdaptor attached to our upstream vPort/pPort.
2301 * On a VF, this may fail with MC_CMD_ERR_NO_EVB_PORT (ENOENT) if the PF
2302 * driver has yet to bring up the EVB port. See bug 56147. In this case,
2303 * retry the request several times after waiting a while. The wait time
2304 * between retries starts small (10ms) and exponentially increases.
2305 * Total wait time is a little over two seconds. Retry logic in the
2306 * client driver may mean this whole loop is repeated if it continues to
2311 while ((rc = efx_mcdi_vadaptor_alloc(enp, EVB_PORT_ID_ASSIGNED)) != 0) {
2312 if (EFX_PCI_FUNCTION_IS_PF(&enp->en_nic_cfg) ||
2315 * Do not retry alloc for PF, or for other errors on
2321 /* VF startup before PF is ready. Retry allocation. */
2323 /* Too many attempts */
2327 EFSYS_PROBE1(mcdi_no_evb_port_retry, int, retry);
2328 EFSYS_SLEEP(delay_us);
/* Exponential backoff, capped at 500ms between attempts */
2330 if (delay_us < 500000)
2334 enp->en_vport_id = EVB_PORT_ID_ASSIGNED;
2335 enp->en_nic_cfg.enc_mcdi_max_payload_length = MCDI_CTL_SDU_LEN_MAX_V2;
2350 ef10_nic_free_piobufs(enp);
2353 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * ef10_nic_get_vi_pool - report in *vi_countp the number of VIs
 * available to the client driver, excluding the extra VIs reserved for
 * PIO buffer writes (all VIs below ena_pio_write_vi_base are usable).
 */
2358 __checkReturn efx_rc_t
2359 ef10_nic_get_vi_pool(
2360 __in efx_nic_t *enp,
2361 __out uint32_t *vi_countp)
2363 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
2364 enp->en_family == EFX_FAMILY_MEDFORD ||
2365 enp->en_family == EFX_FAMILY_MEDFORD2);
2368 * Report VIs that the client driver can use.
2369 * Do not include VIs used for PIO buffer writes.
2371 *vi_countp = enp->en_arch.ef10.ena_pio_write_vi_base;
/*
 * ef10_nic_get_bar_region - return the BAR offset and size of the
 * requested memory-mapped region: the UC mapping for VI registers, or
 * the WC mapping used for PIO buffer writes. The extents were computed
 * during NIC init.
 */
2376 __checkReturn efx_rc_t
2377 ef10_nic_get_bar_region(
2378 __in efx_nic_t *enp,
2379 __in efx_nic_region_t region,
2380 __out uint32_t *offsetp,
2381 __out size_t *sizep)
2385 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
2386 enp->en_family == EFX_FAMILY_MEDFORD ||
2387 enp->en_family == EFX_FAMILY_MEDFORD2);
2390 * TODO: Specify host memory mapping alignment and granularity
2391 * in efx_drv_limits_t so that they can be taken into account
2392 * when allocating extra VIs for PIO writes.
2396 /* UC mapped memory BAR region for VI registers */
2397 *offsetp = enp->en_arch.ef10.ena_uc_mem_map_offset;
2398 *sizep = enp->en_arch.ef10.ena_uc_mem_map_size;
2401 case EFX_REGION_PIO_WRITE_VI:
2402 /* WC mapped memory BAR region for piobuf writes */
2403 *offsetp = enp->en_arch.ef10.ena_wc_mem_map_offset;
2404 *sizep = enp->en_arch.ef10.ena_wc_mem_map_size;
2415 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * ef10_nic_hw_unavailable - detect whether the hardware has gone away.
 * Returns true if the EFX_RESET_HW_UNAVAIL flag is already latched, or
 * if the MC soft status register reads as all-ones (the usual signature
 * of a surprise-removed or dead PCIe device), in which case the flag is
 * latched via ef10_nic_set_hw_unavailable().
 */
2420 __checkReturn boolean_t
2421 ef10_nic_hw_unavailable(
2422 __in efx_nic_t *enp)
2426 if (enp->en_reset_flags & EFX_RESET_HW_UNAVAIL)
2429 EFX_BAR_READD(enp, ER_DZ_BIU_MC_SFT_STATUS_REG, &dword, B_FALSE);
2430 if (EFX_DWORD_FIELD(dword, EFX_DWORD_0) == 0xffffffff)
2436 ef10_nic_set_hw_unavailable(enp);
/* Latch the hardware-unavailable flag so later checks short-circuit. */
2442 ef10_nic_set_hw_unavailable(
2443 __in efx_nic_t *enp)
2445 EFSYS_PROBE(hw_unavail);
2446 enp->en_reset_flags |= EFX_RESET_HW_UNAVAIL;
/*
 * NIC teardown (reverse of init): free the vAdaptor, unlink piobufs
 * from their PIO-write VIs, free the piobufs, then release the VI
 * allocation. MCDI failures here are deliberately ignored ((void)
 * casts) — teardown is best effort.
 */
2452 __in efx_nic_t *enp)
2457 (void) efx_mcdi_vadaptor_free(enp, enp->en_vport_id);
2458 enp->en_vport_id = 0;
2460 /* Unlink piobufs from extra VIs in WC mapping */
2461 if (enp->en_arch.ef10.ena_piobuf_count > 0) {
2462 for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
2463 rc = efx_mcdi_unlink_piobuf(enp,
2464 enp->en_arch.ef10.ena_pio_write_vi_base + i);
2470 ef10_nic_free_piobufs(enp);
2472 (void) efx_mcdi_free_vis(enp);
2473 enp->en_arch.ef10.ena_vi_count = 0;
/*
 * Undo probe: free the monitor (sensor) config if it was built, and
 * detach the driver from firmware (best effort — result ignored).
 */
2478 __in efx_nic_t *enp)
2480 #if EFSYS_OPT_MON_STATS
2481 mcdi_mon_cfg_free(enp);
2482 #endif /* EFSYS_OPT_MON_STATS */
2483 (void) efx_mcdi_drv_attach(enp, B_FALSE);
/*
 * Diagnostic register test stub — no EF10 register test is implemented
 * (enp is unused; the lint annotations acknowledge this).
 */
2488 __checkReturn efx_rc_t
2489 ef10_nic_register_test(
2490 __in efx_nic_t *enp)
2495 _NOTE(ARGUNUSED(enp))
2496 _NOTE(CONSTANTCONDITION)
2506 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2511 #endif /* EFSYS_OPT_DIAG */
2513 #if EFSYS_OPT_FW_SUBVARIANT_AWARE
/*
 * efx_mcdi_get_nic_global - read a NIC-global firmware value by key via
 * MC_CMD_GET_NIC_GLOBAL, storing the dword result in *valuep. Fails if
 * the command errors or returns a short response.
 */
2515 __checkReturn efx_rc_t
2516 efx_mcdi_get_nic_global(
2517 __in efx_nic_t *enp,
2519 __out uint32_t *valuep)
2522 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_NIC_GLOBAL_IN_LEN,
2523 MC_CMD_GET_NIC_GLOBAL_OUT_LEN);
2526 req.emr_cmd = MC_CMD_GET_NIC_GLOBAL;
2527 req.emr_in_buf = payload;
2528 req.emr_in_length = MC_CMD_GET_NIC_GLOBAL_IN_LEN;
2529 req.emr_out_buf = payload;
2530 req.emr_out_length = MC_CMD_GET_NIC_GLOBAL_OUT_LEN;
2532 MCDI_IN_SET_DWORD(req, GET_NIC_GLOBAL_IN_KEY, key);
2534 efx_mcdi_execute(enp, &req);
2536 if (req.emr_rc != 0) {
/* Guard against a truncated response before reading the value */
2541 if (req.emr_out_length_used != MC_CMD_GET_NIC_GLOBAL_OUT_LEN) {
2546 *valuep = MCDI_OUT_DWORD(req, GET_NIC_GLOBAL_OUT_VALUE);
2553 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * efx_mcdi_set_nic_global - write a NIC-global firmware value by key
 * via MC_CMD_SET_NIC_GLOBAL. The command has no response payload, so
 * the output buffer is NULL/zero-length.
 */
2558 __checkReturn efx_rc_t
2559 efx_mcdi_set_nic_global(
2560 __in efx_nic_t *enp,
2562 __in uint32_t value)
2565 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_NIC_GLOBAL_IN_LEN, 0);
2568 req.emr_cmd = MC_CMD_SET_NIC_GLOBAL;
2569 req.emr_in_buf = payload;
2570 req.emr_in_length = MC_CMD_SET_NIC_GLOBAL_IN_LEN;
2571 req.emr_out_buf = NULL;
2572 req.emr_out_length = 0;
2574 MCDI_IN_SET_DWORD(req, SET_NIC_GLOBAL_IN_KEY, key);
2575 MCDI_IN_SET_DWORD(req, SET_NIC_GLOBAL_IN_VALUE, value);
2577 efx_mcdi_execute(enp, &req);
2579 if (req.emr_rc != 0) {
2587 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2592 #endif /* EFSYS_OPT_FW_SUBVARIANT_AWARE */
2594 #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */