2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2021 Alstom Group.
5 * Copyright (c) 2021 Semihalf.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
31 #include <sys/param.h>
33 #include <sys/endian.h>
34 #include <sys/kernel.h>
35 #include <sys/module.h>
37 #include <sys/socket.h>
38 #include <sys/sockio.h>
40 #include <machine/bus.h>
41 #include <machine/resource.h>
43 #include <net/ethernet.h>
45 #include <net/if_dl.h>
46 #include <net/if_var.h>
47 #include <net/if_types.h>
48 #include <net/if_media.h>
49 #include <net/iflib.h>
51 #include <dev/enetc/enetc_hw.h>
52 #include <dev/enetc/enetc.h>
53 #include <dev/enetc/enetc_mdio.h>
54 #include <dev/mii/mii.h>
55 #include <dev/mii/miivar.h>
56 #include <dev/pci/pcireg.h>
57 #include <dev/pci/pcivar.h>
59 #include <dev/ofw/ofw_bus.h>
60 #include <dev/ofw/ofw_bus_subr.h>
63 #include "miibus_if.h"
/*
 * Forward declarations for the ENETC PF driver.
 * NOTE(review): the embedded original-file line numbers are discontinuous;
 * this extract is missing intervening source lines throughout.
 */
/* newbus device_register hook. */
65 static device_register_t enetc_register;
/* iflib attach/detach entry points. */
67 static ifdi_attach_pre_t enetc_attach_pre;
68 static ifdi_attach_post_t enetc_attach_post;
69 static ifdi_detach_t enetc_detach;
/* iflib queue allocation/teardown. */
71 static ifdi_tx_queues_alloc_t enetc_tx_queues_alloc;
72 static ifdi_rx_queues_alloc_t enetc_rx_queues_alloc;
73 static ifdi_queues_free_t enetc_queues_free;
/* iflib bring-up/shutdown. */
75 static ifdi_init_t enetc_init;
76 static ifdi_stop_t enetc_stop;
/* iflib interrupt management. */
78 static ifdi_msix_intr_assign_t enetc_msix_intr_assign;
79 static ifdi_tx_queue_intr_enable_t enetc_tx_queue_intr_enable;
80 static ifdi_rx_queue_intr_enable_t enetc_rx_queue_intr_enable;
81 static ifdi_intr_enable_t enetc_intr_enable;
82 static ifdi_intr_disable_t enetc_intr_disable;
/* if_txrx descriptor-ring handlers (hot path). */
84 static int enetc_isc_txd_encap(void*, if_pkt_info_t);
85 static void enetc_isc_txd_flush(void*, uint16_t, qidx_t);
86 static int enetc_isc_txd_credits_update(void*, uint16_t, bool);
87 static int enetc_isc_rxd_available(void*, uint16_t, qidx_t, qidx_t);
88 static int enetc_isc_rxd_pkt_get(void*, if_rxd_info_t);
89 static void enetc_isc_rxd_refill(void*, if_rxd_update_t);
90 static void enetc_isc_rxd_flush(void*, uint16_t, uint8_t, qidx_t);
/* VLAN hardware-filter registration callbacks. */
92 static void enetc_vlan_register(if_ctx_t, uint16_t);
93 static void enetc_vlan_unregister(if_ctx_t, uint16_t);
/* Miscellaneous ifdi handlers. */
95 static uint64_t enetc_get_counter(if_ctx_t, ift_counter);
96 static int enetc_promisc_set(if_ctx_t, int);
97 static int enetc_mtu_set(if_ctx_t, uint32_t);
98 static void enetc_setup_multicast(if_ctx_t);
99 static void enetc_timer(if_ctx_t, uint16_t);
100 static void enetc_update_admin_status(if_ctx_t);
/* miibus PHY-access callbacks. */
102 static miibus_readreg_t enetc_miibus_readreg;
103 static miibus_writereg_t enetc_miibus_writereg;
104 static miibus_linkchg_t enetc_miibus_linkchg;
105 static miibus_statchg_t enetc_miibus_statchg;
/* ifmedia handlers for PHY-managed links. */
107 static int enetc_media_change(if_t);
108 static void enetc_media_status(if_t, struct ifmediareq*);
/* ifmedia handlers for FDT "fixed-link" mode (no PHY). */
110 static int enetc_fixed_media_change(if_t);
111 static void enetc_fixed_media_status(if_t, struct ifmediareq*);
/* Internal helpers. */
113 static void enetc_max_nqueues(struct enetc_softc*, int*, int*);
114 static int enetc_setup_phy(struct enetc_softc*);
116 static void enetc_get_hwaddr(struct enetc_softc*);
117 static void enetc_set_hwaddr(struct enetc_softc*);
118 static int enetc_setup_rss(struct enetc_softc*);
120 static void enetc_init_hw(struct enetc_softc*);
121 static void enetc_init_ctrl(struct enetc_softc*);
122 static void enetc_init_tx(struct enetc_softc*);
123 static void enetc_init_rx(struct enetc_softc*);
/* Submit one command to the hardware control ring and wait for completion. */
125 static int enetc_ctrl_send(struct enetc_softc*,
126 uint16_t, uint16_t, iflib_dma_info_t);
128 static const char enetc_driver_version[] = "1.0.0";
/*
 * PCI IDs this driver attaches to, used by iflib for probing.
 * NOTE(review): the table terminator line is absent from this extract.
 */
130 static pci_vendor_info_t enetc_vendor_info_array[] = {
131 PVID(PCI_VENDOR_FREESCALE, ENETC_DEV_ID_PF,
132 "Freescale ENETC PCIe Gigabit Ethernet Controller"),
/* Interface capabilities advertised to the stack and enabled by default. */
136 #define ENETC_IFCAPS (IFCAP_VLAN_MTU | IFCAP_RXCSUM | IFCAP_JUMBO_MTU | \
137 IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWFILTER)
/*
 * newbus method table: device lifecycle is delegated to iflib, while
 * MII register access and link-state callbacks are handled locally.
 * NOTE(review): the DEVMETHOD_END/closing lines are absent from this extract.
 */
139 static device_method_t enetc_methods[] = {
140 DEVMETHOD(device_register, enetc_register),
141 DEVMETHOD(device_probe, iflib_device_probe),
142 DEVMETHOD(device_attach, iflib_device_attach),
143 DEVMETHOD(device_detach, iflib_device_detach),
144 DEVMETHOD(device_shutdown, iflib_device_shutdown),
145 DEVMETHOD(device_suspend, iflib_device_suspend),
146 DEVMETHOD(device_resume, iflib_device_resume),
148 DEVMETHOD(miibus_readreg, enetc_miibus_readreg),
149 DEVMETHOD(miibus_writereg, enetc_miibus_writereg),
150 DEVMETHOD(miibus_linkchg, enetc_miibus_linkchg),
151 DEVMETHOD(miibus_statchg, enetc_miibus_statchg),
/*
 * Driver registration: attach "enetc" on the pci bus, hang miibus off it,
 * and declare module dependencies for the loader.
 */
156 static driver_t enetc_driver = {
157 "enetc", enetc_methods, sizeof(struct enetc_softc)
160 static devclass_t enetc_devclass;
161 DRIVER_MODULE(enetc, pci, enetc_driver, enetc_devclass, NULL, NULL);
162 DRIVER_MODULE(miibus, enetc, miibus_driver, miibus_devclass, NULL, NULL);
163 MODULE_VERSION(enetc, 1);
/* Export PNP info so devmatch(8) can autoload the module. */
165 IFLIB_PNP_INFO(pci, enetc, enetc_vendor_info_array);
167 MODULE_DEPEND(enetc, ether, 1, 1, 1);
168 MODULE_DEPEND(enetc, iflib, 1, 1, 1);
169 MODULE_DEPEND(enetc, miibus, 1, 1, 1);
/*
 * iflib (ifdi) method table mapping framework callbacks onto the
 * driver implementations declared above.
 */
171 static device_method_t enetc_iflib_methods[] = {
172 DEVMETHOD(ifdi_attach_pre, enetc_attach_pre),
173 DEVMETHOD(ifdi_attach_post, enetc_attach_post),
174 DEVMETHOD(ifdi_detach, enetc_detach),
176 DEVMETHOD(ifdi_init, enetc_init),
177 DEVMETHOD(ifdi_stop, enetc_stop),
179 DEVMETHOD(ifdi_tx_queues_alloc, enetc_tx_queues_alloc),
180 DEVMETHOD(ifdi_rx_queues_alloc, enetc_rx_queues_alloc),
181 DEVMETHOD(ifdi_queues_free, enetc_queues_free),
183 DEVMETHOD(ifdi_msix_intr_assign, enetc_msix_intr_assign),
184 DEVMETHOD(ifdi_tx_queue_intr_enable, enetc_tx_queue_intr_enable),
185 DEVMETHOD(ifdi_rx_queue_intr_enable, enetc_rx_queue_intr_enable),
186 DEVMETHOD(ifdi_intr_enable, enetc_intr_enable),
187 DEVMETHOD(ifdi_intr_disable, enetc_intr_disable),
189 DEVMETHOD(ifdi_vlan_register, enetc_vlan_register),
190 DEVMETHOD(ifdi_vlan_unregister, enetc_vlan_unregister),
192 DEVMETHOD(ifdi_get_counter, enetc_get_counter),
193 DEVMETHOD(ifdi_mtu_set, enetc_mtu_set),
194 DEVMETHOD(ifdi_multi_set, enetc_setup_multicast),
195 DEVMETHOD(ifdi_promisc_set, enetc_promisc_set),
196 DEVMETHOD(ifdi_timer, enetc_timer),
197 DEVMETHOD(ifdi_update_admin_status, enetc_update_admin_status),
/* iflib-facing driver descriptor referenced from enetc_sctx_init below. */
202 static driver_t enetc_iflib_driver = {
203 "enetc", enetc_iflib_methods, sizeof(struct enetc_softc)
/* Hot-path descriptor-ring operations handed to iflib. */
206 static struct if_txrx enetc_txrx = {
207 .ift_txd_encap = enetc_isc_txd_encap,
208 .ift_txd_flush = enetc_isc_txd_flush,
209 .ift_txd_credits_update = enetc_isc_txd_credits_update,
210 .ift_rxd_available = enetc_isc_rxd_available,
211 .ift_rxd_pkt_get = enetc_isc_rxd_pkt_get,
212 .ift_rxd_refill = enetc_isc_rxd_refill,
213 .ift_rxd_flush = enetc_isc_rxd_flush
/*
 * Static shared-context template returned from enetc_register().
 * Describes ring alignment, segment limits, and descriptor-count
 * constraints to iflib before attach.
 */
216 static struct if_shared_ctx enetc_sctx_init = {
217 .isc_magic = IFLIB_MAGIC,
219 .isc_q_align = ENETC_RING_ALIGN,
221 .isc_tx_maxsize = ENETC_MAX_FRAME_LEN,
222 .isc_tx_maxsegsize = PAGE_SIZE,
224 .isc_rx_maxsize = ENETC_MAX_FRAME_LEN,
225 .isc_rx_maxsegsize = ENETC_MAX_FRAME_LEN,
226 .isc_rx_nsegments = ENETC_MAX_SCATTER,
/* Link state is polled via ifdi_timer; no dedicated admin interrupt. */
228 .isc_admin_intrcnt = 0,
234 .isc_vendor_info = enetc_vendor_info_array,
235 .isc_driver_version = enetc_driver_version,
236 .isc_driver = &enetc_iflib_driver,
/* Driver owns media handling; TX indices must survive reinit (see init_tx). */
238 .isc_flags = IFLIB_DRIVER_MEDIA | IFLIB_PRESERVE_TX_INDICES,
239 .isc_ntxd_min = {ENETC_MIN_DESC},
240 .isc_ntxd_max = {ENETC_MAX_DESC},
241 .isc_ntxd_default = {ENETC_DEFAULT_DESC},
242 .isc_nrxd_min = {ENETC_MIN_DESC},
243 .isc_nrxd_max = {ENETC_MAX_DESC},
244 .isc_nrxd_default = {ENETC_DEFAULT_DESC}
/*
 * device_register hook: refuse devices disabled in the FDT
 * ("status" not okay), otherwise hand iflib the shared context.
 * NOTE(review): interior lines are absent from this extract.
 */
248 enetc_register(device_t dev)
251 if (!ofw_bus_status_okay(dev))
254 return (&enetc_sctx_init);
/*
 * Read the port capability register and report how many TX/RX
 * buffer-descriptor rings the hardware supports, clamped to the
 * driver's ENETC_MAX_QUEUES limit.
 */
258 enetc_max_nqueues(struct enetc_softc *sc, int *max_tx_nqueues,
263 val = ENETC_PORT_RD4(sc, ENETC_PCAPR0);
264 *max_tx_nqueues = MIN(ENETC_PCAPR0_TXBDR(val), ENETC_MAX_QUEUES);
265 *max_rx_nqueues = MIN(ENETC_PCAPR0_RXBDR(val), ENETC_MAX_QUEUES);
/*
 * Configure media for an FDT "fixed-link" node: read the mandated
 * speed (and duplex) from the device tree and publish a single
 * fixed ifmedia entry instead of attaching a PHY.
 * NOTE(review): speed-to-ifmedia translation lines are absent from
 * this extract.
 */
269 enetc_setup_fixed(struct enetc_softc *sc, phandle_t node)
274 size = OF_getencprop(node, "speed", &speed, sizeof(speed));
276 device_printf(sc->dev,
277 "Device has fixed-link node without link speed specified\n");
291 device_printf(sc->dev, "Unsupported link speed value of %d\n",
297 if (OF_hasprop(node, "full-duplex"))
/* Remember fixed-link mode; checked in enetc_init()/update_admin_status(). */
302 sc->fixed_link = true;
304 ifmedia_init(&sc->fixed_ifmedia, 0, enetc_fixed_media_change,
305 enetc_fixed_media_status);
306 ifmedia_add(&sc->fixed_ifmedia, speed, 0, NULL);
307 ifmedia_set(&sc->fixed_ifmedia, speed);
308 sc->shared->isc_media = &sc->fixed_ifmedia;
/*
 * Set up link management from the FDT: prefer a "fixed-link"
 * subnode; otherwise resolve "phy-handle"/"reg" and attach a
 * MII PHY via mii_attach(), exporting its ifmedia to iflib.
 */
314 enetc_setup_phy(struct enetc_softc *sc)
316 phandle_t node, fixed_link, phy_handle;
317 struct mii_data *miid;
321 node = ofw_bus_get_node(sc->dev);
322 fixed_link = ofw_bus_find_child(node, "fixed-link");
324 return (enetc_setup_fixed(sc, fixed_link));
326 size = OF_getencprop(node, "phy-handle", &phy_handle, sizeof(phy_handle));
328 device_printf(sc->dev,
329 "Failed to acquire PHY handle from FDT.\n");
/* phy-handle is an xref; convert to a node before reading "reg". */
332 phy_handle = OF_node_from_xref(phy_handle);
333 size = OF_getencprop(phy_handle, "reg", &phy_addr, sizeof(phy_addr));
335 device_printf(sc->dev, "Failed to obtain PHY address\n");
338 error = mii_attach(sc->dev, &sc->miibus, iflib_get_ifp(sc->ctx),
339 enetc_media_change, enetc_media_status,
340 BMSR_DEFCAPMASK, phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE);
342 device_printf(sc->dev, "mii_attach failed\n");
345 miid = device_get_softc(sc->miibus);
346 sc->shared->isc_media = &miid->mii_media;
/*
 * ifdi_attach_pre: map the register BAR, allocate the control ring,
 * validate descriptor counts, publish queue/segment limits and
 * capabilities to iflib, then set up PHY/media and the MAC address.
 * NOTE(review): error-path lines (returns/cleanup) are absent from
 * this extract.
 */
352 enetc_attach_pre(if_ctx_t ctx)
355 if_softc_ctx_t scctx;
356 struct enetc_softc *sc;
359 sc = iflib_get_softc(ctx);
360 scctx = iflib_get_softc_ctx(ctx);
362 sc->dev = iflib_get_dev(ctx);
364 ifp = iflib_get_ifp(ctx);
/* Map the main register BAR. */
366 rid = PCIR_BAR(ENETC_BAR_REGS);
367 sc->regs = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
368 if (sc->regs == NULL) {
369 device_printf(sc->dev,
370 "Failed to allocate BAR %d\n", ENETC_BAR_REGS);
/* DMA-coherent backing for the command (control BD) ring. */
374 error = iflib_dma_alloc_align(ctx,
375 ENETC_MIN_DESC * sizeof(struct enetc_cbd),
380 device_printf(sc->dev, "Failed to allocate control ring\n");
383 sc->ctrl_queue.ring = (struct enetc_cbd*)sc->ctrl_queue.dma.idi_vaddr;
385 scctx->isc_txrx = &enetc_txrx;
386 scctx->isc_tx_nsegments = ENETC_MAX_SCATTER;
387 enetc_max_nqueues(sc, &scctx->isc_nrxqsets_max, &scctx->isc_ntxqsets_max);
/* Hardware requires ring sizes that are multiples of ENETC_DESC_ALIGN. */
389 if (scctx->isc_ntxd[0] % ENETC_DESC_ALIGN != 0) {
390 device_printf(sc->dev,
391 "The number of TX descriptors has to be a multiple of %d\n",
396 if (scctx->isc_nrxd[0] % ENETC_DESC_ALIGN != 0) {
397 device_printf(sc->dev,
398 "The number of RX descriptors has to be a multiple of %d\n",
403 scctx->isc_txqsizes[0] = scctx->isc_ntxd[0] * sizeof(union enetc_tx_bd);
404 scctx->isc_rxqsizes[0] = scctx->isc_nrxd[0] * sizeof(union enetc_rx_bd);
405 scctx->isc_txd_size[0] = sizeof(union enetc_tx_bd);
406 scctx->isc_rxd_size[0] = sizeof(union enetc_rx_bd);
/* No TX checksum offload; see ENETC_IFCAPS for advertised features. */
407 scctx->isc_tx_csum_flags = 0;
408 scctx->isc_capabilities = scctx->isc_capenable = ENETC_IFCAPS;
410 error = enetc_mtu_set(ctx, ETHERMTU);
414 scctx->isc_msix_bar = pci_msix_table_bar(sc->dev);
416 error = enetc_setup_phy(sc);
420 enetc_get_hwaddr(sc);
/* ifdi_attach_post: queue counts are final here; program the hardware. */
429 enetc_attach_post(if_ctx_t ctx)
432 enetc_init_hw(iflib_get_softc(ctx));
/*
 * ifdi_detach: release per-queue interrupts, the miibus child,
 * the register BAR, and the control-ring DMA memory.
 */
437 enetc_detach(if_ctx_t ctx)
439 struct enetc_softc *sc;
442 sc = iflib_get_softc(ctx);
444 for (i = 0; i < sc->rx_num_queues; i++)
445 iflib_irq_free(ctx, &sc->rx_queues[i].irq);
447 if (sc->miibus != NULL)
448 device_delete_child(sc->dev, sc->miibus);
450 if (sc->regs != NULL)
451 error = bus_release_resource(sc->dev, SYS_RES_MEMORY,
452 rman_get_rid(sc->regs), sc->regs);
/* idi_size != 0 implies the control ring DMA allocation succeeded. */
454 if (sc->ctrl_queue.dma.idi_size != 0)
455 iflib_dma_free(&sc->ctrl_queue.dma);
/*
 * ifdi_tx_queues_alloc: allocate the per-queue software state and
 * bind each queue to the ring memory iflib already allocated
 * (vaddrs/paddrs, one ring per queue set).
 */
461 enetc_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
462 int ntxqs, int ntxqsets)
464 struct enetc_softc *sc;
465 struct enetc_tx_queue *queue;
468 sc = iflib_get_softc(ctx);
472 sc->tx_queues = mallocarray(sc->tx_num_queues,
473 sizeof(struct enetc_tx_queue), M_DEVBUF, M_NOWAIT | M_ZERO);
474 if (sc->tx_queues == NULL) {
475 device_printf(sc->dev,
476 "Failed to allocate memory for TX queues.\n");
480 for (i = 0; i < sc->tx_num_queues; i++) {
481 queue = &sc->tx_queues[i];
483 queue->ring = (union enetc_tx_bd*)(vaddrs[i]);
484 queue->ring_paddr = paddrs[i];
/* Software completion state; consumed by txd_credits_update(). */
485 queue->next_to_clean = 0;
486 queue->ring_full = false;
/*
 * ifdi_rx_queues_alloc: RX counterpart of enetc_tx_queues_alloc();
 * allocate software queue state and attach the iflib-provided rings.
 */
493 enetc_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
494 int nrxqs, int nrxqsets)
496 struct enetc_softc *sc;
497 struct enetc_rx_queue *queue;
500 sc = iflib_get_softc(ctx);
503 sc->rx_queues = mallocarray(sc->rx_num_queues,
504 sizeof(struct enetc_rx_queue), M_DEVBUF, M_NOWAIT | M_ZERO);
505 if (sc->rx_queues == NULL) {
506 device_printf(sc->dev,
507 "Failed to allocate memory for RX queues.\n");
511 for (i = 0; i < sc->rx_num_queues; i++) {
512 queue = &sc->rx_queues[i];
515 queue->ring = (union enetc_rx_bd*)(vaddrs[i]);
516 queue->ring_paddr = paddrs[i];
/*
 * ifdi_queues_free: release the software queue arrays; the ring
 * DMA memory itself is owned and freed by iflib.
 */
523 enetc_queues_free(if_ctx_t ctx)
525 struct enetc_softc *sc;
527 sc = iflib_get_softc(ctx);
529 if (sc->tx_queues != NULL) {
530 free(sc->tx_queues, M_DEVBUF);
531 sc->tx_queues = NULL;
533 if (sc->rx_queues != NULL) {
534 free(sc->rx_queues, M_DEVBUF);
535 sc->rx_queues = NULL;
/*
 * Read the station MAC address from the SI primary MAC address
 * registers; if it is invalid (broadcast, multicast, or all-zero),
 * generate a stable random address instead. The result is handed
 * to iflib as the interface lladdr.
 */
540 enetc_get_hwaddr(struct enetc_softc *sc)
542 struct ether_addr hwaddr;
546 low = ENETC_PORT_RD4(sc, ENETC_PSIPMAR0(0));
547 high = ENETC_PORT_RD2(sc, ENETC_PSIPMAR1(0));
/* Address is split 4+2 bytes across the two registers. */
549 memcpy(&hwaddr.octet[0], &low, 4);
550 memcpy(&hwaddr.octet[4], &high, 2);
552 if (ETHER_IS_BROADCAST(hwaddr.octet) ||
553 ETHER_IS_MULTICAST(hwaddr.octet) ||
554 ETHER_IS_ZERO(hwaddr.octet)) {
555 ether_gen_addr(iflib_get_ifp(sc->ctx), &hwaddr);
556 device_printf(sc->dev,
557 "Failed to obtain MAC address, using a random one\n");
558 memcpy(&low, &hwaddr.octet[0], 4);
559 memcpy(&high, &hwaddr.octet[4], 2);
562 iflib_set_mac(sc->ctx, hwaddr.octet);
/*
 * Program the interface lladdr back into the SI primary MAC
 * address registers (4 low bytes + 2 high bytes).
 */
566 enetc_set_hwaddr(struct enetc_softc *sc)
573 ifp = iflib_get_ifp(sc->ctx);
574 hwaddr = (uint8_t*)if_getlladdr(ifp);
575 low = *((uint32_t*)hwaddr);
576 high = *((uint16_t*)(hwaddr+4));
578 ENETC_PORT_WR4(sc, ENETC_PSIPMAR0(0), low);
579 ENETC_PORT_WR2(sc, ENETC_PSIPMAR1(0), high);
/*
 * Configure receive-side scaling: verify the SI supports RSS,
 * program a random hash key, then build an indirection table that
 * spreads buckets round-robin across the RX queues and upload it
 * through the control ring.
 */
583 enetc_setup_rss(struct enetc_softc *sc)
585 struct iflib_dma_info dma;
586 int error, i, buckets_num = 0;
590 reg = ENETC_RD4(sc, ENETC_SIPCAPR0);
591 if (reg & ENETC_SIPCAPR0_RSS) {
592 reg = ENETC_RD4(sc, ENETC_SIRSSCAPR);
593 buckets_num = ENETC_SIRSSCAPR_GET_NUM_RSS(reg);
595 if (buckets_num == 0)
/* Random hash key, one 32-bit word at a time. */
598 for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / sizeof(uint32_t); i++) {
599 arc4rand((uint8_t *)&reg, sizeof(reg), 0);
600 ENETC_PORT_WR4(sc, ENETC_PRSSK(i), reg);
603 ENETC_WR4(sc, ENETC_SIRBGCR, sc->rx_num_queues);
/* The indirection table is uploaded via a control-ring command. */
605 error = iflib_dma_alloc_align(sc->ctx,
606 buckets_num * sizeof(*rss_table),
611 device_printf(sc->dev, "Failed to allocate DMA buffer for RSS\n");
614 rss_table = (uint8_t *)dma.idi_vaddr;
616 for (i = 0; i < buckets_num; i++)
617 rss_table[i] = i % sc->rx_num_queues;
619 error = enetc_ctrl_send(sc, (BDCR_CMD_RSS << 8) | BDCR_CMD_RSS_WRITE,
620 buckets_num * sizeof(*rss_table), &dma);
622 device_printf(sc->dev, "Failed to setup RSS table\n");
624 iflib_dma_free(&dma);
/*
 * Post one command descriptor on the control BD ring and busy-wait
 * for the hardware consumer index to catch up with the producer.
 * 'cmd' packs the class in the high byte and the command in the low
 * byte; 'dma' holds the command payload.
 * NOTE(review): the timeout initialization/expiry handling lines are
 * absent from this extract.
 */
630 enetc_ctrl_send(struct enetc_softc *sc, uint16_t cmd, uint16_t size,
631 iflib_dma_info_t dma)
633 struct enetc_ctrl_queue *queue;
634 struct enetc_cbd *desc;
637 queue = &sc->ctrl_queue;
638 desc = &queue->ring[queue->pidx];
/* Control ring is sized to ENETC_MIN_DESC; wrap the producer index. */
640 if (++queue->pidx == ENETC_MIN_DESC)
643 desc->addr[0] = (uint32_t)dma->idi_paddr;
644 desc->addr[1] = (uint32_t)(dma->idi_paddr >> 32);
646 desc->length = (uint16_t)size;
647 desc->cmd = (uint8_t)cmd;
648 desc->cls = (uint8_t)(cmd >> 8);
649 desc->status_flags = 0;
651 /* Sync command packet, */
652 bus_dmamap_sync(dma->idi_tag, dma->idi_map, BUS_DMASYNC_PREWRITE);
653 /* and the control ring. */
654 bus_dmamap_sync(queue->dma.idi_tag, queue->dma.idi_map, BUS_DMASYNC_PREWRITE);
/* Ring the doorbell, then poll for completion. */
655 ENETC_WR4(sc, ENETC_SICBDRPIR, queue->pidx);
657 while (--timeout != 0) {
659 if (ENETC_RD4(sc, ENETC_SICBDRCIR) == queue->pidx)
666 bus_dmamap_sync(dma->idi_tag, dma->idi_map, BUS_DMASYNC_POSTREAD);
/*
 * One-time hardware bring-up (called from attach_post): enable the
 * MAC, assign BD rings to SI 0, configure VLAN handling and cache
 * coherency attributes, then enable the station interface with RSS
 * if enetc_setup_rss() succeeded.
 */
671 enetc_init_hw(struct enetc_softc *sc)
676 ENETC_PORT_WR4(sc, ENETC_PM0_CMD_CFG,
677 ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC |
678 ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);
679 ENETC_PORT_WR4(sc, ENETC_PM0_RX_FIFO, ENETC_PM0_RX_FIFO_VAL);
/* Hand all TX/RX BD rings to station interface 0. */
680 val = ENETC_PSICFGR0_SET_TXBDR(sc->tx_num_queues);
681 val |= ENETC_PSICFGR0_SET_RXBDR(sc->rx_num_queues);
682 val |= ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
683 ENETC_PORT_WR4(sc, ENETC_PSICFGR0(0), val);
684 ENETC_PORT_WR4(sc, ENETC_PSIPVMR, ENETC_PSIPVMR_SET_VUTA(1))
685 ENETC_PORT_WR4(sc, ENETC_PVCLCTR, ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S);
686 ENETC_PORT_WR4(sc, ENETC_PSIVLANFMR, ENETC_PSIVLANFMR_VS);
687 ENETC_PORT_WR4(sc, ENETC_PAR_PORT_CFG, ENETC_PAR_PORT_L4CD);
688 ENETC_PORT_WR4(sc, ENETC_PMR, ENETC_PMR_SI0EN | ENETC_PMR_PSPEED_1000M);
/* Coherent DMA read/write attributes and MSI translation. */
690 ENETC_WR4(sc, ENETC_SICAR0,
691 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
692 ENETC_WR4(sc, ENETC_SICAR1, ENETC_SICAR_MSI);
693 ENETC_WR4(sc, ENETC_SICAR2,
694 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
697 error = enetc_setup_rss(sc);
699 ENETC_WR4(sc, ENETC_SIMR, ENETC_SIMR_EN);
701 ENETC_WR4(sc, ENETC_SIMR, ENETC_SIMR_EN | ENETC_SIMR_RSSE);
/*
 * Program the control BD ring base address, length, and current
 * producer/consumer indices, then enable the ring.
 */
706 enetc_init_ctrl(struct enetc_softc *sc)
708 struct enetc_ctrl_queue *queue = &sc->ctrl_queue;
710 ENETC_WR4(sc, ENETC_SICBDRBAR0,
711 (uint32_t)queue->dma.idi_paddr);
712 ENETC_WR4(sc, ENETC_SICBDRBAR1,
713 (uint32_t)(queue->dma.idi_paddr >> 32));
714 ENETC_WR4(sc, ENETC_SICBDRLENR,
715 queue->dma.idi_size / sizeof(struct enetc_cbd));
/* Resume from the software producer index rather than resetting to 0. */
718 ENETC_WR4(sc, ENETC_SICBDRPIR, queue->pidx);
719 ENETC_WR4(sc, ENETC_SICBDRCIR, queue->pidx);
720 ENETC_WR4(sc, ENETC_SICBDRMR, ENETC_SICBDRMR_EN);
/*
 * Program base address, length, and mode for every TX BD ring and
 * enable them. TX producer/consumer indices are deliberately left
 * untouched (see comment below and IFLIB_PRESERVE_TX_INDICES).
 */
724 enetc_init_tx(struct enetc_softc *sc)
726 struct enetc_tx_queue *queue;
729 for (i = 0; i < sc->tx_num_queues; i++) {
730 queue = &sc->tx_queues[i];
732 ENETC_TXQ_WR4(sc, i, ENETC_TBBAR0,
733 (uint32_t)queue->ring_paddr);
734 ENETC_TXQ_WR4(sc, i, ENETC_TBBAR1,
735 (uint32_t)(queue->ring_paddr >> 32));
736 ENETC_TXQ_WR4(sc, i, ENETC_TBLENR, sc->tx_queue_size);
739 * Even though it is undocumented, resetting the TX ring
740 * indices results in a TX hang.
741 * Do the same as Linux and simply keep those unchanged
742 * for the driver's lifetime.
745 ENETC_TXQ_WR4(sc, i, ENETC_TBPIR, 0);
746 ENETC_TXQ_WR4(sc, i, ENETC_TBCIR, 0);
748 ENETC_TXQ_WR4(sc, i, ENETC_TBMR, ENETC_TBMR_EN);
/*
 * Program base address, length, buffer size, and indices for every
 * RX BD ring. Rings are left disabled here; enetc_isc_rxd_refill()
 * enables each ring once enough buffers have been posted.
 */
754 enetc_init_rx(struct enetc_softc *sc)
756 struct enetc_rx_queue *queue;
757 uint32_t rx_buf_size;
760 rx_buf_size = iflib_get_rx_mbuf_sz(sc->ctx);
762 for (i = 0; i < sc->rx_num_queues; i++) {
763 queue = &sc->rx_queues[i];
765 ENETC_RXQ_WR4(sc, i, ENETC_RBBAR0,
766 (uint32_t)queue->ring_paddr);
767 ENETC_RXQ_WR4(sc, i, ENETC_RBBAR1,
768 (uint32_t)(queue->ring_paddr >> 32));
769 ENETC_RXQ_WR4(sc, i, ENETC_RBLENR, sc->rx_queue_size);
770 ENETC_RXQ_WR4(sc, i, ENETC_RBBSR, rx_buf_size);
771 ENETC_RXQ_WR4(sc, i, ENETC_RBPIR, 0);
772 ENETC_RXQ_WR4(sc, i, ENETC_RBCIR, 0);
773 queue->enabled = false;
/*
 * if_foreach_llmaddr() callback: fold one link-level multicast
 * address into the 64-bit hash-filter bitmap passed via 'arg'.
 */
778 enetc_hash_mac(void *arg, struct sockaddr_dl *sdl, u_int cnt)
780 uint64_t *bitmap = arg;
781 uint64_t address = 0;
786 bcopy(LLADDR(sdl), &address, ETHER_ADDR_LEN);
789 * The six bit hash is calculated by xoring every
790 * 6th bit of the address.
791 * It is then used as an index in a bitmap that is
792 * written to the device.
794 for (i = 0; i < 6; i++) {
796 for (j = 0; j < 8; j++)
797 bit ^= address & BIT(i + j*6);
802 *bitmap |= (1 << hash);
/*
 * ifdi_multi_set: rebuild the 64-bit multicast hash-filter bitmap
 * from the interface's multicast list and write it to the device.
 */
807 enetc_setup_multicast(if_ctx_t ctx)
809 struct enetc_softc *sc;
814 sc = iflib_get_softc(ctx);
815 ifp = iflib_get_ifp(ctx);
816 revid = pci_get_revid(sc->dev);
818 if_foreach_llmaddr(ifp, enetc_hash_mac, &bitmap);
821 * In revid 1 of this chip the positions of the multicast and
822 * unicast hash filter registers are flipped.
824 ENETC_PORT_WR4(sc, ENETC_PSIMMHFR0(0, revid == 1), bitmap & UINT32_MAX);
825 ENETC_PORT_WR4(sc, ENETC_PSIMMHFR1(0), bitmap >> 32);
/*
 * Compute the 6-bit VLAN-filter hash by xoring the low six bits of
 * the VID with the next six bits.
 * NOTE(review): interior lines are absent from this extract.
 */
830 enetc_hash_vid(uint16_t vid)
836 for (i = 0;i < 6;i++) {
838 bit ^= vid & BIT(i + 6);
/*
 * ifdi_vlan_register: reference-count the hash bucket for this VID
 * and set the corresponding bit in the hardware VLAN hash filter
 * the first time the bucket becomes used.
 */
846 enetc_vlan_register(if_ctx_t ctx, uint16_t vid)
848 struct enetc_softc *sc;
852 sc = iflib_get_softc(ctx);
853 hash = enetc_hash_vid(vid);
855 /* Check if hash is already present in the bitmap. */
856 if (++sc->vlan_bitmap[hash] != 1)
859 bitmap = ENETC_PORT_RD4(sc, ENETC_PSIVHFR0(0));
860 bitmap |= (uint64_t)ENETC_PORT_RD4(sc, ENETC_PSIVHFR1(0)) << 32;
862 ENETC_PORT_WR4(sc, ENETC_PSIVHFR0(0), bitmap & UINT32_MAX);
863 ENETC_PORT_WR4(sc, ENETC_PSIVHFR1(0), bitmap >> 32);
/*
 * ifdi_vlan_unregister: drop the bucket refcount and clear the
 * hardware VLAN hash-filter bit once no VID maps to it any more.
 */
867 enetc_vlan_unregister(if_ctx_t ctx, uint16_t vid)
869 struct enetc_softc *sc;
873 sc = iflib_get_softc(ctx);
874 hash = enetc_hash_vid(vid);
876 MPASS(sc->vlan_bitmap[hash] > 0);
877 if (--sc->vlan_bitmap[hash] != 0)
880 bitmap = ENETC_PORT_RD4(sc, ENETC_PSIVHFR0(0));
881 bitmap |= (uint64_t)ENETC_PORT_RD4(sc, ENETC_PSIVHFR1(0)) << 32;
882 bitmap &= ~BIT(hash);
883 ENETC_PORT_WR4(sc, ENETC_PSIVHFR0(0), bitmap & UINT32_MAX);
884 ENETC_PORT_WR4(sc, ENETC_PSIVHFR1(0), bitmap >> 32);
/*
 * ifdi_init: per-ifconfig-up configuration — frame length limits,
 * VLAN filtering/tagging mode, MAC address, and link bring-up
 * (fixed-link report or PHY autonegotiation restart).
 */
888 enetc_init(if_ctx_t ctx)
890 struct enetc_softc *sc;
891 struct mii_data *miid;
893 uint16_t max_frame_length;
896 sc = iflib_get_softc(ctx);
897 ifp = iflib_get_ifp(ctx);
899 max_frame_length = sc->shared->isc_max_frame_size;
900 MPASS(max_frame_length < ENETC_MAX_FRAME_LEN);
902 /* Set max RX and TX frame lengths. */
903 ENETC_PORT_WR4(sc, ENETC_PM0_MAXFRM, max_frame_length);
904 ENETC_PORT_WR4(sc, ENETC_PTCMSDUR(0), max_frame_length);
905 ENETC_PORT_WR4(sc, ENETC_PTXMBAR, 2 * max_frame_length);
907 /* Set "VLAN promiscuous" mode if filtering is disabled. */
908 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
909 ENETC_PORT_WR4(sc, ENETC_PSIPVMR,
910 ENETC_PSIPVMR_SET_VUTA(1) | ENETC_PSIPVMR_SET_VP(1));
912 ENETC_PORT_WR4(sc, ENETC_PSIPVMR,
913 ENETC_PSIPVMR_SET_VUTA(1));
/* RX ring mode used when rings are enabled in rxd_refill(). */
915 sc->rbmr = ENETC_RBMR_EN | ENETC_RBMR_AL;
917 if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING)
918 sc->rbmr |= ENETC_RBMR_VTE;
920 /* Write MAC address to hardware. */
921 enetc_set_hwaddr(sc);
926 if (sc->fixed_link) {
927 baudrate = ifmedia_baudrate(sc->fixed_ifmedia.ifm_cur->ifm_media);
928 iflib_link_state_change(sc->ctx, LINK_STATE_UP, baudrate);
931 * Can't return an error from this function, there is not much
932 * we can do if this fails.
934 miid = device_get_softc(sc->miibus);
935 (void)mii_mediachg(miid);
938 enetc_promisc_set(ctx, if_getflags(ifp));
/* ifdi_stop: disable all TX and RX BD rings by clearing their mode regs. */
942 enetc_stop(if_ctx_t ctx)
944 struct enetc_softc *sc;
947 sc = iflib_get_softc(ctx);
949 for (i = 0; i < sc->tx_num_queues; i++)
950 ENETC_TXQ_WR4(sc, i, ENETC_TBMR, 0);
952 for (i = 0; i < sc->rx_num_queues; i++)
953 ENETC_RXQ_WR4(sc, i, ENETC_RBMR, 0);
/*
 * ifdi_msix_intr_assign: allocate one RXTX interrupt per RX queue
 * (with RX interrupt coalescing thresholds), map each queue's MSI-X
 * entry in hardware, then attach softirqs for the TX queues. On
 * failure, previously allocated RX irqs are released.
 */
957 enetc_msix_intr_assign(if_ctx_t ctx, int msix)
959 struct enetc_softc *sc;
960 struct enetc_rx_queue *rx_queue;
961 struct enetc_tx_queue *tx_queue;
962 int vector = 0, i, error;
965 sc = iflib_get_softc(ctx);
967 MPASS(sc->rx_num_queues + 1 <= ENETC_MSIX_COUNT);
968 MPASS(sc->rx_num_queues == sc->tx_num_queues);
970 for (i = 0; i < sc->rx_num_queues; i++, vector++) {
971 rx_queue = &sc->rx_queues[i];
972 snprintf(irq_name, sizeof(irq_name), "rxtxq%d", i);
973 error = iflib_irq_alloc_generic(ctx,
974 &rx_queue->irq, vector + 1, IFLIB_INTR_RXTX,
975 NULL, rx_queue, i, irq_name);
/* Map RX ring i to its MSI-X vector and set coalescing thresholds. */
979 ENETC_WR4(sc, ENETC_SIMSIRRV(i), vector);
980 ENETC_RXQ_WR4(sc, i, ENETC_RBICR1, ENETC_RX_INTR_TIME_THR);
981 ENETC_RXQ_WR4(sc, i, ENETC_RBICR0,
982 ENETC_RBICR0_ICEN | ENETC_RBICR0_SET_ICPT(ENETC_RX_INTR_PKT_THR));
985 for (i = 0;i < sc->tx_num_queues; i++, vector++) {
986 tx_queue = &sc->tx_queues[i];
987 snprintf(irq_name, sizeof(irq_name), "txq%d", i);
988 iflib_softirq_alloc_generic(ctx, &tx_queue->irq,
989 IFLIB_INTR_TX, tx_queue, i, irq_name);
991 ENETC_WR4(sc, ENETC_SIMSITRV(i), vector);
/* Error unwind: free the RX irqs acquired above. */
996 for (i = 0; i < sc->rx_num_queues; i++) {
997 rx_queue = &sc->rx_queues[i];
998 iflib_irq_free(ctx, &rx_queue->irq);
/*
 * ifdi_tx_queue_intr_enable: reading the TX interrupt detect
 * register re-arms the queue interrupt.
 */
1004 enetc_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
1006 struct enetc_softc *sc;
1008 sc = iflib_get_softc(ctx);
1009 ENETC_TXQ_RD4(sc, qid, ENETC_TBIDR);
/*
 * ifdi_rx_queue_intr_enable: reading the RX interrupt detect
 * register re-arms the queue interrupt.
 */
1014 enetc_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
1016 struct enetc_softc *sc;
1018 sc = iflib_get_softc(ctx);
1019 ENETC_RXQ_RD4(sc, qid, ENETC_RBIDR);
/* ifdi_intr_enable: unmask interrupts on every RX and TX queue. */
1023 enetc_intr_enable(if_ctx_t ctx)
1025 struct enetc_softc *sc;
1028 sc = iflib_get_softc(ctx);
1030 for (i = 0; i < sc->rx_num_queues; i++)
1031 ENETC_RXQ_WR4(sc, i, ENETC_RBIER, ENETC_RBIER_RXTIE);
1033 for (i = 0; i < sc->tx_num_queues; i++)
1034 ENETC_TXQ_WR4(sc, i, ENETC_TBIER, ENETC_TBIER_TXF);
/* ifdi_intr_disable: mask interrupts on every RX and TX queue. */
1038 enetc_intr_disable(if_ctx_t ctx)
1040 struct enetc_softc *sc;
1043 sc = iflib_get_softc(ctx);
1045 for (i = 0; i < sc->rx_num_queues; i++)
1046 ENETC_RXQ_WR4(sc, i, ENETC_RBIER, 0);
1048 for (i = 0; i < sc->tx_num_queues; i++)
1049 ENETC_TXQ_WR4(sc, i, ENETC_TBIER, 0);
/*
 * if_txrx txd_encap: lay out one packet on the TX BD ring. The first
 * descriptor carries frame metadata (length, interrupt flag), an
 * optional extension descriptor carries the VLAN tag, and remaining
 * DMA segments follow. The final descriptor gets the F (final) flag
 * and the new producer index is reported back through ipi_new_pidx.
 * NOTE(review): the initialization of loop index 'i' and the wrap
 * assignments after the "++pidx == queue_len" checks are absent from
 * this extract.
 */
1053 enetc_isc_txd_encap(void *data, if_pkt_info_t ipi)
1055 struct enetc_softc *sc = data;
1056 struct enetc_tx_queue *queue;
1057 union enetc_tx_bd *desc;
1058 bus_dma_segment_t *segs;
1059 qidx_t pidx, queue_len;
1062 queue = &sc->tx_queues[ipi->ipi_qsidx];
1063 segs = ipi->ipi_segs;
1064 pidx = ipi->ipi_pidx;
1065 queue_len = sc->tx_queue_size;
1068 * First descriptor is special. We use it to set frame
1069 * related information and offloads, e.g. VLAN tag.
1071 desc = &queue->ring[pidx];
1072 bzero(desc, sizeof(*desc));
1073 desc->frm_len = ipi->ipi_len;
1074 desc->addr = segs[i].ds_addr;
1075 desc->buf_len = segs[i].ds_len;
1076 if (ipi->ipi_flags & IPI_TX_INTR)
1077 desc->flags = ENETC_TXBD_FLAGS_FI;
1080 if (++pidx == queue_len)
1083 if (ipi->ipi_mflags & M_VLANTAG) {
1084 /* VLAN tag is inserted in a separate descriptor. */
1085 desc->flags |= ENETC_TXBD_FLAGS_EX;
1086 desc = &queue->ring[pidx];
1087 bzero(desc, sizeof(*desc));
1088 desc->ext.vid = ipi->ipi_vtag;
1089 desc->ext.e_flags = ENETC_TXBD_E_FLAGS_VLAN_INS;
1090 if (++pidx == queue_len)
1094 /* Now add remaining descriptors. */
1095 for (;i < ipi->ipi_nsegs; i++) {
1096 desc = &queue->ring[pidx];
1097 bzero(desc, sizeof(*desc));
1098 desc->addr = segs[i].ds_addr;
1099 desc->buf_len = segs[i].ds_len;
1101 if (++pidx == queue_len)
/* Mark the last descriptor of the frame as final. */
1105 desc->flags |= ENETC_TXBD_FLAGS_F;
1106 ipi->ipi_new_pidx = pidx;
/* Producer catching the consumer means the ring is completely full. */
1107 if (pidx == queue->next_to_clean)
1108 queue->ring_full = true;
/* if_txrx txd_flush: publish the new producer index to the TX ring doorbell. */
1114 enetc_isc_txd_flush(void *data, uint16_t qid, qidx_t pidx)
1116 struct enetc_softc *sc = data;
1118 ENETC_TXQ_WR4(sc, qid, ENETC_TBPIR, pidx);
/*
 * if_txrx txd_credits_update: report how many TX descriptors the
 * hardware has consumed since the last call, based on the hardware
 * consumer index. A full ring with equal indices still counts as
 * one whole ring of credits (ring_full flag).
 */
1122 enetc_isc_txd_credits_update(void *data, uint16_t qid, bool clear)
1124 struct enetc_softc *sc = data;
1125 struct enetc_tx_queue *queue;
1126 qidx_t next_to_clean, next_to_process;
1129 queue = &sc->tx_queues[qid];
1131 ENETC_TXQ_RD4(sc, qid, ENETC_TBCIR) & ENETC_TBCIR_IDX_MASK;
1132 next_to_clean = queue->next_to_clean;
1134 if (next_to_clean == next_to_process && !queue->ring_full)
/* Distance around the ring, accounting for wrap (and full ring). */
1140 clean_count = next_to_process - next_to_clean;
1141 if (clean_count <= 0)
1142 clean_count += sc->tx_queue_size;
1144 queue->next_to_clean = next_to_process;
1145 queue->ring_full = false;
1147 return (clean_count);
/*
 * if_txrx rxd_available: count complete frames ready at 'pidx', up
 * to 'budget'. A quick check of the first descriptor's lstatus
 * short-circuits the empty case; otherwise walk toward the hardware
 * producer index counting descriptors with the F (final) flag.
 */
1151 enetc_isc_rxd_available(void *data, uint16_t qid, qidx_t pidx, qidx_t budget)
1153 struct enetc_softc *sc = data;
1154 struct enetc_rx_queue *queue;
1155 qidx_t hw_pidx, queue_len;
1156 union enetc_rx_bd *desc;
1159 queue = &sc->rx_queues[qid];
1160 desc = &queue->ring[pidx];
1161 queue_len = sc->rx_queue_size;
1163 if (desc->r.lstatus == 0)
1169 hw_pidx = ENETC_RXQ_RD4(sc, qid, ENETC_RBPIR);
1170 while (pidx != hw_pidx && count < budget) {
1171 desc = &queue->ring[pidx];
1172 if (desc->r.lstatus & ENETC_RXBD_LSTATUS_F)
1175 if (++pidx == queue_len)
/*
 * if_txrx rxd_pkt_get: translate a completed descriptor chain into
 * an if_rxd_info: RSS hash, IP checksum status, extracted VLAN tag,
 * and per-fragment index/length entries terminated by the F flag.
 */
1183 enetc_isc_rxd_pkt_get(void *data, if_rxd_info_t ri)
1185 struct enetc_softc *sc = data;
1186 struct enetc_rx_queue *queue;
1187 union enetc_rx_bd *desc;
1188 uint16_t buf_len, pkt_size = 0;
1189 qidx_t cidx, queue_len;
1193 cidx = ri->iri_cidx;
1194 queue = &sc->rx_queues[ri->iri_qsidx];
1195 desc = &queue->ring[cidx];
1196 status = desc->r.lstatus;
1197 queue_len = sc->rx_queue_size;
1200 * Ready bit will be set only when all descriptors
1201 * in the chain have been processed.
1203 if ((status & ENETC_RXBD_LSTATUS_R) == 0)
1206 /* Pass RSS hash. */
1207 if (status & ENETC_RXBD_FLAG_RSSV) {
1208 ri->iri_flowid = desc->r.rss_hash;
1209 ri->iri_rsstype = M_HASHTYPE_OPAQUE_HASH;
1212 /* Pass IP checksum status. */
1213 ri->iri_csum_flags = CSUM_IP_CHECKED;
1214 if ((desc->r.parse_summary & ENETC_RXBD_PARSER_ERROR) == 0)
1215 ri->iri_csum_flags |= CSUM_IP_VALID;
1217 /* Pass extracted VLAN tag. */
1218 if (status & ENETC_RXBD_FLAG_VLAN) {
1219 ri->iri_vtag = desc->r.vlan_opt;
1220 ri->iri_flags = M_VLANTAG;
/* Collect fragments until the final-descriptor flag. */
1223 for (i = 0; i < ENETC_MAX_SCATTER; i++) {
1224 buf_len = desc->r.buf_len;
1225 ri->iri_frags[i].irf_idx = cidx;
1226 ri->iri_frags[i].irf_len = buf_len;
1227 pkt_size += buf_len;
1228 if (desc->r.lstatus & ENETC_RXBD_LSTATUS_F)
1231 if (++cidx == queue_len)
1234 desc = &queue->ring[cidx];
1236 ri->iri_nfrags = i + 1;
1237 ri->iri_len = pkt_size + ENETC_RX_IP_ALIGN;
1238 ri->iri_pad = ENETC_RX_IP_ALIGN;
1240 MPASS(desc->r.lstatus & ENETC_RXBD_LSTATUS_F);
1241 if (status & ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))
/*
 * if_txrx rxd_refill: write fresh buffer physical addresses into the
 * RX ring. The ring is only enabled once at least 8 descriptors are
 * posted because the NIC prefetches the first 8 on enable.
 */
1248 enetc_isc_rxd_refill(void *data, if_rxd_update_t iru)
1250 struct enetc_softc *sc = data;
1251 struct enetc_rx_queue *queue;
1252 union enetc_rx_bd *desc;
1253 qidx_t pidx, queue_len;
1257 queue = &sc->rx_queues[iru->iru_qsidx];
1258 paddrs = iru->iru_paddrs;
1259 pidx = iru->iru_pidx;
1260 count = iru->iru_count;
1261 queue_len = sc->rx_queue_size;
1263 for (i = 0; i < count; i++) {
1264 desc = &queue->ring[pidx];
1265 bzero(desc, sizeof(*desc));
1267 desc->w.addr = paddrs[i];
1268 if (++pidx == queue_len)
1272 * After enabling the queue NIC will prefetch the first
1273 * 8 descriptors. It probably assumes that the RX is fully
1274 * refilled when cidx == pidx.
1275 * Enable it only if we have enough descriptors ready on the ring.
1277 if (!queue->enabled && pidx >= 8) {
1278 ENETC_RXQ_WR4(sc, iru->iru_qsidx, ENETC_RBMR, sc->rbmr);
1279 queue->enabled = true;
/* if_txrx rxd_flush: publish the consumer index so HW can reuse buffers. */
1284 enetc_isc_rxd_flush(void *data, uint16_t qid, uint8_t flid, qidx_t pidx)
1286 struct enetc_softc *sc = data;
1288 ENETC_RXQ_WR4(sc, qid, ENETC_RBCIR, pidx);
/*
 * ifdi_get_counter: serve RX/TX error counts from hardware MAC
 * statistics registers; defer everything else to the stack default.
 */
1292 enetc_get_counter(if_ctx_t ctx, ift_counter cnt)
1294 struct enetc_softc *sc;
1297 sc = iflib_get_softc(ctx);
1298 ifp = iflib_get_ifp(ctx);
1301 case IFCOUNTER_IERRORS:
1302 return (ENETC_PORT_RD8(sc, ENETC_PM0_RERR));
1303 case IFCOUNTER_OERRORS:
1304 return (ENETC_PORT_RD8(sc, ENETC_PM0_TERR));
1306 return (if_get_counter_default(ifp, cnt));
/*
 * ifdi_mtu_set: derive the max frame size from the MTU (plus
 * Ethernet/VLAN header overhead), reject values over the hardware
 * limit, and record it in the shared context.
 */
1311 enetc_mtu_set(if_ctx_t ctx, uint32_t mtu)
1313 struct enetc_softc *sc = iflib_get_softc(ctx);
1314 uint32_t max_frame_size;
1316 max_frame_size = mtu +
1319 sizeof(struct ether_vlan_header);
1321 if (max_frame_size > ENETC_MAX_FRAME_LEN)
1324 sc->shared->isc_max_frame_size = max_frame_size;
/*
 * ifdi_promisc_set: program SI 0's unicast/multicast promiscuous
 * bits from the interface flags (IFF_PROMISC implies both).
 */
1330 enetc_promisc_set(if_ctx_t ctx, int flags)
1332 struct enetc_softc *sc;
1335 sc = iflib_get_softc(ctx);
1337 if (flags & IFF_PROMISC)
1338 reg = ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);
1339 else if (flags & IFF_ALLMULTI)
1340 reg = ENETC_PSIPMR_SET_MP(0);
1342 ENETC_PORT_WR4(sc, ENETC_PSIPMR, reg);
/*
 * ifdi_timer: periodic tick used to poll PHY/link state via the
 * deferred admin task (only for qid 0, to avoid redundant work).
 */
1348 enetc_timer(if_ctx_t ctx, uint16_t qid)
1351 * Poll PHY status. Do this only for qid 0 to save
1355 iflib_admin_intr_deferred(ctx);
/*
 * ifdi_update_admin_status: for PHY-managed links, refresh MII
 * state (fixed links never change).
 * NOTE(review): the mii_tick/mii_pollstat call is absent from this
 * extract — confirm against the full source.
 */
1359 enetc_update_admin_status(if_ctx_t ctx)
1361 struct enetc_softc *sc;
1362 struct mii_data *miid;
1364 sc = iflib_get_softc(ctx);
1366 if (!sc->fixed_link) {
1367 miid = device_get_softc(sc->miibus);
/*
 * miibus_readreg: read a PHY register through the internal EMDIO
 * block. 'dev' is the iflib device, hence the double softc lookup.
 */
1373 enetc_miibus_readreg(device_t dev, int phy, int reg)
1375 struct enetc_softc *sc;
1377 sc = iflib_get_softc(device_get_softc(dev));
1378 return (enetc_mdio_read(sc->regs, ENETC_PORT_BASE + ENETC_EMDIO_BASE,
/* miibus_writereg: write a PHY register through the internal EMDIO block. */
1383 enetc_miibus_writereg(device_t dev, int phy, int reg, int data)
1385 struct enetc_softc *sc;
1387 sc = iflib_get_softc(device_get_softc(dev));
1388 return (enetc_mdio_write(sc->regs, ENETC_PORT_BASE + ENETC_EMDIO_BASE,
/* miibus_linkchg: link changes are handled identically to status changes. */
1393 enetc_miibus_linkchg(device_t dev)
1396 enetc_miibus_statchg(dev);
/*
 * miibus_statchg: translate the MII media status into an iflib
 * link-state report (up/down/unknown plus baudrate).
 */
1400 enetc_miibus_statchg(device_t dev)
1402 struct enetc_softc *sc;
1403 struct mii_data *miid;
1404 int link_state, baudrate;
1406 sc = iflib_get_softc(device_get_softc(dev));
1407 miid = device_get_softc(sc->miibus);
1409 baudrate = ifmedia_baudrate(miid->mii_media_active);
1410 if (miid->mii_media_status & IFM_AVALID) {
1411 if (miid->mii_media_status & IFM_ACTIVE)
1412 link_state = LINK_STATE_UP;
1414 link_state = LINK_STATE_DOWN;
1416 link_state = LINK_STATE_UNKNOWN;
1419 iflib_link_state_change(sc->ctx, link_state, baudrate);
/*
 * ifmedia change callback for PHY-managed links.
 * NOTE(review): the mii_mediachg call and return are absent from
 * this extract — confirm against the full source.
 */
1424 enetc_media_change(if_t ifp)
1426 struct enetc_softc *sc;
1427 struct mii_data *miid;
1429 sc = iflib_get_softc(ifp->if_softc);
1430 miid = device_get_softc(sc->miibus);
/*
 * ifmedia status callback for PHY-managed links: report the MII
 * layer's current active media and status.
 */
1437 enetc_media_status(if_t ifp, struct ifmediareq* ifmr)
1439 struct enetc_softc *sc;
1440 struct mii_data *miid;
1442 sc = iflib_get_softc(ifp->if_softc);
1443 miid = device_get_softc(sc->miibus);
1447 ifmr->ifm_active = miid->mii_media_active;
1448 ifmr->ifm_status = miid->mii_media_status;
/* ifmedia change callback for fixed links: media cannot be changed. */
1452 enetc_fixed_media_change(if_t ifp)
1455 if_printf(ifp, "Can't change media in fixed-link mode.\n");
1459 enetc_fixed_media_status(if_t ifp, struct ifmediareq* ifmr)
1461 struct enetc_softc *sc;
1463 sc = iflib_get_softc(ifp->if_softc);
1465 ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
1466 ifmr->ifm_active = sc->fixed_ifmedia.ifm_cur->ifm_media;