2 * Copyright (c) 2003-2009 RMI Corporation
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of RMI Corporation, nor the names of its contributors,
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * The XLR device supports up to four 10/100/1000 Ethernet MACs and up to
33 * two 10G Ethernet MACs (of XGMII). Alternatively, each 10G port can be used
34 * as a SPI-4 interface, with 8 ports per such interface. The MACs are
35 * encapsulated in another hardware block referred to as network accelerator,
36 * such that there are three instances of these in an XLR. One of them controls
37 * the four 1G RGMII ports while one each of the others controls an XGMII port.
38 * Enabling MACs requires configuring the corresponding network accelerator
39 * and the individual port.
40 * The XLS device supports up to 8 10/100/1000 Ethernet MACs or max 2 10G
41 * Ethernet MACs. The 1G MACs are of SGMII and 10G MACs are of XAUI
42 * interface. These ports are part of two network accelerators.
43 * The nlge driver configures and initializes non-SPI4 Ethernet ports in the
44 * XLR/XLS devices and enables data transfer on them.
47 #include <sys/cdefs.h>
48 __FBSDID("$FreeBSD$");
50 #ifdef HAVE_KERNEL_OPTION_HEADERS
51 #include "opt_device_polling.h"
54 #include <sys/endian.h>
55 #include <sys/systm.h>
56 #include <sys/sockio.h>
57 #include <sys/param.h>
59 #include <sys/mutex.h>
61 #include <sys/limits.h>
64 #include <sys/malloc.h>
65 #include <sys/kernel.h>
66 #include <sys/module.h>
67 #include <sys/socket.h>
68 #define __RMAN_RESOURCE_VISIBLE
70 #include <sys/taskqueue.h>
72 #include <sys/sysctl.h>
75 #include <net/if_arp.h>
76 #include <net/ethernet.h>
77 #include <net/if_dl.h>
78 #include <net/if_media.h>
80 #include <net/if_types.h>
81 #include <net/if_vlan_var.h>
83 #include <netinet/in_systm.h>
84 #include <netinet/in.h>
85 #include <netinet/ip.h>
91 #include <machine/reg.h>
92 #include <machine/cpu.h>
93 #include <machine/mips_opcode.h>
94 #include <machine/asm.h>
95 #include <machine/cpuregs.h>
96 #include <machine/param.h>
97 #include <machine/intr_machdep.h>
98 #include <machine/clock.h> /* for DELAY */
99 #include <machine/bus.h>
100 #include <machine/resource.h>
102 #include <mips/rmi/interrupt.h>
103 #include <mips/rmi/msgring.h>
104 #include <mips/rmi/iomap.h>
105 #include <mips/rmi/pic.h>
106 #include <mips/rmi/board.h>
107 #include <mips/rmi/rmi_mips_exts.h>
108 #include <mips/rmi/rmi_boot_info.h>
109 #include <mips/rmi/dev/xlr/atx_cpld.h>
110 #include <mips/rmi/dev/xlr/xgmac_mdio.h>
112 #include <dev/mii/mii.h>
113 #include <dev/mii/miivar.h>
115 #include <dev/mii/brgphyreg.h>
116 #include "miibus_if.h"
118 #include <mips/rmi/dev/nlge/if_nlge.h>
120 MODULE_DEPEND(nlna, nlge, 1, 1, 1);
121 MODULE_DEPEND(nlge, ether, 1, 1, 1);
122 MODULE_DEPEND(nlge, miibus, 1, 1, 1);
124 /* Network accelerator entry points */
125 static int nlna_probe(device_t);
126 static int nlna_attach(device_t);
127 static int nlna_detach(device_t);
128 static int nlna_suspend(device_t);
129 static int nlna_resume(device_t);
130 static int nlna_shutdown(device_t);
132 /* GMAC port entry points */
133 static int nlge_probe(device_t);
134 static int nlge_attach(device_t);
135 static int nlge_detach(device_t);
136 static int nlge_suspend(device_t);
137 static int nlge_resume(device_t);
138 static void nlge_init(void *);
139 static int nlge_ioctl(struct ifnet *, u_long, caddr_t);
140 static int nlge_tx(struct ifnet *ifp, struct mbuf *m);
141 static void nlge_rx(struct nlge_softc *sc, vm_paddr_t paddr, int len);
143 static int nlge_mii_write(struct device *, int, int, int);
144 static int nlge_mii_read(struct device *, int, int);
145 static void nlge_mac_mii_statchg(device_t);
146 static int nlge_mediachange(struct ifnet *ifp);
147 static void nlge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr);
149 /* Other internal/helper functions */
150 static void *get_buf(void);
152 static void nlna_add_to_port_set(struct nlge_port_set *pset,
153 struct nlge_softc *sc);
154 static void nlna_config_pde(struct nlna_softc *);
155 static void nlna_config_parser(struct nlna_softc *);
156 static void nlna_config_classifier(struct nlna_softc *);
157 static void nlna_config_fifo_spill_area(struct nlna_softc *sc);
158 static void nlna_config_translate_table(struct nlna_softc *sc);
159 static void nlna_config_common(struct nlna_softc *);
160 static void nlna_disable_ports(struct nlna_softc *sc);
161 static void nlna_enable_intr(struct nlna_softc *sc);
162 static void nlna_disable_intr(struct nlna_softc *sc);
163 static void nlna_enable_ports(struct nlna_softc *sc);
164 static void nlna_get_all_softc(device_t iodi_dev,
165 struct nlna_softc **sc_vec, uint32_t vec_sz);
166 static void nlna_hw_init(struct nlna_softc *sc);
167 static int nlna_is_last_active_na(struct nlna_softc *sc);
168 static void nlna_media_specific_config(struct nlna_softc *sc);
169 static void nlna_reset_ports(struct nlna_softc *sc,
170 struct xlr_gmac_block_t *blk);
171 static struct nlna_softc *nlna_sc_init(device_t dev,
172 struct xlr_gmac_block_t *blk);
173 static void nlna_setup_intr(struct nlna_softc *sc);
174 static void nlna_smp_update_pde(void *dummy __unused);
175 static void nlna_submit_rx_free_desc(struct nlna_softc *sc,
178 static int nlge_gmac_config_speed(struct nlge_softc *, int quick);
179 static void nlge_hw_init(struct nlge_softc *sc);
180 static int nlge_if_init(struct nlge_softc *sc);
181 static void nlge_intr(void *arg);
182 static int nlge_irq_init(struct nlge_softc *sc);
183 static void nlge_irq_fini(struct nlge_softc *sc);
184 static void nlge_media_specific_init(struct nlge_softc *sc);
185 static void nlge_mii_init(device_t dev, struct nlge_softc *sc);
186 static int nlge_mii_read_internal(xlr_reg_t *mii_base, int phyaddr,
188 static void nlge_mii_write_internal(xlr_reg_t *mii_base, int phyaddr,
189 int regidx, int regval);
190 void nlge_msgring_handler(int bucket, int size, int code,
191 int stid, struct msgrng_msg *msg, void *data);
192 static void nlge_port_disable(struct nlge_softc *sc);
193 static void nlge_port_enable(struct nlge_softc *sc);
194 static void nlge_read_mac_addr(struct nlge_softc *sc);
195 static void nlge_sc_init(struct nlge_softc *sc, device_t dev,
196 struct xlr_gmac_port *port_info);
197 static void nlge_set_mac_addr(struct nlge_softc *sc);
198 static void nlge_set_port_attribs(struct nlge_softc *,
199 struct xlr_gmac_port *);
200 static void nlge_mac_set_rx_mode(struct nlge_softc *sc);
201 static void nlge_sgmii_init(struct nlge_softc *sc);
202 static int nlge_start_locked(struct ifnet *ifp, struct nlge_softc *sc,
205 static int prepare_fmn_message(struct nlge_softc *sc,
206 struct msgrng_msg *msg, uint32_t *n_entries, struct mbuf *m_head,
207 uint64_t fr_stid, struct nlge_tx_desc **tx_desc);
209 static void release_tx_desc(vm_paddr_t phy_addr);
210 static int send_fmn_msg_tx(struct nlge_softc *, struct msgrng_msg *,
215 static int mac_debug = 1;
217 #define PDEBUG(fmt, args...) \
220 printf("[%s@%d|%s]: cpu_%d: " fmt, \
221 __FILE__, __LINE__, __FUNCTION__, PCPU_GET(cpuid), ##args);\
225 /* Debug/dump functions */
226 static void dump_reg(xlr_reg_t *addr, uint32_t offset, char *name);
227 static void dump_gmac_registers(struct nlge_softc *);
228 static void dump_na_registers(xlr_reg_t *base, int port_id);
229 static void dump_mac_stats(struct nlge_softc *sc);
230 static void dump_mii_regs(struct nlge_softc *sc) __attribute__((used));
231 static void dump_mii_data(struct mii_data *mii) __attribute__((used));
232 static void dump_board_info(struct xlr_board_info *);
233 static void dump_pcs_regs(struct nlge_softc *sc, int phy);
237 #define PDEBUG(fmt, args...)
238 #define dump_reg(a, o, n) /* nop */
239 #define dump_gmac_registers(a) /* nop */
240 #define dump_na_registers(a, p) /* nop */
241 #define dump_board_info(b) /* nop */
242 #define dump_mac_stats(sc) /* nop */
243 #define dump_mii_regs(sc) /* nop */
244 #define dump_mii_data(mii) /* nop */
245 #define dump_pcs_regs(sc, phy) /* nop */
248 /* Wrappers etc. to export the driver entry points. */
249 static device_method_t nlna_methods[] = {
250 /* Device interface */
251 DEVMETHOD(device_probe, nlna_probe),
252 DEVMETHOD(device_attach, nlna_attach),
253 DEVMETHOD(device_detach, nlna_detach),
254 DEVMETHOD(device_shutdown, nlna_shutdown),
255 DEVMETHOD(device_suspend, nlna_suspend),
256 DEVMETHOD(device_resume, nlna_resume),
258 /* bus interface : TBD : what are these for ? */
259 DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
260 DEVMETHOD(bus_print_child, bus_generic_print_child),
261 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
266 static driver_t nlna_driver = {
269 sizeof(struct nlna_softc)
272 static devclass_t nlna_devclass;
274 static device_method_t nlge_methods[] = {
275 /* Device interface */
276 DEVMETHOD(device_probe, nlge_probe),
277 DEVMETHOD(device_attach, nlge_attach),
278 DEVMETHOD(device_detach, nlge_detach),
279 DEVMETHOD(device_shutdown, bus_generic_shutdown),
280 DEVMETHOD(device_suspend, nlge_suspend),
281 DEVMETHOD(device_resume, nlge_resume),
284 DEVMETHOD(miibus_readreg, nlge_mii_read),
285 DEVMETHOD(miibus_writereg, nlge_mii_write),
286 DEVMETHOD(miibus_statchg, nlge_mac_mii_statchg),
291 static driver_t nlge_driver = {
294 sizeof(struct nlge_softc)
297 static devclass_t nlge_devclass;
299 DRIVER_MODULE(nlna, iodi, nlna_driver, nlna_devclass, 0, 0);
300 DRIVER_MODULE(nlge, nlna, nlge_driver, nlge_devclass, 0, 0);
301 DRIVER_MODULE(miibus, nlge, miibus_driver, miibus_devclass, 0, 0);
303 static uma_zone_t nl_tx_desc_zone;
306 static int flow_classification = 0;
307 TUNABLE_INT("hw.nlge.flow_classification", &flow_classification);
309 #define NLGE_HW_CHKSUM 1
312 atomic_incr_long(unsigned long *addr)
/*
 * Increment the counter at 'addr' via the XLR load-and-add-word
 * instruction (xlr_ldaddwu).  Only the low 32 bits of the long are
 * touched (see XXX below), so on a 64-bit 'unsigned long' the upper
 * word is never carried into -- counters effectively wrap at 2^32.
 */
314 /* XXX: fix for 64 bit */
315 unsigned int *iaddr = (unsigned int *)addr;
317 xlr_ldaddwu(1, iaddr);
321 nlna_probe(device_t dev)
323 return (BUS_PROBE_DEFAULT);
327 * Add all attached GMAC/XGMAC ports to the device tree. Port
328 * configuration is spread in two regions - common configuration
329 * for all ports in the NA and per-port configuration in MAC-specific
330 * region. This function does the following:
331 * - adds the ports to the device tree
333 * - do all the common initialization
334 * - invoke bus_generic_attach for per-port configuration
335 * - supply initial free rx descriptors to ports
336 * - initialize s/w data structures
337 * - finally, enable interrupts (only in the last NA).
339 * For reference, sample address space for common and per-port
340 * registers is given below.
342 * The address map for RNA0 is: (typical value)
344 * XLR_IO_BASE +--------------------------------------+ 0xbef0_0000
351 * GMAC0 ---> +--------------------------------------+ 0xbef0_c000
354 * (common) -> |......................................| 0xbef0_c400
356 * | (RGMII/SGMII: common registers) |
358 * GMAC1 ---> |--------------------------------------| 0xbef0_d000
361 * (common) -> |......................................| 0xbef0_d400
363 * | (RGMII/SGMII: common registers) |
365 * |......................................|
368 * Ref: Figure 14-3 and Table 14-1 of XLR PRM
371 nlna_attach(device_t dev)
373 struct xlr_gmac_block_t *block_info;
375 struct nlna_softc *sc;
380 id = device_get_unit(dev);
381 block_info = device_get_ivars(dev);
382 if (!block_info->enabled) {
387 dump_board_info(&xlr_board_info);
389 /* Initialize nlna state in softc structure */
390 sc = nlna_sc_init(dev, block_info);
392 /* Add device's for the ports controlled by this NA. */
393 if (block_info->type == XLR_GMAC) {
394 KASSERT(id < 2, ("No GMACs supported with this network"
395 "accelerator: %d", id));
396 for (i = 0; i < sc->num_ports; i++) {
397 gmac_dev = device_add_child(dev, "nlge", -1);
398 device_set_ivars(gmac_dev, &block_info->gmac_port[i]);
400 } else if (block_info->type == XLR_XGMAC) {
401 KASSERT(id > 0 && id <= 2, ("No XGMACs supported with this"
402 "network accelerator: %d", id));
403 gmac_dev = device_add_child(dev, "nlge", -1);
404 device_set_ivars(gmac_dev, &block_info->gmac_port[0]);
405 } else if (block_info->type == XLR_SPI4) {
406 /* SPI4 is not supported here */
407 device_printf(dev, "Unsupported: NA with SPI4 type");
411 nlna_reset_ports(sc, block_info);
413 /* Initialize Network Accelerator registers. */
416 error = bus_generic_attach(dev);
418 device_printf(dev, "failed to attach port(s)\n");
422 /* Send out the initial pool of free-descriptors for the rx path */
423 nlna_submit_rx_free_desc(sc, MAX_FRIN_SPILL);
425 /* S/w data structure initializations shared by all NA's. */
426 if (nl_tx_desc_zone == NULL) {
427 /* Create a zone for allocating tx descriptors */
428 nl_tx_desc_zone = uma_zcreate("NL Tx Desc",
429 sizeof(struct nlge_tx_desc), NULL, NULL, NULL, NULL,
430 XLR_CACHELINE_SIZE, 0);
433 /* Enable NA interrupts */
443 nlna_detach(device_t dev)
445 struct nlna_softc *sc;
447 sc = device_get_softc(dev);
448 if (device_is_alive(dev)) {
449 nlna_disable_intr(sc);
450 /* This will make sure that per-port detach is complete
451 * and all traffic on the ports has been stopped. */
452 bus_generic_detach(dev);
453 uma_zdestroy(nl_tx_desc_zone);
460 nlna_suspend(device_t dev)
467 nlna_resume(device_t dev)
474 nlna_shutdown(device_t dev)
480 /* GMAC port entry points */
482 nlge_probe(device_t dev)
484 struct nlge_softc *sc;
485 struct xlr_gmac_port *port_info;
487 char *desc[] = { "RGMII", "SGMII", "RGMII/SGMII", "XGMAC", "XAUI",
490 port_info = device_get_ivars(dev);
491 index = (port_info->type < XLR_RGMII || port_info->type > XLR_XAUI) ?
493 device_set_desc_copy(dev, desc[index]);
495 sc = device_get_softc(dev);
496 nlge_sc_init(sc, dev, port_info);
498 nlge_port_disable(sc);
504 nlge_attach(device_t dev)
506 struct nlge_softc *sc;
507 struct nlna_softc *nsc;
510 sc = device_get_softc(dev);
513 nlge_mii_init(dev, sc);
514 error = nlge_irq_init(sc);
519 nsc = (struct nlna_softc *)device_get_softc(device_get_parent(dev));
520 nsc->child_sc[sc->instance] = sc;
526 nlge_detach(device_t dev)
528 struct nlge_softc *sc;
531 sc = device_get_softc(dev);
534 if (device_is_attached(dev)) {
535 nlge_port_disable(sc);
538 bus_generic_detach(dev);
547 nlge_suspend(device_t dev)
553 nlge_resume(device_t dev)
559 nlge_init(void *addr)
561 struct nlge_softc *sc;
564 sc = (struct nlge_softc *)addr;
567 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
570 nlge_gmac_config_speed(sc, 1);
571 ifp->if_drv_flags |= IFF_DRV_RUNNING;
572 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
573 nlge_port_enable(sc);
575 if (sc->port_type == XLR_SGMII) {
576 dump_pcs_regs(sc, 27);
578 dump_gmac_registers(sc);
583 nlge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
/*
 * ifnet ioctl handler: brings the port up/down to track IFF_UP,
 * syncs promiscuous mode into the MAC, and forwards media ioctls
 * to the attached miibus; everything else goes to ether_ioctl().
 */
585 struct mii_data *mii;
586 struct nlge_softc *sc;
592 ifr = (struct ifreq *)data;
597 if (ifp->if_flags & IFF_UP) {
598 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
601 if (ifp->if_flags & IFF_PROMISC &&
602 !(sc->if_flags & IFF_PROMISC)) {
603 sc->if_flags |= IFF_PROMISC;
604 nlge_mac_set_rx_mode(sc);
605 } else if (!(ifp->if_flags & IFF_PROMISC) &&
606 sc->if_flags & IFF_PROMISC) {
/* NOTE(review): this looks like it should be "&= ~IFF_PROMISC" to
 * clear the promiscuous bit; as written it keeps ONLY the PROMISC
 * bit and wipes every other cached flag -- verify and fix. */
607 sc->if_flags &= IFF_PROMISC;
608 nlge_mac_set_rx_mode(sc);
611 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
612 nlge_port_disable(sc);
/* Cache the interface flags so the next SIOCSIFFLAGS can diff them. */
615 sc->if_flags = ifp->if_flags;
/* Media ioctls are serviced by the PHY layer when a miibus exists. */
622 if (sc->mii_bus != NULL) {
623 mii = (struct mii_data *)device_get_softc(sc->mii_bus);
624 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
630 error = ether_ioctl(ifp, command, data);
637 /* This function is called from an interrupt handler */
639 nlge_msgring_handler(int bucket, int size, int code, int stid,
640 struct msgrng_msg *msg, void *data)
642 struct nlna_softc *na_sc;
643 struct nlge_softc *sc;
646 vm_paddr_t phys_addr;
/*
 * Decode the FMN message word: bits 40-53 carry the length, the low
 * 40 bits the physical address, bits 54-57 the port, bit 62 the
 * p2p flag and bits 58-61 a tx error code (freeback case).
 */
655 length = (msg->msg0 >> 40) & 0x3fff;
656 na_sc = (struct nlna_softc *)data;
658 ctrl = CTRL_REG_FREE;
659 phys_addr = msg->msg0 & 0xffffffffffULL;
660 port = (msg->msg0 >> 54) & 0x0f;
661 is_p2p = (msg->msg0 >> 62) & 0x1;
662 tx_error = (msg->msg0 >> 58) & 0xf;
/* Rx variant: the buffer address is cacheline aligned, and the h/w
 * prepad plus trailing CRC are stripped from the reported length. */
665 phys_addr = msg->msg0 & 0xffffffffe0ULL;
666 length = length - BYTE_OFFSET - MAC_CRC_LEN;
667 port = msg->msg0 & 0x0f;
670 sc = na_sc->child_sc[port];
672 printf("Message (of %d len) with softc=NULL on %d port (type=%s)\n",
673 length, port, (ctrl == CTRL_SNGL ? "Pkt rx" :
674 "Freeback for tx packet"));
/* Transmit freeback: reclaim the descriptor (p2p) or the mbuf, clear
 * OACTIVE so the tx path can resume, and bump the o-counters. */
678 if (ctrl == CTRL_REG_FREE || ctrl == CTRL_JUMBO_FREE) {
682 release_tx_desc(phys_addr);
685 m = (struct mbuf *)(uintptr_t)xlr_paddr_ld(phys_addr);
688 m = (struct mbuf *)(uintptr_t)phys_addr;
693 if (ifp->if_drv_flags & IFF_DRV_OACTIVE){
694 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
698 printf("ERROR: Tx fb error (%d) on port %d\n", tx_error,
701 atomic_incr_long((tx_error) ? &ifp->if_oerrors: &ifp->if_opackets);
702 } else if (ctrl == CTRL_SNGL || ctrl == CTRL_START) {
/* Packet receive: hand it up via nlge_rx() and immediately return
 * one free rx descriptor to the NA to keep the pool topped up. */
705 nlge_rx(sc, phys_addr, length);
706 nlna_submit_rx_free_desc(na_sc, 1); /* return free descr to NA */
708 printf("[%s]: unrecognized ctrl=%d!\n", __func__, ctrl);
714 nlge_tx(struct ifnet *ifp, struct mbuf *m)
/* Transmit entry point (likely the if_transmit handler) -- simply
 * forwards to nlge_start_locked() with the port softc. */
716 return (nlge_start_locked(ifp, ifp->if_softc, m));
720 nlge_start_locked(struct ifnet *ifp, struct nlge_softc *sc, struct mbuf *m)
722 struct msgrng_msg msg;
723 struct nlge_tx_desc *tx_desc;
735 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING) ||
736 ifp->if_drv_flags & IFF_DRV_OACTIVE) {
738 goto fail; // note: mbuf will get free'd
743 /* H/w threads [0, 2] --> bucket 6 and [1, 3] --> bucket 7 */
744 fr_stid = cpu * 8 + 6 + (tid % 2);
747 * First, remove some freeback messages before transmitting
748 * any new packets. However, cap the number of messages
749 * drained to permit this thread to continue with its
752 * Mask for buckets {6, 7} is 0xc0
754 xlr_msgring_handler(0xc0, 4);
756 ret = prepare_fmn_message(sc, &msg, &n_entries, m, fr_stid, &tx_desc);
758 error = (ret == 2) ? ENOBUFS : ENOTSUP;
761 ret = send_fmn_msg_tx(sc, &msg, n_entries);
770 if (tx_desc != NULL) {
771 uma_zfree(nl_tx_desc_zone, tx_desc);
774 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
776 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
780 atomic_incr_long(&ifp->if_iqdrops);
786 nlge_rx(struct nlge_softc *sc, vm_paddr_t paddr, int len)
/*
 * Receive path: turn the buffer the NA delivered at physical address
 * 'paddr' (payload length 'len') back into its mbuf and pass it up
 * the stack.
 */
793 sr = xlr_enable_kx();
/* The mbuf pointer and a magic cookie are stashed in the cacheline
 * just before the packet data -- presumably written when the buffer
 * was allocated (get_buf()); verify against the rx alloc path. */
794 tm = xlr_paddr_ld(paddr - XLR_CACHELINE_SIZE);
795 mag = xlr_paddr_ld(paddr - XLR_CACHELINE_SIZE + sizeof(uint64_t));
798 m = (struct mbuf *)(intptr_t)tm;
/* Sanity-check the cookie to catch buffers we did not allocate. */
799 if (mag != 0xf00bad) {
800 /* somebody else's packet. Error - FIXME in initialization */
801 printf("cpu %d: *ERROR* Not my packet paddr %jx\n",
802 xlr_core_id(), (uintmax_t)paddr);
808 #ifdef NLGE_HW_CHKSUM
/* Checksum status is read from byte 10 of the h/w prepad: bit 1
 * appears to mean "IP header csum ok" and bit 0 "TCP/UDP csum ok"
 * given the flag mapping below -- confirm against the XLR PRM. */
809 m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
810 if (m->m_data[10] & 0x2) {
811 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
812 if (m->m_data[10] & 0x1) {
813 m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID |
815 m->m_pkthdr.csum_data = htons(0xffff);
/* Step past the h/w prepad so m_data points at the Ethernet frame. */
818 m->m_data += NLGE_PREPAD_LEN;
819 len -= NLGE_PREPAD_LEN;
821 m->m_pkthdr.csum_flags = 0;
825 m->m_data += BYTE_OFFSET ;
826 m->m_pkthdr.len = m->m_len = len;
827 m->m_pkthdr.rcvif = ifp;
829 atomic_incr_long(&ifp->if_ipackets);
830 (*ifp->if_input)(ifp, m);
834 nlge_mii_write(struct device *dev, int phyaddr, int regidx, int regval)
836 struct nlge_softc *sc;
838 sc = device_get_softc(dev);
839 if (sc->port_type != XLR_XGMII)
840 nlge_mii_write_internal(sc->mii_base, phyaddr, regidx, regval);
846 nlge_mii_read(struct device *dev, int phyaddr, int regidx)
848 struct nlge_softc *sc;
851 sc = device_get_softc(dev);
852 val = (sc->port_type == XLR_XGMII) ? (0xffff) :
853 nlge_mii_read_internal(sc->mii_base, phyaddr, regidx);
859 nlge_mac_mii_statchg(device_t dev)
864 nlge_mediachange(struct ifnet *ifp)
870 nlge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
872 struct nlge_softc *sc;
878 md = device_get_softc(sc->mii_bus);
880 ifmr->ifm_status = IFM_AVALID;
881 ifmr->ifm_active = IFM_ETHER;
883 if (sc->link == xlr_mac_link_down)
887 ifmr->ifm_active = md->mii_media.ifm_cur->ifm_media;
888 ifmr->ifm_status |= IFM_ACTIVE;
891 static struct nlna_softc *
892 nlna_sc_init(device_t dev, struct xlr_gmac_block_t *blk)
894 struct nlna_softc *sc;
896 sc = device_get_softc(dev);
897 memset(sc, 0, sizeof(*sc));
899 sc->base = xlr_io_mmio(blk->baseaddr);
900 sc->rfrbucket = blk->station_rfr;
901 sc->station_id = blk->station_id;
902 sc->na_type = blk->type;
903 sc->mac_type = blk->mode;
904 sc->num_ports = blk->num_ports;
906 sc->mdio_set.port_vec = sc->mdio_sc;
907 sc->mdio_set.vec_sz = XLR_MAX_MACS;
914 * - Initialize common GMAC registers (index range 0x100-0x3ff).
917 nlna_hw_init(struct nlna_softc *sc)
921 * Register message ring handler for the NA block, messages from
922 * the GMAC will have source station id to the first bucket of the
923 * NA FMN station, so register just that station id.
925 if (register_msgring_handler(sc->station_id, sc->station_id + 1,
926 nlge_msgring_handler, sc)) {
927 panic("Couldn't register msgring handler\n");
929 nlna_config_fifo_spill_area(sc);
931 nlna_config_common(sc);
932 nlna_config_parser(sc);
933 nlna_config_classifier(sc);
937 * Enable interrupts on all the ports controlled by this NA. For now, we
938 * only care about the MII interrupt and this has to be enabled only
941 * This function is not in-sync with the regular way of doing things - it
942 * executes only in the context of the last active network accelerator (and
943 * thereby has some ugly accesses in the device tree). Though inelegant, it
944 * is necessary to do it this way as the per-port interrupts can be
945 * setup/enabled only after all the network accelerators have been
949 nlna_setup_intr(struct nlna_softc *sc)
951 struct nlna_softc *na_sc[XLR_MAX_NLNA];
952 struct nlge_port_set *pset;
953 struct xlr_gmac_port *port_info;
957 if (!nlna_is_last_active_na(sc))
960 /* Collect all nlna softc pointers */
961 memset(na_sc, 0, sizeof(*na_sc) * XLR_MAX_NLNA);
962 iodi_dev = device_get_parent(sc->nlna_dev);
963 nlna_get_all_softc(iodi_dev, na_sc, XLR_MAX_NLNA);
965 /* Setup the MDIO interrupt lists. */
967 * MDIO interrupts are coarse - a single interrupt line provides
968 * information about one of many possible ports. To figure out the
969 * exact port on which action is to be taken, all of the ports
970 * linked to an MDIO interrupt should be read. To enable this,
971 * ports need to add themselves to port sets.
973 for (i = 0; i < XLR_MAX_NLNA; i++) {
974 if (na_sc[i] == NULL)
976 for (j = 0; j < na_sc[i]->num_ports; j++) {
977 /* processing j-th port on i-th NA */
978 port_info = device_get_ivars(
979 na_sc[i]->child_sc[j]->nlge_dev);
980 pset = &na_sc[port_info->mdint_id]->mdio_set;
981 nlna_add_to_port_set(pset, na_sc[i]->child_sc[j]);
985 /* Enable interrupts */
986 for (i = 0; i < XLR_MAX_NLNA; i++) {
987 if (na_sc[i] != NULL && na_sc[i]->na_type != XLR_XGMAC) {
988 nlna_enable_intr(na_sc[i]);
994 nlna_add_to_port_set(struct nlge_port_set *pset, struct nlge_softc *sc)
/* Append 'sc' to the first free (NULL) slot of the MDIO port set;
 * print a warning when the fixed-size vector is already full. */
998 /* step past the non-NULL elements */
999 for (i = 0; i < pset->vec_sz && pset->port_vec[i] != NULL; i++) ;
1000 if (i < pset->vec_sz)
1001 pset->port_vec[i] = sc;
1003 printf("warning: internal error: out-of-bounds for MDIO array");
1007 nlna_enable_intr(struct nlna_softc *sc)
1011 for (i = 0; i < sc->num_ports; i++) {
1012 if (sc->child_sc[i]->instance == 0)
1013 NLGE_WRITE(sc->child_sc[i]->base, R_INTMASK,
1014 (1 << O_INTMASK__MDInt));
1019 nlna_disable_intr(struct nlna_softc *sc)
1023 for (i = 0; i < sc->num_ports; i++) {
1024 if (sc->child_sc[i]->instance == 0)
1025 NLGE_WRITE(sc->child_sc[i]->base, R_INTMASK, 0);
1030 nlna_is_last_active_na(struct nlna_softc *sc)
/* True when this NA is the last enabled one on the board: either it
 * is unit 2 (the highest possible) or the next unit is disabled. */
1034 id = device_get_unit(sc->nlna_dev);
1035 return (id == 2 || xlr_board_info.gmac_block[id + 1].enabled == 0);
1039 nlna_submit_rx_free_desc(struct nlna_softc *sc, uint32_t n_desc)
1041 struct msgrng_msg msg;
1043 uint32_t msgrng_flags;
1044 int i, n, stid, ret, code;
1047 PDEBUG("Sending %d free-in descriptors to station=%d\n", n_desc,
1051 stid = sc->rfrbucket;
1052 code = (sc->na_type == XLR_XGMAC) ? MSGRNG_CODE_XGMAC : MSGRNG_CODE_MAC;
1053 memset(&msg, 0, sizeof(msg));
1055 for (i = 0; i < n_desc; i++) {
1059 device_printf(sc->nlna_dev, "Cannot allocate mbuf\n");
1063 /* Send the free Rx desc to the MAC */
1064 msg.msg0 = vtophys(ptr) & 0xffffffffe0ULL;
1067 msgrng_flags = msgrng_access_enable();
1068 ret = message_send(1, code, stid, &msg);
1069 msgrng_restore(msgrng_flags);
1070 KASSERT(n++ < 100000, ("Too many credit fails in rx path\n"));
1075 static __inline__ void *
1076 nlna_config_spill(xlr_reg_t *base, int reg_start_0, int reg_start_1,
1077 int reg_size, int size)
1081 uint32_t spill_size;
1084 spill = contigmalloc((spill_size + XLR_CACHELINE_SIZE), M_DEVBUF,
1085 M_NOWAIT | M_ZERO, 0, 0xffffffff, XLR_CACHELINE_SIZE, 0);
1086 if (spill == NULL || ((vm_offset_t) spill & (XLR_CACHELINE_SIZE - 1))) {
1087 panic("Unable to allocate memory for spill area!\n");
1089 phys_addr = vtophys(spill);
1090 PDEBUG("Allocated spill %d bytes at %llx\n", size, phys_addr);
1091 NLGE_WRITE(base, reg_start_0, (phys_addr >> 5) & 0xffffffff);
1092 NLGE_WRITE(base, reg_start_1, (phys_addr >> 37) & 0x07);
1093 NLGE_WRITE(base, reg_size, spill_size);
1099 * Configure the 6 FIFO's that are used by the network accelerator to
1100 * communicate with the rest of the XLx device. 4 of the FIFO's are for
1101 * packets from NA --> cpu (called Class FIFO's) and 2 are for feeding
1102 * the NA with free descriptors.
1105 nlna_config_fifo_spill_area(struct nlna_softc *sc)
1107 sc->frin_spill = nlna_config_spill(sc->base,
1108 R_REG_FRIN_SPILL_MEM_START_0,
1109 R_REG_FRIN_SPILL_MEM_START_1,
1110 R_REG_FRIN_SPILL_MEM_SIZE,
1112 sizeof(struct fr_desc));
1113 sc->frout_spill = nlna_config_spill(sc->base,
1114 R_FROUT_SPILL_MEM_START_0,
1115 R_FROUT_SPILL_MEM_START_1,
1116 R_FROUT_SPILL_MEM_SIZE,
1118 sizeof(struct fr_desc));
1119 sc->class_0_spill = nlna_config_spill(sc->base,
1120 R_CLASS0_SPILL_MEM_START_0,
1121 R_CLASS0_SPILL_MEM_START_1,
1122 R_CLASS0_SPILL_MEM_SIZE,
1124 sizeof(union rx_tx_desc));
1125 sc->class_1_spill = nlna_config_spill(sc->base,
1126 R_CLASS1_SPILL_MEM_START_0,
1127 R_CLASS1_SPILL_MEM_START_1,
1128 R_CLASS1_SPILL_MEM_SIZE,
1130 sizeof(union rx_tx_desc));
1131 sc->class_2_spill = nlna_config_spill(sc->base,
1132 R_CLASS2_SPILL_MEM_START_0,
1133 R_CLASS2_SPILL_MEM_START_1,
1134 R_CLASS2_SPILL_MEM_SIZE,
1136 sizeof(union rx_tx_desc));
1137 sc->class_3_spill = nlna_config_spill(sc->base,
1138 R_CLASS3_SPILL_MEM_START_0,
1139 R_CLASS3_SPILL_MEM_START_1,
1140 R_CLASS3_SPILL_MEM_SIZE,
1142 sizeof(union rx_tx_desc));
1145 /* Set the CPU buckets that receive packets from the NA class FIFOs. */
1147 nlna_config_pde(struct nlna_softc *sc)
1149 uint64_t bucket_map;
1156 * rge may be called before SMP start in a BOOTP/NFSROOT
1157 * setup. we will distribute packets to other cpus only when
1158 * the SMP is started.
1161 cpumask = xlr_hw_thread_mask;
1164 for (i = 0; i < 32; i++) {
1165 if (cpumask & (1 << i)) {
1167 /* use bucket 0 and 1 on every core for NA msgs */
1169 bucket_map |= (3ULL << bucket);
1173 NLGE_WRITE(sc->base, R_PDE_CLASS_0, (bucket_map & 0xffffffff));
1174 NLGE_WRITE(sc->base, R_PDE_CLASS_0 + 1, ((bucket_map >> 32) & 0xffffffff));
1176 NLGE_WRITE(sc->base, R_PDE_CLASS_1, (bucket_map & 0xffffffff));
1177 NLGE_WRITE(sc->base, R_PDE_CLASS_1 + 1, ((bucket_map >> 32) & 0xffffffff));
1179 NLGE_WRITE(sc->base, R_PDE_CLASS_2, (bucket_map & 0xffffffff));
1180 NLGE_WRITE(sc->base, R_PDE_CLASS_2 + 1, ((bucket_map >> 32) & 0xffffffff));
1182 NLGE_WRITE(sc->base, R_PDE_CLASS_3, (bucket_map & 0xffffffff));
1183 NLGE_WRITE(sc->base, R_PDE_CLASS_3 + 1, ((bucket_map >> 32) & 0xffffffff));
1187 * Update the network accelerator packet distribution engine for SMP.
1188 * On bootup, we have just the boot hw thread handling all packets, on SMP
1189 * start, we can start distributing packets across all the cores which are up.
1192 nlna_smp_update_pde(void *dummy __unused)
1195 struct nlna_softc *na_sc[XLR_MAX_NLNA];
1198 printf("Updating packet distribution for SMP\n");
1200 iodi_dev = devclass_get_device(devclass_find("iodi"), 0);
1201 nlna_get_all_softc(iodi_dev, na_sc, XLR_MAX_NLNA);
1203 for (i = 0; i < XLR_MAX_NLNA; i++) {
1204 if (na_sc[i] == NULL)
1206 nlna_disable_ports(na_sc[i]);
1207 nlna_config_pde(na_sc[i]);
1208 nlna_config_translate_table(na_sc[i]);
1209 nlna_enable_ports(na_sc[i]);
1213 SYSINIT(nlna_smp_update_pde, SI_SUB_SMP, SI_ORDER_ANY, nlna_smp_update_pde,
1217 nlna_config_translate_table(struct nlna_softc *sc)
1221 int bkts[32]; /* one bucket is assumed for each cpu */
1222 int b1, b2, c1, c2, i, j, k;
1225 if (!flow_classification)
1230 cpu_mask = xlr_hw_thread_mask;
1234 printf("Using %s-based distribution\n", (use_bkt) ? "bucket" : "class");
1237 for(i = 0; i < 32; i++) {
1238 if ((1 << i) & cpu_mask){
1239 /* for each cpu, mark the 4+threadid bucket */
1240 bkts[j] = ((i / 4) * 8) + (i % 4);
1245 /*configure the 128 * 9 Translation table to send to available buckets*/
1249 for(i = 0; i < 64; i++) {
1250 /* Get the next 2 pairs of (class, bucket):
1253 c1, c2 limited to {0, 1, 2, 3}
1254 i.e, the 4 classes defined by h/w
1255 b1, b2 limited to { bkts[i], where 0 <= i < j}
1256 i.e, the set of buckets computed in the
1266 PDEBUG("Translation table[%d] b1=%d b2=%d c1=%d c2=%d\n",
1268 val = ((c1 << 23) | (b1 << 17) | (use_bkt << 16) |
1269 (c2 << 7) | (b2 << 1) | (use_bkt << 0));
1270 NLGE_WRITE(sc->base, R_TRANSLATETABLE + i, val);
1276 nlna_config_parser(struct nlna_softc *sc)
1281 * Mark it as ETHERNET type.
1283 NLGE_WRITE(sc->base, R_L2TYPE_0, 0x01);
1285 #ifndef NLGE_HW_CHKSUM
1286 if (!flow_classification)
1290 /* Use 7bit CRChash for flow classification with 127 as CRC polynomial*/
1291 NLGE_WRITE(sc->base, R_PARSERCONFIGREG, ((0x7f << 8) | (1 << 1)));
1293 /* configure the parser : L2 Type is configured in the bootloader */
1294 /* extract IP: src, dest protocol */
1295 NLGE_WRITE(sc->base, R_L3CTABLE,
1296 (9 << 20) | (1 << 19) | (1 << 18) | (0x01 << 16) |
1298 NLGE_WRITE(sc->base, R_L3CTABLE + 1,
1299 (9 << 25) | (1 << 21) | (12 << 14) | (4 << 10) | (16 << 4) | 4);
1300 #ifdef NLGE_HW_CHKSUM
1301 device_printf(sc->nlna_dev, "Enabled h/w support to compute TCP/IP"
1305 /* Configure to extract SRC port and Dest port for TCP and UDP pkts */
1306 NLGE_WRITE(sc->base, R_L4CTABLE, 6);
1307 NLGE_WRITE(sc->base, R_L4CTABLE + 2, 17);
1308 val = ((0 << 21) | (2 << 17) | (2 << 11) | (2 << 7));
1309 NLGE_WRITE(sc->base, R_L4CTABLE + 1, val);
1310 NLGE_WRITE(sc->base, R_L4CTABLE + 3, val);
/*
 * Configure the NA classifier.  For XGMII the translation table has
 * no sane reset values, so it is explicitly zeroed; the parser config
 * register is cleared as well (classification keyed off the parser
 * extract — see comment fragment below).
 */
1314 nlna_config_classifier(struct nlna_softc *sc)
1318 if (sc->mac_type == XLR_XGMII) { /* TBD: XGMII init sequence */
1319 /* xgmac translation table doesn't have sane values on reset */
1320 for (i = 0; i < 64; i++)
1321 NLGE_WRITE(sc->base, R_TRANSLATETABLE + i, 0x0);
1324 * use upper 7 bits of the parser extract to index the
1327 NLGE_WRITE(sc->base, R_PARSERCONFIGREG, 0x0);
1332 * Complete a bunch of h/w register initializations that are common for all the
1333 * ports controlled by a NA.
1336 nlna_config_common(struct nlna_softc *sc)
1338 struct xlr_gmac_block_t *block_info;
1339 struct stn_cc *gmac_cc_config;
1342 block_info = device_get_ivars(sc->nlna_dev);
1343 gmac_cc_config = block_info->credit_config;
/*
 * Program the FMN credit counters for this NA from the board's
 * credit configuration; counters[][] is indexed [i/8][i%8].
 */
1344 for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
1345 NLGE_WRITE(sc->base, R_CC_CPU0_0 + i,
1346 gmac_cc_config->counters[i >> 3][i & 0x07]);
1349 NLGE_WRITE(sc->base, R_MSG_TX_THRESHOLD, 3);
/* Enable all DMA channels. */
1351 NLGE_WRITE(sc->base, R_DMACR0, 0xffffffff);
1352 NLGE_WRITE(sc->base, R_DMACR1, 0xffffffff);
1353 NLGE_WRITE(sc->base, R_DMACR2, 0xffffffff);
1354 NLGE_WRITE(sc->base, R_DMACR3, 0xffffffff);
1355 NLGE_WRITE(sc->base, R_FREEQCARVE, 0);
/* Media-type (GMAC vs XAUI/XGMII) specific register setup. */
1357 nlna_media_specific_config(sc);
/*
 * Per-media-type NA setup: program the free-descriptor (jumbo and
 * regular frame) bucket sizes from the board's FMN bucket table.
 * Switch cases are partially elided in this listing.
 */
1361 nlna_media_specific_config(struct nlna_softc *sc)
1363 struct bucket_size *bucket_sizes;
1365 bucket_sizes = xlr_board_info.bucket_sizes;
1366 switch (sc->mac_type) {
1370 NLGE_WRITE(sc->base, R_GMAC_JFR0_BUCKET_SIZE,
1371 bucket_sizes->bucket[MSGRNG_STNID_GMACJFR_0]);
1372 NLGE_WRITE(sc->base, R_GMAC_RFR0_BUCKET_SIZE,
1373 bucket_sizes->bucket[MSGRNG_STNID_GMACRFR_0]);
1374 NLGE_WRITE(sc->base, R_GMAC_JFR1_BUCKET_SIZE,
1375 bucket_sizes->bucket[MSGRNG_STNID_GMACJFR_1]);
1376 NLGE_WRITE(sc->base, R_GMAC_RFR1_BUCKET_SIZE,
1377 bucket_sizes->bucket[MSGRNG_STNID_GMACRFR_1]);
/* XAUI additionally needs the TX data FIFO watermark set. */
1379 if (sc->mac_type == XLR_XAUI) {
1380 NLGE_WRITE(sc->base, R_TXDATAFIFO0, (224 << 16));
1385 NLGE_WRITE(sc->base, R_XGS_RFR_BUCKET_SIZE,
1386 bucket_sizes->bucket[sc->rfrbucket]);
/*
 * Reset every port on this NA following the PRM sequence (13.9.3):
 * disable RX, wait for RxHalt, assert+wait+deassert RxControl soft
 * reset, then leave both TX and RX disabled.
 * NOTE(review): the RxHalt/SoftResetDone polls below spin without a
 * timeout — hardware is trusted to complete the sequence.
 */
1394 nlna_reset_ports(struct nlna_softc *sc, struct xlr_gmac_block_t *blk)
1400 /* Refer Section 13.9.3 in the PRM for the reset sequence */
1402 for (i = 0; i < sc->num_ports; i++) {
1403 addr = xlr_io_mmio(blk->gmac_port[i].base_addr);
1405 /* 1. Reset RxEnable in MAC_CONFIG */
1406 switch (sc->mac_type) {
1409 NLGE_UPDATE(addr, R_MAC_CONFIG_1, 0,
1410 (1 << O_MAC_CONFIG_1__rxen));
1414 NLGE_UPDATE(addr, R_RX_CONTROL, 0,
1415 (1 << O_RX_CONTROL__RxEnable));
1418 printf("Error: Unsupported port_type=%d\n",
1422 /* 1.1 Wait for RxControl.RxHalt to be set */
1424 rx_ctrl = NLGE_READ(addr, R_RX_CONTROL);
1425 } while (!(rx_ctrl & 0x2));
1427 /* 2. Set the soft reset bit in RxControl */
1428 NLGE_UPDATE(addr, R_RX_CONTROL, (1 << O_RX_CONTROL__SoftReset),
1429 (1 << O_RX_CONTROL__SoftReset));
1431 /* 2.1 Wait for RxControl.SoftResetDone to be set */
1433 rx_ctrl = NLGE_READ(addr, R_RX_CONTROL);
1434 } while (!(rx_ctrl & 0x8));
1436 /* 3. Clear the soft reset bit in RxControl */
1437 NLGE_UPDATE(addr, R_RX_CONTROL, 0,
1438 (1 << O_RX_CONTROL__SoftReset));
1440 /* Turn off tx/rx on the port. */
1441 NLGE_UPDATE(addr, R_RX_CONTROL, 0,
1442 (1 << O_RX_CONTROL__RxEnable));
1443 NLGE_UPDATE(addr, R_TX_CONTROL, 0,
1444 (1 << O_TX_CONTROL__TxEnable));
/* Disable every attached child port of this NA (NULL slots skipped). */
1449 nlna_disable_ports(struct nlna_softc *sc)
1453 for (i = 0; i < sc->num_ports; i++) {
1454 if (sc->child_sc[i] != NULL)
1455 nlge_port_disable(sc->child_sc[i]);
/*
 * Re-enable every child port whose interface is marked running.
 * Walks the newbus children of the NA device; the devlist from
 * device_get_children() is freed with M_TEMP as required.
 */
1460 nlna_enable_ports(struct nlna_softc *sc)
1462 device_t nlge_dev, *devlist;
1463 struct nlge_softc *port_sc;
1466 device_get_children(sc->nlna_dev, &devlist, &numdevs);
1467 for (i = 0; i < numdevs; i++) {
1468 nlge_dev = devlist[i];
1469 if (nlge_dev == NULL)
1471 port_sc = device_get_softc(nlge_dev);
/* Only ports the stack had up get re-enabled. */
1472 if (port_sc->nlge_if->if_drv_flags & IFF_DRV_RUNNING)
1473 nlge_port_enable(port_sc);
1475 free(devlist, M_TEMP);
/*
 * Fill sc_vec[0..vec_sz) with the softc of each "nlna" child of the
 * IODI device.  NOTE(review): NULL-child handling is elided in this
 * listing — confirm device_find_child() failures are covered.
 */
1479 nlna_get_all_softc(device_t iodi_dev, struct nlna_softc **sc_vec,
1485 for (i = 0; i < vec_sz; i++) {
1487 na_dev = device_find_child(iodi_dev, "nlna", i);
1489 sc_vec[i] = device_get_softc(na_dev);
/*
 * Quiesce one port: disable RX and wait for RxHalt, disable TX and
 * wait for TxIdle, then clear the MAC-level rx/tx enables (GMAC or
 * XGMAC register set depending on port type).  Finally marks the
 * ifnet as no longer running.  Polls spin without a timeout.
 */
1494 nlge_port_disable(struct nlge_softc *sc)
1502 port_type = sc->port_type;
1506 NLGE_UPDATE(base, R_RX_CONTROL, 0x0, 1 << O_RX_CONTROL__RxEnable);
1508 rd = NLGE_READ(base, R_RX_CONTROL);
1509 } while (!(rd & (1 << O_RX_CONTROL__RxHalt)));
1511 NLGE_UPDATE(base, R_TX_CONTROL, 0, 1 << O_TX_CONTROL__TxEnable);
1513 rd = NLGE_READ(base, R_TX_CONTROL);
1514 } while (!(rd & (1 << O_TX_CONTROL__TxIdle)));
1516 switch (port_type) {
1519 NLGE_UPDATE(base, R_MAC_CONFIG_1, 0,
1520 ((1 << O_MAC_CONFIG_1__rxen) |
1521 (1 << O_MAC_CONFIG_1__txen)));
1525 NLGE_UPDATE(base, R_XGMAC_CONFIG_1, 0,
1526 ((1 << O_XGMAC_CONFIG_1__hsttfen) |
1527 (1 << O_XGMAC_CONFIG_1__hstrfen)));
1530 panic("Unknown MAC type on port %d\n", id);
1534 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
/*
 * Bring one port up: set RGMII mode bit on XLS RGMII ports, enable
 * RX, enable TX with the driver's TX threshold, then set the
 * MAC-level rx/tx enables for the port's MAC flavor.
 */
1539 nlge_port_enable(struct nlge_softc *sc)
1541 struct xlr_gmac_port *self;
1545 self = device_get_ivars(sc->nlge_dev);
/* XLS parts need the RGMII bit in RxControl for RGMII ports. */
1546 if (xlr_board_info.is_xls && sc->port_type == XLR_RGMII)
1547 NLGE_UPDATE(base, R_RX_CONTROL, (1 << O_RX_CONTROL__RGMII),
1548 (1 << O_RX_CONTROL__RGMII));
1550 NLGE_UPDATE(base, R_RX_CONTROL, (1 << O_RX_CONTROL__RxEnable),
1551 (1 << O_RX_CONTROL__RxEnable));
/* Mask 0x3fff covers the threshold field being written. */
1552 NLGE_UPDATE(base, R_TX_CONTROL,
1553 (1 << O_TX_CONTROL__TxEnable | RGE_TX_THRESHOLD_BYTES),
1554 (1 << O_TX_CONTROL__TxEnable | 0x3fff));
1555 switch (sc->port_type) {
1558 NLGE_UPDATE(base, R_MAC_CONFIG_1,
1559 ((1 << O_MAC_CONFIG_1__rxen) | (1 << O_MAC_CONFIG_1__txen)),
1560 ((1 << O_MAC_CONFIG_1__rxen) | (1 << O_MAC_CONFIG_1__txen)))
1564 NLGE_UPDATE(base, R_XGMAC_CONFIG_1,
1565 ((1 << O_XGMAC_CONFIG_1__hsttfen) | (1 << O_XGMAC_CONFIG_1__hstrfen)),
1566 ((1 << O_XGMAC_CONFIG_1__hsttfen) | (1 << O_XGMAC_CONFIG_1__hstrfen)));
1569 panic("Unknown MAC type on port %d\n", sc->id);
/*
 * Update the MAC filter according to IFF_PROMISC: promiscuous mode
 * opens broadcast/pause/all-multicast/all-unicast; otherwise the
 * pause-frame and all-unicast bits are cleared (read-modify-write
 * of R_MAC_FILTER_CONFIG).
 */
1574 nlge_mac_set_rx_mode(struct nlge_softc *sc)
1578 regval = NLGE_READ(sc->base, R_MAC_FILTER_CONFIG);
1580 if (sc->if_flags & IFF_PROMISC) {
1581 regval |= (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
1582 (1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) |
1583 (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
1584 (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN);
1586 regval &= ~((1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) |
1587 (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN));
1590 NLGE_WRITE(sc->base, R_MAC_FILTER_CONFIG, regval);
/*
 * SGMII bring-up: program the serdes via its internal MII registers
 * (device address 26, magic init values), set serdes parameters via
 * GPIO, apply a board-specific ref-clock workaround, then kick
 * autonegotiation on the PCS.  No-op for non-SGMII ports.
 * NOTE(review): the register values are opaque init constants —
 * presumably from RMI reference code; do not alter.
 */
1594 nlge_sgmii_init(struct nlge_softc *sc)
1596 xlr_reg_t *mmio_gpio;
1599 if (sc->port_type != XLR_SGMII)
1602 nlge_mii_write_internal(sc->serdes_addr, 26, 0, 0x6DB0);
1603 nlge_mii_write_internal(sc->serdes_addr, 26, 1, 0xFFFF);
1604 nlge_mii_write_internal(sc->serdes_addr, 26, 2, 0xB6D0);
1605 nlge_mii_write_internal(sc->serdes_addr, 26, 3, 0x00FF);
1606 nlge_mii_write_internal(sc->serdes_addr, 26, 4, 0x0000);
1607 nlge_mii_write_internal(sc->serdes_addr, 26, 5, 0x0000);
1608 nlge_mii_write_internal(sc->serdes_addr, 26, 6, 0x0005);
1609 nlge_mii_write_internal(sc->serdes_addr, 26, 7, 0x0001);
1610 nlge_mii_write_internal(sc->serdes_addr, 26, 8, 0x0000);
1611 nlge_mii_write_internal(sc->serdes_addr, 26, 9, 0x0000);
1612 nlge_mii_write_internal(sc->serdes_addr, 26,10, 0x0000);
1614 /* program GPIO values for serdes init parameters */
1616 mmio_gpio = xlr_io_mmio(XLR_IO_GPIO_OFFSET);
1617 xlr_write_reg(mmio_gpio, 0x20, 0x7e6802);
1618 xlr_write_reg(mmio_gpio, 0x10, 0x7104);
1622 * This kludge is needed to setup serdes (?) clock correctly on some
1625 if ((xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_XI ||
1626 xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_XII) &&
1627 xlr_boot1_info.board_minor_version == 4) {
1628 /* use 125 Mhz instead of 156.25Mhz ref clock */
1630 xlr_write_reg(mmio_gpio, 0x10, 0x7103);
1631 xlr_write_reg(mmio_gpio, 0x21, 0x7103);
1635 /* enable autoneg - more magic */
/* PCS device for this port: lanes map 4-per-PCS, base register 27. */
1636 phy = sc->phy_addr % 4 + 27;
1637 nlge_mii_write_internal(sc->pcs_addr, phy, 0, 0x1000);
1639 nlge_mii_write_internal(sc->pcs_addr, phy, 0, 0x0200);
/*
 * NA interrupt handler.  On an MDIO interrupt, walks the ports that
 * share this MDIO bus, reads each PHY's interrupt-status register
 * (reg 26, clear-on-read) and refreshes link speed for ports that
 * reported a link/speed event (bits 0x2410).  The NA-level interrupt
 * register is acked unconditionally at the end.
 */
1644 nlge_intr(void *arg)
1646 struct nlge_port_set *pset;
1647 struct nlge_softc *sc;
1648 struct nlge_softc *port_sc;
1651 uint32_t intr_status;
1656 printf("warning: No port registered for interrupt\n");
1661 intreg = NLGE_READ(base, R_INTREG);
1662 if (intreg & (1 << O_INTREG__MDInt)) {
1663 pset = sc->mdio_pset;
1665 printf("warning: No ports for MDIO interrupt\n");
1668 for (i = 0; i < pset->vec_sz; i++) {
1669 port_sc = pset->port_vec[i];
1671 if (port_sc == NULL)
1674 /* Ack phy interrupt - clear on read*/
1675 intr_status = nlge_mii_read_internal(port_sc->mii_base,
1676 port_sc->phy_addr, 26);
1677 PDEBUG("Phy_%d: int_status=0x%08x\n", port_sc->phy_addr,
/* Bit 15 = interrupt pending for this PHY. */
1680 if (!(intr_status & 0x8000)) {
1681 /* no interrupt for this port */
1685 if (intr_status & 0x2410) {
1686 /* update link status for port */
1687 nlge_gmac_config_speed(port_sc, 1);
1689 printf("%s: Unsupported phy interrupt"
1691 device_get_nameunit(port_sc->nlge_dev),
1697 /* Clear the NA interrupt */
1698 xlr_write_reg(base, R_INTREG, 0xffffffff);
/*
 * Hook up the port's interrupt: the IRQ number is the NA base IRQ
 * plus the port instance.  A fake struct resource is built on the
 * stack (__r_i abused to carry the irq number) for bus_setup_intr().
 * On failure the port is detached.  Port 0 also records the shared
 * MDIO port set used by nlge_intr().
 */
1706 nlge_irq_init(struct nlge_softc *sc)
1708 struct resource irq_res;
1707 struct nlna_softc *na_sc;
1708 struct xlr_gmac_block_t *block_info;
1713 na_dev = device_get_parent(sc->nlge_dev);
1714 block_info = device_get_ivars(na_dev);
1716 irq_num = block_info->baseirq + sc->instance;
/* NOTE(review): stuffing irq_num into __r_i is a platform-specific
 * shortcut around rman — confirm against this branch's resource code. */
1717 irq_res.__r_i = (struct resource_i *)(intptr_t) (irq_num);
1718 ret = bus_setup_intr(sc->nlge_dev, &irq_res,
1719 INTR_TYPE_NET | INTR_MPSAFE, NULL, nlge_intr, sc, NULL);
1721 nlge_detach(sc->nlge_dev);
1722 device_printf(sc->nlge_dev, "couldn't set up irq: error=%d\n",
1726 PDEBUG("Setup intr for dev=%s, irq=%d\n",
1727 device_get_nameunit(sc->nlge_dev), irq_num);
1729 if (sc->instance == 0) {
1730 na_sc = device_get_softc(na_dev);
1731 sc->mdio_pset = &na_sc->mdio_set;
/* Tear down the port's interrupt hookup (body elided in this listing). */
1737 nlge_irq_fini(struct nlge_softc *sc)
/*
 * Per-port hardware init: descriptor packing (1536-byte buffers,
 * pre-pad when h/w checksum is enabled), statistics counters on and
 * cleared, L2 allocation enabled, interrupts masked off; then the
 * MAC address and media-specific registers are programmed.
 */
1742 nlge_hw_init(struct nlge_softc *sc)
1744 struct xlr_gmac_port *port_info;
1748 port_info = device_get_ivars(sc->nlge_dev);
1749 sc->tx_bucket_id = port_info->tx_bucket_id;
1751 /* each packet buffer is 1536 bytes */
1752 NLGE_WRITE(base, R_DESC_PACK_CTRL,
1753 (1 << O_DESC_PACK_CTRL__MaxEntry) |
1754 #ifdef NLGE_HW_CHKSUM
1755 (1 << O_DESC_PACK_CTRL__PrePadEnable) |
1757 (MAX_FRAME_SIZE << O_DESC_PACK_CTRL__RegularSize));
/* Enable stats and clear existing counters. */
1758 NLGE_WRITE(base, R_STATCTRL, ((1 << O_STATCTRL__Sten) |
1759 (1 << O_STATCTRL__ClrCnt)));
1760 NLGE_WRITE(base, R_L2ALLOCCTRL, 0xffffffff);
1761 NLGE_WRITE(base, R_INTMASK, 0);
1762 nlge_set_mac_addr(sc);
1763 nlge_media_specific_init(sc);
/*
 * Zero the port softc, record the unit id, and derive the port
 * attributes (base addresses, phy address, type) from port_info.
 */
1767 nlge_sc_init(struct nlge_softc *sc, device_t dev,
1768 struct xlr_gmac_port *port_info)
1770 memset(sc, 0, sizeof(*sc));
1772 sc->id = device_get_unit(dev);
1773 nlge_set_port_attribs(sc, port_info);
/*
 * Media-specific port init.  GMAC-family ports set the descriptor
 * byte offset and TX bucket size and (for non-XAUI) run an initial
 * speed negotiation; XGMAC ports program byte offset, pad
 * calibration and the XGS TX bucket.  Cases partially elided.
 */
1777 nlge_media_specific_init(struct nlge_softc *sc)
1779 struct mii_data *media;
1780 struct bucket_size *bucket_sizes;
1782 bucket_sizes = xlr_board_info.bucket_sizes;
1783 switch (sc->port_type) {
1787 NLGE_UPDATE(sc->base, R_DESC_PACK_CTRL,
1788 (BYTE_OFFSET << O_DESC_PACK_CTRL__ByteOffset),
1789 (W_DESC_PACK_CTRL__ByteOffset <<
1790 O_DESC_PACK_CTRL__ByteOffset));
1791 NLGE_WRITE(sc->base, R_GMAC_TX0_BUCKET_SIZE + sc->instance,
1792 bucket_sizes->bucket[sc->tx_bucket_id]);
1793 if (sc->port_type != XLR_XAUI) {
1794 nlge_gmac_config_speed(sc, 1);
1796 media = (struct mii_data *)device_get_softc(
1803 NLGE_WRITE(sc->base, R_BYTEOFFSET0, 0x2);
1804 NLGE_WRITE(sc->base, R_XGMACPADCALIBRATION, 0x30);
1805 NLGE_WRITE(sc->base, R_XGS_TX0_BUCKET_SIZE,
1806 bucket_sizes->bucket[sc->tx_bucket_id]);
1814 * Read the MAC address from the XLR boot registers. All port addresses
1815 * are identical except for the lowest octet.
1818 nlge_read_mac_addr(struct nlge_softc *sc)
/* Extract the 6 octets MSB-first from the 48-bit boot-info value. */
1822 for (i = 0, j = 40; i < ETHER_ADDR_LEN && j >= 0; i++, j-= 8)
1823 sc->dev_addr[i] = (xlr_boot1_info.mac_addr >> j) & 0xff;
1825 sc->dev_addr[i - 1] += sc->id; /* last octet is port-specific */
1829 * Write the MAC address to the XLR MAC port. Also, set the address
1830 * masks and MAC filter configuration.
1833 nlge_set_mac_addr(struct nlge_softc *sc)
/* MAC_ADDR0: octets 5..2 in word 0, octets 1..0 in word 1. */
1835 NLGE_WRITE(sc->base, R_MAC_ADDR0,
1836 ((sc->dev_addr[5] << 24) | (sc->dev_addr[4] << 16) |
1837 (sc->dev_addr[3] << 8) | (sc->dev_addr[2])));
1838 NLGE_WRITE(sc->base, R_MAC_ADDR0 + 1,
1839 ((sc->dev_addr[1] << 24) | (sc-> dev_addr[0] << 16)));
/* Exact-match masks: compare all 48 bits. */
1841 NLGE_WRITE(sc->base, R_MAC_ADDR_MASK2, 0xffffffff);
1842 NLGE_WRITE(sc->base, R_MAC_ADDR_MASK2 + 1, 0xffffffff);
1843 NLGE_WRITE(sc->base, R_MAC_ADDR_MASK3, 0xffffffff);
1844 NLGE_WRITE(sc->base, R_MAC_ADDR_MASK3 + 1, 0xffffffff);
/* Accept broadcast, all multicast, and frames matching ADDR0. */
1846 NLGE_WRITE(sc->base, R_MAC_FILTER_CONFIG,
1847 (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
1848 (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
1849 (1 << O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID));
/* Back-to-back IPG tuning applies only to (R)(S)GMII MACs. */
1851 if (sc->port_type == XLR_RGMII || sc->port_type == XLR_SGMII) {
1852 NLGE_UPDATE(sc->base, R_IPG_IFG, MAC_B2B_IPG, 0x7f);
/*
 * Create and attach the ifnet for this port: allocate the interface,
 * set flags/callbacks/queue limits, initialize the default (auto)
 * ifmedia, read the MAC address, ether_ifattach(), then override
 * if_transmit (after attach, per ifnet(9)).
 */
1857 nlge_if_init(struct nlge_softc *sc)
1865 NLGE_LOCK_INIT(sc, device_get_nameunit(dev));
1867 ifp = sc->nlge_if = if_alloc(IFT_ETHER);
1869 device_printf(dev, "can not if_alloc()\n");
1874 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1875 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1876 ifp->if_capabilities = 0;
1877 ifp->if_capenable = ifp->if_capabilities;
1878 ifp->if_ioctl = nlge_ioctl;
1879 ifp->if_init = nlge_init;
1880 ifp->if_hwassist = 0;
1881 ifp->if_snd.ifq_drv_maxlen = RGE_TX_Q_SIZE;
1882 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
1883 IFQ_SET_READY(&ifp->if_snd);
/* Single auto-select media entry; real state comes from the PHY. */
1885 ifmedia_init(&sc->nlge_mii.mii_media, 0, nlge_mediachange,
1887 ifmedia_add(&sc->nlge_mii.mii_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1888 ifmedia_set(&sc->nlge_mii.mii_media, IFM_ETHER | IFM_AUTO);
1889 sc->nlge_mii.mii_media.ifm_media = sc->nlge_mii.mii_media.ifm_cur->ifm_media;
1890 nlge_read_mac_addr(sc);
1892 ether_ifattach(ifp, sc->dev_addr);
1894 /* override if_transmit : per ifnet(9), do it after if_attach */
1895 ifp->if_transmit = nlge_tx;
/*
 * Attach the MII bus/PHY for non-10G ports (management clock config
 * 0x07), then enable PHY MDIO interrupts via register 25.  RX_ER is
 * deliberately left unwatched (fires ~1/sec in GigE mode).
 */
1902 nlge_mii_init(device_t dev, struct nlge_softc *sc)
1906 if (sc->port_type != XLR_XAUI && sc->port_type != XLR_XGMII) {
1907 NLGE_WRITE(sc->mii_base, R_MII_MGMT_CONFIG, 0x07);
1909 error = mii_attach(dev, &sc->mii_bus, sc->nlge_if, nlge_mediachange,
1910 nlge_mediastatus, BMSR_DEFCAPMASK, sc->phy_addr, MII_OFFSET_ANY,
1913 device_printf(dev, "attaching PHYs failed\n");
1916 if (sc->mii_bus != NULL) {
1918 * Enable all MDIO interrupts in the phy. RX_ER bit seems to get
1919 * set about every 1 sec in GigE mode, ignore it for now...
1921 nlge_mii_write_internal(sc->mii_base, sc->phy_addr, 25,
1927 * Read a PHY register.
1930 * mii_base - Base address of MII
1931 * phyaddr - PHY's address
1932 * regidx = index of register to read
1935 * value read, or 0 if an error occurred.
1939 nlge_mii_read_internal(xlr_reg_t *mii_base, int phyaddr, int regidx)
1943 /* setup the phy reg to be used */
1944 NLGE_WRITE(mii_base, R_MII_MGMT_ADDRESS,
1945 (phyaddr << 8) | (regidx << 0));
1946 /* Issue the read command */
1947 NLGE_WRITE(mii_base, R_MII_MGMT_COMMAND,
1948 (1 << O_MII_MGMT_COMMAND__rstat));
1950 /* poll for the read cycle to complete */
1951 for (i = 0; i < PHY_STATUS_RETRIES; i++) {
1952 if (NLGE_READ(mii_base, R_MII_MGMT_INDICATORS) == 0)
1956 /* clear the read cycle */
1957 NLGE_WRITE(mii_base, R_MII_MGMT_COMMAND, 0)
/* Exhausted retries: report all-ones (standard MII "no device"). */
1959 if (i == PHY_STATUS_RETRIES) {
1960 return (0xffffffff);
1963 val = NLGE_READ(mii_base, R_MII_MGMT_STATUS);
1969 * Write a value to a PHY register.
1972 * mii_base - Base address of MII
1973 * phyaddr - PHY to use
1974 * regidx - register within the PHY
1975 * regval - data to write to register
1981 nlge_mii_write_internal(xlr_reg_t *mii_base, int phyaddr, int regidx,
1986 NLGE_WRITE(mii_base, R_MII_MGMT_ADDRESS,
1987 (phyaddr << 8) | (regidx << 0));
1989 /* Write the data which starts the write cycle */
1990 NLGE_WRITE(mii_base, R_MII_MGMT_WRITE_DATA, regval);
1992 /* poll for the write cycle to complete */
1993 for (i = 0; i < PHY_STATUS_RETRIES; i++) {
1994 if (NLGE_READ(mii_base, R_MII_MGMT_INDICATORS) == 0)
2000 * Function to optimize the use of p2d descriptors for the given PDU.
2001 * As it is on the fast-path (called during packet transmission), it
2002 * described in more detail than the initialization functions.
2004 * Input: mbuf chain (MC), pointer to fmn message
2005 * Input constraints: None
2006 * Output: FMN message to transmit the data in MC
2007 * Return values: 0 - success
2008 * 1 - MC cannot be handled (see Limitations below)
2009 * 2 - MC cannot be handled presently (maybe worth re-trying)
2010 * Other output: Number of entries filled in the FMN message
2012 * Output structure/constraints:
2013 * 1. Max 3 p2d's + 1 zero-len (ZL) p2d with virtual address of MC.
2014 * 2. 3 p2d's + 1 p2p with max 14 p2d's (ZL p2d not required in this case).
2015 * 3. Each p2d points to physically contiguous chunk of data (subject to
2016 * entire MC requiring max 17 p2d's).
2018 * 1. MC's that require more than 17 p2d's are not handled.
2019 * Benefits: MC's that require <= 3 p2d's avoid the overhead of allocating
2020 * the p2p structure. Small packets (which typically give low
2021 * performance) are expected to have a small MC that takes
2022 * advantage of this.
2025 prepare_fmn_message(struct nlge_softc *sc, struct msgrng_msg *fmn_msg,
2026 uint32_t *n_entries, struct mbuf *mbuf_chain, uint64_t fb_stn_id,
2027 struct nlge_tx_desc **tx_desc)
2030 struct nlge_tx_desc *p2p;
2035 int msg_sz, p2p_sz, len, frag_sz;
2036 /* Num entries per FMN msg is 4 for XLR/XLS */
2037 const int FMN_SZ = sizeof(*fmn_msg) / sizeof(uint64_t);
2039 msg_sz = p2p_sz = 0;
2041 cur_p2d = &fmn_msg->msg0;
/* Walk the mbuf chain, emitting one p2d per physically-contiguous
 * fragment; spill into a p2p descriptor when the inline message fills. */
2043 for (m = mbuf_chain; m != NULL; m = m->m_next) {
2044 buf = (vm_offset_t) m->m_data;
2048 if (msg_sz == (FMN_SZ - 1)) {
2049 p2p = uma_zalloc(nl_tx_desc_zone, M_NOWAIT);
2054 * Save the virtual address in the descriptor,
2055 * it makes freeing easy.
2057 p2p->frag[XLR_MAX_TX_FRAGS] =
2058 (uint64_t)(vm_offset_t)p2p;
2059 cur_p2d = &p2p->frag[0];
/* Chain exceeds the 17-p2d budget: release the p2p and fail. */
2060 } else if (msg_sz == (FMN_SZ - 2 + XLR_MAX_TX_FRAGS)) {
2061 uma_zfree(nl_tx_desc_zone, p2p);
2064 paddr = vtophys(buf);
/* Fragments may not cross page boundaries: clamp to page end. */
2065 frag_sz = PAGE_SIZE - (buf & PAGE_MASK);
2068 *cur_p2d++ = (127ULL << 54) | ((uint64_t)frag_sz << 40)
2079 printf("Zero-length mbuf chain ??\n");
2080 *n_entries = msg_sz ;
2084 /* set eop in most-recent p2d */
2085 cur_p2d[-1] |= (1ULL << 63);
2089 * On n64, we cannot store our mbuf pointer(64 bit) in the freeback
2090 * message (40bit available), so we put the mbuf in m_nextpkt and
2091 * use the physical addr of that in freeback message.
2093 mbuf_chain->m_nextpkt = mbuf_chain;
2094 fbpaddr = vtophys(&mbuf_chain->m_nextpkt);
2096 /* Careful, don't sign extend when going to 64bit */
2097 fbpaddr = (uint64_t)(uintptr_t)mbuf_chain;
/* Trailing zero-length freeback p2d carrying the mbuf back-pointer. */
2099 *cur_p2d = (1ULL << 63) | ((uint64_t)fb_stn_id << 54) | fbpaddr;
2103 paddr = vtophys(p2p);
/* msg3 becomes the p2p entry referencing the spill descriptor array. */
2105 fmn_msg->msg3 = (1ULL << 62) | ((uint64_t)fb_stn_id << 54) |
2106 ((uint64_t)(p2p_sz * 8) << 40) | paddr;
2107 *n_entries = FMN_SZ;
2109 *n_entries = msg_sz + 1;
/*
 * Push a prepared TX message onto the FMN toward this port's TX
 * bucket, retrying up to 100000 times on credit failure before
 * giving up with a diagnostic.
 */
2116 send_fmn_msg_tx(struct nlge_softc *sc, struct msgrng_msg *msg,
2119 uint32_t msgrng_flags;
2124 msgrng_flags = msgrng_access_enable();
2125 ret = message_send(n_entries, MSGRNG_CODE_MAC,
2126 sc->tx_bucket_id, msg);
2127 msgrng_restore(msgrng_flags);
2131 } while (i < 100000);
2133 device_printf(sc->nlge_dev, "Too many credit fails in tx path\n");
/*
 * Free the p2p TX descriptor identified by a freeback physical
 * address: the descriptor's own virtual address was stashed in
 * frag[XLR_MAX_TX_FRAGS] (see prepare_fmn_message), so load it via a
 * KX-enabled physical read and return the zone item.
 */
2139 release_tx_desc(vm_paddr_t paddr)
2141 struct nlge_tx_desc *tx_desc;
2145 paddr += (XLR_MAX_TX_FRAGS * sizeof(uint64_t));
2146 sr = xlr_enable_kx();
2147 vaddr = xlr_paddr_ld(paddr);
2150 tx_desc = (struct nlge_tx_desc*)(intptr_t)vaddr;
2151 uma_zfree(nl_tx_desc_zone, tx_desc);
/*
 * NOTE(review): function header elided in this listing — presumably
 * the RX buffer allocator.  Allocates an mbuf cluster, cache-aligns
 * the data pointer, stores a back-pointer to the mbuf in the first
 * 8 bytes, and (in the debug path) asserts the 1536-byte buffer is
 * physically contiguous.
 */
2160 vm_paddr_t temp1, temp2;
2163 if ((m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL)
2165 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
/* Align m_data to the 32-byte cache line. */
2166 m_adj(m_new, XLR_CACHELINE_SIZE - ((uintptr_t)m_new->m_data & 0x1f));
2167 md = (uint64_t *)m_new->m_data;
2168 md[0] = (intptr_t)m_new; /* Back Ptr */
2170 m_adj(m_new, XLR_CACHELINE_SIZE);
2173 temp1 = vtophys((vm_offset_t) m_new->m_data);
2174 temp2 = vtophys((vm_offset_t) m_new->m_data + 1536);
2175 if ((temp1 + 1536) != temp2)
2176 panic("ALLOCED BUFFER IS NOT CONTIGUOUS\n");
2179 return ((void *)m_new->m_data);
/*
 * Poll autonegotiation (quick=1 limits retries), read the PHY's
 * speed bits (reg 28, bits 3-4), program interface control /
 * MAC_CONFIG_2 / core control to match, run SGMII re-init when the
 * link came up, and push the link state to the ifnet.  No-op for
 * 10G (XAUI/XGMII) ports.
 */
2183 nlge_gmac_config_speed(struct nlge_softc *sc, int quick)
2185 struct mii_data *md;
2187 int bmsr, n_tries, max_tries;
2188 int core_ctl[] = { 0x2, 0x1, 0x0, 0x1 };
2189 int sgmii_speed[] = { SGMII_SPEED_10,
2192 SGMII_SPEED_100 }; /* default to 100Mbps */
2193 char *speed_str[] = { "10",
2196 "unknown, defaulting to 100" };
2197 int link_state = LINK_STATE_DOWN;
2199 if (sc->port_type == XLR_XAUI || sc->port_type == XLR_XGMII)
2204 if (sc->mii_base != NULL) {
2205 max_tries = (quick == 1) ? 100 : 4000;
2207 for (n_tries = 0; n_tries < max_tries; n_tries++) {
2208 bmsr = nlge_mii_read_internal(sc->mii_base,
2209 sc->phy_addr, MII_BMSR);
2210 if ((bmsr & BMSR_ACOMP) && (bmsr & BMSR_LINK))
2211 break; /* Auto-negotiation is complete
2216 sc->link = (bmsr == 0) ? xlr_mac_link_down : xlr_mac_link_up;
/* Vendor-specific reg 28: speed in bits [4:3]. */
2217 sc->speed = nlge_mii_read_internal(sc->mii_base, sc->phy_addr, 28);
2218 sc->speed = (sc->speed >> 3) & 0x03;
2219 if (sc->link == xlr_mac_link_up) {
2220 link_state = LINK_STATE_UP;
2221 nlge_sgmii_init(sc);
2224 md = (struct mii_data *)device_get_softc(sc->mii_bus);
2227 if (sc->port_type != XLR_RGMII)
2228 NLGE_WRITE(mmio, R_INTERFACE_CONTROL, sgmii_speed[sc->speed]);
/* MAC_CONFIG_2: 0x7117 = 10/100 nibble mode, 0x7217 = GigE byte mode. */
2229 if (sc->speed == xlr_mac_speed_10 || sc->speed == xlr_mac_speed_100 ||
2230 sc->speed == xlr_mac_speed_rsvd) {
2231 NLGE_WRITE(mmio, R_MAC_CONFIG_2, 0x7117);
2232 } else if (sc->speed == xlr_mac_speed_1000) {
2233 NLGE_WRITE(mmio, R_MAC_CONFIG_2, 0x7217);
2235 ifmedia_set(&md->mii_media, IFM_MAKEWORD(IFM_ETHER,
2236 IFM_1000_T, IFM_FDX, md->mii_instance));
2239 NLGE_WRITE(mmio, R_CORECONTROL, core_ctl[sc->speed]);
2240 if_link_state_change(sc->nlge_if, link_state);
2241 printf("%s: [%sMbps]\n", device_get_nameunit(sc->nlge_dev),
2242 speed_str[sc->speed]);
2248 * This function is called for each port that was added to the device tree
2249 * and it initializes the following port attributes:
2251 * - base (base address to access port-specific registers)
2256 nlge_set_port_attribs(struct nlge_softc *sc,
2257 struct xlr_gmac_port *port_info)
2259 sc->instance = port_info->instance % 4; /* TBD: will not work for SPI-4 */
2260 sc->port_type = port_info->type;
2261 sc->base = xlr_io_mmio(port_info->base_addr);
2262 sc->mii_base = xlr_io_mmio(port_info->mii_addr);
/* PCS/serdes blocks exist only for SGMII-capable ports. */
2263 if (port_info->pcs_addr != 0)
2264 sc->pcs_addr = xlr_io_mmio(port_info->pcs_addr);
2265 if (port_info->serdes_addr != 0)
2266 sc->serdes_addr = xlr_io_mmio(port_info->serdes_addr);
2267 sc->phy_addr = port_info->phy_addr;
2269 PDEBUG("Port%d: base=%p, mii_base=%p, phy_addr=%d\n", sc->id, sc->base,
2270 sc->mii_base, sc->phy_addr);
2273 /* ------------------------------------------------------------------------ */
2275 /* Debug dump functions */
/* Debug helper: read one register and print "name: offset value". */
2280 dump_reg(xlr_reg_t *base, uint32_t offset, char *name)
2284 val = NLGE_READ(base, offset);
2285 printf("%-30s: 0x%8x 0x%8x\n", name, offset, val);
/* Stringize a register-name macro for dump_reg() labels. */
2288 #define STRINGIFY(x) #x
/* Debug dump of NA-level registers (parser config, bucket sizes, FIFOs). */
2291 dump_na_registers(xlr_reg_t *base_addr, int port_id)
2293 PDEBUG("Register dump for NA (of port=%d)\n", port_id);
2294 dump_reg(base_addr, R_PARSERCONFIGREG, STRINGIFY(R_PARSERCONFIGREG));
2295 PDEBUG("Tx bucket sizes\n");
2296 dump_reg(base_addr, R_GMAC_JFR0_BUCKET_SIZE,
2297 STRINGIFY(R_GMAC_JFR0_BUCKET_SIZE));
2298 dump_reg(base_addr, R_GMAC_RFR0_BUCKET_SIZE,
2299 STRINGIFY(R_GMAC_RFR0_BUCKET_SIZE));
2300 dump_reg(base_addr, R_GMAC_TX0_BUCKET_SIZE,
2301 STRINGIFY(R_GMAC_TX0_BUCKET_SIZE));
2302 dump_reg(base_addr, R_GMAC_TX1_BUCKET_SIZE,
2303 STRINGIFY(R_GMAC_TX1_BUCKET_SIZE));
2304 dump_reg(base_addr, R_GMAC_TX2_BUCKET_SIZE,
2305 STRINGIFY(R_GMAC_TX2_BUCKET_SIZE));
2306 dump_reg(base_addr, R_GMAC_TX3_BUCKET_SIZE,
2307 STRINGIFY(R_GMAC_TX3_BUCKET_SIZE));
2308 dump_reg(base_addr, R_GMAC_JFR1_BUCKET_SIZE,
2309 STRINGIFY(R_GMAC_JFR1_BUCKET_SIZE));
2310 dump_reg(base_addr, R_GMAC_RFR1_BUCKET_SIZE,
2311 STRINGIFY(R_GMAC_RFR1_BUCKET_SIZE));
2312 dump_reg(base_addr, R_TXDATAFIFO0, STRINGIFY(R_TXDATAFIFO0));
2313 dump_reg(base_addr, R_TXDATAFIFO1, STRINGIFY(R_TXDATAFIFO1));
/*
 * Debug dump of all port-level registers: the MAC-flavor-specific set
 * (GMAC MII/config registers vs XGMAC config/MIIM registers) followed
 * by the registers common to both, then the NA-level dump.
 */
2317 dump_gmac_registers(struct nlge_softc *sc)
2319 xlr_reg_t *base_addr = sc->base;
2320 int port_id = sc->instance;
2322 PDEBUG("Register dump for port=%d\n", port_id);
2323 if (sc->port_type == XLR_RGMII || sc->port_type == XLR_SGMII) {
2324 dump_reg(base_addr, R_MAC_CONFIG_1, STRINGIFY(R_MAC_CONFIG_1));
2325 dump_reg(base_addr, R_MAC_CONFIG_2, STRINGIFY(R_MAC_CONFIG_2));
2326 dump_reg(base_addr, R_IPG_IFG, STRINGIFY(R_IPG_IFG));
2327 dump_reg(base_addr, R_HALF_DUPLEX, STRINGIFY(R_HALF_DUPLEX));
2328 dump_reg(base_addr, R_MAXIMUM_FRAME_LENGTH,
2329 STRINGIFY(R_MAXIMUM_FRAME_LENGTH));
2330 dump_reg(base_addr, R_TEST, STRINGIFY(R_TEST));
2331 dump_reg(base_addr, R_MII_MGMT_CONFIG,
2332 STRINGIFY(R_MII_MGMT_CONFIG));
2333 dump_reg(base_addr, R_MII_MGMT_COMMAND,
2334 STRINGIFY(R_MII_MGMT_COMMAND));
2335 dump_reg(base_addr, R_MII_MGMT_ADDRESS,
2336 STRINGIFY(R_MII_MGMT_ADDRESS));
2337 dump_reg(base_addr, R_MII_MGMT_WRITE_DATA,
2338 STRINGIFY(R_MII_MGMT_WRITE_DATA));
2339 dump_reg(base_addr, R_MII_MGMT_STATUS,
2340 STRINGIFY(R_MII_MGMT_STATUS));
2341 dump_reg(base_addr, R_MII_MGMT_INDICATORS,
2342 STRINGIFY(R_MII_MGMT_INDICATORS));
2343 dump_reg(base_addr, R_INTERFACE_CONTROL,
2344 STRINGIFY(R_INTERFACE_CONTROL));
2345 dump_reg(base_addr, R_INTERFACE_STATUS,
2346 STRINGIFY(R_INTERFACE_STATUS));
2347 } else if (sc->port_type == XLR_XAUI || sc->port_type == XLR_XGMII) {
2348 dump_reg(base_addr, R_XGMAC_CONFIG_0,
2349 STRINGIFY(R_XGMAC_CONFIG_0));
2350 dump_reg(base_addr, R_XGMAC_CONFIG_1,
2351 STRINGIFY(R_XGMAC_CONFIG_1));
2352 dump_reg(base_addr, R_XGMAC_CONFIG_2,
2353 STRINGIFY(R_XGMAC_CONFIG_2));
2354 dump_reg(base_addr, R_XGMAC_CONFIG_3,
2355 STRINGIFY(R_XGMAC_CONFIG_3));
2356 dump_reg(base_addr, R_XGMAC_STATION_ADDRESS_LS,
2357 STRINGIFY(R_XGMAC_STATION_ADDRESS_LS));
2358 dump_reg(base_addr, R_XGMAC_STATION_ADDRESS_MS,
2359 STRINGIFY(R_XGMAC_STATION_ADDRESS_MS));
2360 dump_reg(base_addr, R_XGMAC_MAX_FRAME_LEN,
2361 STRINGIFY(R_XGMAC_MAX_FRAME_LEN));
2362 dump_reg(base_addr, R_XGMAC_REV_LEVEL,
2363 STRINGIFY(R_XGMAC_REV_LEVEL));
2364 dump_reg(base_addr, R_XGMAC_MIIM_COMMAND,
2365 STRINGIFY(R_XGMAC_MIIM_COMMAND));
2366 dump_reg(base_addr, R_XGMAC_MIIM_FILED,
2367 STRINGIFY(R_XGMAC_MIIM_FILED));
2368 dump_reg(base_addr, R_XGMAC_MIIM_CONFIG,
2369 STRINGIFY(R_XGMAC_MIIM_CONFIG));
2370 dump_reg(base_addr, R_XGMAC_MIIM_LINK_FAIL_VECTOR,
2371 STRINGIFY(R_XGMAC_MIIM_LINK_FAIL_VECTOR));
2372 dump_reg(base_addr, R_XGMAC_MIIM_INDICATOR,
2373 STRINGIFY(R_XGMAC_MIIM_INDICATOR));
/* Registers common to every port type. */
2376 dump_reg(base_addr, R_MAC_ADDR0, STRINGIFY(R_MAC_ADDR0));
2377 dump_reg(base_addr, R_MAC_ADDR0 + 1, STRINGIFY(R_MAC_ADDR0+1));
2378 dump_reg(base_addr, R_MAC_ADDR1, STRINGIFY(R_MAC_ADDR1));
2379 dump_reg(base_addr, R_MAC_ADDR2, STRINGIFY(R_MAC_ADDR2));
2380 dump_reg(base_addr, R_MAC_ADDR3, STRINGIFY(R_MAC_ADDR3));
2381 dump_reg(base_addr, R_MAC_ADDR_MASK2, STRINGIFY(R_MAC_ADDR_MASK2));
2382 dump_reg(base_addr, R_MAC_ADDR_MASK3, STRINGIFY(R_MAC_ADDR_MASK3));
2383 dump_reg(base_addr, R_MAC_FILTER_CONFIG, STRINGIFY(R_MAC_FILTER_CONFIG));
2384 dump_reg(base_addr, R_TX_CONTROL, STRINGIFY(R_TX_CONTROL));
2385 dump_reg(base_addr, R_RX_CONTROL, STRINGIFY(R_RX_CONTROL));
2386 dump_reg(base_addr, R_DESC_PACK_CTRL, STRINGIFY(R_DESC_PACK_CTRL));
2387 dump_reg(base_addr, R_STATCTRL, STRINGIFY(R_STATCTRL));
2388 dump_reg(base_addr, R_L2ALLOCCTRL, STRINGIFY(R_L2ALLOCCTRL));
2389 dump_reg(base_addr, R_INTMASK, STRINGIFY(R_INTMASK));
2390 dump_reg(base_addr, R_INTREG, STRINGIFY(R_INTREG));
2391 dump_reg(base_addr, R_TXRETRY, STRINGIFY(R_TXRETRY));
2392 dump_reg(base_addr, R_CORECONTROL, STRINGIFY(R_CORECONTROL));
2393 dump_reg(base_addr, R_BYTEOFFSET0, STRINGIFY(R_BYTEOFFSET0));
2394 dump_reg(base_addr, R_BYTEOFFSET1, STRINGIFY(R_BYTEOFFSET1));
2395 dump_reg(base_addr, R_L2TYPE_0, STRINGIFY(R_L2TYPE_0));
2396 dump_na_registers(base_addr, port_id);
/*
 * Debug: print per-CPU FMN credit counts toward the GMAC TX buckets
 * (station ids 97-103); counters[][] is indexed [bucket/8][bucket%8].
 */
2400 dump_fmn_cpu_credits_for_gmac(struct xlr_board_info *board, int gmac_id)
2403 int gmac_bucket_ids[] = { 97, 98, 99, 100, 101, 103 };
2407 n_gmac_buckets = sizeof (gmac_bucket_ids) / sizeof (gmac_bucket_ids[0]);
2408 for (j = 0; j < 8; j++) { // for each cpu
2409 cc = board->credit_configs[j];
2410 printf("Credits for Station CPU_%d ---> GMAC buckets (tx path)\n", j);
2411 for (k = 0; k < n_gmac_buckets; k++) {
2412 r = gmac_bucket_ids[k] / 8;
2413 c = gmac_bucket_ids[k] % 8;
2414 printf (" --> gmac%d_bucket_%-3d: credits=%d\n", gmac_id,
2415 gmac_bucket_ids[k], cc->counters[r][c]);
/* Debug: print GMAC->CPU (rx path) FMN credits, 8 buckets per CPU. */
2421 dump_fmn_gmac_credits(struct xlr_board_info *board, int gmac_id)
2426 cc = board->gmac_block[gmac_id].credit_config;
2427 printf("Credits for Station: GMAC_%d ---> CPU buckets (rx path)\n", gmac_id);
2428 for (j = 0; j < 8; j++) { // for each cpu
2429 printf(" ---> cpu_%d\n", j);
2430 for (k = 0; k < 8; k++) { // for each bucket in cpu
2431 printf(" ---> bucket_%d: credits=%d\n", j * 8 + k,
2432 cc->counters[j][k]);
/*
 * Debug: print a summary of the board configuration — CPU revision,
 * board version, feature flags, all 128 FMN bucket sizes, the three
 * NA (gmac_block) descriptors, and the FMN credit tables per NA.
 */
2438 dump_board_info(struct xlr_board_info *board)
2440 struct xlr_gmac_block_t *gm;
2443 printf("cpu=%x ", xlr_revision());
2444 printf("board_version: major=%llx, minor=%llx\n",
2445 xlr_boot1_info.board_major_version,
2446 xlr_boot1_info.board_minor_version);
2447 printf("is_xls=%d, nr_cpus=%d, usb=%s, cfi=%s, ata=%s\npci_irq=%d,"
2448 "gmac_ports=%d\n", board->is_xls, board->nr_cpus,
2449 board->usb ? "Yes" : "No", board->cfi ? "Yes": "No",
2450 board->ata ? "Yes" : "No", board->pci_irq, board->gmacports);
2451 printf("FMN: Core-station bucket sizes\n");
2452 for (i = 0; i < 128; i++) {
2453 if (i && ((i % 16) == 0))
2455 printf ("b[%d] = %d ", i, board->bucket_sizes->bucket[i]);
2458 for (i = 0; i < 3; i++) {
2459 gm = &board->gmac_block[i];
2460 printf("RNA_%d: type=%d, enabled=%s, mode=%d, station_id=%d,"
2461 "station_txbase=%d, station_rfr=%d ", i, gm->type,
2462 gm->enabled ? "Yes" : "No", gm->mode, gm->station_id,
2463 gm->station_txbase, gm->station_rfr);
2464 printf("n_ports=%d, baseaddr=%p, baseirq=%d, baseinst=%d\n",
2465 gm->num_ports, (xlr_reg_t *)gm->baseaddr, gm->baseirq,
2468 for (k = 0; k < 3; k++) { // for each NA
2469 dump_fmn_cpu_credits_for_gmac(board, k);
2470 dump_fmn_gmac_credits(board, k);
/*
 * Debug: print TX/RX packet totals, then every RX error counter
 * (R_RFCS..R_ROVR) and every TX counter (R_TMCA..R_TFRG, skipping
 * the two reserved slots after R_TNCL).
 */
2475 dump_mac_stats(struct nlge_softc *sc)
2478 uint32_t pkts_tx, pkts_rx;
2481 pkts_rx = NLGE_READ(sc->base, R_RPKT);
2482 pkts_tx = NLGE_READ(sc->base, R_TPKT);
2484 printf("[nlge_%d mac stats]: pkts_tx=%u, pkts_rx=%u\n", sc->id, pkts_tx,
2489 /* dump all rx counters. we need this because pkts_rx includes
2491 for (r = R_RFCS; r <= R_ROVR; r++)
2492 printf("[nlge_%d mac stats]: [0x%x]=%u\n", sc->id, r,
2493 NLGE_READ(sc->base, r));
2498 /* dump all tx counters. might be useful for debugging. */
2499 for (r = R_TMCA; r <= R_TFRG; r++) {
2500 if ((r == (R_TNCL + 1)) || (r == (R_TNCL + 2)))
2502 printf("[nlge_%d mac stats]: [0x%x]=%u\n", sc->id, r,
2503 NLGE_READ(sc->base, r));
/*
 * Debug: read and print a fixed set of MII registers from the
 * port's PHY.  No-op when the MII base or bus is absent.
 */
2510 dump_mii_regs(struct nlge_softc *sc)
2512 uint32_t mii_regs[] = { 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
2513 0x8, 0x9, 0xa, 0xf, 0x10, 0x11, 0x12, 0x13,
2514 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b,
2518 if (sc->mii_base == NULL || sc->mii_bus == NULL)
2521 n_regs = sizeof (mii_regs) / sizeof (mii_regs[0]);
2522 for (i = 0; i < n_regs; i++) {
2523 printf("[mii_0x%x] = %x\n", mii_regs[i],
2524 nlge_mii_read_internal(sc->mii_base, sc->phy_addr,
/* Debug: print an ifmedia's mask/media words and current entry. */
2530 dump_ifmedia(struct ifmedia *ifm)
2532 printf("ifm_mask=%08x, ifm_media=%08x, cur=%p\n", ifm->ifm_mask,
2533 ifm->ifm_media, ifm->ifm_cur);
2534 if (ifm->ifm_cur != NULL) {
2535 printf("Cur attribs: ifmedia_entry.ifm_media=%08x,"
2536 " ifmedia_entry.ifm_data=%08x\n", ifm->ifm_cur->ifm_media,
2537 ifm->ifm_cur->ifm_data);
/* Debug: print an mii_data's media plus instance and status words. */
2542 dump_mii_data(struct mii_data *mii)
2544 dump_ifmedia(&mii->mii_media);
2545 printf("ifp=%p, mii_instance=%d, mii_media_status=%08x,"
2546 " mii_media_active=%08x\n", mii->mii_ifp, mii->mii_instance,
2547 mii->mii_media_status, mii->mii_media_active);
2551 dump_pcs_regs(struct nlge_softc *sc, int phy)
2555 printf("PCS regs from %p for phy=%d\n", sc->pcs_addr, phy);
2556 for (i = 0; i < 18; i++) {
2557 if (i == 2 || i == 3 || (i >= 9 && i <= 14))
2559 val = nlge_mii_read_internal(sc->pcs_addr, phy, i);
2560 printf("PHY:%d pcs[0x%x] is 0x%x\n", phy, i, val);