1 /***********************license start***************
2 * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * * Neither the name of Cavium Networks nor the names of
19 * its contributors may be used to endorse or promote products
20 * derived from this software without specific prior written
23 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
24 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
25 * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
26 * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
27 * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
28 * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
29 * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
30 * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
31 * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
32 * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
35 * For any questions regarding licensing please contact marketing@caviumnetworks.com
37 ***********************license end**************************************/
40 * octeon_rgmx.c RGMII Ethernet Interfaces on Octeon
46 * Driver for the Reduced Gigabit Media Independent Interface (RGMII)
47 * present on the Cavium Networks' Octeon chip.
50 #include <sys/cdefs.h>
51 __FBSDID("$FreeBSD$");
53 #include <sys/param.h>
54 #include <sys/systm.h>
55 #include <sys/sysctl.h>
57 #include <sys/kernel.h>
58 #include <sys/module.h>
60 #include <sys/power.h>
63 #include <sys/timetc.h>
64 #include <sys/malloc.h>
65 #include <sys/kthread.h>
66 #include <sys/socket.h>
67 #include <sys/sockio.h>
69 #include <sys/taskqueue.h>
73 #include <net/ethernet.h>
75 #include <net/if_dl.h>
76 #include <net/if_mib.h>
77 #include <net/if_media.h>
78 #include <net/if_types.h>
80 #include <netinet/in.h>
81 #include <netinet/if_ether.h>
85 #include <machine/clock.h>
86 #include <machine/locore.h>
87 #include <machine/md_var.h>
89 #include <mips/cavium/octeon_pcmap_regs.h>
91 #include "octeon_fau.h"
92 #include "octeon_fpa.h"
93 #include "octeon_ipd.h"
94 #include "octeon_pko.h"
95 #include "octeon_pip.h"
96 #include "octeon_rgmx.h"
/*
 * Driver-wide sizing limits and port-number bounds for the RGMX interfaces.
 */
/* The "battleship" boards have 8 ports */
#define OCTEON_RGMX_NUM_PORTS_MAX	8
#define NUM_TX_PACKETS			80
#define NUM_RX_PACKETS			300
/*
 * Fix: parenthesize the whole expansion so MAX_RX_BUFS is safe in any
 * expression context (the original expanded to `(a) * (b)` unparenthesized).
 */
#define MAX_RX_BUFS			((NUM_RX_PACKETS) * (OCTEON_RGMX_NUM_PORTS_MAX))
#define MAX_TX_BUFS			(NUM_TX_PACKETS)
#define OCTEON_RGMX_DEV_NAME		"rgmx"
/*
 * GMX ports are numbered 16*iface + n (see the attach loop); with at most
 * 4 ports per interface, 0..19 covers every port this driver handles.
 */
#define OCTEON_RGMX_MIN_PORT		0
#define OCTEON_RGMX_MAX_PORT		19
#define OCTEON_RGMX_OQUEUE_PER_PORT	8

#define OCTEON_RGMX_SCHEDULED_ISRS	1	/* Use Scheduled ISRs from kernel tasks */

#define POW_MAX_LOOP			0x800
/*
 * CIU interrupt summary/enable bank used to read and acknowledge POW and
 * gentimer interrupts (see octeon_rgmx_intr_fast / octeon_rgmx_intr).
 */
120 * CIU related stuff for enabling POW interrupts
122 #define OCTEON_RGMX_CIU_INTX CIU_INT_0
123 #define OCTEON_RGMX_CIU_ENX CIU_EN_0
/*
 * malloc(9) type tag.  NOTE(review): the description says "FPA pool for
 * WQEs", but WQEs visible in this file come from FPA pool fills, not this
 * malloc type -- confirm the tag is actually used anywhere.
 */
125 MALLOC_DEFINE(M_RGMII_WQE, "rgmii_wqe", "FPA pool for WQEs");
/*
 * Per-port software state.  NOTE(review): this listing is gapped -- members
 * referenced elsewhere in the file (ifp, port, idx, sc_unit, ieee[6], the
 * RGMX lock, ...) and the closing brace are not visible in this view.
 */
129 struct rgmx_softc_dev {
130 device_t sc_dev; /* Device ID */
131 uint64_t link_status;
139 u_short txb_size; /* size of TX buffer, in bytes */
141 /* Transmission buffer management. */
142 u_short txb_free; /* free bytes in TX buffer */
143 u_char txb_count; /* number of packets in TX buffer */
144 u_char txb_sched; /* number of scheduled packets */
146 /* Media information. */
147 struct ifmedia media; /* used by if_media. */
148 u_short mbitmap; /* bitmap for supported media; see bit2media */
149 int defmedia; /* default media */
150 struct ifqueue tx_pending_queue; /* Queue of mbuf given to PKO currently */
/*
 * Points at this port's OCTEON_RGMX_OQUEUE_PER_PORT-entry slice of the
 * file-global output_queues_array (set in octeon_rgmx_init_sc).
 */
151 octeon_pko_sw_queue_info_t *outq_ptr;
/*
 * Forward declarations: newbus glue, Octeon hardware plumbing, and ifnet
 * entry points.  NOTE(review): the #endif lines closing the DEBUG_RGMX_DUMP
 * and OCTEON_RGMX_SCHEDULED_ISRS conditionals are not visible in this
 * gapped listing.
 */
160 static int rgmii_probe(device_t);
161 static void rgmii_identify(driver_t *, device_t);
162 static int rgmii_attach(device_t);
167 * Octeon specific routines
169 static int octeon_has_4ports(void);
170 static void octeon_config_rgmii_port(u_int port);
171 static void octeon_rgmx_config_pip(u_int port);
172 static void octeon_line_status_loop(void *);
173 static void octeon_rx_loop(void *);
174 static void octeon_config_hw_units_post_ports(void);
175 static void octeon_config_hw_units_pre_ports(void);
176 static void octeon_config_hw_units_port(struct rgmx_softc_dev *sc, u_int port);
177 static struct rgmx_softc_dev *get_rgmx_softc(u_int port);
178 static void octeon_rgmx_start_port(u_int port);
179 static u_int octeon_rgmx_stop_port(u_int port);
180 static u_int get_rgmx_port_ordinal(u_int port);
181 static void octeon_rgmx_set_mac(u_int port);
182 static void octeon_rgmx_init_sc(struct rgmx_softc_dev *sc, device_t dev, u_int port, u_int num_devices);
183 static int octeon_rgmx_init_ifnet(struct rgmx_softc_dev *sc);
184 static void octeon_rgmx_stop(struct rgmx_softc_dev *sc);
185 static void octeon_rgmx_config_speed(u_int port, u_int);
186 #ifdef DEBUG_RGMX_DUMP
187 static void octeon_dump_rgmx_stats(u_int port);
188 static void octeon_dump_pow_stats(void);
191 static void rgmx_timer_periodic(void);
193 static void octeon_rgmx_enable_RED_all(int, int);
195 #ifdef OCTEON_RGMX_SCHEDULED_ISRS
196 static void octeon_rgmx_isr_link(void *context, int pending);
197 static void octeon_rgmx_isr_rxtx(void *context, int pending);
198 static int octeon_rgmx_intr_fast(void *arg);
200 static int octeon_rgmx_intr(void *arg);
209 /* Standard driver entry points. These can be static. */
210 static void octeon_rgmx_init (void *);
211 //static driver_intr_t rgmx_intr;
212 static void octeon_rgmx_config_cam (struct ifnet *);
213 static int octeon_rgmx_ioctl (struct ifnet *, u_long, caddr_t);
214 static void octeon_rgmx_output_start (struct ifnet *);
215 static void octeon_rgmx_output_start_locked (struct ifnet *);
216 static int octeon_rgmx_medchange (struct ifnet *);
217 static void octeon_rgmx_medstat (struct ifnet *, struct ifmediareq *);
/*
 * Media bitmap -> ifm_media table (pattern borrowed from the fe(4) driver;
 * only entry 0, IFM_AUTO, is registered with ifmedia below).
 * NOTE(review): entries 2 and 6 are both IFM_10_T -- presumably the last
 * slot was meant to differ; confirm against the original fe(4) table.
 * The closing `};` of the array is missing from this gapped listing.
 */
220 /* Mapping between media bitmap (in fe_softc.mbitmap) and ifm_media. */
221 static int const bit2media [] = {
222 IFM_ETHER | IFM_AUTO,
223 IFM_ETHER | IFM_MANUAL,
224 IFM_ETHER | IFM_10_T,
225 IFM_ETHER | IFM_10_2,
226 IFM_ETHER | IFM_10_5,
227 IFM_ETHER | IFM_10_FL,
228 IFM_ETHER | IFM_10_T,
229 /* More can be added here... */
232 /* Mapping between media bitmap (in fe_softc.mbitmap) and ifm_media. */
/* Largest acceptable ethernet frame, including a VLAN tag; bounds RX copies. */
241 #define LEBLEN (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN)
/*
 * File-global state.  rgmx_scdev_array[] and port_array[] are parallel
 * arrays indexed by device ordinal (num_devices at attach time): ordinal ->
 * softc and ordinal -> GMX port number respectively.
 */
244 static struct rgmx_softc_dev *rgmx_scdev_array[OCTEON_RGMX_NUM_PORTS_MAX] = {NULL};
245 static u_int port_array[OCTEON_RGMX_NUM_PORTS_MAX] = {0};
246 static u_int num_devices = 0;
/* Backing store for each port's outq_ptr slice (8 queues per port). */
247 static octeon_pko_sw_queue_info_t output_queues_array[OCTEON_RGMX_NUM_PORTS_MAX * OCTEON_RGMX_OQUEUE_PER_PORT];
248 static struct resource *irq_res; /* Interrupt resource. */
249 static void *int_handler_tag;
/*
 * One shared task pair and taskqueue for all child devices: the POW raises
 * a single unified interrupt per group, not one per port (see rgmii_attach).
 * NOTE(review): these are not declared static -- they probably could be.
 */
252 #ifdef OCTEON_RGMX_SCHEDULED_ISRS
254 struct task link_isr_task;
255 struct task rxtx_isr_task;
256 struct taskqueue *tq; /* private task queue */
/*
 * Map a GMX port number to its device ordinal (the shared index into
 * port_array[] / rgmx_scdev_array[]).  NOTE(review): listing is gapped --
 * the found / not-found return statements are not visible here.
 */
262 static u_int get_rgmx_port_ordinal (u_int port)
266 for (idx = 0; idx < OCTEON_RGMX_NUM_PORTS_MAX; idx++) {
267 if (port_array[idx] == port) {
/*
 * Look up the softc for a GMX port via its ordinal.  NOTE(review): any
 * bounds check on `idx` before the array access is not visible in this
 * gapped listing -- confirm the not-found case is handled.
 */
274 static struct rgmx_softc_dev *get_rgmx_softc (u_int port)
278 idx = get_rgmx_port_ordinal(port);
280 return (rgmx_scdev_array[idx]);
/*
 * One-time software initialization of a port's softc: media defaults, the
 * TX pending queue and its mutex, the PKO output-queue slice, and the MAC
 * address (board base MAC with the port ordinal added to the last octet).
 * NOTE(review): defmedia is assigned twice (MB_HM then MB_HT); the second
 * assignment wins -- looks like leftover code, confirm intent.
 */
287 static void octeon_rgmx_init_sc (struct rgmx_softc_dev *sc, device_t dev, u_int port, u_int num_devices)
291 /* No software-controllable media selection. */
293 sc->defmedia = MB_HM;
297 sc->idx = num_devices;
299 sc->sc_unit = num_devices;
301 sc->defmedia = MB_HT;
302 sc->tx_pending_queue.ifq_maxlen = NUM_TX_PACKETS;
303 sc->tx_pending_queue.ifq_head = sc->tx_pending_queue.ifq_tail = NULL;
304 sc->tx_pending_queue.ifq_len = sc->tx_pending_queue.ifq_drops = 0;
305 mtx_init(&sc->tx_pending_queue.ifq_mtx, "if->sc->txpq.ifqmtx", NULL, MTX_DEF);
/* This port's 8-queue slice of the global PKO output-queue array. */
307 sc->outq_ptr = &(output_queues_array[num_devices * OCTEON_RGMX_OQUEUE_PER_PORT]);
309 for (ii = 0; ii < 6; ii++) {
310 sc->ieee[ii] = octeon_mac_addr[ii];
/* Make each port's MAC unique: base MAC + port ordinal in the low octet. */
312 sc->ieee[5] += get_rgmx_port_ordinal(port);
/*
 * Allocate and configure the ifnet for one port, then ether_ifattach() it.
 * Returns non-zero on failure (the failure-path return statements are not
 * visible in this gapped listing).
 */
316 static int octeon_rgmx_init_ifnet (struct rgmx_softc_dev *sc)
320 ifp = sc->ifp = if_alloc(IFT_ETHER);
322 device_printf(sc->sc_dev, "can not ifalloc for rgmx port\n");
326 * Initialize ifnet structure
329 if_initname(sc->ifp, device_get_name(sc->sc_dev), device_get_unit(sc->sc_dev));
330 ifp->if_start = octeon_rgmx_output_start;
331 ifp->if_ioctl = octeon_rgmx_ioctl;
/* Advertise HW TCP/UDP checksum offload (PKO computes L4 checksums on TX). */
332 ifp->if_hwassist = CSUM_TCP | CSUM_UDP;
333 ifp->if_capabilities = IFCAP_HWCSUM;
334 ifp->if_capenable = ifp->if_capabilities;
335 ifp->if_init = octeon_rgmx_init;
336 ifp->if_linkmib = NULL; // &sc->mibdata;
337 ifp->if_linkmiblen = 0; // sizeof (sc->mibdata);
339 * Set fixed interface flags.
341 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
343 if (ifp->if_snd.ifq_maxlen == 0)
344 ifp->if_snd.ifq_maxlen = ifqmaxlen;
/* Only auto-select media is registered; see bit2media[0]. */
346 ifmedia_init(&sc->media, 0, octeon_rgmx_medchange, octeon_rgmx_medstat);
347 ifmedia_add(&sc->media, bit2media[0], 0, NULL);
348 ifmedia_set(&sc->media, bit2media[0]);
350 ether_ifattach(sc->ifp, sc->ieee);
360 /* ------------------------------------------------------------------- *
362 * ------------------------------------------------------------------- */
/* Newbus identify: add the single "rgmii" child under the parent bus. */
363 static void rgmii_identify (driver_t *drv, device_t parent)
365 BUS_ADD_CHILD(parent, 0, "rgmii", 0);
369 /* ------------------------------------------------------------------- *
371 * ------------------------------------------------------------------- */
/*
 * Newbus probe: only a single rgmii instance (unit 0) is supported.
 * NOTE(review): the success `return` is not visible in this gapped listing.
 */
372 static int rgmii_probe (device_t dev)
374 if (device_get_unit(dev) != 0)
375 panic("can't probe/attach more rgmii devices\n");
377 device_set_desc(dev, "Octeon RGMII");
383 /* ------------------------------------------------------------------- *
385 * ------------------------------------------------------------------- */
/*
 * Newbus attach: bring up the shared FPA/PKO/IPD hardware, discover the
 * enabled GMX interfaces and their ports, create one "rgmx" child per port,
 * then allocate the shared IRQ and install either the task-deferred fast
 * filter or the do-it-all-inline ISR.  NOTE(review): this listing is
 * gapped -- `continue`/error paths, the num_devices++ increment and several
 * declarations are not visible here.
 */
386 static int rgmii_attach (device_t dev)
388 struct rgmx_softc_dev *sc;
390 int iface, port, nr_ports, error;
394 octeon_config_hw_units_pre_ports();
396 /* Count interfaces and ports*/
397 octeon_gmxx_inf_mode_t iface_mode;
398 iface_mode.word64 = 0;
400 for (iface = 0; iface < 2; iface++) {
401 iface_mode.word64 = oct_read64(OCTEON_RGMX_INF_MODE(iface));
403 /* interface is either disabled or SPI */
404 if (!iface_mode.bits.en)
/* Port count is chip-dependent: CN3020/CN5020 differ from the default 3/4. */
406 if (octeon_get_chipid() == OCTEON_CN3020_CHIP) {
409 nr_ports = (octeon_has_4ports()) ? 4 : 3;
410 if (iface_mode.bits.type ) {
411 if (octeon_get_chipid() == OCTEON_CN5020_CHIP)
418 oct_write64(OCTEON_RGMX_TX_PRTS(iface), nr_ports);
/* GMX port numbering: 16*iface + n. */
420 for (port = iface * 16; port < iface * 16 + nr_ports; port++) {
422 child = device_add_child(dev, OCTEON_RGMX_DEV_NAME, num_devices);
424 panic("%s: device_add_child() failed\n", __func__);
426 softc = malloc(sizeof(struct rgmx_softc_dev), M_DEVBUF, M_NOWAIT | M_ZERO);
428 panic("%s malloc failed for softc\n", __func__);
430 device_set_softc(child, softc);
431 device_set_desc(child, "Octeon RGMII");
432 sc = device_get_softc(child);
/* Record ordinal -> port / softc mappings used by get_rgmx_* helpers. */
438 port_array[num_devices] = port;
439 rgmx_scdev_array[num_devices] = sc;
440 RGMX_LOCK_INIT(sc, device_get_nameunit(child));
441 octeon_rgmx_init_sc(sc, child, port, num_devices);
442 octeon_config_hw_units_port(sc, port);
443 if (octeon_rgmx_init_ifnet(sc)) {
444 device_printf(dev, " ifinit failed for rgmx port %u\n", port);
451 octeon_config_hw_units_post_ports();
454 irq_res = bus_alloc_resource(dev, SYS_RES_IRQ, &irq_rid, 0, 0, 1, RF_SHAREABLE | RF_ACTIVE);
455 if (irq_res == NULL) {
456 device_printf(dev, "failed to allocate irq\n");
461 #ifdef OCTEON_RGMX_SCHEDULED_ISRS
463 * Single task queues for all child devices. Since POW gives us a unified
464 * interrupt based on POW groups, not based on PORTs.
466 TASK_INIT(&rxtx_isr_task, 0, octeon_rgmx_isr_rxtx, NULL);
467 TASK_INIT(&link_isr_task, 0, octeon_rgmx_isr_link, NULL);
/* &tq as the enqueue context is the standard taskqueue_thread_enqueue idiom. */
468 tq = taskqueue_create_fast("octeon_rgmx_taskq", M_NOWAIT,
469 taskqueue_thread_enqueue, &tq);
470 taskqueue_start_threads(&tq, 1, PI_NET, "%s taskq", device_get_nameunit(dev));
/* Fast filter only; RX/TX and link work is deferred to the tasks above. */
472 error = bus_setup_intr(dev, irq_res, INTR_TYPE_NET, octeon_rgmx_intr_fast, NULL,
473 NULL, &int_handler_tag);
475 device_printf(dev, "bus_setup_intr returned %d\n", error);
481 #else /* OCTEON_RGMX_SCHEDULED_ISRS */
483 error = bus_setup_intr(dev, irq_res, INTR_TYPE_NET, octeon_rgmx_intr, NULL,
484 NULL, &int_handler_tag);
487 device_printf(dev, "bus_setup_intr returned %d\n", error);
492 #endif /* OCTEON_RGMX_SCHEDULED_ISRS */
494 return (bus_generic_attach(dev));
#define OCTEON_MAX_RGMX_PORT_NUMS	32

/*
 * POW work groups used by this driver: RX work arrives on group 0; group 1
 * is reserved for TX completions if WQEs are requested from the PKO.
 */
#define OCTEON_POW_RX_GROUP_NUM		0
#define OCTEON_POW_TX_GROUP_NUM		1	/* If using TX WQE from PKO */

#define OCTEON_POW_RX_GROUP_MASK	(1 << OCTEON_POW_RX_GROUP_NUM)
#define OCTEON_POW_TX_GROUP_MASK	(1 << OCTEON_POW_TX_GROUP_NUM)
/*
 * Fix: the original OR'ed OCTEON_POW_RX_GROUP_MASK with itself (copy/paste
 * bug), so "all our groups" silently excluded the TX group.
 */
#define OCTEON_POW_ALL_OUR_GROUPS_MASK	(OCTEON_POW_RX_GROUP_MASK | OCTEON_POW_TX_GROUP_MASK)
#define OCTEON_POW_ALL_GROUPS_MASK	0xffff

/* Physical (XKPHYS I/O) addresses of POW CSRs. */
#define OCTEON_POW_WORKQUEUE_INT	(0x8001670000000200ull)
#define OCTEON_POW_WORKQUEUE_INT_PC	(0x8001670000000208ull)
#define OCTEON_POW_WORKQUEUE_INT_THRESHOLD(group_num)	((0x8001670000000080ull+((group_num)*0x8)))
#define OCTEON_RGMX_POW_NOS_CNT		(0x8001670000000228ull)
#define OCTEON_POW_INT_CNTR(core)	(0x8001670000000100ull+((core)*0x8))
#define OCTEON_POW_INPT_Q_ALL_QOS	(0x8001670000000388ull)
#define OCTEON_POW_INPT_QOS_GRP(grp)	(0x8001670000000340ull + ((grp) * 0x8))
/*
 * FPA pool layout.  Pools 0-3 hold RX packet buffers, RX work-queue
 * entries, TX packet buffers and PKO command buffers respectively.
 * Element sizes are in 64-bit words (208 words == 2048 bytes + change).
 */
522 #define NUM_RX_PACKETS_CTL (MAX_RX_BUFS + 3000)
523 #define NUM_TX_PACKETS_CTL 40
527 #define OCTEON_FPA_RX_PACKET_POOL 0
528 #define OCTEON_FPA_RX_PACKET_POOL_WORDS 208 /* 2048 bytes */
529 #define OCTEON_FPA_RX_PACKET_POOL_ELEM_SIZE (OCTEON_FPA_RX_PACKET_POOL_WORDS)
530 #define OCTEON_FPA_RX_PACKET_POOL_ELEMENTS (MAX_RX_BUFS)
531 #define OCTEON_RX_MAX_SIZE (OCTEON_FPA_RX_PACKET_POOL_WORDS * sizeof(uint64_t))
/* One WQE per in-flight RX packet; each WQE is one cache line. */
533 #define OCTEON_FPA_WQE_RX_POOL 1
534 #define OCTEON_FPA_WQE_RX_WORDS (OCTEON_CACHE_LINE_SIZE/8)
535 #define OCTEON_FPA_WQE_RX_POOL_ELEM_SIZE (OCTEON_FPA_WQE_RX_WORDS)
536 #define OCTEON_FPA_WQE_RX_POOL_ELEMENTS (NUM_RX_PACKETS_CTL)
538 #define OCTEON_FPA_TX_PACKET_POOL 2
539 #define OCTEON_FPA_TX_PACKET_POOL_WORDS 208 /* 2048 bytes */
540 #define OCTEON_FPA_TX_PACKET_POOL_ELEM_SIZE (OCTEON_FPA_TX_PACKET_POOL_WORDS)
541 #define OCTEON_FPA_TX_PACKET_POOL_ELEMENTS (MAX_TX_BUFS)
542 #define OCTEON_TX_MAX_SIZE (OCTEON_FPA_TX_PACKET_POOL_WORDS * sizeof(uint64_t))
/* PKO command buffers: 2 words per command, 300 commands per buffer. */
544 #define OCTEON_FPA_TX_CMDBUF_POOL 3
545 #define OCTEON_FPA_TX_CMD_SIZE 2
546 #define OCTEON_FPA_TX_CMD_NUM 300
547 #define OCTEON_FPA_TX_CMDBUF_POOL_WORDS (OCTEON_FPA_TX_CMD_SIZE * OCTEON_FPA_TX_CMD_NUM)
/* +1 leaves room for the chain-link word at the end of each command buffer. */
548 #define OCTEON_FPA_TX_CMDBUF_POOL_ELEM_SIZE (OCTEON_FPA_TX_CMDBUF_POOL_WORDS +1)
549 #define OCTEON_FPA_TX_CMDBUF_POOL_ELEMENTS (30 * OCTEON_RGMX_NUM_PORTS_MAX)
/* IPD particle-skip configuration (bytes); both disabled. */
551 #define FIRST_PARTICLE_SKIP 0
552 #define NOT_FIRST_PARTICLE_SKIP 0
554 #define ENABLE_BACK_PRESSURE 0
/* Safety cap on packets drained in one octeon_rx_loop invocation. */
555 #define RGMX_MAX_PAK_RECEIVE 5000000
558 #ifdef OCTEON_RGMX_SCHEDULED_ISRS
/* Task handler (deferred from the fast ISR): poll link status for all ports. */
561 static void octeon_rgmx_isr_link (void *context, int pending)
563 octeon_line_status_loop(NULL);
/* Task handler (deferred from the fast ISR): drain POW RX work. */
567 static void octeon_rgmx_isr_rxtx (void *context, int pending)
569 octeon_rx_loop(NULL);
573 /*********************************************************************
575 * Fast Interrupt Service routine
577 *********************************************************************/
/*
 * Fast interrupt filter: read the CIU summary, acknowledge the gentimer
 * (link-check) and POW (RX work) sources, and defer the real work to tasks.
 * Returns FILTER_HANDLED if either source fired, FILTER_STRAY otherwise.
 * NOTE(review): the lines setting handled_flag are not visible in this
 * gapped listing.  Also note the asymmetry: the link task is queued on the
 * system taskqueue_fast while RX/TX goes to the private tq -- confirm this
 * split is intentional.
 */
579 //#define OCTEON_RGMX_POW_TIME_THR_INTS 1
582 static int octeon_rgmx_intr_fast(void *arg)
585 int handled_flag = 0;
586 uint64_t ciu_summary;
588 ciu_summary = ciu_get_int_summary(CIU_THIS_CORE, OCTEON_RGMX_CIU_INTX,
589 OCTEON_RGMX_CIU_ENX);
591 if (ciu_summary & CIU_GENTIMER_BITS_ENABLE(CIU_GENTIMER_NUM_1)) {
594 * Timer Interrupt for link status checks
595 * Acknowledging it will mask it for this cycle.
597 ciu_clear_int_summary(CIU_THIS_CORE, OCTEON_RGMX_CIU_INTX,
599 CIU_GENTIMER_BITS_ENABLE(CIU_GENTIMER_NUM_1));
601 taskqueue_enqueue(taskqueue_fast, &link_isr_task);
605 if (ciu_summary & OCTEON_POW_ALL_GROUPS_MASK) {
606 #ifndef OCTEON_RGMX_POW_TIME_THR_INTS
608 * When using POW IQ/DSQ size based interrupts, then
609 * ack the interrupts right away. So they don't interrupt
610 * until the queue size goes to 0 again.
612 oct_write64(OCTEON_POW_WORKQUEUE_INT,
613 0x10001 << OCTEON_POW_RX_GROUP_NUM)/* clears both wq_int and iq_dis bits */;
617 * We use POW thresholds based interrupt signalled on timer
618 * countdown. Acknowledge it now so that it doesn't
619 * interrupt us until next countdown to zero.
621 oct_write64(OCTEON_POW_WORKQUEUE_INT,
622 0x1 << OCTEON_POW_RX_GROUP_NUM);
625 taskqueue_enqueue(tq, &rxtx_isr_task);
629 return ((handled_flag) ? FILTER_HANDLED : FILTER_STRAY);
633 #else /* ! OCTEON_RGMX_SCHEDULED_ISRS */
639 * This is direct inline isr. Will do all its work and heavy-lifting in interrupt context.
641 * Also note that the RGMX_LOCK/UNLOCK code will have to checked/added, since that is new and
642 * was not supported with this model.
/*
 * Non-deferred ISR variant (used when OCTEON_RGMX_SCHEDULED_ISRS is off):
 * drains RX work and polls link status directly in interrupt context.
 * NOTE(review): declaration/assignment of `flag` is not visible in this
 * gapped listing, and the in-code comment below warns that RGMX locking
 * was never validated for this path.
 */
644 static int octeon_rgmx_intr (void *arg)
647 uint64_t ciu_summary;
650 * read ciu to see if any bits are pow
653 ciu_summary = ciu_get_int_summary(CIU_THIS_CORE, OCTEON_RGMX_CIU_INTX,
654 OCTEON_RGMX_CIU_ENX);
/* Nothing of ours pending: bail out as a stray interrupt. */
656 if ((ciu_summary & (OCTEON_POW_ALL_GROUPS_MASK | CIU_GENTIMER_BITS_ENABLE(CIU_GENTIMER_NUM_1))) == 0) {
662 if (ciu_summary & OCTEON_POW_ALL_GROUPS_MASK) {
663 octeon_rx_loop(NULL);
665 * Acknowledge the interrupt after processing queues.
667 oct_write64(OCTEON_POW_WORKQUEUE_INT, OCTEON_POW_RX_GROUP_MASK);
669 if (ciu_summary & CIU_GENTIMER_BITS_ENABLE(CIU_GENTIMER_NUM_1)) {
670 octeon_line_status_loop(NULL);
671 ciu_clear_int_summary(CIU_THIS_CORE, OCTEON_RGMX_CIU_INTX,
673 CIU_GENTIMER_BITS_ENABLE(CIU_GENTIMER_NUM_1));
677 return ((flag) ? FILTER_HANDLED : FILTER_STRAY);
681 #endif /* OCTEON_RGMX_SCHEDULED_ISRS */
/*
 * Copy `totlen` bytes starting at `data_start` (an FPA packet buffer) into
 * a freshly-allocated mbuf chain, with the ethernet header alignment fixup
 * and RX checksum flags pre-set (hardware already validated IP/L4 sums on
 * this path).  Returns the chain, or NULL on allocation failure / bad size.
 * NOTE(review): listing is gapped -- the NULL checks after MGETHDR/MGET,
 * the chain loop structure and the error-path frees are not fully visible.
 */
685 static struct mbuf *octeon_rgmx_build_new_rx_mbuf(struct ifnet *ifp, void *data_start, u_int totlen);
687 static struct mbuf *octeon_rgmx_build_new_rx_mbuf (struct ifnet *ifp, void *data_start, u_int totlen)
689 struct mbuf *m, *m0, *newm;
/* Reject runts and frames larger than a VLAN-tagged max ethernet frame. */
693 if (totlen <= ETHER_HDR_LEN || totlen > LEBLEN - ETHER_CRC_LEN) {
695 if_printf(ifp, "invalid packet size %d; dropping\n", totlen);
700 MGETHDR(m0, M_DONTWAIT, MT_DATA);
705 /* Initialize packet header info. */
706 m0->m_pkthdr.rcvif = ifp;
707 m0->m_pkthdr.len = totlen;
/* Claim HW-verified IP and L4 checksums so the stack skips re-validation. */
708 m0->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
709 m0->m_pkthdr.csum_data = 0xffff;
714 if (totlen >= MINCLSIZE) {
715 MCLGET(m, M_DONTWAIT);
716 if ((m->m_flags & M_EXT) == 0)
717 goto octeon_rgmx_build_new_rx_mbuf_bad;
/* Align the payload so the IP header lands on a natural boundary. */
722 newdata = (caddr_t)ALIGN(m->m_data + ETHER_HDR_LEN) - ETHER_HDR_LEN;
723 len -= newdata - m->m_data;
727 /* Set the length of this mbuf. */
728 m->m_len = len = min(totlen, len);
729 bcopy(data_start, mtod(m, caddr_t), len);
730 data_start = (void *) (((u_long) (data_start)) + len);
734 MGET(newm, M_DONTWAIT, MT_DATA);
736 goto octeon_rgmx_build_new_rx_mbuf_bad;
738 m = m->m_next = newm;
744 octeon_rgmx_build_new_rx_mbuf_bad:
/*
 * Process one POW work-queue entry (one received frame) on `port`:
 * validate the port, copy the frame into an mbuf chain via
 * octeon_rgmx_build_new_rx_mbuf(), hand it to the stack through if_input,
 * then unconditionally return both the packet buffer and the WQE to their
 * FPA pools.  NOTE(review): this listing is gapped -- declarations, `len`
 * extraction and several #endif markers are not visible here.
 */
754 static void octeon_rgmx_rx_process_work (octeon_wqe_t *work, u_int port)
756 struct rgmx_softc_dev *sc;
759 void *data_start, *new_data_start;
762 //#define DEBUG_RX_PKT_DUMP 1
763 #ifdef DEBUG_RX_PKT_DUMP
767 data_start = octeon_pow_pktptr_to_kbuffer(work->packet_ptr);
771 printf(" WQE 0x%X: port:%u ", work, port);
772 printf(" Grp: %u, %llX Tag: %u %llX type: %u 0x%llx\n",
773 work->grp, work->grp, work->tag, work->tag, work->tag_type, work->tag_type);
/*
 * Fix: was `||`, which is always true and defeated the range check -- the
 * bad-port else branch below was unreachable.  The intent is clearly an
 * AND of both bounds.  (With MIN_PORT == 0 and port unsigned, the lower
 * bound is tautological but kept for symmetry.)
 */
776 if ((port >= OCTEON_RGMX_MIN_PORT) && (port <= OCTEON_RGMX_MAX_PORT)) {
778 sc = get_rgmx_softc(port);
780 if (!sc || !sc->ifp) {
782 printf(" octeon_rgmx_rx_process_work No sc or sc->ifp - port:%u", port);
787 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
789 if (!work->word2.bits.rcv_error) {
794 * We cannot pass the same FPA phys-buffer higher up.
795 * User space will not be able to use this phys-buffer.
797 * Start building a mbuf packet here using data_start & len.
800 new_data_start = data_start;
/* For IP packets the hardware prepends 14+8 bytes; skip to the frame proper. */
801 if (!work->word2.bits.not_IP) {
802 new_data_start = (void *) (((unsigned long) (new_data_start)) + 14);
803 /* mark it as checksum checked */
805 new_data_start = (void *) (((unsigned long) (new_data_start)) + 8);
808 #ifdef DEBUG_RX_PKT_DUMP
809 dc = new_data_start; printf("In:\n");
810 for (i = 0; i < len; i++) { if (!(i % 16)) printf ("\n"); printf(" %02X", dc[i]); }
813 mbuf = octeon_rgmx_build_new_rx_mbuf(ifp, new_data_start, len);
815 // printf(" Passing pkt to ifp: pkt_len: %u len: %u ", mbuf->m_pkthdr.len, mbuf->m_len);
816 #ifdef DEBUG_RX_PKT_DUMP
818 dc = mtod(mbuf, u_char *); printf("\n"); printf("In: ");
819 for (i = 0; i < mbuf->m_len; i++) { if (!(i % 16)) printf ("\n"); printf(" %02X", dc[i]); }
823 /* Feed the packet to upper layer. */
824 (*ifp->if_input)(ifp, mbuf);
827 } else { /* mbuf error */
828 if_printf(ifp, "mbuf rx construct error\n");
829 printf(" mbuf rx construct error\n");
833 } else { /* rcv_error */
837 } /* IFF_DRV_RUNNING */
839 } /* sc && sc->ifp */
841 } else { /* port number */
842 printf(" rgmx_rx:%u bad port\n", port);
/* Always recycle the FPA packet buffer and the WQE, even on error paths. */
845 octeon_fpa_free(data_start, OCTEON_FPA_RX_PACKET_POOL, 0);
846 octeon_fpa_free((void *)work, OCTEON_FPA_WQE_RX_POOL, 0);
852 /* ------------------------------------------------------------------- *
854 * ------------------------------------------------------------------- */
857 //#define OCTEON_VISUAL_RGMX 1
858 #ifdef OCTEON_VISUAL_RGMX
859 static int where0 = 0;
860 static int where1 = 0;
/*
 * Drain POW work synchronously until the queue is empty (or the
 * RGMX_MAX_PAK_RECEIVE safety cap trips), dispatching each WQE to
 * octeon_rgmx_rx_process_work().  Saves and restores this core's POW group
 * mask around the loop.  NOTE(review): the comment says "only our group",
 * but OCTEON_POW_ALL_GROUPS_MASK (0xffff) is written -- possibly meant to
 * be OCTEON_POW_ALL_OUR_GROUPS_MASK; confirm.  Declarations of core_id,
 * pak_count and work are not visible in this gapped listing.
 */
863 static void octeon_rx_loop (void *unused)
866 uint64_t prev_grp_mask;
870 core_id = octeon_get_core_num();
873 /* Only allow work for our group */
874 prev_grp_mask = oct_read64(OCTEON_POW_CORE_GROUP_MASK(core_id));
875 oct_write64(OCTEON_POW_CORE_GROUP_MASK(core_id), OCTEON_POW_ALL_GROUPS_MASK);
878 #ifdef OCTEON_VISUAL_RGMX
879 octeon_led_run_wheel(&where0, 3);
883 if (pak_count++ > RGMX_MAX_PAK_RECEIVE) {
/* Blocking work request; NULL means the input queue is empty. */
887 work = octeon_pow_work_request_sync(OCTEON_POW_WAIT);
891 * No more incoming packets. We can take a break now.
896 #ifdef OCTEON_VISUAL_RGMX
897 octeon_led_run_wheel(&where1, 4);
899 octeon_rgmx_rx_process_work(work, work->ipprt);
/* Restore the group mask we clobbered on entry. */
903 oct_write64(OCTEON_POW_CORE_GROUP_MASK(core_id), prev_grp_mask);
/*
 * Flatten an mbuf chain into a single freshly-allocated FPA TX packet
 * buffer.  Returns the buffer, or NULL if the pool is exhausted.
 * NOTE(review): the len-vs-OCTEON_TX_MAX_SIZE guard the comment refers to,
 * the NULL check after octeon_fpa_alloc and the final return are not
 * visible in this gapped listing -- confirm the oversize check exists.
 */
907 static void *octeon_rgmx_write_mbufs_to_fpa_buff (struct rgmx_softc_dev *sc, struct mbuf *m, u_int len)
911 u_char *write_offset;
916 * Compare len with max FPA-tx-packet size. Or else we will possibly corrupt the next pkt.
921 * Get an FPA buffer from Xmit-packets FPA pool
923 data_area = octeon_fpa_alloc(OCTEON_FPA_TX_PACKET_POOL);
926 * Fail. No room. No resources.
932 * Transfer the data from mbuf chain to the transmission buffer.
934 write_offset = data_area;
935 for (mp = m; mp != 0; mp = mp->m_next) {
937 bcopy(mtod(mp, caddr_t), write_offset, mp->m_len);
938 write_offset = (u_char *) (((u_long) write_offset) + mp->m_len);
/*
 * Hand a flattened FPA TX buffer to the PKO: build the 2-word PKO command
 * (word0 = seg/len/checksum control, word1 = buffer pointer), append it to
 * the per-port/per-queue command buffer under the queue spinlock, chaining
 * to a fresh command buffer when the current one is nearly full, then ring
 * the PKO doorbell for 2 words.  NOTE(review): gapped listing -- the
 * `checksum` test guarding the ipoffp1 line, pko_cmd.word64 init, `temp`
 * declaration and the return statements are not visible here.
 */
945 static u_int octeon_rgmx_pko_xmit_packet (struct rgmx_softc_dev *sc, void *out_buff, u_int len, u_int checksum)
947 octeon_pko_command_word0_t pko_cmd;
948 octeon_pko_packet_ptr_t pko_pkt_word;
950 u_short xmit_cmd_index;
951 uint64_t *xmit_cmd_ptr;
952 uint64_t xmit_cmd_state;
953 int queue = 0; // we should randomize queue # based on core num. Using same
954 // queue 0 for this port, by all cores on is less efficient.
957 * Prepare the PKO buffer and command word.
960 * Set #-segs and #-bytes
963 pko_cmd.bits.segs = 1;
964 pko_cmd.bits.total_bytes = len;
966 pko_cmd.bits.ipoffp1 = ETHER_HDR_LEN + 1; /* IPOffP1 is +1 based. 1 means offset 0 */
970 * Build the PKO buffer pointer. PKO Cmd Buf Word 1
972 pko_pkt_word.word64 = 0;
973 pko_pkt_word.bits.addr = OCTEON_PTR2PHYS(out_buff);
974 pko_pkt_word.bits.pool = OCTEON_FPA_TX_PACKET_POOL;
975 pko_pkt_word.bits.size = 2048; // dummy. Actual len is above.
978 printf(" PKO: 0x%llX 0x%llX ", pko_cmd.word64, pko_pkt_word.word64);
982 * Get the queue command ptr location from the per port per queue, pko info struct.
984 octeon_spinlock_lock(&(sc->outq_ptr[queue].lock));
986 printf(" xmit: sc->outq_ptr[queue].xmit_command_state: 0x%llX ", sc->outq_ptr[queue].xmit_command_state);
/* State word packs (phys cmd-buf base << INDEX_BITS) | word index; claim 2 words. */
988 xmit_cmd_state = sc->outq_ptr[queue].xmit_command_state;
989 sc->outq_ptr[queue].xmit_command_state = xmit_cmd_state + 2;
991 temp = (u_long) (xmit_cmd_state >> OCTEON_PKO_INDEX_BITS);
993 printf(" temp: 0x%X ", temp);
995 xmit_cmd_ptr = (uint64_t *) MIPS_PHYS_TO_KSEG0(temp);
996 xmit_cmd_index = xmit_cmd_state & OCTEON_PKO_INDEX_MASK;
997 xmit_cmd_ptr += xmit_cmd_index;
1000 * We end the PKO cmd buffer at odd boundary. Towards the end we will have
1001 * 4 or 3 or 2 or 1 or 0 word remaining. Case of 4, 2, or 0 can never happen.
1002 * We only care when we have 3 words remaining. In this case we write our 2 words
1003 * for PKO command and 3rd word as chain for next PKO cmd buffer.
1005 xmit_cmd_ptr[0] = pko_cmd.word64;
1007 if (xmit_cmd_index < (OCTEON_FPA_TX_CMDBUF_POOL_WORDS - 2)) {
1009 * Plenty of space left. Write our 2nd word and worry the next time.
1011 xmit_cmd_ptr[1] = pko_pkt_word.word64;
1015 * 3 words or less are left. We write our 2nd word now and then put in a chain link
1016 * to new PKO cmd buf.
1018 uint64_t phys_cmd_buf = octeon_fpa_alloc_phys(OCTEON_FPA_TX_CMDBUF_POOL);
1020 if (!phys_cmd_buf) {
1022 * FPA pool for xmit-buffer-commands is empty.
/* Roll back the 2-word claim before bailing out. */
1024 sc->outq_ptr[queue].xmit_command_state -= 2;
1025 octeon_spinlock_unlock(&(sc->outq_ptr[queue].lock));
1029 xmit_cmd_ptr[1] = pko_pkt_word.word64;
1030 xmit_cmd_ptr[2] = phys_cmd_buf;
/* Restart the state word at index 0 of the freshly chained command buffer. */
1032 sc->outq_ptr[queue].xmit_command_state = (phys_cmd_buf << OCTEON_PKO_INDEX_BITS);
1035 * Unlock queue structures.
1037 octeon_spinlock_unlock(&(sc->outq_ptr[queue].lock));
1040 * 2 words incremented in PKO. Ring the doorbell.
1043 printf(" Ringing doorbell: Port %u Queue %u words 2", sc->port, octeon_pko_get_base_queue(sc->port) + queue);
1045 octeon_pko_ring_doorbell(sc->port, octeon_pko_get_base_queue(sc->port) + queue, 2);
/*
 * Reclaim up to `n` mbufs from the TX pending queue (packets the PKO has
 * presumably finished with), keeping a lag of ~4 entries for frames still
 * in flight, then clear IFF_DRV_OACTIVE so the stack can queue more.
 * NOTE(review): gapped listing -- declarations, the m_freem() call and the
 * queue-empty early-return structure are not fully visible here.
 */
1051 static void octeon_rgmx_xmit_mark_buffers_done(struct rgmx_softc_dev *sc, u_int n);
1053 static void octeon_rgmx_xmit_mark_buffers_done (struct rgmx_softc_dev *sc, u_int n)
1058 for (i = 0; i < n; i++) {
1060 * Remove packets in queue. Leaving a lag of 3, to allow for PKO in-flight xmission
1062 if (_IF_QLEN(&sc->tx_pending_queue) > 4) {
1063 IF_DEQUEUE(&sc->tx_pending_queue, m);
1065 break; // Queue became empty now. Break out.
1068 * Return the mbuf to system.
1074 return; // Nothing removed from queue.
1078 * The transmitter is no more active.
1079 * Reset output active flag and watchdog timer.
1081 sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/* Per-call TX reclaim batch size, and an upper bound on flush iterations. */
1085 #define OCTEON_RGMX_FLUSH_N_XMIT_MBUFS_EACH_LOOP 5
1086 #define OCTEON_RGMX_FLUSH_PENDING_MBUFS_MAX 1000
/*
 * Drop everything queued in the ifnet send queue (bounded by
 * OCTEON_RGMX_FLUSH_PENDING_MBUFS_MAX iterations).  NOTE(review): the
 * m_freem() of each dequeued mbuf is not visible in this gapped listing.
 */
1090 * octeon_rgmx_output_flush
1092 * Drop all packets queued at ifnet layer.
1094 static void octeon_rgmx_output_flush (struct ifnet *ifp)
1097 u_int max_flush = OCTEON_RGMX_FLUSH_PENDING_MBUFS_MAX; /* Arbitrarily high number */
1099 while (max_flush-- && _IF_QLEN(&ifp->if_snd)) {
1101 * Get the next mbuf Packet chain to flush.
1103 IF_DEQUEUE(&ifp->if_snd, m);
1105 /* No more packets to flush */
1108 _IF_DROP(&ifp->if_snd);
/*
 * if_start handler: thin wrapper around the locked variant.
 * NOTE(review): `sc` is fetched but the RGMX_LOCK/RGMX_UNLOCK calls that
 * presumably bracket the locked call are not visible in this gapped
 * listing -- confirm locking.
 */
1116 * octeon_rgmx_output_start
1118 * Start output on interface.
1120 static void octeon_rgmx_output_start (struct ifnet *ifp)
1122 struct rgmx_softc_dev *sc = ifp->if_softc;
1125 octeon_rgmx_output_start_locked(ifp);
/*
 * Core transmit path (caller holds the driver lock): reclaim a batch of
 * completed TX mbufs, dequeue the next packet from if_snd, size-check it,
 * flatten it into an FPA buffer, tap BPF, park the mbuf on the pending
 * queue, and hand the buffer to the PKO.  Exits by setting or clearing
 * IFF_DRV_OACTIVE depending on whether more packets can be accepted.
 * NOTE(review): gapped listing -- declarations of m/out_buff, the loop
 * construct around the dequeue, the m_freem on the out-of-spec path and
 * the indicate_inactive/indicate_active labels are not visible here.
 */
1132 * octeon_rgmx_output_start_locked
1134 * Start output on interface. Assume Driver locked
1136 static void octeon_rgmx_output_start_locked (struct ifnet *ifp)
1138 struct rgmx_softc_dev *sc = ifp->if_softc;
1140 u_int len, need_l4_checksum;
1144 * Take out some of the last queued mbuf's from xmit-pending queue
1146 octeon_rgmx_xmit_mark_buffers_done(sc, OCTEON_RGMX_FLUSH_N_XMIT_MBUFS_EACH_LOOP);
1150 * See if there is room to put another packet in the buffer.
1151 * We *could* do better job by peeking the send queue to
1152 * know the length of the next packet. Current version just
1153 * tests against the worst case (i.e., longest packet). FIXME.
1155 * When adding the packet-peek feature, don't forget adding a
1156 * test on txb_count against QUEUEING_MAX.
1157 * There is a little chance the packet count exceeds
1158 * the limit. Assume transmission buffer is 8KB (2x8KB
1159 * configuration) and an application sends a bunch of small
1160 * (i.e., minimum packet sized) packets rapidly. An 8KB
1161 * buffer can hold 130 blocks of 62 bytes long...
1165 * If unable to send more.
1167 if (_IF_QLEN(&sc->tx_pending_queue) >= MAX_TX_BUFS) {
1168 printf(" Xmit not possible. NO room %u", _IF_QLEN(&sc->tx_pending_queue));
1169 goto indicate_active;
1174 * Get the next mbuf chain for a packet to send.
1176 IF_DEQUEUE(&ifp->if_snd, m);
1178 /* No more packets to send. */
1179 goto indicate_inactive;
1182 len = m->m_pkthdr.len;
1184 * Should never send big packets. If such a packet is passed,
1185 * it should be a bug of upper layer. We just ignore it.
1186 * ... Partial (too short) packets, neither.
1188 if (len < ETHER_HDR_LEN ||
1189 len > ETHER_MAX_LEN - ETHER_CRC_LEN) {
1191 * Fail. Bad packet size. Return the mbuf to system.
1194 "got an out-of-spec packet (%u bytes) to send\n", len);
1196 goto indicate_active;
1200 * Copy the mbuf chain into the transmission buffer.
1201 * txb_* variables are updated as necessary.
1203 out_buff = octeon_rgmx_write_mbufs_to_fpa_buff(sc, m, len);
1206 * No FPA physical buf resource.
1207 * Let's requeue it back. And slow it down for a while.
1209 IF_PREPEND(&ifp->if_snd, m);
1210 goto indicate_active;
/* Ask PKO for an L4 checksum only when the stack requested offload. */
1213 need_l4_checksum = (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) ? 1 : 0;
1216 * put the mbuf onto pending queue
1218 //#define DEBUG_TX_PKT_DUMP 1
1219 #ifdef DEBUG_TX_PKT_DUMP
1221 u_char *dc = out_buff;
1223 printf("\n"); printf("Out: ");
1224 for (ii = 0; ii < len; ii++) printf(" %X", dc[ii]); printf("\n");
/* Tap BPF listeners before the mbuf is parked awaiting PKO completion. */
1227 ETHER_BPF_MTAP(ifp, m);
1229 IF_ENQUEUE(&sc->tx_pending_queue, m);
1232 * Pass the mbuf data packet to PKO for xmission.
1234 octeon_rgmx_pko_xmit_packet(sc, out_buff, len, need_l4_checksum);
1241 * We are using the !OACTIVE flag to indicate to
1242 * the outside world that we can accept an
1243 * additional packet rather than that the
1244 * transmitter is _actually_ active. Indeed, the
1245 * transmitter may be active, but if we haven't
1246 * filled all the buffers with data then we still
1247 * want to accept more.
1249 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1255 * The transmitter is active, and there are no room for
1256 * more outgoing packets in the transmission buffer.
1259 // sc->mibdata.dot3StatsInternalMacTransmitErrors++;
1260 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1267 /* ------------------------------------------------------------------- *
1268 * octeon_config_hw_units() *
1269 * ------------------------------------------------------------------- *
1271 * Initialize Octeon hardware components. To get the RGMX going.
/*
 * Port-independent hardware bring-up, run once before any port is
 * configured: enable FPA and PKO, seed the four FPA pools, enable RED
 * early-drop thresholds, program the IPD, and set the PKO command-buffer
 * globals.  NOTE(review): this listing is gapped; some intervening calls
 * may not be visible.
 */
1274 static void octeon_config_hw_units_pre_ports (void)
1278 octeon_enable_fpa();
1281 octeon_pko_enable();
1290 * Input Buffers Pool
1293 octeon_fpa_fill_pool_mem(OCTEON_FPA_RX_PACKET_POOL, OCTEON_FPA_RX_PACKET_POOL_ELEM_SIZE,
1294 OCTEON_FPA_RX_PACKET_POOL_ELEMENTS);
1300 octeon_fpa_fill_pool_mem(OCTEON_FPA_WQE_RX_POOL, OCTEON_FPA_WQE_RX_POOL_ELEM_SIZE,
1301 OCTEON_FPA_WQE_RX_POOL_ELEMENTS);
1307 octeon_fpa_fill_pool_mem(OCTEON_FPA_TX_CMDBUF_POOL, OCTEON_FPA_TX_CMDBUF_POOL_ELEM_SIZE,
1308 OCTEON_FPA_TX_CMDBUF_POOL_ELEMENTS);
1311 * Output Buffers Pool
1314 octeon_fpa_fill_pool_mem(OCTEON_FPA_TX_PACKET_POOL, OCTEON_FPA_TX_PACKET_POOL_ELEM_SIZE,
1315 OCTEON_FPA_TX_PACKET_POOL_ELEMENTS);
/* RED thresholds: pass at 1/4 of the RX pool, drop at 1/8. */
1319 octeon_rgmx_enable_RED_all(OCTEON_FPA_RX_PACKET_POOL_ELEMENTS >> 2, OCTEON_FPA_RX_PACKET_POOL_ELEMENTS >> 3);
1322 octeon_ipd_config(OCTEON_FPA_RX_PACKET_POOL_WORDS,
1323 FIRST_PARTICLE_SKIP / 8,
1324 NOT_FIRST_PARTICLE_SKIP / 8,
1325 FIRST_PARTICLE_SKIP / 128,
1326 NOT_FIRST_PARTICLE_SKIP / 128,
1327 OCTEON_FPA_WQE_RX_POOL,
1328 OCTEON_IPD_OPC_MODE_STF,
1329 ENABLE_BACK_PRESSURE);
1332 * PKO setup Output Command Buffers
1334 octeon_pko_config_cmdbuf_global_defaults(OCTEON_FPA_TX_CMDBUF_POOL,
1335 OCTEON_FPA_TX_CMDBUF_POOL_ELEM_SIZE);
/*
 * Per-port hardware bring-up: configure the RGMII port itself, its PKO
 * output queues, program the MAC address and set up PIP input tagging.
 */
static void octeon_config_hw_units_port (struct rgmx_softc_dev *sc, u_int port)

	/* Equal round-robin weight (8) for each of the port's queues. */
	const u_int priorities[8] = {8,8,8,8,8,8,8,8};
	u_int total_queues, base_queue;

	octeon_config_rgmii_port(port);

	total_queues = octeon_pko_get_num_queues(port);
	base_queue = octeon_pko_get_base_queue(port);
	/* Packet output configures Queue and Ports */
	octeon_pko_config_port(port, base_queue,
	                       OCTEON_FPA_TX_CMDBUF_POOL,

	/* Program the port's station MAC into SMAC/CAM registers. */
	octeon_rgmx_set_mac(port);

	/* Setup Port input tagging */
	octeon_rgmx_config_pip(port);
/* POW work-queue interrupt threshold register layout.
 * NOTE(review): only part of each register layout is visible here;
 * field widths below are as declared, remaining bits not shown. */
	uint64_t rsvd3 : 35;
	uint64_t enable : 1;	/* threshold interrupt enable */
	uint64_t time_thr : 4;	/* time-based threshold */
	uint64_t ds_thr : 11;	/* de-schedule count threshold */
	uint64_t iq_thr : 11;	/* input-queue count threshold */
} octeon_rgmx_pow_int_threshold_t;

/* POW per-core interrupt count register. */
	uint64_t tc_cnt : 4;
	uint64_t ds_cnt : 12;
	uint64_t iq_cnt : 12;
} octeon_rgmx_pow_int_cnt_t;

/* POW work-queue periodic-interrupt register. */
	uint64_t thr_freq : 28; // R/O
	uint64_t thr_period : 20;
} octeon_rgmx_pow_int_pc_t;

/* POW no-schedule count register. */
	uint64_t nos_cnt : 12;
} octeon_rgmx_pow_nos_cnt;

/* PIP inbound packet counter. */
	uint64_t inb_pkts : 32;
} octeon_rgmx_pip_inb_pkts;

/* PIP inbound error counter. */
	uint64_t inb_errs : 16;
} octeon_rgmx_pip_inb_errs;

/* POW input-queue depth, summed across all QOS levels. */
	uint64_t iq_cnt : 32;
} octeon_pow_inpt_q_all_qos;

/* POW input-queue depth for a single QOS group. */
	uint64_t iq_cnt : 32;
} octeon_pow_inpt_q_grp_qos;
/*
 * Final chip-global bring-up after all ports are configured: program the
 * POW work-queue interrupt thresholds, enable the RX/timer interrupts in
 * the CIU, start the periodic general-purpose timer and enable IPD.
 */
static void octeon_config_hw_units_post_ports (void)

	octeon_rgmx_pow_int_threshold_t thr;
	octeon_rgmx_pow_int_pc_t intpc;

	/* NOTE(review): thr_freq is marked R/O in the register typedef
	 * above; writing the computed rate here may have been intended
	 * for thr_period -- confirm against the POW register spec. */
	intpc.bits.thr_freq = (500 * 1000 * 1000) / (1000 * 16 * 256);

#ifdef OCTEON_RGMX_POW_TIME_THR_INTS
	/* Time-threshold driven work-queue interrupts. */
	thr.bits.enable = 1;
	thr.bits.time_thr = 0xf;
	oct_write64(OCTEON_POW_WORKQUEUE_INT_THRESHOLD(OCTEON_POW_RX_GROUP_NUM), thr.word64);

	oct_write64(OCTEON_POW_WORKQUEUE_INT_PC, intpc.word64);

	thr.bits.ds_thr = thr.bits.iq_thr = 1; // Only if doing absolute queue-cnt interrupts.
	oct_write64(OCTEON_POW_WORKQUEUE_INT_THRESHOLD(OCTEON_POW_RX_GROUP_NUM), thr.word64);

	/* Enable RX-group and gentimer-1 interrupts on this core (IP2). */
	ciu_enable_interrupts(PCPU_GET(cpuid), OCTEON_RGMX_CIU_INTX, OCTEON_RGMX_CIU_ENX,
	                      (OCTEON_POW_RX_GROUP_MASK |
	                       CIU_GENTIMER_BITS_ENABLE(CIU_GENTIMER_NUM_1)), CIU_MIPS_IP2);

	/* Clear any stale gentimer-1 summary bit before starting it. */
	ciu_clear_int_summary(CIU_THIS_CORE, OCTEON_RGMX_CIU_INTX,
	                      OCTEON_RGMX_CIU_ENX, CIU_GENTIMER_BITS_ENABLE(CIU_GENTIMER_NUM_1));

	/* 1-second periodic housekeeping timer. */
	octeon_ciu_start_gtimer(CIU_GENTIMER_NUM_1, OCTEON_GENTIMER_PERIODIC,
	                        OCTEON_GENTIMER_LEN_1SEC);

	/* Last step: let IPD start accepting packets. */
	octeon_ipd_enable();
/*
 * Configure the Packet Input Processor for one port: global L2 length
 * checking, per-port QOS/parse mode, and the PIP->POW tag generation
 * rules (which fields hash into the work-queue tag).
 */
static void octeon_rgmx_config_pip (u_int port)

	octeon_pip_gbl_cfg_t pip_config;
	octeon_pip_port_cfg_t pip_port_config;
	octeon_pip_port_tag_cfg_t pip_tag_config;

	/* Global config: enforce max L2 length checking. */
	pip_config.word64 = 0;
	pip_config.bits.max_l2 = 1;
	oct_write64(OCTEON_PIP_GBL_CFG, pip_config.word64);

	/* Per-port parse mode (skip L2), QOS level derived from the
	 * low 3 bits of the port number, CRC checking enabled. */
	pip_port_config.word64 = 0;
	pip_port_config.bits.mode = OCTEON_PIP_PORT_CFG_MODE_SKIPL2;
	pip_port_config.bits.qos = port & 0x7;
	pip_port_config.bits.crc_en = 1;

	/*
	 * PIP -> POW tags config
	 *
	 * We don't use any pkt input fields for tag hash, except for Port#
	 */
	pip_tag_config.word64 = 0;

	pip_tag_config.bits.grptag = 0;
	pip_tag_config.bits.grptagmask = 0xf;
	pip_tag_config.bits.grptagbase = 1;

	/* IPv6 fields excluded from the tag hash. */
	pip_tag_config.bits.ip6_src_flag = 0;
	pip_tag_config.bits.ip6_dst_flag = 0;
	pip_tag_config.bits.ip6_sprt_flag = 0;
	pip_tag_config.bits.ip6_dprt_flag = 0;
	pip_tag_config.bits.ip6_nxth_flag = 0;

	/* IPv4 5-tuple fields included in the tag hash. */
	pip_tag_config.bits.ip4_src_flag = 1;
	pip_tag_config.bits.ip4_dst_flag = 1;
	pip_tag_config.bits.ip4_sprt_flag = 1;
	pip_tag_config.bits.ip4_dprt_flag = 1;
	pip_tag_config.bits.ip4_pctl_flag = 1;

	/* All tag types set to 0; include port number in the tag. */
	pip_tag_config.bits.tcp6_tag_type = 0;
	pip_tag_config.bits.tcp4_tag_type = 0;
	pip_tag_config.bits.ip6_tag_type = 0;
	pip_tag_config.bits.ip4_tag_type = 0;
	pip_tag_config.bits.inc_prt_flag = 1;
	pip_tag_config.bits.non_tag_type = OCTEON_POW_TAG_TYPE_NULL;
	pip_tag_config.bits.grp = OCTEON_POW_RX_GROUP_NUM;

	octeon_pip_config_port(port, pip_port_config, pip_tag_config);

	/* Accept work from every POW group on this core. */
	oct_write64(OCTEON_POW_CORE_GROUP_MASK(OUR_CORE), OCTEON_POW_ALL_GROUPS_MASK);
1564 * octeon_rgmx_stop_port
1567 static u_int octeon_rgmx_stop_port (u_int port)
1569 int interface = INTERFACE(port);
1570 int index = INDEX(port);
1571 octeon_rgmx_prtx_cfg_t gmx_cfg;
1572 u_int last_enabled = 0;
1574 gmx_cfg.word64 = oct_read64(OCTEON_RGMX_PRTX_CFG(index, interface));
1575 last_enabled = (gmx_cfg.bits.en == 1);
1576 gmx_cfg.bits.en = 0;
1577 oct_write64(OCTEON_RGMX_PRTX_CFG(index, interface), gmx_cfg.word64);
1578 return (last_enabled);
1581 static void octeon_rgmx_start_port(u_int port)
1583 int interface = INTERFACE(port);
1584 int index = INDEX(port);
1585 octeon_rgmx_prtx_cfg_t gmx_cfg;
1587 gmx_cfg.word64 = oct_read64(OCTEON_RGMX_PRTX_CFG(index, interface));
1588 gmx_cfg.bits.en = 1;
1589 oct_write64(OCTEON_RGMX_PRTX_CFG(index, interface), gmx_cfg.word64);
/*
 * Stop the interface: disable the underlying GMX port and clear the
 * driver RUNNING/OACTIVE flags so the stack stops handing us packets.
 */
static void octeon_rgmx_stop (struct rgmx_softc_dev *sc)

	octeon_rgmx_stop_port(sc->port);

	/* Reset transmitter variables and interface flags. */
	sc->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
/* Change the media selection. */
/*
 * ifmedia "change" callback: validate that the requested media is one
 * this interface advertised (via sc->mbitmap), then defer the actual
 * reconfiguration to init when the interface comes up.
 */
static int octeon_rgmx_medchange (struct ifnet *ifp)

	struct rgmx_softc_dev *sc = ifp->if_softc;

	/* If_media should not pass any request for a media which this
	   interface doesn't support. */

	/* Map the requested media word back to its bit position. */
	for (b = 0; bit2media[b] != 0; b++) {
		if (bit2media[b] == sc->media.ifm_media) break;

	/* Reject media not present in the supported-media bitmap. */
	if (((1 << b) & sc->mbitmap) == 0) {
		"got an unsupported media request (0x%x)\n",
		sc->media.ifm_media);

	/* We don't actually change media when the interface is down.
	   fe_init() will do the job, instead. Should we also wait
	   until the transmission buffer being empty? Changing the
	   media when we are sending a frame will cause two garbages
	   on wires, one on old media and another on new. FIXME */
	if (sc->ifp->if_flags & IFF_UP) {
		printf(" Media change requested while IF is up\n");
	printf(" Media change requested while IF is Down\n");
1640 static void octeon_rgmx_medstat (struct ifnet *ifp, struct ifmediareq *ifm)
1642 struct rgmx_softc_dev *sc = ifp->if_softc;
1643 octeon_rgmx_rxx_rx_inbnd_t link_status;
1645 octeon_rgmx_config_speed(sc->port, 1);
1649 ifm->ifm_status = IFM_AVALID;
1650 ifm->ifm_active = IFM_ETHER;
1653 * Parse link status.
1655 link_status.word64 = sc->link_status;
1657 if (!link_status.bits.status) {
1662 ifm->ifm_status |= IFM_ACTIVE;
1664 switch (link_status.bits.speed) {
1666 ifm->ifm_active |= IFM_10_T;
1669 ifm->ifm_active |= IFM_100_TX;
1672 ifm->ifm_active |= IFM_1000_T;;
1682 if (link_status.bits.duplex == 1)
1683 ifm->ifm_active |= IFM_FDX;
1685 ifm->ifm_active |= IFM_HDX;
/*
 * Program the receive address filter (ADR_CTL + CAM) for the port from
 * the interface flags: always accept broadcast, accept all multicast,
 * and either filter unicast by CAM or (in promiscuous mode) disable the
 * CAM and accept everything.  The port is quiesced around the update.
 */
static void octeon_rgmx_config_cam(struct ifnet *ifp)

	struct rgmx_softc_dev *sc = ifp->if_softc;
	u_int port = sc->port;
	int index = INDEX(port);
	int iface = INTERFACE(port);

	/* Quiesce the port while reprogramming the filter registers. */
	last_enabled = octeon_rgmx_stop_port(port);

	adr_ctl = oct_read64(OCTEON_RGMX_RXX_ADR_CTL(index, iface));

	/*
	 * Always accept broadcast traffic.
	 */
	if ((adr_ctl & OCTEON_RGMX_ADRCTL_ACCEPT_BROADCAST) == 0)
		adr_ctl |= OCTEON_RGMX_ADRCTL_ACCEPT_BROADCAST;

	/*
	 * Accept all multicast in all multicast mode and in
	 *
	 * XXX Since we don't handle programming the CAM for
	 * multicast filtering, always accept all multicast.
	 */
	adr_ctl &= ~OCTEON_RGMX_ADRCTL_REJECT_ALL_MULTICAST;
	adr_ctl |= OCTEON_RGMX_ADRCTL_ACCEPT_ALL_MULTICAST;

	/*
	 * In promiscuous mode, the CAM is shut off, so reject everything.
	 * Otherwise, filter using the CAM.
	 */
	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		adr_ctl &= ~OCTEON_RGMX_ADRCTL_CAM_MODE_ACCEPT_DMAC;
		adr_ctl |= OCTEON_RGMX_ADRCTL_CAM_MODE_REJECT_DMAC;
	adr_ctl &= ~OCTEON_RGMX_ADRCTL_CAM_MODE_REJECT_DMAC;
	adr_ctl |= OCTEON_RGMX_ADRCTL_CAM_MODE_ACCEPT_DMAC;

	oct_write64(OCTEON_RGMX_RXX_ADR_CTL(index, iface), adr_ctl);

	/*
	 * If in promiscuous mode, disable the CAM.
	 */
	if ((ifp->if_flags & IFF_PROMISC) != 0)
		oct_write64(OCTEON_RGMX_RXX_ADR_CAM_EN(index, iface), 0);
	oct_write64(OCTEON_RGMX_RXX_ADR_CAM_EN(index, iface), 1);

	/* Restore the port only if it was running when we stopped it. */
	if (last_enabled) octeon_rgmx_start_port(port);
/*
 * Interface ioctl handler: interface up/down transitions, media
 * get/set (delegated to if_media), checksum-offload capability
 * toggling, and ether_ioctl() for everything else.
 */
static int octeon_rgmx_ioctl (struct ifnet * ifp, u_long command, caddr_t data)

	struct rgmx_softc_dev *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;

	printf(" octeon_rgmx_ioctl. No sc\n");

	/*
	 * Switch interface state between "running" and
	 * "stopped", reflecting the UP flag.
	 */
	if (ifp->if_flags & IFF_UP) {
		/*
		 * New state is IFF_UP
		 * Restart or Start now, if driver is not running currently.
		 */
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
			octeon_rgmx_init(sc);

	/* Re-sync the RX address filter with the current flags. */
	octeon_rgmx_config_cam(ifp);

	/*
	 * New state is IFF_DOWN.
	 * Stop & shut it down now, if driver is running currently.
	 */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		octeon_rgmx_stop(sc);

	/* Let if_media to handle these commands and to call */
	error = ifmedia_ioctl(ifp, ifr, &sc->media, command);

	/* TSO and VLAN HW tagging are never offered. */
	ifp->if_hwassist &= ~CSUM_TSO;
	ifp->if_capenable &= ~IFCAP_VLAN_HWTAGGING;

	/* Toggle only the capability bits the caller asked to change. */
	mask = ifr->ifr_reqcap ^ ifp->if_capenable;
	if (mask & IFCAP_HWCSUM) {
		ifp->if_capenable ^= IFCAP_HWCSUM;
		/* Keep if_hwassist consistent with the TXCSUM setting. */
		if (ifp->if_capenable & IFCAP_TXCSUM) {
			ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
		ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP);

	/* Everything else (MTU, addresses, ...) goes to the generic layer. */
	error = ether_ioctl(ifp, command, data);
/*
 * Bring the interface to running state.  Most HW enabling was already
 * done during attach/global config, so this mainly marks the interface
 * RUNNING and refreshes the link speed/status.
 */
static void octeon_rgmx_init (void *xsc)

	struct rgmx_softc_dev *sc = xsc;

	/* Enable interrupts. */
	/* For RGMX they are already enabled earlier */

	/* Enable transmitter and receiver. */
	/* For RGMX they are already enabled earlier */

	/* Flush out all HW receive buffers for this interface. */
	/* For RGMX, no means to flush an individual port */

	/* Set 'running' flag, because we are now running. */
	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;

	/* Set the HW Address filter. aka program Mac-addr & Multicast filters */
	/* For RGMX this was taken care of via set_mac_addr() */

	/* Kick start the output */
	/* Hopefully PKO is running and will pick up packets via the timer or receive loop */

	/* Set link status. */
	octeon_rgmx_config_speed(sc->port, 1);
/*
 * Sample the inband link status for a port and, if speed or duplex
 * changed, reprogram the GMX clock/slot/burst registers to match.
 * When report_link is set, link up/down transitions are also pushed
 * to the network stack via if_link_state_change().
 */
static void octeon_rgmx_config_speed (u_int port, u_int report_link)

	int index = INDEX(port);
	int iface = INTERFACE(port);
	struct rgmx_softc_dev *sc;
	octeon_rgmx_rxx_rx_inbnd_t link_status, old_link_status;
	octeon_rgmx_prtx_cfg_t gmx_cfg;
	uint64_t val64_tx_clk, val64_tx_slot, val64_tx_burst;

	sc = get_rgmx_softc(port);

	printf(" config_speed didn't find sc int:%u port:%u", iface, port);

	/*
	 * Look up interface-port speed params
	 */
	link_status.word64 = oct_read64(OCTEON_RGMX_RXX_RX_INBND(index, iface));

	/*
	 * Compare to prev known state. If same then nothing to do.
	 */
	if (link_status.word64 == sc->link_status) {

	old_link_status.word64 = sc->link_status;

	/*
	 * Compare to previous state modulo link status. If only link
	 * status is different, we don't need to change media.
	 */
	if (old_link_status.bits.duplex != link_status.bits.duplex ||
	    old_link_status.bits.speed != link_status.bits.speed) {
		/* Quiesce the port while retiming registers are changed. */
		last_enabled = octeon_rgmx_stop_port(port);

		gmx_cfg.word64 = oct_read64(OCTEON_RGMX_PRTX_CFG(index, iface));

		/*
		 * XXX Set based on link_status.bits.duplex?
		 */
		gmx_cfg.bits.duplex = 1;

		/* Per-speed GMX settings and TX clock/slot/burst values. */
		switch (link_status.bits.speed) {
		case 0:		/* 10Mbps */
			gmx_cfg.bits.speed = 0;
			gmx_cfg.bits.slottime = 0;
			val64_tx_clk = 50; val64_tx_slot = 0x40; val64_tx_burst = 0;

		case 1:		/* 100Mbps */
			gmx_cfg.bits.speed = 0;
			gmx_cfg.bits.slottime = 0;
			val64_tx_clk = 5; val64_tx_slot = 0x40; val64_tx_burst = 0;

			/* Gigabit settings. */
			gmx_cfg.bits.speed = 1;
			gmx_cfg.bits.slottime = 1;
			val64_tx_clk = 1; val64_tx_slot = 0x200; val64_tx_burst = 0x2000;

			gmx_cfg.bits.speed = 1;
			gmx_cfg.bits.slottime = 1;
			val64_tx_clk = 1; val64_tx_slot = 0x200; val64_tx_burst = 0x2000;

		oct_write64(OCTEON_RGMX_TXX_CLK(index, iface), val64_tx_clk);
		oct_write64(OCTEON_RGMX_TXX_SLOT(index, iface), val64_tx_slot);
		oct_write64(OCTEON_RGMX_TXX_BURST(index, iface), val64_tx_burst);

		oct_write64(OCTEON_RGMX_PRTX_CFG(index, iface), gmx_cfg.word64);

		if (last_enabled) octeon_rgmx_start_port(port);

	/*
	 * Now check and possibly change link status.
	 */
	if (link_status.bits.status != old_link_status.bits.status) {

		if (link_status.bits.status) {
			if_link_state_change(sc->ifp, LINK_STATE_UP);
		if_link_state_change(sc->ifp, LINK_STATE_DOWN);

	sc->link_status = link_status.word64;

	/*
	 * We can't update link status proper since we can't
	 * change it in the interface, so keep the old link
	 * status intact but note the current speed and duplex
	 */
	link_status.bits.status = old_link_status.bits.status;
	sc->link_status = link_status.word64;
1961 #ifdef DEBUG_RGMX_DUMP
1962 static void octeon_dump_rgmx_stats (u_int port)
/*
 * Periodic housekeeping (driven by the CIU general-purpose timer):
 * sanity-check the port/softc tables, kick transmit on UP interfaces,
 * flush output on DOWN ones, and re-check line speed on every port.
 */
static void rgmx_timer_periodic (void)

	struct rgmx_softc_dev *sc;

	for (index = 0; index < OCTEON_RGMX_NUM_PORTS_MAX; index ++) {

		port = port_array[index];
		sc = rgmx_scdev_array[index];

		/*
		 * Skip over ports/slots not in service.
		 */
		if ((port < OCTEON_RGMX_MIN_PORT) || (port > OCTEON_RGMX_MAX_PORT)) {

		/* -1 is used as a "slot allocated but dead" marker. */
		if ((NULL == sc) || (((struct rgmx_softc_dev *)-1) == sc)) {

		/*
		 * Now look for anomalous conditions
		 */
		if (sc != get_rgmx_softc(port)) {
			printf(" port %u sc %p not in sync with index: %u\n",

		if (sc->port != port) {
			printf(" port %u sc %p port-> %u not in sync with index: %u\n",
			       port, sc, sc->port, index);

		printf(" port %u sc %p . Bad ifp %p\n", port, sc, ifp);

		/*
		 * Check if packets queued at ifnet layer. Kick start output if we can.
		 */
		if (sc->ifp->if_flags & IFF_UP) {
			octeon_rgmx_output_start(ifp);
		octeon_rgmx_output_flush(ifp);

		/*
		 * Check if line status changed ? Adjust ourselves.
		 */
		octeon_rgmx_config_speed(port, 1);
#ifdef DEBUG_RGMX_DUMP
/*
 * Debug-only dump of POW/PIP counters: no-schedule count, group mask,
 * interrupt thresholds and counters, inbound packet/error counts for
 * ports 16/17, and per-QOS-group input-queue depths.
 */
static void octeon_dump_pow_stats(void)

	octeon_rgmx_pow_nos_cnt nos_cnt;
	octeon_rgmx_pow_int_pc_t intpc;
	octeon_rgmx_pow_int_threshold_t thr;
	octeon_rgmx_pow_int_cnt_t int_cnt;
	int core = octeon_get_core_num();
	octeon_pow_inpt_q_all_qos inpt_q_all;
	octeon_pow_inpt_q_grp_qos inpt_q_grp;
	octeon_rgmx_pip_inb_pkts pkts;
	octeon_rgmx_pip_inb_errs errs;
	/* Running totals across calls (counters are read cumulatively). */
	static u_int pkts0 = 0;
	static u_int pkts1 = 0;
	static u_int errs0 = 0;
	static u_int errs1 = 0;

	nos_cnt.word64 = oct_read64(OCTEON_RGMX_POW_NOS_CNT);
	if (nos_cnt.bits.nos_cnt) printf(" *** No sched cnt %u\n", nos_cnt.bits.nos_cnt);
	printf(" \nGroup mask: 0x%llX WorkQueue Int : 0x%llX\n", oct_read64(OCTEON_POW_CORE_GROUP_MASK(OUR_CORE)), oct_read64(OCTEON_POW_WORKQUEUE_INT));
	intpc.word64 = oct_read64(OCTEON_POW_WORKQUEUE_INT_PC);
	printf(" Intr Periodic Cntr: PC %u thr: %u\n", intpc.bits.thr_freq, intpc.bits.thr_period);
	thr.word64 = oct_read64(OCTEON_POW_WORKQUEUE_INT_THRESHOLD(OCTEON_POW_RX_GROUP_NUM));
	printf(" Thresholds iq %u ds %u time %u enable %u\n",
	       thr.bits.iq_thr, thr.bits.ds_thr, thr.bits.time_thr, thr.bits.enable);
	int_cnt.word64 = oct_read64(OCTEON_POW_INT_CNTR(core));
	printf(" Int_cnt iq_cnt %u ds_cnt %u tc_cnt %u\n",
	       int_cnt.bits.iq_cnt, int_cnt.bits.ds_cnt, int_cnt.bits.tc_cnt);
	/* Ports 16 and 17 are the RGMII ports sampled here. */
	pkts.word64 = oct_read64(OCTEON_PIP_STAT_INB_PKTS(16)); pkts0 += pkts.bits.inb_pkts;
	errs.word64 = oct_read64(OCTEON_PIP_STAT_INB_ERRS(16)); errs0 += errs.bits.inb_errs;
	pkts.word64 = oct_read64(OCTEON_PIP_STAT_INB_PKTS(17)); pkts1 += pkts.bits.inb_pkts;
	errs.word64 = oct_read64(OCTEON_PIP_STAT_INB_ERRS(17)); errs1 += errs.bits.inb_errs;
	printf(" PIP inbound pkts(16): %u Errors: %u inbound(17): %u Errors: %u\n", pkts0, errs0, pkts1, errs1);
	inpt_q_all.word64 = oct_read64(OCTEON_POW_INPT_Q_ALL_QOS);
	printf(" All queued pkt in qos Levels: %u -- ", inpt_q_all.bits.iq_cnt);
	/* NOTE(review): iterates groups 0..6 only; if there are 8 QOS
	 * groups (as suggested by the 8-queue RED loop) the last group
	 * is never dumped -- confirm intended bound. */
	for (i = 0 ; i < 7; i++) {
		inpt_q_grp.word64 = oct_read64(OCTEON_POW_INPT_QOS_GRP(i));
		if (inpt_q_grp.bits.iq_cnt) printf(" Grp-%u: %u ", i, inpt_q_grp.bits.iq_cnt);
/* ------------------------------------------------------------------- *
 *                      octeon_line_status_loop()                      *
 * ------------------------------------------------------------------- */
/*
 * Periodic callback: walk every attached device, refresh link speed on
 * RUNNING interfaces and kick their transmit path.  Optionally dumps
 * debug statistics when DEBUG_RGMX_DUMP is defined.
 */
static void octeon_line_status_loop (void *unused)

	struct rgmx_softc_dev *sc;

	for (idx = 0; idx < num_devices; idx++) {
		sc = rgmx_scdev_array[idx];
		if (sc && sc->ifp) {
			if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				octeon_rgmx_config_speed(sc->port, 1);

				octeon_rgmx_output_start(sc->ifp);

//#define DEBUG_RGMX_DUMP
#ifdef DEBUG_RGMX_DUMP
	static int count = 0;

//	octeon_dump_fpa_pool(OCTEON_FPA_RX_PACKET_POOL);
//	octeon_dump_fpa_pool(OCTEON_FPA_WQE_RX_POOL);
//	octeon_dump_fpa_pool(OCTEON_FPA_TX_PACKET_POOL);
	octeon_dump_rgmx_stats(16);
	octeon_dump_pow_stats();
/* ------------------------------------------------------------------- *
 *                        octeon_rgmx_set_mac                          *
 * ------------------------------------------------------------------- *
 *
 * octeon_rgmx_set_mac
 *
 * Program the ethernet HW address
 */
static void octeon_rgmx_set_mac (u_int port)

	struct rgmx_softc_dev *sc;
	u_int iface = INTERFACE(port);
	u_int index = INDEX(port);

	sc = get_rgmx_softc(port);

	printf(" octeon_rgmx_set_mac Missing sc. port:%u", port);

	/* Pack the 6 MAC bytes (sc->ieee[]) into one 48-bit value. */
	for (ii = 0; ii < 6; ii++) {
		mac = (mac << 8) | (uint64_t)(sc->ieee[ii]);

	/* Quiesce the port while reprogramming address registers. */
	last_enabled = octeon_rgmx_stop_port(port);

	/* Source MAC for pause frames, then one CAM register per byte. */
	oct_write64(OCTEON_RGMX_SMACX(index, iface), mac);
	oct_write64(OCTEON_RGMX_RXX_ADR_CAM0(index, iface), sc->ieee[0]);
	oct_write64(OCTEON_RGMX_RXX_ADR_CAM1(index, iface), sc->ieee[1]);
	oct_write64(OCTEON_RGMX_RXX_ADR_CAM2(index, iface), sc->ieee[2]);
	oct_write64(OCTEON_RGMX_RXX_ADR_CAM3(index, iface), sc->ieee[3]);
	oct_write64(OCTEON_RGMX_RXX_ADR_CAM4(index, iface), sc->ieee[4]);
	oct_write64(OCTEON_RGMX_RXX_ADR_CAM5(index, iface), sc->ieee[5]);
	/* Default filter: broadcast + all multicast + CAM-matched unicast. */
	oct_write64(OCTEON_RGMX_RXX_ADR_CTL(index, iface),
		    OCTEON_RGMX_ADRCTL_ACCEPT_BROADCAST |
		    OCTEON_RGMX_ADRCTL_ACCEPT_ALL_MULTICAST |
		    OCTEON_RGMX_ADRCTL_CAM_MODE_ACCEPT_DMAC);
	oct_write64(OCTEON_RGMX_RXX_ADR_CAM_EN(index, iface), 1);
	if (last_enabled) octeon_rgmx_start_port(port);
/* ------------------------------------------------------------------- *
 *                      octeon_config_rgmii_port()                     *
 * ------------------------------------------------------------------- */
/*
 * Low-level RGMII port setup: enable RX/TX in the ASX unit, enable the
 * GMX port, program initial speed settings, the TX threshold/watermark,
 * and the chip-specific TX/RX clock delay values.
 */
static void octeon_config_rgmii_port (u_int port)

	u_int iface = INTERFACE(port);
	u_int index = INDEX(port);

	/*
	 * Configure an RGMII port
	 */
	octeon_rgmx_prtx_cfg_t gmx_cfg;

	/* Enable this port's bit in the ASX RX and TX port-enable masks. */
	oct_write64(OCTEON_ASXX_RX_PRT_EN(iface), oct_read64(OCTEON_ASXX_RX_PRT_EN(iface)) | (1<<index));
	oct_write64(OCTEON_ASXX_TX_PRT_EN(iface), oct_read64(OCTEON_ASXX_TX_PRT_EN(iface)) | (1<<index));

	gmx_cfg.word64 = oct_read64(OCTEON_RGMX_PRTX_CFG(index, iface));
	gmx_cfg.bits.en = 1;
	oct_write64(OCTEON_RGMX_PRTX_CFG(index, iface), gmx_cfg.word64);

	/* Initial speed programming; no link-state report at init time. */
	octeon_rgmx_config_speed(port, 0);

	oct_write64(OCTEON_RGMX_TXX_THRESH(index, iface), 32);

	oct_write64(OCTEON_ASXX_TX_HI_WATERX(index, iface), 10);
	/* CN5020 uses different TX/RX clock delay settings. */
	if (octeon_get_chipid() == OCTEON_CN5020_CHIP) {
		oct_write64(OCTEON_ASXX_TX_CLK_SETX(index, iface), 16);
		oct_write64(OCTEON_ASXX_RX_CLK_SETX(index, iface), 16);
	oct_write64(OCTEON_ASXX_TX_CLK_SETX(index, iface), 24);
	oct_write64(OCTEON_ASXX_RX_CLK_SETX(index, iface), 24);
2195 static void octeon_rgmx_enable_RED_queue (int queue, int slow_drop, int all_drop)
2197 octeon_rgmx_ipd_queue_red_marks_t red_marks;
2198 octeon_rgmx_ipd_red_q_param_t red_param;
2200 if (slow_drop == all_drop) { printf("Bad val in %s", __FUNCTION__); return; }
2201 red_marks.word64 = 0;
2202 red_marks.bits.all_drop = all_drop;
2203 red_marks.bits.slow_drop = slow_drop;
2204 oct_write64(OCTEON_IPD_QOSX_RED_MARKS(queue), red_marks.word64);
2206 /* Use the actual queue 0 counter, not the average */
2207 red_param.word64 = 0;
2208 red_param.bits.prb_con = (255ul << 24) / (slow_drop - all_drop);
2209 red_param.bits.avg_con = 1;
2210 red_param.bits.new_con = 255;
2211 red_param.bits.use_pagecount = 1;
2212 oct_write64(OCTEON_IPD_RED_Q_PARAM(queue), red_param.word64);
/*
 * Enable RED across the chip: clear per-port back-pressure settings,
 * program RED for each of the 8 QOS queues, then enable RED on all
 * ports with the given averaging/probability delays.
 */
static void octeon_rgmx_enable_RED_all (int slow_drop, int all_drop)

	octeon_ipd_port_bp_page_count_t ipd_bp_page_count;
	octeon_ipd_red_port_enable_t red_port_enable;

	/*
	 * First remove BP settings
	 */
	ipd_bp_page_count.word64 = 0;
	ipd_bp_page_count.bits.bp_enable = 0;
	ipd_bp_page_count.bits.page_count = 100;

	/* NOTE(review): loop bound is '< OCTEON_RGMX_MAX_PORT', which
	 * skips the last port number itself -- confirm whether the bound
	 * should be inclusive. */
	for (port = 0; port < OCTEON_RGMX_MAX_PORT; port++) {
		oct_write64(OCTEON_IPD_PORT_BP_PAGE_COUNT(port), ipd_bp_page_count.word64);

	/*
	 * Enable RED for each individual queue
	 */
	for (queue = 0; queue < 8; queue++) {
		octeon_rgmx_enable_RED_queue(queue, slow_drop, all_drop);

	oct_write64(OCTEON_IPD_BP_PORT_RED_END, 0);

	red_port_enable.word64 = 0;
	red_port_enable.bits.port_enable = 0xfffffffffull;
	red_port_enable.bits.avg_dly = 10000;
	red_port_enable.bits.prb_dly = 10000;
	oct_write64(OCTEON_IPD_RED_PORT_ENABLE, red_port_enable.word64);
/* ------------------------------------------------------------------- *
 *                          octeon_has_4ports()                        *
 * ------------------------------------------------------------------- */
/*
 * Classify the chip by id to decide how many RGMII ports it exposes.
 * CN31XX/CN30XX/CN5020 are grouped together in the switch; the return
 * values for each case are not visible in this excerpt -- presumably
 * these smaller parts return false (fewer than 4 ports); confirm
 * against the chip datasheets.
 */
static int octeon_has_4ports (void)

	chipid = octeon_get_chipid();

	case OCTEON_CN31XX_CHIP:
	case OCTEON_CN30XX_CHIP:
	case OCTEON_CN5020_CHIP:
/*
 * octeon_rgmx_free_intr
 *
 * We have 4 child and one parent device.
 * It's tricky and unexpected that anyone will detach the device that is built'in on
 * We will not support detachment for now. But keep adding good code that will be used
 */
/*
 * Tear down the shared interrupt handler and drain the ISR taskqueues.
 * Must be called with the parent (root) softc/device, not an rgmxN child.
 */
static void octeon_rgmx_free_intr (struct rgmx_softc_dev *sc)

	device_t dev = sc->sc_dev;

	/*
	 * Make sure that sc/dev are the parent Root structs. Not one
	 * of the rgmxN childs.
	 */
	if (int_handler_tag != NULL) {
		bus_teardown_intr(dev, irq_res, int_handler_tag);
		int_handler_tag = NULL;

#ifdef OCTEON_RGMX_SCHEDULED_ISRS
	/* Wait for any in-flight rx/tx and link ISR tasks to finish. */
	taskqueue_drain(tq, &rxtx_isr_task);
	taskqueue_drain(taskqueue_fast, &link_isr_task);
/* Newbus glue: method table, driver and devclass for the "rgmii" driver,
 * attached under the nexus bus. */
static device_method_t rgmii_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		rgmii_probe),
	DEVMETHOD(device_identify,	rgmii_identify),
	DEVMETHOD(device_attach,	rgmii_attach),
	DEVMETHOD(device_detach,	bus_generic_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),

static driver_t rgmii_driver = {
	"rgmii", rgmii_methods, sizeof(struct rgmx_softc_dev)

static devclass_t rgmii_devclass;

DRIVER_MODULE(rgmii, nexus, rgmii_driver, rgmii_devclass, 0, 0);