2 * Copyright (c) 2003-2009 RMI Corporation
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of RMI Corporation, nor the names of its contributors,
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 #ifdef HAVE_KERNEL_OPTION_HEADERS
36 #include "opt_device_polling.h"
39 #include <sys/types.h>
40 #include <sys/endian.h>
41 #include <sys/systm.h>
42 #include <sys/sockio.h>
43 #include <sys/param.h>
45 #include <sys/mutex.h>
47 #include <sys/limits.h>
50 #include <sys/malloc.h>
51 #include <sys/kernel.h>
52 #include <sys/module.h>
53 #include <sys/socket.h>
54 #define __RMAN_RESOURCE_VISIBLE
56 #include <sys/taskqueue.h>
58 #include <sys/sysctl.h>
61 #include <net/if_arp.h>
62 #include <net/ethernet.h>
63 #include <net/if_dl.h>
64 #include <net/if_media.h>
67 #include <net/if_types.h>
68 #include <net/if_vlan_var.h>
70 #include <netinet/in_systm.h>
71 #include <netinet/in.h>
72 #include <netinet/ip.h>
77 #include <machine/reg.h>
78 #include <machine/cpu.h>
79 #include <machine/mips_opcode.h>
80 #include <machine/asm.h>
82 #include <machine/param.h>
83 #include <machine/intr_machdep.h>
84 #include <machine/clock.h> /* for DELAY */
85 #include <machine/cpuregs.h>
86 #include <machine/bus.h> /* */
87 #include <machine/resource.h>
89 #include <dev/mii/mii.h>
90 #include <dev/mii/miivar.h>
91 #include <dev/mii/brgphyreg.h>
93 #include <mips/rmi/interrupt.h>
94 #include <mips/rmi/msgring.h>
95 #include <mips/rmi/iomap.h>
96 #include <mips/rmi/pic.h>
97 #include <mips/rmi/rmi_mips_exts.h>
98 #include <mips/rmi/rmi_boot_info.h>
99 #include <mips/rmi/board.h>
101 #include <mips/rmi/dev/xlr/debug.h>
102 #include <mips/rmi/dev/xlr/atx_cpld.h>
103 #include <mips/rmi/dev/xlr/xgmac_mdio.h>
104 #include <mips/rmi/dev/xlr/rge.h>
106 #include "miibus_if.h"
108 MODULE_DEPEND(rge, ether, 1, 1, 1);
109 MODULE_DEPEND(rge, miibus, 1, 1, 1);
113 #define RGE_TX_THRESHOLD 1024
114 #define RGE_TX_Q_SIZE 1024
120 #define dbg_msg(fmt, args...) \
123 printf("[%s@%d|%s]: cpu_%d: " fmt, \
124 __FILE__, __LINE__, __FUNCTION__, xlr_cpu_id(), ##args);\
131 #define dbg_msg(fmt, args...)
136 #define MAC_B2B_IPG 88
138 /* frame sizes need to be cacheline aligned */
139 #define MAX_FRAME_SIZE 1536
140 #define MAX_FRAME_SIZE_JUMBO 9216
142 #define MAC_SKB_BACK_PTR_SIZE SMP_CACHE_BYTES
144 #define BYTE_OFFSET 2
145 #define XLR_RX_BUF_SIZE (MAX_FRAME_SIZE+BYTE_OFFSET+MAC_PREPAD+MAC_SKB_BACK_PTR_SIZE+SMP_CACHE_BYTES)
146 #define MAC_CRC_LEN 4
147 #define MAX_NUM_MSGRNG_STN_CC 128
149 #define MAX_NUM_DESC 1024
150 #define MAX_SPILL_SIZE (MAX_NUM_DESC + 128)
152 #define MAC_FRIN_TO_BE_SENT_THRESHOLD 16
154 #define MAX_FRIN_SPILL (MAX_SPILL_SIZE << 2)
155 #define MAX_FROUT_SPILL (MAX_SPILL_SIZE << 2)
156 #define MAX_CLASS_0_SPILL (MAX_SPILL_SIZE << 2)
157 #define MAX_CLASS_1_SPILL (MAX_SPILL_SIZE << 2)
158 #define MAX_CLASS_2_SPILL (MAX_SPILL_SIZE << 2)
159 #define MAX_CLASS_3_SPILL (MAX_SPILL_SIZE << 2)
161 /*****************************************************************
162 * Phoenix Generic Mac driver
163 *****************************************************************/
165 extern uint32_t cpu_ltop_map[32];
168 static int port_counters[4][8] __aligned(XLR_CACHELINE_SIZE);
170 #define port_inc_counter(port, counter) atomic_add_int(&port_counters[port][(counter)], 1)
172 #define port_inc_counter(port, counter) /* Nothing */
175 int xlr_rge_tx_prepend[MAXCPU];
176 int xlr_rge_tx_done[MAXCPU];
177 int xlr_rge_get_p2d_failed[MAXCPU];
178 int xlr_rge_msg_snd_failed[MAXCPU];
179 int xlr_rge_tx_ok_done[MAXCPU];
180 int xlr_rge_rx_done[MAXCPU];
181 int xlr_rge_repl_done[MAXCPU];
183 /* #define mac_stats_add(x, val) ({(x) += (val);}) */
184 #define mac_stats_add(x, val) xlr_ldaddwu(val, &x)
186 #define XLR_MAX_CORE 8
187 #define RGE_LOCK_INIT(_sc, _name) \
188 mtx_init(&(_sc)->rge_mtx, _name, MTX_NETWORK_LOCK, MTX_DEF)
189 #define RGE_LOCK(_sc) mtx_lock(&(_sc)->rge_mtx)
190 #define RGE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->rge_mtx, MA_OWNED)
191 #define RGE_UNLOCK(_sc) mtx_unlock(&(_sc)->rge_mtx)
192 #define RGE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rge_mtx)
194 #define XLR_MAX_MACS 8
195 #define XLR_MAX_TX_FRAGS 14
196 #define MAX_P2D_DESC_PER_PORT 512
198 uint64_t frag[XLR_MAX_TX_FRAGS + 2];
201 #define MAX_TX_RING_SIZE (XLR_MAX_MACS * MAX_P2D_DESC_PER_PORT * sizeof(struct p2d_tx_desc))
203 struct rge_softc *dev_mac[XLR_MAX_MACS];
204 static int dev_mac_xgs0;
205 static int dev_mac_gmac0;
207 static int gmac_common_init_done;
210 static int rge_probe(device_t);
211 static int rge_attach(device_t);
212 static int rge_detach(device_t);
213 static int rge_suspend(device_t);
214 static int rge_resume(device_t);
215 static void rge_release_resources(struct rge_softc *);
216 static void rge_rx(struct rge_softc *, vm_paddr_t paddr, int);
217 static void rge_intr(void *);
218 static void rge_start_locked(struct ifnet *, int);
219 static void rge_start(struct ifnet *);
220 static int rge_ioctl(struct ifnet *, u_long, caddr_t);
221 static void rge_init(void *);
222 static void rge_stop(struct rge_softc *);
223 static int rge_shutdown(device_t);
224 static void rge_reset(struct rge_softc *);
226 static struct mbuf *get_mbuf(void);
227 static void free_buf(vm_paddr_t paddr);
228 static void *get_buf(void);
230 static void xlr_mac_get_hwaddr(struct rge_softc *);
231 static void xlr_mac_setup_hwaddr(struct driver_data *);
232 static void rmi_xlr_mac_set_enable(struct driver_data *priv, int flag);
233 static void rmi_xlr_xgmac_init(struct driver_data *priv);
234 static void rmi_xlr_gmac_init(struct driver_data *priv);
235 static void mac_common_init(void);
236 static int rge_mii_write(device_t, int, int, int);
237 static int rge_mii_read(device_t, int, int);
238 static void rmi_xlr_mac_mii_statchg(device_t);
239 static int rmi_xlr_mac_mediachange(struct ifnet *);
240 static void rmi_xlr_mac_mediastatus(struct ifnet *, struct ifmediareq *);
241 static void xlr_mac_set_rx_mode(struct rge_softc *sc);
243 rmi_xlr_mac_msgring_handler(int bucket, int size, int code,
244 int stid, struct msgrng_msg *msg,
246 static void mac_frin_replenish(void *);
247 static int rmi_xlr_mac_open(struct rge_softc *);
248 static int rmi_xlr_mac_close(struct rge_softc *);
250 mac_xmit(struct mbuf *, struct rge_softc *,
251 struct driver_data *, int, struct p2d_tx_desc *);
252 static int rmi_xlr_mac_xmit(struct mbuf *, struct rge_softc *, int, struct p2d_tx_desc *);
253 static struct rge_softc_stats *rmi_xlr_mac_get_stats(struct rge_softc *sc);
254 static void rmi_xlr_mac_set_multicast_list(struct rge_softc *sc);
255 static int rmi_xlr_mac_change_mtu(struct rge_softc *sc, int new_mtu);
256 static int rmi_xlr_mac_fill_rxfr(struct rge_softc *sc);
257 static void rmi_xlr_config_spill_area(struct driver_data *priv);
258 static int rmi_xlr_mac_set_speed(struct driver_data *s, xlr_mac_speed_t speed);
260 rmi_xlr_mac_set_duplex(struct driver_data *s,
261 xlr_mac_duplex_t duplex, xlr_mac_fc_t fc);
262 static void serdes_regs_init(struct driver_data *priv);
263 static int rmi_xlr_gmac_reset(struct driver_data *priv);
266 static int get_p2d_desc_failed = 0;
267 static int msg_snd_failed = 0;
269 SYSCTL_INT(_hw, OID_AUTO, get_p2d_failed, CTLFLAG_RW,
270 &get_p2d_desc_failed, 0, "p2d desc failed");
271 SYSCTL_INT(_hw, OID_AUTO, msg_snd_failed, CTLFLAG_RW,
272 &msg_snd_failed, 0, "msg snd failed");
274 struct callout xlr_tx_stop_bkp;
276 static device_method_t rge_methods[] = {
277 /* Device interface */
278 DEVMETHOD(device_probe, rge_probe),
279 DEVMETHOD(device_attach, rge_attach),
280 DEVMETHOD(device_detach, rge_detach),
281 DEVMETHOD(device_shutdown, rge_shutdown),
282 DEVMETHOD(device_suspend, rge_suspend),
283 DEVMETHOD(device_resume, rge_resume),
286 DEVMETHOD(miibus_readreg, rge_mii_read),
287 DEVMETHOD(miibus_statchg, rmi_xlr_mac_mii_statchg),
288 DEVMETHOD(miibus_writereg, rge_mii_write),
292 static driver_t rge_driver = {
295 sizeof(struct rge_softc)
298 static devclass_t rge_devclass;
300 DRIVER_MODULE(rge, iodi, rge_driver, rge_devclass, 0, 0);
301 DRIVER_MODULE(miibus, rge, miibus_driver, miibus_devclass, 0, 0);
307 #define STR(x) __STR(x)
310 void *xlr_tx_ring_mem;
312 struct tx_desc_node {
313 struct p2d_tx_desc *ptr;
314 TAILQ_ENTRY(tx_desc_node) list;
317 #define XLR_MAX_TX_DESC_NODES (XLR_MAX_MACS * MAX_P2D_DESC_PER_PORT)
318 struct tx_desc_node tx_desc_nodes[XLR_MAX_TX_DESC_NODES];
319 static volatile int xlr_tot_avail_p2d[XLR_MAX_CORE];
320 static int xlr_total_active_core = 0;
323 * This should contain the list of all free tx frag desc nodes pointing to tx
327 TAILQ_HEAD(, tx_desc_node) tx_frag_desc[XLR_MAX_CORE] =
329 TAILQ_HEAD_INITIALIZER(tx_frag_desc[0]),
330 TAILQ_HEAD_INITIALIZER(tx_frag_desc[1]),
331 TAILQ_HEAD_INITIALIZER(tx_frag_desc[2]),
332 TAILQ_HEAD_INITIALIZER(tx_frag_desc[3]),
333 TAILQ_HEAD_INITIALIZER(tx_frag_desc[4]),
334 TAILQ_HEAD_INITIALIZER(tx_frag_desc[5]),
335 TAILQ_HEAD_INITIALIZER(tx_frag_desc[6]),
336 TAILQ_HEAD_INITIALIZER(tx_frag_desc[7]),
339 /* This contains a list of free tx frag node descriptors */
341 TAILQ_HEAD(, tx_desc_node) free_tx_frag_desc[XLR_MAX_CORE] =
343 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[0]),
344 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[1]),
345 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[2]),
346 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[3]),
347 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[4]),
348 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[5]),
349 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[6]),
350 TAILQ_HEAD_INITIALIZER(free_tx_frag_desc[7]),
353 static struct mtx tx_desc_lock[XLR_MAX_CORE];
/*
 * Build a free-descriptor ("RFR") message-ring message returning the buffer
 * at physical address 'addr' to the MAC.  Only msg0 carries the address
 * (masked to a 40-bit, 32-byte-aligned value); msg1-msg3 are cleared.
 * NOTE(review): the parameter list and braces are truncated in this view.
 */
356 mac_make_desc_rfr(struct msgrng_msg *msg,
359 msg->msg0 = (uint64_t) addr & 0xffffffffe0ULL;
360 msg->msg1 = msg->msg2 = msg->msg3 = 0;
363 #define MAC_TX_DESC_ALIGNMENT (XLR_CACHELINE_SIZE - 1)
/*
 * Count the active cores (from xlr_hw_thread_mask, 4 hw threads per core)
 * and split the pool of TX p2d descriptor nodes evenly among them.
 * NOTE(review): several lines (declarations, braces) are missing from
 * this view of the function.
 */
366 init_p2d_allocation(void)
368 int active_core[8] = {0};
373 cpumask = xlr_hw_thread_mask;
375 for (i = 0; i < 32; i++) {
376 if (cpumask & (1 << i)) {
378 if (!active_core[cpu / 4]) {
379 active_core[cpu / 4] = 1;
380 xlr_total_active_core++;
384 for (i = 0; i < XLR_MAX_CORE; i++) {
/* Even per-core share of the XLR_MAX_TX_DESC_NODES descriptor pool. */
386 xlr_tot_avail_p2d[i] = XLR_MAX_TX_DESC_NODES / xlr_total_active_core;
388 printf("Total Active Core %d\n", xlr_total_active_core);
/*
 * TX ring initialization (function header is missing from this view;
 * presumably init_tx_ring).  Allocates a physically contiguous,
 * cacheline-aligned descriptor region below 256MB, maps it through
 * KSEG0 (unmapped/uncached-coherent access), and threads the
 * tx_desc_nodes entries onto the per-core tx_frag_desc free lists.
 */
397 struct tx_desc_node *start, *node;
398 struct p2d_tx_desc *tx_desc;
400 vm_offset_t unmapped_addr;
/* One spin mutex per core protects that core's descriptor lists. */
402 for (i = 0; i < XLR_MAX_CORE; i++)
403 mtx_init(&tx_desc_lock[i], "xlr tx_desc", NULL, MTX_SPIN);
405 start = &tx_desc_nodes[0];
406 /* TODO: try to get this from KSEG0 */
407 xlr_tx_ring_mem = contigmalloc((MAX_TX_RING_SIZE + XLR_CACHELINE_SIZE),
408 M_DEVBUF, M_NOWAIT | M_ZERO, 0,
409 0x10000000, XLR_CACHELINE_SIZE, 0);
411 if (xlr_tx_ring_mem == NULL) {
412 panic("TX ring memory allocation failed");
414 paddr = vtophys((vm_offset_t)xlr_tx_ring_mem);
416 unmapped_addr = MIPS_PHYS_TO_KSEG0(paddr);
419 tx_desc = (struct p2d_tx_desc *)unmapped_addr;
421 for (i = 0; i < XLR_MAX_TX_DESC_NODES; i++) {
/* NOTE(review): 'j' appears to be used before this update in the loop
 * as shown — intervening lines are missing from this view; verify
 * against the full source before relying on the ordering. */
425 TAILQ_INSERT_HEAD(&tx_frag_desc[j], node, list);
426 j = (i / (XLR_MAX_TX_DESC_NODES / xlr_total_active_core));
/*
 * Pop a free p2d TX descriptor for the calling core (name line missing
 * from this view; presumably get_p2d_desc).  On success the node moves
 * from tx_frag_desc[] to free_tx_frag_desc[] and the per-core available
 * count is decremented; on failure the global fail counter is bumped.
 * Protected by the per-core spin mutex.
 */
430 static inline struct p2d_tx_desc *
433 struct tx_desc_node *node;
434 struct p2d_tx_desc *tx_desc = NULL;
435 int cpu = xlr_core_id();
437 mtx_lock_spin(&tx_desc_lock[cpu]);
438 node = TAILQ_FIRST(&tx_frag_desc[cpu]);
440 xlr_tot_avail_p2d[cpu]--;
441 TAILQ_REMOVE(&tx_frag_desc[cpu], node, list);
443 TAILQ_INSERT_HEAD(&free_tx_frag_desc[cpu], node, list);
445 /* Increment p2d desc fail count */
446 get_p2d_desc_failed++;
448 mtx_unlock_spin(&tx_desc_lock[cpu]);
/*
 * Return a p2d TX descriptor to the calling core's free pool: the node
 * is moved back from free_tx_frag_desc[] to tx_frag_desc[] and the
 * per-core available count incremented, under the per-core spin mutex.
 * NOTE(review): braces/return lines are missing from this view.
 */
452 free_p2d_desc(struct p2d_tx_desc *tx_desc)
454 struct tx_desc_node *node;
455 int cpu = xlr_core_id();
457 mtx_lock_spin(&tx_desc_lock[cpu]);
458 node = TAILQ_FIRST(&free_tx_frag_desc[cpu]);
459 KASSERT((node != NULL), ("Free TX frag node list is empty\n"));
461 TAILQ_REMOVE(&free_tx_frag_desc[cpu], node, list);
463 TAILQ_INSERT_HEAD(&tx_frag_desc[cpu], node, list);
464 xlr_tot_avail_p2d[cpu]++;
465 mtx_unlock_spin(&tx_desc_lock[cpu]);
/*
 * Walk the mbuf chain and build the p2d fragment descriptor list for a
 * TX packet.  Each frag word packs: length in bits 40.., physical
 * address in the low bits, and station/control bits in the top byte
 * (127 appears to be the free-back station ID field).  A segment that
 * crosses a non-contiguous page boundary is split into two frags.
 * The back-pointers to the descriptor and to m_head are stashed in the
 * two spare slots past XLR_MAX_TX_FRAGS so release_tx_desc() can
 * recover them.  NOTE(review): several declaration/brace lines are
 * missing from this view.
 */
470 build_frag_list(struct mbuf *m_head, struct msgrng_msg *p2p_msg, struct p2d_tx_desc *tx_desc)
/* Free-back station: (core << 3) + thread + 4 — where TX-done msgs go. */
481 fr_stid = (xlr_core_id() << 3) + xlr_thr_id() + 4;
487 for (m = m_head; m != NULL; m = m->m_next) {
488 if ((nfrag + 1) >= XLR_MAX_TX_FRAGS) {
/* Too many fragments: give the descriptor back and bail out. */
489 free_p2d_desc(tx_desc);
493 paddr = vtophys(mtod(m, vm_offset_t));
494 p1 = paddr + m->m_len;
495 p2 = vtophys(((vm_offset_t)m->m_data + m->m_len));
/* len1 = bytes up to the end of the first physical page. */
498 (PAGE_SIZE - (paddr & PAGE_MASK));
499 tx_desc->frag[nfrag] = (127ULL << 54) |
500 ((uint64_t) len1 << 40) | paddr;
502 taddr = (vm_offset_t)m->m_data + len1;
504 len2 = m->m_len - len1;
507 if (nfrag >= XLR_MAX_TX_FRAGS)
508 panic("TX frags exceeded");
/* Second frag: the remainder of a page-crossing segment. */
510 tx_desc->frag[nfrag] = (127ULL << 54) |
511 ((uint64_t) len2 << 40) | p2;
/* Sanity: both halves must be physically contiguous end-to-end. */
516 if ((p2 + len2) != p1) {
517 printf("p1 = %p p2 = %p\n", (void *)p1, (void *)p2);
518 printf("len1 = %x len2 = %x\n", len1,
520 printf("m_data %p\n", m->m_data);
522 panic("Multiple Mbuf segment discontiguous\n");
525 tx_desc->frag[nfrag] = (127ULL << 54) |
526 ((uint64_t) m->m_len << 40) | paddr;
531 /* set eop in the last tx p2d desc */
532 tx_desc->frag[nfrag - 1] |= (1ULL << 63);
533 paddr = vtophys((vm_offset_t)tx_desc);
/* Trailing frag: EOP + free-back station so the desc is returned. */
534 tx_desc->frag[nfrag] = (1ULL << 63) | (fr_stid << 54) | paddr;
/* Spare slots: KVA back-pointers used by release_tx_desc(). */
536 tx_desc->frag[XLR_MAX_TX_FRAGS] = (uint64_t)(intptr_t)tx_desc;
537 tx_desc->frag[XLR_MAX_TX_FRAGS + 1] = (uint64_t)(intptr_t)m_head;
539 p2d_len = (nfrag * 8);
/* P2P message pointing at the frag list (byte length + phys addr). */
540 p2p_msg->msg0 = (1ULL << 63) | (1ULL << 62) | (127ULL << 54) |
541 (p2d_len << 40) | paddr;
/*
 * TX-done path: recover the p2d descriptor from the message's physical
 * address (via KSEG0), validate it against the stashed back-pointer,
 * recover the mbuf chain, and return the descriptor to the pool.
 * 'rel_buf' presumably controls whether the mbuf is freed — the lines
 * using it are missing from this view; confirm against full source.
 */
546 release_tx_desc(struct msgrng_msg *msg, int rel_buf)
548 struct p2d_tx_desc *tx_desc, *chk_addr;
551 tx_desc = (struct p2d_tx_desc *)MIPS_PHYS_TO_KSEG0(msg->msg0);
/* Cross-check against the KVA stored by build_frag_list(). */
552 chk_addr = (struct p2d_tx_desc *)(intptr_t)tx_desc->frag[XLR_MAX_TX_FRAGS];
553 if (tx_desc != chk_addr) {
554 printf("Address %p does not match with stored addr %p - we leaked a descriptor\n",
559 m = (struct mbuf *)(intptr_t)tx_desc->frag[XLR_MAX_TX_FRAGS + 1];
562 free_p2d_desc(tx_desc);
/*
 * Allocate a cluster mbuf for RX (function header missing from this
 * view; presumably get_mbuf).  NOTE(review): the first m_len assignment
 * is immediately overwritten by the combined m_len/m_pkthdr.len
 * assignment on the next line — the first looks redundant; verify
 * against the full source before removing.
 */
569 struct mbuf *m_new = NULL;
571 if ((m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL)
574 m_new->m_len = MCLBYTES;
575 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
/*
 * Free an RX buffer given only its physical address.  The owning mbuf
 * pointer and a 0xf00bad magic cookie were stored in the cacheline
 * preceding the buffer (see get_buf()); read them back with KX-enabled
 * 64-bit physical loads and sanity-check the magic before freeing.
 * NOTE(review): declarations and the actual free path are missing from
 * this view.
 */
580 free_buf(vm_paddr_t paddr)
/* Enable 64-bit (KX) addressing for the physical-address loads. */
586 sr = xlr_enable_kx();
587 m = (struct mbuf *)(intptr_t)xlr_paddr_ld(paddr - XLR_CACHELINE_SIZE);
588 mag = xlr_paddr_ld(paddr - XLR_CACHELINE_SIZE + sizeof(uint64_t));
590 if (mag != 0xf00bad) {
591 printf("Something is wrong kseg:%lx found mag:%lx not 0xf00bad\n",
592 (u_long)paddr, (u_long)mag);
/*
 * Allocate an RX buffer for the MAC (function header missing from this
 * view; presumably get_buf).  Aligns m_data to a cacheline, stores a
 * back-pointer to the mbuf (and, per free_buf(), a magic cookie) in the
 * leading cacheline, then advances past it so the MAC never overwrites
 * the metadata.  Returns the data pointer, not the mbuf.
 */
602 struct mbuf *m_new = NULL;
605 vm_paddr_t temp1, temp2;
/* Round m_data up to the next 32-byte cacheline boundary. */
612 m_adj(m_new, XLR_CACHELINE_SIZE - ((uintptr_t)m_new->m_data & 0x1f));
613 md = (uint64_t *)m_new->m_data;
614 md[0] = (uintptr_t)m_new; /* Back Ptr */
/* Skip the metadata cacheline; buffer proper starts here. */
616 m_adj(m_new, XLR_CACHELINE_SIZE);
/* Paranoia: the 1536-byte frame area must be physically contiguous. */
619 temp1 = vtophys((vm_offset_t)m_new->m_data);
620 temp2 = vtophys((vm_offset_t)m_new->m_data + 1536);
621 if ((temp1 + 1536) != temp2)
622 panic("ALLOCED BUFFER IS NOT CONTIGUOUS\n");
624 return (void *)m_new->m_data;
627 /**********************************************************************
628 **********************************************************************/
/*
 * Enable or disable the MAC's TX/RX paths ('flag' selects which).
 * Sets/clears TxEnable + threshold in R_TX_CONTROL, RxEnable (and the
 * RGMII bit for port 0 in RGMII mode) in R_RX_CONTROL, and the txen/
 * rxen bits in R_MAC_CONFIG_1.  NOTE(review): the if/else split on
 * 'flag' between the enable and disable halves is missing from this
 * view — the two register sequences below are the two branches.
 */
630 rmi_xlr_mac_set_enable(struct driver_data *priv, int flag)
633 int tx_threshold = 1518;
/* --- enable path --- */
636 regval = xlr_read_reg(priv->mmio, R_TX_CONTROL);
637 regval |= (1 << O_TX_CONTROL__TxEnable) |
638 (tx_threshold << O_TX_CONTROL__TxThreshold);
640 xlr_write_reg(priv->mmio, R_TX_CONTROL, regval);
642 regval = xlr_read_reg(priv->mmio, R_RX_CONTROL);
643 regval |= 1 << O_RX_CONTROL__RxEnable;
644 if (priv->mode == XLR_PORT0_RGMII)
645 regval |= 1 << O_RX_CONTROL__RGMII;
646 xlr_write_reg(priv->mmio, R_RX_CONTROL, regval);
648 regval = xlr_read_reg(priv->mmio, R_MAC_CONFIG_1);
649 regval |= (O_MAC_CONFIG_1__txen | O_MAC_CONFIG_1__rxen);
650 xlr_write_reg(priv->mmio, R_MAC_CONFIG_1, regval);
/* --- disable path --- */
652 regval = xlr_read_reg(priv->mmio, R_TX_CONTROL);
653 regval &= ~((1 << O_TX_CONTROL__TxEnable) |
654 (tx_threshold << O_TX_CONTROL__TxThreshold));
656 xlr_write_reg(priv->mmio, R_TX_CONTROL, regval);
658 regval = xlr_read_reg(priv->mmio, R_RX_CONTROL);
659 regval &= ~(1 << O_RX_CONTROL__RxEnable);
660 xlr_write_reg(priv->mmio, R_RX_CONTROL, regval);
662 regval = xlr_read_reg(priv->mmio, R_MAC_CONFIG_1);
663 regval &= ~(O_MAC_CONFIG_1__txen | O_MAC_CONFIG_1__rxen);
664 xlr_write_reg(priv->mmio, R_MAC_CONFIG_1, regval);
668 /**********************************************************************
669 **********************************************************************/
/*
 * Return a free RX buffer to the MAC's free-in bucket via the message
 * ring.  Builds an RFR message for 'addr' and sends it to the port's
 * rfrbucket station; XGMAC and GMAC use different message codes.  The
 * KASSERT bounds the retry count when credits are exhausted (the retry
 * loop itself is not visible in this view).  'len' is unused here as
 * shown.
 */
670 static __inline__ int
671 xlr_mac_send_fr(struct driver_data *priv,
672 vm_paddr_t addr, int len)
674 struct msgrng_msg msg;
675 int stid = priv->rfrbucket;
677 uint32_t msgrng_flags;
682 mac_make_desc_rfr(&msg, addr);
684 /* Send the packet to MAC */
685 dbg_msg("mac_%d: Sending free packet %lx to stid %d\n",
686 priv->instance, (u_long)addr, stid);
687 if (priv->type == XLR_XGMAC)
688 code = MSGRNG_CODE_XGMAC; /* WHY? */
690 code = MSGRNG_CODE_MAC;
/* Message-ring access must be bracketed by enable/restore. */
693 msgrng_flags = msgrng_access_enable();
694 ret = message_send(1, code, stid, &msg);
695 msgrng_restore(msgrng_flags);
696 KASSERT(i++ < 100000, ("Too many credit fails\n"));
702 /**************************************************************/
/*
 * Take the four XGMAC PHY lanes out of isolate mode: for MDIO registers
 * 0x8000-0x8003 on device 1, clear bit 13 (isolate) and write back.
 * NOTE(review): return type, declarations and braces are missing from
 * this view.
 */
705 xgmac_mdio_setup(volatile unsigned int *_mmio)
710 for (i = 0; i < 4; i++) {
711 rd_data = xmdio_read(_mmio, 1, 0x8000 + i);
712 rd_data = rd_data & 0xffffdfff; /* clear isolate bit */
713 xmdio_write(_mmio, 1, 0x8000 + i, rd_data);
717 /**********************************************************************
722 ********************************************************************* */
723 #define PHY_STATUS_RETRIES 25000
/*
 * Initialize the MII management interface: program the management
 * clock divisor (0x07 selects the lowest clock, divisor 28).
 */
726 rmi_xlr_mac_mii_init(struct driver_data *priv)
728 xlr_reg_t *mii_mmio = priv->mii_mmio;
730 /* use the lowest clock divisor - divisor 28 */
731 xlr_write_reg(mii_mmio, R_MII_MGMT_CONFIG, 0x07);
734 /**********************************************************************
735 * Read a PHY register.
739 * phyaddr - PHY's address
740 * regidx = index of register to read
743 * value read, or 0 if an error occurred.
744 ********************************************************************* */
/*
 * Read one PHY register over MII management: program the PHY/register
 * address, issue a read-status command, poll the indicators register
 * until idle (bounded by PHY_STATUS_RETRIES), clear the command, and
 * return the data.  The timeout branch at i == PHY_STATUS_RETRIES is
 * truncated in this view (presumably returns an error/0).
 */
747 rge_mii_read_internal(xlr_reg_t * mii_mmio, int phyaddr, int regidx)
751 /* setup the phy reg to be used */
752 xlr_write_reg(mii_mmio, R_MII_MGMT_ADDRESS,
753 (phyaddr << 8) | (regidx << 0));
754 /* Issue the read command */
755 xlr_write_reg(mii_mmio, R_MII_MGMT_COMMAND,
756 (1 << O_MII_MGMT_COMMAND__rstat));
758 /* poll for the read cycle to complete */
759 for (i = 0; i < PHY_STATUS_RETRIES; i++) {
760 if (xlr_read_reg(mii_mmio, R_MII_MGMT_INDICATORS) == 0)
764 /* clear the read cycle */
765 xlr_write_reg(mii_mmio, R_MII_MGMT_COMMAND, 0);
767 if (i == PHY_STATUS_RETRIES) {
770 /* Read the data back */
771 return xlr_read_reg(mii_mmio, R_MII_MGMT_STATUS);
/* miibus readreg method: thin wrapper over rge_mii_read_internal()
 * using this device's MII management register window. */
775 rge_mii_read(device_t dev, int phyaddr, int regidx)
777 struct rge_softc *sc = device_get_softc(dev);
779 return rge_mii_read_internal(sc->priv.mii_mmio, phyaddr, regidx);
782 /**********************************************************************
783 * Set MII hooks to newly selected media
786 * ifp - Interface Pointer
790 ********************************************************************* */
/*
 * ifmedia change callback: if the interface is up, ask the MII layer
 * to apply the newly selected media.
 */
792 rmi_xlr_mac_mediachange(struct ifnet *ifp)
794 struct rge_softc *sc = ifp->if_softc;
796 if (ifp->if_flags & IFF_UP)
797 mii_mediachg(&sc->rge_mii);
802 /**********************************************************************
803 * Get the current interface media status
806 * ifp - Interface Pointer
807 * ifmr - Interface media request ptr
811 ********************************************************************* */
/*
 * ifmedia status callback: report media validity/activity.  The
 * condition guarding the IFM_ACTIVE bit is missing from this view
 * (presumably the link state check).
 */
813 rmi_xlr_mac_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
815 struct rge_softc *sc = ifp->if_softc;
817 /* Check whether this is interface is active or not. */
818 ifmr->ifm_status = IFM_AVALID;
820 ifmr->ifm_status |= IFM_ACTIVE;
822 ifmr->ifm_active = IFM_ETHER;
826 /**********************************************************************
827 * Write a value to a PHY register.
831 * phyaddr - PHY to use
832 * regidx - register within the PHY
833 * regval - data to write to register
837 ********************************************************************* */
/*
 * Write one PHY register over MII management: program the PHY/register
 * address, write the data (which starts the cycle), then poll the
 * indicators register until the write completes (bounded by
 * PHY_STATUS_RETRIES).  Tail of the function is truncated in this view.
 */
839 rge_mii_write_internal(xlr_reg_t * mii_mmio, int phyaddr, int regidx, int regval)
843 xlr_write_reg(mii_mmio, R_MII_MGMT_ADDRESS,
844 (phyaddr << 8) | (regidx << 0));
846 /* Write the data which starts the write cycle */
847 xlr_write_reg(mii_mmio, R_MII_MGMT_WRITE_DATA, regval);
849 /* poll for the write cycle to complete */
850 for (i = 0; i < PHY_STATUS_RETRIES; i++) {
851 if (xlr_read_reg(mii_mmio, R_MII_MGMT_INDICATORS) == 0)
/* miibus writereg method: thin wrapper over rge_mii_write_internal()
 * using this device's MII management register window. */
859 rge_mii_write(device_t dev, int phyaddr, int regidx, int regval)
861 struct rge_softc *sc = device_get_softc(dev);
863 rge_mii_write_internal(sc->priv.mii_mmio, phyaddr, regidx, regval);
/* miibus statchg method — empty as shown (body not visible here). */
868 rmi_xlr_mac_mii_statchg(struct device *dev)
/*
 * One-time SERDES control register setup, done through the MII-style
 * write path at PHY address 26, plus GPIO pokes that affect the SERDES
 * (acknowledged in the original as not fully understood).  On Arizona
 * XI/XII boards minor version 4, switch the reference clock from
 * 156.25MHz to 125MHz.
 */
873 serdes_regs_init(struct driver_data *priv)
875 xlr_reg_t *mmio_gpio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GPIO_OFFSET);
877 /* Initialize SERDES CONTROL Registers */
878 rge_mii_write_internal(priv->serdes_mmio, 26, 0, 0x6DB0);
879 rge_mii_write_internal(priv->serdes_mmio, 26, 1, 0xFFFF);
880 rge_mii_write_internal(priv->serdes_mmio, 26, 2, 0xB6D0);
881 rge_mii_write_internal(priv->serdes_mmio, 26, 3, 0x00FF);
882 rge_mii_write_internal(priv->serdes_mmio, 26, 4, 0x0000);
883 rge_mii_write_internal(priv->serdes_mmio, 26, 5, 0x0000);
884 rge_mii_write_internal(priv->serdes_mmio, 26, 6, 0x0005);
885 rge_mii_write_internal(priv->serdes_mmio, 26, 7, 0x0001);
886 rge_mii_write_internal(priv->serdes_mmio, 26, 8, 0x0000);
887 rge_mii_write_internal(priv->serdes_mmio, 26, 9, 0x0000);
888 rge_mii_write_internal(priv->serdes_mmio, 26, 10, 0x0000);
891 * GPIO setting which affect the serdes - needs figuring out
894 xlr_write_reg(mmio_gpio, 0x20, 0x7e6802);
895 xlr_write_reg(mmio_gpio, 0x10, 0x7104);
899 * This kludge is needed to setup serdes (?) clock correctly on some
902 if ((xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_XI ||
903 xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_XII) &&
904 xlr_boot1_info.board_minor_version == 4) {
905 /* use 125 Mhz instead of 156.25Mhz ref clock */
907 xlr_write_reg(mmio_gpio, 0x10, 0x7103);
908 xlr_write_reg(mmio_gpio, 0x21, 0x7103);
/*
 * Configure PCS-layer autonegotiation on the four SGMII lanes (PHY
 * addresses 27-30).  Each lane gets either 0x1000 (enable AN) or
 * 0x0200 written to register 0; the condition selecting between the
 * two writes per lane is missing from this view — confirm against the
 * full source.
 */
916 serdes_autoconfig(struct driver_data *priv)
920 /* Enable Auto negotiation in the PCS Layer */
921 rge_mii_write_internal(priv->pcs_mmio, 27, 0, 0x1000);
923 rge_mii_write_internal(priv->pcs_mmio, 27, 0, 0x0200);
926 rge_mii_write_internal(priv->pcs_mmio, 28, 0, 0x1000);
928 rge_mii_write_internal(priv->pcs_mmio, 28, 0, 0x0200);
931 rge_mii_write_internal(priv->pcs_mmio, 29, 0, 0x1000);
933 rge_mii_write_internal(priv->pcs_mmio, 29, 0, 0x0200);
936 rge_mii_write_internal(priv->pcs_mmio, 30, 0, 0x1000);
938 rge_mii_write_internal(priv->pcs_mmio, 30, 0, 0x0200);
943 /*****************************************************************
945 *****************************************************************/
/*
 * Program the Packet Distribution Engine: build a 64-bit bucket map
 * from the active hardware-thread mask (two buckets per core, at
 * (core >> 2) << 3... i.e. 3ULL shifted to the core's bucket pair) and
 * write it into all four PDE class registers, low word then high word.
 */
947 rmi_xlr_config_pde(struct driver_data *priv)
949 int i = 0, cpu = 0, bucket = 0;
950 uint64_t bucket_map = 0;
952 /* uint32_t desc_pack_ctrl = 0; */
958 * rge may be called before SMP start in a BOOTP/NFSROOT
959 * setup. we will distribute packets to other cpus only when
960 * the SMP is started.
963 cpumask = xlr_hw_thread_mask;
966 for (i = 0; i < MAXCPU; i++) {
967 if (cpumask & (1 << i)) {
/* Two message-ring buckets per active core. */
969 bucket = ((cpu >> 2) << 3);
970 bucket_map |= (3ULL << bucket);
973 printf("rmi_xlr_config_pde: bucket_map=%jx\n", (uintmax_t)bucket_map);
975 /* bucket_map = 0x1; */
976 xlr_write_reg(priv->mmio, R_PDE_CLASS_0, (bucket_map & 0xffffffff));
977 xlr_write_reg(priv->mmio, R_PDE_CLASS_0 + 1,
978 ((bucket_map >> 32) & 0xffffffff));
980 xlr_write_reg(priv->mmio, R_PDE_CLASS_1, (bucket_map & 0xffffffff));
981 xlr_write_reg(priv->mmio, R_PDE_CLASS_1 + 1,
982 ((bucket_map >> 32) & 0xffffffff));
984 xlr_write_reg(priv->mmio, R_PDE_CLASS_2, (bucket_map & 0xffffffff));
985 xlr_write_reg(priv->mmio, R_PDE_CLASS_2 + 1,
986 ((bucket_map >> 32) & 0xffffffff));
988 xlr_write_reg(priv->mmio, R_PDE_CLASS_3, (bucket_map & 0xffffffff));
989 xlr_write_reg(priv->mmio, R_PDE_CLASS_3 + 1,
990 ((bucket_map >> 32) & 0xffffffff));
/*
 * SYSINIT hook, run once all CPUs are up (SI_SUB_SMP): re-program the
 * PDE on every MAC so RX traffic is distributed to all cores, briefly
 * disabling each MAC around the update.  The dev_mac[] lookup and
 * NULL check between the loop head and the set_enable call are
 * missing from this view.
 */
994 rge_smp_update_pde(void *dummy __unused)
997 struct driver_data *priv;
998 struct rge_softc *sc;
1000 printf("Updating packet distribution for SMP\n");
1001 for (i = 0; i < XLR_MAX_MACS; i++) {
1006 rmi_xlr_mac_set_enable(priv, 0);
1007 rmi_xlr_config_pde(priv);
1008 rmi_xlr_mac_set_enable(priv, 1);
1012 SYSINIT(rge_smp_update_pde, SI_SUB_SMP, SI_ORDER_ANY, rge_smp_update_pde, NULL);
/*
 * Configure the RX parser.  R_L2TYPE_0 is written twice (0x00 then
 * 0x01) — an #if/#else presumably selected between "no classification"
 * and "L2 parsing" but is missing from this view.  The L3 extraction
 * table is set to pull the IP src/dst addresses and protocol field.
 */
1016 rmi_xlr_config_parser(struct driver_data *priv)
1019 * Mark it as no classification The parser extract is gauranteed to
1020 * be zero with no classfication
1022 xlr_write_reg(priv->mmio, R_L2TYPE_0, 0x00);
1024 xlr_write_reg(priv->mmio, R_L2TYPE_0, 0x01);
1026 /* configure the parser : L2 Type is configured in the bootloader */
1027 /* extract IP: src, dest protocol */
1028 xlr_write_reg(priv->mmio, R_L3CTABLE,
1029 (9 << 20) | (1 << 19) | (1 << 18) | (0x01 << 16) |
1031 xlr_write_reg(priv->mmio, R_L3CTABLE + 1,
1032 (12 << 25) | (4 << 21) | (16 << 14) | (4 << 10));
/*
 * Configure the classifier: zero the 64-entry XGMAC translation table
 * (its reset values are not sane) and clear the parser config so the
 * upper parser-extract bits index the table directly.
 */
1037 rmi_xlr_config_classifier(struct driver_data *priv)
1041 if (priv->type == XLR_XGMAC) {
1042 /* xgmac translation table doesn't have sane values on reset */
1043 for (i = 0; i < 64; i++)
1044 xlr_write_reg(priv->mmio, R_TRANSLATETABLE + i, 0x0);
1047 * use upper 7 bits of the parser extract to index the
1050 xlr_write_reg(priv->mmio, R_PARSERCONFIGREG, 0x0);
/* SGMII speed-select values for R_INTERFACE_CONTROL (enum head/tail
 * truncated in this view). */
1055 SGMII_SPEED_10 = 0x00000000,
1056 SGMII_SPEED_100 = 0x02000000,
1057 SGMII_SPEED_1000 = 0x04000000,
/*
 * Read link and speed from the PHY and program the MAC to match:
 * interface-control SGMII speed (skipped in RGMII mode), MAC_CONFIG_2
 * (0x7117 for 10/100, 0x7217 for 1000), core clock divider, and the
 * mirrored ifmedia state.  Unknown PHY speeds fall back to 100Mbps.
 * NOTE(review): several branch/brace lines are missing from this
 * view, including the link-down path around the final ifm_media reset.
 */
1061 rmi_xlr_gmac_config_speed(struct driver_data *priv)
1063 int phy_addr = priv->phy_addr;
1064 xlr_reg_t *mmio = priv->mmio;
1065 struct rge_softc *sc = priv->sc;
/* PHY reg 28 bits [4:3]: speed; reg 1 bit 2: link status. */
1067 priv->speed = rge_mii_read_internal(priv->mii_mmio, phy_addr, 28);
1068 priv->link = rge_mii_read_internal(priv->mii_mmio, phy_addr, 1) & 0x4;
1069 priv->speed = (priv->speed >> 3) & 0x03;
1071 if (priv->speed == xlr_mac_speed_10) {
1072 if (priv->mode != XLR_RGMII)
1073 xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_10);
1074 xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7117);
1075 xlr_write_reg(mmio, R_CORECONTROL, 0x02);
1076 printf("%s: [10Mbps]\n", device_get_nameunit(sc->rge_dev));
1077 sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_10_T | IFM_FDX;
1078 sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_10_T | IFM_FDX;
1079 sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_10_T | IFM_FDX;
1080 } else if (priv->speed == xlr_mac_speed_100) {
1081 if (priv->mode != XLR_RGMII)
1082 xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_100);
1083 xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7117);
1084 xlr_write_reg(mmio, R_CORECONTROL, 0x01);
1085 printf("%s: [100Mbps]\n", device_get_nameunit(sc->rge_dev));
1086 sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
1087 sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
1088 sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
/* Not 10/100: either an unknown speed (treat as 100) or 1000. */
1090 if (priv->speed != xlr_mac_speed_1000) {
1091 if (priv->mode != XLR_RGMII)
1092 xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_100);
1093 printf("PHY reported unknown MAC speed, defaulting to 100Mbps\n");
1094 xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7117);
1095 xlr_write_reg(mmio, R_CORECONTROL, 0x01);
1096 sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
1097 sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
1098 sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_100_TX | IFM_FDX;
1100 if (priv->mode != XLR_RGMII)
1101 xlr_write_reg(mmio, R_INTERFACE_CONTROL, SGMII_SPEED_1000);
1102 xlr_write_reg(mmio, R_MAC_CONFIG_2, 0x7217);
1103 xlr_write_reg(mmio, R_CORECONTROL, 0x00);
1104 printf("%s: [1000Mbps]\n", device_get_nameunit(sc->rge_dev));
1105 sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_1000_T | IFM_FDX;
1106 sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_1000_T | IFM_FDX;
1107 sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_1000_T | IFM_FDX;
/* Presumably the no-link path: reset media to bare Ethernet. */
1112 sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER;
1119 /*****************************************************************
1121 *****************************************************************/
/*
 * One-time init of a 10G (XGMAC) port: descriptor packing, byte
 * offset, PDE/parser/classifier setup, XGMAC and glue registers, CPLD
 * pokes to release the XGMII PHY from reset, MDIO de-isolate, spill
 * areas, per-instance message-ring bucket sizes and credit counters,
 * and a hard-coded 10G-SR full-duplex media state.  NOTE(review):
 * many lines (declarations, the id==0 branch head, struct member
 * paths in the bucket/credit writes, braces) are missing from this
 * view.
 */
1123 rmi_xlr_xgmac_init(struct driver_data *priv)
1126 xlr_reg_t *mmio = priv->mmio;
1127 int id = priv->instance;
1128 struct rge_softc *sc = priv->sc;
1129 volatile unsigned short *cpld;
/* Hard-wired KSEG1 (uncached) address of the board CPLD. */
1131 cpld = (volatile unsigned short *)0xBD840000;
1133 xlr_write_reg(priv->mmio, R_DESC_PACK_CTRL,
1134 (MAX_FRAME_SIZE << O_DESC_PACK_CTRL__RegularSize) | (4 << 20));
1135 xlr_write_reg(priv->mmio, R_BYTEOFFSET0, BYTE_OFFSET);
1136 rmi_xlr_config_pde(priv);
1137 rmi_xlr_config_parser(priv);
1138 rmi_xlr_config_classifier(priv);
1140 xlr_write_reg(priv->mmio, R_MSG_TX_THRESHOLD, 1);
1142 /* configure the XGMAC Registers */
1143 xlr_write_reg(mmio, R_XGMAC_CONFIG_1, 0x50000026);
1145 /* configure the XGMAC_GLUE Registers */
1146 xlr_write_reg(mmio, R_DMACR0, 0xffffffff);
1147 xlr_write_reg(mmio, R_DMACR1, 0xffffffff);
1148 xlr_write_reg(mmio, R_DMACR2, 0xffffffff);
1149 xlr_write_reg(mmio, R_DMACR3, 0xffffffff);
1150 xlr_write_reg(mmio, R_STATCTRL, 0x04);
1151 xlr_write_reg(mmio, R_L2ALLOCCTRL, 0xffffffff);
1153 xlr_write_reg(mmio, R_XGMACPADCALIBRATION, 0x030);
1154 xlr_write_reg(mmio, R_EGRESSFIFOCARVINGSLOTS, 0x0f);
1155 xlr_write_reg(mmio, R_L2ALLOCCTRL, 0xffffffff);
1156 xlr_write_reg(mmio, R_XGMAC_MIIM_CONFIG, 0x3e);
1159 * take XGMII phy out of reset
1162 * we are pulling everything out of reset because writing a 0 would
1163 * reset other devices on the chip
1165 cpld[ATX_CPLD_RESET_1] = 0xffff;
1166 cpld[ATX_CPLD_MISC_CTRL] = 0xffff;
1167 cpld[ATX_CPLD_RESET_2] = 0xffff;
1169 xgmac_mdio_setup(mmio);
1171 rmi_xlr_config_spill_area(priv);
/* Per-instance bucket sizes / credit counters: XGS0 ... */
1174 for (i = 0; i < 16; i++) {
1175 xlr_write_reg(mmio, R_XGS_TX0_BUCKET_SIZE + i,
1177 bucket[MSGRNG_STNID_XGS0_TX + i]);
1180 xlr_write_reg(mmio, R_XGS_JFR_BUCKET_SIZE,
1181 bucket_sizes.bucket[MSGRNG_STNID_XMAC0JFR]);
1182 xlr_write_reg(mmio, R_XGS_RFR_BUCKET_SIZE,
1183 bucket_sizes.bucket[MSGRNG_STNID_XMAC0RFR]);
1185 for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
1186 xlr_write_reg(mmio, R_CC_CPU0_0 + i,
1188 counters[i >> 3][i & 0x07]);
/* ... and XGS1 for the second XGMAC instance. */
1190 } else if (id == 1) {
1191 for (i = 0; i < 16; i++) {
1192 xlr_write_reg(mmio, R_XGS_TX0_BUCKET_SIZE + i,
1194 bucket[MSGRNG_STNID_XGS1_TX + i]);
1197 xlr_write_reg(mmio, R_XGS_JFR_BUCKET_SIZE,
1198 bucket_sizes.bucket[MSGRNG_STNID_XMAC1JFR]);
1199 xlr_write_reg(mmio, R_XGS_RFR_BUCKET_SIZE,
1200 bucket_sizes.bucket[MSGRNG_STNID_XMAC1RFR]);
1202 for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
1203 xlr_write_reg(mmio, R_CC_CPU0_0 + i,
1205 counters[i >> 3][i & 0x07]);
/* 10G link: fixed media report, no MII autonegotiation. */
1208 sc->rge_mii.mii_media.ifm_media = IFM_ETHER | IFM_AUTO | IFM_10G_SR | IFM_FDX;
1209 sc->rge_mii.mii_media.ifm_media |= (IFM_AVALID | IFM_ACTIVE);
1210 sc->rge_mii.mii_media.ifm_cur->ifm_media = IFM_ETHER | IFM_AUTO | IFM_10G_SR | IFM_FDX;
1211 sc->rge_mii.mii_media_active = IFM_ETHER | IFM_AUTO | IFM_10G_SR | IFM_FDX;
1212 sc->rge_mii.mii_media.ifm_cur->ifm_media |= (IFM_AVALID | IFM_ACTIVE);
1214 priv->init_frin_desc = 1;
1217 /*******************************************************
1218 * Initialization gmac
1219 *******************************************************/
/*
 * Quiesce and soft-reset the GMAC receive path: disable MAC RX and core RX,
 * poll R_RX_CONTROL until RX halts, pulse the soft-reset bit, poll for
 * completion, then clear the reset bit.
 * NOTE(review): this listing is missing intermediate lines (the bit
 * set/clear masks, poll-loop bodies, and return) — confirm against full source.
 */
1221 rmi_xlr_gmac_reset(struct driver_data *priv)
1223 volatile uint32_t val;
1224 xlr_reg_t *mmio = priv->mmio;
1225 int i, maxloops = 100;
1227 /* Disable MAC RX */
1228 val = xlr_read_reg(mmio, R_MAC_CONFIG_1);
1230 xlr_write_reg(mmio, R_MAC_CONFIG_1, val);
1232 /* Disable Core RX */
1233 val = xlr_read_reg(mmio, R_RX_CONTROL);
1235 xlr_write_reg(mmio, R_RX_CONTROL, val);
1237 /* wait for rx to halt */
1238 for (i = 0; i < maxloops; i++) {
1239 val = xlr_read_reg(mmio, R_RX_CONTROL);
1247 /* Issue a soft reset */
1248 val = xlr_read_reg(mmio, R_RX_CONTROL);
1250 xlr_write_reg(mmio, R_RX_CONTROL, val);
1252 /* wait for reset to complete */
1253 for (i = 0; i < maxloops; i++) {
1254 val = xlr_read_reg(mmio, R_RX_CONTROL);
1262 /* Clear the soft reset bit */
1263 val = xlr_read_reg(mmio, R_RX_CONTROL);
1265 xlr_write_reg(mmio, R_RX_CONTROL, val);
/*
 * One-time hardware init for a GMAC port: configures spill areas, descriptor
 * packing, PDE/parser/classifier, MII/serdes, DMA credits and the message-ring
 * bucket sizes / credit counters for this port, then marks the free-in
 * descriptor pool for (re)initialization.
 * NOTE(review): listing has gaps (e.g. missing `value`/`i` declarations and
 * several closing braces) — verify against the complete source.
 */
1270 rmi_xlr_gmac_init(struct driver_data *priv)
1273 xlr_reg_t *mmio = priv->mmio;
1274 int id = priv->instance;
1275 struct stn_cc *gmac_cc_config;
1277 int blk = id / 4, port = id % 4;
1279 rmi_xlr_mac_set_enable(priv, 0);
1281 rmi_xlr_config_spill_area(priv);
1283 xlr_write_reg(mmio, R_DESC_PACK_CTRL,
1284 (BYTE_OFFSET << O_DESC_PACK_CTRL__ByteOffset) |
1285 (1 << O_DESC_PACK_CTRL__MaxEntry) |
1286 (MAX_FRAME_SIZE << O_DESC_PACK_CTRL__RegularSize));
1288 rmi_xlr_config_pde(priv);
1289 rmi_xlr_config_parser(priv);
1290 rmi_xlr_config_classifier(priv);
1292 xlr_write_reg(mmio, R_MSG_TX_THRESHOLD, 3);
1293 xlr_write_reg(mmio, R_MAC_CONFIG_1, 0x35);
1294 xlr_write_reg(mmio, R_RX_CONTROL, (0x7 << 6));
1296 if (priv->mode == XLR_PORT0_RGMII) {
1297 printf("Port 0 set in RGMII mode\n");
1298 value = xlr_read_reg(mmio, R_RX_CONTROL);
1299 value |= 1 << O_RX_CONTROL__RGMII;
1300 xlr_write_reg(mmio, R_RX_CONTROL, value);
1302 rmi_xlr_mac_mii_init(priv);
1306 priv->advertising = ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half |
1307 ADVERTISED_100baseT_Full | ADVERTISED_100baseT_Half |
1308 ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
1313 * Enable all MDIO interrupts in the phy RX_ER bit seems to be get
1314 * set about every 1 sec in GigE mode, ignore it for now...
1316 rge_mii_write_internal(priv->mii_mmio, priv->phy_addr, 25, 0xfffffffe);
1318 if (priv->mode != XLR_RGMII) {
1319 serdes_regs_init(priv);
1320 serdes_autoconfig(priv);
1322 rmi_xlr_gmac_config_speed(priv);
1324 value = xlr_read_reg(mmio, R_IPG_IFG);
1325 xlr_write_reg(mmio, R_IPG_IFG, ((value & ~0x7f) | MAC_B2B_IPG));
1326 xlr_write_reg(mmio, R_DMACR0, 0xffffffff);
1327 xlr_write_reg(mmio, R_DMACR1, 0xffffffff);
1328 xlr_write_reg(mmio, R_DMACR2, 0xffffffff);
1329 xlr_write_reg(mmio, R_DMACR3, 0xffffffff);
1330 xlr_write_reg(mmio, R_STATCTRL, 0x04);
1331 xlr_write_reg(mmio, R_L2ALLOCCTRL, 0xffffffff);
1332 xlr_write_reg(mmio, R_INTMASK, 0);
1333 xlr_write_reg(mmio, R_FREEQCARVE, 0);
/* Message-ring bucket sizing for this port's TX and the shared JFR/RFR stations. */
1335 xlr_write_reg(mmio, R_GMAC_TX0_BUCKET_SIZE + port,
1336 xlr_board_info.bucket_sizes->bucket[priv->txbucket]);
1337 xlr_write_reg(mmio, R_GMAC_JFR0_BUCKET_SIZE,
1338 xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACJFR_0]);
1339 xlr_write_reg(mmio, R_GMAC_RFR0_BUCKET_SIZE,
1340 xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACRFR_0]);
1341 xlr_write_reg(mmio, R_GMAC_JFR1_BUCKET_SIZE,
1342 xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACJFR_1]);
1343 xlr_write_reg(mmio, R_GMAC_RFR1_BUCKET_SIZE,
1344 xlr_board_info.bucket_sizes->bucket[MSGRNG_STNID_GMACRFR_1]);
1346 dbg_msg("Programming credit counter %d : %d -> %d\n", blk, R_GMAC_TX0_BUCKET_SIZE + port,
1347 xlr_board_info.bucket_sizes->bucket[priv->txbucket]);
1349 gmac_cc_config = xlr_board_info.gmac_block[blk].credit_config;
1350 for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) {
1351 xlr_write_reg(mmio, R_CC_CPU0_0 + i,
1352 gmac_cc_config->counters[i >> 3][i & 0x07]);
1353 dbg_msg("%d: %d -> %d\n", priv->instance,
1354 R_CC_CPU0_0 + i, gmac_cc_config->counters[i >> 3][i & 0x07]);
1356 priv->init_frin_desc = 1;
1359 /**********************************************************************
1360 * Set promiscuous mode
1361 **********************************************************************/
/*
 * Program the MAC filter register from sc->flags: promiscuous mode enables
 * broadcast/pause/all-multicast/all-unicast acceptance; otherwise the
 * pause-frame and all-unicast bits are cleared.
 */
1363 xlr_mac_set_rx_mode(struct rge_softc *sc)
1365 struct driver_data *priv = &(sc->priv);
1368 regval = xlr_read_reg(priv->mmio, R_MAC_FILTER_CONFIG);
1370 if (sc->flags & IFF_PROMISC) {
1371 regval |= (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
1372 (1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) |
1373 (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
1374 (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN);
1376 regval &= ~((1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) |
1377 (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN));
1380 xlr_write_reg(priv->mmio, R_MAC_FILTER_CONFIG, regval);
1383 /**********************************************************************
1384 * Configure LAN speed for the specified MAC.
1385 ********************************************************************* */
/* Set LAN speed for a MAC; body not visible in this listing (appears to be a stub). */
1387 rmi_xlr_mac_set_speed(struct driver_data *s, xlr_mac_speed_t speed)
1392 /**********************************************************************
1393 * Set Ethernet duplex and flow control options for this MAC
1394 ********************************************************************* */
/* Set duplex and flow-control options; body not visible in this listing (appears to be a stub). */
1396 rmi_xlr_mac_set_duplex(struct driver_data *s,
1397 xlr_mac_duplex_t duplex, xlr_mac_fc_t fc)
1402 /*****************************************************************
1403 * Kernel Net Stack <-> MAC Driver Interface
1404 *****************************************************************/
1405 /**********************************************************************
1406 **********************************************************************/
/* mac_xmit() return codes: PASS = sent, RETRY = requeue packet, FAIL = ring full. */
1407 #define MAC_TX_FAIL 2
1408 #define MAC_TX_PASS 0
1409 #define MAC_TX_RETRY 1
/* Debug/diagnostic flag; not observed to be read in the visible code. */
1411 int xlr_dev_queue_xmit_hack = 0;
/*
 * Build a fragment-list message for mbuf `m` and send it to the MAC's TX
 * bucket over the message ring. On message_send failure the descriptor is
 * released and a per-vcpu failure counter bumped.
 * NOTE(review): return statements and some declarations (mflags, rv, i) are
 * missing from this listing.
 */
1414 mac_xmit(struct mbuf *m, struct rge_softc *sc,
1415 struct driver_data *priv, int len, struct p2d_tx_desc *tx_desc)
1417 struct msgrng_msg msg = {0,0,0,0};
1418 int stid = priv->txbucket;
1419 uint32_t tx_cycles = 0;
1421 int vcpu = xlr_cpu_id();
1424 tx_cycles = mips_rd_count();
1426 if (build_frag_list(m, &msg, tx_desc) != 0)
/* Message-ring access must be enabled around message_send and restored after. */
1430 mflags = msgrng_access_enable();
1431 if ((rv = message_send(1, MSGRNG_CODE_MAC, stid, &msg)) != 0) {
1433 msgrng_restore(mflags);
1434 release_tx_desc(&msg, 0);
1435 xlr_rge_msg_snd_failed[vcpu]++;
1436 dbg_msg("Failed packet to cpu %d, rv = %d, stid %d, msg0=%jx\n",
1437 vcpu, rv, stid, (uintmax_t)msg.msg0);
1440 msgrng_restore(mflags);
1441 port_inc_counter(priv->instance, PORT_TX);
1444 /* Send the packet to MAC */
1445 dbg_msg("Sent tx packet to stid %d, msg0=%jx, msg1=%jx \n", stid,
1446 (uintmax_t)msg.msg0, (uintmax_t)msg.msg1);
/* Debug-only hex dump of the first 64 bytes of the frame. */
1450 unsigned char *buf = (char *)m->m_data;
1452 printf("Tx Packet: length=%d\n", len);
1453 for (i = 0; i < 64; i++) {
1454 if (i && (i % 16) == 0)
1456 printf("%02x ", buf[i]);
1461 xlr_inc_counter(NETIF_TX);
/*
 * Thin wrapper over mac_xmit(): counts the stack TX attempt and, when the
 * message ring is full (MAC_TX_FAIL), records a stop-queue event.
 * MAC_TX_RETRY propagates to the caller so the packet can be requeued.
 */
1466 rmi_xlr_mac_xmit(struct mbuf *m, struct rge_softc *sc, int len, struct p2d_tx_desc *tx_desc)
1468 struct driver_data *priv = &(sc->priv);
1473 xlr_inc_counter(NETIF_STACK_TX);
1476 ret = mac_xmit(m, sc, priv, len, tx_desc);
1478 if (ret == MAC_TX_RETRY)
1481 dbg_msg("OUT, ret = %d\n", ret);
1482 if (ret == MAC_TX_FAIL) {
1484 dbg_msg("Msg Ring Full. Stopping upper layer Q\n");
1485 port_inc_counter(priv->instance, PORT_STOPQ);
/*
 * Replenish free-in RX buffers: for each MAC, while this core owes buffers
 * (priv->frin_to_be_sent[cpu]), allocate a buffer and hand its physical
 * address back to the MAC via xlr_mac_send_fr(), decrementing the debt
 * atomically. Loops until all MACs report done.
 * NOTE(review): allocation of `m`, the `done` bookkeeping, and loop braces
 * are missing from this listing.
 */
1491 mac_frin_replenish(void *args /* ignored */ )
1493 int cpu = xlr_core_id();
1497 xlr_inc_counter(REPLENISH_ENTER);
1499 * xlr_set_counter(REPLENISH_ENTER_COUNT,
1500 * atomic_read(frin_to_be_sent));
1502 xlr_set_counter(REPLENISH_CPU, PCPU_GET(cpuid));
1508 for (i = 0; i < XLR_MAX_MACS; i++) {
1509 /* int offset = 0; */
1512 struct rge_softc *sc;
1513 struct driver_data *priv;
1514 int frin_to_be_sent;
1521 frin_to_be_sent = priv->frin_to_be_sent[cpu];
1523 /* if (atomic_read(frin_to_be_sent) < 0) */
1524 if (frin_to_be_sent < 0) {
1525 panic("BUG?: [%s]: gmac_%d illegal value for frin_to_be_sent=%d\n",
1529 /* if (!atomic_read(frin_to_be_sent)) */
1530 if (!frin_to_be_sent)
1533 cycles = mips_rd_count();
1537 device_printf(sc->rge_dev, "No buffer\n");
1541 xlr_inc_counter(REPLENISH_FRIN);
/* On send failure the freshly-allocated buffer is returned to the pool. */
1542 if (xlr_mac_send_fr(priv, vtophys(m), MAX_FRAME_SIZE)) {
1543 free_buf(vtophys(m));
1544 printf("[%s]: rx free message_send failed!\n", __FUNCTION__);
1547 xlr_set_counter(REPLENISH_CYCLES,
1548 (read_c0_count() - cycles));
1549 atomic_subtract_int((&priv->frin_to_be_sent[cpu]), 1);
1555 if (done == XLR_MAX_MACS)
/* 0/1 latch (via atomic_cmpset_int) ensuring only one rge_tx_bkp_func runs at a time. */
1560 static volatile uint32_t g_tx_frm_tx_ok=0;
/*
 * TX backup task: kick rge_start_locked() on every active GMAC port, then
 * release the g_tx_frm_tx_ok latch taken by the scheduler of this task.
 */
1563 rge_tx_bkp_func(void *arg, int npending)
1567 for (i = 0; i < xlr_board_info.gmacports; i++) {
1568 if (!dev_mac[i] || !dev_mac[i]->active)
1570 rge_start_locked(dev_mac[i]->rge_ifp, RGE_TX_THRESHOLD);
1572 atomic_subtract_int(&g_tx_frm_tx_ok, 1);
1575 /* This function is called from an interrupt handler */
/*
 * Message-ring callback: decodes a TX-free or RX message from msg->msg0.
 * TX-free (CTRL_REG_FREE/CTRL_JUMBO_FREE): release the p2d descriptor,
 * clear OACTIVE and schedule the TX backup task. RX (CTRL_SNGL/CTRL_START):
 * bump the per-core free-in debt (replenishing if over threshold), update
 * stats and pass the frame to rge_rx().
 * NOTE(review): extraction of `ctrl`, `addr`, `ifp`, and several braces is
 * missing from this listing.
 */
1577 rmi_xlr_mac_msgring_handler(int bucket, int size, int code,
1578 int stid, struct msgrng_msg *msg,
1579 void *data /* ignored */ )
1581 uint64_t phys_addr = 0;
1582 unsigned long addr = 0;
1583 uint32_t length = 0;
1584 int ctrl = 0, port = 0;
1585 struct rge_softc *sc = NULL;
1586 struct driver_data *priv = 0;
1588 int vcpu = xlr_cpu_id();
1589 int cpu = xlr_core_id();
1591 dbg_msg("mac: bucket=%d, size=%d, code=%d, stid=%d, msg0=%jx msg1=%jx\n",
1592 bucket, size, code, stid, (uintmax_t)msg->msg0, (uintmax_t)msg->msg1);
/* msg0 layout: bits [39:5] buffer phys addr, [53:40] length, [57:54] or [3:0] port. */
1594 phys_addr = (uint64_t) (msg->msg0 & 0xffffffffe0ULL);
1595 length = (msg->msg0 >> 40) & 0x3fff;
1597 ctrl = CTRL_REG_FREE;
1598 port = (msg->msg0 >> 54) & 0x0f;
1602 length = length - BYTE_OFFSET - MAC_CRC_LEN;
1603 port = msg->msg0 & 0x0f;
/* Map the message-ring station id back to the owning softc. */
1607 if (xlr_board_info.is_xls) {
1608 if (stid == MSGRNG_STNID_GMAC1)
1610 sc = dev_mac[dev_mac_gmac0 + port];
1612 if (stid == MSGRNG_STNID_XGS0FR)
1613 sc = dev_mac[dev_mac_xgs0];
1614 else if (stid == MSGRNG_STNID_XGS1FR)
1615 sc = dev_mac[dev_mac_xgs0 + 1];
1617 sc = dev_mac[dev_mac_gmac0 + port];
1623 dbg_msg("msg0 = %jx, stid = %d, port = %d, addr=%lx, length=%d, ctrl=%d\n",
1624 (uintmax_t)msg->msg0, stid, port, addr, length, ctrl);
1626 if (ctrl == CTRL_REG_FREE || ctrl == CTRL_JUMBO_FREE) {
1627 xlr_rge_tx_ok_done[vcpu]++;
1628 release_tx_desc(msg, 1);
1630 if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
1631 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1633 if (atomic_cmpset_int(&g_tx_frm_tx_ok, 0, 1))
1634 rge_tx_bkp_func(NULL, 0);
1635 xlr_set_counter(NETIF_TX_COMPLETE_CYCLES,
1636 (read_c0_count() - msgrng_msg_cycles));
1637 } else if (ctrl == CTRL_SNGL || ctrl == CTRL_START) {
1639 /* struct mbuf *m = 0; */
1640 /* int logical_cpu = 0; */
1642 dbg_msg("Received packet, port = %d\n", port);
1644 * if num frins to be sent exceeds threshold, wake up the
1647 atomic_add_int(&(priv->frin_to_be_sent[cpu]), 1);
1648 if ((priv->frin_to_be_sent[cpu]) > MAC_FRIN_TO_BE_SENT_THRESHOLD) {
1649 mac_frin_replenish(NULL);
1651 dbg_msg("gmac_%d: rx packet: phys_addr = %jx, length = %x\n",
1652 priv->instance, (uintmax_t)phys_addr, length);
1653 mac_stats_add(priv->stats.rx_packets, 1);
1654 mac_stats_add(priv->stats.rx_bytes, length);
1655 xlr_inc_counter(NETIF_RX);
1656 xlr_set_counter(NETIF_RX_CYCLES,
1657 (read_c0_count() - msgrng_msg_cycles));
1658 rge_rx(sc, phys_addr, length);
1659 xlr_rge_rx_done[vcpu]++;
1661 printf("[%s]: unrecognized ctrl=%d!\n", __FUNCTION__, ctrl);
1666 /**********************************************************************
1667 **********************************************************************/
/* NOTE(review): body of the newbus probe routine (presumably rge_probe); its
 * signature line is missing from this listing. */
1672 device_set_desc(dev, "RMI Gigabit Ethernet");
1674 /* Always return 0 */
/* Gate flag and callout handle for the periodic xlr_debug_count() dump. */
1678 volatile unsigned long xlr_debug_enabled;
1679 struct callout rge_dbg_count;
/*
 * Periodic (1 Hz) debug callout: when xlr_debug_enabled is set, dump the
 * AvailRxIn register (raw offset 0x23e) of MAC 0, then rearm itself.
 */
1681 xlr_debug_count(void *addr)
1683 struct driver_data *priv = &dev_mac[0]->priv;
1685 /* uint32_t crdt; */
1686 if (xlr_debug_enabled) {
1687 printf("\nAvailRxIn %#x\n", xlr_read_reg(priv->mmio, 0x23e));
1689 callout_reset(&rge_dbg_count, hz, xlr_debug_count, NULL);
/*
 * Periodic (every 5 s) watchdog: for each stalled port (OACTIVE set), clear
 * OACTIVE if any core has free p2d descriptors, kick the TX backup task once
 * (via the g_tx_frm_tx_ok latch), and rearm the callout.
 */
1694 xlr_tx_q_wakeup(void *addr)
1699 for (i = 0; i < xlr_board_info.gmacports; i++) {
1700 if (!dev_mac[i] || !dev_mac[i]->active)
1702 if ((dev_mac[i]->rge_ifp->if_drv_flags) & IFF_DRV_OACTIVE) {
1703 for (j = 0; j < XLR_MAX_CORE; j++) {
1704 if (xlr_tot_avail_p2d[j]) {
1705 dev_mac[i]->rge_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1711 if (atomic_cmpset_int(&g_tx_frm_tx_ok, 0, 1))
1712 rge_tx_bkp_func(NULL, 0);
1713 callout_reset(&xlr_tx_stop_bkp, 5 * hz, xlr_tx_q_wakeup, NULL);
/*
 * Newbus attach: maps the per-port MMIO windows (GMAC vs XGMAC), selects the
 * PHY address and mode from board info, allocates and configures the ifnet,
 * wires the interrupt, programs the MAC address, sets up ifmedia, calls
 * ether_ifattach(), runs the type-specific HW init, performs once-only
 * common init (watchdog/debug callouts), and finally opens the MAC.
 * NOTE(review): error-path `goto`s/returns and some braces are missing from
 * this listing.
 */
1717 rge_attach(device_t dev)
1720 struct rge_softc *sc;
1721 struct driver_data *priv = 0;
1723 struct xlr_gmac_block_t *gmac_conf = device_get_ivars(dev);
1725 sc = device_get_softc(dev);
1728 /* Initialize mac's */
1729 sc->unit = device_get_unit(dev);
1731 if (sc->unit > XLR_MAX_MACS) {
1735 RGE_LOCK_INIT(sc, device_get_nameunit(dev));
1740 sc->flags = 0; /* TODO : fix me up later */
1742 priv->id = sc->unit;
/* GMAC ports live at baseaddr + 0x1000 per port within a 4-port block;
 * XGMAC ports each get their own base. */
1743 if (gmac_conf->type == XLR_GMAC) {
1744 priv->instance = priv->id;
1745 priv->mmio = (xlr_reg_t *) (xlr_io_base + gmac_conf->baseaddr +
1746 0x1000 * (sc->unit % 4));
1747 if ((ret = rmi_xlr_gmac_reset(priv)) == -1)
1749 } else if (gmac_conf->type == XLR_XGMAC) {
1750 priv->instance = priv->id - xlr_board_info.gmacports;
1751 priv->mmio = (xlr_reg_t *) (xlr_io_base + gmac_conf->baseaddr);
1753 if (xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_VI ||
1754 (xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_XI &&
1755 priv->instance >=4)) {
1756 dbg_msg("Arizona board - offset 4 \n");
1757 priv->mii_mmio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GMAC_4_OFFSET);
1759 priv->mii_mmio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GMAC_0_OFFSET);
1761 priv->pcs_mmio = (xlr_reg_t *) (xlr_io_base + gmac_conf->baseaddr);
1762 priv->serdes_mmio = (xlr_reg_t *) (xlr_io_base + XLR_IO_GMAC_0_OFFSET);
1764 sc->base_addr = (unsigned long)priv->mmio;
1765 sc->mem_end = (unsigned long)priv->mmio + XLR_IO_SIZE - 1;
1767 sc->xmit = rge_start;
1768 sc->stop = rge_stop;
1769 sc->get_stats = rmi_xlr_mac_get_stats;
1770 sc->ioctl = rge_ioctl;
1772 /* Initialize the device specific driver data */
1773 mtx_init(&priv->lock, "rge", NULL, MTX_SPIN);
1775 priv->type = gmac_conf->type;
1777 priv->mode = gmac_conf->mode;
/* Board-dependent PHY address / interface-mode selection. */
1778 if (xlr_board_info.is_xls == 0) {
1779 /* TODO - check II and IIB boards */
1780 if (xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_II &&
1781 xlr_boot1_info.board_minor_version != 1)
1782 priv->phy_addr = priv->instance - 2;
1784 priv->phy_addr = priv->instance;
1785 priv->mode = XLR_RGMII;
1787 if (gmac_conf->mode == XLR_PORT0_RGMII &&
1788 priv->instance == 0) {
1789 priv->mode = XLR_PORT0_RGMII;
1792 priv->mode = XLR_SGMII;
1793 /* Board 11 has SGMII daughter cards with the XLS chips, in this case
1794 the phy number is 0-3 for both GMAC blocks */
1795 if (xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_XI)
1796 priv->phy_addr = priv->instance % 4 + 16;
1798 priv->phy_addr = priv->instance + 16;
1802 priv->txbucket = gmac_conf->station_txbase + priv->instance % 4;
1803 priv->rfrbucket = gmac_conf->station_rfr;
1804 priv->spill_configured = 0;
1806 dbg_msg("priv->mmio=%p\n", priv->mmio);
1808 /* Set up ifnet structure */
1809 ifp = sc->rge_ifp = if_alloc(IFT_ETHER);
1811 device_printf(sc->rge_dev, "failed to if_alloc()\n");
1812 rge_release_resources(sc);
1814 RGE_LOCK_DESTROY(sc);
1818 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1819 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1820 ifp->if_ioctl = rge_ioctl;
1821 ifp->if_start = rge_start;
1822 ifp->if_init = rge_init;
1823 ifp->if_mtu = ETHERMTU;
1824 ifp->if_snd.ifq_drv_maxlen = RGE_TX_Q_SIZE;
1825 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
1826 IFQ_SET_READY(&ifp->if_snd);
1828 ifp->if_hwassist = 0;
1829 ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_HWTAGGING;
1830 ifp->if_capenable = ifp->if_capabilities;
1832 /* Initialize the rge_softc */
1833 sc->irq = gmac_conf->baseirq + priv->instance % 4;
1835 /* Set the IRQ into the rid field */
1837 * note this is a hack to pass the irq to the iodi interrupt setup
1840 sc->rge_irq.__r_i = (struct resource_i *)(intptr_t)sc->irq;
1842 ret = bus_setup_intr(dev, &sc->rge_irq, INTR_TYPE_NET | INTR_MPSAFE,
1843 NULL, rge_intr, sc, &sc->rge_intrhand);
1847 device_printf(sc->rge_dev, "couldn't set up irq\n");
1848 RGE_LOCK_DESTROY(sc);
1851 xlr_mac_get_hwaddr(sc);
1852 xlr_mac_setup_hwaddr(priv);
1854 dbg_msg("MMIO %08lx, MII %08lx, PCS %08lx, base %08lx PHY %d IRQ %d\n",
1855 (u_long)priv->mmio, (u_long)priv->mii_mmio, (u_long)priv->pcs_mmio,
1856 (u_long)sc->base_addr, priv->phy_addr, sc->irq);
1857 dbg_msg("HWADDR %02x:%02x tx %d rfr %d\n", (u_int)sc->dev_addr[4],
1858 (u_int)sc->dev_addr[5], priv->txbucket, priv->rfrbucket);
1861 * Set up ifmedia support.
1864 * Initialize MII/media info.
1866 sc->rge_mii.mii_ifp = ifp;
1867 sc->rge_mii.mii_readreg = rge_mii_read;
1868 sc->rge_mii.mii_writereg = (mii_writereg_t) rge_mii_write;
1869 sc->rge_mii.mii_statchg = rmi_xlr_mac_mii_statchg;
1870 ifmedia_init(&sc->rge_mii.mii_media, 0, rmi_xlr_mac_mediachange,
1871 rmi_xlr_mac_mediastatus);
1872 ifmedia_add(&sc->rge_mii.mii_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1873 ifmedia_set(&sc->rge_mii.mii_media, IFM_ETHER | IFM_AUTO);
1874 sc->rge_mii.mii_media.ifm_media = sc->rge_mii.mii_media.ifm_cur->ifm_media;
1877 * Call MI attach routine.
1879 ether_ifattach(ifp, sc->dev_addr);
1881 if (priv->type == XLR_GMAC) {
1882 rmi_xlr_gmac_init(priv);
1883 } else if (priv->type == XLR_XGMAC) {
1884 rmi_xlr_xgmac_init(priv);
1886 dbg_msg("rge_%d: Phoenix Mac at 0x%p (mtu=%d)\n",
1887 sc->unit, priv->mmio, sc->mtu);
1888 dev_mac[sc->unit] = sc;
1889 if (priv->type == XLR_XGMAC && priv->instance == 0)
1890 dev_mac_xgs0 = sc->unit;
1891 if (priv->type == XLR_GMAC && priv->instance == 0)
1892 dev_mac_gmac0 = sc->unit;
/* Once-only (first attach) driver-wide init: TX watchdog + debug callouts. */
1894 if (!gmac_common_init_done) {
1896 gmac_common_init_done = 1;
1897 callout_init(&xlr_tx_stop_bkp, CALLOUT_MPSAFE);
1898 callout_reset(&xlr_tx_stop_bkp, hz, xlr_tx_q_wakeup, NULL);
1899 callout_init(&rge_dbg_count, CALLOUT_MPSAFE);
1900 //callout_reset(&rge_dbg_count, hz, xlr_debug_count, NULL);
1902 if ((ret = rmi_xlr_mac_open(sc)) == -1) {
1903 RGE_LOCK_DESTROY(sc);
1908 device_printf(dev, "error - skipping\n");
/* Reset helper; body not visible in this listing. */
1914 rge_reset(struct rge_softc *sc)
/* NOTE(review): detach body (presumably rge_detach) — entirely compiled out
 * behind FREEBSD_MAC_NOT_YET; signature line missing from this listing. */
1922 #ifdef FREEBSD_MAC_NOT_YET
1923 struct rge_softc *sc;
1926 sc = device_get_softc(dev);
1934 ether_ifdetach(ifp);
1937 ifmedia_removeall(&sc->rge_ifmedia);
1939 bus_generic_detach(dev);
1940 device_delete_child(dev, sc->rge_miibus);
1943 rge_release_resources(sc);
1945 #endif /* FREEBSD_MAC_NOT_YET */
/* Suspend method; visible body only fetches the softc. */
1949 rge_suspend(device_t dev)
1951 struct rge_softc *sc;
1953 sc = device_get_softc(dev);
/* Resume method: deliberately unimplemented — panics if ever invoked. */
1962 rge_resume(device_t dev)
1964 panic("rge_resume(): unimplemented\n");
/* Free the ifnet and destroy the softc lock if it was initialized. */
1969 rge_release_resources(struct rge_softc *sc)
1972 if (sc->rge_ifp != NULL)
1973 if_free(sc->rge_ifp);
1975 if (mtx_initialized(&sc->rge_mtx)) /* XXX */
1976 RGE_LOCK_DESTROY(sc);
/* Per-vcpu RX diagnostics counters (32 hardware threads). */
1978 uint32_t gmac_rx_fail[32];
1979 uint32_t gmac_rx_pass[32];
/*
 * RX completion: recover the mbuf pointer and the 0xf00bad ownership magic
 * stored in the cacheline preceding the buffer (read via XKPHYS with KX
 * temporarily enabled), fix up the mbuf lengths, and hand the frame to
 * if_input. NOTE(review): the intr-disable around KX and the restore of `sr`
 * are missing from this listing.
 */
1982 rge_rx(struct rge_softc *sc, vm_paddr_t paddr, int len)
1985 struct ifnet *ifp = sc->rge_ifp;
1989 * On 32 bit machines we use XKPHYS to get the values stores with
1990 * the mbuf, need to explicitly enable KX. Disable interrupts while
1991 * KX is enabled to prevent this setting leaking to other code.
1993 sr = xlr_enable_kx();
1994 m = (struct mbuf *)(intptr_t)xlr_paddr_ld(paddr - XLR_CACHELINE_SIZE);
1995 mag = xlr_paddr_ld(paddr - XLR_CACHELINE_SIZE + sizeof(uint64_t));
1997 if (mag != 0xf00bad) {
1998 /* somebody else packet Error - FIXME in intialization */
1999 printf("cpu %d: *ERROR* Not my packet paddr %p\n",
2000 xlr_cpu_id(), (void *)paddr);
2003 /* align the data */
2004 m->m_data += BYTE_OFFSET;
2005 m->m_pkthdr.len = m->m_len = len;
2006 m->m_pkthdr.rcvif = ifp;
/* Debug-only hex dump of the first 64 bytes of the frame. */
2011 unsigned char *buf = (char *)m->m_data;
2013 printf("Rx Packet: length=%d\n", len);
2014 for (i = 0; i < 64; i++) {
2015 if (i && (i % 16) == 0)
2017 printf("%02x ", buf[i]);
2023 (*ifp->if_input) (ifp, m);
/* NOTE(review): body of the interrupt handler registered as rge_intr in
 * rge_attach(); its signature line is missing from this listing.
 * Handles MDIO (PHY) interrupts by polling every GMAC's PHY status register
 * and reconfiguring link speed; otherwise reports an error interrupt. All
 * interrupts are then acked by writing ~0 to R_INTREG. On pre-rev-2 silicon,
 * XGMAC interrupts arrive on xgs_1's IRQ, so the xgs0 unit is checked too. */
2029 struct rge_softc *sc = (struct rge_softc *)arg;
2030 struct driver_data *priv = &(sc->priv);
2031 xlr_reg_t *mmio = priv->mmio;
2032 uint32_t intreg = xlr_read_reg(mmio, R_INTREG);
2034 if (intreg & (1 << O_INTREG__MDInt)) {
2035 uint32_t phy_int_status = 0;
2038 for (i = 0; i < XLR_MAX_MACS; i++) {
2039 struct rge_softc *phy_dev = 0;
2040 struct driver_data *phy_priv = 0;
2042 phy_dev = dev_mac[i];
2043 if (phy_dev == NULL)
2046 phy_priv = &phy_dev->priv;
2048 if (phy_priv->type == XLR_XGMAC)
/* PHY register 26 holds the interrupt status; reading it also clears it. — TODO confirm */
2051 phy_int_status = rge_mii_read_internal(phy_priv->mii_mmio,
2052 phy_priv->phy_addr, 26);
2053 printf("rge%d: Phy addr %d, MII MMIO %lx status %x\n", phy_priv->instance,
2054 (int)phy_priv->phy_addr, (u_long)phy_priv->mii_mmio, phy_int_status);
2055 rmi_xlr_gmac_config_speed(phy_priv);
2058 printf("[%s]: mac type = %d, instance %d error "
2059 "interrupt: INTREG = 0x%08x\n",
2060 __FUNCTION__, priv->type, priv->instance, intreg);
2063 /* clear all interrupts and hope to make progress */
2064 xlr_write_reg(mmio, R_INTREG, 0xffffffff);
2066 /* (not yet) on A0 and B0, xgmac interrupts are routed only to xgs_1 irq */
2067 if ((xlr_revision() < 2) && (priv->type == XLR_XGMAC)) {
2068 struct rge_softc *xgs0_dev = dev_mac[dev_mac_xgs0];
2069 struct driver_data *xgs0_priv = &xgs0_dev->priv;
2070 xlr_reg_t *xgs0_mmio = xgs0_priv->mmio;
2071 uint32_t xgs0_intreg = xlr_read_reg(xgs0_mmio, R_INTREG);
2074 printf("[%s]: mac type = %d, instance %d error "
2075 "interrupt: INTREG = 0x%08x\n",
2076 __FUNCTION__, xgs0_priv->type, xgs0_priv->instance, xgs0_intreg);
2078 xlr_write_reg(xgs0_mmio, R_INTREG, 0xffffffff);
/*
 * Drain the ifnet send queue: up to the core's available p2d descriptor
 * count, dequeue a packet, pair it with a p2d TX descriptor and hand it to
 * rmi_xlr_mac_xmit(). A RETRY result re-queues the packet at the head and
 * marks the queue OACTIVE. NOTE(review): `i` declaration, returns and loop
 * braces are missing from this listing.
 */
2084 rge_start_locked(struct ifnet *ifp, int threshold)
2086 struct rge_softc *sc = ifp->if_softc;
2087 struct mbuf *m = NULL;
2088 int prepend_pkt = 0;
2090 struct p2d_tx_desc *tx_desc = NULL;
2091 int cpu = xlr_core_id();
2092 uint32_t vcpu = xlr_cpu_id();
2094 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
2097 for (i = 0; i < xlr_tot_avail_p2d[cpu]; i++) {
2098 if (IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2100 tx_desc = get_p2d_desc();
2102 xlr_rge_get_p2d_failed[vcpu]++;
2105 /* Grab a packet off the queue. */
2106 IFQ_DEQUEUE(&ifp->if_snd, m);
2108 free_p2d_desc(tx_desc);
2111 prepend_pkt = rmi_xlr_mac_xmit(m, sc, 0, tx_desc);
2114 xlr_rge_tx_prepend[vcpu]++;
2115 IF_PREPEND(&ifp->if_snd, m);
2116 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2120 xlr_rge_tx_done[vcpu]++;
/* ifnet if_start entry point: drain the queue with the full-queue threshold. */
2126 rge_start(struct ifnet *ifp)
2128 rge_start_locked(ifp, RGE_TX_Q_SIZE);
/*
 * ifnet ioctl handler: MTU change, IFF_UP/PROMISC/ALLMULTI transitions,
 * multicast list updates, media ioctls and capability toggles; everything
 * else falls through to ether_ioctl(). NOTE(review): the switch/case labels
 * and break statements are missing from this listing.
 */
2132 rge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2134 struct rge_softc *sc = ifp->if_softc;
2135 struct ifreq *ifr = (struct ifreq *)data;
2136 int mask, error = 0;
2138 /* struct mii_data *mii; */
2141 ifp->if_mtu = ifr->ifr_mtu;
2142 error = rmi_xlr_mac_change_mtu(sc, ifr->ifr_mtu);
2147 if (ifp->if_flags & IFF_UP) {
2149 * If only the state of the PROMISC flag changed,
2150 * then just use the 'set promisc mode' command
2151 * instead of reinitializing the entire NIC. Doing a
2152 * full re-init means reloading the firmware and
2153 * waiting for it to start up, which may take a
2154 * second or two. Similarly for ALLMULTI.
2156 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2157 ifp->if_flags & IFF_PROMISC &&
2158 !(sc->flags & IFF_PROMISC)) {
2159 sc->flags |= IFF_PROMISC;
2160 xlr_mac_set_rx_mode(sc);
2161 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2162 !(ifp->if_flags & IFF_PROMISC) &&
2163 sc->flags & IFF_PROMISC) {
/* NOTE(review): likely a bug — this keeps ONLY the PROMISC bit instead of
 * clearing it; intent appears to be `sc->flags &= ~IFF_PROMISC;`. Verify. */
2164 sc->flags &= IFF_PROMISC;
2165 xlr_mac_set_rx_mode(sc);
2166 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
2167 (ifp->if_flags ^ sc->flags) & IFF_ALLMULTI) {
2168 rmi_xlr_mac_set_multicast_list(sc);
2170 xlr_mac_set_rx_mode(sc);
2172 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2173 xlr_mac_set_rx_mode(sc);
2176 sc->flags = ifp->if_flags;
2182 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2184 rmi_xlr_mac_set_multicast_list(sc);
2191 error = ifmedia_ioctl(ifp, ifr,
2192 &sc->rge_mii.mii_media, command);
2195 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2196 ifp->if_hwassist = 0;
2199 error = ether_ioctl(ifp, command, data);
/*
 * ifnet if_init: no-op if already running; otherwise mark RUNNING, clear
 * OACTIVE, and enable the MAC.
 */
2207 rge_init(void *addr)
2209 struct rge_softc *sc = (struct rge_softc *)addr;
2211 struct driver_data *priv = &(sc->priv);
2215 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2217 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2218 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2220 rmi_xlr_mac_set_enable(priv, 1);
/* Stop callback: delegates to rmi_xlr_mac_close(). */
2224 rge_stop(struct rge_softc *sc)
2226 rmi_xlr_mac_close(sc);
/* Shutdown method; visible body only fetches the softc. */
2230 rge_shutdown(device_t dev)
2232 struct rge_softc *sc;
2234 sc = device_get_softc(dev);
/*
 * Open the MAC: fill the RX free-in ring, program the RX filter, and — when
 * the last GMAC port opens — unmask the MDIO interrupt on port 0 only.
 * Then apply speed/duplex/flow-control and reset the per-core free-in debt.
 * NOTE(review): curious that the enable call passes 0 here — verify against
 * the full source whether enabling happens later (rge_init does set enable).
 */
2245 rmi_xlr_mac_open(struct rge_softc *sc)
2247 struct driver_data *priv = &(sc->priv);
2252 if (rmi_xlr_mac_fill_rxfr(sc)) {
2255 mtx_lock_spin(&priv->lock);
2257 xlr_mac_set_rx_mode(sc);
2259 if (sc->unit == xlr_board_info.gmacports - 1) {
2260 printf("Enabling MDIO interrupts\n");
2261 struct rge_softc *tmp = NULL;
2263 for (i = 0; i < xlr_board_info.gmacports; i++) {
2266 xlr_write_reg(tmp->priv.mmio, R_INTMASK,
2267 ((tmp->priv.instance == 0) << O_INTMASK__MDInt));
2271 * Configure the speed, duplex, and flow control
2273 rmi_xlr_mac_set_speed(priv, priv->speed);
2274 rmi_xlr_mac_set_duplex(priv, priv->duplex, priv->flow_ctrl);
2275 rmi_xlr_mac_set_enable(priv, 0);
2277 mtx_unlock_spin(&priv->lock);
2279 for (i = 0; i < 8; i++) {
2280 priv->frin_to_be_sent[i] = 0;
2286 /**********************************************************************
2287 **********************************************************************/
/*
 * Close the MAC under the spin lock: disable it and record stop-queue
 * counters. In-flight mbufs are intentionally left in the ring for reuse
 * on the next open.
 */
2289 rmi_xlr_mac_close(struct rge_softc *sc)
2291 struct driver_data *priv = &(sc->priv);
2293 mtx_lock_spin(&priv->lock);
2296 * There may have left over mbufs in the ring as well as in free in
2297 * they will be reused next time open is called
2300 rmi_xlr_mac_set_enable(priv, 0);
2302 xlr_inc_counter(NETIF_STOP_Q);
2303 port_inc_counter(priv->instance, PORT_STOPQ);
2305 mtx_unlock_spin(&priv->lock);
2310 /**********************************************************************
2311 **********************************************************************/
/*
 * Return a pointer to the driver's stats structure. The lock/unlock pair
 * currently brackets no work (hardware-stat refresh is a TODO, see XXX).
 */
2312 static struct rge_softc_stats *
2313 rmi_xlr_mac_get_stats(struct rge_softc *sc)
2315 struct driver_data *priv = &(sc->priv);
2317 /* unsigned long flags; */
2319 mtx_lock_spin(&priv->lock);
2321 /* XXX update other stats here */
2323 mtx_unlock_spin(&priv->lock);
2325 return &priv->stats;
2328 /**********************************************************************
2329 **********************************************************************/
/* Multicast filter update; body not visible in this listing (appears to be a stub). */
2331 rmi_xlr_mac_set_multicast_list(struct rge_softc *sc)
2335 /**********************************************************************
2336 **********************************************************************/
/*
 * Change MTU: reject values outside [64, 9500]; otherwise, under the spin
 * lock, disable the MAC, flush RX free-in, and re-enable.
 */
2338 rmi_xlr_mac_change_mtu(struct rge_softc *sc, int new_mtu)
2340 struct driver_data *priv = &(sc->priv);
2342 if ((new_mtu > 9500) || (new_mtu < 64)) {
2345 mtx_lock_spin(&priv->lock);
2349 /* Disable MAC TX/RX */
2350 rmi_xlr_mac_set_enable(priv, 0);
2352 /* Flush RX FR IN */
2354 rmi_xlr_mac_set_enable(priv, 1);
2356 mtx_unlock_spin(&priv->lock);
2360 /**********************************************************************
2361 **********************************************************************/
/*
 * Prime the RX free-in ring: once per init (guarded by init_frin_desc),
 * allocate MAX_NUM_DESC buffers and hand each physical address to the MAC.
 * NOTE(review): the buffer allocation of `ptr` inside the loop is missing
 * from this listing.
 */
2363 rmi_xlr_mac_fill_rxfr(struct rge_softc *sc)
2365 struct driver_data *priv = &(sc->priv);
2371 if (!priv->init_frin_desc)
2373 priv->init_frin_desc = 0;
2376 for (i = 0; i < MAX_NUM_DESC; i++) {
2382 /* Send the free Rx desc to the MAC */
2383 xlr_mac_send_fr(priv, vtophys(ptr), MAX_FRAME_SIZE);
2389 /**********************************************************************
2390 **********************************************************************/
/*
 * Allocate one cacheline-aligned, zeroed spill area via contigmalloc and
 * program its physical address (split across two registers: bits [36:5] and
 * [39:37]) and size into the given MAC registers. Panics on allocation or
 * alignment failure. Returns the virtual address (return missing from listing).
 */
2391 static __inline__ void *
2392 rmi_xlr_config_spill(xlr_reg_t * mmio,
2393 int reg_start_0, int reg_start_1,
2394 int reg_size, int size)
2396 uint32_t spill_size = size;
2398 uint64_t phys_addr = 0;
2401 spill = contigmalloc((spill_size + XLR_CACHELINE_SIZE), M_DEVBUF,
2402 M_NOWAIT | M_ZERO, 0, 0xffffffff, XLR_CACHELINE_SIZE, 0);
2403 if (!spill || ((vm_offset_t)spill & (XLR_CACHELINE_SIZE - 1))) {
2404 panic("Unable to allocate memory for spill area!\n");
2406 phys_addr = vtophys(spill);
2407 dbg_msg("Allocate spill %d bytes at %jx\n", size, (uintmax_t)phys_addr);
2408 xlr_write_reg(mmio, reg_start_0, (phys_addr >> 5) & 0xffffffff);
2409 xlr_write_reg(mmio, reg_start_1, (phys_addr >> 37) & 0x07);
2410 xlr_write_reg(mmio, reg_size, spill_size);
/*
 * Configure all six message-ring spill areas (FRIN, class 0-3, FROUT) for
 * this MAC block. GMAC ports other than the first of each 4-port block skip
 * the work (spill registers are per block, not per port). spill_configured
 * guards re-entry; the comment below notes it would need synchronization if
 * init ever runs on multiple CPUs concurrently.
 */
2416 rmi_xlr_config_spill_area(struct driver_data *priv)
2419 * if driver initialization is done parallely on multiple cpus
2420 * spill_configured needs synchronization
2422 if (priv->spill_configured)
2425 if (priv->type == XLR_GMAC && priv->instance % 4 != 0) {
2426 priv->spill_configured = 1;
2429 priv->spill_configured = 1;
2432 rmi_xlr_config_spill(priv->mmio,
2433 R_REG_FRIN_SPILL_MEM_START_0,
2434 R_REG_FRIN_SPILL_MEM_START_1,
2435 R_REG_FRIN_SPILL_MEM_SIZE,
2437 sizeof(struct fr_desc));
2439 priv->class_0_spill =
2440 rmi_xlr_config_spill(priv->mmio,
2441 R_CLASS0_SPILL_MEM_START_0,
2442 R_CLASS0_SPILL_MEM_START_1,
2443 R_CLASS0_SPILL_MEM_SIZE,
2445 sizeof(union rx_tx_desc));
2446 priv->class_1_spill =
2447 rmi_xlr_config_spill(priv->mmio,
2448 R_CLASS1_SPILL_MEM_START_0,
2449 R_CLASS1_SPILL_MEM_START_1,
2450 R_CLASS1_SPILL_MEM_SIZE,
2452 sizeof(union rx_tx_desc));
2455 rmi_xlr_config_spill(priv->mmio, R_FROUT_SPILL_MEM_START_0,
2456 R_FROUT_SPILL_MEM_START_1,
2457 R_FROUT_SPILL_MEM_SIZE,
2459 sizeof(struct fr_desc));
2461 priv->class_2_spill =
2462 rmi_xlr_config_spill(priv->mmio,
2463 R_CLASS2_SPILL_MEM_START_0,
2464 R_CLASS2_SPILL_MEM_START_1,
2465 R_CLASS2_SPILL_MEM_SIZE,
2467 sizeof(union rx_tx_desc));
2468 priv->class_3_spill =
2469 rmi_xlr_config_spill(priv->mmio,
2470 R_CLASS3_SPILL_MEM_START_0,
2471 R_CLASS3_SPILL_MEM_START_1,
2472 R_CLASS3_SPILL_MEM_SIZE,
2474 sizeof(union rx_tx_desc));
2475 priv->spill_configured = 1;
2478 /*****************************************************************
2479 * Write the MAC address to the XLR registers
2480 * All 4 addresses are the same for now
2481 *****************************************************************/
/*
 * Write the station MAC address from sc->dev_addr into R_MAC_ADDR0 (bytes
 * reversed across the two 32-bit halves), open the ADDR2/ADDR3 masks fully,
 * and enable broadcast + all-multicast + ADDR0 matching in the filter config.
 */
2483 xlr_mac_setup_hwaddr(struct driver_data *priv)
2485 struct rge_softc *sc = priv->sc;
2487 xlr_write_reg(priv->mmio, R_MAC_ADDR0,
2488 ((sc->dev_addr[5] << 24) | (sc->dev_addr[4] << 16)
2489 | (sc->dev_addr[3] << 8) | (sc->dev_addr[2]))
2492 xlr_write_reg(priv->mmio, R_MAC_ADDR0 + 1,
2493 ((sc->dev_addr[1] << 24) | (sc->
2494 dev_addr[0] << 16)));
2496 xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK2, 0xffffffff);
2498 xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK2 + 1, 0xffffffff);
2500 xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK3, 0xffffffff);
2502 xlr_write_reg(priv->mmio, R_MAC_ADDR_MASK3 + 1, 0xffffffff);
2504 xlr_write_reg(priv->mmio, R_MAC_FILTER_CONFIG,
2505 (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
2506 (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
2507 (1 << O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID)
2511 /*****************************************************************
2512 * Read the MAC address from the XLR registers
2513 * All 4 addresses are the same for now
2514 *****************************************************************/
/*
 * Derive this port's MAC address from the bootloader-supplied base address
 * (xlr_boot1_info.mac_addr, big-endian byte order), adding the port instance
 * to the last octet so each port gets a unique address.
 */
2516 xlr_mac_get_hwaddr(struct rge_softc *sc)
2518 struct driver_data *priv = &(sc->priv);
2520 sc->dev_addr[0] = (xlr_boot1_info.mac_addr >> 40) & 0xff;
2521 sc->dev_addr[1] = (xlr_boot1_info.mac_addr >> 32) & 0xff;
2522 sc->dev_addr[2] = (xlr_boot1_info.mac_addr >> 24) & 0xff;
2523 sc->dev_addr[3] = (xlr_boot1_info.mac_addr >> 16) & 0xff;
2524 sc->dev_addr[4] = (xlr_boot1_info.mac_addr >> 8) & 0xff;
2525 sc->dev_addr[5] = ((xlr_boot1_info.mac_addr >> 0) & 0xff) + priv->instance;
2528 /*****************************************************************
2529 * Mac Module Initialization
2530 *****************************************************************/
2532 mac_common_init(void)
2534 init_p2d_allocation();
2537 if (xlr_board_info.is_xls) {
2538 if (register_msgring_handler(MSGRNG_STNID_GMAC,
2539 MSGRNG_STNID_GMAC + 1, rmi_xlr_mac_msgring_handler,
2541 panic("Couldn't register msgring handler\n");
2543 if (register_msgring_handler(MSGRNG_STNID_GMAC1,
2544 MSGRNG_STNID_GMAC1 + 1, rmi_xlr_mac_msgring_handler,
2546 panic("Couldn't register msgring handler\n");
2549 if (register_msgring_handler(MSGRNG_STNID_GMAC,
2550 MSGRNG_STNID_GMAC + 1, rmi_xlr_mac_msgring_handler,
2552 panic("Couldn't register msgring handler\n");
2557 * Not yet if (xlr_board_atx_ii()) { if (register_msgring_handler
2558 * (TX_STN_XGS_0, rmi_xlr_mac_msgring_handler, NULL)) {
2559 * panic("Couldn't register msgring handler for TX_STN_XGS_0\n"); }
2560 * if (register_msgring_handler (TX_STN_XGS_1,
2561 * rmi_xlr_mac_msgring_handler, NULL)) { panic("Couldn't register
2562 * msgring handler for TX_STN_XGS_1\n"); } }