2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2012, 2013 Bjoern A. Zeeb
5 * Copyright (c) 2014 Robert N. M. Watson
8 * This software was developed by SRI International and the University of
9 * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-11-C-0249)
10 * ("MRC2"), as part of the DARPA MRC research programme.
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * Altera Triple-Speed Ethernet MegaCore, Function User Guide
35 * UG-01008-3.0, Software Version: 12.0, June 2012.
36 * Available at the time of writing at:
37 * http://www.altera.com/literature/ug/ug_ethernet.pdf
39 * We are using a Marvell E1111 (Alaska) PHY on the DE4. See mii/e1000phy.c.
43 * - ifOutBroadcastPkts are only counted if both ether dst and src are all-1s;
44 * seems an IP core bug, they count ether broadcasts as multicast. Is this
46 * - figure out why the TX FIFO fill status and intr did not work as expected.
47 * - test 100Mbit/s and 10Mbit/s
48 * - blacklist the one special factory programmed ethernet address (for now
49 * hardcoded, later from loader?)
50 * - resolve all XXX, left as reminders to shake out details later
51 * - Jumbo frame support
54 #include <sys/cdefs.h>
55 __FBSDID("$FreeBSD$");
57 #include "opt_device_polling.h"
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/kernel.h>
63 #include <sys/endian.h>
66 #include <sys/module.h>
67 #include <sys/mutex.h>
69 #include <sys/socket.h>
70 #include <sys/sockio.h>
71 #include <sys/types.h>
73 #include <net/ethernet.h>
75 #include <net/if_var.h>
76 #include <net/if_dl.h>
77 #include <net/if_media.h>
78 #include <net/if_types.h>
79 #include <net/if_vlan_var.h>
83 #include <machine/bus.h>
84 #include <machine/resource.h>
87 #include <dev/mii/mii.h>
88 #include <dev/mii/miivar.h>
90 #include <dev/altera/atse/if_atsereg.h>
91 #include <dev/altera/atse/a_api.h>
93 MODULE_DEPEND(atse, ether, 1, 1, 1);
94 MODULE_DEPEND(atse, miibus, 1, 1, 1);
97 #define ATSE_WATCHDOG_TIME 5
100 static poll_handler_t atse_poll;
103 /* XXX once we'd do parallel attach, we need a global lock for this. */
104 #define ATSE_ETHERNET_OPTION_BITS_UNDEF 0
105 #define ATSE_ETHERNET_OPTION_BITS_READ 1
106 static int atse_ethernet_option_bits_flag = ATSE_ETHERNET_OPTION_BITS_UNDEF;
107 static uint8_t atse_ethernet_option_bits[ALTERA_ETHERNET_OPTION_BITS_LEN];
109 static int atse_intr_debug_enable = 0;
110 SYSCTL_INT(_debug, OID_AUTO, atse_intr_debug_enable, CTLFLAG_RW,
111 &atse_intr_debug_enable, 0,
112 "Extra debugging output for atse interrupts");
115 * Softc and critical resource locking.
117 #define ATSE_LOCK(_sc) mtx_lock(&(_sc)->atse_mtx)
118 #define ATSE_UNLOCK(_sc) mtx_unlock(&(_sc)->atse_mtx)
119 #define ATSE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->atse_mtx, MA_OWNED)
121 #define ATSE_TX_PENDING(sc) (sc->atse_tx_m != NULL || \
122 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
125 #define DPRINTF(format, ...) printf(format, __VA_ARGS__)
127 #define DPRINTF(format, ...)
130 /* a_api.c functions; factor out? */
/*
 * Write one 32-bit word to an Avalon on-chip FIFO memory core register.
 * The value is byte-swapped to little endian before the bus write; the
 * desc/f/l arguments only feed the DPRINTF trace line.
 */
132 a_onchip_fifo_mem_core_write(struct resource *res, uint32_t off,
133     uint32_t val4, const char *desc, const char *f, const int l)
136 	val4 = htole32(val4);
137 	DPRINTF("[%s:%d] FIFOW %s 0x%08x = 0x%08x\n", f, l, desc, off, val4);
138 	bus_write_4(res, off, val4);
/*
 * Read one 32-bit word from an Avalon on-chip FIFO memory core register,
 * converting from little endian; desc/f/l only feed the DPRINTF trace.
 */
141 static inline uint32_t
142 a_onchip_fifo_mem_core_read(struct resource *res, uint32_t off,
143     const char *desc, const char *f, const int l)
147 	val4 = le32toh(bus_read_4(res, off));
148 	DPRINTF("[%s:%d] FIFOR %s 0x%08x = 0x%08x\n", f, l, desc, off, val4);
153 /* The FIFO does an endian conversion, so we must not do it as well. */
154 /* XXX-BZ in fact we should do a htobe32 so le would be fine as well? */
155 #define ATSE_TX_DATA_WRITE(sc, val4) \
156 bus_write_4((sc)->atse_tx_mem_res, A_ONCHIP_FIFO_MEM_CORE_DATA, val4)
158 #define ATSE_TX_META_WRITE(sc, val4) \
159 a_onchip_fifo_mem_core_write((sc)->atse_tx_mem_res, \
160 A_ONCHIP_FIFO_MEM_CORE_METADATA, \
161 (val4), "TXM", __func__, __LINE__)
162 #define ATSE_TX_META_READ(sc) \
163 a_onchip_fifo_mem_core_read((sc)->atse_tx_mem_res, \
164 A_ONCHIP_FIFO_MEM_CORE_METADATA, \
165 "TXM", __func__, __LINE__)
167 #define ATSE_TX_READ_FILL_LEVEL(sc) \
168 a_onchip_fifo_mem_core_read((sc)->atse_txc_mem_res, \
169 A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_FILL_LEVEL, \
170 "TX_FILL", __func__, __LINE__)
171 #define ATSE_RX_READ_FILL_LEVEL(sc) \
172 a_onchip_fifo_mem_core_read((sc)->atse_rxc_mem_res, \
173 A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_FILL_LEVEL, \
174 "RX_FILL", __func__, __LINE__)
176 /* The FIFO does an endian conversion, so we must not do it as well. */
177 /* XXX-BZ in fact we should do a htobe32 so le would be fine as well? */
178 #define ATSE_RX_DATA_READ(sc) \
179 bus_read_4((sc)->atse_rx_mem_res, A_ONCHIP_FIFO_MEM_CORE_DATA)
180 #define ATSE_RX_META_READ(sc) \
181 a_onchip_fifo_mem_core_read((sc)->atse_rx_mem_res, \
182 A_ONCHIP_FIFO_MEM_CORE_METADATA, \
183 "RXM", __func__, __LINE__)
185 #define ATSE_RX_STATUS_READ(sc) \
186 a_onchip_fifo_mem_core_read((sc)->atse_rxc_mem_res, \
187 A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_I_STATUS, \
188 "RX_EVENT", __func__, __LINE__)
190 #define ATSE_TX_STATUS_READ(sc) \
191 a_onchip_fifo_mem_core_read((sc)->atse_txc_mem_res, \
192 A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_I_STATUS, \
193 "TX_EVENT", __func__, __LINE__)
195 #define ATSE_RX_EVENT_READ(sc) \
196 a_onchip_fifo_mem_core_read((sc)->atse_rxc_mem_res, \
197 A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_EVENT, \
198 "RX_EVENT", __func__, __LINE__)
200 #define ATSE_TX_EVENT_READ(sc) \
201 a_onchip_fifo_mem_core_read((sc)->atse_txc_mem_res, \
202 A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_EVENT, \
203 "TX_EVENT", __func__, __LINE__)
205 #define ATSE_RX_EVENT_CLEAR(sc) \
209 val4 = a_onchip_fifo_mem_core_read( \
210 (sc)->atse_rxc_mem_res, \
211 A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_EVENT, \
212 "RX_EVENT", __func__, __LINE__); \
214 a_onchip_fifo_mem_core_write( \
215 (sc)->atse_rxc_mem_res, \
216 A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_EVENT, \
217 val4, "RX_EVENT", __func__, __LINE__); \
219 #define ATSE_TX_EVENT_CLEAR(sc) \
223 val4 = a_onchip_fifo_mem_core_read( \
224 (sc)->atse_txc_mem_res, \
225 A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_EVENT, \
226 "TX_EVENT", __func__, __LINE__); \
228 a_onchip_fifo_mem_core_write( \
229 (sc)->atse_txc_mem_res, \
230 A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_EVENT, \
231 val4, "TX_EVENT", __func__, __LINE__); \
234 #define ATSE_RX_EVENTS (A_ONCHIP_FIFO_MEM_CORE_INTR_FULL | \
235 A_ONCHIP_FIFO_MEM_CORE_INTR_OVERFLOW | \
236 A_ONCHIP_FIFO_MEM_CORE_INTR_UNDERFLOW)
237 #define ATSE_RX_INTR_ENABLE(sc) \
238 a_onchip_fifo_mem_core_write((sc)->atse_rxc_mem_res, \
239 A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_INT_ENABLE, \
241 "RX_INTR", __func__, __LINE__) /* XXX-BZ review later. */
242 #define ATSE_RX_INTR_DISABLE(sc) \
243 a_onchip_fifo_mem_core_write((sc)->atse_rxc_mem_res, \
244 A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_INT_ENABLE, 0, \
245 "RX_INTR", __func__, __LINE__)
246 #define ATSE_RX_INTR_READ(sc) \
247 a_onchip_fifo_mem_core_read((sc)->atse_rxc_mem_res, \
248 A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_INT_ENABLE, \
249 "RX_INTR", __func__, __LINE__)
251 #define ATSE_TX_EVENTS (A_ONCHIP_FIFO_MEM_CORE_INTR_EMPTY | \
252 A_ONCHIP_FIFO_MEM_CORE_INTR_OVERFLOW | \
253 A_ONCHIP_FIFO_MEM_CORE_INTR_UNDERFLOW)
254 #define ATSE_TX_INTR_ENABLE(sc) \
255 a_onchip_fifo_mem_core_write((sc)->atse_txc_mem_res, \
256 A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_INT_ENABLE, \
258 "TX_INTR", __func__, __LINE__) /* XXX-BZ review later. */
259 #define ATSE_TX_INTR_DISABLE(sc) \
260 a_onchip_fifo_mem_core_write((sc)->atse_txc_mem_res, \
261 A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_INT_ENABLE, 0, \
262 "TX_INTR", __func__, __LINE__)
263 #define ATSE_TX_INTR_READ(sc) \
264 a_onchip_fifo_mem_core_read((sc)->atse_txc_mem_res, \
265 A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_INT_ENABLE, \
266 "TX_INTR", __func__, __LINE__)
268 static int atse_rx_locked(struct atse_softc *sc);
271 * Register space access macros.
/*
 * MAC CSR write helper.  'reg' is a dword index, hence the '* 4'
 * conversion to a byte offset; the value is stored little endian.
 */
274 csr_write_4(struct atse_softc *sc, uint32_t reg, uint32_t val4,
275     const char *f, const int l)
278 	val4 = htole32(val4);
279 	DPRINTF("[%s:%d] CSR W %s 0x%08x (0x%08x) = 0x%08x\n", f, l,
280 	    "atse_mem_res", reg, reg * 4, val4);
281 	bus_write_4(sc->atse_mem_res, reg * 4, val4);
/*
 * MAC CSR read helper; 'reg' is a dword index, and the little-endian
 * register value is converted to host byte order.
 */
284 static inline uint32_t
285 csr_read_4(struct atse_softc *sc, uint32_t reg, const char *f, const int l)
289 	val4 = le32toh(bus_read_4(sc->atse_mem_res, reg * 4));
290 	DPRINTF("[%s:%d] CSR R %s 0x%08x (0x%08x) = 0x%08x\n", f, l,
291 	    "atse_mem_res", reg, reg * 4, val4);
297 * See page 5-2 that it's all dword offsets and the MS 16 bits need to be zero
298 * on write and ignored on read.
/*
 * 16-bit PCS/PHY register write through the 32-bit MDIO window; per the
 * note above, the upper 16 bits must be zero on write.  'bmcr' selects
 * the register bank (PCS vs. PHY base), 's' names it for the trace.
 */
301 pxx_write_2(struct atse_softc *sc, bus_addr_t bmcr, uint32_t reg, uint16_t val,
302     const char *f, const int l, const char *s)
306 	val4 = htole32(val & 0x0000ffff);
307 	DPRINTF("[%s:%d] %s W %s 0x%08x (0x%08jx) = 0x%08x\n", f, l, s,
308 	    "atse_mem_res", reg, (bmcr + reg) * 4, val4);
309 	bus_write_4(sc->atse_mem_res, (bmcr + reg) * 4, val4);
/*
 * 16-bit PCS/PHY register read through the 32-bit MDIO window; the upper
 * 16 bits are ignored on read (see the dword-offset note above).
 */
312 static inline uint16_t
313 pxx_read_2(struct atse_softc *sc, bus_addr_t bmcr, uint32_t reg, const char *f,
314     const int l, const char *s)
319 	val4 = bus_read_4(sc->atse_mem_res, (bmcr + reg) * 4);
320 	val = le32toh(val4) & 0x0000ffff;
321 	DPRINTF("[%s:%d] %s R %s 0x%08x (0x%08jx) = 0x%04x\n", f, l, s,
322 	    "atse_mem_res", reg, (bmcr + reg) * 4, val);
327 #define CSR_WRITE_4(sc, reg, val) \
328 csr_write_4((sc), (reg), (val), __func__, __LINE__)
329 #define CSR_READ_4(sc, reg) \
330 csr_read_4((sc), (reg), __func__, __LINE__)
331 #define PCS_WRITE_2(sc, reg, val) \
332 pxx_write_2((sc), sc->atse_bmcr0, (reg), (val), __func__, __LINE__, \
334 #define PCS_READ_2(sc, reg) \
335 pxx_read_2((sc), sc->atse_bmcr0, (reg), __func__, __LINE__, "PCS")
336 #define PHY_WRITE_2(sc, reg, val) \
337 pxx_write_2((sc), sc->atse_bmcr1, (reg), (val), __func__, __LINE__, \
339 #define PHY_READ_2(sc, reg) \
340 pxx_read_2((sc), sc->atse_bmcr1, (reg), __func__, __LINE__, "PHY")
342 static void atse_tick(void *);
343 static int atse_detach(device_t);
345 devclass_t atse_devclass;
/*
 * Push (part of) the current TX mbuf (sc->atse_tx_m) into the TX FIFO.
 * The packet is first flattened into sc->atse_tx_buf because the FIFO
 * only takes 32-bit words; sc->atse_tx_m_offset records how far we got
 * if the FIFO filled up, so a later call can resume mid-packet.
 * Caller must hold the driver lock.
 */
348 atse_tx_locked(struct atse_softc *sc, int *sent)
351 	uint32_t val4, fill_level;
355 	ATSE_LOCK_ASSERT(sc);
358 	KASSERT(m != NULL, ("%s: m is null: sc=%p", __func__, sc));
359 	KASSERT(m->m_flags & M_PKTHDR, ("%s: not a pkthdr: m=%p", __func__, m));
362 	 * Copy to buffer to minimize our pain as we can only store
363 	 * double words which, after the first mbuf gets out of alignment
/* Offset 0 means a fresh packet: flatten it into the bounce buffer. */
366 	if (sc->atse_tx_m_offset == 0) {
367 		m_copydata(m, 0, m->m_pkthdr.len, sc->atse_tx_buf);
368 		sc->atse_tx_buf_len = m->m_pkthdr.len;
371 	fill_level = ATSE_TX_READ_FILL_LEVEL(sc);
372 #if 0	/* Returns 0xdeadc0de. */
373 	val4 = ATSE_TX_META_READ(sc);
/* Signal start-of-packet in the FIFO metadata before the first data word. */
375 	if (sc->atse_tx_m_offset == 0) {
376 		/* Write start of packet. */
377 		val4 = A_ONCHIP_FIFO_MEM_CORE_SOP;
378 		val4 &= ~A_ONCHIP_FIFO_MEM_CORE_EOP;
379 		ATSE_TX_META_WRITE(sc, val4);
382 	/* TX FIFO is single clock mode, so we have the full FIFO. */
/*
 * Stream whole 32-bit words; the final (possibly partial) word is
 * handled below with EOP.  Only re-read the fill level when the cached
 * value says the FIFO is full, to save bus reads.
 */
384 	while ((sc->atse_tx_buf_len - sc->atse_tx_m_offset) > 4 &&
385 	    fill_level < AVALON_FIFO_TX_BASIC_OPTS_DEPTH) {
387 		bcopy(&sc->atse_tx_buf[sc->atse_tx_m_offset], &val4,
389 		ATSE_TX_DATA_WRITE(sc, val4);
390 		sc->atse_tx_m_offset += sizeof(val4);
394 		if (fill_level == AVALON_FIFO_TX_BASIC_OPTS_DEPTH)
395 			fill_level = ATSE_TX_READ_FILL_LEVEL(sc);
400 	/* Set EOP *before* writing the last symbol. */
401 	if (sc->atse_tx_m_offset >= (sc->atse_tx_buf_len - 4) &&
402 	    fill_level < AVALON_FIFO_TX_BASIC_OPTS_DEPTH) {
404 		/* Set EndOfPacket. */
405 		val4 = A_ONCHIP_FIFO_MEM_CORE_EOP;
/* The EMPTY field tells the core how many bytes of the last word are unused. */
408 		leftm = sc->atse_tx_buf_len - sc->atse_tx_m_offset;
409 		val4 |= ((4 - leftm) << A_ONCHIP_FIFO_MEM_CORE_EMPTY_SHIFT);
410 		ATSE_TX_META_WRITE(sc, val4);
412 		/* Write last symbol. */
414 		bcopy(sc->atse_tx_buf + sc->atse_tx_m_offset, &val4, leftm);
415 		ATSE_TX_DATA_WRITE(sc, val4);
420 		/* OK, the packet is gone. */
421 		sc->atse_tx_m = NULL;
422 		sc->atse_tx_m_offset = 0;
424 		/* If anyone is interested give them a copy. */
425 		BPF_MTAP(sc->atse_ifp, m);
/*
 * Drain the interface send queue into the TX FIFO.  Bails out unless the
 * interface is running, not output-active, and the link is up.  Caller
 * must hold the driver lock.
 */
435 atse_start_locked(struct ifnet *ifp)
437 	struct atse_softc *sc;
441 	ATSE_LOCK_ASSERT(sc);
443 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
444 	    IFF_DRV_RUNNING || (sc->atse_flags & ATSE_FLAGS_LINK) == 0)
449 	 * Disable the watchdog while sending, we are batching packets.
450 	 * Though we should never reach 5 seconds, and are holding the lock,
453 	sc->atse_watchdog_timer = 0;
/* Finish a partially-sent packet left over from a previously full FIFO. */
456 	if (sc->atse_tx_m != NULL) {
457 		error = atse_tx_locked(sc, &sent);
461 	/* We have more space to send so continue ... */
462 	for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
464 		IFQ_DRV_DEQUEUE(&ifp->if_snd, sc->atse_tx_m);
465 		sc->atse_tx_m_offset = 0;
466 		if (sc->atse_tx_m == NULL)
468 		error = atse_tx_locked(sc, &sent);
474 	/* If the IP core walks into Nekromanteion try to bail out. */
476 		sc->atse_watchdog_timer = ATSE_WATCHDOG_TIME;
/* ifnet if_start hook: take the driver lock and run the locked start path. */
480 atse_start(struct ifnet *ifp)
482 	struct atse_softc *sc;
486 	atse_start_locked(ifp);
/*
 * Stop the interface: cancel the tick callout and watchdog, mask and
 * clear the FIFO interrupts, then disable the MAC TX/RX datapath and
 * poll until the enable bits drop.  Caller must hold the driver lock.
 */
491 atse_stop_locked(struct atse_softc *sc)
497 	ATSE_LOCK_ASSERT(sc);
499 	sc->atse_watchdog_timer = 0;
500 	callout_stop(&sc->atse_tick);
503 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
504 	ATSE_RX_INTR_DISABLE(sc);
505 	ATSE_TX_INTR_DISABLE(sc);
506 	ATSE_RX_EVENT_CLEAR(sc);
507 	ATSE_TX_EVENT_CLEAR(sc);
509 	/* Disable MAC transmit and receive datapath. */
510 	mask = BASE_CFG_COMMAND_CONFIG_TX_ENA|BASE_CFG_COMMAND_CONFIG_RX_ENA;
511 	val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
513 	CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
514 	/* Wait for bits to be cleared; i=100 is excessive. */
515 	for (i = 0; i < 100; i++) {
516 		val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
517 		if ((val4 & mask) == 0)
521 	if ((val4 & mask) != 0)
522 		device_printf(sc->atse_dev, "Disabling MAC TX/RX timed out.\n");
/* Link state is re-discovered on the next init/tick. */
525 	sc->atse_flags &= ~ATSE_FLAGS_LINK;
527 	/* XXX-BZ free the RX/TX rings. */
/*
 * Hash an ethernet address into the multicast-hash table index: one
 * parity bit (XOR over the bits of the byte) per address byte.
 */
533 atse_mchash(struct atse_softc *sc __unused, const uint8_t *addr)
539 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
541 		for (j = 1; j < 8; j++)
542 			y ^= (addr[i] >> j) & 0x01;
/*
 * Program the receive filter: the promiscuous bit in command_config, and
 * the multicast hash table (all-ones for ALLMULTI, otherwise one parity
 * hash per joined multicast address).  Caller must hold the driver lock.
 */
550 atse_rxfilter_locked(struct atse_softc *sc)
552 	struct ifmultiaddr *ifma;
557 	/* XXX-BZ can we find out if we have the MHASH synthesized? */
558 	val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
559 	/* For simplicity always hash full 48 bits of addresses. */
560 	if ((val4 & BASE_CFG_COMMAND_CONFIG_MHASH_SEL) != 0)
561 		val4 &= ~BASE_CFG_COMMAND_CONFIG_MHASH_SEL;
564 	if (ifp->if_flags & IFF_PROMISC)
565 		val4 |= BASE_CFG_COMMAND_CONFIG_PROMIS_EN;
567 		val4 &= ~BASE_CFG_COMMAND_CONFIG_PROMIS_EN;
569 	CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
571 	if (ifp->if_flags & IFF_ALLMULTI) {
572 		/* Accept all multicast addresses. */
/*
 * NOTE(review): '<=' touches MHASH_LEN + 1 slots here and in the loop
 * below; if MHASH_LEN is the number of table entries this is an
 * off-by-one -- confirm against if_atsereg.h.
 */
573 		for (i = 0; i <= MHASH_LEN; i++)
574 			CSR_WRITE_4(sc, MHASH_START + i, 0x1);
577 	 * Can hold MHASH_LEN entries.
578 	 * XXX-BZ bitstring.h would be more general.
584 	 * Re-build and re-program hash table. First build the
585 	 * bit-field "yes" or "no" for each slot per address, then
586 	 * do all the programming afterwards.
589 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
590 		if (ifma->ifma_addr->sa_family != AF_LINK)
593 		h |= (1 << atse_mchash(sc,
594 		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr)));
596 	if_maddr_runlock(ifp);
597 	for (i = 0; i <= MHASH_LEN; i++)
598 		CSR_WRITE_4(sc, MHASH_START + i,
599 		    (h & (1 << i)) ? 0x01 : 0x00);
/*
 * Read the Altera "option bits" area (which carries the factory ethernet
 * address) from the parent bus's CFI flash child into the global
 * atse_ethernet_option_bits[] buffer.  Guarded by the global flag so the
 * flash is only read once across all instances.
 */
606 atse_ethernet_option_bits_read_fdt(device_t dev)
608 	struct resource *res;
612 	if (atse_ethernet_option_bits_flag & ATSE_ETHERNET_OPTION_BITS_READ)
615 	fdev = device_find_child(device_get_parent(dev), "cfi", 0);
620 	res = bus_alloc_resource_any(fdev, SYS_RES_MEMORY, &rid,
621 	    RF_ACTIVE | RF_SHAREABLE);
625 	for (i = 0; i < ALTERA_ETHERNET_OPTION_BITS_LEN; i++)
626 		atse_ethernet_option_bits[i] = bus_read_1(res,
627 		    ALTERA_ETHERNET_OPTION_BITS_OFF + i);
629 	bus_release_resource(fdev, SYS_RES_MEMORY, rid, res);
630 	atse_ethernet_option_bits_flag |= ATSE_ETHERNET_OPTION_BITS_READ;
/* Wrapper around the FDT/flash option-bits read; warns if it fails. */
636 atse_ethernet_option_bits_read(device_t dev)
640 	error = atse_ethernet_option_bits_read_fdt(dev);
644 	device_printf(dev, "Cannot read Ethernet addresses from flash.\n");
/*
 * Determine the ethernet hardware address: prefer the flash option bits
 * (validated by the 0x00005afe magic and several blacklist checks, with a
 * per-unit adjustment of the low nibble for multi-MAC boards); otherwise
 * fall back to a locally-administered address derived from the hostid, or
 * a random one.  Runs only once -- a non-zero OUI means it was done.
 */
650 atse_get_eth_address(struct atse_softc *sc)
652 	unsigned long hostid;
657 	 * Make sure to only ever do this once. Otherwise a reset would
658 	 * possibly change our ethernet address, which is not good at all.
660 	if (sc->atse_eth_addr[0] != 0x00 || sc->atse_eth_addr[1] != 0x00 ||
661 	    sc->atse_eth_addr[2] != 0x00)
664 	if ((atse_ethernet_option_bits_flag &
665 	    ATSE_ETHERNET_OPTION_BITS_READ) == 0)
/* First four option bytes must carry the 0x00005afe magic. */
668 	val4 = atse_ethernet_option_bits[0] << 24;
669 	val4 |= atse_ethernet_option_bits[1] << 16;
670 	val4 |= atse_ethernet_option_bits[2] << 8;
671 	val4 |= atse_ethernet_option_bits[3];
672 	/* They chose "safe". */
673 	if (val4 != le32toh(0x00005afe)) {
674 		device_printf(sc->atse_dev, "Magic '5afe' is not safe: 0x%08x. "
675 		    "Falling back to random numbers for hardware address.\n",
680 	sc->atse_eth_addr[0] = atse_ethernet_option_bits[4];
681 	sc->atse_eth_addr[1] = atse_ethernet_option_bits[5];
682 	sc->atse_eth_addr[2] = atse_ethernet_option_bits[6];
683 	sc->atse_eth_addr[3] = atse_ethernet_option_bits[7];
684 	sc->atse_eth_addr[4] = atse_ethernet_option_bits[8];
685 	sc->atse_eth_addr[5] = atse_ethernet_option_bits[9];
687 	/* Handle factory default ethernet address: 00:07:ed:ff:ed:15 */
688 	if (sc->atse_eth_addr[0] == 0x00 && sc->atse_eth_addr[1] == 0x07 &&
689 	    sc->atse_eth_addr[2] == 0xed && sc->atse_eth_addr[3] == 0xff &&
690 	    sc->atse_eth_addr[4] == 0xed && sc->atse_eth_addr[5] == 0x15) {
692 		device_printf(sc->atse_dev, "Factory programmed Ethernet "
693 		    "hardware address blacklisted. Falling back to random "
694 		    "address to avoid collisions.\n");
695 		device_printf(sc->atse_dev, "Please re-program your flash.\n");
699 	if (sc->atse_eth_addr[0] == 0x00 && sc->atse_eth_addr[1] == 0x00 &&
700 	    sc->atse_eth_addr[2] == 0x00 && sc->atse_eth_addr[3] == 0x00 &&
701 	    sc->atse_eth_addr[4] == 0x00 && sc->atse_eth_addr[5] == 0x00) {
702 		device_printf(sc->atse_dev, "All zero's Ethernet hardware "
703 		    "address blacklisted. Falling back to random address.\n");
704 		device_printf(sc->atse_dev, "Please re-program your flash.\n");
708 	if (ETHER_IS_MULTICAST(sc->atse_eth_addr)) {
709 		device_printf(sc->atse_dev, "Multicast Ethernet hardware "
710 		    "address blacklisted. Falling back to random address.\n");
711 		device_printf(sc->atse_dev, "Please re-program your flash.\n");
716 	 * If we find an Altera prefixed address with a 0x0 ending
717 	 * adjust by device unit. If not and this is not the first
718 	 * Ethernet, go to random.
720 	unit = device_get_unit(sc->atse_dev);
/* NOTE(review): "hadware" in the two user-visible messages below is a typo
 * for "hardware"; fixing it is a string (code) change, not done here. */
725 	device_printf(sc->atse_dev, "We do not support Ethernet "
726 	    "addresses for more than 16 MACs. Falling back to "
727 	    "random hadware address.\n");
730 	if ((sc->atse_eth_addr[0] & ~0x2) != 0 ||
731 	    sc->atse_eth_addr[1] != 0x07 || sc->atse_eth_addr[2] != 0xed ||
732 	    (sc->atse_eth_addr[5] & 0x0f) != 0x0) {
733 		device_printf(sc->atse_dev, "Ethernet address not meeting our "
734 		    "multi-MAC standards. Falling back to random hadware "
/* Per-unit low nibble so each MAC on the board gets a distinct address. */
738 	sc->atse_eth_addr[5] |= (unit & 0x0f);
744 	 * Fall back to random code we also use on bridge(4).
746 	getcredhostid(curthread->td_ucred, &hostid);
748 	arc4rand(sc->atse_eth_addr, ETHER_ADDR_LEN, 1);
749 	sc->atse_eth_addr[0] &= ~1;/* clear multicast bit */
750 	sc->atse_eth_addr[0] |= 2; /* set the LAA bit */
/* Non-random variant: locally-administered address derived from hostid. */
752 	sc->atse_eth_addr[0] = 0x2;
753 	sc->atse_eth_addr[1] = (hostid >> 24) & 0xff;
754 	sc->atse_eth_addr[2] = (hostid >> 16) & 0xff;
755 	sc->atse_eth_addr[3] = (hostid >> 8 ) & 0xff;
756 	sc->atse_eth_addr[4] = hostid & 0xff;
757 	sc->atse_eth_addr[5] = sc->atse_unit & 0xff;
/*
 * Program the current ethernet address into the MAC address registers
 * selected by the 'n' bitmask (ATSE_ETH_ADDR_DEF for the primary slot,
 * ATSE_ETH_ADDR_SUPP1..4 for the supplementary slots).  Bytes 0-3 are
 * packed into v0 (byte 0 in the low octet), bytes 4-5 into v1.
 */
764 atse_set_eth_address(struct atse_softc *sc, int n)
768 	v0 = (sc->atse_eth_addr[3] << 24) | (sc->atse_eth_addr[2] << 16) |
769 	    (sc->atse_eth_addr[1] << 8) | sc->atse_eth_addr[0];
770 	v1 = (sc->atse_eth_addr[5] << 8) | sc->atse_eth_addr[4];
772 	if (n & ATSE_ETH_ADDR_DEF) {
773 		CSR_WRITE_4(sc, BASE_CFG_MAC_0, v0);
774 		CSR_WRITE_4(sc, BASE_CFG_MAC_1, v1);
776 	if (n & ATSE_ETH_ADDR_SUPP1) {
777 		CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_0_0, v0);
778 		CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_0_1, v1);
780 	if (n & ATSE_ETH_ADDR_SUPP2) {
781 		CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_1_0, v0);
782 		CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_1_1, v1);
784 	if (n & ATSE_ETH_ADDR_SUPP3) {
785 		CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_2_0, v0);
786 		CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_2_1, v1);
788 	if (n & ATSE_ETH_ADDR_SUPP4) {
789 		CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_3_0, v0);
790 		CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_3_1, v1);
/*
 * Full PHY/PCS/MAC bring-up per the TSE MegaCore manual's init sequence:
 * PCS link timer + SGMII + autoneg, PCS reset, MAC datapath disable, FIFO
 * thresholds, MAC address, function configuration, shift16/CRC options,
 * MAC software reset, and finally TX/RX datapath enable.
 */
797 atse_reset(struct atse_softc *sc)
803 	/* 1. External PHY Initialization using MDIO. */
805 	 * We select the right MDIO space in atse_attach() and let MII do
809 	/* 2. PCS Configuration Register Initialization. */
810 	/* a. Set auto negotiation link timer to 1.6ms for SGMII. */
811 	PCS_WRITE_2(sc, PCS_EXT_LINK_TIMER_0, 0x0D40);
812 	PCS_WRITE_2(sc, PCS_EXT_LINK_TIMER_1, 0x0003);
814 	/* b. Configure SGMII. */
815 	val = PCS_EXT_IF_MODE_SGMII_ENA|PCS_EXT_IF_MODE_USE_SGMII_AN;
816 	PCS_WRITE_2(sc, PCS_EXT_IF_MODE, val);
818 	/* c. Enable auto negotiation. */
819 	/* Ignore Bits 6,8,13; should be set,set,unset. */
820 	val = PCS_READ_2(sc, PCS_CONTROL);
821 	val &= ~(PCS_CONTROL_ISOLATE|PCS_CONTROL_POWERDOWN);
822 	val &= ~PCS_CONTROL_LOOPBACK; /* Make this a -link1 option? */
823 	val |= PCS_CONTROL_AUTO_NEGOTIATION_ENABLE;
824 	PCS_WRITE_2(sc, PCS_CONTROL, val);
/* d. PCS software reset; self-clearing bit. */
827 	val = PCS_READ_2(sc, PCS_CONTROL);
828 	val |= PCS_CONTROL_RESET;
829 	PCS_WRITE_2(sc, PCS_CONTROL, val);
831 	/* Wait for reset bit to clear; i=100 is excessive. */
832 	for (i = 0; i < 100; i++) {
833 		val = PCS_READ_2(sc, PCS_CONTROL);
834 		if ((val & PCS_CONTROL_RESET) == 0)
839 	if ((val & PCS_CONTROL_RESET) != 0) {
840 		device_printf(sc->atse_dev, "PCS reset timed out.\n");
844 	/* 3. MAC Configuration Register Initialization. */
845 	/* a. Disable MAC transmit and receive datapath. */
846 	mask = BASE_CFG_COMMAND_CONFIG_TX_ENA|BASE_CFG_COMMAND_CONFIG_RX_ENA;
847 	val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
849 	/* Samples in the manual do have the SW_RESET bit set here, why? */
850 	CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
851 	/* Wait for bits to be cleared; i=100 is excessive. */
852 	for (i = 0; i < 100; i++) {
853 		val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
854 		if ((val4 & mask) == 0)
858 	if ((val4 & mask) != 0) {
859 		device_printf(sc->atse_dev, "Disabling MAC TX/RX timed out.\n");
862 	/* b. MAC FIFO configuration. */
863 	CSR_WRITE_4(sc, BASE_CFG_TX_SECTION_EMPTY, FIFO_DEPTH_TX - 16);
864 	CSR_WRITE_4(sc, BASE_CFG_TX_ALMOST_FULL, 3);
865 	CSR_WRITE_4(sc, BASE_CFG_TX_ALMOST_EMPTY, 8);
866 	CSR_WRITE_4(sc, BASE_CFG_RX_SECTION_EMPTY, FIFO_DEPTH_RX - 16);
867 	CSR_WRITE_4(sc, BASE_CFG_RX_ALMOST_FULL, 8);
868 	CSR_WRITE_4(sc, BASE_CFG_RX_ALMOST_EMPTY, 8);
870 	CSR_WRITE_4(sc, BASE_CFG_TX_SECTION_FULL, 16);
871 	CSR_WRITE_4(sc, BASE_CFG_RX_SECTION_FULL, 16);
873 	/* For store-and-forward mode, set this threshold to 0. */
874 	CSR_WRITE_4(sc, BASE_CFG_TX_SECTION_FULL, 0);
875 	CSR_WRITE_4(sc, BASE_CFG_RX_SECTION_FULL, 0);
877 	/* c. MAC address configuration. */
878 	/* Also initialize supplementary addresses to our primary one. */
879 	/* XXX-BZ FreeBSD really needs to grow an API for using these. */
880 	atse_get_eth_address(sc);
881 	atse_set_eth_address(sc, ATSE_ETH_ADDR_ALL);
883 	/* d. MAC function configuration. */
884 	CSR_WRITE_4(sc, BASE_CFG_FRM_LENGTH, 1518); /* Default. */
885 	CSR_WRITE_4(sc, BASE_CFG_TX_IPG_LENGTH, 12);
886 	CSR_WRITE_4(sc, BASE_CFG_PAUSE_QUANT, 0xFFFF);
888 	val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
890 	 * If 1000BASE-X/SGMII PCS is initialized, set the ETH_SPEED (bit 3)
891 	 * and ENA_10 (bit 25) in command_config register to 0. If half duplex
892 	 * is reported in the PHY/PCS status register, set the HD_ENA (bit 10)
893 	 * to 1 in command_config register.
894 	 * BZ: We shoot for 1000 instead.
897 	val4 |= BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
899 	val4 &= ~BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
901 	val4 &= ~BASE_CFG_COMMAND_CONFIG_ENA_10;
904 	 * We do not want to set this, otherwise, we could not even send
905 	 * random raw ethernet frames for various other research. By default
906 	 * FreeBSD will use the right ether source address.
/*
 * NOTE(review): the comment above says TX_ADDR_INS must NOT be set, yet
 * the next line sets it.  This line is presumably meant to be disabled
 * (e.g. inside an '#if 0' guard lost from this copy) -- verify.
 */
908 	val4 |= BASE_CFG_COMMAND_CONFIG_TX_ADDR_INS;
910 	val4 |= BASE_CFG_COMMAND_CONFIG_PAD_EN;
911 	val4 &= ~BASE_CFG_COMMAND_CONFIG_CRC_FWD;
913 	val4 |= BASE_CFG_COMMAND_CONFIG_CNTL_FRM_ENA;
916 	val4 |= BASE_CFG_COMMAND_CONFIG_RX_ERR_DISC;
/*
 * XXX(review): 'val' here is the 16-bit PCS scratch variable; this almost
 * certainly should be 'val4' -- as written, LOOP_ENA is never cleared in
 * the command_config value written below.
 */
918 	val &= ~BASE_CFG_COMMAND_CONFIG_LOOP_ENA; /* link0? */
919 	CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
922 	 * Make sure we do not enable 32bit alignment; FreeBSD cannot
923 	 * cope with the additional padding (though we should!?).
924 	 * Also make sure we get the CRC appended.
926 	val4 = CSR_READ_4(sc, TX_CMD_STAT);
927 	val4 &= ~(TX_CMD_STAT_OMIT_CRC|TX_CMD_STAT_TX_SHIFT16);
928 	CSR_WRITE_4(sc, TX_CMD_STAT, val4);
929 	val4 = CSR_READ_4(sc, RX_CMD_STAT);
930 	val4 &= ~RX_CMD_STAT_RX_SHIFT16;
931 	CSR_WRITE_4(sc, RX_CMD_STAT, val4);
/* e. MAC software reset; the bit self-clears when done. */
934 	val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
935 	val4 |= BASE_CFG_COMMAND_CONFIG_SW_RESET;
936 	CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
937 	/* Wait for bits to be cleared; i=100 is excessive. */
938 	for (i = 0; i < 100; i++) {
939 		val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
940 		if ((val4 & BASE_CFG_COMMAND_CONFIG_SW_RESET) == 0)
944 	if ((val4 & BASE_CFG_COMMAND_CONFIG_SW_RESET) != 0) {
945 		device_printf(sc->atse_dev, "MAC reset timed out.\n");
949 	/* f. Enable MAC transmit and receive datapath. */
950 	mask = BASE_CFG_COMMAND_CONFIG_TX_ENA|BASE_CFG_COMMAND_CONFIG_RX_ENA;
951 	val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
953 	CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
954 	/* Wait for bits to be set; i=100 is excessive. */
955 	for (i = 0; i < 100; i++) {
956 		val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
957 		if ((val4 & mask) == mask)
961 	if ((val4 & mask) != mask) {
962 		device_printf(sc->atse_dev, "Enabling MAC TX/RX timed out.\n");
/*
 * (Re)initialize the interface: refresh the MAC address from the ifnet,
 * stop and reset the hardware, reprogram the RX filter, set up interrupt
 * vs. polling mode, kick MII, mark the interface running, and start the
 * tick callout.  Caller must hold the driver lock.
 */
970 atse_init_locked(struct atse_softc *sc)
973 	struct mii_data *mii;
976 	ATSE_LOCK_ASSERT(sc);
979 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
983 	 * Must update the ether address if changed. Given we do not handle
984 	 * in atse_ioctl() but it's in the general framework, just always
985 	 * do it here before atse_reset().
987 	eaddr = IF_LLADDR(sc->atse_ifp);
988 	bcopy(eaddr, &sc->atse_eth_addr, ETHER_ADDR_LEN);
990 	/* Make things friendly to halt, cleanup, ... */
991 	atse_stop_locked(sc);
995 	/* ... and fire up the engine again. */
996 	atse_rxfilter_locked(sc);
998 	/* Memory rings? DMA engine? */
1000 	sc->atse_rx_buf_len = 0;
1001 	sc->atse_flags &= ATSE_FLAGS_LINK; /* Preserve. */
1003 #ifdef DEVICE_POLLING
1004 	/* Only enable interrupts if we are not polling. */
1005 	if (ifp->if_capenable & IFCAP_POLLING) {
1006 		ATSE_RX_INTR_DISABLE(sc);
1007 		ATSE_TX_INTR_DISABLE(sc);
1008 		ATSE_RX_EVENT_CLEAR(sc);
1009 		ATSE_TX_EVENT_CLEAR(sc);
1013 		ATSE_RX_INTR_ENABLE(sc);
1014 		ATSE_TX_INTR_ENABLE(sc);
1017 	mii = device_get_softc(sc->atse_miibus);
/* Link state will be re-learned via MII; clear it until then. */
1019 	sc->atse_flags &= ~ATSE_FLAGS_LINK;
1022 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1023 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1025 	callout_reset(&sc->atse_tick, hz, atse_tick, sc);
/* ifnet if_init hook: take the driver lock and run the locked init path. */
1029 atse_init(void *xsc)
1031 	struct atse_softc *sc;
1034 	 * XXXRW: There is some argument that we should immediately do RX
1035 	 * processing after enabling interrupts, or one may not fire if there
1036 	 * are buffered packets.
1038 	sc = (struct atse_softc *)xsc;
1040 	atse_init_locked(sc);
/*
 * ifnet ioctl handler: interface flag changes (up/down, PROMISC/ALLMULTI),
 * capability toggles (DEVICE_POLLING), multicast list updates, and media
 * requests forwarded to MII; everything else falls through to
 * ether_ioctl().
 */
1045 atse_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1047 	struct atse_softc *sc;
1053 	ifr = (struct ifreq *)data;
1058 		if (ifp->if_flags & IFF_UP) {
/* Already running: only reprogram the filter if PROMISC/ALLMULTI changed. */
1059 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
1060 			    ((ifp->if_flags ^ sc->atse_if_flags) &
1061 			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
1062 				atse_rxfilter_locked(sc);
1064 				atse_init_locked(sc);
1065 		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1066 			atse_stop_locked(sc);
1067 		sc->atse_if_flags = ifp->if_flags;
1072 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1073 #ifdef DEVICE_POLLING
1074 		if ((mask & IFCAP_POLLING) != 0 &&
1075 		    (IFCAP_POLLING & ifp->if_capabilities) != 0) {
1076 			ifp->if_capenable ^= IFCAP_POLLING;
1077 			if ((IFCAP_POLLING & ifp->if_capenable) != 0) {
1079 				error = ether_poll_register(atse_poll, ifp);
1084 				/* Disable interrupts. */
1085 				ATSE_RX_INTR_DISABLE(sc);
1086 				ATSE_TX_INTR_DISABLE(sc);
1087 				ATSE_RX_EVENT_CLEAR(sc);
1088 				ATSE_TX_EVENT_CLEAR(sc);
1091 			 * Do not allow disabling of polling if we do
1092 			 * not have interrupts.
1094 			} else if (sc->atse_rx_irq_res != NULL ||
1095 			    sc->atse_tx_irq_res != NULL) {
1096 				error = ether_poll_deregister(ifp);
1097 				/* Enable interrupts. */
1098 				ATSE_RX_INTR_ENABLE(sc);
1099 				ATSE_TX_INTR_ENABLE(sc);
1101 				ifp->if_capenable ^= IFCAP_POLLING;
1105 #endif /* DEVICE_POLLING */
1111 		atse_rxfilter_locked(sc);
1117 		struct mii_data *mii;
1120 		mii = device_get_softc(sc->atse_miibus);
1121 		ifr = (struct ifreq *)data;
1122 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1126 		error = ether_ioctl(ifp, command, data);
/*
 * Dump RX/TX FIFO status, latched events, interrupt enables, and fill
 * levels for debugging; no-op unless debug.atse_intr_debug_enable is set.
 */
1134 atse_intr_debug(struct atse_softc *sc, const char *intrname)
1136 	uint32_t rxs, rxe, rxi, rxf, txs, txe, txi, txf;
1138 	if (!atse_intr_debug_enable)
1141 	rxs = ATSE_RX_STATUS_READ(sc);
1142 	rxe = ATSE_RX_EVENT_READ(sc);
1143 	rxi = ATSE_RX_INTR_READ(sc);
1144 	rxf = ATSE_RX_READ_FILL_LEVEL(sc);
1146 	txs = ATSE_TX_STATUS_READ(sc);
1147 	txe = ATSE_TX_EVENT_READ(sc);
1148 	txi = ATSE_TX_INTR_READ(sc);
1149 	txf = ATSE_TX_READ_FILL_LEVEL(sc);
1153 	    "rxs 0x%x rxe 0x%x rxi 0x%x rxf 0x%x "
1154 	    "txs 0x%x txe 0x%x txi 0x%x txf 0x%x\n",
1157 	    txs, txe, txi, txf);
/*
 * TX watchdog, driven from atse_tick(): when the timer expires, log it,
 * count an output error, reinitialize the interface, and restart
 * transmission if packets are still queued.  Caller holds the lock.
 */
1161 atse_watchdog(struct atse_softc *sc)
1164 	ATSE_LOCK_ASSERT(sc);
1166 	if (sc->atse_watchdog_timer == 0 || --sc->atse_watchdog_timer > 0)
1169 	device_printf(sc->atse_dev, "watchdog timeout\n");
1170 	if_inc_counter(sc->atse_ifp, IFCOUNTER_OERRORS, 1);
1172 	atse_intr_debug(sc, "poll");
/* Force a full reinit; clearing RUNNING makes atse_init_locked() proceed. */
1174 	sc->atse_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1175 	atse_init_locked(sc);
1178 	if (!IFQ_DRV_IS_EMPTY(&sc->atse_ifp->if_snd))
1179 		atse_start_locked(sc->atse_ifp);
/*
 * One-second callout: polls MII, pokes the link-state handler while the
 * link is still down, and re-arms itself.
 */
1183 atse_tick(void *xsc)
1185 	struct atse_softc *sc;
1186 	struct mii_data *mii;
1189 	sc = (struct atse_softc *)xsc;
1190 	ATSE_LOCK_ASSERT(sc);
1193 	mii = device_get_softc(sc->atse_miibus);
1196 	if ((sc->atse_flags & ATSE_FLAGS_LINK) == 0)
1197 		atse_miibus_statchg(sc->atse_dev);
1198 	callout_reset(&sc->atse_tick, hz, atse_tick, sc);
1202  * Set media options: reset all attached PHY instances, then hand the
1202  * media change off to MII.
1205 atse_ifmedia_upd(struct ifnet *ifp)
1207 	struct atse_softc *sc;
1208 	struct mii_data *mii;
1209 	struct mii_softc *miisc;
1215 	mii = device_get_softc(sc->atse_miibus);
1216 	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1218 	error = mii_mediachg(mii);
/*
 * Bump the per-bit RX error counter for each error flagged in 'mask'.
 */
1225 atse_update_rx_err(struct atse_softc *sc, uint32_t mask)
1229 	/* RX errors are 6 bits, we only know 4 of them. */
1230 	for (i = 0; i < ATSE_RX_ERR_MAX; i++)
1231 		if ((mask & (1 << i)) != 0)
1232 			sc->atse_rx_err[i]++;
/*
 * Core receive path, called with the softc lock held.  Drains the RX
 * FIFO one 32-bit word at a time: for each word it reads data first,
 * then the meta word (the FIFO requires that order — see the XXX-BZ
 * note below), handles error/SOP/EOP framing bits, accumulates the
 * packet into a single mbuf cluster (sc->atse_rx_m), and on EOP hands
 * the completed mbuf to (*ifp->if_input)().  Under DEVICE_POLLING the
 * sc->atse_rx_cycles budget bounds how much work one call may do.
 *
 * NOTE(review): this extract has many dropped lines (the embedded
 * numbering jumps repeatedly), so loop braces, error `continue`s,
 * the "outer" label, the unlock around if_input, and the return value
 * (packet count, per the atse_poll caller) are not all visible here.
 */
1236 atse_rx_locked(struct atse_softc *sc)
1238 uint32_t fill, i, j;
1239 uint32_t data, meta;
1244 ATSE_LOCK_ASSERT(sc);
/* Lazily allocate a fresh cluster mbuf for the next packet. */
1252 if (sc->atse_rx_m == NULL) {
1253 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1256 m->m_len = m->m_pkthdr.len = MCLBYTES;
1257 /* Make sure upper layers will be aligned. */
1258 m_adj(m, ETHER_ALIGN);
/* Drain up to 'fill' words currently reported in the RX FIFO. */
1262 fill = ATSE_RX_READ_FILL_LEVEL(sc);
1263 for (i = 0; i < fill; i++) {
1265 * XXX-BZ for whatever reason the FIFO requires the
1266 * the data read before we can access the meta data.
1268 data = ATSE_RX_DATA_READ(sc);
1269 meta = ATSE_RX_META_READ(sc);
/* Hardware flagged an error on this word: account it and poison the
 * in-progress packet so it is dropped at EOP. */
1270 if (meta & A_ONCHIP_FIFO_MEM_CORE_ERROR_MASK) {
1271 /* XXX-BZ evaluate error. */
1272 atse_update_rx_err(sc, ((meta &
1273 A_ONCHIP_FIFO_MEM_CORE_ERROR_MASK) >>
1274 A_ONCHIP_FIFO_MEM_CORE_ERROR_SHIFT) & 0xff);
1275 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1276 sc->atse_rx_buf_len = 0;
1278 * Should still read till EOP or next SOP.
1280 * XXX-BZ might also depend on
1281 * BASE_CFG_COMMAND_CONFIG_RX_ERR_DISC
1283 sc->atse_flags |= ATSE_FLAGS_ERROR;
/* Only channel 0 is expected; anything else is logged. */
1286 if ((meta & A_ONCHIP_FIFO_MEM_CORE_CHANNEL_MASK) != 0)
1287 device_printf(sc->atse_dev, "%s: unexpected "
1288 "channel %u\n", __func__, (meta &
1289 A_ONCHIP_FIFO_MEM_CORE_CHANNEL_MASK) >>
1290 A_ONCHIP_FIFO_MEM_CORE_CHANNEL_SHIFT);
/* Start-of-packet: reset the reassembly state.  A SOP while a
 * partially-filled buffer exists without a prior SOP is an error. */
1292 if (meta & A_ONCHIP_FIFO_MEM_CORE_SOP) {
1294 * There is no need to clear SOP between 1st
1295 * and subsequent packet data junks.
1297 if (sc->atse_rx_buf_len != 0 &&
1298 (sc->atse_flags & ATSE_FLAGS_SOP_SEEN) == 0)
1300 device_printf(sc->atse_dev, "%s: SOP "
1301 "without empty buffer: %u\n",
1302 __func__, sc->atse_rx_buf_len);
1303 /* XXX-BZ any better counter? */
1304 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1307 if ((sc->atse_flags & ATSE_FLAGS_SOP_SEEN) == 0)
1309 sc->atse_flags |= ATSE_FLAGS_SOP_SEEN;
1310 sc->atse_rx_buf_len = 0;
1313 #if 0 /* We had to read the data before we could access meta data. See above. */
1314 data = ATSE_RX_DATA_READ(sc);
1316 /* Make sure to not overflow the mbuf data size. */
1317 if (sc->atse_rx_buf_len >= sc->atse_rx_m->m_len -
1320 * XXX-BZ Error. We need more mbufs and are
1321 * not setup for this yet.
1323 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1324 sc->atse_flags |= ATSE_FLAGS_ERROR;
/* Copy this 32-bit word into the mbuf unless the packet is poisoned. */
1326 if ((sc->atse_flags & ATSE_FLAGS_ERROR) == 0)
1328 * MUST keep this bcopy as m_data after m_adj
1329 * for IP header aligment is on half-word
1330 * and not word alignment.
1332 bcopy(&data, (uint8_t *)(sc->atse_rx_m->m_data +
1333 sc->atse_rx_buf_len), sizeof(data));
/* End-of-packet: the EMPTY field says how many bytes of the final
 * word are unused; adjust length, finish the mbuf, and pass it up
 * (or drop it if the error flag was set along the way). */
1334 if (meta & A_ONCHIP_FIFO_MEM_CORE_EOP) {
1338 A_ONCHIP_FIFO_MEM_CORE_EMPTY_MASK) >>
1339 A_ONCHIP_FIFO_MEM_CORE_EMPTY_SHIFT;
1340 sc->atse_rx_buf_len += (4 - empty);
1342 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
1346 m->m_pkthdr.len = m->m_len =
1347 sc->atse_rx_buf_len;
1348 sc->atse_rx_m = NULL;
1350 sc->atse_rx_buf_len = 0;
1351 sc->atse_flags &= ~ATSE_FLAGS_SOP_SEEN;
1352 if (sc->atse_flags & ATSE_FLAGS_ERROR) {
1353 sc->atse_flags &= ~ATSE_FLAGS_ERROR;
1356 m->m_pkthdr.rcvif = ifp;
1358 (*ifp->if_input)(ifp, m);
/* Polling mode: honor the per-call work budget. */
1361 #ifdef DEVICE_POLLING
1362 if (ifp->if_capenable & IFCAP_POLLING) {
1363 if (sc->atse_rx_cycles <= 0)
1365 sc->atse_rx_cycles--;
1368 goto outer; /* Need a new mbuf. */
1370 sc->atse_rx_buf_len += sizeof(data);
1374 /* XXX-BZ could optimize in case of another packet waiting. */
1382 * Report current media status.
/*
 * ifmedia "status" callback: report the current media word and link
 * status from the MII layer into 'ifmr'.  (Lock acquisition around
 * mii_pollstat, if any, is in lines missing from this extract.)
 */
1385 atse_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1387 struct atse_softc *sc;
1388 struct mii_data *mii;
1393 mii = device_get_softc(sc->atse_miibus);
1395 ifmr->ifm_active = mii->mii_media_active;
1396 ifmr->ifm_status = mii->mii_media_status;
/*
 * RX FIFO interrupt handler.  Skips work when DEVICE_POLLING has taken
 * over the interface, accounts FIFO over/underflow events, then runs
 * the receive loop inside the race-free disable/process/clear/enable
 * sequence described in the long comment below, repeating until the
 * FIFO reads empty.
 */
1401 atse_rx_intr(void *arg)
1403 struct atse_softc *sc;
1407 sc = (struct atse_softc *)arg;
/* Polling owns the device; interrupt work is skipped. */
1411 #ifdef DEVICE_POLLING
1412 if (ifp->if_capenable & IFCAP_POLLING) {
1418 atse_intr_debug(sc, "rx");
/* Account FIFO over-/underflow as input errors. */
1419 rxe = ATSE_RX_EVENT_READ(sc);
1420 if (rxe & (A_ONCHIP_FIFO_MEM_CORE_EVENT_OVERFLOW|
1421 A_ONCHIP_FIFO_MEM_CORE_EVENT_UNDERFLOW)) {
1422 /* XXX-BZ ERROR HANDLING. */
1423 atse_update_rx_err(sc, ((rxe &
1424 A_ONCHIP_FIFO_MEM_CORE_ERROR_MASK) >>
1425 A_ONCHIP_FIFO_MEM_CORE_ERROR_SHIFT) & 0xff);
1426 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1430 * There is considerable subtlety in the race-free handling of rx
1431 * interrupts: we must disable interrupts whenever we manipulate the
1432 * FIFO to prevent further interrupts from firing before we are done;
1433 * we must clear the event after processing to prevent the event from
1434 * being immediately reposted due to data remaining; we must clear the
1435 * event mask before reenabling interrupts or risk missing a positive
1436 * edge; and we must recheck everything after completing in case the
1437 * event posted between clearing events and reenabling interrupts. If
1438 * a race is experienced, we must restart the whole mechanism.
1441 ATSE_RX_INTR_DISABLE(sc);
/* Bound the work done per interrupt. */
1443 sc->atse_rx_cycles = RX_CYCLES_IN_INTR;
1446 ATSE_RX_EVENT_CLEAR(sc);
1448 /* Disable interrupts if interface is down. */
1449 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1450 ATSE_RX_INTR_ENABLE(sc);
/* Repeat until the RX FIFO is empty, closing the clear/enable race. */
1451 } while (!(ATSE_RX_STATUS_READ(sc) &
1452 A_ONCHIP_FIFO_MEM_CORE_STATUS_EMPTY));
/*
 * TX FIFO interrupt handler.  Skips work when DEVICE_POLLING has taken
 * over, accounts FIFO over/underflow as output errors, then — with TX
 * interrupts disabled per the race discussion below — disarms the
 * watchdog, pushes queued packets via atse_start_locked(), clears the
 * event and re-enables interrupts, repeating while packets are pending
 * and the FIFO is not full.
 */
1458 atse_tx_intr(void *arg)
1460 struct atse_softc *sc;
1464 sc = (struct atse_softc *)arg;
/* Polling owns the device; interrupt work is skipped. */
1468 #ifdef DEVICE_POLLING
1469 if (ifp->if_capenable & IFCAP_POLLING) {
1475 /* XXX-BZ build histogram. */
1476 atse_intr_debug(sc, "tx");
1477 txe = ATSE_TX_EVENT_READ(sc);
1478 if (txe & (A_ONCHIP_FIFO_MEM_CORE_EVENT_OVERFLOW|
1479 A_ONCHIP_FIFO_MEM_CORE_EVENT_UNDERFLOW)) {
1480 /* XXX-BZ ERROR HANDLING. */
1481 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1485 * There is also considerable subtlety in the race-free handling of
1486 * tx interrupts: all processing occurs with interrupts disabled to
1487 * prevent spurious refiring while transmit is in progress (which
1488 * could occur if the FIFO drains while sending -- quite likely); we
1489 * must not clear the event mask until after we've sent, also to
1490 * prevent spurious refiring; once we've cleared the event mask we can
1491 * reenable interrupts, but there is a possible race between clear and
1492 * enable, so we must recheck and potentially repeat the whole process
1493 * if it is detected.
1496 ATSE_TX_INTR_DISABLE(sc);
/* FIFO drained enough to take more data: disarm watchdog and send. */
1497 sc->atse_watchdog_timer = 0;
1498 atse_start_locked(ifp);
1499 ATSE_TX_EVENT_CLEAR(sc);
1501 /* Disable interrupts if interface is down. */
1502 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1503 ATSE_TX_INTR_ENABLE(sc);
/* Loop while more TX is queued and the FIFO still has room. */
1504 } while (ATSE_TX_PENDING(sc) &&
1505 !(ATSE_TX_STATUS_READ(sc) & A_ONCHIP_FIFO_MEM_CORE_STATUS_FULL));
1509 #ifdef DEVICE_POLLING
/*
 * DEVICE_POLLING entry point: with the interface running, do one
 * bounded RX pass (budget = 'count'), kick TX, and — on the RX budget
 * running out or POLL_AND_CHECK_STATUS — check the FIFO event
 * registers for over/underflow, account errors, and disarm the TX
 * watchdog when the TX FIFO has fully drained.
 *
 * NOTE(review): lines are missing from this extract; the locking,
 * the rx_npkts return, and the preprocessor conditional that must
 * surround the deliberately-empty "Severe error" if() at 1550 are
 * not visible here — that line cannot compile as shown, so the
 * dropped lines presumably include an enclosing #if 0.
 */
1511 atse_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1513 struct atse_softc *sc;
1518 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
/* One bounded RX pass, then push any queued TX. */
1523 sc->atse_rx_cycles = count;
1524 rx_npkts = atse_rx_locked(sc);
1525 atse_start_locked(ifp);
1527 if (sc->atse_rx_cycles > 0 || cmd == POLL_AND_CHECK_STATUS) {
1530 rx = ATSE_RX_EVENT_READ(sc);
1531 tx = ATSE_TX_EVENT_READ(sc);
1533 if (rx & (A_ONCHIP_FIFO_MEM_CORE_EVENT_OVERFLOW|
1534 A_ONCHIP_FIFO_MEM_CORE_EVENT_UNDERFLOW)) {
1535 /* XXX-BZ ERROR HANDLING. */
1536 atse_update_rx_err(sc, ((rx &
1537 A_ONCHIP_FIFO_MEM_CORE_ERROR_MASK) >>
1538 A_ONCHIP_FIFO_MEM_CORE_ERROR_SHIFT) & 0xff);
1539 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1541 if (tx & (A_ONCHIP_FIFO_MEM_CORE_EVENT_OVERFLOW|
1542 A_ONCHIP_FIFO_MEM_CORE_EVENT_UNDERFLOW)) {
1543 /* XXX-BZ ERROR HANDLING. */
1544 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
/* TX FIFO empty: nothing outstanding, disarm the watchdog. */
1546 if (ATSE_TX_READ_FILL_LEVEL(sc) == 0)
1547 sc->atse_watchdog_timer = 0;
1550 if (/* Severe error; if only we could find out. */) {
1551 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1552 atse_init_locked(sc);
1560 #endif /* DEVICE_POLLING */
/*
 * Table of MAC statistics registers: one { name, descr } entry per
 * hardware counter, indexed positionally — atse_sysctl_stats_attach()
 * passes the array index 'i' as the sysctl arg2, and the handler reads
 * CSR at that offset.  Entries with a NULL name/descr (gaps in the
 * register map) are skipped at attach time.  Descriptions are mostly
 * copied from the Altera TSE datasheet (UG-01008).
 *
 * NOTE(review): several entries' name lines are missing from this
 * extract (numbering jumps, e.g. 1586 -> 1588 drops the name before
 * "The number of errored frames received."), so the table as shown
 * is incomplete.
 */
1562 static struct atse_mac_stats_regs {
1564 const char *descr; /* Mostly copied from Altera datasheet. */
1565 } atse_mac_stats_regs[] = {
1567 { "aFramesTransmittedOK",
1568 "The number of frames that are successfully transmitted including "
1569 "the pause frames." },
1570 { "aFramesReceivedOK",
1571 "The number of frames that are successfully received including the "
1573 { "aFrameCheckSequenceErrors",
1574 "The number of receive frames with CRC error." },
1575 { "aAlignmentErrors",
1576 "The number of receive frames with alignment error." },
1577 { "aOctetsTransmittedOK",
1578 "The lower 32 bits of the number of data and padding octets that "
1579 "are successfully transmitted." },
1580 { "aOctetsReceivedOK",
1581 "The lower 32 bits of the number of data and padding octets that "
1582 " are successfully received." },
1583 { "aTxPAUSEMACCtrlFrames",
1584 "The number of pause frames transmitted." },
1585 { "aRxPAUSEMACCtrlFrames",
1586 "The number received pause frames received." },
1588 "The number of errored frames received." },
1590 "The number of transmit frames with either a FIFO overflow error, "
1591 "a FIFO underflow error, or a error defined by the user "
1594 "The number of valid unicast frames received." },
1595 { "ifInMulticastPkts",
1596 "The number of valid multicast frames received. The count does "
1597 "not include pause frames." },
1598 { "ifInBroadcastPkts",
1599 "The number of valid broadcast frames received." },
1601 "This statistics counter is not in use. The MAC function does not "
1602 "discard frames that are written to the FIFO buffer by the user "
1605 "The number of valid unicast frames transmitted." },
1606 { "ifOutMulticastPkts",
1607 "The number of valid multicast frames transmitted, excluding pause "
1609 { "ifOutBroadcastPkts",
1610 "The number of valid broadcast frames transmitted." },
1611 { "etherStatsDropEvents",
1612 "The number of frames that are dropped due to MAC internal errors "
1613 "when FIFO buffer overflow persists." },
1614 { "etherStatsOctets",
1615 "The lower 32 bits of the total number of octets received. This "
1616 "count includes both good and errored frames." },
1618 "The total number of good and errored frames received." },
1619 { "etherStatsUndersizePkts",
1620 "The number of frames received with length less than 64 bytes. "
1621 "This count does not include errored frames." },
1622 { "etherStatsOversizePkts",
1623 "The number of frames received that are longer than the value "
1624 "configured in the frm_length register. This count does not "
1625 "include errored frames." },
1626 { "etherStatsPkts64Octets",
1627 "The number of 64-byte frames received. This count includes good "
1628 "and errored frames." },
1629 { "etherStatsPkts65to127Octets",
1630 "The number of received good and errored frames between the length "
1631 "of 65 and 127 bytes." },
1632 { "etherStatsPkts128to255Octets",
1633 "The number of received good and errored frames between the length "
1634 "of 128 and 255 bytes." },
1635 { "etherStatsPkts256to511Octets",
1636 "The number of received good and errored frames between the length "
1637 "of 256 and 511 bytes." },
1638 { "etherStatsPkts512to1023Octets",
1639 "The number of received good and errored frames between the length "
1640 "of 512 and 1023 bytes." },
1641 { "etherStatsPkts1024to1518Octets",
1642 "The number of received good and errored frames between the length "
1643 "of 1024 and 1518 bytes." },
1644 { "etherStatsPkts1519toXOctets",
1645 "The number of received good and errored frames between the length "
1646 "of 1519 and the maximum frame length configured in the frm_length "
1648 { "etherStatsJabbers",
1649 "Too long frames with CRC error." },
1650 { "etherStatsFragments",
1651 "Too short frames with CRC error." },
1652 /* 0x39 unused, 0x3a/b non-stats. */
1654 /* Extended Statistics Counters */
1655 { "msb_aOctetsTransmittedOK",
1656 "Upper 32 bits of the number of data and padding octets that are "
1657 "successfully transmitted." },
1658 { "msb_aOctetsReceivedOK",
1659 "Upper 32 bits of the number of data and padding octets that are "
1660 "successfully received." },
1661 { "msb_etherStatsOctets",
1662 "Upper 32 bits of the total number of octets received. This count "
1663 "includes both good and errored frames." }
/*
 * sysctl handler for a MAC statistics counter: arg2 ('offset', set up
 * in atse_sysctl_stats_attach) selects the CSR to read; the value is
 * handed to userland via sysctl_handle_int.  Counters are read-only
 * (CTLFLAG_RD), so the !req->newptr path is the normal exit.
 */
1667 sysctl_atse_mac_stats_proc(SYSCTL_HANDLER_ARGS)
1669 struct atse_softc *sc;
1670 int error, offset, s;
1675 s = CSR_READ_4(sc, offset);
1676 error = sysctl_handle_int(oidp, &s, 0, req);
1677 if (error || !req->newptr)
/*
 * Table of software-maintained RX error counters, indexed by the
 * ATSE_RX_ERR_* bit positions below (the same indices used by
 * atse_update_rx_err() into sc->atse_rx_err[]).  Bits 4 and 5 are
 * defined by hardware but of unknown meaning.
 */
1683 static struct atse_rx_err_stats_regs {
1686 } atse_rx_err_stats_regs[] = {
1688 #define ATSE_RX_ERR_FIFO_THRES_EOP 0 /* FIFO threshold reached, on EOP. */
1689 #define ATSE_RX_ERR_ELEN 1 /* Frame/payload length not valid. */
1690 #define ATSE_RX_ERR_CRC32 2 /* CRC-32 error. */
1691 #define ATSE_RX_ERR_FIFO_THRES_TRUNC 3 /* FIFO thresh., truncated frame. */
1692 #define ATSE_RX_ERR_4 4 /* ? */
1693 #define ATSE_RX_ERR_5 5 /* / */
1695 { "rx_err_fifo_thres_eop",
1696 "FIFO threshold reached, reported on EOP." },
1697 { "rx_err_fifo_elen",
1698 "Frame or payload length not valid." },
1699 { "rx_err_fifo_crc32",
1701 { "rx_err_fifo_thres_trunc",
1702 "FIFO threshold reached, truncated frame" },
/*
 * sysctl handler for a software RX error counter: arg2 ('offset') is
 * the index into sc->atse_rx_err[] (filled by atse_update_rx_err);
 * read-only, mirroring sysctl_atse_mac_stats_proc above.
 */
1710 sysctl_atse_rx_err_stats_proc(SYSCTL_HANDLER_ARGS)
1712 struct atse_softc *sc;
1713 int error, offset, s;
1718 s = sc->atse_rx_err[offset];
1719 error = sysctl_handle_int(oidp, &s, 0, req);
1720 if (error || !req->newptr)
/*
 * Register all statistics sysctls under the device's sysctl tree:
 * one read-only unsigned node per MAC counter (handler reads the CSR
 * at index i) and one per software RX error counter.  Table entries
 * with NULL name/descr mark register-map gaps and are skipped.
 */
1727 atse_sysctl_stats_attach(device_t dev)
1729 struct sysctl_ctx_list *sctx;
1730 struct sysctl_oid *soid;
1731 struct atse_softc *sc;
1734 sc = device_get_softc(dev);
1735 sctx = device_get_sysctl_ctx(dev);
1736 soid = device_get_sysctl_tree(dev);
1738 /* MAC statistics. */
1739 for (i = 0; i < nitems(atse_mac_stats_regs); i++) {
1740 if (atse_mac_stats_regs[i].name == NULL ||
1741 atse_mac_stats_regs[i].descr == NULL)
/* arg2 = i: the handler uses it as the CSR offset/index. */
1744 SYSCTL_ADD_PROC(sctx, SYSCTL_CHILDREN(soid), OID_AUTO,
1745 atse_mac_stats_regs[i].name, CTLTYPE_UINT|CTLFLAG_RD,
1746 sc, i, sysctl_atse_mac_stats_proc, "IU",
1747 atse_mac_stats_regs[i].descr);
/* Software-maintained RX error counters. */
1751 for (i = 0; i < ATSE_RX_ERR_MAX; i++) {
1752 if (atse_rx_err_stats_regs[i].name == NULL ||
1753 atse_rx_err_stats_regs[i].descr == NULL)
1756 SYSCTL_ADD_PROC(sctx, SYSCTL_CHILDREN(soid), OID_AUTO,
1757 atse_rx_err_stats_regs[i].name, CTLTYPE_UINT|CTLFLAG_RD,
1758 sc, i, sysctl_atse_rx_err_stats_proc, "IU",
1759 atse_rx_err_stats_regs[i].descr);
1764 * Generic device handling routines.
/*
 * Device attach: read the Ethernet option bits, set up the mutex,
 * tick callout and TX bounce buffer, configure MDIO for the single
 * supported PHY (always via BMCR1; BMCR0 is the PCS mapping), reset
 * the MAC, allocate and initialize the ifnet, attach the MII bus and
 * ether layer, hook up RX/TX interrupts (falling back to, or forcing,
 * DEVICE_POLLING when no IRQ resources exist), and finally register
 * the statistics sysctls.
 *
 * NOTE(review): error-path lines (gotos/returns after the printf
 * calls) and the atse_reset() call around 1799-1800 are among the
 * lines dropped from this extract.
 */
1767 atse_attach(device_t dev)
1769 struct atse_softc *sc;
1773 sc = device_get_softc(dev);
1775 atse_ethernet_option_bits_read(dev);
1777 mtx_init(&sc->atse_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
/* Tick callout shares the softc mutex, so it runs locked. */
1780 callout_init_mtx(&sc->atse_tick, &sc->atse_mtx, 0);
/* Bounce buffer big enough for a jumbo frame. */
1782 sc->atse_tx_buf = malloc(ETHER_MAX_LEN_JUMBO, M_DEVBUF, M_WAITOK);
1785 * We are only doing single-PHY with this driver currently. The
1786 * defaults would be right so that BASE_CFG_MDIO_ADDR0 points to the
1787 * 1st PHY address (0) apart from the fact that BMCR0 is always
1788 * the PCS mapping, so we always use BMCR1. See Table 5-1 0xA0-0xBF.
1790 #if 0 /* Always PCS. */
1791 sc->atse_bmcr0 = MDIO_0_START;
1792 CSR_WRITE_4(sc, BASE_CFG_MDIO_ADDR0, 0x00);
1794 /* Always use matching PHY for atse[0..]. */
1795 sc->atse_phy_addr = device_get_unit(dev);
1796 sc->atse_bmcr1 = MDIO_1_START;
1797 CSR_WRITE_4(sc, BASE_CFG_MDIO_ADDR1, sc->atse_phy_addr);
1799 /* Reset the adapter. */
1802 /* Setup interface. */
1803 ifp = sc->atse_ifp = if_alloc(IFT_ETHER);
1805 device_printf(dev, "if_alloc() failed\n");
1810 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1811 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1812 ifp->if_ioctl = atse_ioctl;
1813 ifp->if_start = atse_start;
1814 ifp->if_init = atse_init;
1815 IFQ_SET_MAXLEN(&ifp->if_snd, ATSE_TX_LIST_CNT - 1);
1816 ifp->if_snd.ifq_drv_maxlen = ATSE_TX_LIST_CNT - 1;
1817 IFQ_SET_READY(&ifp->if_snd);
/* Attach the MII bus; probe any PHY at any offset. */
1820 error = mii_attach(dev, &sc->atse_miibus, ifp, atse_ifmedia_upd,
1821 atse_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
1823 device_printf(dev, "attaching PHY failed: %d\n", error);
1827 /* Call media-indepedent attach routine. */
1828 ether_ifattach(ifp, sc->atse_eth_addr);
1830 /* Tell the upper layer(s) about vlan mtu support. */
1831 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1832 ifp->if_capabilities |= IFCAP_VLAN_MTU;
1833 ifp->if_capenable = ifp->if_capabilities;
1834 #ifdef DEVICE_POLLING
1835 /* We will enable polling by default if no irqs available. See below. */
1836 ifp->if_capabilities |= IFCAP_POLLING;
1839 /* Hook up interrupts. */
1840 if (sc->atse_rx_irq_res != NULL) {
1841 error = bus_setup_intr(dev, sc->atse_rx_irq_res, INTR_TYPE_NET |
1842 INTR_MPSAFE, NULL, atse_rx_intr, sc, &sc->atse_rx_intrhand);
1844 device_printf(dev, "enabling RX IRQ failed\n");
1845 ether_ifdetach(ifp);
1850 if (sc->atse_tx_irq_res != NULL) {
1851 error = bus_setup_intr(dev, sc->atse_tx_irq_res, INTR_TYPE_NET |
1852 INTR_MPSAFE, NULL, atse_tx_intr, sc, &sc->atse_tx_intrhand);
/* TX IRQ setup failed: undo the RX hookup before bailing. */
1854 bus_teardown_intr(dev, sc->atse_rx_irq_res,
1855 sc->atse_rx_intrhand);
1856 device_printf(dev, "enabling TX IRQ failed\n");
1857 ether_ifdetach(ifp);
/* Polling requested, or no IRQ resources at all: use polling. */
1862 if ((ifp->if_capenable & IFCAP_POLLING) != 0 ||
1863 (sc->atse_rx_irq_res == NULL && sc->atse_tx_irq_res == NULL)) {
1864 #ifdef DEVICE_POLLING
1865 /* If not on and no IRQs force it on. */
1866 if (sc->atse_rx_irq_res == NULL && sc->atse_tx_irq_res == NULL){
1867 ifp->if_capenable |= IFCAP_POLLING;
1868 device_printf(dev, "forcing to polling due to no "
1871 error = ether_poll_register(atse_poll, ifp);
/* Kernel built without DEVICE_POLLING and no IRQs: cannot operate. */
1875 device_printf(dev, "no DEVICE_POLLING in kernel and no IRQs\n");
/* Interrupt mode: arm both FIFO interrupt sources. */
1879 ATSE_RX_INTR_ENABLE(sc);
1880 ATSE_TX_INTR_ENABLE(sc);
1888 atse_sysctl_stats_attach(dev);
/*
 * Device detach: deregister polling if active, and — only if attach
 * completed — stop the MAC, drain the tick callout, detach the ether
 * and MII layers, and tear down any installed interrupt handlers.
 * Finally free the TX bounce buffer and destroy the mutex.
 * (IRQ/memory resources themselves are released separately in
 * atse_detach_resources().)
 */
1894 atse_detach(device_t dev)
1896 struct atse_softc *sc;
1899 sc = device_get_softc(dev);
1900 KASSERT(mtx_initialized(&sc->atse_mtx), ("%s: mutex not initialized",
1901 device_get_nameunit(dev)));
1904 #ifdef DEVICE_POLLING
1905 if (ifp->if_capenable & IFCAP_POLLING)
1906 ether_poll_deregister(ifp);
1909 /* Only cleanup if attach succeeded. */
1910 if (device_is_attached(dev)) {
1912 atse_stop_locked(sc);
1914 callout_drain(&sc->atse_tick);
1915 ether_ifdetach(ifp);
1917 if (sc->atse_miibus != NULL)
1918 device_delete_child(dev, sc->atse_miibus);
1920 if (sc->atse_tx_intrhand)
1921 bus_teardown_intr(dev, sc->atse_tx_irq_res,
1922 sc->atse_tx_intrhand);
1923 if (sc->atse_rx_intrhand)
1924 bus_teardown_intr(dev, sc->atse_rx_irq_res,
1925 sc->atse_rx_intrhand);
1930 if (sc->atse_tx_buf != NULL)
1931 free(sc->atse_tx_buf, M_DEVBUF);
1933 mtx_destroy(&sc->atse_mtx);
1938 /* Shared between nexus and fdt implementation. */
/*
 * Release all bus resources (TX/RX control and data memory windows,
 * TX/RX IRQs, and the main MAC register window), NULLing each softc
 * pointer after release so the routine is idempotent.  Shared between
 * the nexus and FDT bus attachments.
 */
1940 atse_detach_resources(device_t dev)
1942 struct atse_softc *sc;
1944 sc = device_get_softc(dev);
1946 if (sc->atse_txc_mem_res != NULL) {
1947 bus_release_resource(dev, SYS_RES_MEMORY, sc->atse_txc_mem_rid,
1948 sc->atse_txc_mem_res);
1949 sc->atse_txc_mem_res = NULL;
1951 if (sc->atse_tx_mem_res != NULL) {
1952 bus_release_resource(dev, SYS_RES_MEMORY, sc->atse_tx_mem_rid,
1953 sc->atse_tx_mem_res);
1954 sc->atse_tx_mem_res = NULL;
1956 if (sc->atse_tx_irq_res != NULL) {
1957 bus_release_resource(dev, SYS_RES_IRQ, sc->atse_tx_irq_rid,
1958 sc->atse_tx_irq_res);
1959 sc->atse_tx_irq_res = NULL;
1961 if (sc->atse_rxc_mem_res != NULL) {
1962 bus_release_resource(dev, SYS_RES_MEMORY, sc->atse_rxc_mem_rid,
1963 sc->atse_rxc_mem_res);
1964 sc->atse_rxc_mem_res = NULL;
1966 if (sc->atse_rx_mem_res != NULL) {
1967 bus_release_resource(dev, SYS_RES_MEMORY, sc->atse_rx_mem_rid,
1968 sc->atse_rx_mem_res);
1969 sc->atse_rx_mem_res = NULL;
1971 if (sc->atse_rx_irq_res != NULL) {
1972 bus_release_resource(dev, SYS_RES_IRQ, sc->atse_rx_irq_rid,
1973 sc->atse_rx_irq_res);
1974 sc->atse_rx_irq_res = NULL;
1976 if (sc->atse_mem_res != NULL) {
1977 bus_release_resource(dev, SYS_RES_MEMORY, sc->atse_mem_rid,
1979 sc->atse_mem_res = NULL;
/*
 * Bus-method detach wrapper: run the full atse_detach() (logging any
 * failure, after which the device state is effectively undefined),
 * then release the bus resources.
 */
1984 atse_detach_dev(device_t dev)
1988 error = atse_detach(dev);
1990 /* We are basically in undefined state now. */
1991 device_printf(dev, "atse_detach() failed: %d\n", error);
1995 atse_detach_resources(dev);
/*
 * miibus read method: read PHY register 'reg' via the MDIO window.
 * Only the single hard-coded PHY address (sc->atse_phy_addr) is
 * serviced; MDIO space is not re-mapped on the fly, so requests for
 * any other address are rejected (return path not visible in this
 * extract).
 */
2001 atse_miibus_readreg(device_t dev, int phy, int reg)
2003 struct atse_softc *sc;
2005 sc = device_get_softc(dev);
2008 * We currently do not support re-mapping of MDIO space on-the-fly
2009 * but de-facto hard-code the phy#.
2011 if (phy != sc->atse_phy_addr)
2014 return (PHY_READ_2(sc, reg));
/*
 * miibus write method: write 'data' to PHY register 'reg' via the
 * MDIO window.  Mirrors atse_miibus_readreg(): only the hard-coded
 * PHY address is serviced.
 */
2018 atse_miibus_writereg(device_t dev, int phy, int reg, int data)
2020 struct atse_softc *sc;
2022 sc = device_get_softc(dev);
2025 * We currently do not support re-mapping of MDIO space on-the-fly
2026 * but de-facto hard-code the phy#.
2028 if (phy != sc->atse_phy_addr)
2031 PHY_WRITE_2(sc, reg, data);
2036 atse_miibus_statchg(device_t dev)
2038 struct atse_softc *sc;
2039 struct mii_data *mii;
2043 sc = device_get_softc(dev);
2044 ATSE_LOCK_ASSERT(sc);
2046 mii = device_get_softc(sc->atse_miibus);
2048 if (mii == NULL || ifp == NULL ||
2049 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2052 val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
2054 /* Assume no link. */
2055 sc->atse_flags &= ~ATSE_FLAGS_LINK;
2057 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
2058 (IFM_ACTIVE | IFM_AVALID)) {
2060 switch (IFM_SUBTYPE(mii->mii_media_active)) {
2062 val4 |= BASE_CFG_COMMAND_CONFIG_ENA_10;
2063 val4 &= ~BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
2064 sc->atse_flags |= ATSE_FLAGS_LINK;
2067 val4 &= ~BASE_CFG_COMMAND_CONFIG_ENA_10;
2068 val4 &= ~BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
2069 sc->atse_flags |= ATSE_FLAGS_LINK;
2072 val4 &= ~BASE_CFG_COMMAND_CONFIG_ENA_10;
2073 val4 |= BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
2074 sc->atse_flags |= ATSE_FLAGS_LINK;
2081 if ((sc->atse_flags & ATSE_FLAGS_LINK) == 0) {
2082 /* XXX-BZ need to stop the MAC? */
2086 if (IFM_OPTIONS(mii->mii_media_active & IFM_FDX) != 0)
2087 val4 &= ~BASE_CFG_COMMAND_CONFIG_HD_ENA;
2089 val4 |= BASE_CFG_COMMAND_CONFIG_HD_ENA;
2090 /* XXX-BZ flow control? */
2092 /* Make sure the MAC is activated. */
2093 val4 |= BASE_CFG_COMMAND_CONFIG_TX_ENA;
2094 val4 |= BASE_CFG_COMMAND_CONFIG_RX_ENA;
2096 CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);