2 * Copyright (c) 2012, 2013 Bjoern A. Zeeb
3 * Copyright (c) 2014 Robert N. M. Watson
6 * This software was developed by SRI International and the University of
7 * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-11-C-0249)
8 * ("MRC2"), as part of the DARPA MRC research programme.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * Altera Triple-Speed Ethernet MegaCore, Function User Guide
33 * UG-01008-3.0, Software Version: 12.0, June 2012.
34 * Available at the time of writing at:
35 * http://www.altera.com/literature/ug/ug_ethernet.pdf
37 * We are using a Marvell E1111 (Alaska) PHY on the DE4. See mii/e1000phy.c.
41 * - ifOutBroadcastPkts are only counted if both ether dst and src are all-1s;
42 * seems an IP core bug, they count ether broadcasts as multicast. Is this
44 * - figure out why the TX FIFO fill status and intr did not work as expected.
45 * - test 100Mbit/s and 10Mbit/s
46 * - blacklist the one special factory programmed ethernet address (for now
47 * hardcoded, later from loader?)
48 * - resolve all XXX, left as reminders to shake out details later
49 * - Jumbo frame support
52 #include <sys/cdefs.h>
53 __FBSDID("$FreeBSD$");
55 #include "opt_device_polling.h"
57 #include <sys/param.h>
58 #include <sys/systm.h>
59 #include <sys/kernel.h>
61 #include <sys/endian.h>
64 #include <sys/module.h>
65 #include <sys/mutex.h>
67 #include <sys/socket.h>
68 #include <sys/sockio.h>
69 #include <sys/types.h>
71 #include <net/ethernet.h>
73 #include <net/if_var.h>
74 #include <net/if_dl.h>
75 #include <net/if_media.h>
76 #include <net/if_types.h>
77 #include <net/if_vlan_var.h>
81 #include <machine/bus.h>
82 #include <machine/resource.h>
85 #include <dev/mii/mii.h>
86 #include <dev/mii/miivar.h>
88 #include <dev/altera/atse/if_atsereg.h>
89 #include <dev/altera/atse/a_api.h>
91 MODULE_DEPEND(atse, ether, 1, 1, 1);
92 MODULE_DEPEND(atse, miibus, 1, 1, 1);
/* Transmit watchdog timeout in seconds (see atse_watchdog()). */
95 #define ATSE_WATCHDOG_TIME 5
98 static poll_handler_t atse_poll;
101 /* XXX once we'd do parallel attach, we need a global lock for this. */
102 #define ATSE_ETHERNET_OPTION_BITS_UNDEF 0
103 #define ATSE_ETHERNET_OPTION_BITS_READ 1
/*
 * One-shot cache of the Altera "option bits" flash region carrying the
 * factory-programmed MAC address; the flag records whether it was read.
 */
104 static int atse_ethernet_option_bits_flag = ATSE_ETHERNET_OPTION_BITS_UNDEF;
105 static uint8_t atse_ethernet_option_bits[ALTERA_ETHERNET_OPTION_BITS_LEN];
/* debug.atse_intr_debug_enable sysctl: verbose interrupt-path logging. */
107 static int atse_intr_debug_enable = 0;
108 SYSCTL_INT(_debug, OID_AUTO, atse_intr_debug_enable, CTLFLAG_RW,
109 &atse_intr_debug_enable, 0,
110 "Extra debugging output for atse interrupts");
113 * Softc and critical resource locking.
115 #define ATSE_LOCK(_sc) mtx_lock(&(_sc)->atse_mtx)
116 #define ATSE_UNLOCK(_sc) mtx_unlock(&(_sc)->atse_mtx)
117 #define ATSE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->atse_mtx, MA_OWNED)
/* NOTE(review): relies on a local `ifp' being in scope at the use site. */
119 #define ATSE_TX_PENDING(sc) (sc->atse_tx_m != NULL || \
120 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
/* Debug printf and its compiled-out variant (conditional #if lines not visible in this excerpt). */
123 #define DPRINTF(format, ...) printf(format, __VA_ARGS__)
125 #define DPRINTF(format, ...)
128 /* a_api.c functions; factor out? */
/*
 * Convert to the FIFO's little-endian layout and write a 32-bit on-chip
 * FIFO register; `desc', `f' and `l' only feed the DPRINTF trace.
 */
130 a_onchip_fifo_mem_core_write(struct resource *res, uint32_t off,
131 uint32_t val4, const char *desc, const char *f, const int l)
134 val4 = htole32(val4);
135 DPRINTF("[%s:%d] FIFOW %s 0x%08x = 0x%08x\n", f, l, desc, off, val4);
136 bus_write_4(res, off, val4);
/*
 * Read a 32-bit on-chip FIFO register, converting from little endian;
 * presumably returns val4 (the return statement is not visible in this
 * excerpt).  `desc', `f' and `l' only feed the DPRINTF trace.
 */
139 static inline uint32_t
140 a_onchip_fifo_mem_core_read(struct resource *res, uint32_t off,
141 const char *desc, const char *f, const int l)
145 val4 = le32toh(bus_read_4(res, off));
146 DPRINTF("[%s:%d] FIFOR %s 0x%08x = 0x%08x\n", f, l, desc, off, val4);
/*
 * Accessors for the Avalon on-chip FIFO cores: raw data port plus
 * metadata/status/event/interrupt-enable registers, all routed through the
 * traced helpers above.  Data-port reads/writes skip the endian conversion
 * because the FIFO hardware performs it (per the comments below).
 */
151 /* The FIFO does an endian conversion, so we must not do it as well. */
152 /* XXX-BZ in fact we should do a htobe32 so le would be fine as well? */
153 #define ATSE_TX_DATA_WRITE(sc, val4) \
154 bus_write_4((sc)->atse_tx_mem_res, A_ONCHIP_FIFO_MEM_CORE_DATA, val4)
156 #define ATSE_TX_META_WRITE(sc, val4) \
157 a_onchip_fifo_mem_core_write((sc)->atse_tx_mem_res, \
158 A_ONCHIP_FIFO_MEM_CORE_METADATA, \
159 (val4), "TXM", __func__, __LINE__)
160 #define ATSE_TX_META_READ(sc) \
161 a_onchip_fifo_mem_core_read((sc)->atse_tx_mem_res, \
162 A_ONCHIP_FIFO_MEM_CORE_METADATA, \
163 "TXM", __func__, __LINE__)
165 #define ATSE_TX_READ_FILL_LEVEL(sc) \
166 a_onchip_fifo_mem_core_read((sc)->atse_txc_mem_res, \
167 A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_FILL_LEVEL, \
168 "TX_FILL", __func__, __LINE__)
169 #define ATSE_RX_READ_FILL_LEVEL(sc) \
170 a_onchip_fifo_mem_core_read((sc)->atse_rxc_mem_res, \
171 A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_FILL_LEVEL, \
172 "RX_FILL", __func__, __LINE__)
174 /* The FIFO does an endian conversion, so we must not do it as well. */
175 /* XXX-BZ in fact we should do a htobe32 so le would be fine as well? */
176 #define ATSE_RX_DATA_READ(sc) \
177 bus_read_4((sc)->atse_rx_mem_res, A_ONCHIP_FIFO_MEM_CORE_DATA)
178 #define ATSE_RX_META_READ(sc) \
179 a_onchip_fifo_mem_core_read((sc)->atse_rx_mem_res, \
180 A_ONCHIP_FIFO_MEM_CORE_METADATA, \
181 "RXM", __func__, __LINE__)
183 #define ATSE_RX_STATUS_READ(sc) \
184 a_onchip_fifo_mem_core_read((sc)->atse_rxc_mem_res, \
185 A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_I_STATUS, \
186 "RX_EVENT", __func__, __LINE__)
188 #define ATSE_TX_STATUS_READ(sc) \
189 a_onchip_fifo_mem_core_read((sc)->atse_txc_mem_res, \
190 A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_I_STATUS, \
191 "TX_EVENT", __func__, __LINE__)
193 #define ATSE_RX_EVENT_READ(sc) \
194 a_onchip_fifo_mem_core_read((sc)->atse_rxc_mem_res, \
195 A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_EVENT, \
196 "RX_EVENT", __func__, __LINE__)
198 #define ATSE_TX_EVENT_READ(sc) \
199 a_onchip_fifo_mem_core_read((sc)->atse_txc_mem_res, \
200 A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_EVENT, \
201 "TX_EVENT", __func__, __LINE__)
/*
 * Event clear: read the EVENT register and write the value back
 * (write-1-to-clear).  The do/while(0) wrapper lines of these multi-line
 * macros are not visible in this excerpt.
 */
203 #define ATSE_RX_EVENT_CLEAR(sc) \
207 val4 = a_onchip_fifo_mem_core_read( \
208 (sc)->atse_rxc_mem_res, \
209 A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_EVENT, \
210 "RX_EVENT", __func__, __LINE__); \
212 a_onchip_fifo_mem_core_write( \
213 (sc)->atse_rxc_mem_res, \
214 A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_EVENT, \
215 val4, "RX_EVENT", __func__, __LINE__); \
217 #define ATSE_TX_EVENT_CLEAR(sc) \
221 val4 = a_onchip_fifo_mem_core_read( \
222 (sc)->atse_txc_mem_res, \
223 A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_EVENT, \
224 "TX_EVENT", __func__, __LINE__); \
226 a_onchip_fifo_mem_core_write( \
227 (sc)->atse_txc_mem_res, \
228 A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_EVENT, \
229 val4, "TX_EVENT", __func__, __LINE__); \
232 #define ATSE_RX_EVENTS (A_ONCHIP_FIFO_MEM_CORE_INTR_FULL | \
233 A_ONCHIP_FIFO_MEM_CORE_INTR_OVERFLOW | \
234 A_ONCHIP_FIFO_MEM_CORE_INTR_UNDERFLOW)
/* Interrupt enable writes ATSE_RX_EVENTS/ATSE_TX_EVENTS (argument line not visible here); disable writes 0. */
235 #define ATSE_RX_INTR_ENABLE(sc) \
236 a_onchip_fifo_mem_core_write((sc)->atse_rxc_mem_res, \
237 A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_INT_ENABLE, \
239 "RX_INTR", __func__, __LINE__) /* XXX-BZ review later. */
240 #define ATSE_RX_INTR_DISABLE(sc) \
241 a_onchip_fifo_mem_core_write((sc)->atse_rxc_mem_res, \
242 A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_INT_ENABLE, 0, \
243 "RX_INTR", __func__, __LINE__)
244 #define ATSE_RX_INTR_READ(sc) \
245 a_onchip_fifo_mem_core_read((sc)->atse_rxc_mem_res, \
246 A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_INT_ENABLE, \
247 "RX_INTR", __func__, __LINE__)
249 #define ATSE_TX_EVENTS (A_ONCHIP_FIFO_MEM_CORE_INTR_EMPTY | \
250 A_ONCHIP_FIFO_MEM_CORE_INTR_OVERFLOW | \
251 A_ONCHIP_FIFO_MEM_CORE_INTR_UNDERFLOW)
252 #define ATSE_TX_INTR_ENABLE(sc) \
253 a_onchip_fifo_mem_core_write((sc)->atse_txc_mem_res, \
254 A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_INT_ENABLE, \
256 "TX_INTR", __func__, __LINE__) /* XXX-BZ review later. */
257 #define ATSE_TX_INTR_DISABLE(sc) \
258 a_onchip_fifo_mem_core_write((sc)->atse_txc_mem_res, \
259 A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_INT_ENABLE, 0, \
260 "TX_INTR", __func__, __LINE__)
261 #define ATSE_TX_INTR_READ(sc) \
262 a_onchip_fifo_mem_core_read((sc)->atse_txc_mem_res, \
263 A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_INT_ENABLE, \
264 "TX_INTR", __func__, __LINE__)
/* Forward declaration: RX path needs to be callable from interrupt/poll code. */
266 static int atse_rx_locked(struct atse_softc *sc);
269 * Register space access macros.
/*
 * Write a MAC CSR.  `reg' is a dword index, hence the reg * 4 byte offset;
 * the value is converted to little endian first.
 */
272 csr_write_4(struct atse_softc *sc, uint32_t reg, uint32_t val4,
273 const char *f, const int l)
276 val4 = htole32(val4);
277 DPRINTF("[%s:%d] CSR W %s 0x%08x (0x%08x) = 0x%08x\n", f, l,
278 "atse_mem_res", reg, reg * 4, val4);
279 bus_write_4(sc->atse_mem_res, reg * 4, val4);
/*
 * Read a MAC CSR (dword-indexed, converted from little endian); presumably
 * returns val4 — the return statement is not visible in this excerpt.
 */
282 static inline uint32_t
283 csr_read_4(struct atse_softc *sc, uint32_t reg, const char *f, const int l)
287 val4 = le32toh(bus_read_4(sc->atse_mem_res, reg * 4));
288 DPRINTF("[%s:%d] CSR R %s 0x%08x (0x%08x) = 0x%08x\n", f, l,
289 "atse_mem_res", reg, reg * 4, val4);
295 * See page 5-2 that it's all dword offsets and the MS 16 bits need to be zero
296 * on write and ignored on read.
/*
 * Write a 16-bit PCS/PHY register through the dword-indexed window at
 * `bmcr'; the upper 16 bits are masked to zero as the core requires
 * (see the page 5-2 note above).
 */
299 pxx_write_2(struct atse_softc *sc, bus_addr_t bmcr, uint32_t reg, uint16_t val,
300 const char *f, const int l, const char *s)
304 val4 = htole32(val & 0x0000ffff);
305 DPRINTF("[%s:%d] %s W %s 0x%08x (0x%08jx) = 0x%08x\n", f, l, s,
306 "atse_mem_res", reg, (bmcr + reg) * 4, val4);
307 bus_write_4(sc->atse_mem_res, (bmcr + reg) * 4, val4);
/*
 * Read a 16-bit PCS/PHY register through the dword-indexed window at
 * `bmcr'; the upper 16 bits are ignored on read.  Presumably returns
 * `val' — the return statement is not visible in this excerpt.
 */
310 static inline uint16_t
311 pxx_read_2(struct atse_softc *sc, bus_addr_t bmcr, uint32_t reg, const char *f,
312 const int l, const char *s)
317 val4 = bus_read_4(sc->atse_mem_res, (bmcr + reg) * 4);
318 val = le32toh(val4) & 0x0000ffff;
319 DPRINTF("[%s:%d] %s R %s 0x%08x (0x%08jx) = 0x%04x\n", f, l, s,
320 "atse_mem_res", reg, (bmcr + reg) * 4, val);
/*
 * Convenience wrappers that append caller identification (__func__/__LINE__)
 * for the DPRINTF traces.  PCS registers sit behind atse_bmcr0, the PHY's
 * behind atse_bmcr1.
 */
325 #define CSR_WRITE_4(sc, reg, val) \
326 csr_write_4((sc), (reg), (val), __func__, __LINE__)
327 #define CSR_READ_4(sc, reg) \
328 csr_read_4((sc), (reg), __func__, __LINE__)
329 #define PCS_WRITE_2(sc, reg, val) \
330 pxx_write_2((sc), sc->atse_bmcr0, (reg), (val), __func__, __LINE__, \
332 #define PCS_READ_2(sc, reg) \
333 pxx_read_2((sc), sc->atse_bmcr0, (reg), __func__, __LINE__, "PCS")
334 #define PHY_WRITE_2(sc, reg, val) \
335 pxx_write_2((sc), sc->atse_bmcr1, (reg), (val), __func__, __LINE__, \
337 #define PHY_READ_2(sc, reg) \
338 pxx_read_2((sc), sc->atse_bmcr1, (reg), __func__, __LINE__, "PHY")
/* Forward declarations and newbus devclass handle. */
340 static void atse_tick(void *);
341 static int atse_detach(device_t);
343 devclass_t atse_devclass;
/*
 * Push the pending mbuf (sc->atse_tx_m) into the TX FIFO, 32 bits at a
 * time.  The packet is first flattened into atse_tx_buf so partial writes
 * can resume at atse_tx_m_offset when the FIFO fills; SOP metadata is
 * written before the first word, EOP (with an "empty" byte count for the
 * tail) before the last.  `sent' presumably counts completed packets for
 * the caller — TODO confirm, the increment is not visible in this excerpt.
 */
346 atse_tx_locked(struct atse_softc *sc, int *sent)
349 uint32_t val4, fill_level;
353 ATSE_LOCK_ASSERT(sc);
356 KASSERT(m != NULL, ("%s: m is null: sc=%p", __func__, sc));
357 KASSERT(m->m_flags & M_PKTHDR, ("%s: not a pkthdr: m=%p", __func__, m));
360 * Copy to buffer to minimize our pain as we can only store
361 * double words which, after the first mbuf gets out of alignment
/* Offset 0 means a fresh packet: flatten the whole chain once. */
364 if (sc->atse_tx_m_offset == 0) {
365 m_copydata(m, 0, m->m_pkthdr.len, sc->atse_tx_buf);
366 sc->atse_tx_buf_len = m->m_pkthdr.len;
369 fill_level = ATSE_TX_READ_FILL_LEVEL(sc);
370 #if 0 /* Returns 0xdeadc0de. */
371 val4 = ATSE_TX_META_READ(sc);
373 if (sc->atse_tx_m_offset == 0) {
374 /* Write start of packet. */
375 val4 = A_ONCHIP_FIFO_MEM_CORE_SOP;
376 val4 &= ~A_ONCHIP_FIFO_MEM_CORE_EOP;
377 ATSE_TX_META_WRITE(sc, val4);
380 /* TX FIFO is single clock mode, so we have the full FIFO. */
/* Stream full 32-bit words while more than one word remains and there is room. */
382 while ((sc->atse_tx_buf_len - sc->atse_tx_m_offset) > 4 &&
383 fill_level < AVALON_FIFO_TX_BASIC_OPTS_DEPTH) {
385 bcopy(&sc->atse_tx_buf[sc->atse_tx_m_offset], &val4,
387 ATSE_TX_DATA_WRITE(sc, val4);
388 sc->atse_tx_m_offset += sizeof(val4);
392 if (fill_level == AVALON_FIFO_TX_BASIC_OPTS_DEPTH)
393 fill_level = ATSE_TX_READ_FILL_LEVEL(sc);
398 /* Set EOP *before* writing the last symbol. */
399 if (sc->atse_tx_m_offset >= (sc->atse_tx_buf_len - 4) &&
400 fill_level < AVALON_FIFO_TX_BASIC_OPTS_DEPTH) {
402 /* Set EndOfPacket. */
403 val4 = A_ONCHIP_FIFO_MEM_CORE_EOP;
/* Tell the core how many bytes of the final word are padding. */
406 leftm = sc->atse_tx_buf_len - sc->atse_tx_m_offset;
407 val4 |= ((4 - leftm) << A_ONCHIP_FIFO_MEM_CORE_EMPTY_SHIFT);
408 ATSE_TX_META_WRITE(sc, val4);
410 /* Write last symbol. */
412 bcopy(sc->atse_tx_buf + sc->atse_tx_m_offset, &val4, leftm);
413 ATSE_TX_DATA_WRITE(sc, val4);
418 /* OK, the packet is gone. */
419 sc->atse_tx_m = NULL;
420 sc->atse_tx_m_offset = 0;
422 /* If anyone is interested give them a copy. */
423 BPF_MTAP(sc->atse_ifp, m);
/*
 * Locked transmit start: finish any partially-sent packet first, then
 * drain the interface send queue through atse_tx_locked().  Bails out
 * early unless RUNNING, not OACTIVE, and the link is up.  Arms the
 * watchdog afterwards if work remains pending.
 */
433 atse_start_locked(struct ifnet *ifp)
435 struct atse_softc *sc;
439 ATSE_LOCK_ASSERT(sc);
441 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
442 IFF_DRV_RUNNING || (sc->atse_flags & ATSE_FLAGS_LINK) == 0)
447 * Disable the watchdog while sending, we are batching packets.
448 * Though we should never reach 5 seconds, and are holding the lock,
451 sc->atse_watchdog_timer = 0;
/* First finish a packet that ran out of FIFO space on a previous pass. */
454 if (sc->atse_tx_m != NULL) {
455 error = atse_tx_locked(sc, &sent);
459 /* We have more space to send so continue ... */
460 for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
462 IFQ_DRV_DEQUEUE(&ifp->if_snd, sc->atse_tx_m);
463 sc->atse_tx_m_offset = 0;
464 if (sc->atse_tx_m == NULL)
466 error = atse_tx_locked(sc, &sent);
472 /* If the IP core walks into Nekromanteion try to bail out. */
474 sc->atse_watchdog_timer = ATSE_WATCHDOG_TIME;
/*
 * ifnet if_start entry point: take the softc lock (acquire/release lines
 * not visible in this excerpt) around atse_start_locked().
 */
478 atse_start(struct ifnet *ifp)
480 struct atse_softc *sc;
484 atse_start_locked(ifp);
/*
 * Locked stop: cancel the watchdog and tick callout, mask and clear FIFO
 * interrupts, then disable the MAC TX/RX datapath and poll until the
 * enable bits drop.  Clears the link flag so start refuses to transmit.
 * NOTE(review): the statement clearing `mask' from val4 before the
 * write-back is not visible in this excerpt — confirm it exists.
 */
489 atse_stop_locked(struct atse_softc *sc)
495 ATSE_LOCK_ASSERT(sc);
497 sc->atse_watchdog_timer = 0;
498 callout_stop(&sc->atse_tick);
501 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
502 ATSE_RX_INTR_DISABLE(sc);
503 ATSE_TX_INTR_DISABLE(sc);
504 ATSE_RX_EVENT_CLEAR(sc);
505 ATSE_TX_EVENT_CLEAR(sc);
507 /* Disable MAC transmit and receive datapath. */
508 mask = BASE_CFG_COMMAND_CONFIG_TX_ENA|BASE_CFG_COMMAND_CONFIG_RX_ENA;
509 val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
511 CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
512 /* Wait for bits to be cleared; i=100 is excessive. */
513 for (i = 0; i < 100; i++) {
514 val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
515 if ((val4 & mask) == 0)
519 if ((val4 & mask) != 0)
520 device_printf(sc->atse_dev, "Disabling MAC TX/RX timed out.\n");
523 sc->atse_flags &= ~ATSE_FLAGS_LINK;
525 /* XXX-BZ free the RX/TX rings. */
/*
 * Compute the multicast hash-table slot for a 48-bit Ethernet address by
 * XOR-folding the bits of each address byte (the accumulation into the
 * result and the return are not visible in this excerpt).
 */
531 atse_mchash(struct atse_softc *sc __unused, const uint8_t *addr)
537 for (i = 0; i < ETHER_ADDR_LEN; i++) {
539 for (j = 1; j < 8; j++)
540 y ^= (addr[i] >> j) & 0x01;
/*
 * Program the receive filter: select full-48-bit multicast hashing,
 * set/clear promiscuous mode from if_flags, then either fill the whole
 * hash table (ALLMULTI) or rebuild it from the interface's multicast
 * address list via atse_mchash().
 * NOTE(review): the `i <= MHASH_LEN' loops write MHASH_LEN + 1 slots
 * while the comment below says the table holds MHASH_LEN entries —
 * possible off-by-one; confirm against the register map.
 */
548 atse_rxfilter_locked(struct atse_softc *sc)
550 struct ifmultiaddr *ifma;
555 /* XXX-BZ can we find out if we have the MHASH synthesized? */
556 val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
557 /* For simplicity always hash full 48 bits of addresses. */
558 if ((val4 & BASE_CFG_COMMAND_CONFIG_MHASH_SEL) != 0)
559 val4 &= ~BASE_CFG_COMMAND_CONFIG_MHASH_SEL;
562 if (ifp->if_flags & IFF_PROMISC)
563 val4 |= BASE_CFG_COMMAND_CONFIG_PROMIS_EN;
565 val4 &= ~BASE_CFG_COMMAND_CONFIG_PROMIS_EN;
567 CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
569 if (ifp->if_flags & IFF_ALLMULTI) {
570 /* Accept all multicast addresses. */
571 for (i = 0; i <= MHASH_LEN; i++)
572 CSR_WRITE_4(sc, MHASH_START + i, 0x1)
/*
 * Read the Altera "option bits" (factory MAC address area) out of the
 * sibling CFI flash device into the global cache, once only; marks the
 * global flag so subsequent calls are no-ops.
 */
604 atse_ethernet_option_bits_read_fdt(device_t dev)
606 struct resource *res;
/* Already read once — nothing to do. */
610 if (atse_ethernet_option_bits_flag & ATSE_ETHERNET_OPTION_BITS_READ)
613 fdev = device_find_child(device_get_parent(dev), "cfi", 0);
618 res = bus_alloc_resource_any(fdev, SYS_RES_MEMORY, &rid,
619 RF_ACTIVE | RF_SHAREABLE);
623 for (i = 0; i < ALTERA_ETHERNET_OPTION_BITS_LEN; i++)
624 atse_ethernet_option_bits[i] = bus_read_1(res,
625 ALTERA_ETHERNET_OPTION_BITS_OFF + i);
627 bus_release_resource(fdev, SYS_RES_MEMORY, rid, res);
628 atse_ethernet_option_bits_flag |= ATSE_ETHERNET_OPTION_BITS_READ;
/*
 * Wrapper around the FDT flash reader; logs a warning when the option
 * bits cannot be read (error-propagation lines not visible in this excerpt).
 */
634 atse_ethernet_option_bits_read(device_t dev)
638 error = atse_ethernet_option_bits_read_fdt(dev);
642 device_printf(dev, "Cannot read Ethernet addresses from flash.\n");
/*
 * Determine the interface's Ethernet hardware address, once only (so a
 * later reset cannot change an established address).
 *
 * Preference order: the factory "option bits" from flash, guarded by the
 * 0x00005afe ("safe") magic; the factory-default, all-zero and multicast
 * addresses are blacklisted.  Altera-prefixed addresses ending in 0x0 are
 * adjusted by device unit for multi-MAC parts (up to 16).  On any failure
 * we fall back to a locally administered address derived from the hostid,
 * or random bytes when no hostid is set (same scheme as bridge(4)).
 */
648 atse_get_eth_address(struct atse_softc *sc)
650 unsigned long hostid;
655 * Make sure to only ever do this once. Otherwise a reset would
656 * possibly change our ethernet address, which is not good at all.
658 if (sc->atse_eth_addr[0] != 0x00 || sc->atse_eth_addr[1] != 0x00 ||
659 sc->atse_eth_addr[2] != 0x00)
662 if ((atse_ethernet_option_bits_flag &
663 ATSE_ETHERNET_OPTION_BITS_READ) == 0)
666 val4 = atse_ethernet_option_bits[0] << 24;
667 val4 |= atse_ethernet_option_bits[1] << 16;
668 val4 |= atse_ethernet_option_bits[2] << 8;
669 val4 |= atse_ethernet_option_bits[3];
670 /* They chose "safe". */
671 if (val4 != le32toh(0x00005afe)) {
672 device_printf(sc->atse_dev, "Magic '5afe' is not safe: 0x%08x. "
673 "Falling back to random numbers for hardware address.\n",
678 sc->atse_eth_addr[0] = atse_ethernet_option_bits[4];
679 sc->atse_eth_addr[1] = atse_ethernet_option_bits[5];
680 sc->atse_eth_addr[2] = atse_ethernet_option_bits[6];
681 sc->atse_eth_addr[3] = atse_ethernet_option_bits[7];
682 sc->atse_eth_addr[4] = atse_ethernet_option_bits[8];
683 sc->atse_eth_addr[5] = atse_ethernet_option_bits[9];
685 /* Handle factory default ethernet address: 00:07:ed:ff:ed:15 */
686 if (sc->atse_eth_addr[0] == 0x00 && sc->atse_eth_addr[1] == 0x07 &&
687 sc->atse_eth_addr[2] == 0xed && sc->atse_eth_addr[3] == 0xff &&
688 sc->atse_eth_addr[4] == 0xed && sc->atse_eth_addr[5] == 0x15) {
690 device_printf(sc->atse_dev, "Factory programmed Ethernet "
691 "hardware address blacklisted. Falling back to random "
692 "address to avoid collisions.\n");
693 device_printf(sc->atse_dev, "Please re-program your flash.\n");
697 if (sc->atse_eth_addr[0] == 0x00 && sc->atse_eth_addr[1] == 0x00 &&
698 sc->atse_eth_addr[2] == 0x00 && sc->atse_eth_addr[3] == 0x00 &&
699 sc->atse_eth_addr[4] == 0x00 && sc->atse_eth_addr[5] == 0x00) {
700 device_printf(sc->atse_dev, "All zero's Ethernet hardware "
701 "address blacklisted. Falling back to random address.\n");
702 device_printf(sc->atse_dev, "Please re-program your flash.\n");
706 if (ETHER_IS_MULTICAST(sc->atse_eth_addr)) {
707 device_printf(sc->atse_dev, "Multicast Ethernet hardware "
708 "address blacklisted. Falling back to random address.\n");
709 device_printf(sc->atse_dev, "Please re-program your flash.\n");
714 * If we find an Altera prefixed address with a 0x0 ending
715 * adjust by device unit. If not and this is not the first
716 * Ethernet, go to random.
718 unit = device_get_unit(sc->atse_dev);
723 device_printf(sc->atse_dev, "We do not support Ethernet "
724 "addresses for more than 16 MACs. Falling back to "
725 "random hardware address.\n");
728 if ((sc->atse_eth_addr[0] & ~0x2) != 0 ||
729 sc->atse_eth_addr[1] != 0x07 || sc->atse_eth_addr[2] != 0xed ||
730 (sc->atse_eth_addr[5] & 0x0f) != 0x0) {
731 device_printf(sc->atse_dev, "Ethernet address not meeting our "
732 "multi-MAC standards. Falling back to random hardware "
736 sc->atse_eth_addr[5] |= (unit & 0x0f);
742 * Fall back to random code we also use on bridge(4).
744 getcredhostid(curthread->td_ucred, &hostid);
746 arc4rand(sc->atse_eth_addr, ETHER_ADDR_LEN, 1);
747 sc->atse_eth_addr[0] &= ~1;/* clear multicast bit */
748 sc->atse_eth_addr[0] |= 2; /* set the LAA bit */
750 sc->atse_eth_addr[0] = 0x2;
751 sc->atse_eth_addr[1] = (hostid >> 24) & 0xff;
752 sc->atse_eth_addr[2] = (hostid >> 16) & 0xff;
753 sc->atse_eth_addr[3] = (hostid >> 8 ) & 0xff;
754 sc->atse_eth_addr[4] = hostid & 0xff;
755 sc->atse_eth_addr[5] = sc->atse_unit & 0xff;
/*
 * Program sc->atse_eth_addr into the MAC address registers selected by
 * the `n' bitmask: the primary (MAC_0/MAC_1) and/or any of the four
 * supplementary address slots.  The 48-bit address is split into a
 * 32-bit low word (v0) and 16-bit high word (v1), byte 0 in the LSB.
 */
762 atse_set_eth_address(struct atse_softc *sc, int n)
766 v0 = (sc->atse_eth_addr[3] << 24) | (sc->atse_eth_addr[2] << 16) |
767 (sc->atse_eth_addr[1] << 8) | sc->atse_eth_addr[0];
768 v1 = (sc->atse_eth_addr[5] << 8) | sc->atse_eth_addr[4];
770 if (n & ATSE_ETH_ADDR_DEF) {
771 CSR_WRITE_4(sc, BASE_CFG_MAC_0, v0);
772 CSR_WRITE_4(sc, BASE_CFG_MAC_1, v1);
774 if (n & ATSE_ETH_ADDR_SUPP1) {
775 CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_0_0, v0);
776 CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_0_1, v1);
778 if (n & ATSE_ETH_ADDR_SUPP2) {
779 CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_1_0, v0);
780 CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_1_1, v1);
782 if (n & ATSE_ETH_ADDR_SUPP3) {
783 CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_2_0, v0);
784 CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_2_1, v1);
786 if (n & ATSE_ETH_ADDR_SUPP4) {
787 CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_3_0, v0);
788 CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_3_1, v1);
/*
 * Bring the TSE MAC/PCS through the initialization sequence from the
 * MegaCore user guide (UG-01008): PHY via MDIO/MII, PCS SGMII setup with
 * autonegotiation and reset, then MAC configuration — disable the
 * datapath, program FIFO thresholds, addresses and function bits, issue a
 * software reset, and finally re-enable TX/RX.  Each reset/enable step is
 * polled with a bounded (i < 100) loop and a timeout warning.
 */
795 atse_reset(struct atse_softc *sc)
801 /* 1. External PHY Initialization using MDIO. */
803 * We select the right MDIO space in atse_attach() and let MII do
807 /* 2. PCS Configuration Register Initialization. */
808 /* a. Set auto negotiation link timer to 1.6ms for SGMII. */
809 PCS_WRITE_2(sc, PCS_EXT_LINK_TIMER_0, 0x0D40);
810 PCS_WRITE_2(sc, PCS_EXT_LINK_TIMER_1, 0x0003);
812 /* b. Configure SGMII. */
813 val = PCS_EXT_IF_MODE_SGMII_ENA|PCS_EXT_IF_MODE_USE_SGMII_AN;
814 PCS_WRITE_2(sc, PCS_EXT_IF_MODE, val);
816 /* c. Enable auto negotiation. */
817 /* Ignore Bits 6,8,13; should be set,set,unset. */
818 val = PCS_READ_2(sc, PCS_CONTROL);
819 val &= ~(PCS_CONTROL_ISOLATE|PCS_CONTROL_POWERDOWN);
820 val &= ~PCS_CONTROL_LOOPBACK; /* Make this a -link1 option? */
821 val |= PCS_CONTROL_AUTO_NEGOTIATION_ENABLE;
822 PCS_WRITE_2(sc, PCS_CONTROL, val);
825 val = PCS_READ_2(sc, PCS_CONTROL);
826 val |= PCS_CONTROL_RESET;
827 PCS_WRITE_2(sc, PCS_CONTROL, val);
829 /* Wait for reset bit to clear; i=100 is excessive. */
830 for (i = 0; i < 100; i++) {
831 val = PCS_READ_2(sc, PCS_CONTROL);
832 if ((val & PCS_CONTROL_RESET) == 0)
837 if ((val & PCS_CONTROL_RESET) != 0) {
838 device_printf(sc->atse_dev, "PCS reset timed out.\n");
842 /* 3. MAC Configuration Register Initialization. */
843 /* a. Disable MAC transmit and receive datapath. */
844 mask = BASE_CFG_COMMAND_CONFIG_TX_ENA|BASE_CFG_COMMAND_CONFIG_RX_ENA;
845 val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
847 /* Samples in the manual do have the SW_RESET bit set here, why? */
848 CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
849 /* Wait for bits to be cleared; i=100 is excessive. */
850 for (i = 0; i < 100; i++) {
851 val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
852 if ((val4 & mask) == 0)
856 if ((val4 & mask) != 0) {
857 device_printf(sc->atse_dev, "Disabling MAC TX/RX timed out.\n");
860 /* b. MAC FIFO configuration. */
861 CSR_WRITE_4(sc, BASE_CFG_TX_SECTION_EMPTY, FIFO_DEPTH_TX - 16);
862 CSR_WRITE_4(sc, BASE_CFG_TX_ALMOST_FULL, 3);
863 CSR_WRITE_4(sc, BASE_CFG_TX_ALMOST_EMPTY, 8);
864 CSR_WRITE_4(sc, BASE_CFG_RX_SECTION_EMPTY, FIFO_DEPTH_RX - 16);
865 CSR_WRITE_4(sc, BASE_CFG_RX_ALMOST_FULL, 8);
866 CSR_WRITE_4(sc, BASE_CFG_RX_ALMOST_EMPTY, 8);
868 CSR_WRITE_4(sc, BASE_CFG_TX_SECTION_FULL, 16);
869 CSR_WRITE_4(sc, BASE_CFG_RX_SECTION_FULL, 16);
871 /* For store-and-forward mode, set this threshold to 0. */
872 CSR_WRITE_4(sc, BASE_CFG_TX_SECTION_FULL, 0);
873 CSR_WRITE_4(sc, BASE_CFG_RX_SECTION_FULL, 0);
875 /* c. MAC address configuration. */
876 /* Also initialize supplementary addresses to our primary one. */
877 /* XXX-BZ FreeBSD really needs to grow an API for using these. */
878 atse_get_eth_address(sc);
879 atse_set_eth_address(sc, ATSE_ETH_ADDR_ALL);
881 /* d. MAC function configuration. */
882 CSR_WRITE_4(sc, BASE_CFG_FRM_LENGTH, 1518); /* Default. */
883 CSR_WRITE_4(sc, BASE_CFG_TX_IPG_LENGTH, 12);
884 CSR_WRITE_4(sc, BASE_CFG_PAUSE_QUANT, 0xFFFF);
886 val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
888 * If 1000BASE-X/SGMII PCS is initialized, set the ETH_SPEED (bit 3)
889 * and ENA_10 (bit 25) in command_config register to 0. If half duplex
890 * is reported in the PHY/PCS status register, set the HD_ENA (bit 10)
891 * to 1 in command_config register.
892 * BZ: We shoot for 1000 instead.
895 val4 |= BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
897 val4 &= ~BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
899 val4 &= ~BASE_CFG_COMMAND_CONFIG_ENA_10;
902 * We do not want to set this, otherwise, we could not even send
903 * random raw ethernet frames for various other research. By default
904 * FreeBSD will use the right ether source address.
906 val4 |= BASE_CFG_COMMAND_CONFIG_TX_ADDR_INS;
908 val4 |= BASE_CFG_COMMAND_CONFIG_PAD_EN;
909 val4 &= ~BASE_CFG_COMMAND_CONFIG_CRC_FWD;
911 val4 |= BASE_CFG_COMMAND_CONFIG_CNTL_FRM_ENA;
914 val4 |= BASE_CFG_COMMAND_CONFIG_RX_ERR_DISC;
/* Fixed: clear LOOP_ENA on val4 (command_config), not the 16-bit PCS `val'. */
916 val4 &= ~BASE_CFG_COMMAND_CONFIG_LOOP_ENA; /* link0? */
917 CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
920 * Make sure we do not enable 32bit alignment; FreeBSD cannot
921 * cope with the additional padding (though we should!?).
922 * Also make sure we get the CRC appended.
924 val4 = CSR_READ_4(sc, TX_CMD_STAT);
925 val4 &= ~(TX_CMD_STAT_OMIT_CRC|TX_CMD_STAT_TX_SHIFT16);
926 CSR_WRITE_4(sc, TX_CMD_STAT, val4);
927 val4 = CSR_READ_4(sc, RX_CMD_STAT);
928 val4 &= ~RX_CMD_STAT_RX_SHIFT16;
929 CSR_WRITE_4(sc, RX_CMD_STAT, val4);
932 val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
933 val4 |= BASE_CFG_COMMAND_CONFIG_SW_RESET;
934 CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
935 /* Wait for bits to be cleared; i=100 is excessive. */
936 for (i = 0; i < 100; i++) {
937 val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
938 if ((val4 & BASE_CFG_COMMAND_CONFIG_SW_RESET) == 0)
942 if ((val4 & BASE_CFG_COMMAND_CONFIG_SW_RESET) != 0) {
943 device_printf(sc->atse_dev, "MAC reset timed out.\n");
947 /* f. Enable MAC transmit and receive datapath. */
948 mask = BASE_CFG_COMMAND_CONFIG_TX_ENA|BASE_CFG_COMMAND_CONFIG_RX_ENA;
949 val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
951 CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
952 /* Wait for bits to be cleared; i=100 is excessive. */
953 for (i = 0; i < 100; i++) {
954 val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
955 if ((val4 & mask) == mask)
959 if ((val4 & mask) != mask) {
960 device_printf(sc->atse_dev, "Enabling MAC TX/RX timed out.\n");
/*
 * Locked (re)initialization: refresh the MAC address from the ifnet's
 * lladdr, stop the hardware, run atse_reset(), reprogram the RX filter,
 * enable (or, under active polling, leave disabled) the FIFO interrupts,
 * and mark the interface RUNNING before starting the tick callout.
 */
968 atse_init_locked(struct atse_softc *sc)
971 struct mii_data *mii;
974 ATSE_LOCK_ASSERT(sc);
/* Already initialized — nothing to do. */
977 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
981 * Must update the ether address if changed. Given we do not handle
982 * in atse_ioctl() but it's in the general framework, just always
983 * do it here before atse_reset().
985 eaddr = IF_LLADDR(sc->atse_ifp);
986 bcopy(eaddr, &sc->atse_eth_addr, ETHER_ADDR_LEN);
988 /* Make things friendly to halt, cleanup, ... */
989 atse_stop_locked(sc);
993 /* ... and fire up the engine again. */
994 atse_rxfilter_locked(sc);
996 /* Memory rings? DMA engine? */
998 sc->atse_rx_buf_len = 0;
999 sc->atse_flags &= ATSE_FLAGS_LINK; /* Preserve. */
1001 #ifdef DEVICE_POLLING
1002 /* Only enable interrupts if we are not polling. */
1003 if (ifp->if_capenable & IFCAP_POLLING) {
1004 ATSE_RX_INTR_DISABLE(sc);
1005 ATSE_TX_INTR_DISABLE(sc);
1006 ATSE_RX_EVENT_CLEAR(sc);
1007 ATSE_TX_EVENT_CLEAR(sc);
1011 ATSE_RX_INTR_ENABLE(sc);
1012 ATSE_TX_INTR_ENABLE(sc);
1015 mii = device_get_softc(sc->atse_miibus);
1017 sc->atse_flags &= ~ATSE_FLAGS_LINK;
1020 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1021 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1023 callout_reset(&sc->atse_tick, hz, atse_tick, sc);
/*
 * if_init entry point: lock the softc (lock/unlock lines not visible in
 * this excerpt) around atse_init_locked().
 */
1027 atse_init(void *xsc)
1029 struct atse_softc *sc;
1032 * XXXRW: There is some argument that we should immediately do RX
1033 * processing after enabling interrupts, or one may not fire if there
1034 * are buffered packets.
1036 sc = (struct atse_softc *)xsc;
1038 atse_init_locked(sc);
/*
 * ifnet ioctl handler (the switch/case framing is not visible in this
 * excerpt).  Handles: SIOCSIFFLAGS (up/down and PROMISC/ALLMULTI changes
 * via atse_rxfilter_locked()/atse_init_locked()/atse_stop_locked()),
 * SIOCSIFCAP (DEVICE_POLLING enable/disable, swapping interrupts for
 * polling), multicast list changes, media ioctls forwarded to mii, and
 * everything else to ether_ioctl().
 */
1043 atse_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1045 struct atse_softc *sc;
1051 ifr = (struct ifreq *)data;
1056 if (ifp->if_flags & IFF_UP) {
/* Running and only filter-relevant flags changed: just reprogram the filter. */
1057 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
1058 ((ifp->if_flags ^ sc->atse_if_flags) &
1059 (IFF_PROMISC | IFF_ALLMULTI)) != 0)
1060 atse_rxfilter_locked(sc);
1062 atse_init_locked(sc);
1063 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1064 atse_stop_locked(sc);
1065 sc->atse_if_flags = ifp->if_flags;
1070 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1071 #ifdef DEVICE_POLLING
1072 if ((mask & IFCAP_POLLING) != 0 &&
1073 (IFCAP_POLLING & ifp->if_capabilities) != 0) {
1074 ifp->if_capenable ^= IFCAP_POLLING;
1075 if ((IFCAP_POLLING & ifp->if_capenable) != 0) {
1077 error = ether_poll_register(atse_poll, ifp);
1082 /* Disable interrupts. */
1083 ATSE_RX_INTR_DISABLE(sc);
1084 ATSE_TX_INTR_DISABLE(sc);
1085 ATSE_RX_EVENT_CLEAR(sc);
1086 ATSE_TX_EVENT_CLEAR(sc);
1089 * Do not allow disabling of polling if we do
1090 * not have interrupts.
1092 } else if (sc->atse_rx_irq_res != NULL ||
1093 sc->atse_tx_irq_res != NULL) {
1094 error = ether_poll_deregister(ifp);
1095 /* Enable interrupts. */
1096 ATSE_RX_INTR_ENABLE(sc);
1097 ATSE_TX_INTR_ENABLE(sc);
1099 ifp->if_capenable ^= IFCAP_POLLING;
1103 #endif /* DEVICE_POLLING */
1109 atse_rxfilter_locked(sc);
1115 struct mii_data *mii;
1118 mii = device_get_softc(sc->atse_miibus);
1119 ifr = (struct ifreq *)data;
1120 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1124 error = ether_ioctl(ifp, command, data);
/*
 * Dump the RX/TX FIFO status, event, interrupt-enable and fill-level
 * registers for debugging; gated on the debug.atse_intr_debug_enable
 * sysctl and tagged with the caller-supplied `intrname'.
 */
1132 atse_intr_debug(struct atse_softc *sc, const char *intrname)
1134 uint32_t rxs, rxe, rxi, rxf, txs, txe, txi, txf;
1136 if (!atse_intr_debug_enable)
1139 rxs = ATSE_RX_STATUS_READ(sc);
1140 rxe = ATSE_RX_EVENT_READ(sc);
1141 rxi = ATSE_RX_INTR_READ(sc);
1142 rxf = ATSE_RX_READ_FILL_LEVEL(sc);
1144 txs = ATSE_TX_STATUS_READ(sc);
1145 txe = ATSE_TX_EVENT_READ(sc);
1146 txi = ATSE_TX_INTR_READ(sc);
1147 txf = ATSE_TX_READ_FILL_LEVEL(sc);
1151 "rxs 0x%x rxe 0x%x rxi 0x%x rxf 0x%x "
1152 "txs 0x%x txe 0x%x txi 0x%x txf 0x%x\n",
1155 txs, txe, txi, txf);
/*
 * Per-tick transmit watchdog: when the armed timer expires, log the
 * timeout, count an output error, dump debug state, reinitialize the
 * interface and restart transmission if packets are still queued.
 */
1159 atse_watchdog(struct atse_softc *sc)
1162 ATSE_LOCK_ASSERT(sc);
/* Not armed, or armed but not yet expired. */
1164 if (sc->atse_watchdog_timer == 0 || --sc->atse_watchdog_timer > 0)
1167 device_printf(sc->atse_dev, "watchdog timeout\n");
1168 if_inc_counter(sc->atse_ifp, IFCOUNTER_OERRORS, 1);
1170 atse_intr_debug(sc, "poll");
/* Force a full re-init by clearing RUNNING first. */
1172 sc->atse_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1173 atse_init_locked(sc);
1176 if (!IFQ_DRV_IS_EMPTY(&sc->atse_ifp->if_snd))
1177 atse_start_locked(sc->atse_ifp);
/*
 * One-second callout: service the watchdog, poll MII (the mii_tick call
 * is not visible in this excerpt), re-check link state via the statchg
 * handler while the link is down, and re-arm itself.
 */
1181 atse_tick(void *xsc)
1183 struct atse_softc *sc;
1184 struct mii_data *mii;
1187 sc = (struct atse_softc *)xsc;
1188 ATSE_LOCK_ASSERT(sc);
1191 mii = device_get_softc(sc->atse_miibus);
1194 if ((sc->atse_flags & ATSE_FLAGS_LINK) == 0)
1195 atse_miibus_statchg(sc->atse_dev);
1196 callout_reset(&sc->atse_tick, hz, atse_tick, sc);
1200 * Set media options.
1203 atse_ifmedia_upd(struct ifnet *ifp)
1205 struct atse_softc *sc;
1206 struct mii_data *mii;
1207 struct mii_softc *miisc;
1213 mii = device_get_softc(sc->atse_miibus);
1214 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1216 error = mii_mediachg(mii);
/*
 * Bump the per-type RX error counters for each error bit set in `mask'
 * (as extracted from the FIFO metadata error field by the caller).
 */
1223 atse_update_rx_err(struct atse_softc *sc, uint32_t mask)
1227 /* RX error are 6 bits, we only know 4 of them. */
1228 for (i = 0; i < ATSE_RX_ERR_MAX; i++)
1229 if ((mask & (1 << i)) != 0)
1230 sc->atse_rx_err[i]++;
/*
 * atse_rx_locked: drain the RX FIFO into mbufs and hand complete frames
 * to the stack.  The FIFO delivers 32-bit data words plus a metadata
 * word carrying SOP/EOP/error/channel/empty-byte flags; a frame is
 * accumulated into sc->atse_rx_m between SOP and EOP.
 * Called with the softc lock held.
 * NOTE(review): this listing is elided -- loop/brace structure and some
 * statements are missing between the visible lines.
 */
1234 atse_rx_locked(struct atse_softc *sc)
1236 uint32_t fill, i, j;
1237 uint32_t data, meta;
1242 ATSE_LOCK_ASSERT(sc);
/* Need a receive mbuf cluster before we can pull data. */
1250 if (sc->atse_rx_m == NULL) {
1251 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1254 m->m_len = m->m_pkthdr.len = MCLBYTES;
1255 /* Make sure upper layers will be aligned. */
1256 m_adj(m, ETHER_ALIGN);
/* Process as many words as the FIFO currently holds. */
1260 fill = ATSE_RX_READ_FILL_LEVEL(sc);
1261 for (i = 0; i < fill; i++) {
1263 * XXX-BZ for whatever reason the FIFO requires the
1264 * the data read before we can access the meta data.
1266 data = ATSE_RX_DATA_READ(sc);
1267 meta = ATSE_RX_META_READ(sc);
/* Hardware flagged an error on this word: count it and taint frame. */
1268 if (meta & A_ONCHIP_FIFO_MEM_CORE_ERROR_MASK) {
1269 /* XXX-BZ evaluate error. */
1270 atse_update_rx_err(sc, ((meta &
1271 A_ONCHIP_FIFO_MEM_CORE_ERROR_MASK) >>
1272 A_ONCHIP_FIFO_MEM_CORE_ERROR_SHIFT) & 0xff);
1273 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1274 sc->atse_rx_buf_len = 0;
1276 * Should still read till EOP or next SOP.
1278 * XXX-BZ might also depend on
1279 * BASE_CFG_COMMAND_CONFIG_RX_ERR_DISC
1281 sc->atse_flags |= ATSE_FLAGS_ERROR;
/* Only channel 0 is expected; anything else is logged. */
1284 if ((meta & A_ONCHIP_FIFO_MEM_CORE_CHANNEL_MASK) != 0)
1285 device_printf(sc->atse_dev, "%s: unexpected "
1286 "channel %u\n", __func__, (meta &
1287 A_ONCHIP_FIFO_MEM_CORE_CHANNEL_MASK) >>
1288 A_ONCHIP_FIFO_MEM_CORE_CHANNEL_SHIFT);
/* Start-of-packet: reset accumulation state. */
1290 if (meta & A_ONCHIP_FIFO_MEM_CORE_SOP) {
1292 * There is no need to clear SOP between 1st
1293 * and subsequent packet data junks.
1295 if (sc->atse_rx_buf_len != 0 &&
1296 (sc->atse_flags & ATSE_FLAGS_SOP_SEEN) == 0)
1298 device_printf(sc->atse_dev, "%s: SOP "
1299 "without empty buffer: %u\n",
1300 __func__, sc->atse_rx_buf_len);
1301 /* XXX-BZ any better counter? */
1302 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1305 if ((sc->atse_flags & ATSE_FLAGS_SOP_SEEN) == 0)
1307 sc->atse_flags |= ATSE_FLAGS_SOP_SEEN;
1308 sc->atse_rx_buf_len = 0;
1311 #if 0 /* We had to read the data before we could access meta data. See above. */
1312 data = ATSE_RX_DATA_READ(sc);
1314 /* Make sure to not overflow the mbuf data size. */
1315 if (sc->atse_rx_buf_len >= sc->atse_rx_m->m_len -
1318 * XXX-BZ Error. We need more mbufs and are
1319 * not setup for this yet.
1321 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1322 sc->atse_flags |= ATSE_FLAGS_ERROR;
/* Copy the 4 data bytes into the mbuf unless the frame is tainted. */
1324 if ((sc->atse_flags & ATSE_FLAGS_ERROR) == 0)
1326 * MUST keep this bcopy as m_data after m_adj
1327 * for IP header aligment is on half-word
1328 * and not word alignment.
1330 bcopy(&data, (uint8_t *)(sc->atse_rx_m->m_data +
1331 sc->atse_rx_buf_len), sizeof(data));
/*
 * End-of-packet: the EMPTY field says how many bytes of the last
 * word are unused; adjust length, finalize the mbuf and pass it up.
 */
1332 if (meta & A_ONCHIP_FIFO_MEM_CORE_EOP) {
1336 A_ONCHIP_FIFO_MEM_CORE_EMPTY_MASK) >>
1337 A_ONCHIP_FIFO_MEM_CORE_EMPTY_SHIFT;
1338 sc->atse_rx_buf_len += (4 - empty);
1340 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
1344 m->m_pkthdr.len = m->m_len =
1345 sc->atse_rx_buf_len;
1346 sc->atse_rx_m = NULL;
1348 sc->atse_rx_buf_len = 0;
1349 sc->atse_flags &= ~ATSE_FLAGS_SOP_SEEN;
/* Errored frame: drop it instead of passing it up. */
1350 if (sc->atse_flags & ATSE_FLAGS_ERROR) {
1351 sc->atse_flags &= ~ATSE_FLAGS_ERROR;
1354 m->m_pkthdr.rcvif = ifp;
1356 (*ifp->if_input)(ifp, m);
/* Under polling, stop once the granted cycle budget is used up. */
1359 #ifdef DEVICE_POLLING
1360 if (ifp->if_capenable & IFCAP_POLLING) {
1361 if (sc->atse_rx_cycles <= 0)
1363 sc->atse_rx_cycles--;
1366 goto outer; /* Need a new mbuf. */
1368 sc->atse_rx_buf_len += sizeof(data);
1372 /* XXX-BZ could optimize in case of another packet waiting. */
1380 * Report current media status.
/*
 * atse_ifmedia_sts: ifmedia "status" callback.  Polls the PHY through
 * mii(4) and copies the active media and status back to the caller.
 */
1383 atse_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1385 struct atse_softc *sc;
1386 struct mii_data *mii;
1391 mii = device_get_softc(sc->atse_miibus);
1393 ifmr->ifm_active = mii->mii_media_active;
1394 ifmr->ifm_status = mii->mii_media_status;
/*
 * atse_rx_intr: RX FIFO interrupt handler.  Skipped entirely when the
 * interface runs in polling mode.  Records over/underflow events as
 * input errors, then drains the FIFO under a carefully ordered
 * disable/clear/re-enable sequence (rationale in the comment below).
 */
1399 atse_rx_intr(void *arg)
1401 struct atse_softc *sc;
1405 sc = (struct atse_softc *)arg;
/* Polling owns the FIFO; interrupts must not touch it. */
1409 #ifdef DEVICE_POLLING
1410 if (ifp->if_capenable & IFCAP_POLLING) {
1416 atse_intr_debug(sc, "rx");
1417 rxe = ATSE_RX_EVENT_READ(sc);
1418 if (rxe & (A_ONCHIP_FIFO_MEM_CORE_EVENT_OVERFLOW|
1419 A_ONCHIP_FIFO_MEM_CORE_EVENT_UNDERFLOW)) {
1420 /* XXX-BZ ERROR HANDLING. */
1421 atse_update_rx_err(sc, ((rxe &
1422 A_ONCHIP_FIFO_MEM_CORE_ERROR_MASK) >>
1423 A_ONCHIP_FIFO_MEM_CORE_ERROR_SHIFT) & 0xff);
1424 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1428 * There is considerable subtlety in the race-free handling of rx
1429 * interrupts: we must disable interrupts whenever we manipulate the
1430 * FIFO to prevent further interrupts from firing before we are done;
1431 * we must clear the event after processing to prevent the event from
1432 * being immediately reposted due to data remaining; we must clear the
1433 * event mask before reenabling interrupts or risk missing a positive
1434 * edge; and we must recheck everything after completing in case the
1435 * event posted between clearing events and reenabling interrupts. If
1436 * a race is experienced, we must restart the whole mechanism.
1439 ATSE_RX_INTR_DISABLE(sc);
/* Budget for atse_rx_locked() when polling logic is shared. */
1441 sc->atse_rx_cycles = RX_CYCLES_IN_INTR;
1444 ATSE_RX_EVENT_CLEAR(sc);
1446 /* Disable interrupts if interface is down. */
1447 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1448 ATSE_RX_INTR_ENABLE(sc);
/* Repeat until the FIFO is observed empty (closes the race window). */
1449 } while (!(ATSE_RX_STATUS_READ(sc) &
1450 A_ONCHIP_FIFO_MEM_CORE_STATUS_EMPTY));
/*
 * atse_tx_intr: TX FIFO interrupt handler.  Skipped in polling mode.
 * Counts over/underflow events as output errors, then restarts
 * transmission under a disable/send/clear/re-enable sequence whose
 * ordering rationale is documented inline below.
 */
1456 atse_tx_intr(void *arg)
1458 struct atse_softc *sc;
1462 sc = (struct atse_softc *)arg;
/* Polling owns the FIFO; interrupts must not touch it. */
1466 #ifdef DEVICE_POLLING
1467 if (ifp->if_capenable & IFCAP_POLLING) {
1473 /* XXX-BZ build histogram. */
1474 atse_intr_debug(sc, "tx");
1475 txe = ATSE_TX_EVENT_READ(sc);
1476 if (txe & (A_ONCHIP_FIFO_MEM_CORE_EVENT_OVERFLOW|
1477 A_ONCHIP_FIFO_MEM_CORE_EVENT_UNDERFLOW)) {
1478 /* XXX-BZ ERROR HANDLING. */
1479 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1483 * There is also considerable subtlety in the race-free handling of
1484 * tx interrupts: all processing occurs with interrupts disabled to
1485 * prevent spurious refiring while transmit is in progress (which
1486 * could occur if the FIFO drains while sending -- quite likely); we
1487 * must not clear the event mask until after we've sent, also to
1488 * prevent spurious refiring; once we've cleared the event mask we can
1489 * reenable interrupts, but there is a possible race between clear and
1490 * enable, so we must recheck and potentially repeat the whole process
1491 * if it is detected.
1494 ATSE_TX_INTR_DISABLE(sc);
/* FIFO made progress: disarm the TX watchdog before restarting TX. */
1495 sc->atse_watchdog_timer = 0;
1496 atse_start_locked(ifp);
1497 ATSE_TX_EVENT_CLEAR(sc);
1499 /* Disable interrupts if interface is down. */
1500 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1501 ATSE_TX_INTR_ENABLE(sc);
/* Loop while packets remain queued and the FIFO still has room. */
1502 } while (ATSE_TX_PENDING(sc) &&
1503 !(ATSE_TX_STATUS_READ(sc) & A_ONCHIP_FIFO_MEM_CORE_STATUS_FULL));
#ifdef DEVICE_POLLING
/*
 * atse_poll: DEVICE_POLLING entry point.  Does one bounded RX pass and
 * a TX kick; on POLL_AND_CHECK_STATUS (or leftover RX budget) it also
 * inspects the FIFO event registers for over/underflows, disarms the
 * watchdog when the TX FIFO is empty, and re-initializes on severe
 * error.  Returns the number of received packets (rx_npkts).
 */
1509 atse_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1511 struct atse_softc *sc;
/* Interface down: nothing to poll. */
1516 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
/* Hand the poll budget to the RX path, then kick TX. */
1521 sc->atse_rx_cycles = count;
1522 rx_npkts = atse_rx_locked(sc);
1523 atse_start_locked(ifp);
1525 if (sc->atse_rx_cycles > 0 || cmd == POLL_AND_CHECK_STATUS) {
1528 rx = ATSE_RX_EVENT_READ(sc);
1529 tx = ATSE_TX_EVENT_READ(sc);
1531 if (rx & (A_ONCHIP_FIFO_MEM_CORE_EVENT_OVERFLOW|
1532 A_ONCHIP_FIFO_MEM_CORE_EVENT_UNDERFLOW)) {
1533 /* XXX-BZ ERROR HANDLING. */
1534 atse_update_rx_err(sc, ((rx &
1535 A_ONCHIP_FIFO_MEM_CORE_ERROR_MASK) >>
1536 A_ONCHIP_FIFO_MEM_CORE_ERROR_SHIFT) & 0xff);
1537 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1539 if (tx & (A_ONCHIP_FIFO_MEM_CORE_EVENT_OVERFLOW|
1540 A_ONCHIP_FIFO_MEM_CORE_EVENT_UNDERFLOW)) {
1541 /* XXX-BZ ERROR HANDLING. */
1542 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
/* TX FIFO drained: no outstanding transmit to watchdog. */
1544 if (ATSE_TX_READ_FILL_LEVEL(sc) == 0)
1545 sc->atse_watchdog_timer = 0;
/*
 * NOTE(review): the condition below is empty as written -- presumably
 * this branch is guarded by an #if 0 on an elided neighboring line;
 * verify against the full source before touching it.
 */
1548 if (/* Severe error; if only we could find out. */) {
1549 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1550 atse_init_locked(sc);
1558 #endif /* DEVICE_POLLING */
/*
 * atse_mac_stats_regs: name/description table for the MAC hardware
 * statistics counters, indexed by register offset order.  Descriptions
 * are mostly copied from the Altera TSE datasheet; the table is walked
 * by atse_sysctl_stats_attach() to create one read-only sysctl per
 * counter.  Entries with NULL name/descr are skipped at attach time.
 */
1560 static struct atse_mac_stats_regs {
1562 const char *descr; /* Mostly copied from Altera datasheet. */
1563 } atse_mac_stats_regs[] = {
1565 { "aFramesTransmittedOK",
1566 "The number of frames that are successfully transmitted including "
1567 "the pause frames." },
1568 { "aFramesReceivedOK",
1569 "The number of frames that are successfully received including the "
1571 { "aFrameCheckSequenceErrors",
1572 "The number of receive frames with CRC error." },
1573 { "aAlignmentErrors",
1574 "The number of receive frames with alignment error." },
1575 { "aOctetsTransmittedOK",
1576 "The lower 32 bits of the number of data and padding octets that "
1577 "are successfully transmitted." },
1578 { "aOctetsReceivedOK",
1579 "The lower 32 bits of the number of data and padding octets that "
1580 " are successfully received." },
1581 { "aTxPAUSEMACCtrlFrames",
1582 "The number of pause frames transmitted." },
1583 { "aRxPAUSEMACCtrlFrames",
1584 "The number received pause frames received." },
1586 "The number of errored frames received." },
1588 "The number of transmit frames with either a FIFO overflow error, "
1589 "a FIFO underflow error, or a error defined by the user "
1592 "The number of valid unicast frames received." },
1593 { "ifInMulticastPkts",
1594 "The number of valid multicast frames received. The count does "
1595 "not include pause frames." },
1596 { "ifInBroadcastPkts",
1597 "The number of valid broadcast frames received." },
1599 "This statistics counter is not in use. The MAC function does not "
1600 "discard frames that are written to the FIFO buffer by the user "
1603 "The number of valid unicast frames transmitted." },
1604 { "ifOutMulticastPkts",
1605 "The number of valid multicast frames transmitted, excluding pause "
1607 { "ifOutBroadcastPkts",
1608 "The number of valid broadcast frames transmitted." },
1609 { "etherStatsDropEvents",
1610 "The number of frames that are dropped due to MAC internal errors "
1611 "when FIFO buffer overflow persists." },
1612 { "etherStatsOctets",
1613 "The lower 32 bits of the total number of octets received. This "
1614 "count includes both good and errored frames." },
1616 "The total number of good and errored frames received." },
1617 { "etherStatsUndersizePkts",
1618 "The number of frames received with length less than 64 bytes. "
1619 "This count does not include errored frames." },
1620 { "etherStatsOversizePkts",
1621 "The number of frames received that are longer than the value "
1622 "configured in the frm_length register. This count does not "
1623 "include errored frames." },
1624 { "etherStatsPkts64Octets",
1625 "The number of 64-byte frames received. This count includes good "
1626 "and errored frames." },
1627 { "etherStatsPkts65to127Octets",
1628 "The number of received good and errored frames between the length "
1629 "of 65 and 127 bytes." },
1630 { "etherStatsPkts128to255Octets",
1631 "The number of received good and errored frames between the length "
1632 "of 128 and 255 bytes." },
1633 { "etherStatsPkts256to511Octets",
1634 "The number of received good and errored frames between the length "
1635 "of 256 and 511 bytes." },
1636 { "etherStatsPkts512to1023Octets",
1637 "The number of received good and errored frames between the length "
1638 "of 512 and 1023 bytes." },
1639 { "etherStatsPkts1024to1518Octets",
1640 "The number of received good and errored frames between the length "
1641 "of 1024 and 1518 bytes." },
1642 { "etherStatsPkts1519toXOctets",
1643 "The number of received good and errored frames between the length "
1644 "of 1519 and the maximum frame length configured in the frm_length "
1646 { "etherStatsJabbers",
1647 "Too long frames with CRC error." },
1648 { "etherStatsFragments",
1649 "Too short frames with CRC error." },
1650 /* 0x39 unused, 0x3a/b non-stats. */
1652 /* Extended Statistics Counters */
1653 { "msb_aOctetsTransmittedOK",
1654 "Upper 32 bits of the number of data and padding octets that are "
1655 "successfully transmitted." },
1656 { "msb_aOctetsReceivedOK",
1657 "Upper 32 bits of the number of data and padding octets that are "
1658 "successfully received." },
1659 { "msb_etherStatsOctets",
1660 "Upper 32 bits of the total number of octets received. This count "
1661 "includes both good and errored frames." }
/*
 * sysctl_atse_mac_stats_proc: sysctl handler for one MAC statistics
 * counter.  arg2 (offset) selects the hardware register; the value is
 * read via CSR_READ_4 and exported read-only as an unsigned int.
 */
1665 sysctl_atse_mac_stats_proc(SYSCTL_HANDLER_ARGS)
1667 struct atse_softc *sc;
1668 int error, offset, s;
1673 s = CSR_READ_4(sc, offset);
1674 error = sysctl_handle_int(oidp, &s, 0, req);
1675 if (error || !req->newptr)
/*
 * atse_rx_err_stats_regs: name/description table for the software RX
 * error counters (sc->atse_rx_err[]), indexed by the ATSE_RX_ERR_*
 * bit positions defined below.  Walked by atse_sysctl_stats_attach().
 */
1681 static struct atse_rx_err_stats_regs {
1684 } atse_rx_err_stats_regs[] = {
/* Bit positions within the 6-bit hardware RX error field. */
1686 #define ATSE_RX_ERR_FIFO_THRES_EOP 0 /* FIFO threshold reached, on EOP. */
1687 #define ATSE_RX_ERR_ELEN 1 /* Frame/payload length not valid. */
1688 #define ATSE_RX_ERR_CRC32 2 /* CRC-32 error. */
1689 #define ATSE_RX_ERR_FIFO_THRES_TRUNC 3 /* FIFO thresh., truncated frame. */
1690 #define ATSE_RX_ERR_4 4 /* ? */
1691 #define ATSE_RX_ERR_5 5 /* / */
1693 { "rx_err_fifo_thres_eop",
1694 "FIFO threshold reached, reported on EOP." },
1695 { "rx_err_fifo_elen",
1696 "Frame or payload length not valid." },
1697 { "rx_err_fifo_crc32",
1699 { "rx_err_fifo_thres_trunc",
1700 "FIFO threshold reached, truncated frame" },
/*
 * sysctl_atse_rx_err_stats_proc: sysctl handler for one software RX
 * error counter.  arg2 (offset) indexes sc->atse_rx_err[]; exported
 * read-only as an unsigned int.
 */
1708 sysctl_atse_rx_err_stats_proc(SYSCTL_HANDLER_ARGS)
1710 struct atse_softc *sc;
1711 int error, offset, s;
1716 s = sc->atse_rx_err[offset];
1717 error = sysctl_handle_int(oidp, &s, 0, req);
1718 if (error || !req->newptr)
/*
 * atse_sysctl_stats_attach: create one read-only sysctl node per MAC
 * statistics register and per software RX error counter, under the
 * device's sysctl tree.  Table entries with NULL name or description
 * are skipped.  arg2 carries the table index for the handler.
 */
1725 atse_sysctl_stats_attach(device_t dev)
1727 struct sysctl_ctx_list *sctx;
1728 struct sysctl_oid *soid;
1729 struct atse_softc *sc;
1732 sc = device_get_softc(dev);
1733 sctx = device_get_sysctl_ctx(dev);
1734 soid = device_get_sysctl_tree(dev);
1736 /* MAC statistics. */
1737 for (i = 0; i < nitems(atse_mac_stats_regs); i++) {
1738 if (atse_mac_stats_regs[i].name == NULL ||
1739 atse_mac_stats_regs[i].descr == NULL)
1742 SYSCTL_ADD_PROC(sctx, SYSCTL_CHILDREN(soid), OID_AUTO,
1743 atse_mac_stats_regs[i].name, CTLTYPE_UINT|CTLFLAG_RD,
1744 sc, i, sysctl_atse_mac_stats_proc, "IU",
1745 atse_mac_stats_regs[i].descr);
/* Software RX error counters. */
1749 for (i = 0; i < ATSE_RX_ERR_MAX; i++) {
1750 if (atse_rx_err_stats_regs[i].name == NULL ||
1751 atse_rx_err_stats_regs[i].descr == NULL)
1754 SYSCTL_ADD_PROC(sctx, SYSCTL_CHILDREN(soid), OID_AUTO,
1755 atse_rx_err_stats_regs[i].name, CTLTYPE_UINT|CTLFLAG_RD,
1756 sc, i, sysctl_atse_rx_err_stats_proc, "IU",
1757 atse_rx_err_stats_regs[i].descr);
1762 * Generic device handling routines.
/*
 * atse_attach: bring the device up: read option bits, init lock/callout,
 * allocate the TX bounce buffer, configure MDIO/PHY addressing, create
 * and attach the ifnet, attach the PHY via mii(4), hook up RX/TX
 * interrupts (falling back to DEVICE_POLLING when no IRQ resources
 * exist), and register the statistics sysctls.
 * NOTE(review): this listing is elided -- error-handling paths and some
 * statements are missing between the visible lines.
 */
1765 atse_attach(device_t dev)
1767 struct atse_softc *sc;
1771 sc = device_get_softc(dev);
1773 atse_ethernet_option_bits_read(dev);
1775 mtx_init(&sc->atse_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1778 callout_init_mtx(&sc->atse_tick, &sc->atse_mtx, 0);
/* Bounce buffer large enough for a jumbo frame. */
1780 sc->atse_tx_buf = malloc(ETHER_MAX_LEN_JUMBO, M_DEVBUF, M_WAITOK);
1783 * We are only doing single-PHY with this driver currently. The
1784 * defaults would be right so that BASE_CFG_MDIO_ADDR0 points to the
1785 * 1st PHY address (0) apart from the fact that BMCR0 is always
1786 * the PCS mapping, so we always use BMCR1. See Table 5-1 0xA0-0xBF.
1788 #if 0 /* Always PCS. */
1789 sc->atse_bmcr0 = MDIO_0_START;
1790 CSR_WRITE_4(sc, BASE_CFG_MDIO_ADDR0, 0x00);
1792 /* Always use matching PHY for atse[0..]. */
1793 sc->atse_phy_addr = device_get_unit(dev);
1794 sc->atse_bmcr1 = MDIO_1_START;
1795 CSR_WRITE_4(sc, BASE_CFG_MDIO_ADDR1, sc->atse_phy_addr);
1797 /* Reset the adapter. */
1800 /* Setup interface. */
1801 ifp = sc->atse_ifp = if_alloc(IFT_ETHER);
1803 device_printf(dev, "if_alloc() failed\n");
1808 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1809 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1810 ifp->if_ioctl = atse_ioctl;
1811 ifp->if_start = atse_start;
1812 ifp->if_init = atse_init;
1813 IFQ_SET_MAXLEN(&ifp->if_snd, ATSE_TX_LIST_CNT - 1);
1814 ifp->if_snd.ifq_drv_maxlen = ATSE_TX_LIST_CNT - 1;
1815 IFQ_SET_READY(&ifp->if_snd);
/* Attach the PHY; media callbacks route through mii(4). */
1818 error = mii_attach(dev, &sc->atse_miibus, ifp, atse_ifmedia_upd,
1819 atse_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
1821 device_printf(dev, "attaching PHY failed: %d\n", error);
1825 /* Call media-indepedent attach routine. */
1826 ether_ifattach(ifp, sc->atse_eth_addr);
1828 /* Tell the upper layer(s) about vlan mtu support. */
1829 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1830 ifp->if_capabilities |= IFCAP_VLAN_MTU;
1831 ifp->if_capenable = ifp->if_capabilities;
1832 #ifdef DEVICE_POLLING
1833 /* We will enable polling by default if no irqs available. See below. */
1834 ifp->if_capabilities |= IFCAP_POLLING;
1837 /* Hook up interrupts. */
1838 if (sc->atse_rx_irq_res != NULL) {
1839 error = bus_setup_intr(dev, sc->atse_rx_irq_res, INTR_TYPE_NET |
1840 INTR_MPSAFE, NULL, atse_rx_intr, sc, &sc->atse_rx_intrhand);
1842 device_printf(dev, "enabling RX IRQ failed\n");
1843 ether_ifdetach(ifp);
1848 if (sc->atse_tx_irq_res != NULL) {
1849 error = bus_setup_intr(dev, sc->atse_tx_irq_res, INTR_TYPE_NET |
1850 INTR_MPSAFE, NULL, atse_tx_intr, sc, &sc->atse_tx_intrhand);
/* TX hookup failed: unwind the RX interrupt set up above. */
1852 bus_teardown_intr(dev, sc->atse_rx_irq_res,
1853 sc->atse_rx_intrhand);
1854 device_printf(dev, "enabling TX IRQ failed\n");
1855 ether_ifdetach(ifp);
/* Polling requested, or no IRQ resources at all: use polling. */
1860 if ((ifp->if_capenable & IFCAP_POLLING) != 0 ||
1861 (sc->atse_rx_irq_res == NULL && sc->atse_tx_irq_res == NULL)) {
1862 #ifdef DEVICE_POLLING
1863 /* If not on and no IRQs force it on. */
1864 if (sc->atse_rx_irq_res == NULL && sc->atse_tx_irq_res == NULL){
1865 ifp->if_capenable |= IFCAP_POLLING;
1866 device_printf(dev, "forcing to polling due to no "
1869 error = ether_poll_register(atse_poll, ifp);
1873 device_printf(dev, "no DEVICE_POLLING in kernel and no IRQs\n");
/* Interrupt mode: enable both FIFO interrupt sources. */
1877 ATSE_RX_INTR_ENABLE(sc);
1878 ATSE_TX_INTR_ENABLE(sc);
1886 atse_sysctl_stats_attach(dev);
/*
 * atse_detach: tear the device down in reverse order of attach:
 * deregister polling, stop the interface and drain the tick callout,
 * detach the ifnet, delete the miibus child, tear down interrupts,
 * free the TX buffer and destroy the lock.
 */
1892 atse_detach(device_t dev)
1894 struct atse_softc *sc;
1897 sc = device_get_softc(dev);
1898 KASSERT(mtx_initialized(&sc->atse_mtx), ("%s: mutex not initialized",
1899 device_get_nameunit(dev)));
1902 #ifdef DEVICE_POLLING
1903 if (ifp->if_capenable & IFCAP_POLLING)
1904 ether_poll_deregister(ifp);
1907 /* Only cleanup if attach succeeded. */
1908 if (device_is_attached(dev)) {
1910 atse_stop_locked(sc);
1912 callout_drain(&sc->atse_tick);
1913 ether_ifdetach(ifp);
1915 if (sc->atse_miibus != NULL)
1916 device_delete_child(dev, sc->atse_miibus);
1918 if (sc->atse_tx_intrhand)
1919 bus_teardown_intr(dev, sc->atse_tx_irq_res,
1920 sc->atse_tx_intrhand);
1921 if (sc->atse_rx_intrhand)
1922 bus_teardown_intr(dev, sc->atse_rx_irq_res,
1923 sc->atse_rx_intrhand);
1928 if (sc->atse_tx_buf != NULL)
1929 free(sc->atse_tx_buf, M_DEVBUF);
1931 mtx_destroy(&sc->atse_mtx);
1936 /* Shared between nexus and fdt implementation. */
/*
 * atse_detach_resources: release every bus resource the bus glue
 * allocated (TX/RX control and data memory windows, TX/RX IRQs, and
 * the main register window), NULLing each pointer so the routine is
 * safe to call on partial allocations.
 */
1938 atse_detach_resources(device_t dev)
1940 struct atse_softc *sc;
1942 sc = device_get_softc(dev);
1944 if (sc->atse_txc_mem_res != NULL) {
1945 bus_release_resource(dev, SYS_RES_MEMORY, sc->atse_txc_mem_rid,
1946 sc->atse_txc_mem_res);
1947 sc->atse_txc_mem_res = NULL;
1949 if (sc->atse_tx_mem_res != NULL) {
1950 bus_release_resource(dev, SYS_RES_MEMORY, sc->atse_tx_mem_rid,
1951 sc->atse_tx_mem_res);
1952 sc->atse_tx_mem_res = NULL;
1954 if (sc->atse_tx_irq_res != NULL) {
1955 bus_release_resource(dev, SYS_RES_IRQ, sc->atse_tx_irq_rid,
1956 sc->atse_tx_irq_res);
1957 sc->atse_tx_irq_res = NULL;
1959 if (sc->atse_rxc_mem_res != NULL) {
1960 bus_release_resource(dev, SYS_RES_MEMORY, sc->atse_rxc_mem_rid,
1961 sc->atse_rxc_mem_res);
1962 sc->atse_rxc_mem_res = NULL;
1964 if (sc->atse_rx_mem_res != NULL) {
1965 bus_release_resource(dev, SYS_RES_MEMORY, sc->atse_rx_mem_rid,
1966 sc->atse_rx_mem_res);
1967 sc->atse_rx_mem_res = NULL;
1969 if (sc->atse_rx_irq_res != NULL) {
1970 bus_release_resource(dev, SYS_RES_IRQ, sc->atse_rx_irq_rid,
1971 sc->atse_rx_irq_res);
1972 sc->atse_rx_irq_res = NULL;
1974 if (sc->atse_mem_res != NULL) {
1975 bus_release_resource(dev, SYS_RES_MEMORY, sc->atse_mem_rid,
1977 sc->atse_mem_res = NULL;
/*
 * atse_detach_dev: device_method detach wrapper shared by the bus
 * glue.  Runs atse_detach(), logs (but cannot recover from) failure,
 * then releases bus resources.
 */
1982 atse_detach_dev(device_t dev)
1986 error = atse_detach(dev);
1988 /* We are basically in undefined state now. */
1989 device_printf(dev, "atse_detach() failed: %d\n", error);
1993 atse_detach_resources(dev);
/*
 * atse_miibus_readreg: miibus read method.  Only the single PHY at
 * sc->atse_phy_addr is served (MDIO space is not remapped on the fly);
 * other addresses are rejected before reaching the hardware.
 */
1999 atse_miibus_readreg(device_t dev, int phy, int reg)
2001 struct atse_softc *sc;
2003 sc = device_get_softc(dev);
2006 * We currently do not support re-mapping of MDIO space on-the-fly
2007 * but de-facto hard-code the phy#.
2009 if (phy != sc->atse_phy_addr)
2012 return (PHY_READ_2(sc, reg));
/*
 * atse_miibus_writereg: miibus write method.  Mirrors the readreg
 * constraint: only the hard-coded PHY at sc->atse_phy_addr is
 * accepted; other addresses are rejected.
 */
2016 atse_miibus_writereg(device_t dev, int phy, int reg, int data)
2018 struct atse_softc *sc;
2020 sc = device_get_softc(dev);
2023 * We currently do not support re-mapping of MDIO space on-the-fly
2024 * but de-facto hard-code the phy#.
2026 if (phy != sc->atse_phy_addr)
2029 PHY_WRITE_2(sc, reg, data);
/*
 * atse_miibus_statchg: miibus link-state change method.  Translates the
 * PHY's negotiated media (speed, duplex, link validity) into the MAC's
 * BASE_CFG_COMMAND_CONFIG register: ENA_10/ETH_SPEED select 10/100/1000,
 * HD_ENA selects half duplex, and TX_ENA/RX_ENA activate the MAC.
 * Called with the softc lock held.
 */
2034 atse_miibus_statchg(device_t dev)
2036 struct atse_softc *sc;
2037 struct mii_data *mii;
2041 sc = device_get_softc(dev);
2042 ATSE_LOCK_ASSERT(sc);
2044 mii = device_get_softc(sc->atse_miibus);
/* Nothing to do before mii/ifp exist or while the interface is down. */
2046 if (mii == NULL || ifp == NULL ||
2047 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2050 val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
2052 /* Assume no link. */
2053 sc->atse_flags &= ~ATSE_FLAGS_LINK;
2055 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
2056 (IFM_ACTIVE | IFM_AVALID)) {
2058 switch (IFM_SUBTYPE(mii->mii_media_active)) {
/* 10 Mbit/s: ENA_10 set, gigabit speed bit clear. */
2060 val4 |= BASE_CFG_COMMAND_CONFIG_ENA_10;
2061 val4 &= ~BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
2062 sc->atse_flags |= ATSE_FLAGS_LINK;
/* 100 Mbit/s: both bits clear. */
2065 val4 &= ~BASE_CFG_COMMAND_CONFIG_ENA_10;
2066 val4 &= ~BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
2067 sc->atse_flags |= ATSE_FLAGS_LINK;
/* 1000 Mbit/s: gigabit speed bit set. */
2070 val4 &= ~BASE_CFG_COMMAND_CONFIG_ENA_10;
2071 val4 |= BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
2072 sc->atse_flags |= ATSE_FLAGS_LINK;
2079 if ((sc->atse_flags & ATSE_FLAGS_LINK) == 0) {
2080 /* XXX-BZ need to stop the MAC? */
/*
 * Fix: mask the extracted options with IFM_FDX, rather than masking
 * the media word before extraction.  The old form,
 * IFM_OPTIONS(active & IFM_FDX), only worked because IFM_FDX happens
 * to lie within the bits IFM_OPTIONS() keeps.
 */
2084 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
2085 val4 &= ~BASE_CFG_COMMAND_CONFIG_HD_ENA;
2087 val4 |= BASE_CFG_COMMAND_CONFIG_HD_ENA;
2088 /* XXX-BZ flow control? */
2090 /* Make sure the MAC is activated. */
2091 val4 |= BASE_CFG_COMMAND_CONFIG_TX_ENA;
2092 val4 |= BASE_CFG_COMMAND_CONFIG_RX_ENA;
2094 CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);