2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2016 Alexander Motin <mav@FreeBSD.org>
5 * Copyright (c) 2015 Peter Grehan <grehan@freebsd.org>
6 * Copyright (c) 2013 Jeremiah Lott, Avere Systems
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer
14 * in this position and unchanged.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 #include <sys/types.h>
36 #ifndef WITHOUT_CAPSICUM
37 #include <sys/capsicum.h>
39 #include <sys/limits.h>
40 #include <sys/ioctl.h>
42 #include <net/ethernet.h>
43 #include <netinet/in.h>
44 #include <netinet/tcp.h>
46 #ifndef WITHOUT_CAPSICUM
47 #include <capsicum_helpers.h>
59 #include <pthread_np.h>
61 #include "e1000_regs.h"
62 #include "e1000_defines.h"
/* Hardware/register definitions XXX: move some to common code. */

/* PCI identity presented to the guest: Intel 82545EM copper NIC. */
#define E82545_VENDOR_ID_INTEL 0x8086
#define E82545_DEV_ID_82545EM_COPPER 0x100F
#define E82545_SUBDEV_ID 0x1008

#define E82545_REVISION_4 4

/* MDI control register fields. */
#define E82545_MDIC_DATA_MASK 0x0000FFFF
#define E82545_MDIC_OP_MASK 0x0c000000
#define E82545_MDIC_IE 0x20000000

/* EECD flash-write-enable field. */
#define E82545_EECD_FWE_DIS 0x00000010 /* Flash writes disabled */
#define E82545_EECD_FWE_EN 0x00000020 /* Flash writes enabled */
#define E82545_EECD_FWE_MASK 0x00000030 /* Flash writes mask */

/* BAR layout: BAR0 memory-mapped registers, BAR1 flash, BAR2 I/O port. */
#define E82545_BAR_REGISTER 0
#define E82545_BAR_REGISTER_LEN (128*1024)
#define E82545_BAR_FLASH 1
#define E82545_BAR_FLASH_LEN (64*1024)
#define E82545_BAR_IO 2
#define E82545_BAR_IO_LEN 8

/* I/O-space window: IOADDR selects an offset, IODATA accesses it. */
#define E82545_IOADDR 0x00000000
#define E82545_IODATA 0x00000004
#define E82545_IO_REGISTER_MAX 0x0001FFFF
#define E82545_IO_FLASH_BASE 0x00080000
#define E82545_IO_FLASH_MAX 0x000FFFFF
/*
 * Register offset of entry 'offset' in an array of 4-byte registers
 * starting at 'reg'.  Both arguments are fully parenthesized so that
 * expression arguments (e.g. conditionals or additions) expand with the
 * intended precedence.
 */
#define E82545_ARRAY_ENTRY(reg, offset) ((reg) + ((offset)<<2))
/* Filter-table array bounds (last valid index). */
#define E82545_RAR_MAX 15
#define E82545_MTA_MAX 127
#define E82545_VFTA_MAX 127

/* Slightly modified from the driver versions, hardcoded for 3 opcode bits,
 * followed by 6 address bits.
 * TODO: make opcode bits and addr bits configurable?
 * NVM Commands - Microwire */
#define E82545_NVM_OPCODE_BITS 3
#define E82545_NVM_ADDR_BITS 6
#define E82545_NVM_DATA_BITS 16
#define E82545_NVM_OPADDR_BITS (E82545_NVM_OPCODE_BITS + E82545_NVM_ADDR_BITS)
#define E82545_NVM_ADDR_MASK ((1 << E82545_NVM_ADDR_BITS)-1)
#define E82545_NVM_OPCODE_MASK \
    (((1 << E82545_NVM_OPCODE_BITS) - 1) << E82545_NVM_ADDR_BITS)
#define E82545_NVM_OPCODE_READ (0x6 << E82545_NVM_ADDR_BITS) /* read */
#define E82545_NVM_OPCODE_WRITE (0x5 << E82545_NVM_ADDR_BITS) /* write */
#define E82545_NVM_OPCODE_ERASE (0x7 << E82545_NVM_ADDR_BITS) /* erase */
#define E82545_NVM_OPCODE_EWEN (0x4 << E82545_NVM_ADDR_BITS) /* wr-enable */

#define E82545_NVM_EEPROM_SIZE 64 /* 64 * 16-bit values == 128 bytes */

#define E1000_ICR_SRPD 0x00010000 /* small receive packet detected */

/* This is an arbitrary number. There is no hard limit on the chip. */
#define I82545_MAX_TXSEGS 64
/* Legacy receive descriptor (16 bytes in guest memory). */
struct e1000_rx_desc {
	uint64_t buffer_addr; /* Address of the descriptor's data buffer */
	uint16_t length; /* Length of data DMAed into data buffer */
	uint16_t csum; /* Packet checksum */
	uint8_t status; /* Descriptor status */
	uint8_t errors; /* Descriptor Errors */
	/* NOTE(review): the remainder of this struct (trailing field(s)
	 * and closing brace) is not visible in this chunk. */
/* Transmit descriptor types, derived from the DEXT bit and DTYP field. */
#define E1000_TXD_MASK (E1000_TXD_CMD_DEXT | 0x00F00000)
#define E1000_TXD_TYP_L (0) /* legacy */
#define E1000_TXD_TYP_C (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_C) /* context */
#define E1000_TXD_TYP_D (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D) /* data */
/* Legacy transmit descriptor */
struct e1000_tx_desc {
	uint64_t buffer_addr; /* Address of the descriptor's data buffer */
	uint16_t length; /* Data buffer length */
	uint8_t cso; /* Checksum offset */
	uint8_t cmd; /* Descriptor control */
	uint8_t status; /* Descriptor status */
	uint8_t css; /* Checksum start */
	/* NOTE(review): code below accesses td.lower.data/.flags and
	 * td.upper.data/.fields, so these members live inside lower/upper
	 * unions whose declarations are not visible in this chunk. */
/* Context descriptor: carries checksum/TSO setup for later data descs. */
struct e1000_context_desc {
	uint8_t ipcss; /* IP checksum start */
	uint8_t ipcso; /* IP checksum offset */
	uint16_t ipcse; /* IP checksum end */
	uint8_t tucss; /* TCP checksum start */
	uint8_t tucso; /* TCP checksum offset */
	uint16_t tucse; /* TCP checksum end */
	uint32_t cmd_and_length;
	uint8_t status; /* Descriptor status */
	uint8_t hdr_len; /* Header length */
	uint16_t mss; /* Maximum segment size */
	/* NOTE(review): code below accesses lower_setup.ip_fields,
	 * upper_setup.tcp_fields and tcp_seg_setup.fields, so these
	 * members are grouped in unions not visible in this chunk. */
/* Data descriptor (extended transmit). */
struct e1000_data_desc {
	uint64_t buffer_addr; /* Address of the descriptor's buffer address */
	uint16_t length; /* Data buffer length */
	uint8_t status; /* Descriptor status */
	uint8_t popts; /* Packet Options */
	/* NOTE(review): remainder of this struct (lower/upper unions and
	 * closing brace) is not visible in this chunk. */
/* One transmit descriptor slot, viewed as any of the three layouts. */
union e1000_tx_udesc {
	struct e1000_tx_desc td;
	struct e1000_context_desc cd;
	struct e1000_data_desc dd;
/* Tx checksum info for a packet. */
/* NOTE(review): the 'struct ck_info {' opener is not visible in this chunk. */
	int ck_valid; /* ck_info is valid */
	uint8_t ck_start; /* start byte of cksum calculation */
	uint8_t ck_off; /* offset of cksum insertion */
	uint16_t ck_len; /* length of cksum calc: 0 is to packet-end */
/* Debug logging: DPRINTF output appears only when e82545_debug is non-zero. */
static int e82545_debug = 0;
/*
 * DPRINTF is wrapped in do { } while (0) so it expands safely inside an
 * unbraced if/else: the previous bare-if form would capture a following
 * 'else' (dangling-else hazard).
 */
#define DPRINTF(msg,params...) do { \
	if (e82545_debug) \
		fprintf(stderr, "e82545: " msg, params); \
} while (0)
#define WPRINTF(msg,params...) fprintf(stderr, "e82545: " msg, params)

/* NB: arguments are evaluated more than once; keep them side-effect free. */
#define MIN(a,b) (((a)<(b))?(a):(b))
#define MAX(a,b) (((a)>(b))?(a):(b))
/* s/w representation of the RAL/RAH regs */
	/* NOTE(review): only this member of 'struct eth_uni' is visible in
	 * this chunk; the struct opener and other fields are not. */
	struct ether_addr eu_eth;
/*
 * Per-device emulation state for the Intel 82545EM NIC.  Register
 * fields mirror guest-visible registers (hex offset noted per field).
 * State is protected by esc_mtx.
 *
 * NOTE(review): several fields referenced by code in this file (e.g.
 * esc_tx_enabled, esc_rx_enabled, esc_rx_active, esc_rx_loopback,
 * esc_tapfd, nvm_mode, nvm_opaddr, nvm_data) are not visible in this
 * chunk of the structure.
 */
struct e82545_softc {
	struct pci_devinst *esc_pi;
	struct vmctx *esc_ctx;
	struct mevent *esc_mevp;
	struct mevent *esc_mevpitr;
	pthread_mutex_t esc_mtx;
	struct ether_addr esc_mac;

	/* General registers */
	uint32_t esc_CTRL; /* x0000 device ctl */
	uint32_t esc_FCAL; /* x0028 flow ctl addr lo */
	uint32_t esc_FCAH; /* x002C flow ctl addr hi */
	uint32_t esc_FCT; /* x0030 flow ctl type */
	uint32_t esc_VET; /* x0038 VLAN eth type */
	uint32_t esc_FCTTV; /* x0170 flow ctl tx timer */
	uint32_t esc_LEDCTL; /* x0E00 LED control */
	uint32_t esc_PBA; /* x1000 pkt buffer allocation */

	/* Interrupt control */
	int esc_irq_asserted;
	uint32_t esc_ICR; /* x00C0 cause read/clear */
	uint32_t esc_ITR; /* x00C4 intr throttling */
	uint32_t esc_ICS; /* x00C8 cause set */
	uint32_t esc_IMS; /* x00D0 mask set/read */
	uint32_t esc_IMC; /* x00D8 mask clear */

	/* Transmit state */
	union e1000_tx_udesc *esc_txdesc;
	struct e1000_context_desc esc_txctx;
	pthread_t esc_tx_tid;
	pthread_cond_t esc_tx_cond;
	uint32_t esc_TXCW; /* x0178 transmit config */
	uint32_t esc_TCTL; /* x0400 transmit ctl */
	uint32_t esc_TIPG; /* x0410 inter-packet gap */
	uint16_t esc_AIT; /* x0458 Adaptive Interframe Throttle */
	uint64_t esc_tdba; /* verified 64-bit desc table addr */
	uint32_t esc_TDBAL; /* x3800 desc table addr, low bits */
	uint32_t esc_TDBAH; /* x3804 desc table addr, hi 32-bits */
	uint32_t esc_TDLEN; /* x3808 # descriptors in bytes */
	uint16_t esc_TDH; /* x3810 desc table head idx */
	uint16_t esc_TDHr; /* internal read version of TDH */
	uint16_t esc_TDT; /* x3818 desc table tail idx */
	uint32_t esc_TIDV; /* x3820 intr delay */
	uint32_t esc_TXDCTL; /* x3828 desc control */
	uint32_t esc_TADV; /* x382C intr absolute delay */

	/* L2 frame acceptance */
	struct eth_uni esc_uni[16]; /* 16 x unicast MAC addresses */
	uint32_t esc_fmcast[128]; /* Multicast filter bit-match */
	uint32_t esc_fvlan[128]; /* VLAN 4096-bit filter */

	/* Receive state */
	struct e1000_rx_desc *esc_rxdesc;
	pthread_cond_t esc_rx_cond;
	uint32_t esc_RCTL; /* x0100 receive ctl */
	uint32_t esc_FCRTL; /* x2160 flow cntl thresh, low */
	uint32_t esc_FCRTH; /* x2168 flow cntl thresh, hi */
	uint64_t esc_rdba; /* verified 64-bit desc table addr */
	uint32_t esc_RDBAL; /* x2800 desc table addr, low bits */
	uint32_t esc_RDBAH; /* x2804 desc table addr, hi 32-bits*/
	uint32_t esc_RDLEN; /* x2808 #descriptors */
	uint16_t esc_RDH; /* x2810 desc table head idx */
	uint16_t esc_RDT; /* x2818 desc table tail idx */
	uint32_t esc_RDTR; /* x2820 intr delay */
	uint32_t esc_RXDCTL; /* x2828 desc control */
	uint32_t esc_RADV; /* x282C intr absolute delay */
	uint32_t esc_RSRPD; /* x2C00 recv small packet detect */
	uint32_t esc_RXCSUM; /* x5000 receive cksum ctl */

	/* IO Port register access */
	/* Shadow copy of MDIC */
	uint32_t mdi_control;
	/* Shadow copy of EECD */
	uint32_t eeprom_control;
	/* Latest NVM in/out (fields not visible in this chunk) */

	/* Statistics */
	uint32_t missed_pkt_count; /* dropped for no room in rx queue */
	uint32_t pkt_rx_by_size[6];
	uint32_t pkt_tx_by_size[6];
	uint32_t good_pkt_rx_count;
	uint32_t bcast_pkt_rx_count;
	uint32_t mcast_pkt_rx_count;
	uint32_t good_pkt_tx_count;
	uint32_t bcast_pkt_tx_count;
	uint32_t mcast_pkt_tx_count;
	uint32_t oversize_rx_count;
	uint32_t tso_tx_count;
	uint64_t good_octets_rx;
	uint64_t good_octets_tx;
	uint64_t missed_octets; /* counts missed and oversized */

	/* NVM (Microwire EEPROM) bit-bang state machine */
	uint8_t nvm_bits:6; /* number of bits remaining in/out */
#define E82545_NVM_MODE_OPADDR 0x0
#define E82545_NVM_MODE_DATAIN 0x1
#define E82545_NVM_MODE_DATAOUT 0x2
	/* EEPROM image served to the guest */
	uint16_t eeprom_data[E82545_NVM_EEPROM_SIZE];
/* Forward declarations for internal helpers defined later in the file. */
static void e82545_reset(struct e82545_softc *sc, int dev);
static void e82545_rx_enable(struct e82545_softc *sc);
static void e82545_rx_disable(struct e82545_softc *sc);
static void e82545_tap_callback(int fd, enum ev_type type, void *param);
static void e82545_tx_start(struct e82545_softc *sc);
static void e82545_tx_enable(struct e82545_softc *sc);
static void e82545_tx_disable(struct e82545_softc *sc);
/*
 * Histogram bucket index for a packet of the given size, used with the
 * pkt_rx_by_size/pkt_tx_by_size counters.  Only a fragment of this
 * function is visible in this chunk; ffs(size) - 6 appears to map
 * power-of-two sizes (64 and up) to small bucket indices.
 */
e82545_size_stat_index(uint32_t size)
	} else if (size >= 1024) {
	return (ffs(size) - 6);
/*
 * Populate the emulated EEPROM image: MAC address, PCI identity words,
 * and the NVM checksum word.  (Fragment: some braces/initializers are
 * not visible in this chunk.)
 */
e82545_init_eeprom(struct e82545_softc *sc)
	uint16_t checksum, i;

	/* Pack the 6-byte MAC address into the first three 16-bit EEPROM
	 * words, low octet in the low byte of each word. */
	sc->eeprom_data[NVM_MAC_ADDR] = ((uint16_t)sc->esc_mac.octet[0]) |
	    (((uint16_t)sc->esc_mac.octet[1]) << 8);
	sc->eeprom_data[NVM_MAC_ADDR+1] = ((uint16_t)sc->esc_mac.octet[2]) |
	    (((uint16_t)sc->esc_mac.octet[3]) << 8);
	sc->eeprom_data[NVM_MAC_ADDR+2] = ((uint16_t)sc->esc_mac.octet[4]) |
	    (((uint16_t)sc->esc_mac.octet[5]) << 8);

	/* Advertise the same PCI identity as the emulated device. */
	sc->eeprom_data[NVM_SUB_DEV_ID] = E82545_SUBDEV_ID;
	sc->eeprom_data[NVM_SUB_VEN_ID] = E82545_VENDOR_ID_INTEL;
	sc->eeprom_data[NVM_DEV_ID] = E82545_DEV_ID_82545EM_COPPER;
	sc->eeprom_data[NVM_VEN_ID] = E82545_VENDOR_ID_INTEL;

	/* fill in the checksum */
	/* NOTE(review): 'checksum' must be zeroed before this loop; its
	 * initialization is not visible in this chunk. */
	for (i = 0; i < NVM_CHECKSUM_REG; i++) {
		checksum += sc->eeprom_data[i];
	checksum = NVM_SUM - checksum;
	sc->eeprom_data[NVM_CHECKSUM_REG] = checksum;
	DPRINTF("eeprom checksum: 0x%x\r\n", checksum);
/*
 * Handle a guest write to a PHY register via the MDI interface.  Only
 * a fragment is visible here; the write is logged and, as far as
 * visible, otherwise ignored.
 */
e82545_write_mdi(struct e82545_softc *sc, uint8_t reg_addr,
    uint8_t phy_addr, uint32_t data)
	DPRINTF("Write mdi reg:0x%x phy:0x%x data: 0x%x\r\n", reg_addr, phy_addr, data);
/*
 * Return the value of an emulated PHY register: always reports link up,
 * autoneg capable/complete, and a Marvell 88E1011 PHY identity.
 * (Fragment: the switch header and several case labels are not visible
 * in this chunk.)
 */
e82545_read_mdi(struct e82545_softc *sc, uint8_t reg_addr,
	//DPRINTF("Read mdi reg:0x%x phy:0x%x\r\n", reg_addr, phy_addr);
		return (MII_SR_LINK_STATUS | MII_SR_AUTONEG_CAPS |
		    MII_SR_AUTONEG_COMPLETE);
	case PHY_AUTONEG_ADV:
		return NWAY_AR_SELECTOR_FIELD;
	case PHY_1000T_STATUS:
		return (SR_1000T_LP_FD_CAPS | SR_1000T_REMOTE_RX_STATUS |
		    SR_1000T_LOCAL_RX_STATUS);
		/* PHY ID high word, then low word (includes revision). */
		return (M88E1011_I_PHY_ID >> 16) & 0xFFFF;
		return (M88E1011_I_PHY_ID | E82545_REVISION_4) & 0xFFFF;
		DPRINTF("Unknown mdi read reg:0x%x phy:0x%x\r\n", reg_addr, phy_addr);
/*
 * Advance the Microwire EEPROM state machine by one SK clock strobe.
 * The guest bit-bangs opcode+address bits in via EECD.DI and reads data
 * bits out via EECD.DO; this routine shifts one bit per call through
 * the OPADDR -> DATAIN/DATAOUT states.  (Fragment: several braces and
 * statements are not visible in this chunk.)
 */
e82545_eecd_strobe(struct e82545_softc *sc)

	/* Microwire state machine */
	/*
	DPRINTF("eeprom state machine strobe "
	    "0x%x 0x%x 0x%x 0x%x\r\n",
	    sc->nvm_mode, sc->nvm_bits,
	    sc->nvm_opaddr, sc->nvm_data);*/

	if (sc->nvm_bits == 0) {
		DPRINTF("eeprom state machine not expecting data! "
		    "0x%x 0x%x 0x%x 0x%x\r\n",
		    sc->nvm_mode, sc->nvm_bits,
		    sc->nvm_opaddr, sc->nvm_data);

	if (sc->nvm_mode == E82545_NVM_MODE_DATAOUT) {
		/* Shift the MSB of nvm_data out on the DO line. */
		if (sc->nvm_data & 0x8000) {
			sc->eeprom_control |= E1000_EECD_DO;
			sc->eeprom_control &= ~E1000_EECD_DO;
		if (sc->nvm_bits == 0) {
			/* read done, back to opcode mode. */
			sc->nvm_mode = E82545_NVM_MODE_OPADDR;
			sc->nvm_bits = E82545_NVM_OPADDR_BITS;
	} else if (sc->nvm_mode == E82545_NVM_MODE_DATAIN) {
		/* Shift one data bit in from the DI line. */
		if (sc->eeprom_control & E1000_EECD_DI) {
		if (sc->nvm_bits == 0) {
			/* All data bits received: commit the write. */
			uint16_t op = sc->nvm_opaddr & E82545_NVM_OPCODE_MASK;
			uint16_t addr = sc->nvm_opaddr & E82545_NVM_ADDR_MASK;
			if (op != E82545_NVM_OPCODE_WRITE) {
				DPRINTF("Illegal eeprom write op 0x%x\r\n",
			} else if (addr >= E82545_NVM_EEPROM_SIZE) {
				DPRINTF("Illegal eeprom write addr 0x%x\r\n",
				DPRINTF("eeprom write eeprom[0x%x] = 0x%x\r\n",
				sc->eeprom_data[addr] = sc->nvm_data;

			/* back to opcode mode */
			sc->nvm_mode = E82545_NVM_MODE_OPADDR;
			sc->nvm_bits = E82545_NVM_OPADDR_BITS;
	} else if (sc->nvm_mode == E82545_NVM_MODE_OPADDR) {
		/* Accumulate opcode+address bits, MSB first. */
		sc->nvm_opaddr <<= 1;
		if (sc->eeprom_control & E1000_EECD_DI) {
		if (sc->nvm_bits == 0) {
			uint16_t op = sc->nvm_opaddr & E82545_NVM_OPCODE_MASK;
			case E82545_NVM_OPCODE_EWEN:
				DPRINTF("eeprom write enable: 0x%x\r\n",
				/* back to opcode mode */
				sc->nvm_mode = E82545_NVM_MODE_OPADDR;
				sc->nvm_bits = E82545_NVM_OPADDR_BITS;
			case E82545_NVM_OPCODE_READ:
				uint16_t addr = sc->nvm_opaddr &
				    E82545_NVM_ADDR_MASK;
				sc->nvm_mode = E82545_NVM_MODE_DATAOUT;
				sc->nvm_bits = E82545_NVM_DATA_BITS;
				if (addr < E82545_NVM_EEPROM_SIZE) {
					sc->nvm_data = sc->eeprom_data[addr];
					DPRINTF("eeprom read: eeprom[0x%x] = 0x%x\r\n",
					DPRINTF("eeprom illegal read: 0x%x\r\n",
			case E82545_NVM_OPCODE_WRITE:
				sc->nvm_mode = E82545_NVM_MODE_DATAIN;
				sc->nvm_bits = E82545_NVM_DATA_BITS;
				/* NOTE(review): the "\r\r" below looks like a
				 * typo for "\r\n" (string left unchanged in
				 * this doc-only pass). */
				DPRINTF("eeprom unknown op: 0x%x\r\r",
				/* back to opcode mode */
				sc->nvm_mode = E82545_NVM_MODE_OPADDR;
				sc->nvm_bits = E82545_NVM_OPADDR_BITS;

	DPRINTF("eeprom state machine wrong state! "
	    "0x%x 0x%x 0x%x 0x%x\r\n",
	    sc->nvm_mode, sc->nvm_bits,
	    sc->nvm_opaddr, sc->nvm_data);
/*
 * Interrupt-throttle (ITR) timer callback: when the throttle interval
 * expires, assert the legacy interrupt if unmasked causes are still
 * pending; otherwise the one-shot timer event is deleted.  (Fragment:
 * some braces/declarations are not visible in this chunk.)
 */
e82545_itr_callback(int fd, enum ev_type type, void *param)
	struct e82545_softc *sc = param;

	pthread_mutex_lock(&sc->esc_mtx);
	new = sc->esc_ICR & sc->esc_IMS;
	if (new && !sc->esc_irq_asserted) {
		DPRINTF("itr callback: lintr assert %x\r\n", new);
		sc->esc_irq_asserted = 1;
		pci_lintr_assert(sc->esc_pi);
	mevent_delete(sc->esc_mevpitr);
	sc->esc_mevpitr = NULL;
	pthread_mutex_unlock(&sc->esc_mtx);
/*
 * Latch interrupt cause bits and assert the legacy interrupt line when
 * appropriate; with a non-zero ITR the assertion is rate-limited via a
 * one-shot mevent timer.  Called with esc_mtx held.  (Fragment: some
 * braces are not visible in this chunk.)
 */
e82545_icr_assert(struct e82545_softc *sc, uint32_t bits)

	DPRINTF("icr assert: 0x%x\r\n", bits);

	/*
	 * An interrupt is only generated if bits are set that
	 * aren't already in the ICR, these bits are unmasked,
	 * and there isn't an interrupt already pending.
	 */
	new = bits & ~sc->esc_ICR & sc->esc_IMS;
		DPRINTF("icr assert: masked %x, ims %x\r\n", new, sc->esc_IMS);
	} else if (sc->esc_mevpitr != NULL) {
		DPRINTF("icr assert: throttled %x, ims %x\r\n", new, sc->esc_IMS);
	} else if (!sc->esc_irq_asserted) {
		DPRINTF("icr assert: lintr assert %x\r\n", new);
		sc->esc_irq_asserted = 1;
		pci_lintr_assert(sc->esc_pi);
		if (sc->esc_ITR != 0) {
			sc->esc_mevpitr = mevent_add(
			    (sc->esc_ITR + 3905) / 3906, /* 256ns -> 1ms */
			    EVF_TIMER, e82545_itr_callback, sc);
/*
 * Handle setting bits in IMS (interrupt mask set/read): unmasking may
 * let an already-latched cause in ICR raise the interrupt line, subject
 * to the same ITR throttling as e82545_icr_assert().  (Fragment: some
 * braces are not visible in this chunk.)
 */
e82545_ims_change(struct e82545_softc *sc, uint32_t bits)

	/*
	 * Changing the mask may allow previously asserted
	 * but masked interrupt requests to generate an interrupt.
	 */
	new = bits & sc->esc_ICR & ~sc->esc_IMS;
		DPRINTF("ims change: masked %x, ims %x\r\n", new, sc->esc_IMS);
	} else if (sc->esc_mevpitr != NULL) {
		DPRINTF("ims change: throttled %x, ims %x\r\n", new, sc->esc_IMS);
	} else if (!sc->esc_irq_asserted) {
		DPRINTF("ims change: lintr assert %x\n\r", new);
		sc->esc_irq_asserted = 1;
		pci_lintr_assert(sc->esc_pi);
		if (sc->esc_ITR != 0) {
			sc->esc_mevpitr = mevent_add(
			    (sc->esc_ITR + 3905) / 3906, /* 256ns -> 1ms */
			    EVF_TIMER, e82545_itr_callback, sc);
/*
 * Clear the given interrupt cause bits and drop the legacy interrupt
 * line if no unmasked causes remain.  Called with esc_mtx held.
 */
e82545_icr_deassert(struct e82545_softc *sc, uint32_t bits)

	DPRINTF("icr deassert: 0x%x\r\n", bits);
	sc->esc_ICR &= ~bits;

	/*
	 * If there are no longer any interrupt sources and there
	 * was an asserted interrupt, clear it
	 */
	if (sc->esc_irq_asserted && !(sc->esc_ICR & sc->esc_IMS)) {
		DPRINTF("icr deassert: lintr deassert %x\r\n", bits);
		pci_lintr_deassert(sc->esc_pi);
		sc->esc_irq_asserted = 0;
/*
 * Dispatch a guest write to one of the interrupt registers
 * (ICR/ICS/IMS/IMC).  (Fragment: the switch header and case labels are
 * not visible in this chunk.)
 */
e82545_intr_write(struct e82545_softc *sc, uint32_t offset, uint32_t value)

	DPRINTF("intr_write: off %x, val %x\n\r", offset, value);

		e82545_icr_deassert(sc, value);
		sc->esc_ICS = value; /* not used: store for debug */
		e82545_icr_assert(sc, value);
		e82545_ims_change(sc, value);
		sc->esc_IMC = value; /* for debug */
		sc->esc_IMS &= ~value;
		// XXX clear interrupts if all ICR bits now masked
		// and interrupt was pending ?
/*
 * Return the value of an interrupt register.  Reading ICR has the
 * hardware side effect of clearing all causes and deasserting the
 * interrupt.  (Fragment: switch header and case labels are not visible
 * in this chunk.)
 */
e82545_intr_read(struct e82545_softc *sc, uint32_t offset)

	DPRINTF("intr_read: off %x\n\r", offset);

		retval = sc->esc_ICR;
		/* read-to-clear: drop all causes and the interrupt line */
		e82545_icr_deassert(sc, ~0);
		retval = sc->esc_ITR;
		/* write-only register */
		retval = sc->esc_IMS;
		/* write-only register */
/*
 * Handle a write to the device control register (CTRL).  The RST bit
 * is never latched; when set it triggers a software reset instead.
 */
e82545_devctl(struct e82545_softc *sc, uint32_t val)

	sc->esc_CTRL = val & ~E1000_CTRL_RST;

	if (val & E1000_CTRL_RST) {
		DPRINTF("e1k: s/w reset, ctl %x\n", val);
	/* XXX check for phy reset ? */
/*
 * Recompute the 64-bit receive descriptor base from RDBAH/RDBAL and
 * cache a host mapping of the guest descriptor ring.  (Fragment: the
 * RDBAL continuation line is not visible in this chunk.)
 */
e82545_rx_update_rdba(struct e82545_softc *sc)

	/* XXX verify desc base/len within phys mem range */
	sc->esc_rdba = (uint64_t)sc->esc_RDBAH << 32 |

	/* Cache host mapping of guest descriptor array */
	sc->esc_rxdesc = paddr_guest2host(sc->esc_ctx,
	    sc->esc_rdba, sc->esc_RDLEN);
/*
 * Handle a write to RCTL: latch the register (reserved bits stripped)
 * and, on EN transitions, enable/disable the receive path and the
 * transceiver-loopback flag.  (Fragment: several braces are not
 * visible in this chunk.)
 */
e82545_rx_ctl(struct e82545_softc *sc, uint32_t val)

	on = ((val & E1000_RCTL_EN) == E1000_RCTL_EN);

	/* Save RCTL after stripping reserved bits 31:27,24,21,14,11:10,0 */
	sc->esc_RCTL = val & ~0xF9204c01;

	DPRINTF("rx_ctl - %s RCTL %x, val %x\n",
	    on ? "on" : "off", sc->esc_RCTL, val);

	/* state change requested */
	if (on != sc->esc_rx_enabled) {
		/* Catch disallowed/unimplemented settings */
		//assert(!(val & E1000_RCTL_LBM_TCVR));

		if (sc->esc_RCTL & E1000_RCTL_LBM_TCVR) {
			sc->esc_rx_loopback = 1;
			sc->esc_rx_loopback = 0;

		e82545_rx_update_rdba(sc);
		e82545_rx_enable(sc);
		e82545_rx_disable(sc);
		sc->esc_rx_loopback = 0;
		sc->esc_rxdesc = NULL;
/*
 * Recompute the 64-bit transmit descriptor base from TDBAH/TDBAL and
 * cache a host mapping of the guest descriptor ring.  (Fragment: the
 * trailing length argument is not visible in this chunk.)
 */
e82545_tx_update_tdba(struct e82545_softc *sc)

	/* XXX verify desc base/len within phys mem range */
	sc->esc_tdba = (uint64_t)sc->esc_TDBAH << 32 | sc->esc_TDBAL;

	/* Cache host mapping of guest descriptor array */
	sc->esc_txdesc = paddr_guest2host(sc->esc_ctx, sc->esc_tdba,
/*
 * Handle a write to TCTL: on EN transitions enable/disable the transmit
 * path, then latch the register with reserved bits stripped.
 * (Fragment: several braces are not visible in this chunk.)
 */
e82545_tx_ctl(struct e82545_softc *sc, uint32_t val)

	on = ((val & E1000_TCTL_EN) == E1000_TCTL_EN);

	/* ignore TCTL_EN settings that don't change state */
	if (on == sc->esc_tx_enabled)

		e82545_tx_update_tdba(sc);
		e82545_tx_enable(sc);
		e82545_tx_disable(sc);
		sc->esc_txdesc = NULL;

	/* Save TCTL value after stripping reserved bits 31:25,23,2,0 */
	sc->esc_TCTL = val & ~0xFE800005;
811 e82545_bufsz(uint32_t rctl)
814 switch (rctl & (E1000_RCTL_BSEX | E1000_RCTL_SZ_256)) {
815 case (E1000_RCTL_SZ_2048): return (2048);
816 case (E1000_RCTL_SZ_1024): return (1024);
817 case (E1000_RCTL_SZ_512): return (512);
818 case (E1000_RCTL_SZ_256): return (256);
819 case (E1000_RCTL_BSEX|E1000_RCTL_SZ_16384): return (16384);
820 case (E1000_RCTL_BSEX|E1000_RCTL_SZ_8192): return (8192);
821 case (E1000_RCTL_BSEX|E1000_RCTL_SZ_4096): return (4096);
823 return (256); /* Forbidden value. */
/* Scratch buffer used to drain and discard tap input when receive is
 * disabled, in loopback, or the guest ring has no room. */
static uint8_t dummybuf[2048];
/*
 * Tap read event callback: drain frames from the tap into the guest
 * receive descriptor ring (one frame per readv), apply the VLAN filter,
 * mark consumed descriptors done, and post receive interrupts.  Frames
 * are discarded when receive is disabled/looped-back or the ring lacks
 * room.  (Fragment: several braces and statements are not visible in
 * this chunk.)
 */
/* XXX one packet at a time until this is debugged */
e82545_tap_callback(int fd, enum ev_type type, void *param)
	struct e82545_softc *sc = param;
	struct e1000_rx_desc *rxd;
	struct iovec vec[64];
	int left, len, lim, maxpktsz, maxpktdesc, bufsz, i, n, size;
	uint16_t *tp, tag, head;

	pthread_mutex_lock(&sc->esc_mtx);
	DPRINTF("rx_run: head %x, tail %x\r\n", sc->esc_RDH, sc->esc_RDT);

	if (!sc->esc_rx_enabled || sc->esc_rx_loopback) {
		DPRINTF("rx disabled (!%d || %d) -- packet(s) dropped\r\n",
		    sc->esc_rx_enabled, sc->esc_rx_loopback);
		/* Drain and discard everything pending on the tap. */
		while (read(sc->esc_tapfd, dummybuf, sizeof(dummybuf)) > 0) {

	bufsz = e82545_bufsz(sc->esc_RCTL);
	maxpktsz = (sc->esc_RCTL & E1000_RCTL_LPE) ? 16384 : 1522;
	maxpktdesc = (maxpktsz + bufsz - 1) / bufsz;
	/* Ring size in descriptors: RDLEN is bytes, 16 bytes per desc. */
	size = sc->esc_RDLEN / 16;
	left = (size + sc->esc_RDT - head) % size;
	if (left < maxpktdesc) {
		DPRINTF("rx overflow (%d < %d) -- packet(s) dropped\r\n",
		while (read(sc->esc_tapfd, dummybuf, sizeof(dummybuf)) > 0) {

	sc->esc_rx_active = 1;
	/* Drop the lock while copying packets into guest memory. */
	pthread_mutex_unlock(&sc->esc_mtx);

	for (lim = size / 4; lim > 0 && left >= maxpktdesc; lim -= n) {

		/* Grab rx descriptor pointed to by the head pointer */
		for (i = 0; i < maxpktdesc; i++) {
			rxd = &sc->esc_rxdesc[(head + i) % size];
			vec[i].iov_base = paddr_guest2host(sc->esc_ctx,
			    rxd->buffer_addr, bufsz);
			vec[i].iov_len = bufsz;
		len = readv(sc->esc_tapfd, vec, maxpktdesc);
			DPRINTF("tap: readv() returned %d\n", len);

		/*
		 * Adjust the packet length based on whether the CRC needs
		 * to be stripped or if the packet is less than the minimum
		 * eth packet size.
		 */
		if (len < ETHER_MIN_LEN - ETHER_CRC_LEN)
			len = ETHER_MIN_LEN - ETHER_CRC_LEN;
		if (!(sc->esc_RCTL & E1000_RCTL_SECRC))
			len += ETHER_CRC_LEN;
		n = (len + bufsz - 1) / bufsz;

		DPRINTF("packet read %d bytes, %d segs, head %d\r\n",

		/* Apply VLAN filter. */
		tp = (uint16_t *)vec[0].iov_base + 6;
		if ((sc->esc_RCTL & E1000_RCTL_VFE) &&
		    (ntohs(tp[0]) == sc->esc_VET)) {
			tag = ntohs(tp[1]) & 0x0fff;
			if ((sc->esc_fvlan[tag >> 5] &
			    (1 << (tag & 0x1f))) != 0) {
				DPRINTF("known VLAN %d\r\n", tag);
				DPRINTF("unknown VLAN %d\r\n", tag);

		/* Update all consumed descriptors. */
		for (i = 0; i < n - 1; i++) {
			rxd = &sc->esc_rxdesc[(head + i) % size];
			rxd->status = E1000_RXD_STAT_DD;
		rxd = &sc->esc_rxdesc[(head + i) % size];
		rxd->length = len % bufsz;

		/* XXX signal no checksum for now */
		rxd->status = E1000_RXD_STAT_PIF | E1000_RXD_STAT_IXSM |
		    E1000_RXD_STAT_EOP | E1000_RXD_STAT_DD;

		/* Schedule receive interrupts. */
		if (len <= sc->esc_RSRPD) {
			cause |= E1000_ICR_SRPD | E1000_ICR_RXT0;
			/* XXX: RDRT and RADV timers should be here. */
			cause |= E1000_ICR_RXT0;

		head = (head + n) % size;

	pthread_mutex_lock(&sc->esc_mtx);
	sc->esc_rx_active = 0;
	if (sc->esc_rx_enabled == 0)
		pthread_cond_signal(&sc->esc_rx_cond);

	/* Respect E1000_RCTL_RDMTS */
	left = (size + sc->esc_RDT - head) % size;
	if (left < (size >> (((sc->esc_RCTL >> 8) & 3) + 1)))
		cause |= E1000_ICR_RXDMT0;

	/* Assert all accumulated interrupts. */
	e82545_icr_assert(sc, cause);

	DPRINTF("rx_run done: head %x, tail %x\r\n", sc->esc_RDH, sc->esc_RDT);
	pthread_mutex_unlock(&sc->esc_mtx);
/*
 * Fold the high-order carry bits of a 32-bit one's-complement checksum
 * accumulator back into the low 16 bits.  (Fragment: the return type,
 * final carry check and return statement are not visible in this
 * chunk.)
 */
e82545_carry(uint32_t sum)

	sum = (sum & 0xFFFF) + (sum >> 16);
/*
 * One's-complement (Internet) checksum of a flat buffer.  (Fragment:
 * local declarations and the odd-byte conditional are only partially
 * visible in this chunk.)
 */
e82545_buf_checksum(uint8_t *buf, int len)

	/* Checksum all the pairs of bytes first... */
	for (i = 0; i < (len & ~1U); i += 2)
		sum += *((u_int16_t *)(buf + i));

	/*
	 * If there's a single byte left over, checksum it, too.
	 * Network byte order is big-endian, so the remaining byte is
	 * treated as the high byte.
	 */
		sum += htons(buf[i] << 8);

	return (e82545_carry(sum));
/*
 * One's-complement checksum over [off, off+len) of an iovec chain.
 * Byte parity ('odd') is tracked so 16-bit words split across vector
 * boundaries are summed with the correct significance.  (Fragment:
 * declarations and loop bookkeeping are not fully visible in this
 * chunk.)
 */
e82545_iov_checksum(struct iovec *iov, int iovcnt, int off, int len)

	/* Skip completely unneeded vectors. */
	while (iovcnt > 0 && iov->iov_len <= off && off > 0) {

	/* Calculate checksum of requested range. */
	while (len > 0 && iovcnt > 0) {
		now = MIN(len, iov->iov_len - off);
		s = e82545_buf_checksum(iov->iov_base + off, now);
		sum += odd ? (s << 8) : s;

	return (e82545_carry(sum));
1020 * Return the transmit descriptor type.
1023 e82545_txdesc_type(uint32_t lower)
1029 if (lower & E1000_TXD_CMD_DEXT)
1030 type = lower & E1000_TXD_MASK;
/*
 * Compute the checksum described by 'ck' over the packet iovec and
 * store its one's complement at ck_off within the first vector (which
 * holds the writable header copy).  ck_len == 0 means "checksum to end
 * of packet".  (Fragment: braces/declarations are not visible in this
 * chunk.)
 */
e82545_transmit_checksum(struct iovec *iov, int iovcnt, struct ck_info *ck)

	DPRINTF("tx cksum: iovcnt/s/off/len %d/%d/%d/%d\r\n",
	    iovcnt, ck->ck_start, ck->ck_off, ck->ck_len);
	cklen = ck->ck_len ? ck->ck_len - ck->ck_start + 1 : INT_MAX;
	cksum = e82545_iov_checksum(iov, iovcnt, ck->ck_start, cklen);
	*(uint16_t *)((uint8_t *)iov[0].iov_base + ck->ck_off) = ~cksum;
/*
 * Hand a fully-formed frame to the tap backend; frames are silently
 * dropped when no tap is attached.  The writev() result is
 * deliberately ignored (best-effort transmit).
 */
e82545_transmit_backend(struct e82545_softc *sc, struct iovec *iov, int iovcnt)

	if (sc->esc_tapfd == -1)
	(void) writev(sc->esc_tapfd, iov, iovcnt);
/*
 * Walk transmit descriptors [head, tail) and set the DD (done) status
 * bit on those that requested status reporting (RS).  (Fragment: the
 * '*tdwb' write-back flagging and closing braces are not visible in
 * this chunk.)
 */
e82545_transmit_done(struct e82545_softc *sc, uint16_t head, uint16_t tail,
    uint16_t dsize, int *tdwb)
	union e1000_tx_udesc *dsc;

	for ( ; head != tail; head = (head + 1) % dsize) {
		dsc = &sc->esc_txdesc[head];
		if (dsc->td.lower.data & E1000_TXD_CMD_RS) {
			dsc->td.upper.data |= E1000_TXD_STAT_DD;
1074 e82545_transmit(struct e82545_softc *sc, uint16_t head, uint16_t tail,
1075 uint16_t dsize, uint16_t *rhead, int *tdwb)
1077 uint8_t *hdr, *hdrp;
1078 struct iovec iovb[I82545_MAX_TXSEGS + 2];
1079 struct iovec tiov[I82545_MAX_TXSEGS + 2];
1080 struct e1000_context_desc *cd;
1081 struct ck_info ckinfo[2];
1083 union e1000_tx_udesc *dsc;
1084 int desc, dtype, len, ntype, iovcnt, tlen, hdrlen, vlen, tcp, tso;
1085 int mss, paylen, seg, tiovcnt, left, now, nleft, nnow, pv, pvoff;
1086 uint32_t tcpsum, tcpseq;
1087 uint16_t ipcs, tcpcs, ipid, ohead;
1089 ckinfo[0].ck_valid = ckinfo[1].ck_valid = 0;
1096 /* iovb[0/1] may be used for writable copy of headers. */
1099 for (desc = 0; ; desc++, head = (head + 1) % dsize) {
1104 dsc = &sc->esc_txdesc[head];
1105 dtype = e82545_txdesc_type(dsc->td.lower.data);
1109 case E1000_TXD_TYP_C:
1110 DPRINTF("tx ctxt desc idx %d: %016jx "
1112 head, dsc->td.buffer_addr,
1113 dsc->td.upper.data, dsc->td.lower.data);
1114 /* Save context and return */
1115 sc->esc_txctx = dsc->cd;
1117 case E1000_TXD_TYP_L:
1118 DPRINTF("tx legacy desc idx %d: %08x%08x\r\n",
1119 head, dsc->td.upper.data, dsc->td.lower.data);
1121 * legacy cksum start valid in first descriptor
1124 ckinfo[0].ck_start = dsc->td.upper.fields.css;
1126 case E1000_TXD_TYP_D:
1127 DPRINTF("tx data desc idx %d: %08x%08x\r\n",
1128 head, dsc->td.upper.data, dsc->td.lower.data);
1135 /* Descriptor type must be consistent */
1136 assert(dtype == ntype);
1137 DPRINTF("tx next desc idx %d: %08x%08x\r\n",
1138 head, dsc->td.upper.data, dsc->td.lower.data);
1141 len = (dtype == E1000_TXD_TYP_L) ? dsc->td.lower.flags.length :
1142 dsc->dd.lower.data & 0xFFFFF;
1145 /* Strip checksum supplied by guest. */
1146 if ((dsc->td.lower.data & E1000_TXD_CMD_EOP) != 0 &&
1147 (dsc->td.lower.data & E1000_TXD_CMD_IFCS) == 0)
1150 if (iovcnt < I82545_MAX_TXSEGS) {
1151 iov[iovcnt].iov_base = paddr_guest2host(
1152 sc->esc_ctx, dsc->td.buffer_addr, len);
1153 iov[iovcnt].iov_len = len;
1159 * Pull out info that is valid in the final descriptor
1160 * and exit descriptor loop.
1162 if (dsc->td.lower.data & E1000_TXD_CMD_EOP) {
1163 if (dtype == E1000_TXD_TYP_L) {
1164 if (dsc->td.lower.data & E1000_TXD_CMD_IC) {
1165 ckinfo[0].ck_valid = 1;
1167 dsc->td.lower.flags.cso;
1168 ckinfo[0].ck_len = 0;
1171 cd = &sc->esc_txctx;
1172 if (dsc->dd.lower.data & E1000_TXD_CMD_TSE)
1174 if (dsc->dd.upper.fields.popts &
1175 E1000_TXD_POPTS_IXSM)
1176 ckinfo[0].ck_valid = 1;
1177 if (dsc->dd.upper.fields.popts &
1178 E1000_TXD_POPTS_IXSM || tso) {
1179 ckinfo[0].ck_start =
1180 cd->lower_setup.ip_fields.ipcss;
1182 cd->lower_setup.ip_fields.ipcso;
1184 cd->lower_setup.ip_fields.ipcse;
1186 if (dsc->dd.upper.fields.popts &
1187 E1000_TXD_POPTS_TXSM)
1188 ckinfo[1].ck_valid = 1;
1189 if (dsc->dd.upper.fields.popts &
1190 E1000_TXD_POPTS_TXSM || tso) {
1191 ckinfo[1].ck_start =
1192 cd->upper_setup.tcp_fields.tucss;
1194 cd->upper_setup.tcp_fields.tucso;
1196 cd->upper_setup.tcp_fields.tucse;
1203 if (iovcnt > I82545_MAX_TXSEGS) {
1204 WPRINTF("tx too many descriptors (%d > %d) -- dropped\r\n",
1205 iovcnt, I82545_MAX_TXSEGS);
1210 /* Estimate writable space for VLAN header insertion. */
1211 if ((sc->esc_CTRL & E1000_CTRL_VME) &&
1212 (dsc->td.lower.data & E1000_TXD_CMD_VLE)) {
1213 hdrlen = ETHER_ADDR_LEN*2;
1214 vlen = ETHER_VLAN_ENCAP_LEN;
1217 /* Estimate required writable space for checksums. */
1218 if (ckinfo[0].ck_valid)
1219 hdrlen = MAX(hdrlen, ckinfo[0].ck_off + 2);
1220 if (ckinfo[1].ck_valid)
1221 hdrlen = MAX(hdrlen, ckinfo[1].ck_off + 2);
1222 /* Round up writable space to the first vector. */
1223 if (hdrlen != 0 && iov[0].iov_len > hdrlen &&
1224 iov[0].iov_len < hdrlen + 100)
1225 hdrlen = iov[0].iov_len;
1227 /* In case of TSO header length provided by software. */
1228 hdrlen = sc->esc_txctx.tcp_seg_setup.fields.hdr_len;
1231 /* Allocate, fill and prepend writable header vector. */
1233 hdr = __builtin_alloca(hdrlen + vlen);
	/*
	 * NOTE(review): interior fragment of e82545_transmit(); the
	 * function's prologue/epilogue and several interior lines are
	 * outside this excerpt.  Comments only -- code unchanged.
	 */
	/* Pull up 'hdrlen' header bytes from the iov chain into 'hdr'. */
	for (left = hdrlen, hdrp = hdr; left > 0;
	    left -= now, hdrp += now) {
		now = MIN(left, iov->iov_len);
		memcpy(hdrp, iov->iov_base, now);
		iov->iov_base += now;
		iov->iov_len -= now;
		if (iov->iov_len == 0) {
	/* Make the first iov describe the pulled-up header copy. */
	iov->iov_base = hdr;
	iov->iov_len = hdrlen;

	/* Insert VLAN tag. */
	hdr -= ETHER_VLAN_ENCAP_LEN;
	/* Slide dst+src MACs down to open a 4-byte gap for the tag. */
	memmove(hdr, hdr + ETHER_VLAN_ENCAP_LEN, ETHER_ADDR_LEN*2);
	hdrlen += ETHER_VLAN_ENCAP_LEN;
	/* TPID from the VET register, TCI from the descriptor 'special'. */
	hdr[ETHER_ADDR_LEN*2 + 0] = sc->esc_VET >> 8;
	hdr[ETHER_ADDR_LEN*2 + 1] = sc->esc_VET & 0xff;
	hdr[ETHER_ADDR_LEN*2 + 2] = dsc->td.upper.fields.special >> 8;
	hdr[ETHER_ADDR_LEN*2 + 3] = dsc->td.upper.fields.special & 0xff;
	iov->iov_base = hdr;
	iov->iov_len += ETHER_VLAN_ENCAP_LEN;
	/* Correct checksum offsets after VLAN tag insertion. */
	ckinfo[0].ck_start += ETHER_VLAN_ENCAP_LEN;
	ckinfo[0].ck_off += ETHER_VLAN_ENCAP_LEN;
	if (ckinfo[0].ck_len != 0)
		ckinfo[0].ck_len += ETHER_VLAN_ENCAP_LEN;
	ckinfo[1].ck_start += ETHER_VLAN_ENCAP_LEN;
	ckinfo[1].ck_off += ETHER_VLAN_ENCAP_LEN;
	if (ckinfo[1].ck_len != 0)
		ckinfo[1].ck_len += ETHER_VLAN_ENCAP_LEN;

	/* Simple non-TSO case. */
	/* Calculate checksums and transmit. */
	if (ckinfo[0].ck_valid)
		e82545_transmit_checksum(iov, iovcnt, &ckinfo[0]);
	if (ckinfo[1].ck_valid)
		e82545_transmit_checksum(iov, iovcnt, &ckinfo[1]);
	e82545_transmit_backend(sc, iov, iovcnt);

	/* TSO: split 'paylen' payload bytes into mss-sized segments. */
	tcp = (sc->esc_txctx.cmd_and_length & E1000_TXD_CMD_TCP) != 0;
	mss = sc->esc_txctx.tcp_seg_setup.fields.mss;
	paylen = (sc->esc_txctx.cmd_and_length & 0x000fffff);
	DPRINTF("tx %s segmentation offload %d+%d/%d bytes %d iovs\r\n",
	    tcp ? "TCP" : "UDP", hdrlen, paylen, mss, iovcnt);
	/* Snapshot IP id, TCP sequence and IP checksum from the header. */
	ipid = ntohs(*(uint16_t *)&hdr[ckinfo[0].ck_start + 4]);
	tcpseq = ntohl(*(uint32_t *)&hdr[ckinfo[1].ck_start + 4]);
	ipcs = *(uint16_t *)&hdr[ckinfo[0].ck_off];
	if (ckinfo[1].ck_valid)	/* Save partial pseudo-header checksum. */
		tcpcs = *(uint16_t *)&hdr[ckinfo[1].ck_off];

	/* One iteration per emitted segment of at most 'mss' bytes. */
	for (seg = 0, left = paylen; left > 0; seg++, left -= now) {
		now = MIN(left, mss);

		/* Construct IOVs for the segment. */
		/* Include whole original header. */
		tiov[0].iov_base = hdr;
		tiov[0].iov_len = hdrlen;

		/* Include respective part of payload IOV. */
		for (nleft = now; pv < iovcnt && nleft > 0; nleft -= nnow) {
			nnow = MIN(nleft, iov[pv].iov_len - pvoff);
			tiov[tiovcnt].iov_base = iov[pv].iov_base + pvoff;
			tiov[tiovcnt++].iov_len = nnow;
			if (pvoff + nnow == iov[pv].iov_len) {

		DPRINTF("tx segment %d %d+%d bytes %d iovs\r\n",
		    seg, hdrlen, now, tiovcnt);

		/* Update IP header. */
		if (sc->esc_txctx.cmd_and_length & E1000_TXD_CMD_IP) {
			/* IPv4 -- set length and ID */
			*(uint16_t *)&hdr[ckinfo[0].ck_start + 2] =
			    htons(hdrlen - ckinfo[0].ck_start + now);
			*(uint16_t *)&hdr[ckinfo[0].ck_start + 4] =
			/* IPv6 -- set length */
			*(uint16_t *)&hdr[ckinfo[0].ck_start + 4] =
			    htons(hdrlen - ckinfo[0].ck_start - 40 +

		/* Update pseudo-header checksum. */
		tcpsum += htons(hdrlen - ckinfo[1].ck_start + now);

		/* Update TCP/UDP headers. */
		/* Update sequence number and FIN/PUSH flags. */
		*(uint32_t *)&hdr[ckinfo[1].ck_start + 4] =
		    htonl(tcpseq + paylen - left);
		/* NOTE(review): presumably guarded by a not-last-segment
		 * test elided from this excerpt -- confirm. */
		hdr[ckinfo[1].ck_start + 13] &=
		    ~(TH_FIN | TH_PUSH);

		/* Update payload length. */
		*(uint32_t *)&hdr[ckinfo[1].ck_start + 4] =
		    hdrlen - ckinfo[1].ck_start + now;

		/* Calculate checksums and transmit. */
		if (ckinfo[0].ck_valid) {
			/* Restore the saved IP checksum before recomputing. */
			*(uint16_t *)&hdr[ckinfo[0].ck_off] = ipcs;
			e82545_transmit_checksum(tiov, tiovcnt, &ckinfo[0]);
		if (ckinfo[1].ck_valid) {
			*(uint16_t *)&hdr[ckinfo[1].ck_off] =
			    e82545_carry(tcpsum);
			e82545_transmit_checksum(tiov, tiovcnt, &ckinfo[1]);
		e82545_transmit_backend(sc, tiov, tiovcnt);

	/* Advance past the consumed descriptors and report completion. */
	head = (head + 1) % dsize;
	e82545_transmit_done(sc, ohead, head, dsize, tdwb);
/*
 * TX worker body: drain the transmit ring toward TDT, then raise the
 * appropriate interrupt causes.  NOTE(review): excerpt -- return type,
 * braces and the initialization of head/tail/cause are elided.
 */
e82545_tx_run(struct e82545_softc *sc)
	uint16_t head, rhead, tail, size;
	int lim, tdwb, sent;

	/* Ring size in descriptors (16 bytes per descriptor). */
	size = sc->esc_TDLEN / 16;
	DPRINTF("tx_run: head %x, rhead %x, tail %x\r\n",
	    sc->esc_TDH, sc->esc_TDHr, sc->esc_TDT);

	/* Drop the softc lock while moving packets to the backend. */
	pthread_mutex_unlock(&sc->esc_mtx);

	/* 'lim' bounds the work done per pass (size/4 descriptors). */
	for (lim = size / 4; sc->esc_tx_enabled && lim > 0; lim -= sent) {
		sent = e82545_transmit(sc, head, tail, size, &rhead, &tdwb);

	pthread_mutex_lock(&sc->esc_mtx);

	/* Publish the new "reclaimed head" position. */
	sc->esc_TDHr = rhead;
	cause |= E1000_ICR_TXDW;
	/* Queue empty after having done some work -> TXQE. */
	if (lim != size / 4 && sc->esc_TDH == sc->esc_TDT)
		cause |= E1000_ICR_TXQE;
	e82545_icr_assert(sc, cause);

	DPRINTF("tx_run done: head %x, rhead %x, tail %x\r\n",
	    sc->esc_TDH, sc->esc_TDHr, sc->esc_TDT);
static _Noreturn void *
e82545_tx_thread(void *param)
	struct e82545_softc *sc = param;

	/*
	 * Dedicated TX thread: sleeps on esc_tx_cond until transmit is
	 * enabled and the ring has work (TDHr != TDT), then processes
	 * descriptors.  NOTE(review): excerpt -- the outer forever-loop
	 * and the call into the descriptor processor are elided.
	 */
	pthread_mutex_lock(&sc->esc_mtx);
	while (!sc->esc_tx_enabled || sc->esc_TDHr == sc->esc_TDT) {
		if (sc->esc_tx_enabled && sc->esc_TDHr != sc->esc_TDT)
		/* Mark idle so e82545_tx_disable() can observe it. */
		sc->esc_tx_active = 0;
		if (sc->esc_tx_enabled == 0)
			pthread_cond_signal(&sc->esc_tx_cond);
		pthread_cond_wait(&sc->esc_tx_cond, &sc->esc_mtx);
	sc->esc_tx_active = 1;

	/* Process some tx descriptors.  Lock dropped inside. */
/*
 * Wake the TX thread if it is currently idle.
 * NOTE(review): excerpt -- signature line's return type and braces elided;
 * the cond var is paired with esc_mtx, presumably held by the caller.
 */
e82545_tx_start(struct e82545_softc *sc)
	if (sc->esc_tx_active == 0)
		pthread_cond_signal(&sc->esc_tx_cond);
/* Enable transmit: the TX thread's wait condition checks this flag. */
e82545_tx_enable(struct e82545_softc *sc)
	sc->esc_tx_enabled = 1;
/*
 * Disable transmit and wait until the TX thread goes idle
 * (it signals esc_tx_cond after clearing esc_tx_active).
 */
e82545_tx_disable(struct e82545_softc *sc)
	sc->esc_tx_enabled = 0;
	while (sc->esc_tx_active)
		pthread_cond_wait(&sc->esc_tx_cond, &sc->esc_mtx);
/* Enable receive processing. */
e82545_rx_enable(struct e82545_softc *sc)
	sc->esc_rx_enabled = 1;
/* Disable receive and wait for any in-flight RX processing to finish. */
e82545_rx_disable(struct e82545_softc *sc)
	sc->esc_rx_enabled = 0;
	while (sc->esc_rx_active)
		pthread_cond_wait(&sc->esc_rx_cond, &sc->esc_mtx);
/*
 * Write one 32-bit half of a Receive Address (RAL/RAH) register pair
 * into the unicast filter table esc_uni[].  NOTE(review): excerpt --
 * the computation of 'idx' and the branch selecting the RAH vs RAL
 * paths below are elided; confirm the even/odd register mapping.
 */
e82545_write_ra(struct e82545_softc *sc, int reg, uint32_t wval)
	eu = &sc->esc_uni[idx];
	/* RAH half: address-valid bit, address-select, MAC bytes 4..5. */
	eu->eu_valid = ((wval & E1000_RAH_AV) == E1000_RAH_AV);
	eu->eu_addrsel = (wval >> 16) & 0x3;
	eu->eu_eth.octet[5] = wval >> 8;
	eu->eu_eth.octet[4] = wval;
	/* RAL half: MAC bytes 0..3, packed little-endian. */
	eu->eu_eth.octet[3] = wval >> 24;
	eu->eu_eth.octet[2] = wval >> 16;
	eu->eu_eth.octet[1] = wval >> 8;
	eu->eu_eth.octet[0] = wval;
/*
 * Read back one 32-bit half of a Receive Address (RAL/RAH) pair from
 * esc_uni[] -- the inverse of e82545_write_ra().  NOTE(review):
 * excerpt -- 'idx' computation, branch selection and return elided.
 */
e82545_read_ra(struct e82545_softc *sc, int reg)
	eu = &sc->esc_uni[idx];
	/* RAH half: valid bit, address-select, MAC bytes 4..5. */
	retval = (eu->eu_valid << 31) |
	    (eu->eu_addrsel << 16) |
	    (eu->eu_eth.octet[5] << 8) |
	    eu->eu_eth.octet[4];
	/* RAL half: MAC bytes 0..3. */
	retval = (eu->eu_eth.octet[3] << 24) |
	    (eu->eu_eth.octet[2] << 16) |
	    (eu->eu_eth.octet[1] << 8) |
	    eu->eu_eth.octet[0];
/*
 * MMIO register-write dispatch.  The bit masks applied below clear
 * bits the register assignment treats as reserved.  NOTE(review):
 * excerpt -- the enclosing switch statement, most 'case'/'break'
 * lines, and some local declarations are elided.
 */
e82545_write_register(struct e82545_softc *sc, uint32_t offset, uint32_t value)
	DPRINTF("Unaligned register write offset:0x%x value:0x%x\r\n", offset, value);
	DPRINTF("Register write: 0x%x value: 0x%x\r\n", offset, value);
	/* Device control register (and its alias). */
	case E1000_CTRL_DUP:
		e82545_devctl(sc, value);
	/* Flow control address/type registers. */
	sc->esc_FCAL = value;
	sc->esc_FCAH = value & ~0xFFFF0000;
	sc->esc_FCT = value & ~0xFFFF0000;
	/* VLAN ethertype (TPID) used when inserting tags on transmit. */
	sc->esc_VET = value & ~0xFFFF0000;
	sc->esc_FCTTV = value & ~0xFFFF0000;
	sc->esc_LEDCTL = value & ~0x30303000;
	sc->esc_PBA = value & 0x0000FF80;
	/* Interrupt cause/mask registers are handled separately. */
	e82545_intr_write(sc, offset, value);
	/* Receive control. */
	e82545_rx_ctl(sc, value);
	sc->esc_FCRTL = value & ~0xFFFF0007;
	sc->esc_FCRTH = value & ~0xFFFF0007;
	/* RX descriptor ring base/length/head/tail. */
	case E1000_RDBAL(0):
		sc->esc_RDBAL = value & ~0xF;
		if (sc->esc_rx_enabled) {
			/* Apparently legal: update cached address */
			e82545_rx_update_rdba(sc);
	case E1000_RDBAH(0):
		assert(!sc->esc_rx_enabled);
		sc->esc_RDBAH = value;
	case E1000_RDLEN(0):
		assert(!sc->esc_rx_enabled);
		sc->esc_RDLEN = value & ~0xFFF0007F;
	/* XXX should only ever be zero ? Range check ? */
	sc->esc_RDH = value;
	/* XXX if this opens up the rx ring, do something ? */
	sc->esc_RDT = value;
	/* ignore FPD bit 31 */
	sc->esc_RDTR = value & ~0xFFFF0000;
	case E1000_RXDCTL(0):
		sc->esc_RXDCTL = value & ~0xFEC0C0C0;
	sc->esc_RADV = value & ~0xFFFF0000;
	sc->esc_RSRPD = value & ~0xFFFFF000;
	sc->esc_RXCSUM = value & ~0xFFFFF800;
	sc->esc_TXCW = value & ~0x3FFF0000;
	/* Transmit control. */
	e82545_tx_ctl(sc, value);
	sc->esc_TIPG = value;
	sc->esc_AIT = value;
	/* TX descriptor ring base/length/head/tail. */
	case E1000_TDBAL(0):
		sc->esc_TDBAL = value & ~0xF;
		if (sc->esc_tx_enabled) {
			/* Apparently legal */
			e82545_tx_update_tdba(sc);
	case E1000_TDBAH(0):
		//assert(!sc->esc_tx_enabled);
		sc->esc_TDBAH = value;
	case E1000_TDLEN(0):
		//assert(!sc->esc_tx_enabled);
		sc->esc_TDLEN = value & ~0xFFF0007F;
	//assert(!sc->esc_tx_enabled);
	/* XXX should only ever be zero ? Range check ? */
	sc->esc_TDHr = sc->esc_TDH = value;
	/* XXX range check ? */
	sc->esc_TDT = value;
	/* New tail may expose work: kick the TX thread. */
	if (sc->esc_tx_enabled)
		e82545_tx_start(sc);
	sc->esc_TIDV = value & ~0xFFFF0000;
	case E1000_TXDCTL(0):
		//assert(!sc->esc_tx_enabled);
		sc->esc_TXDCTL = value & ~0xC0C0C0;
	sc->esc_TADV = value & ~0xFFFF0000;
	/* Unicast (RA), multicast (MTA) and VLAN (VFTA) filter tables. */
	case E1000_RAL(0) ... E1000_RAH(15):
		/* convert to u32 offset */
		ridx = (offset - E1000_RAL(0)) >> 2;
		e82545_write_ra(sc, ridx, value);
	case E1000_MTA ... (E1000_MTA + (127*4)):
		sc->esc_fmcast[(offset - E1000_MTA) >> 2] = value;
	case E1000_VFTA ... (E1000_VFTA + (127*4)):
		sc->esc_fvlan[(offset - E1000_VFTA) >> 2] = value;
	/* EEPROM bit-bang interface (EECD). */
	//DPRINTF("EECD write 0x%x -> 0x%x\r\n", sc->eeprom_control, value);
	/* edge triggered low->high */
	uint32_t eecd_strobe = ((sc->eeprom_control & E1000_EECD_SK) ?
	    0 : (value & E1000_EECD_SK));
	uint32_t eecd_mask = (E1000_EECD_SK|E1000_EECD_CS|
	    E1000_EECD_DI|E1000_EECD_REQ);
	sc->eeprom_control &= ~eecd_mask;
	sc->eeprom_control |= (value & eecd_mask);
	/* grant/revoke immediately */
	if (value & E1000_EECD_REQ) {
		sc->eeprom_control |= E1000_EECD_GNT;
		sc->eeprom_control &= ~E1000_EECD_GNT;
	/* Clock SK rising edge with CS asserted -> shift one EEPROM bit. */
	if (eecd_strobe && (sc->eeprom_control & E1000_EECD_CS)) {
		e82545_eecd_strobe(sc);
	/* MDIC: MDIO access to the emulated PHY registers. */
	uint8_t reg_addr = (uint8_t)((value & E1000_MDIC_REG_MASK) >>
	    E1000_MDIC_REG_SHIFT);
	uint8_t phy_addr = (uint8_t)((value & E1000_MDIC_PHY_MASK) >>
	    E1000_MDIC_PHY_SHIFT);
	(value & ~(E1000_MDIC_ERROR|E1000_MDIC_DEST));
	if ((value & E1000_MDIC_READY) != 0) {
		DPRINTF("Incorrect MDIC ready bit: 0x%x\r\n", value);
	switch (value & E82545_MDIC_OP_MASK) {
	case E1000_MDIC_OP_READ:
		sc->mdi_control &= ~E82545_MDIC_DATA_MASK;
		sc->mdi_control |= e82545_read_mdi(sc, reg_addr, phy_addr);
	case E1000_MDIC_OP_WRITE:
		e82545_write_mdi(sc, reg_addr, phy_addr,
		    value & E82545_MDIC_DATA_MASK);
		DPRINTF("Unknown MDIC op: 0x%x\r\n", value);
	/* TODO: barrier? */
	sc->mdi_control |= E1000_MDIC_READY;
	if (value & E82545_MDIC_IE) {
		// TODO: generate interrupt
	DPRINTF("Unknown write register: 0x%x value:%x\r\n", offset, value);
/*
 * MMIO register-read dispatch; returns the cached register value or a
 * synthesized one.  NOTE(review): excerpt -- the enclosing switch,
 * most 'case'/'break' lines, the 'retval' declaration and the final
 * return are elided.
 */
e82545_read_register(struct e82545_softc *sc, uint32_t offset)
	DPRINTF("Unaligned register read offset:0x%x\r\n", offset);
	DPRINTF("Register read: 0x%x\r\n", offset);
	retval = sc->esc_CTRL;
	/* Link always reported up, full duplex, 1000 Mb/s. */
	retval = E1000_STATUS_FD | E1000_STATUS_LU |
	    E1000_STATUS_SPEED_1000;
	retval = sc->esc_FCAL;
	retval = sc->esc_FCAH;
	retval = sc->esc_FCT;
	retval = sc->esc_VET;
	retval = sc->esc_FCTTV;
	retval = sc->esc_LEDCTL;
	retval = sc->esc_PBA;
	/* Interrupt registers handled separately. */
	retval = e82545_intr_read(sc, offset);
	retval = sc->esc_RCTL;
	retval = sc->esc_FCRTL;
	retval = sc->esc_FCRTH;
	/* RX descriptor ring registers. */
	case E1000_RDBAL(0):
		retval = sc->esc_RDBAL;
	case E1000_RDBAH(0):
		retval = sc->esc_RDBAH;
	case E1000_RDLEN(0):
		retval = sc->esc_RDLEN;
	retval = sc->esc_RDH;
	retval = sc->esc_RDT;
	retval = sc->esc_RDTR;
	case E1000_RXDCTL(0):
		retval = sc->esc_RXDCTL;
	retval = sc->esc_RADV;
	retval = sc->esc_RSRPD;
	retval = sc->esc_RXCSUM;
	retval = sc->esc_TXCW;
	retval = sc->esc_TCTL;
	retval = sc->esc_TIPG;
	retval = sc->esc_AIT;
	/* TX descriptor ring registers. */
	case E1000_TDBAL(0):
		retval = sc->esc_TDBAL;
	case E1000_TDBAH(0):
		retval = sc->esc_TDBAH;
	case E1000_TDLEN(0):
		retval = sc->esc_TDLEN;
	retval = sc->esc_TDH;
	retval = sc->esc_TDT;
	retval = sc->esc_TIDV;
	case E1000_TXDCTL(0):
		retval = sc->esc_TXDCTL;
	retval = sc->esc_TADV;
	/* Filter tables. */
	case E1000_RAL(0) ... E1000_RAH(15):
		/* convert to u32 offset */
		ridx = (offset - E1000_RAL(0)) >> 2;
		retval = e82545_read_ra(sc, ridx);
	case E1000_MTA ... (E1000_MTA + (127*4)):
		retval = sc->esc_fmcast[(offset - E1000_MTA) >> 2];
	case E1000_VFTA ... (E1000_VFTA + (127*4)):
		retval = sc->esc_fvlan[(offset - E1000_VFTA) >> 2];
	//DPRINTF("EECD read %x\r\n", sc->eeprom_control);
	retval = sc->eeprom_control;
	retval = sc->mdi_control;
	/* stats that we emulate. */
	retval = sc->missed_pkt_count;
	/* RX packet counts bucketed by size. */
	retval = sc->pkt_rx_by_size[0];
	retval = sc->pkt_rx_by_size[1];
	retval = sc->pkt_rx_by_size[2];
	retval = sc->pkt_rx_by_size[3];
	retval = sc->pkt_rx_by_size[4];
	retval = sc->pkt_rx_by_size[5];
	retval = sc->good_pkt_rx_count;
	retval = sc->bcast_pkt_rx_count;
	retval = sc->mcast_pkt_rx_count;
	retval = sc->good_pkt_tx_count;
	/* 64-bit octet counters are exposed as low/high 32-bit halves. */
	retval = (uint32_t)sc->good_octets_rx;
	retval = (uint32_t)(sc->good_octets_rx >> 32);
	retval = (uint32_t)sc->good_octets_tx;
	retval = (uint32_t)(sc->good_octets_tx >> 32);
	retval = sc->oversize_rx_count;
	retval = (uint32_t)(sc->good_octets_rx + sc->missed_octets);
	retval = (uint32_t)((sc->good_octets_rx +
	    sc->missed_octets) >> 32);
	retval = sc->good_pkt_rx_count + sc->missed_pkt_count +
	    sc->oversize_rx_count;
	/* TX packet counts bucketed by size. */
	retval = sc->pkt_tx_by_size[0];
	retval = sc->pkt_tx_by_size[1];
	retval = sc->pkt_tx_by_size[2];
	retval = sc->pkt_tx_by_size[3];
	retval = sc->pkt_tx_by_size[4];
	retval = sc->pkt_tx_by_size[5];
	retval = sc->mcast_pkt_tx_count;
	retval = sc->bcast_pkt_tx_count;
	retval = sc->tso_tx_count;
	/* stats that are always 0. */
	case E1000_ALGNERRC:
	DPRINTF("Unknown read register: 0x%x\r\n", offset);
/*
 * PCI BAR write entry point (pe_barwrite).  Dispatches I/O-BAR
 * accesses (IOADDR/IODATA window) and direct register-BAR writes to
 * e82545_write_register() under the softc lock.  NOTE(review):
 * excerpt -- switch scaffolding and some case labels elided.
 */
e82545_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
    uint64_t offset, int size, uint64_t value)
	struct e82545_softc *sc;

	//DPRINTF("Write bar:%d offset:0x%lx value:0x%lx size:%d\r\n", baridx, offset, value, size);

	pthread_mutex_lock(&sc->esc_mtx);
	/* IOADDR: latch the register offset for a following IODATA access. */
	DPRINTF("Wrong io addr write sz:%d value:0x%lx\r\n", size, value);
	sc->io_addr = (uint32_t)value;
	/* IODATA: indirect register write through the latched address. */
	DPRINTF("Wrong io data write size:%d value:0x%lx\r\n", size, value);
	} else if (sc->io_addr > E82545_IO_REGISTER_MAX) {
		DPRINTF("Non-register io write addr:0x%x value:0x%lx\r\n", sc->io_addr, value);
	e82545_write_register(sc, sc->io_addr,
	DPRINTF("Unknown io bar write offset:0x%lx value:0x%lx size:%d\r\n", offset, value, size);
	/* Memory-mapped register BAR: direct dispatch. */
	case E82545_BAR_REGISTER:
		DPRINTF("Wrong register write size:%d offset:0x%lx value:0x%lx\r\n", size, offset, value);
		e82545_write_register(sc, (uint32_t)offset,
	DPRINTF("Unknown write bar:%d off:0x%lx val:0x%lx size:%d\r\n",
	    baridx, offset, value, size);
	pthread_mutex_unlock(&sc->esc_mtx);
/*
 * PCI BAR read entry point (pe_barread) -- mirror of e82545_write().
 * NOTE(review): excerpt -- switch scaffolding, 'retval' declaration
 * and the final return are elided.
 */
e82545_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
    uint64_t offset, int size)
	struct e82545_softc *sc;

	//DPRINTF("Read bar:%d offset:0x%lx size:%d\r\n", baridx, offset, size);

	pthread_mutex_lock(&sc->esc_mtx);
	/* IOADDR: return the latched indirect register offset. */
	DPRINTF("Wrong io addr read sz:%d\r\n", size);
	retval = sc->io_addr;
	/* IODATA: indirect register read through the latched address. */
	DPRINTF("Wrong io data read sz:%d\r\n", size);
	if (sc->io_addr > E82545_IO_REGISTER_MAX) {
		DPRINTF("Non-register io read addr:0x%x\r\n",
	retval = e82545_read_register(sc, sc->io_addr);
	DPRINTF("Unknown io bar read offset:0x%lx size:%d\r\n",
	/* Memory-mapped register BAR: direct dispatch. */
	case E82545_BAR_REGISTER:
		DPRINTF("Wrong register read size:%d offset:0x%lx\r\n",
	retval = e82545_read_register(sc, (uint32_t)offset);
	DPRINTF("Unknown read bar:%d offset:0x%lx size:%d\r\n",
	    baridx, offset, size);
	pthread_mutex_unlock(&sc->esc_mtx);
/*
 * Reset the device to power-on defaults: quiesce RX/TX, clear any
 * asserted interrupt, and reinitialize register state, filter tables
 * and the EEPROM interface.  NOTE(review): excerpt -- the use of the
 * 'drvr' argument and several register resets are elided; 'drvr'
 * presumably distinguishes a driver-initiated reset -- confirm.
 */
e82545_reset(struct e82545_softc *sc, int drvr)
	e82545_rx_disable(sc);
	e82545_tx_disable(sc);

	/* clear outstanding interrupts */
	if (sc->esc_irq_asserted)
		pci_lintr_deassert(sc->esc_pi);

	/* Power-on default values (datasheet defaults). */
	sc->esc_LEDCTL = 0x07061302;
	sc->esc_PBA = 0x00100030;

	/* start nvm in opcode mode. */
	sc->nvm_mode = E82545_NVM_MODE_OPADDR;
	sc->nvm_bits = E82545_NVM_OPADDR_BITS;
	sc->eeprom_control = E1000_EECD_PRES | E82545_EECD_FWE_EN;
	e82545_init_eeprom(sc);

	/* Clear the unicast/multicast/VLAN filter tables. */
	memset(sc->esc_fvlan, 0, sizeof(sc->esc_fvlan));
	memset(sc->esc_fmcast, 0, sizeof(sc->esc_fmcast));
	memset(sc->esc_uni, 0, sizeof(sc->esc_uni));

	/* XXX not necessary on 82545 ?? */
	sc->esc_uni[0].eu_valid = 1;
	memcpy(sc->esc_uni[0].eu_eth.octet, sc->esc_mac.octet,

	/* Clear RAH valid bits */
	for (i = 0; i < 16; i++)
		sc->esc_uni[i].eu_valid = 0;

	sc->esc_RXDCTL = (1 << 24) | (1 << 16); /* default GRAN/WTHRESH */
	/* TX ring back to empty state. */
	sc->esc_txdesc = NULL;
	sc->esc_TDHr = sc->esc_TDH = 0;
/*
 * Open the tap/vmnet backend device named by 'opts' ("/dev/<opts>"),
 * set it non-blocking, restrict it under Capsicum, and register a
 * read-event callback with the mevent loop.  NOTE(review): excerpt --
 * error-path returns and some declarations are elided.
 */
e82545_open_tap(struct e82545_softc *sc, char *opts)
#ifndef WITHOUT_CAPSICUM
	cap_rights_t rights;

	strcpy(tbuf, "/dev/");
	strlcat(tbuf, opts, sizeof(tbuf));

	sc->esc_tapfd = open(tbuf, O_RDWR);
	if (sc->esc_tapfd == -1) {
		DPRINTF("unable to open tap device %s\n", opts);
	/*
	 * Set non-blocking and register for read
	 * notifications with the event loop
	 */
	if (ioctl(sc->esc_tapfd, FIONBIO, &opt) < 0) {
		WPRINTF("tap device O_NONBLOCK failed: %d\n", errno);
		close(sc->esc_tapfd);
#ifndef WITHOUT_CAPSICUM
	/* Only event-wait/read/write are needed on the tap fd. */
	cap_rights_init(&rights, CAP_EVENT, CAP_READ, CAP_WRITE);
	if (caph_rights_limit(sc->esc_tapfd, &rights) == -1)
		errx(EX_OSERR, "Unable to apply rights for sandbox");
	sc->esc_mevp = mevent_add(sc->esc_tapfd,
	    e82545_tap_callback,
	if (sc->esc_mevp == NULL) {
		DPRINTF("Could not register mevent %d\n", EVF_READ);
		close(sc->esc_tapfd);
/*
 * Parse a "mac=<addr>" option string into mac_addr, rejecting
 * multicast and all-zero addresses.  NOTE(review): excerpt -- the
 * declarations of tmpstr and the return statements are elided.
 */
e82545_parsemac(char *mac_str, uint8_t *mac_addr)
	struct ether_addr *ea;
	char zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };

	/* Split on '='; require the key to be exactly "mac". */
	tmpstr = strsep(&mac_str,"=");
	if ((mac_str != NULL) && (!strcmp(tmpstr,"mac"))) {
		ea = ether_aton(mac_str);
		if (ea == NULL || ETHER_IS_MULTICAST(ea->octet) ||
		    memcmp(ea->octet, zero_addr, ETHER_ADDR_LEN) == 0) {
			fprintf(stderr, "Invalid MAC %s\n", mac_str);
		memcpy(mac_addr, ea->octet, ETHER_ADDR_LEN);
/*
 * pe_init entry point: allocate and wire up the softc, spawn the TX
 * thread, program PCI config space, allocate BARs, open the tap
 * backend and choose a MAC address, then perform a h/w reset.
 * NOTE(review): excerpt -- locals, error paths and the final return
 * are elided.
 */
e82545_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
	DPRINTF("Loading with options: %s\r\n", opts);
	unsigned char digest[16];
	struct e82545_softc *sc;

	/* Setup our softc */
	sc = calloc(1, sizeof(*sc));

	/* Synchronization primitives shared with the RX/TX threads. */
	pthread_mutex_init(&sc->esc_mtx, NULL);
	pthread_cond_init(&sc->esc_rx_cond, NULL);
	pthread_cond_init(&sc->esc_tx_cond, NULL);
	pthread_create(&sc->esc_tx_tid, NULL, e82545_tx_thread, sc);
	snprintf(nstr, sizeof(nstr), "e82545-%d:%d tx", pi->pi_slot,
	pthread_set_name_np(sc->esc_tx_tid, nstr);

	/* Present ourselves as an Intel 82545EM copper NIC. */
	pci_set_cfgdata16(pi, PCIR_DEVICE, E82545_DEV_ID_82545EM_COPPER);
	pci_set_cfgdata16(pi, PCIR_VENDOR, E82545_VENDOR_ID_INTEL);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_NETWORK);
	pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_NETWORK_ETHERNET);
	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, E82545_SUBDEV_ID);
	pci_set_cfgdata16(pi, PCIR_SUBVEND_0, E82545_VENDOR_ID_INTEL);
	pci_set_cfgdata8(pi, PCIR_HDRTYPE, PCIM_HDRTYPE_NORMAL);
	pci_set_cfgdata8(pi, PCIR_INTPIN, 0x1);

	/* TODO: this card also supports msi, but the freebsd driver for it
	 * does not, so I have not implemented it. */
	pci_lintr_request(pi);

	/* Register, flash and I/O BARs. */
	pci_emul_alloc_bar(pi, E82545_BAR_REGISTER, PCIBAR_MEM32,
	    E82545_BAR_REGISTER_LEN);
	pci_emul_alloc_bar(pi, E82545_BAR_FLASH, PCIBAR_MEM32,
	    E82545_BAR_FLASH_LEN);
	pci_emul_alloc_bar(pi, E82545_BAR_IO, PCIBAR_IO,

	/*
	 * Attempt to open the tap device and read the MAC address
	 * if specified. Copied from virtio-net, slightly modified.
	 */
	devname = vtopts = strdup(opts);
	(void) strsep(&vtopts, ",");
	if (vtopts != NULL) {
		err = e82545_parsemac(vtopts, sc->esc_mac.octet);
	if (strncmp(devname, "tap", 3) == 0 ||
	    strncmp(devname, "vmnet", 5) == 0)
		e82545_open_tap(sc, devname);

	/*
	 * The default MAC address is the standard NetApp OUI of 00-a0-98,
	 * followed by an MD5 of the PCI slot/func number and dev name
	 */
	if (!mac_provided) {
		snprintf(nstr, sizeof(nstr), "%d-%d-%s", pi->pi_slot,
		    pi->pi_func, vmname);
		MD5Update(&mdctx, nstr, strlen(nstr));
		MD5Final(digest, &mdctx);
		sc->esc_mac.octet[0] = 0x00;
		sc->esc_mac.octet[1] = 0xa0;
		sc->esc_mac.octet[2] = 0x98;
		sc->esc_mac.octet[3] = digest[0];
		sc->esc_mac.octet[4] = digest[1];
		sc->esc_mac.octet[5] = digest[2];

	/* H/w initiated reset */
	e82545_reset(sc, 0);
/*
 * bhyve PCI device-emulation registration for the e82545 NIC.
 * NOTE(review): excerpt -- the .pe_emu name member and closing brace
 * are elided from this view.
 */
struct pci_devemu pci_de_e82545 = {
	.pe_init = e82545_init,
	.pe_barwrite = e82545_write,
	.pe_barread = e82545_read
PCI_EMUL_SET(pci_de_e82545);