/*-
 * Copyright (c) 2016 Matt Macy <mmacy@nextbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "if_em.h"

#ifdef RSS
#include <net/rss_config.h>
#include <netinet/in_rss.h>
#endif

#ifdef VERBOSE_DEBUG
#define DPRINTF device_printf
#else
#define DPRINTF(...)
#endif

/*********************************************************************
 *  Local Function prototypes
 *********************************************************************/
static int em_tso_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_upper, u32 *txd_lower);
static int em_transmit_checksum_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_upper, u32 *txd_lower);
static int em_isc_txd_encap(void *arg, if_pkt_info_t pi);
static void em_isc_txd_flush(void *arg, uint16_t txqid, uint32_t pidx);
static int em_isc_txd_credits_update(void *arg, uint16_t txqid, uint32_t cidx_init, bool clear);
static void em_isc_rxd_refill(void *arg, uint16_t rxqid, uint8_t flid __unused,
    uint32_t pidx, uint64_t *paddrs, caddr_t *vaddrs __unused, uint16_t count, uint16_t buflen __unused);
static void em_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, uint32_t pidx);
static int em_isc_rxd_available(void *arg, uint16_t rxqid, uint32_t idx,
    int budget);
static int em_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);

static void lem_isc_rxd_refill(void *arg, uint16_t rxqid, uint8_t flid __unused,
    uint32_t pidx, uint64_t *paddrs, caddr_t *vaddrs __unused, uint16_t count, uint16_t buflen __unused);
static int lem_isc_rxd_available(void *arg, uint16_t rxqid, uint32_t idx,
    int budget);
static int lem_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);

static void lem_receive_checksum(int status, int errors, if_rxd_info_t ri);
static void em_receive_checksum(uint32_t status, if_rxd_info_t ri);
extern int em_intr(void *arg);

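/*
 * Dispatch tables handed to iflib. em_txrx serves adapters that use
 * extended (union e1000_rx_desc_extended) RX descriptors; lem_txrx
 * serves older parts that use legacy struct e1000_rx_desc descriptors.
 * The TX path is shared between the two.
 */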
struct if_txrx em_txrx = {
    em_isc_txd_encap,
    em_isc_txd_flush,
    em_isc_txd_credits_update,
    em_isc_rxd_available,
    em_isc_rxd_pkt_get,
    em_isc_rxd_refill,
    em_isc_rxd_flush,
    em_intr
};

struct if_txrx lem_txrx = {
    em_isc_txd_encap,
    em_isc_txd_flush,
    em_isc_txd_credits_update,
    lem_isc_rxd_available,
    lem_isc_rxd_pkt_get,
    lem_isc_rxd_refill,
    em_isc_rxd_flush,
    em_intr
};

extern if_shared_ctx_t em_sctx;

/**********************************************************************
 *
 *  Setup work for hardware segmentation offload (TSO) on
 *  adapters using advanced tx descriptors
 *
 **********************************************************************/
static int
em_tso_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_upper, u32 *txd_lower)
{
    if_softc_ctx_t scctx = adapter->shared;
    struct em_tx_queue *que = &adapter->tx_queues[pi->ipi_qsidx];
    struct tx_ring *txr = &que->txr;
    struct e1000_context_desc *TXD;
    struct em_txbuffer *tx_buffer;
    int cur, hdr_len;

    hdr_len = pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen;
    *txd_lower = (E1000_TXD_CMD_DEXT |  /* Extended descr type */
        E1000_TXD_DTYP_D |              /* Data descr type */
        E1000_TXD_CMD_TSE);             /* Do TSE on this packet */

    /* IP and/or TCP header checksum calculation and insertion. */
    *txd_upper = (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;

    cur = pi->ipi_pidx;
    TXD = (struct e1000_context_desc *)&txr->tx_base[cur];
    tx_buffer = &txr->tx_buffers[cur];

    /*
     * Start offset for header checksum calculation.
     * End offset for header checksum calculation.
     * Offset of place to put the checksum.
     */
    TXD->lower_setup.ip_fields.ipcss = pi->ipi_ehdrlen;
    TXD->lower_setup.ip_fields.ipcse =
        htole16(pi->ipi_ehdrlen + pi->ipi_ip_hlen - 1);
    TXD->lower_setup.ip_fields.ipcso = pi->ipi_ehdrlen + offsetof(struct ip, ip_sum);

    /*
     * Start offset for payload checksum calculation.
     * End offset for payload checksum calculation.
     * Offset of place to put the checksum.
     */
    TXD->upper_setup.tcp_fields.tucss = pi->ipi_ehdrlen + pi->ipi_ip_hlen;
    TXD->upper_setup.tcp_fields.tucse = 0;
    TXD->upper_setup.tcp_fields.tucso =
        pi->ipi_ehdrlen + pi->ipi_ip_hlen + offsetof(struct tcphdr, th_sum);

    /*
     * Payload size per packet w/o any headers.
     * Length of all headers up to payload.
     */
    TXD->tcp_seg_setup.fields.mss = htole16(pi->ipi_tso_segsz);
    TXD->tcp_seg_setup.fields.hdr_len = hdr_len;

    TXD->cmd_and_length = htole32(adapter->txd_cmd |
        E1000_TXD_CMD_DEXT |        /* Extended descr */
        E1000_TXD_CMD_TSE |         /* TSE context */
        E1000_TXD_CMD_IP |          /* Do IP csum */
        E1000_TXD_CMD_TCP |         /* Do TCP checksum */
        (pi->ipi_len - hdr_len));   /* Total len */
    tx_buffer->eop = -1;
    txr->tx_tso = TRUE;

    if (++cur == scctx->isc_ntxd[0]) {
        cur = 0;
    }
    DPRINTF(iflib_get_dev(adapter->ctx), "%s: pidx: %d cur: %d\n", __FUNCTION__, pi->ipi_pidx, cur);
    return (cur);
}

#define TSO_WORKAROUND 4 /* bytes reserved for the trailing sentinel descriptor */
#define DONT_FORCE_CTX 1 /* reuse a matching checksum context rather than emitting a new one */

/*********************************************************************
 *  The offload context is protocol specific (TCP/UDP) and thus
 *  only needs to be set when the protocol changes. The occasion
 *  of a context change can be a performance detriment, and
 *  might be better just disabled. The reason arises in the way
 *  in which the controller supports pipelined requests from the
 *  Tx data DMA. Up to four requests can be pipelined, and they may
 *  belong to the same packet or to multiple packets. However all
 *  requests for one packet are issued before a request is issued
 *  for a subsequent packet, and if a request for the next packet
 *  requires a context change, that request will be stalled
 *  until the previous request completes. This means setting up
 *  a new context effectively disables pipelined Tx data DMA, which
 *  in turn greatly slows down performance for small sized
 *  frames.
 **********************************************************************/
static int
em_transmit_checksum_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_upper, u32 *txd_lower)
{
    struct e1000_context_desc *TXD = NULL;
    if_softc_ctx_t scctx = adapter->shared;
    struct em_tx_queue *que = &adapter->tx_queues[pi->ipi_qsidx];
    struct tx_ring *txr = &que->txr;
    struct em_txbuffer *tx_buffer;
    int csum_flags = pi->ipi_csum_flags;
    int cur, hdr_len;
    u32 cmd;

    cur = pi->ipi_pidx;
    hdr_len = pi->ipi_ehdrlen + pi->ipi_ip_hlen;
    cmd = adapter->txd_cmd;

    /*
     * The 82574L can only remember the *last* context used
     * regardless of queue that it was used for. We cannot reuse
     * contexts on this hardware platform and must generate a new
     * context every time. 82574L hardware spec, section 7.2.6,
     * second note.
     */
    if (DONT_FORCE_CTX &&
        adapter->tx_num_queues == 1 &&
        txr->csum_lhlen == pi->ipi_ehdrlen &&
        txr->csum_iphlen == pi->ipi_ip_hlen &&
        txr->csum_flags == csum_flags) {
        /*
         * Same csum offload context as the previous packets;
         * just return.
         */
        *txd_upper = txr->csum_txd_upper;
        *txd_lower = txr->csum_txd_lower;
        return (cur);
    }

    TXD = (struct e1000_context_desc *)&txr->tx_base[cur];
    if (csum_flags & CSUM_IP) {
        *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
        /*
         * Start offset for header checksum calculation.
         * End offset for header checksum calculation.
         * Offset of place to put the checksum.
         */
        TXD->lower_setup.ip_fields.ipcss = pi->ipi_ehdrlen;
        TXD->lower_setup.ip_fields.ipcse = htole16(hdr_len);
        TXD->lower_setup.ip_fields.ipcso = pi->ipi_ehdrlen + offsetof(struct ip, ip_sum);
        cmd |= E1000_TXD_CMD_IP;
    }

    if (csum_flags & (CSUM_TCP|CSUM_UDP)) {
        uint8_t tucso;

        *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
        *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;

        if (csum_flags & CSUM_TCP) {
            tucso = hdr_len + offsetof(struct tcphdr, th_sum);
            cmd |= E1000_TXD_CMD_TCP;
        } else
            tucso = hdr_len + offsetof(struct udphdr, uh_sum);
        TXD->upper_setup.tcp_fields.tucss = hdr_len;
        TXD->upper_setup.tcp_fields.tucse = htole16(0);
        TXD->upper_setup.tcp_fields.tucso = tucso;
    }

    txr->csum_lhlen = pi->ipi_ehdrlen;
    txr->csum_iphlen = pi->ipi_ip_hlen;
    txr->csum_flags = csum_flags;
    txr->csum_txd_upper = *txd_upper;
    txr->csum_txd_lower = *txd_lower;

    TXD->tcp_seg_setup.data = htole32(0);
    TXD->cmd_and_length =
        htole32(E1000_TXD_CMD_IFCS | E1000_TXD_CMD_DEXT | cmd);

    tx_buffer = &txr->tx_buffers[cur];
    tx_buffer->eop = -1;

    if (++cur == scctx->isc_ntxd[0]) {
        cur = 0;
    }
    DPRINTF(iflib_get_dev(adapter->ctx), "checksum_setup csum_flags=%x txd_upper=%x txd_lower=%x hdr_len=%d cmd=%x\n",
        csum_flags, *txd_upper, *txd_lower, hdr_len, cmd);
    return (cur);
}

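/*
 * iflib TX encapsulation: apply any TSO/checksum context first, then
 * program one data descriptor per DMA segment, and record the EOP index
 * in the first buffer so credits_update can find the end of the packet.
 */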
static int
em_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
    struct adapter *sc = arg;
    if_softc_ctx_t scctx = sc->shared;
    struct em_tx_queue *que = &sc->tx_queues[pi->ipi_qsidx];
    struct tx_ring *txr = &que->txr;
    bus_dma_segment_t *segs = pi->ipi_segs;
    int nsegs = pi->ipi_nsegs;
    int csum_flags = pi->ipi_csum_flags;
    int i, j, first, pidx_last;
    u32 txd_upper = 0, txd_lower = 0;

    struct em_txbuffer *tx_buffer;
    struct e1000_tx_desc *ctxd = NULL;
    bool do_tso, tso_desc;

    i = first = pi->ipi_pidx;
    do_tso = (csum_flags & CSUM_TSO);
    tso_desc = FALSE;
    /*
     * TSO Hardware workaround, if this packet is not
     * TSO, and is only a single descriptor long, and
     * it follows a TSO burst, then we need to add a
     * sentinel descriptor to prevent premature writeback.
     */
    if ((!do_tso) && (txr->tx_tso == TRUE)) {
        if (nsegs == 1)
            tso_desc = TRUE;
        txr->tx_tso = FALSE;
    }

    /* Do hardware assists */
    if (do_tso) {
        i = em_tso_setup(sc, pi, &txd_upper, &txd_lower);
        tso_desc = TRUE;
    } else if (csum_flags & EM_CSUM_OFFLOAD) {
        i = em_transmit_checksum_setup(sc, pi, &txd_upper, &txd_lower);
    }

    if (pi->ipi_mflags & M_VLANTAG) {
        /* Set the vlan id. */
        txd_upper |= htole16(pi->ipi_vtag) << 16;
        /* Tell hardware to add tag */
        txd_lower |= htole32(E1000_TXD_CMD_VLE);
    }

    DPRINTF(iflib_get_dev(sc->ctx), "encap: set up tx: nsegs=%d first=%d i=%d\n", nsegs, first, i);
    /* XXX adapter->pcix_82544 -- lem_fill_descriptors */

    /* Set up our transmit descriptors */
    for (j = 0; j < nsegs; j++) {
        bus_size_t seg_len;
        bus_addr_t seg_addr;
        uint32_t cmd;

        ctxd = &txr->tx_base[i];
        tx_buffer = &txr->tx_buffers[i];
        seg_addr = segs[j].ds_addr;
        seg_len = segs[j].ds_len;
        cmd = E1000_TXD_CMD_IFCS | sc->txd_cmd;

        /*
        ** TSO Workaround:
        ** If this is the last descriptor, we want to
        ** split it so we have a small final sentinel
        */
        if (tso_desc && (j == (nsegs - 1)) && (seg_len > 8)) {
            seg_len -= TSO_WORKAROUND;
            ctxd->buffer_addr = htole64(seg_addr);
            ctxd->lower.data = htole32(cmd | txd_lower | seg_len);
            ctxd->upper.data = htole32(txd_upper);

            if (++i == scctx->isc_ntxd[0])
                i = 0;

            /* Now make the sentinel */
            ctxd = &txr->tx_base[i];
            tx_buffer = &txr->tx_buffers[i];
            ctxd->buffer_addr = htole64(seg_addr + seg_len);
            ctxd->lower.data = htole32(cmd | txd_lower | TSO_WORKAROUND);
            ctxd->upper.data = htole32(txd_upper);
            pidx_last = i;
            if (++i == scctx->isc_ntxd[0])
                i = 0;
            DPRINTF(iflib_get_dev(sc->ctx), "TSO path pidx_last=%d i=%d ntxd[0]=%d\n", pidx_last, i, scctx->isc_ntxd[0]);
        } else {
            ctxd->buffer_addr = htole64(seg_addr);
            ctxd->lower.data = htole32(cmd | txd_lower | seg_len);
            ctxd->upper.data = htole32(txd_upper);
            pidx_last = i;
            if (++i == scctx->isc_ntxd[0])
                i = 0;
            DPRINTF(iflib_get_dev(sc->ctx), "pidx_last=%d i=%d ntxd[0]=%d\n", pidx_last, i, scctx->isc_ntxd[0]);
        }
        tx_buffer->eop = -1;
    }

    /*
     * Last Descriptor of Packet
     * needs End Of Packet (EOP)
     * and Report Status (RS)
     */
    ctxd->lower.data |=
        htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);

    tx_buffer = &txr->tx_buffers[first];
    tx_buffer->eop = pidx_last;
    DPRINTF(iflib_get_dev(sc->ctx), "tx_buffers[%d]->eop = %d ipi_new_pidx=%d\n", first, pidx_last, i);
    pi->ipi_new_pidx = i;

    return (0);
}

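/*
 * Move the hardware tail pointer: descriptors up to (but not including)
 * pidx are handed to the MAC for transmission.
 */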
static void
em_isc_txd_flush(void *arg, uint16_t txqid, uint32_t pidx)
{
    struct adapter *adapter = arg;
    struct em_tx_queue *que = &adapter->tx_queues[txqid];
    struct tx_ring *txr = &que->txr;

    E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), pidx);
}

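/*
 * Walk the ring from cidx_init, packet by packet, counting descriptors
 * whose EOP descriptor has the DD (descriptor done) bit set; iflib uses
 * the returned count to reclaim transmit credits.
 */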
static int
em_isc_txd_credits_update(void *arg, uint16_t txqid, uint32_t cidx_init, bool clear)
{
    struct adapter *adapter = arg;
    if_softc_ctx_t scctx = adapter->shared;
    struct em_tx_queue *que = &adapter->tx_queues[txqid];
    struct tx_ring *txr = &que->txr;

    u32 cidx, processed = 0;
    int last, done;
    struct em_txbuffer *buf;
    struct e1000_tx_desc *tx_desc, *eop_desc;

    cidx = cidx_init;
    buf = &txr->tx_buffers[cidx];
    tx_desc = &txr->tx_base[cidx];
    last = buf->eop;
    if (last == -1)
        return (processed); /* no EOP recorded at cidx; nothing to clean */
    eop_desc = &txr->tx_base[last];

    DPRINTF(iflib_get_dev(adapter->ctx), "credits_update: cidx_init=%d clear=%d last=%d\n",
        cidx_init, clear, last);
    /*
     * What this does is get the index of the
     * first descriptor AFTER the EOP of the
     * first packet, that way we can do the
     * simple comparison on the inner while loop.
     */
    if (++last == scctx->isc_ntxd[0])
        last = 0;
    done = last;

    while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
        /* We clean the range of the packet */
        while (cidx != done) {
            if (clear) {
                tx_desc->upper.data = 0;
                tx_desc->lower.data = 0;
                tx_desc->buffer_addr = 0;
                buf->eop = -1;
            }
            processed++;

            /* wrap the ring ? */
            if (++cidx == scctx->isc_ntxd[0]) {
                cidx = 0;
            }
            buf = &txr->tx_buffers[cidx];
            tx_desc = &txr->tx_base[cidx];
        }
        /* See if we can continue to the next packet */
        last = buf->eop;
        if (last == -1)
            break;
        eop_desc = &txr->tx_base[last];
        /* Get new done point */
        if (++last == scctx->isc_ntxd[0])
            last = 0;
        done = last;
    }

    DPRINTF(iflib_get_dev(adapter->ctx), "Processed %d credits update\n", processed);
    return (processed);
}

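/*
 * RX refill: post the physical addresses of fresh buffers into the
 * descriptor ring. The lem variant programs legacy descriptors, the em
 * variant extended descriptors; both clear the status bits so stale DD
 * indications are not misread later.
 */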
static void
lem_isc_rxd_refill(void *arg, uint16_t rxqid, uint8_t flid __unused,
    uint32_t pidx, uint64_t *paddrs, caddr_t *vaddrs __unused, uint16_t count, uint16_t buflen __unused)
{
    struct adapter *sc = arg;
    if_softc_ctx_t scctx = sc->shared;
    struct em_rx_queue *que = &sc->rx_queues[rxqid];
    struct rx_ring *rxr = &que->rxr;
    struct e1000_rx_desc *rxd;
    uint32_t next_pidx;
    int i;

    for (i = 0, next_pidx = pidx; i < count; i++) {
        rxd = (struct e1000_rx_desc *)&rxr->rx_base[next_pidx];
        rxd->buffer_addr = htole64(paddrs[i]);
        /* status bits must be cleared */
        rxd->status = 0;

        if (++next_pidx == scctx->isc_nrxd[0])
            next_pidx = 0;
    }
}

static void
em_isc_rxd_refill(void *arg, uint16_t rxqid, uint8_t flid __unused,
    uint32_t pidx, uint64_t *paddrs, caddr_t *vaddrs __unused, uint16_t count, uint16_t buflen __unused)
{
    struct adapter *sc = arg;
    if_softc_ctx_t scctx = sc->shared;
    struct em_rx_queue *que = &sc->rx_queues[rxqid];
    struct rx_ring *rxr = &que->rxr;
    union e1000_rx_desc_extended *rxd;
    uint32_t next_pidx;
    int i;

    for (i = 0, next_pidx = pidx; i < count; i++) {
        rxd = &rxr->rx_base[next_pidx];
        rxd->read.buffer_addr = htole64(paddrs[i]);
        /* DD bits must be cleared */
        rxd->wb.upper.status_error = 0;

        if (++next_pidx == scctx->isc_nrxd[0])
            next_pidx = 0;
    }
}

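/*
 * Publish the RX tail pointer so the hardware may use the refilled
 * buffers up to (but not including) pidx.
 */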
static void
em_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, uint32_t pidx)
{
    struct adapter *sc = arg;
    struct em_rx_queue *que = &sc->rx_queues[rxqid];
    struct rx_ring *rxr = &que->rxr;

    E1000_WRITE_REG(&sc->hw, E1000_RDT(rxr->me), pidx);
}

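/*
 * Report how many complete packets are ready at idx: scan descriptors
 * while the DD bit is set, counting one packet per EOP, and stop once
 * the budget is exhausted or a descriptor is still owned by hardware.
 */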
static int
lem_isc_rxd_available(void *arg, uint16_t rxqid, uint32_t idx, int budget)
{
    struct adapter *sc = arg;
    if_softc_ctx_t scctx = sc->shared;
    struct em_rx_queue *que = &sc->rx_queues[rxqid];
    struct rx_ring *rxr = &que->rxr;
    struct e1000_rx_desc *rxd;
    u32 staterr = 0;
    int cnt, i;

    for (cnt = 0, i = idx; cnt < scctx->isc_nrxd[0] && cnt <= budget;) {
        rxd = (struct e1000_rx_desc *)&rxr->rx_base[i];
        staterr = rxd->status;

        if ((staterr & E1000_RXD_STAT_DD) == 0)
            break;

        if (++i == scctx->isc_nrxd[0])
            i = 0;

        if (staterr & E1000_RXD_STAT_EOP)
            cnt++;
    }
    return (cnt);
}

static int
em_isc_rxd_available(void *arg, uint16_t rxqid, uint32_t idx, int budget)
{
    struct adapter *sc = arg;
    if_softc_ctx_t scctx = sc->shared;
    struct em_rx_queue *que = &sc->rx_queues[rxqid];
    struct rx_ring *rxr = &que->rxr;
    union e1000_rx_desc_extended *rxd;
    u32 staterr = 0;
    int cnt, i;

    for (cnt = 0, i = idx; cnt < scctx->isc_nrxd[0] && cnt <= budget;) {
        rxd = &rxr->rx_base[i];
        staterr = le32toh(rxd->wb.upper.status_error);

        if ((staterr & E1000_RXD_STAT_DD) == 0)
            break;

        if (++i == scctx->isc_nrxd[0]) {
            i = 0;
        }

        if (staterr & E1000_RXD_STAT_EOP)
            cnt++;
    }
    return (cnt);
}

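/*
 * Gather one received packet: record each fragment in ri->iri_frags
 * until the EOP descriptor, then fill in length, checksum and VLAN
 * information for the stack. Frames the hardware flagged as bad return
 * EBADMSG so iflib can discard them.
 */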
static int
lem_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
    struct adapter *adapter = arg;
    if_softc_ctx_t scctx = adapter->shared;
    struct em_rx_queue *que = &adapter->rx_queues[ri->iri_qsidx];
    struct rx_ring *rxr = &que->rxr;
    struct e1000_rx_desc *rxd;
    u16 len;
    u32 status, errors;
    bool eop;
    int i, cidx;

    status = errors = i = 0;
    cidx = ri->iri_cidx;

    do {
        rxd = (struct e1000_rx_desc *)&rxr->rx_base[cidx];
        status = rxd->status;
        errors = rxd->errors;

        /* Error Checking then decrement count */
        MPASS((status & E1000_RXD_STAT_DD) != 0);

        len = le16toh(rxd->length);
        ri->iri_len += len;

        eop = (status & E1000_RXD_STAT_EOP) != 0;

        /* Make sure bad packets are discarded */
        if (errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
            adapter->dropped_pkts++;
            /* XXX fixup if common */
            return (EBADMSG);
        }

        ri->iri_frags[i].irf_flid = 0;
        ri->iri_frags[i].irf_idx = cidx;
        ri->iri_frags[i].irf_len = len;
        /* Zero out the receive descriptors status. */
        rxd->status = 0;

        if (++cidx == scctx->isc_nrxd[0])
            cidx = 0;
        i++;
    } while (!eop);

    /* XXX add a faster way to look this up */
    if (adapter->hw.mac.type >= e1000_82543 && !(status & E1000_RXD_STAT_IXSM))
        lem_receive_checksum(status, errors, ri);

    if (status & E1000_RXD_STAT_VP) {
        ri->iri_vtag = le16toh(rxd->special);
        ri->iri_flags |= M_VLANTAG;
    }

    ri->iri_nfrags = i;
    return (0);
}

static int
em_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
    struct adapter *adapter = arg;
    if_softc_ctx_t scctx = adapter->shared;
    struct em_rx_queue *que = &adapter->rx_queues[ri->iri_qsidx];
    struct rx_ring *rxr = &que->rxr;
    union e1000_rx_desc_extended *rxd;
    u16 len;
    u32 staterr = 0;
    bool eop;
    int i, cidx, vtag;

    i = vtag = 0;
    cidx = ri->iri_cidx;

    do {
        rxd = &rxr->rx_base[cidx];
        staterr = le32toh(rxd->wb.upper.status_error);

        /* Error Checking then decrement count */
        MPASS((staterr & E1000_RXD_STAT_DD) != 0);

        len = le16toh(rxd->wb.upper.length);
        ri->iri_len += len;

        eop = (staterr & E1000_RXD_STAT_EOP) != 0;

        /* Make sure bad packets are discarded */
        if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
            adapter->dropped_pkts++;
            return (EBADMSG);
        }

        ri->iri_frags[i].irf_flid = 0;
        ri->iri_frags[i].irf_idx = cidx;
        ri->iri_frags[i].irf_len = len;
        /* Zero out the receive descriptors status. */
        rxd->wb.upper.status_error &= htole32(~0xFF);

        if (++cidx == scctx->isc_nrxd[0])
            cidx = 0;
        i++;
    } while (!eop);

    /* XXX add a faster way to look this up */
    if (adapter->hw.mac.type >= e1000_82543)
        em_receive_checksum(staterr, ri);

    if (staterr & E1000_RXD_STAT_VP) {
        vtag = le16toh(rxd->wb.upper.vlan);
    }

    ri->iri_vtag = vtag;
    if (vtag)
        ri->iri_flags |= M_VLANTAG;

    ri->iri_nfrags = i;
    return (0);
}

/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the status of the checksum so that the
 *  stack doesn't spend time verifying it.
 *
 *********************************************************************/
static void
lem_receive_checksum(int status, int errors, if_rxd_info_t ri)
{
    /* Did it pass? */
    if (status & E1000_RXD_STAT_IPCS && !(errors & E1000_RXD_ERR_IPE))
        ri->iri_csum_flags = (CSUM_IP_CHECKED|CSUM_IP_VALID);

    if (status & E1000_RXD_STAT_TCPCS) {
        /* Did it pass? */
        if (!(errors & E1000_RXD_ERR_TCPE)) {
            ri->iri_csum_flags |=
                (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
            ri->iri_csum_data = htons(0xffff);
        }
    }
}

static void
em_receive_checksum(uint32_t status, if_rxd_info_t ri)
{
    ri->iri_csum_flags = 0;

    /* Ignore Checksum bit is set */
    if (status & E1000_RXD_STAT_IXSM)
        return;

    /* If the IP checksum exists and there is no IP Checksum error */
    if ((status & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
        E1000_RXD_STAT_IPCS) {
        ri->iri_csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID);
    }

    /* TCP or UDP checksum */
    if ((status & (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
        E1000_RXD_STAT_TCPCS) {
        ri->iri_csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
        ri->iri_csum_data = htons(0xffff);
    }
    if (status & E1000_RXD_STAT_UDPCS) {
        ri->iri_csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
        ri->iri_csum_data = htons(0xffff);
    }
}