/*-
 * Copyright (c) 2016 Nicole Graziano <nicole@nextbsd.org>
 * Copyright (c) 2017 Matthew Macy <mmacy@mattmacy.io>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "if_em.h"

#ifdef RSS
#include <net/rss_config.h>
#include <netinet/in_rss.h>
#endif

#ifdef VERBOSE_DEBUG
#define DPRINTF device_printf
#else
#define DPRINTF(...)
#endif
/*********************************************************************
 *  Local Function prototypes
 *********************************************************************/
static int em_tso_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_upper,
    u32 *txd_lower);
static int em_transmit_checksum_setup(struct adapter *adapter, if_pkt_info_t pi,
    u32 *txd_upper, u32 *txd_lower);
static int em_isc_txd_encap(void *arg, if_pkt_info_t pi);
static void em_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
static int em_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear);

static void em_isc_rxd_refill(void *arg, if_rxd_update_t iru);
static void em_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused,
    qidx_t pidx);
static int em_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx,
    qidx_t budget);
static int em_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);

static void lem_isc_rxd_refill(void *arg, if_rxd_update_t iru);
static int lem_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx,
    qidx_t budget);
static int lem_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);

static void lem_receive_checksum(int status, int errors, if_rxd_info_t ri);
static void em_receive_checksum(uint32_t status, if_rxd_info_t ri);
static int em_determine_rsstype(u32 pkt_info);
extern int em_intr(void *arg);
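
/*
 * Descriptor ops tables registered with iflib.  em_txrx serves the
 * newer em(4) parts using the extended RX descriptor format, while
 * lem_txrx serves the older 8254x-era lem(4) parts, which share the
 * TX path but use legacy RX descriptors.
 */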
struct if_txrx em_txrx = {
        .ift_txd_encap = em_isc_txd_encap,
        .ift_txd_flush = em_isc_txd_flush,
        .ift_txd_credits_update = em_isc_txd_credits_update,
        .ift_rxd_available = em_isc_rxd_available,
        .ift_rxd_pkt_get = em_isc_rxd_pkt_get,
        .ift_rxd_refill = em_isc_rxd_refill,
        .ift_rxd_flush = em_isc_rxd_flush,
        .ift_legacy_intr = em_intr
};
struct if_txrx lem_txrx = {
        .ift_txd_encap = em_isc_txd_encap,
        .ift_txd_flush = em_isc_txd_flush,
        .ift_txd_credits_update = em_isc_txd_credits_update,
        .ift_rxd_available = lem_isc_rxd_available,
        .ift_rxd_pkt_get = lem_isc_rxd_pkt_get,
        .ift_rxd_refill = lem_isc_rxd_refill,
        .ift_rxd_flush = em_isc_rxd_flush,
        .ift_legacy_intr = em_intr
};

extern if_shared_ctx_t em_sctx;
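
/*
 * Debug helper: dump the report-status (RS) bookkeeping for every TX
 * queue.  The (ntxd - 1) masking used for ring wraparound here and
 * below assumes the descriptor ring size is a power of two.
 */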
void
em_dump_rs(struct adapter *adapter)
{
        if_softc_ctx_t scctx = adapter->shared;
        struct em_tx_queue *que;
        struct tx_ring *txr;
        qidx_t i, ntxd, qid, cur;
        int16_t rs_cidx;
        uint8_t status;

        printf("\n");
        ntxd = scctx->isc_ntxd[0];
        for (qid = 0; qid < adapter->tx_num_queues; qid++) {
                que = &adapter->tx_queues[qid];
                txr = &que->txr;
                rs_cidx = txr->tx_rs_cidx;
                if (rs_cidx != txr->tx_rs_pidx) {
                        cur = txr->tx_rsq[rs_cidx];
                        status = txr->tx_base[cur].upper.fields.status;
                        if (!(status & E1000_TXD_STAT_DD))
                                printf("qid[%d]->tx_rsq[%d]: %d clear ", qid, rs_cidx, cur);
                } else {
                        rs_cidx = (rs_cidx - 1) & (ntxd - 1);
                        cur = txr->tx_rsq[rs_cidx];
                        printf("qid[%d]->tx_rsq[rs_cidx-1=%d]: %d ", qid, rs_cidx, cur);
                }
                printf("cidx_prev=%d rs_pidx=%d ", txr->tx_cidx_processed, txr->tx_rs_pidx);
                for (i = 0; i < ntxd; i++) {
                        if (txr->tx_base[i].upper.fields.status & E1000_TXD_STAT_DD)
                                printf("%d set ", i);
                }
                printf("\n");
        }
}
/**********************************************************************
 *
 *  Setup work for hardware segmentation offload (TSO) on
 *  adapters using context descriptors.
 *
 **********************************************************************/
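/*
 * Note: the context descriptor programmed below applies to the data
 * descriptors that follow it in the ring; em_isc_txd_encap relies on
 * this by writing the context first and the payload descriptors after.
 */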
static int
em_tso_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_upper, u32 *txd_lower)
{
        if_softc_ctx_t scctx = adapter->shared;
        struct em_tx_queue *que = &adapter->tx_queues[pi->ipi_qsidx];
        struct tx_ring *txr = &que->txr;
        struct e1000_context_desc *TXD;
        int cur, hdr_len;

        hdr_len = pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen;
        *txd_lower = (E1000_TXD_CMD_DEXT |      /* Extended descr type */
                      E1000_TXD_DTYP_D |        /* Data descr type */
                      E1000_TXD_CMD_TSE);       /* Do TSE on this packet */

        /* IP and/or TCP header checksum calculation and insertion. */
        *txd_upper = (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;

        cur = pi->ipi_pidx;
        TXD = (struct e1000_context_desc *)&txr->tx_base[cur];

        /*
         * Start offset for header checksum calculation.
         * End offset for header checksum calculation.
         * Offset of place to put the checksum.
         */
        TXD->lower_setup.ip_fields.ipcss = pi->ipi_ehdrlen;
        TXD->lower_setup.ip_fields.ipcse =
            htole16(pi->ipi_ehdrlen + pi->ipi_ip_hlen - 1);
        TXD->lower_setup.ip_fields.ipcso = pi->ipi_ehdrlen + offsetof(struct ip, ip_sum);

        /*
         * Start offset for payload checksum calculation.
         * End offset for payload checksum calculation.
         * Offset of place to put the checksum.
         */
        TXD->upper_setup.tcp_fields.tucss = pi->ipi_ehdrlen + pi->ipi_ip_hlen;
        TXD->upper_setup.tcp_fields.tucse = 0;
        TXD->upper_setup.tcp_fields.tucso =
            pi->ipi_ehdrlen + pi->ipi_ip_hlen + offsetof(struct tcphdr, th_sum);

        /*
         * Payload size per packet w/o any headers.
         * Length of all headers up to payload.
         */
        TXD->tcp_seg_setup.fields.mss = htole16(pi->ipi_tso_segsz);
        TXD->tcp_seg_setup.fields.hdr_len = hdr_len;

        TXD->cmd_and_length = htole32(adapter->txd_cmd |
                        E1000_TXD_CMD_DEXT |    /* Extended descr */
                        E1000_TXD_CMD_TSE |     /* TSE context */
                        E1000_TXD_CMD_IP |      /* Do IP csum */
                        E1000_TXD_CMD_TCP |     /* Do TCP checksum */
                        (pi->ipi_len - hdr_len)); /* Total len */
        txr->tx_tso = TRUE;

        if (++cur == scctx->isc_ntxd[0]) {
                cur = 0;
        }
        DPRINTF(iflib_get_dev(adapter->ctx), "%s: pidx: %d cur: %d\n",
            __FUNCTION__, pi->ipi_pidx, cur);
        return (cur);
}
#define TSO_WORKAROUND  4
#define DONT_FORCE_CTX  1
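
/*
 * TSO_WORKAROUND is the number of bytes carved off the final segment
 * of a TSO burst to build a small sentinel descriptor (see
 * em_isc_txd_encap).  DONT_FORCE_CTX permits reuse of a cached
 * checksum-offload context; the reuse path below additionally requires
 * a single TX queue because the 82574L only remembers the last context
 * written, regardless of queue.
 */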
/*********************************************************************
 *  The offload context is protocol specific (TCP/UDP) and thus
 *  only needs to be set when the protocol changes. The occasion
 *  of a context change can be a performance detriment, and
 *  might be better just disabled. The reason arises in the way
 *  in which the controller supports pipelined requests from the
 *  Tx data DMA. Up to four requests can be pipelined, and they may
 *  belong to the same packet or to multiple packets. However all
 *  requests for one packet are issued before a request is issued
 *  for a subsequent packet, and if a request for the next packet
 *  requires a context change, that request will be stalled
 *  until the previous request completes. This means setting up
 *  a new context effectively disables pipelined Tx data DMA, which
 *  in turn greatly slows down performance when sending small
 *  frames.
 **********************************************************************/
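/*
 * To exploit that, the ring caches the last context programmed
 * (csum_lhlen, csum_iphlen, csum_flags, csum_txd_upper/lower) so that
 * packets with an identical header layout and offload request can skip
 * writing a new context descriptor entirely.
 */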
static int
em_transmit_checksum_setup(struct adapter *adapter, if_pkt_info_t pi, u32 *txd_upper, u32 *txd_lower)
{
        struct e1000_context_desc *TXD = NULL;
        if_softc_ctx_t scctx = adapter->shared;
        struct em_tx_queue *que = &adapter->tx_queues[pi->ipi_qsidx];
        struct tx_ring *txr = &que->txr;
        int csum_flags = pi->ipi_csum_flags;
        int cur, hdr_len;
        u32 cmd;

        cur = pi->ipi_pidx;
        hdr_len = pi->ipi_ehdrlen + pi->ipi_ip_hlen;
        cmd = adapter->txd_cmd;

        /*
         * The 82574L can only remember the *last* context used
         * regardless of queue that it was used for.  We cannot reuse
         * contexts on this hardware platform and must generate a new
         * context every time.  82574L hardware spec, section 7.2.6,
         * second note.
         */
        if (DONT_FORCE_CTX &&
            adapter->tx_num_queues == 1 &&
            txr->csum_lhlen == pi->ipi_ehdrlen &&
            txr->csum_iphlen == pi->ipi_ip_hlen &&
            txr->csum_flags == csum_flags) {
                /*
                 * Same csum offload context as the previous packets;
                 * just return.
                 */
                *txd_upper = txr->csum_txd_upper;
                *txd_lower = txr->csum_txd_lower;
                return (cur);
        }

        TXD = (struct e1000_context_desc *)&txr->tx_base[cur];
        if (csum_flags & CSUM_IP) {
                *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
                /*
                 * Start offset for header checksum calculation.
                 * End offset for header checksum calculation.
                 * Offset of place to put the checksum.
                 */
                TXD->lower_setup.ip_fields.ipcss = pi->ipi_ehdrlen;
                TXD->lower_setup.ip_fields.ipcse = htole16(hdr_len);
                TXD->lower_setup.ip_fields.ipcso = pi->ipi_ehdrlen + offsetof(struct ip, ip_sum);
                cmd |= E1000_TXD_CMD_IP;
        }

        if (csum_flags & (CSUM_TCP|CSUM_UDP)) {
                uint8_t tucso;

                *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
                *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;

                if (csum_flags & CSUM_TCP) {
                        tucso = hdr_len + offsetof(struct tcphdr, th_sum);
                        cmd |= E1000_TXD_CMD_TCP;
                } else
                        tucso = hdr_len + offsetof(struct udphdr, uh_sum);
                TXD->upper_setup.tcp_fields.tucss = hdr_len;
                TXD->upper_setup.tcp_fields.tucse = htole16(0);
                TXD->upper_setup.tcp_fields.tucso = tucso;
        }

        txr->csum_lhlen = pi->ipi_ehdrlen;
        txr->csum_iphlen = pi->ipi_ip_hlen;
        txr->csum_flags = csum_flags;
        txr->csum_txd_upper = *txd_upper;
        txr->csum_txd_lower = *txd_lower;

        TXD->tcp_seg_setup.data = htole32(0);
        TXD->cmd_and_length =
            htole32(E1000_TXD_CMD_IFCS | E1000_TXD_CMD_DEXT | cmd);

        if (++cur == scctx->isc_ntxd[0]) {
                cur = 0;
        }
        DPRINTF(iflib_get_dev(adapter->ctx),
            "checksum_setup csum_flags=%x txd_upper=%x txd_lower=%x hdr_len=%d cmd=%x\n",
            csum_flags, *txd_upper, *txd_lower, hdr_len, cmd);
        return (cur);
}
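
/*
 * Encapsulate one packet described by pi into legacy TX descriptors.
 * Any hardware-assist context (TSO or checksum) is written first and
 * the data descriptors follow.  The index of each packet's last
 * descriptor that requests Report Status is recorded in tx_rsq so
 * em_isc_txd_credits_update can later poll only those slots.
 */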
static int
em_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
        struct adapter *sc = arg;
        if_softc_ctx_t scctx = sc->shared;
        struct em_tx_queue *que = &sc->tx_queues[pi->ipi_qsidx];
        struct tx_ring *txr = &que->txr;
        bus_dma_segment_t *segs = pi->ipi_segs;
        int nsegs = pi->ipi_nsegs;
        int csum_flags = pi->ipi_csum_flags;
        int i, j, first, pidx_last;
        u32 txd_flags, txd_upper = 0, txd_lower = 0;
        qidx_t ntxd;

        struct e1000_tx_desc *ctxd = NULL;
        bool do_tso, tso_desc;

        txd_flags = pi->ipi_flags & IPI_TX_INTR ? E1000_TXD_CMD_RS : 0;
        i = first = pi->ipi_pidx;
        do_tso = (csum_flags & CSUM_TSO);
        tso_desc = FALSE;
        ntxd = scctx->isc_ntxd[0];
        /*
         * TSO Hardware workaround, if this packet is not
         * TSO, and is only a single descriptor long, and
         * it follows a TSO burst, then we need to add a
         * sentinel descriptor to prevent premature writeback.
         */
        if ((!do_tso) && (txr->tx_tso == TRUE)) {
                if (nsegs == 1)
                        tso_desc = TRUE;
                txr->tx_tso = FALSE;
        }

        /* Do hardware assists */
        if (do_tso) {
                i = em_tso_setup(sc, pi, &txd_upper, &txd_lower);
                tso_desc = TRUE;
        } else if (csum_flags & EM_CSUM_OFFLOAD) {
                i = em_transmit_checksum_setup(sc, pi, &txd_upper, &txd_lower);
        }

        if (pi->ipi_mflags & M_VLANTAG) {
                /* Set the vlan id. */
                txd_upper |= htole16(pi->ipi_vtag) << 16;
                /* Tell hardware to add tag */
                txd_lower |= htole32(E1000_TXD_CMD_VLE);
        }

        DPRINTF(iflib_get_dev(sc->ctx), "encap: set up tx: nsegs=%d first=%d i=%d\n", nsegs, first, i);
        /* XXX adapter->pcix_82544 -- lem_fill_descriptors */

        /* Set up our transmit descriptors */
        for (j = 0; j < nsegs; j++) {
                bus_size_t seg_len;
                bus_addr_t seg_addr;
                uint32_t cmd;

                ctxd = &txr->tx_base[i];
                seg_addr = segs[j].ds_addr;
                seg_len = segs[j].ds_len;
                cmd = E1000_TXD_CMD_IFCS | sc->txd_cmd;

                /*
                 * TSO Workaround:
                 * If this is the last descriptor, we want to
                 * split it so we have a small final sentinel.
                 */
                if (tso_desc && (j == (nsegs - 1)) && (seg_len > 8)) {
                        seg_len -= TSO_WORKAROUND;
                        ctxd->buffer_addr = htole64(seg_addr);
                        ctxd->lower.data = htole32(cmd | txd_lower | seg_len);
                        ctxd->upper.data = htole32(txd_upper);

                        if (++i == scctx->isc_ntxd[0])
                                i = 0;

                        /* Now make the sentinel */
                        ctxd = &txr->tx_base[i];
                        ctxd->buffer_addr = htole64(seg_addr + seg_len);
                        ctxd->lower.data = htole32(cmd | txd_lower | TSO_WORKAROUND);
                        ctxd->upper.data = htole32(txd_upper);
                        pidx_last = i;
                        if (++i == scctx->isc_ntxd[0])
                                i = 0;
                        DPRINTF(iflib_get_dev(sc->ctx), "TSO path pidx_last=%d i=%d ntxd[0]=%d\n", pidx_last, i, scctx->isc_ntxd[0]);
                } else {
                        ctxd->buffer_addr = htole64(seg_addr);
                        ctxd->lower.data = htole32(cmd | txd_lower | seg_len);
                        ctxd->upper.data = htole32(txd_upper);
                        pidx_last = i;
                        if (++i == scctx->isc_ntxd[0])
                                i = 0;
                        DPRINTF(iflib_get_dev(sc->ctx), "pidx_last=%d i=%d ntxd[0]=%d\n", pidx_last, i, scctx->isc_ntxd[0]);
                }
        }

        /*
         * Last Descriptor of Packet
         * needs End Of Packet (EOP)
         * and Report Status (RS)
         */
        if (txd_flags && nsegs) {
                txr->tx_rsq[txr->tx_rs_pidx] = pidx_last;
                DPRINTF(iflib_get_dev(sc->ctx), "setting to RS on %d rs_pidx %d first: %d\n", pidx_last, txr->tx_rs_pidx, first);
                txr->tx_rs_pidx = (txr->tx_rs_pidx + 1) & (ntxd - 1);
                MPASS(txr->tx_rs_pidx != txr->tx_rs_cidx);
        }
        ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | txd_flags);
        DPRINTF(iflib_get_dev(sc->ctx), "tx_buffers[%d]->eop = %d ipi_new_pidx=%d\n", first, pidx_last, i);
        pi->ipi_new_pidx = i;

        return (0);
}
static void
em_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
        struct adapter *adapter = arg;
        struct em_tx_queue *que = &adapter->tx_queues[txqid];
        struct tx_ring *txr = &que->txr;

        E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), pidx);
}
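
/*
 * Report how many TX descriptors the hardware has completed.  With
 * clear == false this is a cheap peek that only tests the DD bit of
 * the first pending RS slot; with clear == true the RS queue is walked
 * and the number of reclaimable descriptors is returned to iflib.
 */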
static int
em_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
        struct adapter *adapter = arg;
        if_softc_ctx_t scctx = adapter->shared;
        struct em_tx_queue *que = &adapter->tx_queues[txqid];
        struct tx_ring *txr = &que->txr;

        qidx_t processed = 0;
        int updated;
        qidx_t cur, prev, ntxd, rs_cidx;
        int32_t delta;
        uint8_t status;

        rs_cidx = txr->tx_rs_cidx;
        if (rs_cidx == txr->tx_rs_pidx)
                return (0);
        cur = txr->tx_rsq[rs_cidx];
        MPASS(cur != QIDX_INVALID);
        status = txr->tx_base[cur].upper.fields.status;
        updated = !!(status & E1000_TXD_STAT_DD);

        if (clear == false || updated == 0)
                return (updated);

        prev = txr->tx_cidx_processed;
        ntxd = scctx->isc_ntxd[0];
        do {
                delta = (int32_t)cur - (int32_t)prev;
                MPASS(prev == 0 || delta != 0);
                if (delta < 0)
                        delta += ntxd;
                DPRINTF(iflib_get_dev(adapter->ctx),
                    "%s: cidx_processed=%u cur=%u clear=%d delta=%d\n",
                    __FUNCTION__, prev, cur, clear, delta);

                processed += delta;
                prev = cur;
                rs_cidx = (rs_cidx + 1) & (ntxd - 1);
                if (rs_cidx == txr->tx_rs_pidx)
                        break;
                cur = txr->tx_rsq[rs_cidx];
                MPASS(cur != QIDX_INVALID);
                status = txr->tx_base[cur].upper.fields.status;
        } while ((status & E1000_TXD_STAT_DD));

        txr->tx_rs_cidx = rs_cidx;
        txr->tx_cidx_processed = prev;
        return (processed);
}
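
/*
 * RX refill for legacy descriptors: store the fresh buffer physical
 * addresses in the ring.  The status byte is cleared so a stale DD bit
 * cannot make rxd_available treat the slot as already complete.
 */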
static void
lem_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
        struct adapter *sc = arg;
        if_softc_ctx_t scctx = sc->shared;
        struct em_rx_queue *que = &sc->rx_queues[iru->iru_qsidx];
        struct rx_ring *rxr = &que->rxr;
        struct e1000_rx_desc *rxd;
        uint64_t *paddrs;
        uint32_t next_pidx, pidx;
        uint16_t count;
        int i;

        paddrs = iru->iru_paddrs;
        pidx = iru->iru_pidx;
        count = iru->iru_count;

        for (i = 0, next_pidx = pidx; i < count; i++) {
                rxd = (struct e1000_rx_desc *)&rxr->rx_base[next_pidx];
                rxd->buffer_addr = htole64(paddrs[i]);
                /* status bits must be cleared */
                rxd->status = 0;

                if (++next_pidx == scctx->isc_nrxd[0])
                        next_pidx = 0;
        }
}
static void
em_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
        struct adapter *sc = arg;
        if_softc_ctx_t scctx = sc->shared;
        uint16_t rxqid = iru->iru_qsidx;
        struct em_rx_queue *que = &sc->rx_queues[rxqid];
        struct rx_ring *rxr = &que->rxr;
        union e1000_rx_desc_extended *rxd;
        uint64_t *paddrs;
        uint32_t next_pidx, pidx;
        uint16_t count;
        int i;

        paddrs = iru->iru_paddrs;
        pidx = iru->iru_pidx;
        count = iru->iru_count;

        for (i = 0, next_pidx = pidx; i < count; i++) {
                rxd = &rxr->rx_base[next_pidx];
                rxd->read.buffer_addr = htole64(paddrs[i]);
                /* DD bits must be cleared */
                rxd->wb.upper.status_error = 0;

                if (++next_pidx == scctx->isc_nrxd[0])
                        next_pidx = 0;
        }
}
static void
em_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, qidx_t pidx)
{
        struct adapter *sc = arg;
        struct em_rx_queue *que = &sc->rx_queues[rxqid];
        struct rx_ring *rxr = &que->rxr;

        E1000_WRITE_REG(&sc->hw, E1000_RDT(rxr->me), pidx);
}
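
/*
 * Count the complete packets available in the RX ring starting at idx:
 * a packet is counted each time a descriptor with EOP set is seen, and
 * the scan stops at the first descriptor whose DD bit is clear.  A
 * budget of one is the fast path iflib uses to ask whether anything is
 * pending at all.
 */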
static int
lem_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget)
{
        struct adapter *sc = arg;
        if_softc_ctx_t scctx = sc->shared;
        struct em_rx_queue *que = &sc->rx_queues[rxqid];
        struct rx_ring *rxr = &que->rxr;
        struct e1000_rx_desc *rxd;
        u32 staterr = 0;
        int cnt, i;

        if (budget == 1) {
                rxd = (struct e1000_rx_desc *)&rxr->rx_base[idx];
                staterr = rxd->status;
                return (staterr & E1000_RXD_STAT_DD);
        }

        for (cnt = 0, i = idx; cnt < scctx->isc_nrxd[0] && cnt <= budget;) {
                rxd = (struct e1000_rx_desc *)&rxr->rx_base[i];
                staterr = rxd->status;

                if ((staterr & E1000_RXD_STAT_DD) == 0)
                        break;
                if (++i == scctx->isc_nrxd[0])
                        i = 0;
                if (staterr & E1000_RXD_STAT_EOP)
                        cnt++;
        }
        return (cnt);
}
static int
em_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget)
{
        struct adapter *sc = arg;
        if_softc_ctx_t scctx = sc->shared;
        struct em_rx_queue *que = &sc->rx_queues[rxqid];
        struct rx_ring *rxr = &que->rxr;
        union e1000_rx_desc_extended *rxd;
        u32 staterr = 0;
        int cnt, i;

        if (budget == 1) {
                rxd = &rxr->rx_base[idx];
                staterr = le32toh(rxd->wb.upper.status_error);
                return (staterr & E1000_RXD_STAT_DD);
        }

        for (cnt = 0, i = idx; cnt < scctx->isc_nrxd[0] && cnt <= budget;) {
                rxd = &rxr->rx_base[i];
                staterr = le32toh(rxd->wb.upper.status_error);

                if ((staterr & E1000_RXD_STAT_DD) == 0)
                        break;
                if (++i == scctx->isc_nrxd[0]) {
                        i = 0;
                }
                if (staterr & E1000_RXD_STAT_EOP)
                        cnt++;
        }
        return (cnt);
}
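
/*
 * Gather one packet's descriptors: walk the ring from iri_cidx until
 * EOP, recording each fragment in ri->iri_frags and accumulating the
 * total length, then fill in checksum, VLAN, and (for em) RSS metadata
 * for the stack.
 */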
static int
lem_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
        struct adapter *adapter = arg;
        if_softc_ctx_t scctx = adapter->shared;
        struct em_rx_queue *que = &adapter->rx_queues[ri->iri_qsidx];
        struct rx_ring *rxr = &que->rxr;
        struct e1000_rx_desc *rxd;
        u16 len;
        u32 status, errors;
        bool eop;
        int i, cidx;

        status = errors = i = 0;
        cidx = ri->iri_cidx;
        do {
                rxd = (struct e1000_rx_desc *)&rxr->rx_base[cidx];
                status = rxd->status;
                errors = rxd->errors;
                /* Error Checking then decrement count */
                MPASS((status & E1000_RXD_STAT_DD) != 0);
                len = le16toh(rxd->length);
                ri->iri_len += len;
                eop = (status & E1000_RXD_STAT_EOP) != 0;
                /* Make sure bad packets are discarded */
                if (errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
                        adapter->dropped_pkts++;
                        /* XXX fixup if common */
                        return (EBADMSG);
                }
                ri->iri_frags[i].irf_flid = 0;
                ri->iri_frags[i].irf_idx = cidx;
                ri->iri_frags[i].irf_len = len;
                /* Zero out the receive descriptors status. */
                rxd->status = 0;
                if (++cidx == scctx->isc_nrxd[0])
                        cidx = 0;
                i++;
        } while (!eop);

        /* XXX add a faster way to look this up */
        if (adapter->hw.mac.type >= e1000_82543 && !(status & E1000_RXD_STAT_IXSM))
                lem_receive_checksum(status, errors, ri);

        if (status & E1000_RXD_STAT_VP) {
                ri->iri_vtag = le16toh(rxd->special);
                ri->iri_flags |= M_VLANTAG;
        }
        ri->iri_nfrags = i;
        return (0);
}
static int
em_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
        struct adapter *adapter = arg;
        if_softc_ctx_t scctx = adapter->shared;
        struct em_rx_queue *que = &adapter->rx_queues[ri->iri_qsidx];
        struct rx_ring *rxr = &que->rxr;
        union e1000_rx_desc_extended *rxd;
        u16 len;
        u32 pkt_info, staterr = 0;
        bool eop;
        int i, cidx, vtag;

        i = vtag = 0;
        cidx = ri->iri_cidx;
        do {
                rxd = &rxr->rx_base[cidx];
                staterr = le32toh(rxd->wb.upper.status_error);
                pkt_info = le32toh(rxd->wb.lower.mrq);
                /* Error Checking then decrement count */
                MPASS((staterr & E1000_RXD_STAT_DD) != 0);
                len = le16toh(rxd->wb.upper.length);
                ri->iri_len += len;
                eop = (staterr & E1000_RXD_STAT_EOP) != 0;
                /* Make sure bad packets are discarded */
                if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
                        adapter->dropped_pkts++;
                        return (EBADMSG);
                }
                ri->iri_frags[i].irf_flid = 0;
                ri->iri_frags[i].irf_idx = cidx;
                ri->iri_frags[i].irf_len = len;
                /* Zero out the receive descriptors status. */
                rxd->wb.upper.status_error &= htole32(~0xFF);
                if (++cidx == scctx->isc_nrxd[0])
                        cidx = 0;
                i++;
        } while (!eop);

        /* XXX add a faster way to look this up */
        if (adapter->hw.mac.type >= e1000_82543)
                em_receive_checksum(staterr, ri);

        if (staterr & E1000_RXD_STAT_VP)
                vtag = le16toh(rxd->wb.upper.vlan);
        ri->iri_vtag = vtag;
        if (vtag)
                ri->iri_flags |= M_VLANTAG;

        ri->iri_flowid = le32toh(rxd->wb.lower.hi_dword.rss);
        ri->iri_rsstype = em_determine_rsstype(pkt_info);
        ri->iri_nfrags = i;
        return (0);
}
/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the status of checksum so that the stack
 *  doesn't spend time verifying the checksum.
 *
 *********************************************************************/
static void
lem_receive_checksum(int status, int errors, if_rxd_info_t ri)
{
        /* Did it pass? */
        if (status & E1000_RXD_STAT_IPCS && !(errors & E1000_RXD_ERR_IPE))
                ri->iri_csum_flags = (CSUM_IP_CHECKED|CSUM_IP_VALID);

        if (status & E1000_RXD_STAT_TCPCS) {
                /* Did it pass? */
                if (!(errors & E1000_RXD_ERR_TCPE)) {
                        ri->iri_csum_flags |=
                            (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
                        ri->iri_csum_data = htons(0xffff);
                }
        }
}
/********************************************************************
 *
 *  Parse the packet type to determine the appropriate RSS hash
 *
 ******************************************************************/
static int
em_determine_rsstype(u32 pkt_info)
{
        switch (pkt_info & E1000_RXDADV_RSSTYPE_MASK) {
        case E1000_RXDADV_RSSTYPE_IPV4_TCP:
                return M_HASHTYPE_RSS_TCP_IPV4;
        case E1000_RXDADV_RSSTYPE_IPV4:
                return M_HASHTYPE_RSS_IPV4;
        case E1000_RXDADV_RSSTYPE_IPV6_TCP:
                return M_HASHTYPE_RSS_TCP_IPV6;
        case E1000_RXDADV_RSSTYPE_IPV6_EX:
                return M_HASHTYPE_RSS_IPV6_EX;
        case E1000_RXDADV_RSSTYPE_IPV6:
                return M_HASHTYPE_RSS_IPV6;
        case E1000_RXDADV_RSSTYPE_IPV6_TCP_EX:
                return M_HASHTYPE_RSS_TCP_IPV6_EX;
        default:
                return M_HASHTYPE_OPAQUE;
        }
}
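
/*
 * Extended RX descriptors report status and errors in one 32-bit
 * status_error word, hence the single argument here versus the
 * separate status/errors bytes taken by lem_receive_checksum above.
 */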
static void
em_receive_checksum(uint32_t status, if_rxd_info_t ri)
{
        ri->iri_csum_flags = 0;

        /* Ignore Checksum bit is set */
        if (status & E1000_RXD_STAT_IXSM)
                return;

        /* If the IP checksum exists and there is no IP Checksum error */
        if ((status & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
            E1000_RXD_STAT_IPCS) {
                ri->iri_csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID);
        }

        /* TCP or UDP checksum */
        if ((status & (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
            E1000_RXD_STAT_TCPCS) {
                ri->iri_csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
                ri->iri_csum_data = htons(0xffff);
        }
        if (status & E1000_RXD_STAT_UDPCS) {
                ri->iri_csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
                ri->iri_csum_data = htons(0xffff);
        }
}