2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2020 Advanced Micro Devices, Inc.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * Contact Information :
28 * Rajesh Kumar <rajesh1.kumar@amd.com>
29 * Shreyank Amartya <Shreyank.Amartya@amd.com>
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
37 #include "xgbe-common.h"
/*
 * Forward declarations for the iflib if_txrx callbacks implemented in this
 * file and referenced by the axgbe_txrx operations table below.
 */
42 static int axgbe_isc_txd_encap(void *, if_pkt_info_t);
43 static void axgbe_isc_txd_flush(void *, uint16_t, qidx_t);
44 static int axgbe_isc_txd_credits_update(void *, uint16_t, bool);
45 static void axgbe_isc_rxd_refill(void *, if_rxd_update_t);
46 static void axgbe_isc_rxd_flush(void *, uint16_t, uint8_t, qidx_t);
47 static int axgbe_isc_rxd_available(void *, uint16_t, qidx_t, qidx_t);
48 static int axgbe_isc_rxd_pkt_get(void *, if_rxd_info_t);
/*
 * iflib tx/rx operations table for the axgbe driver.  iflib calls these
 * hooks to encap/flush Tx descriptors, reclaim Tx credits, and to
 * refill/poll/harvest/flush Rx descriptors.  ift_legacy_intr is NULL, so
 * no legacy (INTx) interrupt path is provided here; presumably per-channel
 * MSI-X interrupts are set up elsewhere in the driver -- TODO confirm.
 */
50 struct if_txrx axgbe_txrx = {
51 .ift_txd_encap = axgbe_isc_txd_encap,
52 .ift_txd_flush = axgbe_isc_txd_flush,
53 .ift_txd_credits_update = axgbe_isc_txd_credits_update,
54 .ift_rxd_available = axgbe_isc_rxd_available,
55 .ift_rxd_pkt_get = axgbe_isc_rxd_pkt_get,
56 .ift_rxd_refill = axgbe_isc_rxd_refill,
57 .ift_rxd_flush = axgbe_isc_rxd_flush,
58 .ift_legacy_intr = NULL
/* NOTE(review): the closing "};" of this initializer is not visible in this chunk. */
/*
 * Debug helper: dump the fields of an iflib packet-info structure (lengths,
 * queue/descriptor indices, checksum/VLAN/TSO metadata) via axgbe_printf()
 * at verbosity level 1.  Logging only; no state is modified.
 * NOTE(review): the "static void" line and braces are elided in this listing.
 */
62 xgbe_print_pkt_info(struct xgbe_prv_data *pdata, if_pkt_info_t pi)
65 axgbe_printf(1, "------Packet Info Start------\n");
66 axgbe_printf(1, "pi len: %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n",
67 pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx);
68 axgbe_printf(1, "pi new_pidx: %d csum_flags: %x mflags: %x vtag: %d\n",
69 pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_mflags, pi->ipi_vtag);
70 axgbe_printf(1, "pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n",
71 pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto);
72 axgbe_printf(1, "pi tcp_hlen: %d tcp_hflags: %x tcp_seq: %d tso_segsz %d\n",
73 pi->ipi_tcp_hlen, pi->ipi_tcp_hflags, pi->ipi_tcp_seq, pi->ipi_tso_segsz);
/*
 * Set up a Tx CONTEXT descriptor when the packet's parameters differ from
 * what is currently programmed on the ring:
 *   - TSO: if CSUM_TSO is requested and ipi_tso_segsz differs from the
 *     cached ring->tx.cur_mss, write the new MSS (TCMSSV) into the context
 *     descriptor and update the cache.
 *   - VLAN: if ipi_vtag is set and differs from the cached
 *     ring->tx.cur_vlan_ctag, write the new tag (VLTV) and update the cache.
 * Caching avoids emitting a context descriptor for every packet.
 * Returns a value tested by axgbe_isc_txd_encap() -- presumably nonzero when
 * a context descriptor was consumed so the caller advances the ring; the
 * return statements are not visible in this listing, TODO confirm.
 */
77 axgbe_ctx_desc_setup(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
80 struct xgbe_ring_desc *rdesc;
81 struct xgbe_ring_data *rdata;
84 rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
87 axgbe_printf(1, "ipi_tso_segsz %d cur_mss %d idx %d\n",
88 pi->ipi_tso_segsz, ring->tx.cur_mss, ring->cur);
90 axgbe_printf(1, "ipi_vtag 0x%x cur_vlan_ctag 0x%x\n",
91 pi->ipi_vtag, ring->tx.cur_vlan_ctag);
93 if ((pi->ipi_csum_flags & CSUM_TSO) &&
94 (pi->ipi_tso_segsz != ring->tx.cur_mss)) {
96 * Set TSO maximum segment size
97 * Mark as context descriptor
98 * Indicate this descriptor contains MSS
100 XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
101 MSS, pi->ipi_tso_segsz);
102 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, CTXT, 1);
103 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, TCMSSV, 1);
104 ring->tx.cur_mss = pi->ipi_tso_segsz;
108 if (pi->ipi_vtag && (pi->ipi_vtag != ring->tx.cur_vlan_ctag)) {
110 * Mark it as context descriptor
112 * Indicate this descriptor contains the VLAN tag
114 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, CTXT, 1);
115 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
117 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, VLTV, 1);
118 ring->tx.cur_vlan_ctag = pi->ipi_vtag;
/*
 * Compute how many on-wire frames (packet->tx_packets) and bytes
 * (packet->tx_bytes) this iflib packet will produce, for Tx interrupt
 * coalescing accounting in axgbe_isc_txd_encap().
 *
 * hlen = L2 + L3 + L4 header length from the packet info.  For TSO the
 * TCP payload (ipi_len - hlen) is walked in MTU-sized chunks (max_len =
 * MTU + ETH_HLEN, plus VLAN_HLEN presumably only when a VLAN tag is
 * present -- the guarding "if" line is elided in this listing, confirm),
 * each chunk carrying a replicated header.  The non-TSO branch is also
 * elided here; presumably it counts a single frame of ipi_len bytes.
 * Returns hlen (used by the caller for the TSO split).
 */
126 axgbe_calculate_tx_parms(struct xgbe_prv_data *pdata, if_pkt_info_t pi,
127 struct xgbe_packet_data *packet)
129 uint32_t tcp_payload_len = 0, bytes = 0;
130 uint16_t max_len, hlen, payload_len, pkts = 0;
132 packet->tx_packets = packet->tx_bytes = 0;
134 hlen = pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen;
135 if (pi->ipi_csum_flags & CSUM_TSO) {
137 tcp_payload_len = pi->ipi_len - hlen;
138 axgbe_printf(1, "%s: ipi_len %x elen %d iplen %d tcplen %d\n",
139 __func__, pi->ipi_len, pi->ipi_ehdrlen, pi->ipi_ip_hlen,
142 max_len = if_getmtu(pdata->netdev) + ETH_HLEN;
144 max_len += VLAN_HLEN;
146 while (tcp_payload_len) {
/* Each TSO segment carries hlen of headers plus up to (max_len - hlen) payload. */
148 payload_len = max_len - hlen;
149 payload_len = min(payload_len, tcp_payload_len);
150 tcp_payload_len -= payload_len;
152 bytes += (hlen + payload_len);
153 axgbe_printf(1, "%s: max_len %d payload_len %d "
154 "tcp_len %d\n", __func__, max_len, payload_len,
162 packet->tx_packets = pkts;
163 packet->tx_bytes = bytes;
165 axgbe_printf(1, "%s: packets %d bytes %d hlen %d\n", __func__,
166 packet->tx_packets, packet->tx_bytes, hlen);
/*
 * iflib ift_txd_encap callback: program the XGMAC Tx descriptor chain for
 * one packet described by 'pi' on Tx queue pi->ipi_qsidx.
 *
 * Flow (as visible in this listing; several lines are elided):
 *   1. Sanity checks: non-zero length, ring->cur == pi->ipi_pidx.
 *   2. axgbe_calculate_tx_parms() fills packet->tx_packets/tx_bytes and the
 *      coalesce counter decides whether the IC (interrupt-on-completion)
 *      bit will be set on the last descriptor (tx_set_ic).
 *   3. axgbe_ctx_desc_setup() optionally consumes a CONTEXT descriptor
 *      for a changed TSO MSS or VLAN tag.
 *   4. First NORMAL descriptor: buffer address/length of segment 0 (the
 *      headers for TSO), FD bit, VLAN-insert (VTIR), and either the TSO
 *      fields (TSE/TCPPL/TCPHDRLEN) or FL + checksum-insert (CIC).
 *   5. Remaining segments get OWN set immediately; the first descriptor's
 *      OWN bit is set last so hardware only starts once the chain is
 *      complete.
 *   6. ring->cur and pi->ipi_new_pidx advance to the next free slot
 *      (masked by rdesc_count - 1, so rdesc_count is a power of two).
 * Returns an iflib status code; the return statements (e.g. EINVAL on the
 * sanity-check failures) are elided in this listing -- TODO confirm.
 */
172 axgbe_isc_txd_encap(void *arg, if_pkt_info_t pi)
174 struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg;
175 struct xgbe_prv_data *pdata = &sc->pdata;
176 struct xgbe_channel *channel;
177 struct xgbe_ring *ring;
178 struct xgbe_ring_desc *rdesc;
179 struct xgbe_ring_data *rdata;
180 struct xgbe_packet_data *packet;
181 unsigned int cur, start, tx_set_ic;
182 uint16_t offset, hlen, datalen, tcp_payload_len = 0;
185 xgbe_print_pkt_info(pdata, pi);
187 channel = pdata->channel[pi->ipi_qsidx];
188 ring = channel->tx_ring;
189 packet = &ring->packet_data;
190 cur = start = ring->cur;
192 axgbe_printf(1, "--> %s: txq %d cur %d dirty %d\n",
193 __func__, pi->ipi_qsidx, ring->cur, ring->dirty);
195 MPASS(pi->ipi_len != 0);
196 if (__predict_false(pi->ipi_len == 0)) {
197 axgbe_error("empty packet received from stack\n");
/* iflib guarantees ring->cur tracks ipi_pidx; bail out if they diverge. */
201 MPASS(ring->cur == pi->ipi_pidx);
202 if (__predict_false(ring->cur != pi->ipi_pidx)) {
203 axgbe_error("--> %s: cur(%d) ne pidx(%d)\n", __func__,
204 ring->cur, pi->ipi_pidx);
207 /* Determine if an interrupt should be generated for this Tx:
209 * - Tx frame count exceeds the frame count setting
210 * - Addition of Tx frame count to the frame count since the
211 * last interrupt was set exceeds the frame count setting
213 * - No frame count setting specified (ethtool -C ethX tx-frames 0)
214 * - Addition of Tx frame count to the frame count since the
215 * last interrupt was set does not exceed the frame count setting
217 memset(packet, 0, sizeof(*packet));
218 hlen = axgbe_calculate_tx_parms(pdata, pi, packet);
219 axgbe_printf(1, "%s: ipi_len %d tx_pkts %d tx_bytes %d hlen %d\n",
220 __func__, pi->ipi_len, packet->tx_packets, packet->tx_bytes, hlen);
/* NOTE(review): the tx_set_ic assignments in these branches are elided here. */
222 ring->coalesce_count += packet->tx_packets;
223 if (!pdata->tx_frames)
225 else if (packet->tx_packets > pdata->tx_frames)
227 else if ((ring->coalesce_count % pdata->tx_frames) < (packet->tx_packets))
232 /* Add Context descriptor if needed (for TSO, VLAN cases) */
233 if (axgbe_ctx_desc_setup(pdata, ring, pi))
236 rdata = XGBE_GET_DESC_DATA(ring, cur);
237 rdesc = rdata->rdesc;
239 axgbe_printf(1, "%s: cur %d lo 0x%lx hi 0x%lx ds_len 0x%x "
240 "ipi_len 0x%x\n", __func__, cur,
241 lower_32_bits(pi->ipi_segs[cur_seg].ds_addr),
242 upper_32_bits(pi->ipi_segs[cur_seg].ds_addr),
243 (int)pi->ipi_segs[cur_seg].ds_len, pi->ipi_len);
245 /* Update buffer address (for TSO this is the header) */
246 rdesc->desc0 = cpu_to_le32(lower_32_bits(pi->ipi_segs[cur_seg].ds_addr));
247 rdesc->desc1 = cpu_to_le32(upper_32_bits(pi->ipi_segs[cur_seg].ds_addr));
249 /* Update the buffer length */
251 hlen = pi->ipi_segs[cur_seg].ds_len;
252 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L, hlen);
254 /* VLAN tag insertion check */
256 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
257 TX_NORMAL_DESC2_VLAN_INSERT);
260 /* Mark it as First Descriptor */
261 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1);
263 /* Mark it as a NORMAL descriptor */
264 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
267 * Set the OWN bit if this is not the first descriptor. For first
268 * descriptor, OWN bit will be set at last so that hardware will
269 * process the descriptors only after the OWN bit for the first
273 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
275 if (pi->ipi_csum_flags & CSUM_TSO) {
/* TSO: hardware segments the payload; give it payload and header lengths. */
277 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1);
279 tcp_payload_len = pi->ipi_len - hlen;
281 /* Set TCP payload length*/
282 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL,
285 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
288 axgbe_printf(1, "tcp_payload %d tcp_hlen %d\n", tcp_payload_len,
291 /* Enable CRC and Pad Insertion */
292 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);
/* CIC = 0x3: insert IP header and TCP/UDP payload checksums in hardware. */
295 if (pi->ipi_csum_flags)
296 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
298 /* Set total length to be transmitted */
299 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL, pi->ipi_len);
/* Walk the remaining DMA segments, one NORMAL descriptor each. */
304 for (cur_seg = 0 ; cur_seg < pi->ipi_nsegs ; cur_seg++) {
308 datalen = pi->ipi_segs[cur_seg].ds_len - hlen;
311 datalen = pi->ipi_segs[cur_seg].ds_len;
315 rdata = XGBE_GET_DESC_DATA(ring, cur);
316 rdesc = rdata->rdesc;
319 /* Update buffer address */
321 cpu_to_le32(lower_32_bits(pi->ipi_segs[cur_seg].ds_addr + offset));
323 cpu_to_le32(upper_32_bits(pi->ipi_segs[cur_seg].ds_addr + offset));
325 /* Update the buffer length */
326 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L, datalen);
329 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
331 /* Mark it as NORMAL descriptor */
332 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
335 if (pi->ipi_csum_flags)
336 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
338 axgbe_printf(1, "%s: cur %d lo 0x%lx hi 0x%lx ds_len 0x%x "
339 "ipi_len 0x%x\n", __func__, cur,
340 lower_32_bits(pi->ipi_segs[cur_seg].ds_addr),
341 upper_32_bits(pi->ipi_segs[cur_seg].ds_addr),
342 (int)pi->ipi_segs[cur_seg].ds_len, pi->ipi_len);
348 /* Set LAST bit for the last descriptor */
349 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1);
351 /* Set IC bit based on Tx coalescing settings */
353 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
357 /* Set OWN bit for the first descriptor */
358 rdata = XGBE_GET_DESC_DATA(ring, start);
359 rdesc = rdata->rdesc;
360 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
/* Publish the new producer index back to iflib (ring size is a power of two). */
362 ring->cur = pi->ipi_new_pidx = (cur & (ring->rdesc_count - 1));
364 axgbe_printf(1, "<-- %s: end cur %d dirty %d\n", __func__, ring->cur,
/*
 * iflib ift_txd_flush callback: ring the Tx doorbell for queue 'txqid'.
 * Writes the DMA address of the descriptor at 'pidx' into the channel's
 * Tx descriptor tail pointer register (DMA_CH_TDTR_LO), which tells the
 * XGMAC to process descriptors up to that point.  The read-back check
 * skips the MMIO write when the tail pointer already holds that value.
 */
371 axgbe_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
373 struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg;
374 struct xgbe_prv_data *pdata = &sc->pdata;
375 struct xgbe_channel *channel = pdata->channel[txqid];
376 struct xgbe_ring *ring = channel->tx_ring;
377 struct xgbe_ring_data *rdata = XGBE_GET_DESC_DATA(ring, pidx);
379 axgbe_printf(1, "--> %s: flush txq %d pidx %d cur %d dirty %d\n",
380 __func__, txqid, pidx, ring->cur, ring->dirty);
382 MPASS(ring->cur == pidx);
383 if (__predict_false(ring->cur != pidx)) {
384 axgbe_error("--> %s: cur(%d) ne pidx(%d)\n", __func__,
/* Only write the tail pointer if it actually changed (avoid redundant MMIO). */
391 if (XGMAC_DMA_IOREAD(channel, DMA_CH_TDTR_LO) !=
392 lower_32_bits(rdata->rdata_paddr)) {
393 XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
394 lower_32_bits(rdata->rdata_paddr));
/*
 * iflib ift_txd_credits_update callback: reclaim completed Tx descriptors
 * on queue 'txqid'.
 *
 * Walks from ring->dirty toward ring->cur while the hardware reports each
 * descriptor complete (hw_if->tx_complete()).  With clear == false it only
 * reports whether reclaimable descriptors exist; with clear == true it
 * resets each completed descriptor (hw_if->tx_desc_reset()) and advances
 * ring->dirty.  Returns the number of reclaimed descriptors ('processed' --
 * its declaration/increment lines are elided in this listing).  The loop
 * stops at ring->cur because tx_complete() also returns true for never-used
 * descriptors.
 */
399 axgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
401 struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg;
402 struct xgbe_hw_if *hw_if = &sc->pdata.hw_if;
403 struct xgbe_prv_data *pdata = &sc->pdata;
404 struct xgbe_channel *channel = pdata->channel[txqid];
405 struct xgbe_ring *ring = channel->tx_ring;
406 struct xgbe_ring_data *rdata;
409 axgbe_printf(1, "%s: txq %d clear %d cur %d dirty %d\n",
410 __func__, txqid, clear, ring->cur, ring->dirty);
/* Nothing outstanding: producer and consumer indices match. */
412 if (__predict_false(ring->cur == ring->dirty)) {
413 axgbe_printf(1, "<-- %s: cur(%d) equals dirty(%d)\n",
414 __func__, ring->cur, ring->dirty);
418 /* Check whether the first dirty descriptor is Tx complete */
419 rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
420 if (!hw_if->tx_complete(rdata->rdesc)) {
421 axgbe_printf(1, "<-- %s: (dirty %d)\n", __func__, ring->dirty);
426 * If clear is false just let the caller know that there
427 * are descriptors to reclaim
430 axgbe_printf(1, "<-- %s: (!clear)\n", __func__);
435 hw_if->tx_desc_reset(rdata);
437 ring->dirty = (ring->dirty + 1) & (ring->rdesc_count - 1);
440 * tx_complete will return true for unused descriptors also.
441 * so, check tx_complete only until used descriptors.
443 if (ring->cur == ring->dirty)
446 rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
447 } while (hw_if->tx_complete(rdata->rdesc));
449 axgbe_printf(1, "<-- %s: processed %d cur %d dirty %d\n", __func__,
450 processed, ring->cur, ring->dirty);
/*
 * iflib ift_rxd_refill callback: post 'iru_count' fresh Rx buffers,
 * starting at descriptor index iru_pidx, on the channel for iru_qsidx.
 *
 * Split-header mode is assumed: free-list 0 (iru_flidx == 0) supplies the
 * header/buffer1 address (desc0/desc1) and free-list 1 supplies the
 * data/buffer2 address -- the desc field assignments for the lines whose
 * left-hand sides are elided in this listing.  The INTE bit on each
 * descriptor is set per the rx_usecs/rx_frames coalescing settings
 * (interrupt on every descriptor when both are 0, else every rx_frames-th
 * descriptor).  OWN is set last to hand the descriptor to hardware, and
 * ring->dirty advances modulo the (power-of-two) ring size.  A descriptor
 * that still has OWN set indicates a refill clash and is logged as an
 * error.
 */
456 axgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru)
458 struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg;
459 struct xgbe_prv_data *pdata = &sc->pdata;
460 struct xgbe_channel *channel = pdata->channel[iru->iru_qsidx];
461 struct xgbe_ring *ring = channel->rx_ring;
462 struct xgbe_ring_data *rdata;
463 struct xgbe_ring_desc *rdesc;
464 unsigned int rx_usecs = pdata->rx_usecs;
465 unsigned int rx_frames = pdata->rx_frames;
467 uint8_t count = iru->iru_count;
470 axgbe_printf(1, "--> %s: rxq %d fl %d pidx %d count %d ring cur %d "
471 "dirty %d\n", __func__, iru->iru_qsidx, iru->iru_flidx,
472 iru->iru_pidx, count, ring->cur, ring->dirty);
474 for (i = iru->iru_pidx, j = 0 ; j < count ; i++, j++) {
/* Wrap the descriptor index at the end of the ring. */
476 if (i == XGBE_RX_DESC_CNT_DEFAULT)
479 rdata = XGBE_GET_DESC_DATA(ring, i);
480 rdesc = rdata->rdesc;
482 if (__predict_false(XGMAC_GET_BITS_LE(rdesc->desc3,
483 RX_NORMAL_DESC3, OWN))) {
484 axgbe_error("%s: refill clash, cur %d dirty %d index %d"
485 "pidx %d\n", __func__, ring->cur, ring->dirty, j, i);
488 /* Assuming split header is enabled */
489 if (iru->iru_flidx == 0) {
491 /* Fill header/buffer1 address */
493 cpu_to_le32(lower_32_bits(iru->iru_paddrs[j]));
495 cpu_to_le32(upper_32_bits(iru->iru_paddrs[j]));
498 /* Fill data/buffer2 address */
500 cpu_to_le32(lower_32_bits(iru->iru_paddrs[j]));
502 cpu_to_le32(upper_32_bits(iru->iru_paddrs[j]));
504 if (!rx_usecs && !rx_frames) {
505 /* No coalescing, interrupt for every descriptor */
508 /* Set interrupt based on Rx frame coalescing setting */
510 !(((ring->dirty + 1) &(ring->rdesc_count - 1)) % rx_frames))
516 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);
/* Hand the descriptor to hardware only after addresses/INTE are written. */
518 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
522 ring->dirty = ((ring->dirty + 1) & (ring->rdesc_count - 1));
526 axgbe_printf(1, "<-- %s: rxq: %d cur: %d dirty: %d\n", __func__,
527 channel->queue_index, ring->cur, ring->dirty);
/*
 * iflib ift_rxd_flush callback: publish refilled Rx descriptors to the
 * hardware for queue 'qsidx' by writing the DMA address of the descriptor
 * at 'pidx' into the channel's Rx descriptor tail pointer register
 * (DMA_CH_RDTR_LO).
 */
531 axgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx, qidx_t pidx)
533 struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg;
534 struct xgbe_prv_data *pdata = &sc->pdata;
535 struct xgbe_channel *channel = pdata->channel[qsidx];
536 struct xgbe_ring *ring = channel->rx_ring;
537 struct xgbe_ring_data *rdata;
539 axgbe_printf(1, "--> %s: rxq %d fl %d pidx %d cur %d dirty %d\n",
540 __func__, qsidx, flidx, pidx, ring->cur, ring->dirty);
544 rdata = XGBE_GET_DESC_DATA(ring, pidx);
546 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
547 lower_32_bits(rdata->rdata_paddr));
/*
 * iflib ift_rxd_available callback: count how many complete received
 * packets are ready on queue 'qsidx', starting at descriptor 'idx', up to
 * 'budget'.
 *
 * Scans descriptors until one is still owned by hardware (OWN set).  LD
 * (last descriptor), CDA (context descriptor available) and CTXT (context
 * descriptor) bits track whether a full packet -- possibly followed by a
 * timestamp context descriptor -- has been seen; 'count' is incremented
 * only when a packet is no longer incomplete and has no pending context
 * descriptor.  Several bookkeeping lines (the bit assignments and the
 * count increment) are elided in this listing.  Returns 'count'.
 */
554 axgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t idx, qidx_t budget)
556 struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg;
557 struct xgbe_prv_data *pdata = &sc->pdata;
558 struct xgbe_channel *channel = pdata->channel[qsidx];
559 struct xgbe_ring *ring = channel->rx_ring;
560 struct xgbe_ring_data *rdata;
561 struct xgbe_ring_desc *rdesc;
564 uint8_t incomplete = 1, context_next = 0, running = 0;
566 axgbe_printf(1, "--> %s: rxq %d idx %d budget %d cur %d dirty %d\n",
567 __func__, qsidx, idx, budget, ring->cur, ring->dirty);
570 for (count = 0; count <= budget; ) {
572 rdata = XGBE_GET_DESC_DATA(ring, cur);
573 rdesc = rdata->rdesc;
/* Stop at the first descriptor hardware still owns. */
575 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
580 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD))
583 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA))
586 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT))
589 cur = (cur + 1) & (ring->rdesc_count - 1);
591 if (incomplete || context_next)
594 /* Increment pkt count & reset variables for next full packet */
601 axgbe_printf(1, "--> %s: rxq %d cur %d incomp %d con_next %d running %d "
602 "count %d\n", __func__, qsidx, cur, incomplete, context_next,
/*
 * Return the number of bytes of packet data in Rx buffer 1 (the header
 * buffer in split-header mode) for this descriptor:
 *   - 0 if this is not the FIRST descriptor of a packet;
 *   - the hardware-reported header length when the header was split;
 *   - the full buffer otherwise when more descriptors follow (not LAST);
 *   - min(256, total packet length) for a single-descriptor packet with
 *     no split header (256 presumably the buffer-1 size -- TODO confirm).
 */
609 xgbe_rx_buf1_len(struct xgbe_prv_data *pdata, struct xgbe_ring_data *rdata,
610 struct xgbe_packet_data *packet)
613 /* Always zero if not the first descriptor */
614 if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST)) {
615 axgbe_printf(1, "%s: Not First\n", __func__);
619 /* First descriptor with split header, return header length */
620 if (rdata->rx.hdr_len) {
621 axgbe_printf(1, "%s: hdr_len %d\n", __func__, rdata->rx.hdr_len);
622 return (rdata->rx.hdr_len);
625 /* First descriptor but not the last descriptor and no split header,
626 * so the full buffer was used
628 if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST)) {
629 axgbe_printf(1, "%s: Not last %d\n", __func__,
634 /* First descriptor and last descriptor and no split header, so
635 * calculate how much of the buffer was used
637 axgbe_printf(1, "%s: pkt_len %d buf_size %d\n", __func__, rdata->rx.len,
640 return (min_t(unsigned int, 256, rdata->rx.len));
/*
 * Return the number of bytes of packet data in Rx buffer 2 (the data
 * buffer) for this descriptor: the full rx_buf_size when more descriptors
 * follow (not LAST); otherwise the remainder of the packet after 'len'
 * bytes already accounted for in earlier buffers (0 if rx.len is 0).
 */
644 xgbe_rx_buf2_len(struct xgbe_prv_data *pdata, struct xgbe_ring_data *rdata,
645 struct xgbe_packet_data *packet, unsigned int len)
648 /* Always the full buffer if not the last descriptor */
649 if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST)) {
650 axgbe_printf(1, "%s: Not last %d\n", __func__, pdata->rx_buf_size);
651 return (pdata->rx_buf_size);
654 /* Last descriptor so calculate how much of the buffer was used
655 * for the last bit of data
657 return ((rdata->rx.len != 0)? (rdata->rx.len - len) : 0);
/*
 * Record one received fragment in the iflib rxd-info fragment array:
 * free-list id 'flid', descriptor index 'idx' and byte length 'len' are
 * stored at fragment position 'pos'.
 */
661 axgbe_add_frag(struct xgbe_prv_data *pdata, if_rxd_info_t ri, int idx, int len,
664 axgbe_printf(2, "idx %d len %d pos %d flid %d\n", idx, len, pos, flid);
665 ri->iri_frags[pos].irf_flid = flid;
666 ri->iri_frags[pos].irf_idx = idx;
667 ri->iri_frags[pos].irf_len = len;
/*
 * iflib ift_rxd_pkt_get callback: harvest one completed Rx packet on queue
 * ri->iri_qsidx and fill in 'ri' for the stack.
 *
 * Loops calling hw_if->dev_read() to decode descriptors into
 * ring->packet_data until the LAST descriptor (and any trailing context
 * descriptor) has been consumed, advancing ring->cur each iteration.  For
 * each data descriptor the buffer-1 (header) and buffer-2 (data) byte
 * counts are computed by xgbe_rx_buf1_len()/xgbe_rx_buf2_len() and recorded
 * as iflib fragments (free-list 0 for buffer 1, free-list 1 for buffer 2).
 * After the packet is complete, checksum-done status, VLAN tag and RSS
 * hash/type from the decoded packet attributes are copied into 'ri'.
 * Zero-length and over-sized packets are only logged, not dropped here.
 * NOTE(review): the fragment counter ('i'), len accumulation, and the
 * function's return statements are elided in this listing; the function
 * also continues past the end of this chunk.
 */
671 axgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
673 struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg;
674 struct xgbe_prv_data *pdata = &sc->pdata;
675 struct xgbe_hw_if *hw_if = &pdata->hw_if;
676 struct xgbe_channel *channel = pdata->channel[ri->iri_qsidx];
677 struct xgbe_ring *ring = channel->rx_ring;
678 struct xgbe_packet_data *packet = &ring->packet_data;
679 struct xgbe_ring_data *rdata;
680 unsigned int last, context_next, context;
681 unsigned int buf1_len, buf2_len, max_len, len = 0, prev_cur;
684 axgbe_printf(2, "%s: rxq %d cidx %d cur %d dirty %d\n", __func__,
685 ri->iri_qsidx, ri->iri_cidx, ring->cur, ring->dirty);
687 memset(packet, 0, sizeof(struct xgbe_packet_data));
/* dev_read() returns nonzero when the descriptor is still owned by hardware. */
692 if (hw_if->dev_read(channel)) {
693 axgbe_printf(2, "<-- %s: OWN bit seen on %d\n",
694 __func__, ring->cur);
698 rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
699 prev_cur = ring->cur;
700 ring->cur = (ring->cur + 1) & (ring->rdesc_count - 1);
702 last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
705 context_next = XGMAC_GET_BITS(packet->attributes,
706 RX_PACKET_ATTRIBUTES, CONTEXT_NEXT);
708 context = XGMAC_GET_BITS(packet->attributes,
709 RX_PACKET_ATTRIBUTES, CONTEXT);
712 /* Get the data length in the descriptor buffers */
713 buf1_len = xgbe_rx_buf1_len(pdata, rdata, packet);
715 buf2_len = xgbe_rx_buf2_len(pdata, rdata, packet, len);
/* Context descriptors carry no packet data. */
718 buf1_len = buf2_len = 0;
721 axgbe_printf(1, "%s: last %d context %d con_next %d buf1 %d "
722 "buf2 %d len %d frags %d error %d\n", __func__, last, context,
723 context_next, buf1_len, buf2_len, len, i, packet->errors);
725 axgbe_add_frag(pdata, ri, prev_cur, buf1_len, i, 0);
727 axgbe_add_frag(pdata, ri, prev_cur, buf2_len, i, 1);
/* Keep consuming descriptors until the whole packet has been seen. */
730 if (!last || context_next)
736 if (XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CSUM_DONE)) {
737 ri->iri_csum_flags |= CSUM_IP_CHECKED;
738 ri->iri_csum_flags |= CSUM_IP_VALID;
739 axgbe_printf(2, "%s: csum flags 0x%x\n", __func__, ri->iri_csum_flags);
742 max_len = if_getmtu(pdata->netdev) + ETH_HLEN;
743 if (XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, VLAN_CTAG)) {
744 ri->iri_flags |= M_VLANTAG;
745 ri->iri_vtag = packet->vlan_ctag;
746 max_len += VLAN_HLEN;
747 axgbe_printf(2, "%s: iri_flags 0x%x vtag 0x%x\n", __func__,
748 ri->iri_flags, ri->iri_vtag);
752 if (XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, RSS_HASH)) {
753 ri->iri_flowid = packet->rss_hash;
754 ri->iri_rsstype = packet->rss_hash_type;
755 axgbe_printf(2, "%s: hash 0x%x/0x%x rsstype 0x%x/0x%x\n",
756 __func__, packet->rss_hash, ri->iri_flowid,
757 packet->rss_hash_type, ri->iri_rsstype);
760 if (__predict_false(len == 0))
761 axgbe_error("%s: Zero len packet\n", __func__);
763 if (__predict_false(len > max_len))
764 axgbe_error("%s: Big packet %d/%d\n", __func__, len, max_len);
766 if (__predict_false(packet->errors))
767 axgbe_printf(1, "<-- %s: rxq: %d len: %d frags: %d cidx %d cur: %d "
768 "dirty: %d error 0x%x\n", __func__, ri->iri_qsidx, len, i,
769 ri->iri_cidx, ring->cur, ring->dirty, packet->errors);
771 axgbe_printf(1, "%s: Packet len %d frags %d\n", __func__, len, i);