/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef DEV_NETMAP

#include "ena_netmap.h"

#define ENA_NETMAP_MORE_FRAMES		1
#define ENA_NETMAP_NO_MORE_FRAMES	0
#define ENA_MAX_FRAMES			16384

struct ena_netmap_ctx {
	struct netmap_kring *kring;
	struct ena_adapter *adapter;
	struct netmap_adapter *na;
	struct netmap_slot *slots;
	struct ena_ring *ring;
	struct ena_com_io_cq *io_cq;
	struct ena_com_io_sq *io_sq;
	u_int nm_i;	/* Current index in the netmap ring. */
	uint16_t nt;	/* Current index in the NIC ring (next_to_use/clean). */
	uint16_t lim;	/* Netmap ring size minus one (index mask). */
};

/* Netmap callbacks */
static int ena_netmap_reg(struct netmap_adapter *, int);
static int ena_netmap_txsync(struct netmap_kring *, int);
static int ena_netmap_rxsync(struct netmap_kring *, int);

/* Helper functions */
static int ena_netmap_tx_frames(struct ena_netmap_ctx *);
static int ena_netmap_tx_frame(struct ena_netmap_ctx *);
static inline uint16_t ena_netmap_count_slots(struct ena_netmap_ctx *);
static inline uint16_t ena_netmap_packet_len(struct netmap_slot *, u_int,
    uint16_t);
static int ena_netmap_copy_data(struct netmap_adapter *,
    struct netmap_slot *, u_int, uint16_t, uint16_t, void *);
static int ena_netmap_map_single_slot(struct netmap_adapter *,
    struct netmap_slot *, bus_dma_tag_t, bus_dmamap_t, void **, uint64_t *);
static int ena_netmap_tx_map_slots(struct ena_netmap_ctx *,
    struct ena_tx_buffer *, void **, uint16_t *, uint16_t *);
static void ena_netmap_unmap_last_socket_chain(struct ena_netmap_ctx *,
    struct ena_tx_buffer *);
static void ena_netmap_tx_cleanup(struct ena_netmap_ctx *);
static uint16_t ena_netmap_tx_clean_one(struct ena_netmap_ctx *,
    uint16_t);
static inline int validate_tx_req_id(struct ena_ring *, uint16_t);
static int ena_netmap_rx_frames(struct ena_netmap_ctx *);
static int ena_netmap_rx_frame(struct ena_netmap_ctx *);
static int ena_netmap_rx_load_desc(struct ena_netmap_ctx *, uint16_t,
    int *);
static void ena_netmap_rx_cleanup(struct ena_netmap_ctx *);
static void ena_netmap_fill_ctx(struct netmap_kring *,
    struct ena_netmap_ctx *, uint16_t);

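/*
 * Register the interface with netmap: describe the ring geometry and hook
 * up the txsync/rxsync/register callbacks. NAF_MOREFRAG advertises that a
 * single packet may span multiple netmap slots.
 */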
int
ena_netmap_attach(struct ena_adapter *adapter)
{
	struct netmap_adapter na;

	ena_log_nm(adapter->pdev, INFO, "netmap attach\n");

	bzero(&na, sizeof(na));
	na.na_flags = NAF_MOREFRAG;
	na.ifp = adapter->ifp;
	na.num_tx_desc = adapter->requested_tx_ring_size;
	na.num_rx_desc = adapter->requested_rx_ring_size;
	na.num_tx_rings = adapter->num_io_queues;
	na.num_rx_rings = adapter->num_io_queues;
	na.rx_buf_maxsize = adapter->buf_ring_size;
	na.nm_txsync = ena_netmap_txsync;
	na.nm_rxsync = ena_netmap_rxsync;
	na.nm_register = ena_netmap_reg;

	return (netmap_attach(&na));
}

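/*
 * Take the next unused buffer from the Rx netmap ring and attach it to
 * rx_info: map it for DMA and record its netmap buffer index so it can be
 * handed back to userspace later. Returns 0 on success, or an errno when
 * no free slot or DMA mapping is available.
 */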
int
ena_netmap_alloc_rx_slot(struct ena_adapter *adapter,
    struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info)
{
	struct netmap_adapter *na = NA(adapter->ifp);
	struct netmap_kring *kring;
	struct netmap_ring *ring;
	struct netmap_slot *slot;
	void *addr;
	uint64_t paddr;
	int nm_i, qid, head, lim, rc;

	/* If a previously allocated buffer is still unused, keep it. */
	if (unlikely(rx_info->netmap_buf_idx != 0))
		return (0);

	qid = rx_ring->qid;
	kring = na->rx_rings[qid];
	nm_i = kring->nr_hwcur;
	head = kring->rhead;

	ena_log_nm(adapter->pdev, DBG, "nr_hwcur: %d, nr_hwtail: %d, "
	    "rhead: %d, rcur: %d, rtail: %d\n", kring->nr_hwcur,
	    kring->nr_hwtail, kring->rhead, kring->rcur, kring->rtail);

	if ((nm_i == head) && rx_ring->initialized) {
		ena_log_nm(adapter->pdev, ERR, "No free slots in netmap ring\n");
		return (ENOMEM);
	}

	ring = kring->ring;
	if (ring == NULL) {
		ena_log_nm(adapter->pdev, ERR, "Rx ring %d is NULL\n", qid);
		return (EFAULT);
	}
	slot = &ring->slot[nm_i];

	addr = PNMB(na, slot, &paddr);
	if (addr == NETMAP_BUF_BASE(na)) {
		ena_log_nm(adapter->pdev, ERR, "Bad buff in slot\n");
		return (EINVAL);
	}

	rc = netmap_load_map(na, adapter->rx_buf_tag, rx_info->map, addr);
	if (rc != 0) {
		ena_log_nm(adapter->pdev, WARN, "DMA mapping error\n");
		return (rc);
	}
	bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD);

	rx_info->ena_buf.paddr = paddr;
	rx_info->ena_buf.len = ring->nr_buf_size;
	rx_info->mbuf = NULL;
	rx_info->netmap_buf_idx = slot->buf_idx;

	slot->buf_idx = 0;

	lim = kring->nkr_num_slots - 1;
	kring->nr_hwcur = nm_next(nm_i, lim);

	return (0);
}

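/*
 * Give a buffer previously taken by ena_netmap_alloc_rx_slot() back to the
 * netmap ring: unmap it from DMA and store its buffer index in the slot
 * preceding nr_hwcur, effectively undoing the last allocation.
 */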
void
ena_netmap_free_rx_slot(struct ena_adapter *adapter,
    struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info)
{
	struct netmap_adapter *na;
	struct netmap_kring *kring;
	struct netmap_slot *slot;
	int nm_i, qid, lim;

	na = NA(adapter->ifp);
	if (na == NULL) {
		ena_log_nm(adapter->pdev, ERR, "netmap adapter is NULL\n");
		return;
	}

	if (na->rx_rings == NULL) {
		ena_log_nm(adapter->pdev, ERR, "netmap rings are NULL\n");
		return;
	}

	qid = rx_ring->qid;
	kring = na->rx_rings[qid];
	if (kring == NULL) {
		ena_log_nm(adapter->pdev, ERR,
		    "netmap kernel ring %d is NULL\n", qid);
		return;
	}

	lim = kring->nkr_num_slots - 1;
	nm_i = nm_prev(kring->nr_hwcur, lim);

	if (kring->nr_mode != NKR_NETMAP_ON)
		return;

	bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map,
	    BUS_DMASYNC_POSTREAD);
	netmap_unload_map(na, adapter->rx_buf_tag, rx_info->map);

	KASSERT(kring->ring != NULL, ("Netmap Rx ring is NULL\n"));

	slot = &kring->ring->slot[nm_i];

	ENA_WARN(slot->buf_idx != 0, adapter->ena_dev, "Overwrite slot buf\n");
	slot->buf_idx = rx_info->netmap_buf_idx;
	slot->flags = NS_BUF_CHANGED;

	rx_info->netmap_buf_idx = 0;
	kring->nr_hwcur = nm_i;
}

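/*
 * Check whether the given queue is currently operating in netmap mode.
 */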
static bool
ena_ring_in_netmap(struct ena_adapter *adapter, int qid, enum txrx x)
{
	struct netmap_adapter *na;
	struct netmap_kring *kring;

	if (adapter->ifp->if_capenable & IFCAP_NETMAP) {
		na = NA(adapter->ifp);
		kring = (x == NR_RX) ? na->rx_rings[qid] : na->tx_rings[qid];
		if (kring->nr_mode == NKR_NETMAP_ON)
			return true;
	}
	return false;
}

bool
ena_tx_ring_in_netmap(struct ena_adapter *adapter, int qid)
{
	return ena_ring_in_netmap(adapter, qid, NR_TX);
}

bool
ena_rx_ring_in_netmap(struct ena_adapter *adapter, int qid)
{
	return ena_ring_in_netmap(adapter, qid, NR_RX);
}

static void
ena_netmap_reset_ring(struct ena_adapter *adapter, int qid, enum txrx x)
{
	if (!ena_ring_in_netmap(adapter, qid, x))
		return;

	netmap_reset(NA(adapter->ifp), x, qid, 0);
	ena_log_nm(adapter->pdev, INFO, "%s ring %d is in netmap mode\n",
	    (x == NR_TX) ? "Tx" : "Rx", qid);
}

void
ena_netmap_reset_rx_ring(struct ena_adapter *adapter, int qid)
{
	ena_netmap_reset_ring(adapter, qid, NR_RX);
}

void
ena_netmap_reset_tx_ring(struct ena_adapter *adapter, int qid)
{
	ena_netmap_reset_ring(adapter, qid, NR_TX);
}

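/*
 * The netmap register callback: bring the interface down, flip the
 * requested krings into (or out of) netmap mode, and bring it back up.
 * On failure the device is reset in an attempt to restore a sane state.
 */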
static int
ena_netmap_reg(struct netmap_adapter *na, int onoff)
{
	struct ifnet *ifp = na->ifp;
	struct ena_adapter *adapter = ifp->if_softc;
	device_t pdev = adapter->pdev;
	struct netmap_kring *kring;
	enum txrx t;
	int rc, i;

	ENA_LOCK_LOCK(adapter);
	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter);
	ena_down(adapter);

	if (onoff) {
		ena_log_nm(pdev, INFO, "netmap on\n");
		for_rx_tx(t) {
			for (i = 0; i <= nma_get_nrings(na, t); i++) {
				kring = NMR(na, t)[i];
				if (nm_kring_pending_on(kring)) {
					kring->nr_mode = NKR_NETMAP_ON;
				}
			}
		}
		nm_set_native_flags(na);
	} else {
		ena_log_nm(pdev, INFO, "netmap off\n");
		nm_clear_native_flags(na);
		for_rx_tx(t) {
			for (i = 0; i <= nma_get_nrings(na, t); i++) {
				kring = NMR(na, t)[i];
				if (nm_kring_pending_off(kring)) {
					kring->nr_mode = NKR_NETMAP_OFF;
				}
			}
		}
	}

	rc = ena_up(adapter);
	if (rc != 0) {
		ena_log_nm(pdev, WARN, "ena_up failed with rc=%d\n", rc);
		adapter->reset_reason = ENA_REGS_RESET_DRIVER_INVALID_STATE;
		nm_clear_native_flags(na);
		ena_destroy_device(adapter, false);
		ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
		rc = ena_restore_device(adapter);
	}
	ENA_LOCK_UNLOCK(adapter);

	return (rc);
}

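/*
 * The netmap txsync callback: under the ring lock, transmit the slots
 * published by userspace and reclaim buffers for completed packets.
 */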
static int
ena_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct ena_netmap_ctx ctx;
	int rc = 0;

	ena_netmap_fill_ctx(kring, &ctx, ENA_IO_TXQ_IDX(kring->ring_id));
	ctx.ring = &ctx.adapter->tx_ring[kring->ring_id];

	ENA_RING_MTX_LOCK(ctx.ring);
	if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, ctx.adapter)))
		goto txsync_end;

	if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, ctx.adapter)))
		goto txsync_end;

	rc = ena_netmap_tx_frames(&ctx);
	ena_netmap_tx_cleanup(&ctx);

txsync_end:
	ENA_RING_MTX_UNLOCK(ctx.ring);

	return (rc);
}

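/*
 * Send all packets between nr_hwcur and rhead. The doorbell is rung once
 * after the loop (and earlier from ena_netmap_tx_frame() if the device
 * requires it), which batches doorbells across the whole sync call.
 */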
static int
ena_netmap_tx_frames(struct ena_netmap_ctx *ctx)
{
	struct ena_ring *tx_ring = ctx->ring;
	int rc = 0;

	ctx->nm_i = ctx->kring->nr_hwcur;
	ctx->nt = ctx->ring->next_to_use;

	__builtin_prefetch(&ctx->slots[ctx->nm_i]);

	while (ctx->nm_i != ctx->kring->rhead) {
		if ((rc = ena_netmap_tx_frame(ctx)) != 0) {
			/*
			 * When there is no space left in the Tx ring, an
			 * error is still returned. It should not be passed
			 * on to netmap, as the application already knows the
			 * current ring state from the netmap ring pointers.
			 * Returning an error there could cause the
			 * application to exit, even though a full Tx ring is
			 * a common condition.
			 */
			if (rc == ENA_COM_NO_MEM)
				rc = 0;
			break;
		}
		tx_ring->acum_pkts++;
	}

	/* If any packet was sent... */
	if (likely(ctx->nm_i != ctx->kring->nr_hwcur)) {
		/* ...send the doorbell to the device. */
		ena_com_write_sq_doorbell(ctx->io_sq);
		counter_u64_add(ctx->ring->tx_stats.doorbells, 1);
		tx_ring->acum_pkts = 0;

		ctx->ring->next_to_use = ctx->nt;
		ctx->kring->nr_hwcur = ctx->nm_i;
	}

	return (rc);
}

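/*
 * Build and post a single Tx descriptor chain from one (possibly
 * multi-slot) netmap packet. Returns ENA_COM_NO_MEM when the ring is
 * full, which the caller treats as a normal stop condition.
 */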
static int
ena_netmap_tx_frame(struct ena_netmap_ctx *ctx)
{
	struct ena_com_tx_ctx ena_tx_ctx;
	struct ena_adapter *adapter;
	struct ena_ring *tx_ring;
	struct ena_tx_buffer *tx_info;
	uint16_t req_id;
	uint16_t header_len;
	uint16_t packet_len;
	int nb_hw_desc;
	int rc;
	void *push_hdr;

	adapter = ctx->adapter;
	if (ena_netmap_count_slots(ctx) > adapter->max_tx_sgl_size) {
		ena_log_nm(adapter->pdev, WARN, "Too many slots per packet\n");
		return (EINVAL);
	}

	tx_ring = ctx->ring;

	req_id = tx_ring->free_tx_ids[ctx->nt];
	tx_info = &tx_ring->tx_buffer_info[req_id];
	tx_info->num_of_bufs = 0;
	tx_info->nm_info.sockets_used = 0;

	rc = ena_netmap_tx_map_slots(ctx, tx_info, &push_hdr, &header_len,
	    &packet_len);
	if (unlikely(rc != 0)) {
		ena_log_nm(adapter->pdev, ERR, "Failed to map Tx slot\n");
		return (rc);
	}

	bzero(&ena_tx_ctx, sizeof(struct ena_com_tx_ctx));
	ena_tx_ctx.ena_bufs = tx_info->bufs;
	ena_tx_ctx.push_header = push_hdr;
	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
	ena_tx_ctx.req_id = req_id;
	ena_tx_ctx.header_len = header_len;

	/* There are no offloads, as netmap does not support them */

	if (tx_ring->acum_pkts == DB_THRESHOLD ||
	    ena_com_is_doorbell_needed(ctx->io_sq, &ena_tx_ctx)) {
		ena_com_write_sq_doorbell(ctx->io_sq);
		counter_u64_add(tx_ring->tx_stats.doorbells, 1);
		tx_ring->acum_pkts = 0;
	}

	rc = ena_com_prepare_tx(ctx->io_sq, &ena_tx_ctx, &nb_hw_desc);
	if (unlikely(rc != 0)) {
		if (likely(rc == ENA_COM_NO_MEM)) {
			ena_log_nm(adapter->pdev, DBG,
			    "Tx ring[%d] is out of space\n", tx_ring->que->id);
		} else {
			ena_log_nm(adapter->pdev, ERR,
			    "Failed to prepare Tx bufs\n");
			ena_trigger_reset(adapter,
			    ENA_REGS_RESET_DRIVER_INVALID_STATE);
		}
		counter_u64_add(tx_ring->tx_stats.prepare_ctx_err, 1);

		ena_netmap_unmap_last_socket_chain(ctx, tx_info);
		return (rc);
	}

	counter_enter();
	counter_u64_add_protected(tx_ring->tx_stats.cnt, 1);
	counter_u64_add_protected(tx_ring->tx_stats.bytes, packet_len);
	counter_u64_add_protected(adapter->hw_stats.tx_packets, 1);
	counter_u64_add_protected(adapter->hw_stats.tx_bytes, packet_len);
	counter_exit();

	tx_info->tx_descs = nb_hw_desc;

	ctx->nt = ENA_TX_RING_IDX_NEXT(ctx->nt, ctx->ring->ring_size);

	for (unsigned int i = 0; i < tx_info->num_of_bufs; i++)
		bus_dmamap_sync(adapter->tx_buf_tag,
		    tx_info->nm_info.map_seg[i], BUS_DMASYNC_PREWRITE);

	return (0);
}

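/*
 * Count how many netmap slots, linked by NS_MOREFRAG, make up the packet
 * starting at ctx->nm_i.
 */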
static inline uint16_t
ena_netmap_count_slots(struct ena_netmap_ctx *ctx)
{
	uint16_t slots = 1;
	uint16_t nm = ctx->nm_i;

	while ((ctx->slots[nm].flags & NS_MOREFRAG) != 0) {
		slots++;
		nm = nm_next(nm, ctx->lim);
	}

	return slots;
}

static inline uint16_t
ena_netmap_packet_len(struct netmap_slot *slots, u_int slot_index,
    uint16_t limit)
{
	struct netmap_slot *nm_slot;
	uint16_t packet_size = 0;

	do {
		nm_slot = &slots[slot_index];
		packet_size += nm_slot->len;
		slot_index = nm_next(slot_index, limit);
	} while ((nm_slot->flags & NS_MOREFRAG) != 0);

	return packet_size;
}

static int
ena_netmap_copy_data(struct netmap_adapter *na, struct netmap_slot *slots,
    u_int slot_index, uint16_t limit, uint16_t bytes_to_copy, void *destination)
{
	struct netmap_slot *nm_slot;
	void *slot_vaddr;
	uint16_t data_amount;

	do {
		nm_slot = &slots[slot_index];
		slot_vaddr = NMB(na, nm_slot);
		if (unlikely(slot_vaddr == NULL))
			return (EINVAL);

		data_amount = min_t(uint16_t, bytes_to_copy, nm_slot->len);
		memcpy(destination, slot_vaddr, data_amount);
		bytes_to_copy -= data_amount;
		/* Advance within the destination buffer. */
		destination = (char *)destination + data_amount;

		slot_index = nm_next(slot_index, limit);
	} while ((nm_slot->flags & NS_MOREFRAG) != 0 && bytes_to_copy > 0);

	return (0);
}

static int
ena_netmap_map_single_slot(struct netmap_adapter *na, struct netmap_slot *slot,
    bus_dma_tag_t dmatag, bus_dmamap_t dmamap, void **vaddr, uint64_t *paddr)
{
	device_t pdev;
	int rc;

	pdev = ((struct ena_adapter *)na->ifp->if_softc)->pdev;

	*vaddr = PNMB(na, slot, paddr);
	if (unlikely(*vaddr == NULL)) {
		ena_log_nm(pdev, ERR, "Slot address is NULL\n");
		return (EINVAL);
	}

	rc = netmap_load_map(na, dmatag, dmamap, *vaddr);
	if (unlikely(rc != 0)) {
		ena_log_nm(pdev, ERR, "Failed to map slot %d for DMA\n",
		    slot->buf_idx);
		return (EINVAL);
	}

	return (0);
}

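/*
 * DMA-map every slot of the current packet and fill tx_info->bufs. In LLQ
 * (device placement) mode the packet header must be linear, so up to
 * tx_max_header_size bytes are either pointed at directly (header fits in
 * the first slot) or copied into an intermediate bounce buffer.
 */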
static int
ena_netmap_tx_map_slots(struct ena_netmap_ctx *ctx,
    struct ena_tx_buffer *tx_info, void **push_hdr, uint16_t *header_len,
    uint16_t *packet_len)
{
	struct netmap_slot *slot;
	struct ena_com_buf *ena_buf;
	struct ena_adapter *adapter;
	struct ena_ring *tx_ring;
	struct ena_netmap_tx_info *nm_info;
	bus_dmamap_t *nm_maps;
	void *vaddr;
	uint64_t paddr;
	uint32_t *nm_buf_idx;
	uint32_t slot_head_len;
	uint32_t frag_len;
	uint32_t remaining_len;
	uint16_t push_len = 0;
	uint16_t delta = 0;
	int rc = 0;

	adapter = ctx->adapter;
	tx_ring = ctx->ring;
	ena_buf = tx_info->bufs;
	nm_info = &tx_info->nm_info;
	nm_maps = nm_info->map_seg;
	nm_buf_idx = nm_info->socket_buf_idx;
	slot = &ctx->slots[ctx->nm_i];

	slot_head_len = slot->len;
	*packet_len = ena_netmap_packet_len(ctx->slots, ctx->nm_i, ctx->lim);
	remaining_len = *packet_len;

	__builtin_prefetch(&ctx->slots[ctx->nm_i + 1]);
	if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		/*
		 * When the device is in LLQ mode, the driver will copy
		 * the header into the device memory space.
		 * The ena_com layer assumes that the header is in a linear
		 * memory space.
		 * This assumption might be wrong since part of the header
		 * can be in the fragmented buffers.
		 * First, check if the header fits in the first slot. If not,
		 * copy it to a separate buffer that will hold the linearized
		 * data.
		 */
		push_len = min_t(uint32_t, *packet_len,
		    tx_ring->tx_max_header_size);
		*header_len = push_len;
		/* If header is in linear space, just point to socket's data. */
		if (likely(push_len <= slot_head_len)) {
			*push_hdr = NMB(ctx->na, slot);
			if (unlikely(*push_hdr == NULL)) {
				ena_log_nm(adapter->pdev, ERR,
				    "Slot vaddress is NULL\n");
				return (EINVAL);
			}
		/*
		 * Otherwise, copy the whole header from multiple slots
		 * to the intermediate buffer.
		 */
		} else {
			rc = ena_netmap_copy_data(ctx->na,
			    ctx->slots,
			    ctx->nm_i,
			    ctx->lim,
			    push_len,
			    tx_ring->push_buf_intermediate_buf);
			if (unlikely(rc != 0)) {
				ena_log_nm(adapter->pdev, ERR,
				    "Failed to copy data from slots to push_buf\n");
				return (EINVAL);
			}

			*push_hdr = tx_ring->push_buf_intermediate_buf;
			counter_u64_add(tx_ring->tx_stats.llq_buffer_copy, 1);

			delta = push_len - slot_head_len;
		}

		ena_log_nm(adapter->pdev, DBG,
		    "slot: %d header_buf->vaddr: %p push_len: %d\n",
		    slot->buf_idx, *push_hdr, push_len);
		/*
		 * If the header was in linear memory space, DMA-map the rest
		 * of the data in the first slot.
		 */
		if (slot_head_len > push_len) {
			rc = ena_netmap_map_single_slot(ctx->na,
			    slot,
			    adapter->tx_buf_tag,
			    *nm_maps,
			    &vaddr,
			    &paddr);
			if (unlikely(rc != 0)) {
				ena_log_nm(adapter->pdev, ERR,
				    "DMA mapping error\n");
				return (rc);
			}
			nm_maps++;

			ena_buf->paddr = paddr + push_len;
			ena_buf->len = slot->len - push_len;
			ena_buf++;

			tx_info->num_of_bufs++;
		}

		remaining_len -= slot->len;

		/* Save buf idx before advancing */
		*nm_buf_idx = slot->buf_idx;
		nm_buf_idx++;
		slot->buf_idx = 0;

		/* Advance to the next socket */
		ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
		slot = &ctx->slots[ctx->nm_i];
		nm_info->sockets_used++;

		/*
		 * If the header is in non-linear space (delta > 0), skip the
		 * slots containing only header data and map the last one,
		 * which holds both the tail of the header and packet data.
		 * The first segment is already counted in.
		 */
		while (delta > 0) {
			__builtin_prefetch(&ctx->slots[ctx->nm_i + 1]);
			frag_len = slot->len;

			/*
			 * If the whole segment contains only header data,
			 * just move to the next one and reduce delta.
			 */
			if (unlikely(delta >= frag_len)) {
				delta -= frag_len;
			} else {
				/*
				 * Map the data and then assign it with the
				 * offsets.
				 */
				rc = ena_netmap_map_single_slot(ctx->na,
				    slot,
				    adapter->tx_buf_tag,
				    *nm_maps,
				    &vaddr,
				    &paddr);
				if (unlikely(rc != 0)) {
					ena_log_nm(adapter->pdev, ERR,
					    "DMA mapping error\n");
					goto error_map;
				}
				nm_maps++;

				ena_buf->paddr = paddr + delta;
				ena_buf->len = slot->len - delta;
				ena_buf++;

				tx_info->num_of_bufs++;
				delta = 0;
			}

			remaining_len -= slot->len;

			/* Save buf idx before advancing */
			*nm_buf_idx = slot->buf_idx;
			nm_buf_idx++;
			slot->buf_idx = 0;

			/* Advance to the next socket */
			ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
			slot = &ctx->slots[ctx->nm_i];
			nm_info->sockets_used++;
		}
	} else {
		*push_hdr = NULL;
		/*
		 * header_len is just a hint for the device. Because netmap
		 * does not give us any information about the packet header
		 * length and it is not guaranteed that all packet headers
		 * will be in the 1st slot, setting header_len to 0 makes the
		 * device ignore this value and resolve the header on its own.
		 */
		*header_len = 0;
	}

	/* Map all remaining data (regular routine for non-LLQ mode) */
	while (remaining_len > 0) {
		__builtin_prefetch(&ctx->slots[ctx->nm_i + 1]);

		rc = ena_netmap_map_single_slot(ctx->na,
		    slot,
		    adapter->tx_buf_tag,
		    *nm_maps,
		    &vaddr,
		    &paddr);
		if (unlikely(rc != 0)) {
			ena_log_nm(adapter->pdev, ERR,
			    "DMA mapping error\n");
			goto error_map;
		}
		nm_maps++;

		ena_buf->paddr = paddr;
		ena_buf->len = slot->len;
		ena_buf++;

		tx_info->num_of_bufs++;

		remaining_len -= slot->len;

		/* Save buf idx before advancing */
		*nm_buf_idx = slot->buf_idx;
		nm_buf_idx++;
		slot->buf_idx = 0;

		/* Advance to the next socket */
		ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
		slot = &ctx->slots[ctx->nm_i];
		nm_info->sockets_used++;
	}

	return (0);

error_map:
	ena_netmap_unmap_last_socket_chain(ctx, tx_info);

	return (rc);
}

static void
ena_netmap_unmap_last_socket_chain(struct ena_netmap_ctx *ctx,
    struct ena_tx_buffer *tx_info)
{
	struct ena_netmap_tx_info *nm_info;
	int n;

	nm_info = &tx_info->nm_info;

	/*
	 * The number of used netmap sockets need not equal the number of DMA
	 * buffers (in LLQ mode part of the header may be copied rather than
	 * mapped), so the two must be released separately.
	 * First, unmap the DMA maps.
	 */
	n = tx_info->num_of_bufs;
	while (n--) {
		netmap_unload_map(ctx->na, ctx->adapter->tx_buf_tag,
		    nm_info->map_seg[n]);
	}
	tx_info->num_of_bufs = 0;

	/* Next, return the sockets back to the userspace */
	n = nm_info->sockets_used;
	while (n--) {
		ctx->slots[ctx->nm_i].buf_idx = nm_info->socket_buf_idx[n];
		ctx->slots[ctx->nm_i].flags = NS_BUF_CHANGED;
		nm_info->socket_buf_idx[n] = 0;
		ctx->nm_i = nm_prev(ctx->nm_i, ctx->lim);
	}
	nm_info->sockets_used = 0;
}

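/*
 * Reclaim completed Tx descriptors from the completion queue, return
 * their buffers to the netmap ring and acknowledge them to the device.
 */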
static void
ena_netmap_tx_cleanup(struct ena_netmap_ctx *ctx)
{
	uint16_t req_id;
	uint16_t total_tx_descs = 0;

	ctx->nm_i = ctx->kring->nr_hwtail;
	ctx->nt = ctx->ring->next_to_clean;

	/* Reclaim buffers for completed transmissions */
	while (ena_com_tx_comp_req_id_get(ctx->io_cq, &req_id) >= 0) {
		if (validate_tx_req_id(ctx->ring, req_id) != 0)
			break;
		total_tx_descs += ena_netmap_tx_clean_one(ctx, req_id);
	}

	ctx->kring->nr_hwtail = ctx->nm_i;

	if (total_tx_descs > 0) {
		/* acknowledge completion of sent packets */
		ctx->ring->next_to_clean = ctx->nt;
		ena_com_comp_ack(ctx->ring->ena_com_io_sq, total_tx_descs);
		ena_com_update_dev_comp_head(ctx->ring->ena_com_io_cq);
	}
}

static uint16_t
ena_netmap_tx_clean_one(struct ena_netmap_ctx *ctx, uint16_t req_id)
{
	struct ena_tx_buffer *tx_info;
	struct ena_netmap_tx_info *nm_info;
	int n;

	tx_info = &ctx->ring->tx_buffer_info[req_id];
	nm_info = &tx_info->nm_info;

	/*
	 * The number of used netmap sockets need not equal the number of DMA
	 * buffers (in LLQ mode part of the header may be copied rather than
	 * mapped), so the two must be released separately.
	 * First, unmap the DMA maps.
	 */
	for (n = 0; n < tx_info->num_of_bufs; n++) {
		netmap_unload_map(ctx->na, ctx->adapter->tx_buf_tag,
		    nm_info->map_seg[n]);
	}
	tx_info->num_of_bufs = 0;

	/* Next, return the sockets back to the userspace */
	for (n = 0; n < nm_info->sockets_used; n++) {
		ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
		ENA_WARN(ctx->slots[ctx->nm_i].buf_idx != 0,
		    ctx->adapter->ena_dev, "Tx idx is not 0.\n");
		ctx->slots[ctx->nm_i].buf_idx = nm_info->socket_buf_idx[n];
		ctx->slots[ctx->nm_i].flags = NS_BUF_CHANGED;
		nm_info->socket_buf_idx[n] = 0;
	}
	nm_info->sockets_used = 0;

	ctx->ring->free_tx_ids[ctx->nt] = req_id;
	ctx->nt = ENA_TX_RING_IDX_NEXT(ctx->nt, ctx->ring->ring_size);

	return tx_info->tx_descs;
}

static inline int
validate_tx_req_id(struct ena_ring *tx_ring, uint16_t req_id)
{
	struct ena_adapter *adapter = tx_ring->adapter;

	if (likely(req_id < tx_ring->ring_size))
		return (0);

	ena_log_nm(adapter->pdev, WARN, "Invalid req_id: %hu\n", req_id);
	counter_u64_add(tx_ring->tx_stats.bad_req_id, 1);

	ena_trigger_reset(adapter, ENA_REGS_RESET_INV_TX_REQ_ID);

	return (EFAULT);
}

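/*
 * The netmap rxsync callback: deliver received frames to the netmap ring
 * and refill the device with the buffers released by userspace.
 */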
static int
ena_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct ena_netmap_ctx ctx;
	int rc;

	ena_netmap_fill_ctx(kring, &ctx, ENA_IO_RXQ_IDX(kring->ring_id));
	ctx.ring = &ctx.adapter->rx_ring[kring->ring_id];

	if (ctx.kring->rhead > ctx.lim) {
		/* Probably not needed to release slots from RX ring. */
		return (netmap_ring_reinit(ctx.kring));
	}

	if (unlikely((if_getdrvflags(ctx.na->ifp) & IFF_DRV_RUNNING) == 0))
		return (0);

	if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, ctx.adapter)))
		return (0);

	if ((rc = ena_netmap_rx_frames(&ctx)) != 0)
		return (rc);

	ena_netmap_rx_cleanup(&ctx);

	return (0);
}

static int
ena_netmap_rx_frames(struct ena_netmap_ctx *ctx)
{
	int rc = 0;
	int frames_counter = 0;

	ctx->nt = ctx->ring->next_to_clean;
	ctx->nm_i = ctx->kring->nr_hwtail;

	while ((rc = ena_netmap_rx_frame(ctx)) == ENA_NETMAP_MORE_FRAMES) {
		frames_counter++;
		/* Receiving multiple frames in a row is not an error. */
		rc = 0;
		if (frames_counter > ENA_MAX_FRAMES) {
			ena_log_nm(ctx->adapter->pdev, ERR,
			    "Driver is stuck in the Rx loop\n");
			break;
		}
	}

	ctx->kring->nr_hwtail = ctx->nm_i;
	ctx->kring->nr_kflags &= ~NKR_PENDINTR;
	ctx->ring->next_to_clean = ctx->nt;

	return (rc);
}

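/*
 * Receive one frame from the device and load its descriptors into
 * consecutive netmap slots. Returns ENA_NETMAP_MORE_FRAMES until the
 * completion queue is drained.
 */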
static int
ena_netmap_rx_frame(struct ena_netmap_ctx *ctx)
{
	struct ena_com_rx_ctx ena_rx_ctx;
	enum ena_regs_reset_reason_types reset_reason;
	int rc, len = 0;
	uint16_t buf, nm;

	ena_rx_ctx.ena_bufs = ctx->ring->ena_bufs;
	ena_rx_ctx.max_bufs = ctx->adapter->max_rx_sgl_size;
	bus_dmamap_sync(ctx->io_cq->cdesc_addr.mem_handle.tag,
	    ctx->io_cq->cdesc_addr.mem_handle.map, BUS_DMASYNC_POSTREAD);

	rc = ena_com_rx_pkt(ctx->io_cq, ctx->io_sq, &ena_rx_ctx);
	if (unlikely(rc != 0)) {
		ena_log_nm(ctx->adapter->pdev, ERR,
		    "Failed to read pkt from the device with error: %d\n", rc);
		if (rc == ENA_COM_NO_SPACE) {
			counter_u64_add(ctx->ring->rx_stats.bad_desc_num, 1);
			reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
		} else {
			counter_u64_add(ctx->ring->rx_stats.bad_req_id, 1);
			reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
		}
		ena_trigger_reset(ctx->adapter, reset_reason);
		return (rc);
	}

	if (unlikely(ena_rx_ctx.descs == 0))
		return (ENA_NETMAP_NO_MORE_FRAMES);

	ena_log_nm(ctx->adapter->pdev, DBG,
	    "Rx: q %d got packet from ena. descs #:"
	    " %d l3 proto %d l4 proto %d hash: %x\n", ctx->ring->qid,
	    ena_rx_ctx.descs, ena_rx_ctx.l3_proto, ena_rx_ctx.l4_proto,
	    ena_rx_ctx.hash);

	for (buf = 0; buf < ena_rx_ctx.descs; buf++)
		if ((rc = ena_netmap_rx_load_desc(ctx, buf, &len)) != 0)
			break;
	/*
	 * ena_netmap_rx_load_desc doesn't know the number of descriptors.
	 * It just sets the NS_MOREFRAG flag on every slot; here the flag of
	 * the last slot is cleared.
	 */
	ctx->slots[nm_prev(ctx->nm_i, ctx->lim)].flags = NS_BUF_CHANGED;

	if (rc != 0)
		goto rx_clear_desc;

	bus_dmamap_sync(ctx->io_cq->cdesc_addr.mem_handle.tag,
	    ctx->io_cq->cdesc_addr.mem_handle.map, BUS_DMASYNC_PREREAD);

	counter_enter();
	counter_u64_add_protected(ctx->ring->rx_stats.bytes, len);
	counter_u64_add_protected(ctx->adapter->hw_stats.rx_bytes, len);
	counter_u64_add_protected(ctx->ring->rx_stats.cnt, 1);
	counter_u64_add_protected(ctx->adapter->hw_stats.rx_packets, 1);
	counter_exit();

	return (ENA_NETMAP_MORE_FRAMES);

rx_clear_desc:
	nm = ctx->nm_i;

	/* Remove the failed packet from the ring */
	while (buf--) {
		ctx->slots[nm].flags = 0;
		ctx->slots[nm].len = 0;
		nm = nm_prev(nm, ctx->lim);
	}

	return (rc);
}

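/*
 * Move a single Rx descriptor into the netmap slot at ctx->nm_i: unmap
 * the buffer, hand its index to the slot and recycle req_id for refill.
 */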
static int
ena_netmap_rx_load_desc(struct ena_netmap_ctx *ctx, uint16_t buf, int *len)
{
	struct ena_rx_buffer *rx_info;
	uint16_t req_id;

	req_id = ctx->ring->ena_bufs[buf].req_id;
	rx_info = &ctx->ring->rx_buffer_info[req_id];
	bus_dmamap_sync(ctx->adapter->rx_buf_tag, rx_info->map,
	    BUS_DMASYNC_POSTREAD);
	netmap_unload_map(ctx->na, ctx->adapter->rx_buf_tag, rx_info->map);

	ENA_WARN(ctx->slots[ctx->nm_i].buf_idx != 0, ctx->adapter->ena_dev,
	    "Rx idx is not 0.\n");

	ctx->slots[ctx->nm_i].buf_idx = rx_info->netmap_buf_idx;
	rx_info->netmap_buf_idx = 0;
	/*
	 * Set NS_MOREFRAG on all slots.
	 * Then ena_netmap_rx_frame clears it from the last one.
	 */
	ctx->slots[ctx->nm_i].flags |= NS_MOREFRAG | NS_BUF_CHANGED;
	ctx->slots[ctx->nm_i].len = ctx->ring->ena_bufs[buf].len;
	*len += ctx->slots[ctx->nm_i].len;
	ctx->ring->free_rx_ids[ctx->nt] = req_id;
	ena_log_nm(ctx->adapter->pdev, DBG,
	    "rx_info %p, buf_idx %d, paddr %jx, nm: %d\n",
	    rx_info, ctx->slots[ctx->nm_i].buf_idx,
	    (uintmax_t)rx_info->ena_buf.paddr, ctx->nm_i);

	ctx->nm_i = nm_next(ctx->nm_i, ctx->lim);
	ctx->nt = ENA_RX_RING_IDX_NEXT(ctx->nt, ctx->ring->ring_size);

	return (0);
}

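/*
 * Refill the device with as many buffers as userspace has released,
 * i.e. the distance from nr_hwcur to rhead (modulo the ring size).
 */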
static void
ena_netmap_rx_cleanup(struct ena_netmap_ctx *ctx)
{
	int refill_required;

	refill_required = ctx->kring->rhead - ctx->kring->nr_hwcur;
	if (ctx->kring->nr_hwcur != ctx->kring->nr_hwtail)
		refill_required -= 1;

	if (refill_required == 0)
		return;
	else if (refill_required < 0)
		refill_required += ctx->kring->nkr_num_slots;

	ena_refill_rx_bufs(ctx->ring, refill_required);
}

static void
ena_netmap_fill_ctx(struct netmap_kring *kring, struct ena_netmap_ctx *ctx,
    uint16_t ena_qid)
{
	ctx->kring = kring;
	ctx->na = kring->na;
	ctx->adapter = ctx->na->ifp->if_softc;
	ctx->lim = kring->nkr_num_slots - 1;
	ctx->io_cq = &ctx->adapter->ena_dev->io_cq_queues[ena_qid];
	ctx->io_sq = &ctx->adapter->ena_dev->io_sq_queues[ena_qid];
	ctx->slots = kring->ring->slot;
}

void
ena_netmap_unload(struct ena_adapter *adapter, bus_dmamap_t map)
{
	struct netmap_adapter *na = NA(adapter->ifp);

	netmap_unload_map(na, adapter->tx_buf_tag, map);
}

#endif /* DEV_NETMAP */