/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2023 Google LLC
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "gve.h"
#include "gve_adminq.h"

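/*
 * Receive path for the QPL (queue page list) queue format: buffers live in
 * pre-registered QPL pages, descriptors carry a small sequence number (see
 * gve_next_seqno), and completed fragments are either handed to the stack
 * in place or copied into stack-owned mbufs (see gve_rx_create_mbuf).
 */
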
static void
gve_rx_free_ring(struct gve_priv *priv, int i)
{
	struct gve_rx_ring *rx = &priv->rx[i];
	struct gve_ring_com *com = &rx->com;

	/* Safe to call even if never allocated */
	gve_free_counters((counter_u64_t *)&rx->stats, NUM_RX_STATS);

	if (rx->page_info != NULL) {
		free(rx->page_info, M_GVE);
		rx->page_info = NULL;
	}

	if (rx->data_ring != NULL) {
		gve_dma_free_coherent(&rx->data_ring_mem);
		rx->data_ring = NULL;
	}

	if (rx->desc_ring != NULL) {
		gve_dma_free_coherent(&rx->desc_ring_mem);
		rx->desc_ring = NULL;
	}

	if (com->q_resources != NULL) {
		gve_dma_free_coherent(&com->q_resources_mem);
		com->q_resources = NULL;
	}
}

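/*
 * Point every data-ring slot at the start of its QPL page and record the
 * page and its kernel mapping for each slot, so the completion path can
 * build mbufs without further lookups. Runs once at ring-allocation time;
 * later re-posting goes through gve_rx_clear_data_ring instead.
 */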
static void
gve_prefill_rx_slots(struct gve_rx_ring *rx)
{
	struct gve_ring_com *com = &rx->com;
	struct gve_dma_handle *dma;
	int i;

	for (i = 0; i < com->priv->rx_desc_cnt; i++) {
		rx->data_ring[i].qpl_offset = htobe64(PAGE_SIZE * i);
		rx->page_info[i].page_offset = 0;
		rx->page_info[i].page_address = com->qpl->dmas[i].cpu_addr;
		rx->page_info[i].page = com->qpl->pages[i];

		dma = &com->qpl->dmas[i];
		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREREAD);
	}

	bus_dmamap_sync(rx->data_ring_mem.tag, rx->data_ring_mem.map,
	    BUS_DMASYNC_PREWRITE);
}

static int
gve_rx_alloc_ring(struct gve_priv *priv, int i)
{
	struct gve_rx_ring *rx = &priv->rx[i];
	struct gve_ring_com *com = &rx->com;
	int err;

	com->priv = priv;
	com->id = i;

	rx->mask = priv->rx_pages_per_qpl - 1;

	com->qpl = &priv->qpls[priv->tx_cfg.max_queues + i];
	if (com->qpl == NULL) {
		device_printf(priv->dev, "No QPL left for rx ring %d", i);
		return (ENOMEM);
	}

	rx->page_info = malloc(priv->rx_desc_cnt * sizeof(*rx->page_info), M_GVE,
	    M_WAITOK | M_ZERO);

	gve_alloc_counters((counter_u64_t *)&rx->stats, NUM_RX_STATS);

	err = gve_dma_alloc_coherent(priv, sizeof(struct gve_queue_resources),
	    PAGE_SIZE, &com->q_resources_mem);
	if (err != 0) {
		device_printf(priv->dev, "Failed to alloc queue resources for rx ring %d", i);
		goto abort;
	}
	com->q_resources = com->q_resources_mem.cpu_addr;

	err = gve_dma_alloc_coherent(priv,
	    sizeof(struct gve_rx_desc) * priv->rx_desc_cnt,
	    CACHE_LINE_SIZE, &rx->desc_ring_mem);
	if (err != 0) {
		device_printf(priv->dev, "Failed to alloc desc ring for rx ring %d", i);
		goto abort;
	}
	rx->desc_ring = rx->desc_ring_mem.cpu_addr;

	err = gve_dma_alloc_coherent(priv,
	    sizeof(union gve_rx_data_slot) * priv->rx_desc_cnt,
	    CACHE_LINE_SIZE, &rx->data_ring_mem);
	if (err != 0) {
		device_printf(priv->dev, "Failed to alloc data ring for rx ring %d", i);
		goto abort;
	}
	rx->data_ring = rx->data_ring_mem.cpu_addr;

	gve_prefill_rx_slots(rx);
	return (0);

abort:
	gve_rx_free_ring(priv, i);
	return (err);
}

int
gve_alloc_rx_rings(struct gve_priv *priv)
{
	int err = 0;
	int i;

	priv->rx = malloc(sizeof(struct gve_rx_ring) * priv->rx_cfg.num_queues,
	    M_GVE, M_WAITOK | M_ZERO);

	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
		err = gve_rx_alloc_ring(priv, i);
		if (err != 0)
			goto free_rings;
	}

	return (0);

free_rings:
	while (i--)
		gve_rx_free_ring(priv, i);
	free(priv->rx, M_GVE);
	return (err);
}

void
gve_free_rx_rings(struct gve_priv *priv)
{
	int i;

	for (i = 0; i < priv->rx_cfg.num_queues; i++)
		gve_rx_free_ring(priv, i);

	free(priv->rx, M_GVE);
}

static void
gve_rx_clear_data_ring(struct gve_rx_ring *rx)
{
	struct gve_priv *priv = rx->com.priv;
	int i;

	/*
	 * The Rx data ring has this invariant: "the networking stack is not
	 * using the buffer beginning at any page_offset". This invariant is
	 * established initially by gve_prefill_rx_slots at alloc-time and is
	 * maintained by the cleanup taskqueue. This invariant implies that the
	 * ring can be considered to be fully posted with buffers at this point,
	 * even if there are unfreed mbufs still being processed, which is why we
	 * can fill the ring without waiting on can_flip at each slot to become true.
	 */
	for (i = 0; i < priv->rx_desc_cnt; i++) {
		rx->data_ring[i].qpl_offset = htobe64(PAGE_SIZE * i +
		    rx->page_info[i].page_offset);
		rx->fill_cnt++;
	}

	bus_dmamap_sync(rx->data_ring_mem.tag, rx->data_ring_mem.map,
	    BUS_DMASYNC_PREWRITE);
}

static void
gve_rx_clear_desc_ring(struct gve_rx_ring *rx)
{
	struct gve_priv *priv = rx->com.priv;
	int i;

	for (i = 0; i < priv->rx_desc_cnt; i++)
		rx->desc_ring[i] = (struct gve_rx_desc){};

	bus_dmamap_sync(rx->desc_ring_mem.tag, rx->desc_ring_mem.map,
	    BUS_DMASYNC_PREWRITE);
}

static void
gve_clear_rx_ring(struct gve_priv *priv, int i)
{
	struct gve_rx_ring *rx = &priv->rx[i];

	rx->seq_no = 1;
	rx->cnt = 0;
	rx->fill_cnt = 0;
	rx->mask = priv->rx_desc_cnt - 1;

	gve_rx_clear_desc_ring(rx);
	gve_rx_clear_data_ring(rx);
}

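/*
 * Bring up the per-ring LRO state and the cleanup taskqueue, then ring the
 * doorbell with fill_cnt so the device sees the already-posted buffers.
 */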
static void
gve_start_rx_ring(struct gve_priv *priv, int i)
{
	struct gve_rx_ring *rx = &priv->rx[i];
	struct gve_ring_com *com = &rx->com;

	if ((if_getcapenable(priv->ifp) & IFCAP_LRO) != 0) {
		if (tcp_lro_init(&rx->lro) != 0)
			device_printf(priv->dev, "Failed to init lro for rx ring %d", i);
		rx->lro.ifp = priv->ifp;
	}

	NET_TASK_INIT(&com->cleanup_task, 0, gve_rx_cleanup_tq, rx);
	com->cleanup_tq = taskqueue_create_fast("gve rx", M_WAITOK,
	    taskqueue_thread_enqueue, &com->cleanup_tq);

	taskqueue_start_threads(&com->cleanup_tq, 1, PI_NET,
	    "%s rxq %d", device_get_nameunit(priv->dev), i);

	gve_db_bar_write_4(priv, com->db_offset, rx->fill_cnt);
}

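/*
 * Register the rings with the device over the admin queue, then read back
 * the doorbell and counter indices the device assigned to each ring before
 * starting it.
 */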
int
gve_create_rx_rings(struct gve_priv *priv)
{
	struct gve_ring_com *com;
	struct gve_rx_ring *rx;
	int err;
	int i;

	if (gve_get_state_flag(priv, GVE_STATE_FLAG_RX_RINGS_OK))
		return (0);

	for (i = 0; i < priv->rx_cfg.num_queues; i++)
		gve_clear_rx_ring(priv, i);

	err = gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues);
	if (err != 0)
		return (err);

	bus_dmamap_sync(priv->irqs_db_mem.tag, priv->irqs_db_mem.map,
	    BUS_DMASYNC_POSTREAD);

	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
		rx = &priv->rx[i];
		com = &rx->com;

		com->irq_db_offset = 4 * be32toh(priv->irq_db_indices[com->ntfy_id].index);

		bus_dmamap_sync(com->q_resources_mem.tag, com->q_resources_mem.map,
		    BUS_DMASYNC_POSTREAD);
		com->db_offset = 4 * be32toh(com->q_resources->db_index);
		com->counter_idx = be32toh(com->q_resources->counter_index);

		gve_start_rx_ring(priv, i);
	}

	gve_set_state_flag(priv, GVE_STATE_FLAG_RX_RINGS_OK);
	return (0);
}

static void
gve_stop_rx_ring(struct gve_priv *priv, int i)
{
	struct gve_rx_ring *rx = &priv->rx[i];
	struct gve_ring_com *com = &rx->com;

	if (com->cleanup_tq != NULL) {
		taskqueue_quiesce(com->cleanup_tq);
		taskqueue_free(com->cleanup_tq);
		com->cleanup_tq = NULL;
	}

	tcp_lro_free(&rx->lro);
	rx->ctx = (struct gve_rx_ctx){};
}

int
gve_destroy_rx_rings(struct gve_priv *priv)
{
	int err;
	int i;

	for (i = 0; i < priv->rx_cfg.num_queues; i++)
		gve_stop_rx_ring(priv, i);

	if (gve_get_state_flag(priv, GVE_STATE_FLAG_RX_RINGS_OK)) {
		err = gve_adminq_destroy_rx_queues(priv, priv->rx_cfg.num_queues);
		if (err != 0)
			return (err);
		gve_clear_state_flag(priv, GVE_STATE_FLAG_RX_RINGS_OK);
	}

	return (0);
}

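/*
 * Interrupt filter: mask further interrupts for this ring and defer the
 * actual descriptor processing to the cleanup taskqueue.
 */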
int
gve_rx_intr(void *arg)
{
	struct gve_rx_ring *rx = arg;
	struct gve_priv *priv = rx->com.priv;
	struct gve_ring_com *com = &rx->com;

	if (__predict_false((if_getdrvflags(priv->ifp) & IFF_DRV_RUNNING) == 0))
		return (FILTER_STRAY);

	gve_db_bar_write_4(priv, com->irq_db_offset, GVE_IRQ_MASK);
	taskqueue_enqueue(rx->com.cleanup_tq, &rx->com.cleanup_task);
	return (FILTER_HANDLED);
}

static void
gve_set_rss_type(__be16 flag, struct mbuf *mbuf)
{
	if ((flag & GVE_RXF_IPV4) != 0) {
		if ((flag & GVE_RXF_TCP) != 0)
			M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV4);
		else if ((flag & GVE_RXF_UDP) != 0)
			M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV4);
		else
			M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV4);
		return;
	}

	if ((flag & GVE_RXF_IPV6) != 0) {
		if ((flag & GVE_RXF_TCP) != 0)
			M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV6);
		else if ((flag & GVE_RXF_UDP) != 0)
			M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV6);
		else
			M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV6);
		return;
	}
}

static void
gve_mextadd_free(struct mbuf *mbuf)
{
	vm_page_t page = (vm_page_t)mbuf->m_ext.ext_arg1;
	vm_offset_t va = (vm_offset_t)mbuf->m_ext.ext_arg2;

	/*
	 * Free the page only if this is the last ref.
	 * The interface might no longer exist by the time
	 * this callback is called, see gve_free_qpl.
	 */
	if (__predict_false(vm_page_unwire_noq(page))) {
		pmap_qremove(va, 1);
		kva_free(va, PAGE_SIZE);
		vm_page_free(page);
	}
}

static void
gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info, __be64 *slot_addr)
{
	const __be64 offset = htobe64(GVE_DEFAULT_RX_BUFFER_OFFSET);

	page_info->page_offset ^= GVE_DEFAULT_RX_BUFFER_OFFSET;
	*(slot_addr) ^= offset;
}

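/*
 * Build an mbuf for one received fragment. Three cases:
 *  - small, single-fragment packets (<= rx_copybreak) are copied into a
 *    freshly allocated mbuf;
 *  - if the stack no longer holds a reference to the buffer's page
 *    (can_flip), the mbuf points straight into the QPL page and the slot is
 *    flipped to the other half of the page;
 *  - otherwise the fragment is copied into a cluster mbuf.
 */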
static struct mbuf *
gve_rx_create_mbuf(struct gve_priv *priv, struct gve_rx_ring *rx,
    struct gve_rx_slot_page_info *page_info, uint16_t len,
    union gve_rx_data_slot *data_slot, bool is_only_frag)
{
	struct gve_rx_ctx *ctx = &rx->ctx;
	struct mbuf *mbuf;
	uint32_t ref_count;
	bool can_flip;

	uint32_t offset = page_info->page_offset + page_info->pad;
	void *va = (char *)page_info->page_address + offset;

	if (len <= priv->rx_copybreak && is_only_frag) {
		mbuf = m_get2(len, M_NOWAIT, MT_DATA, M_PKTHDR);
		if (__predict_false(mbuf == NULL))
			return (NULL);

		m_copyback(mbuf, 0, len, va);
		counter_enter();
		counter_u64_add_protected(rx->stats.rx_copybreak_cnt, 1);
		counter_exit();
		ctx->mbuf_head = mbuf;
		ctx->mbuf_tail = mbuf;
	} else {
		struct mbuf *mbuf_tail = ctx->mbuf_tail;
		KASSERT(len <= MCLBYTES, ("gve rx fragment bigger than cluster mbuf"));

		/*
		 * This page was created with VM_ALLOC_WIRED, thus the lowest
		 * wire count experienced by the page until the interface is
		 * detached is 1.
		 *
		 * We wire the page again before supplying an mbuf pointing to
		 * it to the networking stack, so before the mbuf leaves the
		 * driver, the wire count rises to 2.
		 *
		 * If it is 1 again, it necessarily means that the mbuf has been
		 * consumed and it was gve_mextadd_free that brought down the wire
		 * count back to 1. We only need to eventually observe the 1.
		 */
		ref_count = atomic_load_int(&page_info->page->ref_count);
		can_flip = VPRC_WIRE_COUNT(ref_count) == 1;

		if (mbuf_tail == NULL) {
			if (can_flip)
				mbuf = m_gethdr(M_NOWAIT, MT_DATA);
			else
				mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);

			ctx->mbuf_head = mbuf;
			ctx->mbuf_tail = mbuf;
		} else {
			if (can_flip)
				mbuf = m_get(M_NOWAIT, MT_DATA);
			else
				mbuf = m_getcl(M_NOWAIT, MT_DATA, 0);

			mbuf_tail->m_next = mbuf;
			ctx->mbuf_tail = mbuf;
		}

		if (__predict_false(mbuf == NULL))
			return (NULL);

		if (can_flip) {
			MEXTADD(mbuf, va, len, gve_mextadd_free,
			    page_info->page, page_info->page_address,
			    0, EXT_NET_DRV);

			counter_enter();
			counter_u64_add_protected(rx->stats.rx_frag_flip_cnt, 1);
			counter_exit();

			/*
			 * Grab an extra ref to the page so that gve_mextadd_free
			 * does not end up freeing the page while the interface exists.
			 */
			vm_page_wire(page_info->page);

			gve_rx_flip_buff(page_info, &data_slot->qpl_offset);
		} else {
			m_copyback(mbuf, 0, len, va);
			counter_enter();
			counter_u64_add_protected(rx->stats.rx_frag_copy_cnt, 1);
			counter_exit();
		}
	}

	mbuf->m_len = len;
	ctx->total_size += len;

	return (mbuf);
}

static bool
gve_needs_rss(__be16 flag)
{
	if ((flag & GVE_RXF_FRAG) != 0)
		return (false);
	if ((flag & (GVE_RXF_IPV4 | GVE_RXF_IPV6)) != 0)
		return (true);
	return (false);
}

static void
gve_rx(struct gve_priv *priv, struct gve_rx_ring *rx, struct gve_rx_desc *desc,
    uint32_t idx)
{
	struct gve_rx_slot_page_info *page_info;
	struct gve_dma_handle *page_dma_handle;
	union gve_rx_data_slot *data_slot;
	struct gve_rx_ctx *ctx = &rx->ctx;
	struct mbuf *mbuf = NULL;
	if_t ifp = priv->ifp;
	bool do_if_input = false;
	uint16_t len;

	bool is_first_frag = ctx->frag_cnt == 0;
	bool is_last_frag = !(GVE_RXF_PKT_CONT & desc->flags_seq);
	bool is_only_frag = is_first_frag && is_last_frag;

	if (__predict_false(ctx->drop_pkt))
		goto finish_frag;

	if ((desc->flags_seq & GVE_RXF_ERR) != 0) {
		ctx->drop_pkt = true;
		counter_enter();
		counter_u64_add_protected(rx->stats.rx_dropped_pkt_desc_err, 1);
		counter_u64_add_protected(rx->stats.rx_dropped_pkt, 1);
		counter_exit();
		m_freem(ctx->mbuf_head);
		goto finish_frag;
	}

	page_info = &rx->page_info[idx];
	data_slot = &rx->data_ring[idx];
	page_dma_handle = &(rx->com.qpl->dmas[idx]);

	page_info->pad = is_first_frag ? GVE_RX_PAD : 0;
	len = be16toh(desc->len) - page_info->pad;

	bus_dmamap_sync(page_dma_handle->tag, page_dma_handle->map,
	    BUS_DMASYNC_POSTREAD);

	mbuf = gve_rx_create_mbuf(priv, rx, page_info, len, data_slot,
	    is_only_frag);
	if (mbuf == NULL) {
		ctx->drop_pkt = true;
		counter_enter();
		counter_u64_add_protected(rx->stats.rx_dropped_pkt_mbuf_alloc_fail, 1);
		counter_u64_add_protected(rx->stats.rx_dropped_pkt, 1);
		counter_exit();
		m_freem(ctx->mbuf_head);
		goto finish_frag;
	}

	if (is_first_frag) {
		mbuf->m_pkthdr.rcvif = priv->ifp;
		ctx->is_tcp = desc->flags_seq & GVE_RXF_TCP;

		if (gve_needs_rss(desc->flags_seq)) {
			gve_set_rss_type(desc->flags_seq, mbuf);
			mbuf->m_pkthdr.flowid = be32toh(desc->rss_hash);
		}

		if ((desc->csum != 0) && ((desc->flags_seq & GVE_RXF_FRAG) == 0)) {
			mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED |
						    CSUM_IP_VALID |
						    CSUM_DATA_VALID |
						    CSUM_PSEUDO_HDR;
			mbuf->m_pkthdr.csum_data = 0xffff;
		}
	}

	if (is_last_frag) {
		mbuf = ctx->mbuf_head;
		mbuf->m_pkthdr.len = ctx->total_size;
		do_if_input = true;

		if (((if_getcapenable(priv->ifp) & IFCAP_LRO) != 0) &&      /* LRO is enabled */
		    (ctx->is_tcp) &&                                        /* pkt is a TCP pkt */
		    ((mbuf->m_pkthdr.csum_flags & CSUM_DATA_VALID) != 0) && /* NIC verified csum */
		    (rx->lro.lro_cnt != 0) &&                               /* LRO resources exist */
		    (tcp_lro_rx(&rx->lro, mbuf, 0) == 0))
			do_if_input = false;

		if (do_if_input)
			if_input(ifp, mbuf);

		counter_enter();
		counter_u64_add_protected(rx->stats.rbytes, ctx->total_size);
		counter_u64_add_protected(rx->stats.rpackets, 1);
		counter_exit();
	}

finish_frag:
	ctx->frag_cnt++;
	if (is_last_frag)
		rx->ctx = (struct gve_rx_ctx){};
}

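/*
 * Work is pending when the descriptor at the ring's current index already
 * carries the sequence number the ring expects next.
 */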
static bool
gve_rx_work_pending(struct gve_rx_ring *rx)
{
	struct gve_rx_desc *desc;
	__be16 flags_seq;
	uint32_t next_idx;

	next_idx = rx->cnt & rx->mask;
	desc = rx->desc_ring + next_idx;

	flags_seq = desc->flags_seq;

	return (GVE_SEQNO(flags_seq) == rx->seq_no);
}

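/*
 * Descriptor sequence numbers run 1 through 7 and then wrap back to 1; a
 * zeroed descriptor (seqno 0) therefore never matches rx->seq_no, which is
 * presumably why gve_clear_rx_ring can simply zero the descriptor ring.
 */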
static inline uint8_t
gve_next_seqno(uint8_t seq)
{
	return ((seq + 1) == 8 ? 1 : seq + 1);
}

static void
gve_rx_cleanup(struct gve_priv *priv, struct gve_rx_ring *rx, int budget)
{
	uint32_t idx = rx->cnt & rx->mask;
	struct gve_rx_desc *desc;
	struct gve_rx_ctx *ctx = &rx->ctx;
	uint32_t work_done = 0;

	bus_dmamap_sync(rx->desc_ring_mem.tag, rx->desc_ring_mem.map,
	    BUS_DMASYNC_POSTREAD);
	desc = &rx->desc_ring[idx];

	while ((work_done < budget || ctx->frag_cnt) &&
	    (GVE_SEQNO(desc->flags_seq) == rx->seq_no)) {

		gve_rx(priv, rx, desc, idx);

		rx->cnt++;
		idx = rx->cnt & rx->mask;
		desc = &rx->desc_ring[idx];
		rx->seq_no = gve_next_seqno(rx->seq_no);
		work_done++;
	}

	/* The device will only send whole packets. */
	if (__predict_false(ctx->frag_cnt)) {
		m_freem(ctx->mbuf_head);
		rx->ctx = (struct gve_rx_ctx){};
		device_printf(priv->dev,
		    "Unexpected seq number %d with incomplete packet, expected %d, scheduling reset",
		    GVE_SEQNO(desc->flags_seq), rx->seq_no);
		gve_schedule_reset(priv);
	}

	if (work_done != 0)
		tcp_lro_flush_all(&rx->lro);

	bus_dmamap_sync(rx->data_ring_mem.tag, rx->data_ring_mem.map,
	    BUS_DMASYNC_PREWRITE);

	/* Buffers are refilled as the descs are processed */
	rx->fill_cnt += work_done;
	gve_db_bar_write_4(priv, rx->com.db_offset, rx->fill_cnt);
}

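/*
 * Taskqueue handler: process up to a fixed budget of descriptors, re-arm
 * the interrupt, and re-enqueue itself if more work arrived while the
 * interrupt was masked.
 */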
void
gve_rx_cleanup_tq(void *arg, int pending)
{
	struct gve_rx_ring *rx = arg;
	struct gve_priv *priv = rx->com.priv;

	if (__predict_false((if_getdrvflags(priv->ifp) & IFF_DRV_RUNNING) == 0))
		return;

	gve_rx_cleanup(priv, rx, /*budget=*/128);

	gve_db_bar_write_4(priv, rx->com.irq_db_offset,
	    GVE_IRQ_ACK | GVE_IRQ_EVENT);

	/*
	 * Fragments received before this barrier MAY NOT cause the NIC to send an
	 * interrupt but they will still be handled by the enqueue below.
	 * Fragments received after the barrier WILL trigger an interrupt.
	 */
	mb();

	if (gve_rx_work_pending(rx)) {
		gve_db_bar_write_4(priv, rx->com.irq_db_offset, GVE_IRQ_MASK);
		taskqueue_enqueue(rx->com.cleanup_tq, &rx->com.cleanup_task);
	}
}