/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2023 Google LLC
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
31 #include <sys/malloc.h>
34 #include "gve_adminq.h"
36 static MALLOC_DEFINE(M_GVE_QPL, "gve qpl", "gve qpl allocations");
39 gve_num_tx_qpls(struct gve_priv *priv)
41 if (priv->queue_format != GVE_GQI_QPL_FORMAT)
44 return (priv->tx_cfg.max_queues);
48 gve_num_rx_qpls(struct gve_priv *priv)
50 if (priv->queue_format != GVE_GQI_QPL_FORMAT)
53 return (priv->rx_cfg.max_queues);
57 gve_free_qpl(struct gve_priv *priv, uint32_t id)
59 struct gve_queue_page_list *qpl = &priv->qpls[id];
62 for (i = 0; i < qpl->num_dmas; i++) {
63 gve_dmamap_destroy(&qpl->dmas[i]);
67 pmap_qremove(qpl->kva, qpl->num_pages);
68 kva_free(qpl->kva, PAGE_SIZE * qpl->num_pages);
71 for (i = 0; i < qpl->num_pages; i++) {
73 * Free the page only if this is the last ref.
74 * Tx pages are known to have no other refs at
75 * this point, but Rx pages might still be in
76 * use by the networking stack, see gve_mextadd_free.
78 if (vm_page_unwire_noq(qpl->pages[i])) {
80 pmap_qremove((vm_offset_t)qpl->dmas[i].cpu_addr, 1);
81 kva_free((vm_offset_t)qpl->dmas[i].cpu_addr, PAGE_SIZE);
83 vm_page_free(qpl->pages[i]);
86 priv->num_registered_pages--;
89 if (qpl->pages != NULL)
90 free(qpl->pages, M_GVE_QPL);
92 if (qpl->dmas != NULL)
93 free(qpl->dmas, M_GVE_QPL);
97 gve_alloc_qpl(struct gve_priv *priv, uint32_t id, int npages, bool single_kva)
99 struct gve_queue_page_list *qpl = &priv->qpls[id];
103 if (npages + priv->num_registered_pages > priv->max_registered_pages) {
104 device_printf(priv->dev, "Reached max number of registered pages %ju > %ju\n",
105 (uintmax_t)npages + priv->num_registered_pages,
106 (uintmax_t)priv->max_registered_pages);
114 qpl->dmas = malloc(npages * sizeof(*qpl->dmas), M_GVE_QPL,
117 qpl->pages = malloc(npages * sizeof(*qpl->pages), M_GVE_QPL,
122 qpl->kva = kva_alloc(PAGE_SIZE * npages);
124 device_printf(priv->dev, "Failed to create the single kva for QPL %d\n", id);
130 for (i = 0; i < npages; i++) {
131 qpl->pages[i] = vm_page_alloc_noobj(VM_ALLOC_WIRED |
136 qpl->dmas[i].cpu_addr = (void *)kva_alloc(PAGE_SIZE);
137 if (!qpl->dmas[i].cpu_addr) {
138 device_printf(priv->dev, "Failed to create kva for page %d in QPL %d", i, id);
142 pmap_qenter((vm_offset_t)qpl->dmas[i].cpu_addr, &(qpl->pages[i]), 1);
144 qpl->dmas[i].cpu_addr = (void *)(qpl->kva + (PAGE_SIZE * i));
151 pmap_qenter(qpl->kva, qpl->pages, npages);
153 for (i = 0; i < npages; i++) {
154 err = gve_dmamap_create(priv, /*size=*/PAGE_SIZE, /*align=*/PAGE_SIZE,
157 device_printf(priv->dev, "Failed to dma-map page %d in QPL %d\n", i, id);
162 priv->num_registered_pages++;
168 gve_free_qpl(priv, id);
173 gve_free_qpls(struct gve_priv *priv)
175 int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
181 if (priv->qpls != NULL) {
182 for (i = 0; i < num_qpls; i++)
183 gve_free_qpl(priv, i);
184 free(priv->qpls, M_GVE_QPL);
188 int gve_alloc_qpls(struct gve_priv *priv)
190 int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
197 priv->qpls = malloc(num_qpls * sizeof(*priv->qpls), M_GVE_QPL,
200 for (i = 0; i < gve_num_tx_qpls(priv); i++) {
201 err = gve_alloc_qpl(priv, i, priv->tx_desc_cnt / GVE_QPL_DIVISOR,
202 /*single_kva=*/true);
207 for (; i < num_qpls; i++) {
208 err = gve_alloc_qpl(priv, i, priv->rx_desc_cnt, /*single_kva=*/false);
221 gve_unregister_n_qpls(struct gve_priv *priv, int n)
226 for (i = 0; i < n; i++) {
227 err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
229 device_printf(priv->dev,
230 "Failed to unregister qpl %d, err: %d\n",
231 priv->qpls[i].id, err);
242 gve_register_qpls(struct gve_priv *priv)
244 int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
248 if (gve_get_state_flag(priv, GVE_STATE_FLAG_QPLREG_OK))
251 for (i = 0; i < num_qpls; i++) {
252 err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
254 device_printf(priv->dev,
255 "Failed to register qpl %d, err: %d\n",
256 priv->qpls[i].id, err);
261 gve_set_state_flag(priv, GVE_STATE_FLAG_QPLREG_OK);
265 gve_unregister_n_qpls(priv, i);
270 gve_unregister_qpls(struct gve_priv *priv)
272 int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
275 if (!gve_get_state_flag(priv, GVE_STATE_FLAG_QPLREG_OK))
278 err = gve_unregister_n_qpls(priv, num_qpls);
282 gve_clear_state_flag(priv, GVE_STATE_FLAG_QPLREG_OK);