/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2023 Google LLC
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "gve.h"
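
/*
 * Register and doorbell BAR accessors. Values are swapped with
 * htobe32()/be32toh() on every 4-byte access because the device
 * interprets these registers as big-endian.
 */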
uint32_t
gve_reg_bar_read_4(struct gve_priv *priv, bus_size_t offset)
{
	return (be32toh(bus_read_4(priv->reg_bar, offset)));
}

void
gve_reg_bar_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val)
{
	bus_write_4(priv->reg_bar, offset, htobe32(val));
}

void
gve_db_bar_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val)
{
	bus_write_4(priv->db_bar, offset, htobe32(val));
}
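
/*
 * Helpers to allocate or free an array of num_stats counter(9)
 * counters in one call.
 */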
void
gve_alloc_counters(counter_u64_t *stat, int num_stats)
{
	int i;

	for (i = 0; i < num_stats; i++)
		stat[i] = counter_u64_alloc(M_WAITOK);
}

void
gve_free_counters(counter_u64_t *stat, int num_stats)
{
	int i;

	for (i = 0; i < num_stats; i++)
		counter_u64_free(stat[i]);
}

/* Currently assumes a single segment. */
static void
gve_dmamap_load_callback(void *arg, bus_dma_segment_t *segs, int nseg,
    int error)
{
	if (error == 0)
		*(bus_addr_t *) arg = segs[0].ds_addr;
}
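
/*
 * Creates a single-segment DMA tag, allocates zeroed coherent memory
 * for it, and loads the map to learn the bus address. On failure,
 * every step taken so far is unwound through the labels at the bottom.
 */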
int
gve_dma_alloc_coherent(struct gve_priv *priv, int size, int align,
    struct gve_dma_handle *dma)
{
	int err;
	device_t dev = priv->dev;

	err = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* parent */
	    align, 0,			/* alignment, bounds */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW,		/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &dma->tag);
	if (err != 0) {
		device_printf(dev, "%s: bus_dma_tag_create failed: %d\n",
		    __func__, err);
		goto clear_tag;
	}

	err = bus_dmamem_alloc(dma->tag, (void **) &dma->cpu_addr,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &dma->map);
	if (err != 0) {
		device_printf(dev, "%s: bus_dmamem_alloc(%ju) failed: %d\n",
		    __func__, (uintmax_t)size, err);
		goto destroy_tag;
	}

	/* An address set by the callback will never be -1 */
	dma->bus_addr = (bus_addr_t)-1;
	err = bus_dmamap_load(dma->tag, dma->map, dma->cpu_addr, size,
	    gve_dmamap_load_callback, &dma->bus_addr, BUS_DMA_NOWAIT);
	if (err != 0 || dma->bus_addr == (bus_addr_t)-1) {
		device_printf(dev, "%s: bus_dmamap_load failed: %d\n", __func__, err);
		goto free_mem;
	}

	return (0);

free_mem:
	bus_dmamem_free(dma->tag, dma->cpu_addr, dma->map);
destroy_tag:
	bus_dma_tag_destroy(dma->tag);
clear_tag:
	dma->tag = NULL;

	return (err);
}
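
/* Releases everything set up by gve_dma_alloc_coherent(). */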
void
gve_dma_free_coherent(struct gve_dma_handle *dma)
{
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(dma->tag, dma->map);
	bus_dmamem_free(dma->tag, dma->cpu_addr, dma->map);
	bus_dma_tag_destroy(dma->tag);
}
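
/*
 * Like gve_dma_alloc_coherent(), except the memory is provided by the
 * caller in dma->cpu_addr; only a tag and a map are created here
 * before the buffer is loaded.
 */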
int
gve_dmamap_create(struct gve_priv *priv, int size, int align,
    struct gve_dma_handle *dma)
{
	int err;
	device_t dev = priv->dev;

	err = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* parent */
	    align, 0,			/* alignment, bounds */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW,		/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &dma->tag);
	if (err != 0) {
		device_printf(dev, "%s: bus_dma_tag_create failed: %d\n",
		    __func__, err);
		goto clear_tag;
	}

	err = bus_dmamap_create(dma->tag, BUS_DMA_COHERENT, &dma->map);
	if (err != 0) {
		device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
		    __func__, err);
		goto destroy_tag;
	}

	/* An address set by the callback will never be -1 */
	dma->bus_addr = (bus_addr_t)-1;
	err = bus_dmamap_load(dma->tag, dma->map, dma->cpu_addr, size,
	    gve_dmamap_load_callback, &dma->bus_addr, BUS_DMA_WAITOK);
	if (err != 0 || dma->bus_addr == (bus_addr_t)-1) {
		device_printf(priv->dev, "%s: bus_dmamap_load failed: %d\n",
		    __func__, err);
		goto destroy_map;
	}

	return (0);

destroy_map:
	bus_dmamap_destroy(dma->tag, dma->map);
destroy_tag:
	bus_dma_tag_destroy(dma->tag);
clear_tag:
	dma->tag = NULL;

	return (err);
}
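
/*
 * Counterpart to gve_dmamap_create(): unloads and destroys the map and
 * tag but leaves the caller-owned memory alone.
 */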
void
gve_dmamap_destroy(struct gve_dma_handle *dma)
{
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(dma->tag, dma->map);
	bus_dmamap_destroy(dma->tag, dma->map);
	bus_dma_tag_destroy(dma->tag);
}
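
/*
 * Filter handler for the management interrupt: all of the real work is
 * deferred to the service taskqueue.
 */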
static int
gve_mgmnt_intr(void *arg)
{
	struct gve_priv *priv = arg;

	taskqueue_enqueue(priv->service_tq, &priv->service_task);
	return (FILTER_HANDLED);
}
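
/*
 * Tears down and releases every interrupt in the table, frees the
 * table itself, and gives the MSI-X vectors back to the PCI layer.
 */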
void
gve_free_irqs(struct gve_priv *priv)
{
	struct gve_irq *irq;
	int num_irqs;
	int rid;
	int rc;
	int i;

	if (priv->irq_tbl == NULL) {
		device_printf(priv->dev, "No irq table, nothing to free\n");
		return;
	}

	num_irqs = priv->tx_cfg.num_queues + priv->rx_cfg.num_queues + 1;

	for (i = 0; i < num_irqs; i++) {
		irq = &priv->irq_tbl[i];
		if (irq->res == NULL)
			continue;

		rid = rman_get_rid(irq->res);

		rc = bus_teardown_intr(priv->dev, irq->res, irq->cookie);
		if (rc != 0)
			device_printf(priv->dev, "Failed to teardown irq num %d\n",
			    rid);

		rc = bus_release_resource(priv->dev, SYS_RES_IRQ,
		    rid, irq->res);
		if (rc != 0)
			device_printf(priv->dev, "Failed to release irq num %d\n",
			    rid);

		irq->res = NULL;
		irq->cookie = NULL;
	}

	free(priv->irq_tbl, M_GVE);
	priv->irq_tbl = NULL;

	/* Safe to call even if msix was never alloced */
	pci_release_msi(priv->dev);
}
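
/*
 * Allocates one MSI-X vector per Tx queue and per Rx queue, plus one
 * more for the management interrupt, and attaches a handler to each;
 * the rid for vector n is n + 1. Any failure unwinds everything set up
 * so far via gve_free_irqs().
 */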
int
gve_alloc_irqs(struct gve_priv *priv)
{
	int num_tx = priv->tx_cfg.num_queues;
	int num_rx = priv->rx_cfg.num_queues;
	int req_nvecs = num_tx + num_rx + 1;
	int got_nvecs = req_nvecs;
	struct gve_irq *irq;
	int i, j, m;
	int rid;
	int err;

	struct gve_ring_com *com;
	struct gve_rx_ring *rx;
	struct gve_tx_ring *tx;

	if (pci_alloc_msix(priv->dev, &got_nvecs) != 0) {
		device_printf(priv->dev, "Failed to acquire any msix vectors\n");
		err = ENXIO;
		goto abort;
	} else if (got_nvecs != req_nvecs) {
		device_printf(priv->dev, "Tried to acquire %d msix vectors, got only %d\n",
		    req_nvecs, got_nvecs);
		err = ENOSPC;
		goto abort;
	}

	if (bootverbose)
		device_printf(priv->dev, "Enabled MSIX with %d vectors\n", got_nvecs);

	priv->irq_tbl = malloc(sizeof(struct gve_irq) * req_nvecs, M_GVE,
	    M_WAITOK | M_ZERO);

	for (i = 0; i < num_tx; i++) {
		irq = &priv->irq_tbl[i];
		tx = &priv->tx[i];
		com = &tx->com;
		rid = i + 1;

		irq->res = bus_alloc_resource_any(priv->dev, SYS_RES_IRQ,
		    &rid, RF_ACTIVE);
		if (irq->res == NULL) {
			device_printf(priv->dev, "Failed to alloc irq %d for Tx queue %d\n",
			    rid, i);
			err = ENOMEM;
			goto abort;
		}

		err = bus_setup_intr(priv->dev, irq->res, INTR_TYPE_NET | INTR_MPSAFE,
		    gve_tx_intr, NULL, &priv->tx[i], &irq->cookie);
		if (err != 0) {
			device_printf(priv->dev, "Failed to setup irq %d for Tx queue %d, "
			    "err: %d\n", rid, i, err);
			goto abort;
		}

		bus_describe_intr(priv->dev, irq->res, irq->cookie, "tx%d", i);
		com->ntfy_id = i;
	}

	for (j = 0; j < num_rx; j++) {
		irq = &priv->irq_tbl[i + j];
		rx = &priv->rx[j];
		com = &rx->com;
		rid = i + j + 1;

		irq->res = bus_alloc_resource_any(priv->dev, SYS_RES_IRQ,
		    &rid, RF_ACTIVE);
		if (irq->res == NULL) {
			device_printf(priv->dev,
			    "Failed to alloc irq %d for Rx queue %d", rid, j);
			err = ENOMEM;
			goto abort;
		}

		err = bus_setup_intr(priv->dev, irq->res, INTR_TYPE_NET | INTR_MPSAFE,
		    gve_rx_intr, NULL, &priv->rx[j], &irq->cookie);
		if (err != 0) {
			device_printf(priv->dev, "Failed to setup irq %d for Rx queue %d, "
			    "err: %d\n", rid, j, err);
			goto abort;
		}

		bus_describe_intr(priv->dev, irq->res, irq->cookie, "rx%d", j);
		com->ntfy_id = i + j;
	}

	m = i + j;
	rid = m + 1;
	irq = &priv->irq_tbl[m];

	irq->res = bus_alloc_resource_any(priv->dev, SYS_RES_IRQ,
	    &rid, RF_ACTIVE);
	if (irq->res == NULL) {
		device_printf(priv->dev, "Failed to allocate irq %d for mgmnt queue\n", rid);
		err = ENOMEM;
		goto abort;
	}

	err = bus_setup_intr(priv->dev, irq->res, INTR_TYPE_NET | INTR_MPSAFE,
	    gve_mgmnt_intr, NULL, priv, &irq->cookie);
	if (err != 0) {
		device_printf(priv->dev, "Failed to setup irq %d for mgmnt queue, err: %d\n",
		    rid, err);
		goto abort;
	}

	bus_describe_intr(priv->dev, irq->res, irq->cookie, "mgmnt");

	return (0);

abort:
	gve_free_irqs(priv);
	return (err);
}
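
/* Writing 0 to a queue's interrupt doorbell unmasks its interrupt. */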
void
gve_unmask_all_queue_irqs(struct gve_priv *priv)
{
	struct gve_tx_ring *tx;
	struct gve_rx_ring *rx;
	int idx;

	for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
		tx = &priv->tx[idx];
		gve_db_bar_write_4(priv, tx->com.irq_db_offset, 0);
	}
	for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
		rx = &priv->rx[idx];
		gve_db_bar_write_4(priv, rx->com.irq_db_offset, 0);
	}
}
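
/* Writing GVE_IRQ_MASK to a queue's interrupt doorbell masks its interrupt. */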
void
gve_mask_all_queue_irqs(struct gve_priv *priv)
{
	for (int idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
		struct gve_tx_ring *tx = &priv->tx[idx];

		gve_db_bar_write_4(priv, tx->com.irq_db_offset, GVE_IRQ_MASK);
	}
	for (int idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
		struct gve_rx_ring *rx = &priv->rx[idx];

		gve_db_bar_write_4(priv, rx->com.irq_db_offset, GVE_IRQ_MASK);
	}
}