/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2023 Google LLC
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
31 #include <sys/endian.h>
32 #include <sys/socket.h>
35 #include <net/ethernet.h>
37 #include <net/if_var.h>
40 #include "gve_adminq.h"
/* Delay between polls of the AQ event counter, in milliseconds. */
#define GVE_ADMINQ_SLEEP_LEN_MS 20
/* Maximum number of event-counter polls before declaring an AQ timeout. */
#define GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK 10
/* Device-descriptor format version requested by DescribeDevice. */
#define GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION 1
/* Register (BAR0 offset) that receives the AQ page-frame number. */
#define GVE_REG_ADMINQ_ADDR 16
/* Number of command slots that fit in the admin queue's memory. */
#define ADMINQ_SLOTS (ADMINQ_SIZE / sizeof(struct gve_adminq_command))

/* Logged when a device option's length or feature mask is not as expected. */
#define GVE_DEVICE_OPTION_ERROR_FMT "%s option error:\n" \
    "Expected: length=%d, feature_mask=%x.\n" \
    "Actual: length=%d, feature_mask=%x.\n"

/* Logged when a device option is larger than this driver understands. */
#define GVE_DEVICE_OPTION_TOO_BIG_FMT "Length of %s option larger than expected." \
    " Possible older version of guest driver.\n"
56 void gve_parse_device_option(struct gve_priv *priv,
57 struct gve_device_descriptor *device_descriptor,
58 struct gve_device_option *option,
59 struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
60 struct gve_device_option_jumbo_frames **dev_op_jumbo_frames)
62 uint32_t req_feat_mask = be32toh(option->required_features_mask);
63 uint16_t option_length = be16toh(option->option_length);
64 uint16_t option_id = be16toh(option->option_id);
67 * If the length or feature mask doesn't match, continue without
68 * enabling the feature.
71 case GVE_DEV_OPT_ID_GQI_QPL:
72 if (option_length < sizeof(**dev_op_gqi_qpl) ||
73 req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL) {
74 device_printf(priv->dev, GVE_DEVICE_OPTION_ERROR_FMT,
75 "GQI QPL", (int)sizeof(**dev_op_gqi_qpl),
76 GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL,
77 option_length, req_feat_mask);
81 if (option_length > sizeof(**dev_op_gqi_qpl)) {
82 device_printf(priv->dev, GVE_DEVICE_OPTION_TOO_BIG_FMT,
85 *dev_op_gqi_qpl = (void *)(option + 1);
88 case GVE_DEV_OPT_ID_JUMBO_FRAMES:
89 if (option_length < sizeof(**dev_op_jumbo_frames) ||
90 req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES) {
91 device_printf(priv->dev, GVE_DEVICE_OPTION_ERROR_FMT,
92 "Jumbo Frames", (int)sizeof(**dev_op_jumbo_frames),
93 GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES,
94 option_length, req_feat_mask);
98 if (option_length > sizeof(**dev_op_jumbo_frames)) {
99 device_printf(priv->dev,
100 GVE_DEVICE_OPTION_TOO_BIG_FMT, "Jumbo Frames");
102 *dev_op_jumbo_frames = (void *)(option + 1);
107 * If we don't recognize the option just continue
108 * without doing anything.
110 device_printf(priv->dev, "Unrecognized device option 0x%hx not enabled.\n",
115 /* Process all device options for a given describe device call. */
117 gve_process_device_options(struct gve_priv *priv,
118 struct gve_device_descriptor *descriptor,
119 struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
120 struct gve_device_option_jumbo_frames **dev_op_jumbo_frames)
122 char *desc_end = (char *)descriptor + be16toh(descriptor->total_length);
123 const int num_options = be16toh(descriptor->num_device_options);
124 struct gve_device_option *dev_opt;
127 /* The options struct directly follows the device descriptor. */
128 dev_opt = (void *)(descriptor + 1);
129 for (i = 0; i < num_options; i++) {
130 if ((char *)(dev_opt + 1) > desc_end ||
131 (char *)(dev_opt + 1) + be16toh(dev_opt->option_length) > desc_end) {
132 device_printf(priv->dev,
133 "options exceed device_descriptor's total length.\n");
137 gve_parse_device_option(priv, descriptor, dev_opt,
138 dev_op_gqi_qpl, dev_op_jumbo_frames);
139 dev_opt = (void *)((char *)(dev_opt + 1) + be16toh(dev_opt->option_length));
/* Forward declaration: issue one AQ command and wait for it (defined below). */
static int gve_adminq_execute_cmd(struct gve_priv *priv,
    struct gve_adminq_command *cmd);
149 gve_adminq_destroy_tx_queue(struct gve_priv *priv, uint32_t id)
151 struct gve_adminq_command cmd = (struct gve_adminq_command){};
153 cmd.opcode = htobe32(GVE_ADMINQ_DESTROY_TX_QUEUE);
154 cmd.destroy_tx_queue.queue_id = htobe32(id);
156 return (gve_adminq_execute_cmd(priv, &cmd));
160 gve_adminq_destroy_rx_queue(struct gve_priv *priv, uint32_t id)
162 struct gve_adminq_command cmd = (struct gve_adminq_command){};
164 cmd.opcode = htobe32(GVE_ADMINQ_DESTROY_RX_QUEUE);
165 cmd.destroy_rx_queue.queue_id = htobe32(id);
167 return (gve_adminq_execute_cmd(priv, &cmd));
171 gve_adminq_destroy_rx_queues(struct gve_priv *priv, uint32_t num_queues)
176 for (i = 0; i < num_queues; i++) {
177 err = gve_adminq_destroy_rx_queue(priv, i);
179 device_printf(priv->dev, "Failed to destroy rxq %d, err: %d\n",
187 device_printf(priv->dev, "Destroyed %d rx queues\n", num_queues);
192 gve_adminq_destroy_tx_queues(struct gve_priv *priv, uint32_t num_queues)
197 for (i = 0; i < num_queues; i++) {
198 err = gve_adminq_destroy_tx_queue(priv, i);
200 device_printf(priv->dev, "Failed to destroy txq %d, err: %d\n",
208 device_printf(priv->dev, "Destroyed %d tx queues\n", num_queues);
213 gve_adminq_create_rx_queue(struct gve_priv *priv, uint32_t queue_index)
215 struct gve_adminq_command cmd = (struct gve_adminq_command){};
216 struct gve_rx_ring *rx = &priv->rx[queue_index];
217 struct gve_dma_handle *qres_dma = &rx->com.q_resources_mem;
219 bus_dmamap_sync(qres_dma->tag, qres_dma->map, BUS_DMASYNC_PREREAD);
221 cmd.opcode = htobe32(GVE_ADMINQ_CREATE_RX_QUEUE);
222 cmd.create_rx_queue = (struct gve_adminq_create_rx_queue) {
223 .queue_id = htobe32(queue_index),
224 .index = htobe32(queue_index),
225 .ntfy_id = htobe32(rx->com.ntfy_id),
226 .queue_resources_addr = htobe64(qres_dma->bus_addr),
227 .rx_desc_ring_addr = htobe64(rx->desc_ring_mem.bus_addr),
228 .rx_data_ring_addr = htobe64(rx->data_ring_mem.bus_addr),
229 .queue_page_list_id = htobe32((rx->com.qpl)->id),
230 .rx_ring_size = htobe16(priv->rx_desc_cnt),
231 .packet_buffer_size = htobe16(GVE_DEFAULT_RX_BUFFER_SIZE),
234 return (gve_adminq_execute_cmd(priv, &cmd));
238 gve_adminq_create_rx_queues(struct gve_priv *priv, uint32_t num_queues)
243 for (i = 0; i < num_queues; i++) {
244 err = gve_adminq_create_rx_queue(priv, i);
246 device_printf(priv->dev, "Failed to create rxq %d, err: %d\n",
253 device_printf(priv->dev, "Created %d rx queues\n", num_queues);
257 gve_adminq_destroy_rx_queues(priv, i);
262 gve_adminq_create_tx_queue(struct gve_priv *priv, uint32_t queue_index)
264 struct gve_adminq_command cmd = (struct gve_adminq_command){};
265 struct gve_tx_ring *tx = &priv->tx[queue_index];
266 struct gve_dma_handle *qres_dma = &tx->com.q_resources_mem;
268 bus_dmamap_sync(qres_dma->tag, qres_dma->map, BUS_DMASYNC_PREREAD);
270 cmd.opcode = htobe32(GVE_ADMINQ_CREATE_TX_QUEUE);
271 cmd.create_tx_queue = (struct gve_adminq_create_tx_queue) {
272 .queue_id = htobe32(queue_index),
273 .queue_resources_addr = htobe64(qres_dma->bus_addr),
274 .tx_ring_addr = htobe64(tx->desc_ring_mem.bus_addr),
275 .queue_page_list_id = htobe32((tx->com.qpl)->id),
276 .ntfy_id = htobe32(tx->com.ntfy_id),
277 .tx_ring_size = htobe16(priv->tx_desc_cnt),
280 return (gve_adminq_execute_cmd(priv, &cmd));
284 gve_adminq_create_tx_queues(struct gve_priv *priv, uint32_t num_queues)
289 for (i = 0; i < num_queues; i++) {
290 err = gve_adminq_create_tx_queue(priv, i);
292 device_printf(priv->dev, "Failed to create txq %d, err: %d\n",
299 device_printf(priv->dev, "Created %d tx queues\n", num_queues);
303 gve_adminq_destroy_tx_queues(priv, i);
308 gve_adminq_set_mtu(struct gve_priv *priv, uint32_t mtu) {
309 struct gve_adminq_command cmd = (struct gve_adminq_command){};
311 cmd.opcode = htobe32(GVE_ADMINQ_SET_DRIVER_PARAMETER);
312 cmd.set_driver_param = (struct gve_adminq_set_driver_parameter) {
313 .parameter_type = htobe32(GVE_SET_PARAM_MTU),
314 .parameter_value = htobe64(mtu),
317 return (gve_adminq_execute_cmd(priv, &cmd));
321 gve_enable_supported_features(struct gve_priv *priv,
322 uint32_t supported_features_mask,
323 const struct gve_device_option_jumbo_frames *dev_op_jumbo_frames)
325 if (dev_op_jumbo_frames &&
326 (supported_features_mask & GVE_SUP_JUMBO_FRAMES_MASK)) {
328 device_printf(priv->dev, "JUMBO FRAMES device option enabled: %u.\n",
329 be16toh(dev_op_jumbo_frames->max_mtu));
330 priv->max_mtu = be16toh(dev_op_jumbo_frames->max_mtu);
335 gve_adminq_describe_device(struct gve_priv *priv)
337 struct gve_adminq_command aq_cmd = (struct gve_adminq_command){};
338 struct gve_device_descriptor *desc;
339 struct gve_dma_handle desc_mem;
340 struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
341 struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
342 uint32_t supported_features_mask = 0;
346 rc = gve_dma_alloc_coherent(priv, ADMINQ_SIZE, ADMINQ_SIZE, &desc_mem);
348 device_printf(priv->dev, "Failed to alloc DMA mem for DescribeDevice.\n");
352 desc = desc_mem.cpu_addr;
354 aq_cmd.opcode = htobe32(GVE_ADMINQ_DESCRIBE_DEVICE);
355 aq_cmd.describe_device.device_descriptor_addr = htobe64(
357 aq_cmd.describe_device.device_descriptor_version = htobe32(
358 GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION);
359 aq_cmd.describe_device.available_length = htobe32(ADMINQ_SIZE);
361 bus_dmamap_sync(desc_mem.tag, desc_mem.map, BUS_DMASYNC_PREWRITE);
363 rc = gve_adminq_execute_cmd(priv, &aq_cmd);
365 goto free_device_descriptor;
367 bus_dmamap_sync(desc_mem.tag, desc_mem.map, BUS_DMASYNC_POSTREAD);
369 rc = gve_process_device_options(priv, desc, &dev_op_gqi_qpl,
370 &dev_op_jumbo_frames);
372 goto free_device_descriptor;
374 if (dev_op_gqi_qpl != NULL) {
375 priv->queue_format = GVE_GQI_QPL_FORMAT;
376 supported_features_mask = be32toh(
377 dev_op_gqi_qpl->supported_features_mask);
379 device_printf(priv->dev,
380 "Driver is running with GQI QPL queue format.\n");
382 device_printf(priv->dev, "No compatible queue formats\n");
384 goto free_device_descriptor;
387 priv->num_event_counters = be16toh(desc->counters);
388 priv->default_num_queues = be16toh(desc->default_num_queues);
389 priv->tx_desc_cnt = be16toh(desc->tx_queue_entries);
390 priv->rx_desc_cnt = be16toh(desc->rx_queue_entries);
391 priv->rx_pages_per_qpl = be16toh(desc->rx_pages_per_qpl);
392 priv->max_registered_pages = be64toh(desc->max_registered_pages);
393 priv->max_mtu = be16toh(desc->mtu);
394 priv->default_num_queues = be16toh(desc->default_num_queues);
395 priv->supported_features = supported_features_mask;
397 gve_enable_supported_features(priv, supported_features_mask,
398 dev_op_jumbo_frames);
400 for (i = 0; i < ETHER_ADDR_LEN; i++)
401 priv->mac[i] = desc->mac[i];
403 free_device_descriptor:
404 gve_dma_free_coherent(&desc_mem);
410 gve_adminq_register_page_list(struct gve_priv *priv,
411 struct gve_queue_page_list *qpl)
413 struct gve_adminq_command cmd = (struct gve_adminq_command){};
414 uint32_t num_entries = qpl->num_pages;
415 uint32_t size = num_entries * sizeof(qpl->dmas[0].bus_addr);
417 struct gve_dma_handle dma;
421 err = gve_dma_alloc_coherent(priv, size, PAGE_SIZE, &dma);
425 page_list = dma.cpu_addr;
427 for (i = 0; i < num_entries; i++)
428 page_list[i] = htobe64(qpl->dmas[i].bus_addr);
430 bus_dmamap_sync(dma.tag, dma.map, BUS_DMASYNC_PREWRITE);
432 cmd.opcode = htobe32(GVE_ADMINQ_REGISTER_PAGE_LIST);
433 cmd.reg_page_list = (struct gve_adminq_register_page_list) {
434 .page_list_id = htobe32(qpl->id),
435 .num_pages = htobe32(num_entries),
436 .page_address_list_addr = htobe64(dma.bus_addr),
437 .page_size = htobe64(PAGE_SIZE),
440 err = gve_adminq_execute_cmd(priv, &cmd);
441 gve_dma_free_coherent(&dma);
446 gve_adminq_unregister_page_list(struct gve_priv *priv, uint32_t page_list_id)
448 struct gve_adminq_command cmd = (struct gve_adminq_command){};
450 cmd.opcode = htobe32(GVE_ADMINQ_UNREGISTER_PAGE_LIST);
451 cmd.unreg_page_list = (struct gve_adminq_unregister_page_list) {
452 .page_list_id = htobe32(page_list_id),
455 return (gve_adminq_execute_cmd(priv, &cmd));
458 #define GVE_NTFY_BLK_BASE_MSIX_IDX 0
460 gve_adminq_configure_device_resources(struct gve_priv *priv)
462 struct gve_adminq_command aq_cmd = (struct gve_adminq_command){};
464 bus_dmamap_sync(priv->irqs_db_mem.tag, priv->irqs_db_mem.map,
465 BUS_DMASYNC_PREREAD);
466 bus_dmamap_sync(priv->counter_array_mem.tag,
467 priv->counter_array_mem.map, BUS_DMASYNC_PREREAD);
469 aq_cmd.opcode = htobe32(GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES);
470 aq_cmd.configure_device_resources =
471 (struct gve_adminq_configure_device_resources) {
472 .counter_array = htobe64(priv->counter_array_mem.bus_addr),
473 .irq_db_addr = htobe64(priv->irqs_db_mem.bus_addr),
474 .num_counters = htobe32(priv->num_event_counters),
475 .num_irq_dbs = htobe32(priv->num_queues),
476 .irq_db_stride = htobe32(sizeof(struct gve_irq_db)),
477 .ntfy_blk_msix_base_idx = htobe32(GVE_NTFY_BLK_BASE_MSIX_IDX),
478 .queue_format = priv->queue_format,
481 return (gve_adminq_execute_cmd(priv, &aq_cmd));
485 gve_adminq_deconfigure_device_resources(struct gve_priv *priv)
487 struct gve_adminq_command aq_cmd = (struct gve_adminq_command){};
489 aq_cmd.opcode = htobe32(GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES);
490 return (gve_adminq_execute_cmd(priv, &aq_cmd));
494 gve_adminq_verify_driver_compatibility(struct gve_priv *priv,
495 uint64_t driver_info_len,
496 vm_paddr_t driver_info_addr)
498 struct gve_adminq_command aq_cmd = (struct gve_adminq_command){};
500 aq_cmd.opcode = htobe32(GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY);
501 aq_cmd.verify_driver_compatibility = (struct gve_adminq_verify_driver_compatibility) {
502 .driver_info_len = htobe64(driver_info_len),
503 .driver_info_addr = htobe64(driver_info_addr),
506 return (gve_adminq_execute_cmd(priv, &aq_cmd));
510 gve_adminq_alloc(struct gve_priv *priv)
514 if (gve_get_state_flag(priv, GVE_STATE_FLAG_ADMINQ_OK))
517 if (priv->aq_mem.cpu_addr == NULL) {
518 rc = gve_dma_alloc_coherent(priv, ADMINQ_SIZE, ADMINQ_SIZE,
521 device_printf(priv->dev, "Failed to allocate admin queue mem\n");
526 priv->adminq = priv->aq_mem.cpu_addr;
527 priv->adminq_bus_addr = priv->aq_mem.bus_addr;
529 if (priv->adminq == NULL)
532 priv->adminq_mask = ADMINQ_SLOTS - 1;
533 priv->adminq_prod_cnt = 0;
534 priv->adminq_cmd_fail = 0;
535 priv->adminq_timeouts = 0;
536 priv->adminq_describe_device_cnt = 0;
537 priv->adminq_cfg_device_resources_cnt = 0;
538 priv->adminq_register_page_list_cnt = 0;
539 priv->adminq_unregister_page_list_cnt = 0;
540 priv->adminq_create_tx_queue_cnt = 0;
541 priv->adminq_create_rx_queue_cnt = 0;
542 priv->adminq_destroy_tx_queue_cnt = 0;
543 priv->adminq_destroy_rx_queue_cnt = 0;
544 priv->adminq_dcfg_device_resources_cnt = 0;
545 priv->adminq_set_driver_parameter_cnt = 0;
547 gve_reg_bar_write_4(priv, GVE_REG_ADMINQ_ADDR,
548 priv->adminq_bus_addr / ADMINQ_SIZE);
550 gve_set_state_flag(priv, GVE_STATE_FLAG_ADMINQ_OK);
555 gve_release_adminq(struct gve_priv *priv)
557 if (!gve_get_state_flag(priv, GVE_STATE_FLAG_ADMINQ_OK))
560 gve_reg_bar_write_4(priv, GVE_REG_ADMINQ_ADDR, 0);
561 while (gve_reg_bar_read_4(priv, GVE_REG_ADMINQ_ADDR)) {
562 device_printf(priv->dev, "Waiting until admin queue is released.\n");
563 pause("gve release adminq", GVE_ADMINQ_SLEEP_LEN_MS);
566 gve_dma_free_coherent(&priv->aq_mem);
567 priv->aq_mem = (struct gve_dma_handle){};
569 priv->adminq_bus_addr = 0;
571 gve_clear_state_flag(priv, GVE_STATE_FLAG_ADMINQ_OK);
574 device_printf(priv->dev, "Admin queue released\n");
578 gve_adminq_parse_err(struct gve_priv *priv, uint32_t opcode, uint32_t status)
580 if (status != GVE_ADMINQ_COMMAND_PASSED &&
581 status != GVE_ADMINQ_COMMAND_UNSET) {
582 device_printf(priv->dev, "AQ command(%u): failed with status %d\n", opcode, status);
583 priv->adminq_cmd_fail++;
586 case GVE_ADMINQ_COMMAND_PASSED:
589 case GVE_ADMINQ_COMMAND_UNSET:
590 device_printf(priv->dev,
591 "AQ command(%u): err and status both unset, this should not be possible.\n",
595 case GVE_ADMINQ_COMMAND_ERROR_ABORTED:
596 case GVE_ADMINQ_COMMAND_ERROR_CANCELLED:
597 case GVE_ADMINQ_COMMAND_ERROR_DATALOSS:
598 case GVE_ADMINQ_COMMAND_ERROR_FAILED_PRECONDITION:
599 case GVE_ADMINQ_COMMAND_ERROR_UNAVAILABLE:
602 case GVE_ADMINQ_COMMAND_ERROR_ALREADY_EXISTS:
603 case GVE_ADMINQ_COMMAND_ERROR_INTERNAL_ERROR:
604 case GVE_ADMINQ_COMMAND_ERROR_INVALID_ARGUMENT:
605 case GVE_ADMINQ_COMMAND_ERROR_NOT_FOUND:
606 case GVE_ADMINQ_COMMAND_ERROR_OUT_OF_RANGE:
607 case GVE_ADMINQ_COMMAND_ERROR_UNKNOWN_ERROR:
610 case GVE_ADMINQ_COMMAND_ERROR_DEADLINE_EXCEEDED:
613 case GVE_ADMINQ_COMMAND_ERROR_PERMISSION_DENIED:
614 case GVE_ADMINQ_COMMAND_ERROR_UNAUTHENTICATED:
617 case GVE_ADMINQ_COMMAND_ERROR_RESOURCE_EXHAUSTED:
620 case GVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED:
624 device_printf(priv->dev, "AQ command(%u): unknown status code %d\n",
631 gve_adminq_kick_cmd(struct gve_priv *priv, uint32_t prod_cnt)
633 gve_reg_bar_write_4(priv, ADMINQ_DOORBELL, prod_cnt);
638 gve_adminq_wait_for_cmd(struct gve_priv *priv, uint32_t prod_cnt)
642 for (i = 0; i < GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK; i++) {
643 if (gve_reg_bar_read_4(priv, ADMINQ_EVENT_COUNTER) == prod_cnt)
645 pause("gve adminq cmd", GVE_ADMINQ_SLEEP_LEN_MS);
652 * Flushes all AQ commands currently queued and waits for them to complete.
653 * If there are failures, it will return the first error.
656 gve_adminq_kick_and_wait(struct gve_priv *priv)
658 struct gve_adminq_command *cmd;
659 uint32_t status, err;
664 tail = gve_reg_bar_read_4(priv, ADMINQ_EVENT_COUNTER);
665 head = priv->adminq_prod_cnt;
667 gve_adminq_kick_cmd(priv, head);
668 if (!gve_adminq_wait_for_cmd(priv, head)) {
669 device_printf(priv->dev, "AQ commands timed out, need to reset AQ\n");
670 priv->adminq_timeouts++;
671 return (ENOTRECOVERABLE);
674 priv->aq_mem.tag, priv->aq_mem.map, BUS_DMASYNC_POSTREAD);
676 for (i = tail; i < head; i++) {
677 cmd = &priv->adminq[i & priv->adminq_mask];
678 status = be32toh(cmd->status);
679 opcode = be32toh(cmd->opcode);
680 err = gve_adminq_parse_err(priv, opcode, status);
689 * This function is not threadsafe - the caller is responsible for any
693 gve_adminq_issue_cmd(struct gve_priv *priv, struct gve_adminq_command *cmd_orig)
695 struct gve_adminq_command *cmd;
700 tail = gve_reg_bar_read_4(priv, ADMINQ_EVENT_COUNTER);
702 /* Check if next command will overflow the buffer. */
703 if ((priv->adminq_prod_cnt - tail) > priv->adminq_mask) {
704 /* Flush existing commands to make room. */
705 err = gve_adminq_kick_and_wait(priv);
710 tail = gve_reg_bar_read_4(priv, ADMINQ_EVENT_COUNTER);
711 if ((priv->adminq_prod_cnt - tail) > priv->adminq_mask) {
713 * This should never happen. We just flushed the
714 * command queue so there should be enough space.
720 cmd = &priv->adminq[priv->adminq_prod_cnt & priv->adminq_mask];
721 priv->adminq_prod_cnt++;
723 memcpy(cmd, cmd_orig, sizeof(*cmd_orig));
726 priv->aq_mem.tag, priv->aq_mem.map, BUS_DMASYNC_PREWRITE);
728 opcode = be32toh(cmd->opcode);
731 case GVE_ADMINQ_DESCRIBE_DEVICE:
732 priv->adminq_describe_device_cnt++;
735 case GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES:
736 priv->adminq_cfg_device_resources_cnt++;
739 case GVE_ADMINQ_REGISTER_PAGE_LIST:
740 priv->adminq_register_page_list_cnt++;
743 case GVE_ADMINQ_UNREGISTER_PAGE_LIST:
744 priv->adminq_unregister_page_list_cnt++;
747 case GVE_ADMINQ_CREATE_TX_QUEUE:
748 priv->adminq_create_tx_queue_cnt++;
751 case GVE_ADMINQ_CREATE_RX_QUEUE:
752 priv->adminq_create_rx_queue_cnt++;
755 case GVE_ADMINQ_DESTROY_TX_QUEUE:
756 priv->adminq_destroy_tx_queue_cnt++;
759 case GVE_ADMINQ_DESTROY_RX_QUEUE:
760 priv->adminq_destroy_rx_queue_cnt++;
763 case GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES:
764 priv->adminq_dcfg_device_resources_cnt++;
767 case GVE_ADMINQ_SET_DRIVER_PARAMETER:
768 priv->adminq_set_driver_parameter_cnt++;
771 case GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY:
772 priv->adminq_verify_driver_compatibility_cnt++;
776 device_printf(priv->dev, "Unknown AQ command opcode %d\n", opcode);
783 * This function is not threadsafe - the caller is responsible for any
785 * The caller is also responsible for making sure there are no commands
786 * waiting to be executed.
789 gve_adminq_execute_cmd(struct gve_priv *priv, struct gve_adminq_command *cmd_orig)
794 tail = gve_reg_bar_read_4(priv, ADMINQ_EVENT_COUNTER);
795 head = priv->adminq_prod_cnt;
799 err = gve_adminq_issue_cmd(priv, cmd_orig);
802 return (gve_adminq_kick_and_wait(priv));