2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 2023 Google LLC
6 * Redistribution and use in source and binary forms, with or without modification,
7 * are permitted provided that the following conditions are met:
9 * 1. Redistributions of source code must retain the above copyright notice, this
10 * list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright notice,
13 * this list of conditions and the following disclaimer in the documentation
14 * and/or other materials provided with the distribution.
16 * 3. Neither the name of the copyright holder nor the names of its contributors
17 * may be used to endorse or promote products derived from this software without
18 * specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
22 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
24 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
25 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
27 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 #include "gve_adminq.h"

/*
 * Version string written verbatim (trailing newline included, NUL excluded —
 * see the bus_write_multi_1() call in gve_attach) to the device's
 * DRIVER_VERSION register.
 */
34 #define GVE_DRIVER_VERSION "GVE-FBSD-1.0.0\n"
35 #define GVE_VERSION_MAJOR 0
36 #define GVE_VERSION_MINOR 9
37 #define GVE_VERSION_SUB 0

/*
 * Initial value for priv->rx_copybreak (set in gve_attach). Presumably the
 * size threshold below which received frames are copied rather than handed
 * up zero-copy — confirm against the rx path.
 */
39 #define GVE_DEFAULT_RX_COPYBREAK 256
41 /* Devices supported by this driver. */
42 static struct gve_dev {
47 { PCI_VENDOR_ID_GOOGLE, PCI_DEV_ID_GVNIC, "gVNIC" }

/* Driver-wide sx(9) lock; scope of protection not visible in this file. */
50 struct sx gve_global_lock;
/*
 * Tell the device which driver/OS version is attaching so it can flag
 * incompatibilities: fill a DMA-coherent struct gve_driver_info (multi-byte
 * fields in big-endian, per the htobe32/htobe64 conversions below) and issue
 * the adminq verify-driver-compatibility command.
 *
 * Returns 0 on success or if the device does not support the command
 * (EOPNOTSUPP is tolerated); otherwise an errno from DMA allocation or the
 * adminq command.
 */
53 gve_verify_driver_compatibility(struct gve_priv *priv)
56 struct gve_driver_info *driver_info;
57 struct gve_dma_handle driver_info_mem;
59 err = gve_dma_alloc_coherent(priv, sizeof(struct gve_driver_info),
60 PAGE_SIZE, &driver_info_mem);
65 driver_info = driver_info_mem.cpu_addr;
67 *driver_info = (struct gve_driver_info) {
68 .os_type = 3, /* Freebsd */
69 .driver_major = GVE_VERSION_MAJOR,
70 .driver_minor = GVE_VERSION_MINOR,
71 .driver_sub = GVE_VERSION_SUB,
72 .os_version_major = htobe32(FBSD_VERSION_MAJOR),
73 .os_version_minor = htobe32(FBSD_VERSION_MINOR),
74 .os_version_sub = htobe32(FBSD_VERSION_PATCH),
75 .driver_capability_flags = {
76 htobe64(GVE_DRIVER_CAPABILITY_FLAGS1),
77 htobe64(GVE_DRIVER_CAPABILITY_FLAGS2),
78 htobe64(GVE_DRIVER_CAPABILITY_FLAGS3),
79 htobe64(GVE_DRIVER_CAPABILITY_FLAGS4),
83 snprintf(driver_info->os_version_str1, sizeof(driver_info->os_version_str1),
84 "FreeBSD %u", __FreeBSD_version);
/* Flush the CPU-written info to device-visible memory before the command. */
86 bus_dmamap_sync(driver_info_mem.tag, driver_info_mem.map,
89 err = gve_adminq_verify_driver_compatibility(priv,
90 sizeof(struct gve_driver_info), driver_info_mem.bus_addr);
92 /* It's ok if the device doesn't support this */
93 if (err == EOPNOTSUPP)
96 gve_dma_free_coherent(&driver_info_mem);
/*
 * Bring the interface up: program the hardware-assist (checksum/TSO) bits
 * from the currently enabled capabilities, register the queue page lists,
 * create the rx and tx rings on the device, mark the ifnet running, report
 * link up, and unmask the per-queue interrupts.
 *
 * Caller must hold the iface lock (asserted below). No-op if the queues are
 * already up. On failure the tail presumably schedules an async device
 * reset via gve_schedule_reset() — error-branch lines are not visible here.
 */
102 gve_up(struct gve_priv *priv)
104 if_t ifp = priv->ifp;
107 GVE_IFACE_LOCK_ASSERT(priv->gve_iface_lock);
109 if (device_is_attached(priv->dev) == 0) {
110 device_printf(priv->dev, "Cannot bring the iface up when detached\n");
114 if (gve_get_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP))
/* Rebuild hwassist from scratch so it exactly mirrors capenable. */
117 if_clearhwassist(ifp);
118 if (if_getcapenable(ifp) & IFCAP_TXCSUM)
119 if_sethwassistbits(ifp, CSUM_TCP | CSUM_UDP, 0);
120 if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6)
121 if_sethwassistbits(ifp, CSUM_IP6_TCP | CSUM_IP6_UDP, 0);
122 if (if_getcapenable(ifp) & IFCAP_TSO4)
123 if_sethwassistbits(ifp, CSUM_IP_TSO, 0);
124 if (if_getcapenable(ifp) & IFCAP_TSO6)
125 if_sethwassistbits(ifp, CSUM_IP6_TSO, 0);
127 err = gve_register_qpls(priv);
131 err = gve_create_rx_rings(priv);
135 err = gve_create_tx_rings(priv);
139 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
141 if (!gve_get_state_flag(priv, GVE_STATE_FLAG_LINK_UP)) {
142 if_link_state_change(ifp, LINK_STATE_UP);
143 gve_set_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
146 gve_unmask_all_queue_irqs(priv);
147 gve_set_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP);
148 priv->interface_up_cnt++;
/* Presumably the failure label: fall back to a full device reset. */
152 gve_schedule_reset(priv);
/*
 * Bring the interface down: report link down, clear the running flag,
 * destroy the device rx/tx rings, unregister the queue page lists, and mask
 * the per-queue interrupts.
 *
 * Caller must hold the iface lock (asserted below). No-op if the queues are
 * not up. If any destroy/unregister step fails, control presumably jumps to
 * the gve_schedule_reset() call at the tail — error-branch lines are not
 * visible here.
 */
157 gve_down(struct gve_priv *priv)
159 GVE_IFACE_LOCK_ASSERT(priv->gve_iface_lock);
161 if (!gve_get_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP))
164 if (gve_get_state_flag(priv, GVE_STATE_FLAG_LINK_UP)) {
165 if_link_state_change(priv->ifp, LINK_STATE_DOWN);
166 gve_clear_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
169 if_setdrvflagbits(priv->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
171 if (gve_destroy_rx_rings(priv) != 0)
174 if (gve_destroy_tx_rings(priv) != 0)
177 if (gve_unregister_qpls(priv) != 0)
180 gve_mask_all_queue_irqs(priv);
181 gve_clear_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP);
182 priv->interface_down_cnt++;
/* Presumably the failure label: recover via a full device reset. */
186 gve_schedule_reset(priv);
/*
 * Validate and apply a new MTU: reject values outside
 * [ETHERMIN, priv->max_mtu], ask the device to switch via the adminq
 * set-MTU command, and update the ifnet's MTU only on success.
 */
190 gve_set_mtu(if_t ifp, uint32_t new_mtu)
192 struct gve_priv *priv = if_getsoftc(ifp);
195 if ((new_mtu > priv->max_mtu) || (new_mtu < ETHERMIN)) {
196 device_printf(priv->dev, "Invalid new MTU setting. new mtu: %d max mtu: %d min mtu: %d\n",
197 new_mtu, priv->max_mtu, ETHERMIN);
201 err = gve_adminq_set_mtu(priv, new_mtu);
204 device_printf(priv->dev, "MTU set to %d\n", new_mtu);
205 if_setmtu(ifp, new_mtu);
207 device_printf(priv->dev, "Failed to set MTU to %d\n", new_mtu);
/*
 * if_init handler (registered via if_setinitfn in gve_setup_ifnet).
 * If the queues are not already up, takes the iface lock and presumably
 * brings the interface up (the call between lock and unlock is not visible
 * here — confirm it is gve_up()).
 */
216 struct gve_priv *priv = (struct gve_priv *)arg;
218 if (!gve_get_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP)) {
219 GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
221 GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
/*
 * ifnet ioctl handler. The switch's case labels are not visible here, but
 * the visible bodies handle, in order: an MTU change (presumably
 * SIOCSIFMTU), interface up/down flag changes (presumably SIOCSIFFLAGS),
 * capability changes (presumably SIOCSIFCAP), media ioctls forwarded to
 * ifmedia_ioctl(), and a default falling through to ether_ioctl().
 * All state-changing paths are serialized under the iface lock.
 */
226 gve_ioctl(if_t ifp, u_long command, caddr_t data)
228 struct gve_priv *priv;
232 priv = if_getsoftc(ifp);
233 ifr = (struct ifreq *)data;
/* MTU change: skip the adminq round-trip if the MTU is unchanged. */
237 if (if_getmtu(ifp) == ifr->ifr_mtu)
239 GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
241 gve_set_mtu(ifp, ifr->ifr_mtu);
243 GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
/* Flag change: reconcile IFF_UP with the current IFF_DRV_RUNNING state. */
247 if ((if_getflags(ifp) & IFF_UP) != 0) {
248 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
249 GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
251 GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
254 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
255 GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
257 GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
/* Capability change: no-op if the requested set matches what is enabled. */
263 if (ifr->ifr_reqcap == if_getcapenable(ifp))
265 GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
267 if_setcapenable(ifp, ifr->ifr_reqcap);
269 GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
275 rc = ifmedia_ioctl(ifp, ifr, &priv->media, command);
279 rc = ether_ioctl(ifp, command, data);
/*
 * ifmedia change callback. The virtual NIC exposes only auto-selected
 * media, so manual media changes are rejected with a diagnostic.
 */
287 gve_media_change(if_t ifp)
289 struct gve_priv *priv = if_getsoftc(ifp);
291 device_printf(priv->dev, "Media change not supported\n");
/*
 * ifmedia status callback: report a valid Ethernet link, ACTIVE/AUTO when
 * the driver's link-up flag is set, NONE otherwise. Reads the flag under
 * the iface lock.
 */
296 gve_media_status(if_t ifp, struct ifmediareq *ifmr)
298 struct gve_priv *priv = if_getsoftc(ifp);
300 GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
302 ifmr->ifm_status = IFM_AVALID;
303 ifmr->ifm_active = IFM_ETHER;
305 if (gve_get_state_flag(priv, GVE_STATE_FLAG_LINK_UP)) {
306 ifmr->ifm_status |= IFM_ACTIVE;
307 ifmr->ifm_active |= IFM_AUTO;
309 ifmr->ifm_active |= IFM_NONE;
312 GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
/*
 * if_get_counter callback: accumulate per-ring packet/byte/drop totals via
 * gve_accum_stats() and return the one matching the requested counter;
 * anything else falls through to the stack's default accounting.
 * (The return statements for the packet/byte cases are not visible here.)
 */
316 gve_get_counter(if_t ifp, ift_counter cnt)
318 struct gve_priv *priv;
319 uint64_t rpackets = 0;
320 uint64_t tpackets = 0;
323 uint64_t rx_dropped_pkt = 0;
324 uint64_t tx_dropped_pkt = 0;
326 priv = if_getsoftc(ifp);
328 gve_accum_stats(priv, &rpackets, &rbytes, &rx_dropped_pkt, &tpackets,
329 &tbytes, &tx_dropped_pkt);
332 case IFCOUNTER_IPACKETS:
335 case IFCOUNTER_OPACKETS:
338 case IFCOUNTER_IBYTES:
341 case IFCOUNTER_OBYTES:
344 case IFCOUNTER_IQDROPS:
345 return (rx_dropped_pkt);
347 case IFCOUNTER_OQDROPS:
348 return (tx_dropped_pkt);
351 return (if_get_counter_default(ifp, cnt));
/*
 * Allocate and configure the ifnet: install the driver's init/ioctl/
 * transmit/qflush entry points, advertise capabilities, set the initial
 * MTU to the device-reported maximum, attach the Ethernet layer with the
 * device MAC address, and register a single auto-select media type.
 */
356 gve_setup_ifnet(device_t dev, struct gve_priv *priv)
361 ifp = priv->ifp = if_alloc(IFT_ETHER);
363 device_printf(priv->dev, "Failed to allocate ifnet struct\n");
367 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
368 if_setsoftc(ifp, priv);
370 if_setinitfn(ifp, gve_init);
371 if_setioctlfn(ifp, gve_ioctl);
372 if_settransmitfn(ifp, gve_xmit_ifp);
373 if_setqflushfn(ifp, gve_qflush);
/*
 * IFF_KNOWSEPOCH is only needed (and only exists) on pre-1400086 kernels;
 * the two branches otherwise set the same flags.
 */
375 #if __FreeBSD_version >= 1400086
376 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
378 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_KNOWSEPOCH);
381 ifmedia_init(&priv->media, IFM_IMASK, gve_media_change, gve_media_status);
382 if_setgetcounterfn(ifp, gve_get_counter);
384 caps = IFCAP_RXCSUM |
/* Jumbo frames are advertised only when the device reports support. */
390 if ((priv->supported_features & GVE_SUP_JUMBO_FRAMES_MASK) != 0)
391 caps |= IFCAP_JUMBO_MTU;
393 if_setcapabilities(ifp, caps);
/* Enable everything we advertise by default. */
394 if_setcapenable(ifp, caps);
397 device_printf(priv->dev, "Setting initial MTU to %d\n", priv->max_mtu);
398 if_setmtu(ifp, priv->max_mtu);
400 ether_ifattach(ifp, priv->mac);
402 ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL);
403 ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO);
/*
 * Allocate the DMA-coherent, page-aligned event-counter array (one uint32_t
 * per device event counter) and publish its CPU mapping in priv->counters.
 */
409 gve_alloc_counter_array(struct gve_priv *priv)
413 err = gve_dma_alloc_coherent(priv, sizeof(uint32_t) * priv->num_event_counters,
414 PAGE_SIZE, &priv->counter_array_mem);
418 priv->counters = priv->counter_array_mem.cpu_addr;
/*
 * Release the event-counter array DMA memory, if allocated, and zero the
 * handle so a repeat call (e.g. during teardown after a partial attach)
 * is harmless.
 */
423 gve_free_counter_array(struct gve_priv *priv)
425 if (priv->counters != NULL)
426 gve_dma_free_coherent(&priv->counter_array_mem);
427 priv->counter_array_mem = (struct gve_dma_handle){};
/*
 * Allocate the DMA-coherent, page-aligned per-queue interrupt-doorbell
 * array and publish its CPU mapping in priv->irq_db_indices.
 */
431 gve_alloc_irq_db_array(struct gve_priv *priv)
435 err = gve_dma_alloc_coherent(priv,
436 sizeof(struct gve_irq_db) * (priv->num_queues), PAGE_SIZE,
441 priv->irq_db_indices = priv->irqs_db_mem.cpu_addr;
/*
 * Release the interrupt-doorbell array DMA memory, if allocated, and zero
 * the handle so a repeat call is harmless.
 */
446 gve_free_irq_db_array(struct gve_priv *priv)
448 if (priv->irq_db_indices != NULL)
449 gve_dma_free_coherent(&priv->irqs_db_mem);
450 priv->irqs_db_mem = (struct gve_dma_handle){};
/*
 * Free host-side ring state (tx then rx); counterpart of gve_alloc_rings.
 */
454 gve_free_rings(struct gve_priv *priv)
457 gve_free_tx_rings(priv);
458 gve_free_rx_rings(priv);
/*
 * Allocate all host-side queue state: queue page lists, rx rings, tx rings,
 * and interrupts, in that order. On failure of a later step the tail
 * presumably unwinds via gve_free_rings() — the error-branch lines are not
 * visible here.
 */
463 gve_alloc_rings(struct gve_priv *priv)
467 err = gve_alloc_qpls(priv);
471 err = gve_alloc_rx_rings(priv);
475 err = gve_alloc_tx_rings(priv);
479 err = gve_alloc_irqs(priv);
486 gve_free_rings(priv);
/*
 * Undo gve_configure_resources: if the device-resources flag is set, tell
 * the device to tear down its resources (clearing the flag on success),
 * then free the host-side doorbell and counter arrays unconditionally.
 */
491 gve_deconfigure_resources(struct gve_priv *priv)
495 if (gve_get_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK)) {
496 err = gve_adminq_deconfigure_device_resources(priv);
498 device_printf(priv->dev, "Failed to deconfigure device resources: err=%d\n",
503 device_printf(priv->dev, "Deconfigured device resources\n");
504 gve_clear_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK);
507 gve_free_irq_db_array(priv);
508 gve_free_counter_array(priv);
/*
 * Allocate the counter and interrupt-doorbell arrays and register them with
 * the device via the adminq configure-device-resources command, setting
 * GVE_STATE_FLAG_RESOURCES_OK on success. Idempotent: returns early if the
 * flag is already set. The gve_deconfigure_resources() call at the tail is
 * presumably the failure-unwind path.
 */
512 gve_configure_resources(struct gve_priv *priv)
516 if (gve_get_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK))
519 err = gve_alloc_counter_array(priv)
523 err = gve_alloc_irq_db_array(priv);
527 err = gve_adminq_configure_device_resources(priv);
529 device_printf(priv->dev, "Failed to configure device resources: err=%d\n",
535 gve_set_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK);
537 device_printf(priv->dev, "Configured device resources\n");
541 gve_deconfigure_resources(priv);
/*
 * Read the device's tx/rx queue maxima from the register BAR, default the
 * active counts to those maxima, clamp both to the user-configured
 * default_num_queues when set, and derive the total queue count plus the
 * management MSI-X vector index (placed just past the queue vectors).
 */
546 gve_set_queue_cnts(struct gve_priv *priv)
548 priv->tx_cfg.max_queues = gve_reg_bar_read_4(priv, MAX_TX_QUEUES);
549 priv->rx_cfg.max_queues = gve_reg_bar_read_4(priv, MAX_RX_QUEUES);
550 priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
551 priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
553 if (priv->default_num_queues > 0) {
554 priv->tx_cfg.num_queues = MIN(priv->default_num_queues,
555 priv->tx_cfg.num_queues);
556 priv->rx_cfg.num_queues = MIN(priv->default_num_queues,
557 priv->rx_cfg.num_queues);
560 priv->num_queues = priv->tx_cfg.num_queues + priv->rx_cfg.num_queues;
561 priv->mgmt_msix_idx = priv->num_queues;
/*
 * Early attach step: allocate the admin queue, verify driver/device
 * compatibility, fetch the device description (MTU, MAC, queue limits,
 * features), and compute the queue counts. The gve_release_adminq() call
 * at the tail is presumably the failure-unwind path.
 */
565 gve_alloc_adminq_and_describe_device(struct gve_priv *priv)
569 if ((err = gve_adminq_alloc(priv)) != 0)
572 if ((err = gve_verify_driver_compatibility(priv)) != 0) {
573 device_printf(priv->dev,
574 "Failed to verify driver compatibility: err=%d\n", err);
578 if ((err = gve_adminq_describe_device(priv)) != 0)
581 gve_set_queue_cnts(priv);
583 priv->num_registered_pages = 0;
587 gve_release_adminq(priv);
/*
 * Request an asynchronous device reset: set the DO_RESET flag and kick the
 * service taskqueue, which performs the reset in gve_handle_reset().
 * No-op while a reset is already in progress.
 */
592 gve_schedule_reset(struct gve_priv *priv)
594 if (gve_get_state_flag(priv, GVE_STATE_FLAG_IN_RESET))
597 device_printf(priv->dev, "Scheduling reset task!\n");
598 gve_set_state_flag(priv, GVE_STATE_FLAG_DO_RESET);
599 taskqueue_enqueue(priv->service_tq, &priv->service_task);
/*
 * Tear down device-side state: deconfigure device resources and release
 * the admin queue. (Further teardown lines, if any, are not visible here.)
 */
603 gve_destroy(struct gve_priv *priv)
606 gve_deconfigure_resources(priv);
607 gve_release_adminq(priv);
/*
 * Re-establish device state after a reset: re-allocate the admin queue and
 * re-configure device resources (further restore steps are not visible
 * here). On failure, logs and presumably escalates.
 */
611 gve_restore(struct gve_priv *priv)
615 err = gve_adminq_alloc(priv);
619 err = gve_configure_resources(priv);
630 device_printf(priv->dev, "Restore failed!\n");
/*
 * Service-task handler for a pending reset request: if DO_RESET is set,
 * swap it for IN_RESET, take the iface lock, force the interface
 * administratively down (drv flags and link state), release the admin
 * queue, clear the resource/ring state flags (see comment below for why),
 * then — in lines not visible here — presumably run gve_down/gve_restore/
 * gve_up before unlocking and clearing IN_RESET.
 */
635 gve_handle_reset(struct gve_priv *priv)
637 if (!gve_get_state_flag(priv, GVE_STATE_FLAG_DO_RESET))
640 gve_clear_state_flag(priv, GVE_STATE_FLAG_DO_RESET);
641 gve_set_state_flag(priv, GVE_STATE_FLAG_IN_RESET);
643 GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
645 if_setdrvflagbits(priv->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
646 if_link_state_change(priv->ifp, LINK_STATE_DOWN);
647 gve_clear_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
650 * Releasing the adminq causes the NIC to destroy all resources
651 * registered with it, so by clearing the flags beneath we cause
652 * the subsequent gve_down call below to not attempt to tell the
653 * NIC to destroy these resources again.
655 * The call to gve_down is needed in the first place to refresh
656 * the state and the DMA-able memory within each driver ring.
658 gve_release_adminq(priv);
659 gve_clear_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK);
660 gve_clear_state_flag(priv, GVE_STATE_FLAG_QPLREG_OK);
661 gve_clear_state_flag(priv, GVE_STATE_FLAG_RX_RINGS_OK);
662 gve_clear_state_flag(priv, GVE_STATE_FLAG_TX_RINGS_OK);
667 GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
670 gve_clear_state_flag(priv, GVE_STATE_FLAG_IN_RESET);
/*
 * Service-task handler for link changes: read the device status register,
 * and if the hardware link bit disagrees with the driver's cached LINK_UP
 * flag, propagate the new state to the stack and update the flag.
 */
674 gve_handle_link_status(struct gve_priv *priv)
676 uint32_t status = gve_reg_bar_read_4(priv, DEVICE_STATUS);
677 bool link_up = status & GVE_DEVICE_STATUS_LINK_STATUS;
/* Nothing to do if the cached state already matches the hardware. */
679 if (link_up == gve_get_state_flag(priv, GVE_STATE_FLAG_LINK_UP))
684 device_printf(priv->dev, "Device link is up.\n");
685 if_link_state_change(priv->ifp, LINK_STATE_UP);
686 gve_set_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
688 device_printf(priv->dev, "Device link is down.\n");
689 if_link_state_change(priv->ifp, LINK_STATE_DOWN);
690 gve_clear_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
/*
 * Service taskqueue handler: check the device status register for a
 * device-requested reset (setting DO_RESET if one is pending and we are
 * not already resetting), then process any pending reset and refresh the
 * link state.
 */
695 gve_service_task(void *arg, int pending)
697 struct gve_priv *priv = (struct gve_priv *)arg;
698 uint32_t status = gve_reg_bar_read_4(priv, DEVICE_STATUS);
700 if (((GVE_DEVICE_STATUS_RESET_MASK & status) != 0) &&
701 !gve_get_state_flag(priv, GVE_STATE_FLAG_IN_RESET)) {
702 device_printf(priv->dev, "Device requested reset\n");
703 gve_set_state_flag(priv, GVE_STATE_FLAG_DO_RESET);
706 gve_handle_reset(priv);
707 gve_handle_link_status(priv);
/*
 * device_probe method: match the PCI vendor/device pair against the
 * gve_devs table; on a hit, set the device description and claim the
 * device at default priority.
 */
711 gve_probe(device_t dev)
713 uint16_t deviceid, vendorid;
716 vendorid = pci_get_vendor(dev);
717 deviceid = pci_get_device(dev);
719 for (i = 0; i < nitems(gve_devs); i++) {
720 if (vendorid == gve_devs[i].vendor_id &&
721 deviceid == gve_devs[i].device_id) {
722 device_set_desc(dev, gve_devs[i].name);
723 return (BUS_PROBE_DEFAULT);
/*
 * Release the PCI memory resources claimed in gve_attach (MSI-X table,
 * doorbell BAR, register BAR) in reverse order of allocation; each is
 * guarded so the function is safe on a partially attached device.
 */
730 gve_free_sys_res_mem(struct gve_priv *priv)
732 if (priv->msix_table != NULL)
733 bus_release_resource(priv->dev, SYS_RES_MEMORY,
734 rman_get_rid(priv->msix_table), priv->msix_table);
736 if (priv->db_bar != NULL)
737 bus_release_resource(priv->dev, SYS_RES_MEMORY,
738 rman_get_rid(priv->db_bar), priv->db_bar);
740 if (priv->reg_bar != NULL)
741 bus_release_resource(priv->dev, SYS_RES_MEMORY,
742 rman_get_rid(priv->reg_bar), priv->reg_bar);
/*
 * device_attach method. In order: init the iface lock, enable bus
 * mastering, map the register BAR, doorbell BAR, and MSI-X table BAR,
 * bring up the admin queue and describe the device, configure device
 * resources, allocate rings, create the ifnet, write the driver version
 * string to the device, and start the single-threaded service taskqueue.
 * The tail (from gve_free_rings down) is presumably the shared failure-
 * unwind path, releasing everything in reverse order — the goto/label
 * lines are not visible here.
 */
746 gve_attach(device_t dev)
748 struct gve_priv *priv;
752 priv = device_get_softc(dev);
754 GVE_IFACE_LOCK_INIT(priv->gve_iface_lock);
756 pci_enable_busmaster(dev);
758 rid = PCIR_BAR(GVE_REGISTER_BAR);
759 priv->reg_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
761 if (priv->reg_bar == NULL) {
762 device_printf(dev, "Failed to allocate BAR0\n");
767 rid = PCIR_BAR(GVE_DOORBELL_BAR);
768 priv->db_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
770 if (priv->db_bar == NULL) {
771 device_printf(dev, "Failed to allocate BAR2\n");
776 rid = pci_msix_table_bar(priv->dev);
777 priv->msix_table = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
779 if (priv->msix_table == NULL) {
780 device_printf(dev, "Failed to allocate msix table\n");
785 err = gve_alloc_adminq_and_describe_device(priv);
789 err = gve_configure_resources(priv);
793 err = gve_alloc_rings(priv);
797 err = gve_setup_ifnet(dev, priv);
801 priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;
/* Version string is written without its NUL; it ends in '\n' by design. */
803 bus_write_multi_1(priv->reg_bar, DRIVER_VERSION, GVE_DRIVER_VERSION,
804 sizeof(GVE_DRIVER_VERSION) - 1);
806 TASK_INIT(&priv->service_task, 0, gve_service_task, priv);
807 priv->service_tq = taskqueue_create("gve service", M_WAITOK | M_ZERO,
808 taskqueue_thread_enqueue, &priv->service_tq);
809 taskqueue_start_threads(&priv->service_tq, 1, PI_NET, "%s service tq",
810 device_get_nameunit(priv->dev));
812 gve_setup_sysctl(priv);
/* GVE_DRIVER_VERSION supplies the trailing newline for this message. */
815 device_printf(priv->dev, "Successfully attached %s", GVE_DRIVER_VERSION);
819 gve_free_rings(priv);
820 gve_deconfigure_resources(priv);
821 gve_release_adminq(priv);
822 gve_free_sys_res_mem(priv);
823 GVE_IFACE_LOCK_DESTROY(priv->gve_iface_lock);
/*
 * device_detach method: quiesce the interface under the iface lock
 * (the call between lock and unlock is not visible here — presumably
 * gve_destroy or gve_down), free rings and PCI resources, destroy the
 * lock, drain and free the service taskqueue, and finish with generic
 * bus detach.
 */
828 gve_detach(device_t dev)
830 struct gve_priv *priv = device_get_softc(dev);
831 if_t ifp = priv->ifp;
835 GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
837 GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
839 gve_free_rings(priv);
840 gve_free_sys_res_mem(priv);
841 GVE_IFACE_LOCK_DESTROY(priv->gve_iface_lock);
/*
 * Cancel any queued (not yet running) service task; if one is running,
 * taskqueue_cancel fails and we drain until it completes.
 */
843 while (taskqueue_cancel(priv->service_tq, &priv->service_task, NULL))
844 taskqueue_drain(priv->service_tq, &priv->service_task);
845 taskqueue_free(priv->service_tq);
848 return (bus_generic_detach(dev));
/* newbus method table wiring probe/attach/detach to the driver. */
851 static device_method_t gve_methods[] = {
852 DEVMETHOD(device_probe, gve_probe),
853 DEVMETHOD(device_attach, gve_attach),
854 DEVMETHOD(device_detach, gve_detach),
858 static driver_t gve_driver = {
861 sizeof(struct gve_priv)
/*
 * DRIVER_MODULE lost its devclass argument in FreeBSD 1301503; keep the
 * old form for earlier kernels.
 */
864 #if __FreeBSD_version < 1301503
865 static devclass_t gve_devclass;
867 DRIVER_MODULE(gve, pci, gve_driver, gve_devclass, 0, 0);
869 DRIVER_MODULE(gve, pci, gve_driver, 0, 0);
/* Export the PCI ID table so devmatch(8) can autoload the module. */
871 MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, gve, gve_devs,