/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2015-2021 Amazon.com, Inc. or its affiliates.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/in_cksum.h>
#include <machine/resource.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <net/ethernet.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include "ena_datapath.h"
#include "ena_sysctl.h"

#ifdef DEV_NETMAP
#include "ena_netmap.h"
#endif /* DEV_NETMAP */
/*********************************************************
 *  Function prototypes
 *********************************************************/
static int ena_probe(device_t);
static void ena_intr_msix_mgmnt(void *);
static void ena_free_pci_resources(struct ena_adapter *);
static int ena_change_mtu(if_t, int);
static inline void ena_alloc_counters(counter_u64_t *, int);
static inline void ena_free_counters(counter_u64_t *, int);
static inline void ena_reset_counters(counter_u64_t *, int);
static void ena_init_io_rings_common(struct ena_adapter *, struct ena_ring *,
    uint16_t);
static void ena_init_io_rings_basic(struct ena_adapter *);
static void ena_init_io_rings_advanced(struct ena_adapter *);
static void ena_init_io_rings(struct ena_adapter *);
static void ena_free_io_ring_resources(struct ena_adapter *, unsigned int);
static void ena_free_all_io_rings_resources(struct ena_adapter *);
static int ena_setup_tx_dma_tag(struct ena_adapter *);
static int ena_free_tx_dma_tag(struct ena_adapter *);
static int ena_setup_rx_dma_tag(struct ena_adapter *);
static int ena_free_rx_dma_tag(struct ena_adapter *);
static void ena_release_all_tx_dmamap(struct ena_ring *);
static int ena_setup_tx_resources(struct ena_adapter *, int);
static void ena_free_tx_resources(struct ena_adapter *, int);
static int ena_setup_all_tx_resources(struct ena_adapter *);
static void ena_free_all_tx_resources(struct ena_adapter *);
static int ena_setup_rx_resources(struct ena_adapter *, unsigned int);
static void ena_free_rx_resources(struct ena_adapter *, unsigned int);
static int ena_setup_all_rx_resources(struct ena_adapter *);
static void ena_free_all_rx_resources(struct ena_adapter *);
static inline int ena_alloc_rx_mbuf(struct ena_adapter *, struct ena_ring *,
    struct ena_rx_buffer *);
static void ena_free_rx_mbuf(struct ena_adapter *, struct ena_ring *,
    struct ena_rx_buffer *);
static void ena_free_rx_bufs(struct ena_adapter *, unsigned int);
static void ena_refill_all_rx_bufs(struct ena_adapter *);
static void ena_free_all_rx_bufs(struct ena_adapter *);
static void ena_free_tx_bufs(struct ena_adapter *, unsigned int);
static void ena_free_all_tx_bufs(struct ena_adapter *);
static void ena_destroy_all_tx_queues(struct ena_adapter *);
static void ena_destroy_all_rx_queues(struct ena_adapter *);
static void ena_destroy_all_io_queues(struct ena_adapter *);
static int ena_create_io_queues(struct ena_adapter *);
static int ena_handle_msix(void *);
static int ena_enable_msix(struct ena_adapter *);
static void ena_setup_mgmnt_intr(struct ena_adapter *);
static int ena_setup_io_intr(struct ena_adapter *);
static int ena_request_mgmnt_irq(struct ena_adapter *);
static int ena_request_io_irq(struct ena_adapter *);
static void ena_free_mgmnt_irq(struct ena_adapter *);
static void ena_free_io_irq(struct ena_adapter *);
static void ena_free_irqs(struct ena_adapter *);
static void ena_disable_msix(struct ena_adapter *);
static void ena_unmask_all_io_irqs(struct ena_adapter *);
static int ena_up_complete(struct ena_adapter *);
static uint64_t ena_get_counter(if_t, ift_counter);
static int ena_media_change(if_t);
static void ena_media_status(if_t, struct ifmediareq *);
static void ena_init(void *);
static int ena_ioctl(if_t, u_long, caddr_t);
static int ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *);
static void ena_update_host_info(struct ena_admin_host_info *, if_t);
static void ena_update_hwassist(struct ena_adapter *);
static int ena_setup_ifnet(device_t, struct ena_adapter *,
    struct ena_com_dev_get_features_ctx *);
static int ena_enable_wc(device_t, struct resource *);
static int ena_set_queues_placement_policy(device_t, struct ena_com_dev *,
    struct ena_admin_feature_llq_desc *, struct ena_llq_configurations *);
static int ena_map_llq_mem_bar(device_t, struct ena_com_dev *);
static uint32_t ena_calc_max_io_queue_num(device_t, struct ena_com_dev *,
    struct ena_com_dev_get_features_ctx *);
static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *);
static void ena_config_host_info(struct ena_com_dev *, device_t);
static int ena_attach(device_t);
static int ena_detach(device_t);
static int ena_device_init(struct ena_adapter *, device_t,
    struct ena_com_dev_get_features_ctx *, int *);
static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *);
static void ena_update_on_link_change(void *, struct ena_admin_aenq_entry *);
static void unimplemented_aenq_handler(void *, struct ena_admin_aenq_entry *);
static int ena_copy_eni_metrics(struct ena_adapter *);
static void ena_timer_service(void *);

static char ena_version[] = ENA_DEVICE_NAME ENA_DRV_MODULE_NAME
    " v" ENA_DRV_MODULE_VERSION;
static ena_vendor_info_t ena_vendor_info_array[] = {
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF, 0 },
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF_RSERV0, 0 },
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF, 0 },
	{ PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF_RSERV0, 0 },
	/* Last entry */
	{ 0, 0, 0 }
};
struct sx ena_global_lock;

/*
 * Contains pointers to event handlers, e.g. link state change.
 */
static struct ena_aenq_handlers aenq_handlers;
191 ena_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
195 *(bus_addr_t *)arg = segs[0].ds_addr;
199 ena_dma_alloc(device_t dmadev, bus_size_t size, ena_mem_handle_t *dma,
200 int mapflags, bus_size_t alignment, int domain)
202 struct ena_adapter *adapter = device_get_softc(dmadev);
203 device_t pdev = adapter->pdev;
205 uint64_t dma_space_addr;
208 maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE;
210 dma_space_addr = ENA_DMA_BIT_MASK(adapter->dma_width);
211 if (unlikely(dma_space_addr == 0))
212 dma_space_addr = BUS_SPACE_MAXADDR;
214 error = bus_dma_tag_create(bus_get_dma_tag(dmadev), /* parent */
215 alignment, 0, /* alignment, bounds */
216 dma_space_addr, /* lowaddr of exclusion window */
217 BUS_SPACE_MAXADDR, /* highaddr of exclusion window */
218 NULL, NULL, /* filter, filterarg */
219 maxsize, /* maxsize */
221 maxsize, /* maxsegsize */
222 BUS_DMA_ALLOCNOW, /* flags */
226 if (unlikely(error != 0)) {
227 ena_log(pdev, ERR, "bus_dma_tag_create failed: %d\n", error);
231 error = bus_dma_tag_set_domain(dma->tag, domain);
232 if (unlikely(error != 0)) {
233 ena_log(pdev, ERR, "bus_dma_tag_set_domain failed: %d\n",
235 goto fail_map_create;
238 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
239 BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->map);
240 if (unlikely(error != 0)) {
241 ena_log(pdev, ERR, "bus_dmamem_alloc(%ju) failed: %d\n",
242 (uintmax_t)size, error);
243 goto fail_map_create;
247 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
248 ena_dmamap_callback, &dma->paddr, mapflags);
249 if (unlikely((error != 0) || (dma->paddr == 0))) {
250 ena_log(pdev, ERR, "bus_dmamap_load failed: %d\n", error);
254 bus_dmamap_sync(dma->tag, dma->map,
255 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
260 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
262 bus_dma_tag_destroy(dma->tag);
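/*
 * Illustrative usage sketch (not taken from the original sources; it assumes
 * the usual 0-on-success return convention).  Callers hand in an
 * ena_mem_handle_t and, on success, get back both the CPU mapping and the
 * bus address of a DMA-coherent, zeroed buffer:
 *
 *	ena_mem_handle_t mem;
 *
 *	if (ena_dma_alloc(adapter->pdev, len, &mem, 0, alignment, domain) == 0) {
 *		// mem.vaddr is the kernel virtual address and mem.paddr the
 *		// bus address to program into the device.
 *	}
 *
 * Here len, alignment and domain are placeholders supplied by the caller;
 * the buffer is released later by the driver's corresponding free helper
 * (not shown in this excerpt).
 */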
272 ena_free_pci_resources(struct ena_adapter *adapter)
274 device_t pdev = adapter->pdev;
276 if (adapter->memory != NULL) {
277 bus_release_resource(pdev, SYS_RES_MEMORY,
278 PCIR_BAR(ENA_MEM_BAR), adapter->memory);
281 if (adapter->registers != NULL) {
282 bus_release_resource(pdev, SYS_RES_MEMORY,
283 PCIR_BAR(ENA_REG_BAR), adapter->registers);
286 if (adapter->msix != NULL) {
287 bus_release_resource(pdev, SYS_RES_MEMORY, adapter->msix_rid,
293 ena_probe(device_t dev)
295 ena_vendor_info_t *ent;
296 uint16_t pci_vendor_id = 0;
297 uint16_t pci_device_id = 0;
299 pci_vendor_id = pci_get_vendor(dev);
300 pci_device_id = pci_get_device(dev);
302 ent = ena_vendor_info_array;
303 while (ent->vendor_id != 0) {
304 if ((pci_vendor_id == ent->vendor_id) &&
305 (pci_device_id == ent->device_id)) {
306 ena_log_raw(DBG, "vendor=%x device=%x\n", pci_vendor_id,
309 device_set_desc(dev, ENA_DEVICE_DESC);
310 return (BUS_PROBE_DEFAULT);
320 ena_change_mtu(if_t ifp, int new_mtu)
322 struct ena_adapter *adapter = if_getsoftc(ifp);
323 device_t pdev = adapter->pdev;
326 if ((new_mtu > adapter->max_mtu) || (new_mtu < ENA_MIN_MTU)) {
327 ena_log(pdev, ERR, "Invalid MTU setting. new_mtu: %d max mtu: %d min mtu: %d\n",
328 new_mtu, adapter->max_mtu, ENA_MIN_MTU);
332 rc = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
333 if (likely(rc == 0)) {
334 ena_log(pdev, DBG, "set MTU to %d\n", new_mtu);
335 if_setmtu(ifp, new_mtu);
337 ena_log(pdev, ERR, "Failed to set MTU to %d\n", new_mtu);
344 ena_alloc_counters(counter_u64_t *begin, int size)
346 counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
348 for (; begin < end; ++begin)
349 *begin = counter_u64_alloc(M_WAITOK);
353 ena_free_counters(counter_u64_t *begin, int size)
355 counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
357 for (; begin < end; ++begin)
358 counter_u64_free(*begin);
362 ena_reset_counters(counter_u64_t *begin, int size)
364 counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
366 for (; begin < end; ++begin)
367 counter_u64_zero(*begin);
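/*
 * The three helpers above treat a statistics structure as a flat array of
 * counter_u64_t entries: callers pass the struct's address cast to
 * counter_u64_t * together with sizeof() of the whole struct, e.g.
 *
 *	ena_alloc_counters((counter_u64_t *)&txr->tx_stats,
 *	    sizeof(txr->tx_stats));
 *
 * which only works because every member of the stats structures is itself
 * a counter_u64_t.
 */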
371 ena_init_io_rings_common(struct ena_adapter *adapter, struct ena_ring *ring,
375 ring->adapter = adapter;
376 ring->ena_dev = adapter->ena_dev;
377 atomic_store_8(&ring->first_interrupt, 0);
378 ring->no_interrupt_event_cnt = 0;
382 ena_init_io_rings_basic(struct ena_adapter *adapter)
384 struct ena_com_dev *ena_dev;
385 struct ena_ring *txr, *rxr;
389 ena_dev = adapter->ena_dev;
391 for (i = 0; i < adapter->num_io_queues; i++) {
392 txr = &adapter->tx_ring[i];
393 rxr = &adapter->rx_ring[i];
395 /* TX/RX common ring state */
396 ena_init_io_rings_common(adapter, txr, i);
397 ena_init_io_rings_common(adapter, rxr, i);
399 /* TX specific ring state */
400 txr->tx_max_header_size = ena_dev->tx_max_header_size;
401 txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
403 que = &adapter->que[i];
404 que->adapter = adapter;
412 rxr->empty_rx_queue = 0;
413 rxr->rx_mbuf_sz = ena_mbuf_sz;
418 ena_init_io_rings_advanced(struct ena_adapter *adapter)
420 struct ena_ring *txr, *rxr;
423 for (i = 0; i < adapter->num_io_queues; i++) {
424 txr = &adapter->tx_ring[i];
425 rxr = &adapter->rx_ring[i];
427 /* Allocate a buf ring */
428 txr->buf_ring_size = adapter->buf_ring_size;
429 txr->br = buf_ring_alloc(txr->buf_ring_size, M_DEVBUF, M_WAITOK,
432 /* Allocate Tx statistics. */
433 ena_alloc_counters((counter_u64_t *)&txr->tx_stats,
434 sizeof(txr->tx_stats));
435 txr->tx_last_cleanup_ticks = ticks;
437 /* Allocate Rx statistics. */
438 ena_alloc_counters((counter_u64_t *)&rxr->rx_stats,
439 sizeof(rxr->rx_stats));
441 /* Initialize locks */
442 snprintf(txr->mtx_name, nitems(txr->mtx_name), "%s:tx(%d)",
443 device_get_nameunit(adapter->pdev), i);
444 snprintf(rxr->mtx_name, nitems(rxr->mtx_name), "%s:rx(%d)",
445 device_get_nameunit(adapter->pdev), i);
447 mtx_init(&txr->ring_mtx, txr->mtx_name, NULL, MTX_DEF);
452 ena_init_io_rings(struct ena_adapter *adapter)
455 * IO rings initialization can be divided into the 2 steps:
456 * 1. Initialize variables and fields with initial values and copy
457 * them from adapter/ena_dev (basic)
458 * 2. Allocate mutex, counters and buf_ring (advanced)
460 ena_init_io_rings_basic(adapter);
461 ena_init_io_rings_advanced(adapter);
465 ena_free_io_ring_resources(struct ena_adapter *adapter, unsigned int qid)
467 struct ena_ring *txr = &adapter->tx_ring[qid];
468 struct ena_ring *rxr = &adapter->rx_ring[qid];
470 ena_free_counters((counter_u64_t *)&txr->tx_stats,
471 sizeof(txr->tx_stats));
472 ena_free_counters((counter_u64_t *)&rxr->rx_stats,
473 sizeof(rxr->rx_stats));
475 ENA_RING_MTX_LOCK(txr);
476 drbr_free(txr->br, M_DEVBUF);
477 ENA_RING_MTX_UNLOCK(txr);
479 mtx_destroy(&txr->ring_mtx);
483 ena_free_all_io_rings_resources(struct ena_adapter *adapter)
487 for (i = 0; i < adapter->num_io_queues; i++)
488 ena_free_io_ring_resources(adapter, i);
492 ena_setup_tx_dma_tag(struct ena_adapter *adapter)
496 /* Create DMA tag for Tx buffers */
497 ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev),
498 1, 0, /* alignment, bounds */
499 ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */
500 BUS_SPACE_MAXADDR, /* highaddr of excl window */
501 NULL, NULL, /* filter, filterarg */
502 ENA_TSO_MAXSIZE, /* maxsize */
503 adapter->max_tx_sgl_size - 1, /* nsegments */
504 ENA_TSO_MAXSIZE, /* maxsegsize */
507 NULL, /* lockfuncarg */
508 &adapter->tx_buf_tag);
514 ena_free_tx_dma_tag(struct ena_adapter *adapter)
518 ret = bus_dma_tag_destroy(adapter->tx_buf_tag);
520 if (likely(ret == 0))
521 adapter->tx_buf_tag = NULL;
527 ena_setup_rx_dma_tag(struct ena_adapter *adapter)
531 /* Create DMA tag for Rx buffers*/
532 ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), /* parent */
533 1, 0, /* alignment, bounds */
534 ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */
535 BUS_SPACE_MAXADDR, /* highaddr of excl window */
536 NULL, NULL, /* filter, filterarg */
537 ena_mbuf_sz, /* maxsize */
538 adapter->max_rx_sgl_size, /* nsegments */
539 ena_mbuf_sz, /* maxsegsize */
543 &adapter->rx_buf_tag);
549 ena_free_rx_dma_tag(struct ena_adapter *adapter)
553 ret = bus_dma_tag_destroy(adapter->rx_buf_tag);
555 if (likely(ret == 0))
556 adapter->rx_buf_tag = NULL;
562 ena_release_all_tx_dmamap(struct ena_ring *tx_ring)
564 struct ena_adapter *adapter = tx_ring->adapter;
565 struct ena_tx_buffer *tx_info;
566 bus_dma_tag_t tx_tag = adapter->tx_buf_tag;
569 struct ena_netmap_tx_info *nm_info;
571 #endif /* DEV_NETMAP */
573 for (i = 0; i < tx_ring->ring_size; ++i) {
574 tx_info = &tx_ring->tx_buffer_info[i];
576 if (if_getcapenable(adapter->ifp) & IFCAP_NETMAP) {
577 nm_info = &tx_info->nm_info;
578 for (j = 0; j < ENA_PKT_MAX_BUFS; ++j) {
579 if (nm_info->map_seg[j] != NULL) {
580 bus_dmamap_destroy(tx_tag,
581 nm_info->map_seg[j]);
582 nm_info->map_seg[j] = NULL;
586 #endif /* DEV_NETMAP */
587 if (tx_info->dmamap != NULL) {
588 bus_dmamap_destroy(tx_tag, tx_info->dmamap);
589 tx_info->dmamap = NULL;
595 * ena_setup_tx_resources - allocate Tx resources (Descriptors)
596 * @adapter: network interface device structure
 * Returns 0 on success, or a nonzero error code on failure.
602 ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
604 device_t pdev = adapter->pdev;
605 char thread_name[MAXCOMLEN + 1];
606 struct ena_que *que = &adapter->que[qid];
607 struct ena_ring *tx_ring = que->tx_ring;
608 cpuset_t *cpu_mask = NULL;
614 ena_netmap_reset_tx_ring(adapter, qid);
615 #endif /* DEV_NETMAP */
617 size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
619 tx_ring->tx_buffer_info = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
620 if (unlikely(tx_ring->tx_buffer_info == NULL))
623 size = sizeof(uint16_t) * tx_ring->ring_size;
624 tx_ring->free_tx_ids = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
625 if (unlikely(tx_ring->free_tx_ids == NULL))
626 goto err_buf_info_free;
628 size = tx_ring->tx_max_header_size;
629 tx_ring->push_buf_intermediate_buf = malloc(size, M_DEVBUF,
631 if (unlikely(tx_ring->push_buf_intermediate_buf == NULL))
632 goto err_tx_ids_free;
634 /* Req id stack for TX OOO completions */
635 for (i = 0; i < tx_ring->ring_size; i++)
636 tx_ring->free_tx_ids[i] = i;
638 /* Reset TX statistics. */
639 ena_reset_counters((counter_u64_t *)&tx_ring->tx_stats,
640 sizeof(tx_ring->tx_stats));
642 tx_ring->next_to_use = 0;
643 tx_ring->next_to_clean = 0;
644 tx_ring->acum_pkts = 0;
646 /* Make sure that drbr is empty */
647 ENA_RING_MTX_LOCK(tx_ring);
648 drbr_flush(adapter->ifp, tx_ring->br);
649 ENA_RING_MTX_UNLOCK(tx_ring);
651 /* ... and create the buffer DMA maps */
652 for (i = 0; i < tx_ring->ring_size; i++) {
653 err = bus_dmamap_create(adapter->tx_buf_tag, 0,
654 &tx_ring->tx_buffer_info[i].dmamap);
655 if (unlikely(err != 0)) {
657 "Unable to create Tx DMA map for buffer %d\n", i);
658 goto err_map_release;
662 if (if_getcapenable(adapter->ifp) & IFCAP_NETMAP) {
663 map = tx_ring->tx_buffer_info[i].nm_info.map_seg;
664 for (j = 0; j < ENA_PKT_MAX_BUFS; j++) {
665 err = bus_dmamap_create(adapter->tx_buf_tag, 0,
667 if (unlikely(err != 0)) {
669 "Unable to create Tx DMA for buffer %d %d\n",
671 goto err_map_release;
675 #endif /* DEV_NETMAP */
678 /* Allocate taskqueues */
679 TASK_INIT(&tx_ring->enqueue_task, 0, ena_deferred_mq_start, tx_ring);
680 tx_ring->enqueue_tq = taskqueue_create_fast("ena_tx_enque", M_NOWAIT,
681 taskqueue_thread_enqueue, &tx_ring->enqueue_tq);
682 if (unlikely(tx_ring->enqueue_tq == NULL)) {
684 "Unable to create taskqueue for enqueue task\n");
685 i = tx_ring->ring_size;
686 goto err_map_release;
689 tx_ring->running = true;
692 cpu_mask = &que->cpu_mask;
693 snprintf(thread_name, sizeof(thread_name), "%s txeq %d",
694 device_get_nameunit(adapter->pdev), que->cpu);
696 snprintf(thread_name, sizeof(thread_name), "%s txeq %d",
697 device_get_nameunit(adapter->pdev), que->id);
699 taskqueue_start_threads_cpuset(&tx_ring->enqueue_tq, 1, PI_NET,
700 cpu_mask, "%s", thread_name);
705 ena_release_all_tx_dmamap(tx_ring);
707 free(tx_ring->free_tx_ids, M_DEVBUF);
708 tx_ring->free_tx_ids = NULL;
710 free(tx_ring->tx_buffer_info, M_DEVBUF);
711 tx_ring->tx_buffer_info = NULL;
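/*
 * The error path above unwinds the allocations in reverse order: the
 * err_map_release target destroys any per-buffer DMA maps created so far
 * (via ena_release_all_tx_dmamap()), err_tx_ids_free releases the
 * free_tx_ids array, and err_buf_info_free releases tx_buffer_info.
 */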
717 * ena_free_tx_resources - Free Tx Resources per Queue
718 * @adapter: network interface device structure
721 * Free all transmit software resources
724 ena_free_tx_resources(struct ena_adapter *adapter, int qid)
726 struct ena_ring *tx_ring = &adapter->tx_ring[qid];
728 struct ena_netmap_tx_info *nm_info;
730 #endif /* DEV_NETMAP */
732 while (taskqueue_cancel(tx_ring->enqueue_tq, &tx_ring->enqueue_task, NULL))
733 taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
735 taskqueue_free(tx_ring->enqueue_tq);
737 ENA_RING_MTX_LOCK(tx_ring);
738 /* Flush buffer ring, */
739 drbr_flush(adapter->ifp, tx_ring->br);
741 /* Free buffer DMA maps, */
742 for (int i = 0; i < tx_ring->ring_size; i++) {
743 bus_dmamap_sync(adapter->tx_buf_tag,
744 tx_ring->tx_buffer_info[i].dmamap, BUS_DMASYNC_POSTWRITE);
745 bus_dmamap_unload(adapter->tx_buf_tag,
746 tx_ring->tx_buffer_info[i].dmamap);
747 bus_dmamap_destroy(adapter->tx_buf_tag,
748 tx_ring->tx_buffer_info[i].dmamap);
751 if (if_getcapenable(adapter->ifp) & IFCAP_NETMAP) {
752 nm_info = &tx_ring->tx_buffer_info[i].nm_info;
753 for (j = 0; j < ENA_PKT_MAX_BUFS; j++) {
754 if (nm_info->socket_buf_idx[j] != 0) {
755 bus_dmamap_sync(adapter->tx_buf_tag,
757 BUS_DMASYNC_POSTWRITE);
758 ena_netmap_unload(adapter,
759 nm_info->map_seg[j]);
761 bus_dmamap_destroy(adapter->tx_buf_tag,
762 nm_info->map_seg[j]);
763 nm_info->socket_buf_idx[j] = 0;
766 #endif /* DEV_NETMAP */
768 m_freem(tx_ring->tx_buffer_info[i].mbuf);
769 tx_ring->tx_buffer_info[i].mbuf = NULL;
771 ENA_RING_MTX_UNLOCK(tx_ring);
773 /* And free allocated memory. */
774 free(tx_ring->tx_buffer_info, M_DEVBUF);
775 tx_ring->tx_buffer_info = NULL;
777 free(tx_ring->free_tx_ids, M_DEVBUF);
778 tx_ring->free_tx_ids = NULL;
780 free(tx_ring->push_buf_intermediate_buf, M_DEVBUF);
781 tx_ring->push_buf_intermediate_buf = NULL;
785 * ena_setup_all_tx_resources - allocate all queues Tx resources
786 * @adapter: network interface device structure
 * Returns 0 on success, or a nonzero error code on failure.
791 ena_setup_all_tx_resources(struct ena_adapter *adapter)
795 for (i = 0; i < adapter->num_io_queues; i++) {
796 rc = ena_setup_tx_resources(adapter, i);
798 ena_log(adapter->pdev, ERR,
799 "Allocation for Tx Queue %u failed\n", i);
807 /* Rewind the index freeing the rings as we go */
809 ena_free_tx_resources(adapter, i);
814 * ena_free_all_tx_resources - Free Tx Resources for All Queues
815 * @adapter: network interface device structure
817 * Free all transmit software resources
820 ena_free_all_tx_resources(struct ena_adapter *adapter)
824 for (i = 0; i < adapter->num_io_queues; i++)
825 ena_free_tx_resources(adapter, i);
829 * ena_setup_rx_resources - allocate Rx resources (Descriptors)
830 * @adapter: network interface device structure
 * Returns 0 on success, or a nonzero error code on failure.
836 ena_setup_rx_resources(struct ena_adapter *adapter, unsigned int qid)
838 device_t pdev = adapter->pdev;
839 struct ena_que *que = &adapter->que[qid];
840 struct ena_ring *rx_ring = que->rx_ring;
843 size = sizeof(struct ena_rx_buffer) * rx_ring->ring_size;
846 ena_netmap_reset_rx_ring(adapter, qid);
847 rx_ring->initialized = false;
848 #endif /* DEV_NETMAP */
851 * Alloc extra element so in rx path
852 * we can always prefetch rx_info + 1
854 size += sizeof(struct ena_rx_buffer);
856 rx_ring->rx_buffer_info = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
858 size = sizeof(uint16_t) * rx_ring->ring_size;
859 rx_ring->free_rx_ids = malloc(size, M_DEVBUF, M_WAITOK);
861 for (i = 0; i < rx_ring->ring_size; i++)
862 rx_ring->free_rx_ids[i] = i;
864 /* Reset RX statistics. */
865 ena_reset_counters((counter_u64_t *)&rx_ring->rx_stats,
866 sizeof(rx_ring->rx_stats));
868 rx_ring->next_to_clean = 0;
869 rx_ring->next_to_use = 0;
871 /* ... and create the buffer DMA maps */
872 for (i = 0; i < rx_ring->ring_size; i++) {
873 err = bus_dmamap_create(adapter->rx_buf_tag, 0,
874 &(rx_ring->rx_buffer_info[i].map));
877 "Unable to create Rx DMA map for buffer %d\n", i);
878 goto err_buf_info_unmap;
882 /* Create LRO for the ring */
883 if ((if_getcapenable(adapter->ifp) & IFCAP_LRO) != 0) {
884 int err = tcp_lro_init(&rx_ring->lro);
886 ena_log(pdev, ERR, "LRO[%d] Initialization failed!\n",
889 ena_log(pdev, DBG, "RX Soft LRO[%d] Initialized\n",
891 rx_ring->lro.ifp = adapter->ifp;
899 bus_dmamap_destroy(adapter->rx_buf_tag,
900 rx_ring->rx_buffer_info[i].map);
903 free(rx_ring->free_rx_ids, M_DEVBUF);
904 rx_ring->free_rx_ids = NULL;
905 free(rx_ring->rx_buffer_info, M_DEVBUF);
906 rx_ring->rx_buffer_info = NULL;
911 * ena_free_rx_resources - Free Rx Resources
912 * @adapter: network interface device structure
915 * Free all receive software resources
918 ena_free_rx_resources(struct ena_adapter *adapter, unsigned int qid)
920 struct ena_ring *rx_ring = &adapter->rx_ring[qid];
922 /* Free buffer DMA maps, */
923 for (int i = 0; i < rx_ring->ring_size; i++) {
924 bus_dmamap_sync(adapter->rx_buf_tag,
925 rx_ring->rx_buffer_info[i].map, BUS_DMASYNC_POSTREAD);
926 m_freem(rx_ring->rx_buffer_info[i].mbuf);
927 rx_ring->rx_buffer_info[i].mbuf = NULL;
928 bus_dmamap_unload(adapter->rx_buf_tag,
929 rx_ring->rx_buffer_info[i].map);
930 bus_dmamap_destroy(adapter->rx_buf_tag,
931 rx_ring->rx_buffer_info[i].map);
934 /* free LRO resources, */
935 tcp_lro_free(&rx_ring->lro);
937 /* free allocated memory */
938 free(rx_ring->rx_buffer_info, M_DEVBUF);
939 rx_ring->rx_buffer_info = NULL;
941 free(rx_ring->free_rx_ids, M_DEVBUF);
942 rx_ring->free_rx_ids = NULL;
946 * ena_setup_all_rx_resources - allocate all queues Rx resources
947 * @adapter: network interface device structure
 * Returns 0 on success, or a nonzero error code on failure.
952 ena_setup_all_rx_resources(struct ena_adapter *adapter)
956 for (i = 0; i < adapter->num_io_queues; i++) {
957 rc = ena_setup_rx_resources(adapter, i);
959 ena_log(adapter->pdev, ERR,
960 "Allocation for Rx Queue %u failed\n", i);
967 /* rewind the index freeing the rings as we go */
969 ena_free_rx_resources(adapter, i);
974 * ena_free_all_rx_resources - Free Rx resources for all queues
975 * @adapter: network interface device structure
977 * Free all receive software resources
980 ena_free_all_rx_resources(struct ena_adapter *adapter)
984 for (i = 0; i < adapter->num_io_queues; i++)
985 ena_free_rx_resources(adapter, i);
989 ena_alloc_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring,
990 struct ena_rx_buffer *rx_info)
992 device_t pdev = adapter->pdev;
993 struct ena_com_buf *ena_buf;
994 bus_dma_segment_t segs[1];
998 /* if previous allocated frag is not used */
999 if (unlikely(rx_info->mbuf != NULL))
1002 /* Get mbuf using UMA allocator */
1003 rx_info->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
1004 rx_ring->rx_mbuf_sz);
1006 if (unlikely(rx_info->mbuf == NULL)) {
1007 counter_u64_add(rx_ring->rx_stats.mjum_alloc_fail, 1);
1008 rx_info->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1009 if (unlikely(rx_info->mbuf == NULL)) {
1010 counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1);
1015 mlen = rx_ring->rx_mbuf_sz;
1017 /* Set mbuf length*/
1018 rx_info->mbuf->m_pkthdr.len = rx_info->mbuf->m_len = mlen;
1020 /* Map packets for DMA */
1022 "Using tag %p for buffers' DMA mapping, mbuf %p len: %d\n",
1023 adapter->rx_buf_tag, rx_info->mbuf, rx_info->mbuf->m_len);
1024 error = bus_dmamap_load_mbuf_sg(adapter->rx_buf_tag, rx_info->map,
1025 rx_info->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
1026 if (unlikely((error != 0) || (nsegs != 1))) {
1028 "failed to map mbuf, error: %d, nsegs: %d\n", error, nsegs);
1029 counter_u64_add(rx_ring->rx_stats.dma_mapping_err, 1);
1033 bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD);
1035 ena_buf = &rx_info->ena_buf;
1036 ena_buf->paddr = segs[0].ds_addr;
1037 ena_buf->len = mlen;
1040 "ALLOC RX BUF: mbuf %p, rx_info %p, len %d, paddr %#jx\n",
1041 rx_info->mbuf, rx_info, ena_buf->len, (uintmax_t)ena_buf->paddr);
1046 m_freem(rx_info->mbuf);
1047 rx_info->mbuf = NULL;
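/*
 * Rx buffer allocation strategy, as implemented above: a cluster of
 * rx_ring->rx_mbuf_sz bytes is requested first via m_getjcl(); if that
 * fails, the mjum_alloc_fail counter is bumped and the code falls back to a
 * standard cluster via m_getcl().  The resulting mbuf is then mapped with
 * bus_dmamap_load_mbuf_sg() and the single segment's address and length are
 * handed to the device through rx_info->ena_buf.
 */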
1052 ena_free_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring,
1053 struct ena_rx_buffer *rx_info)
1055 if (rx_info->mbuf == NULL) {
1056 ena_log(adapter->pdev, WARN,
1057 "Trying to free unallocated buffer\n");
1061 bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map,
1062 BUS_DMASYNC_POSTREAD);
1063 bus_dmamap_unload(adapter->rx_buf_tag, rx_info->map);
1064 m_freem(rx_info->mbuf);
1065 rx_info->mbuf = NULL;
1069 * ena_refill_rx_bufs - Refills ring with descriptors
1070 * @rx_ring: the ring which we want to feed with free descriptors
1071 * @num: number of descriptors to refill
1072 * Refills the ring with newly allocated DMA-mapped mbufs for receiving
1075 ena_refill_rx_bufs(struct ena_ring *rx_ring, uint32_t num)
1077 struct ena_adapter *adapter = rx_ring->adapter;
1078 device_t pdev = adapter->pdev;
1079 uint16_t next_to_use, req_id;
1083 ena_log_io(adapter->pdev, DBG, "refill qid: %d\n", rx_ring->qid);
1085 next_to_use = rx_ring->next_to_use;
1087 for (i = 0; i < num; i++) {
1088 struct ena_rx_buffer *rx_info;
1090 ena_log_io(pdev, DBG, "RX buffer - next to use: %d\n",
1093 req_id = rx_ring->free_rx_ids[next_to_use];
1094 rx_info = &rx_ring->rx_buffer_info[req_id];
1096 if (ena_rx_ring_in_netmap(adapter, rx_ring->qid))
1097 rc = ena_netmap_alloc_rx_slot(adapter, rx_ring,
1100 #endif /* DEV_NETMAP */
1101 rc = ena_alloc_rx_mbuf(adapter, rx_ring, rx_info);
1102 if (unlikely(rc != 0)) {
1103 ena_log_io(pdev, WARN,
1104 "failed to alloc buffer for rx queue %d\n",
1108 rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
1109 &rx_info->ena_buf, req_id);
1110 if (unlikely(rc != 0)) {
1111 ena_log_io(pdev, WARN,
1112 "failed to add buffer for rx queue %d\n",
1116 next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
1117 rx_ring->ring_size);
1120 if (unlikely(i < num)) {
1121 counter_u64_add(rx_ring->rx_stats.refil_partial, 1);
1122 ena_log_io(pdev, WARN,
1123 "refilled rx qid %d with only %d mbufs (from %d)\n",
1124 rx_ring->qid, i, num);
1128 ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
1130 rx_ring->next_to_use = next_to_use;
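/*
 * Refill flow: for each descriptor the req_id stored at next_to_use in
 * free_rx_ids selects an rx_info slot, an mbuf is allocated and DMA-mapped,
 * and the buffer is queued with ena_com_add_single_rx_desc().  The
 * submission queue doorbell is only written once, after the loop (even on a
 * partial refill), so the device picks up the whole batch at once.
 */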
1135 ena_update_buf_ring_size(struct ena_adapter *adapter,
1136 uint32_t new_buf_ring_size)
1138 uint32_t old_buf_ring_size;
1142 old_buf_ring_size = adapter->buf_ring_size;
1143 adapter->buf_ring_size = new_buf_ring_size;
1145 dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
1148 /* Reconfigure buf ring for all Tx rings. */
1149 ena_free_all_io_rings_resources(adapter);
1150 ena_init_io_rings_advanced(adapter);
1153 * If ena_up() fails, it's not because of recent buf_ring size
1154 * changes. Because of that, we just want to revert old drbr
1155 * value and trigger the reset because something else had to
1158 rc = ena_up(adapter);
1159 if (unlikely(rc != 0)) {
1160 ena_log(adapter->pdev, ERR,
1161 "Failed to configure device after setting new drbr size: %u. Reverting old value: %u and triggering the reset\n",
1162 new_buf_ring_size, old_buf_ring_size);
1164 /* Revert old size and trigger the reset */
1165 adapter->buf_ring_size = old_buf_ring_size;
1166 ena_free_all_io_rings_resources(adapter);
1167 ena_init_io_rings_advanced(adapter);
1169 ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET,
1171 ena_trigger_reset(adapter, ENA_REGS_RESET_OS_TRIGGER);
1179 ena_update_queue_size(struct ena_adapter *adapter, uint32_t new_tx_size,
1180 uint32_t new_rx_size)
1182 uint32_t old_tx_size, old_rx_size;
1186 old_tx_size = adapter->requested_tx_ring_size;
1187 old_rx_size = adapter->requested_rx_ring_size;
1188 adapter->requested_tx_ring_size = new_tx_size;
1189 adapter->requested_rx_ring_size = new_rx_size;
1191 dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
1194 /* Configure queues with new size. */
1195 ena_init_io_rings_basic(adapter);
1197 rc = ena_up(adapter);
1198 if (unlikely(rc != 0)) {
1199 ena_log(adapter->pdev, ERR,
1200 "Failed to configure device with the new sizes - Tx: %u Rx: %u. Reverting old values - Tx: %u Rx: %u\n",
1201 new_tx_size, new_rx_size, old_tx_size, old_rx_size);
1203 /* Revert old size. */
1204 adapter->requested_tx_ring_size = old_tx_size;
1205 adapter->requested_rx_ring_size = old_rx_size;
1206 ena_init_io_rings_basic(adapter);
1208 /* And try again. */
1209 rc = ena_up(adapter);
1210 if (unlikely(rc != 0)) {
1211 ena_log(adapter->pdev, ERR,
1212 "Failed to revert old queue sizes. Triggering device reset.\n");
1214 * If we've failed again, something had to go
1215 * wrong. After reset, the device should try to
1218 ENA_FLAG_SET_ATOMIC(
1219 ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
1220 ena_trigger_reset(adapter,
1221 ENA_REGS_RESET_OS_TRIGGER);
1230 ena_update_io_rings(struct ena_adapter *adapter, uint32_t num)
1232 ena_free_all_io_rings_resources(adapter);
1233 /* Force indirection table to be reinitialized */
1234 ena_com_rss_destroy(adapter->ena_dev);
1236 adapter->num_io_queues = num;
1237 ena_init_io_rings(adapter);
1240 /* Caller should sanitize new_num */
1242 ena_update_io_queue_nb(struct ena_adapter *adapter, uint32_t new_num)
1248 dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
1249 old_num = adapter->num_io_queues;
1252 ena_update_io_rings(adapter, new_num);
1255 rc = ena_up(adapter);
1256 if (unlikely(rc != 0)) {
1257 ena_log(adapter->pdev, ERR,
1258 "Failed to configure device with %u IO queues. "
1259 "Reverting to previous value: %u\n",
1262 ena_update_io_rings(adapter, old_num);
1264 rc = ena_up(adapter);
1265 if (unlikely(rc != 0)) {
1266 ena_log(adapter->pdev, ERR,
1267 "Failed to revert to previous setup IO "
1268 "queues. Triggering device reset.\n");
1269 ENA_FLAG_SET_ATOMIC(
1270 ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
1271 ena_trigger_reset(adapter,
1272 ENA_REGS_RESET_OS_TRIGGER);
1281 ena_free_rx_bufs(struct ena_adapter *adapter, unsigned int qid)
1283 struct ena_ring *rx_ring = &adapter->rx_ring[qid];
1286 for (i = 0; i < rx_ring->ring_size; i++) {
1287 struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];
1289 if (rx_info->mbuf != NULL)
1290 ena_free_rx_mbuf(adapter, rx_ring, rx_info);
1292 if (((if_getflags(adapter->ifp) & IFF_DYING) == 0) &&
1293 (if_getcapenable(adapter->ifp) & IFCAP_NETMAP)) {
1294 if (rx_info->netmap_buf_idx != 0)
1295 ena_netmap_free_rx_slot(adapter, rx_ring,
1298 #endif /* DEV_NETMAP */
1303 * ena_refill_all_rx_bufs - allocate all queues Rx buffers
1304 * @adapter: network interface device structure
1308 ena_refill_all_rx_bufs(struct ena_adapter *adapter)
1310 struct ena_ring *rx_ring;
1311 int i, rc, bufs_num;
1313 for (i = 0; i < adapter->num_io_queues; i++) {
1314 rx_ring = &adapter->rx_ring[i];
1315 bufs_num = rx_ring->ring_size - 1;
1316 rc = ena_refill_rx_bufs(rx_ring, bufs_num);
1317 if (unlikely(rc != bufs_num))
1318 ena_log_io(adapter->pdev, WARN,
1319 "refilling Queue %d failed. "
1320 "Allocated %d buffers from: %d\n",
1323 rx_ring->initialized = true;
1324 #endif /* DEV_NETMAP */
1329 ena_free_all_rx_bufs(struct ena_adapter *adapter)
1333 for (i = 0; i < adapter->num_io_queues; i++)
1334 ena_free_rx_bufs(adapter, i);
1338 * ena_free_tx_bufs - Free Tx Buffers per Queue
1339 * @adapter: network interface device structure
1343 ena_free_tx_bufs(struct ena_adapter *adapter, unsigned int qid)
1345 bool print_once = true;
1346 struct ena_ring *tx_ring = &adapter->tx_ring[qid];
1348 ENA_RING_MTX_LOCK(tx_ring);
1349 for (int i = 0; i < tx_ring->ring_size; i++) {
1350 struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
1352 if (tx_info->mbuf == NULL)
1356 ena_log(adapter->pdev, WARN,
1357 "free uncompleted tx mbuf qid %d idx 0x%x\n", qid,
1361 ena_log(adapter->pdev, DBG,
1362 "free uncompleted tx mbuf qid %d idx 0x%x\n", qid,
1366 bus_dmamap_sync(adapter->tx_buf_tag, tx_info->dmamap,
1367 BUS_DMASYNC_POSTWRITE);
1368 bus_dmamap_unload(adapter->tx_buf_tag, tx_info->dmamap);
1370 m_free(tx_info->mbuf);
1371 tx_info->mbuf = NULL;
1373 ENA_RING_MTX_UNLOCK(tx_ring);
1377 ena_free_all_tx_bufs(struct ena_adapter *adapter)
1379 for (int i = 0; i < adapter->num_io_queues; i++)
1380 ena_free_tx_bufs(adapter, i);
1384 ena_destroy_all_tx_queues(struct ena_adapter *adapter)
1389 for (i = 0; i < adapter->num_io_queues; i++) {
1390 ena_qid = ENA_IO_TXQ_IDX(i);
1391 ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
1396 ena_destroy_all_rx_queues(struct ena_adapter *adapter)
1401 for (i = 0; i < adapter->num_io_queues; i++) {
1402 ena_qid = ENA_IO_RXQ_IDX(i);
1403 ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
1408 ena_destroy_all_io_queues(struct ena_adapter *adapter)
1410 struct ena_que *queue;
1413 for (i = 0; i < adapter->num_io_queues; i++) {
1414 queue = &adapter->que[i];
1415 while (taskqueue_cancel(queue->cleanup_tq, &queue->cleanup_task, NULL))
1416 taskqueue_drain(queue->cleanup_tq, &queue->cleanup_task);
1417 taskqueue_free(queue->cleanup_tq);
1420 ena_destroy_all_tx_queues(adapter);
1421 ena_destroy_all_rx_queues(adapter);
1425 ena_create_io_queues(struct ena_adapter *adapter)
1427 struct ena_com_dev *ena_dev = adapter->ena_dev;
1428 struct ena_com_create_io_ctx ctx;
1429 struct ena_ring *ring;
1430 struct ena_que *queue;
1432 uint32_t msix_vector;
1433 cpuset_t *cpu_mask = NULL;
1436 /* Create TX queues */
1437 for (i = 0; i < adapter->num_io_queues; i++) {
1438 msix_vector = ENA_IO_IRQ_IDX(i);
1439 ena_qid = ENA_IO_TXQ_IDX(i);
1440 ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
1441 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
1442 ctx.queue_size = adapter->requested_tx_ring_size;
1443 ctx.msix_vector = msix_vector;
1445 ctx.numa_node = adapter->que[i].domain;
1447 rc = ena_com_create_io_queue(ena_dev, &ctx);
1449 ena_log(adapter->pdev, ERR,
1450 "Failed to create io TX queue #%d rc: %d\n", i, rc);
1453 ring = &adapter->tx_ring[i];
1454 rc = ena_com_get_io_handlers(ena_dev, ena_qid,
1455 &ring->ena_com_io_sq, &ring->ena_com_io_cq);
1457 ena_log(adapter->pdev, ERR,
1458 "Failed to get TX queue handlers. TX queue num"
1461 ena_com_destroy_io_queue(ena_dev, ena_qid);
1465 if (ctx.numa_node >= 0) {
1466 ena_com_update_numa_node(ring->ena_com_io_cq,
1471 /* Create RX queues */
1472 for (i = 0; i < adapter->num_io_queues; i++) {
1473 msix_vector = ENA_IO_IRQ_IDX(i);
1474 ena_qid = ENA_IO_RXQ_IDX(i);
1475 ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
1476 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
1477 ctx.queue_size = adapter->requested_rx_ring_size;
1478 ctx.msix_vector = msix_vector;
1480 ctx.numa_node = adapter->que[i].domain;
1482 rc = ena_com_create_io_queue(ena_dev, &ctx);
1483 if (unlikely(rc != 0)) {
1484 ena_log(adapter->pdev, ERR,
1485 "Failed to create io RX queue[%d] rc: %d\n", i, rc);
1489 ring = &adapter->rx_ring[i];
1490 rc = ena_com_get_io_handlers(ena_dev, ena_qid,
1491 &ring->ena_com_io_sq, &ring->ena_com_io_cq);
1492 if (unlikely(rc != 0)) {
1493 ena_log(adapter->pdev, ERR,
1494 "Failed to get RX queue handlers. RX queue num"
1497 ena_com_destroy_io_queue(ena_dev, ena_qid);
1501 if (ctx.numa_node >= 0) {
1502 ena_com_update_numa_node(ring->ena_com_io_cq,
1507 for (i = 0; i < adapter->num_io_queues; i++) {
1508 queue = &adapter->que[i];
1510 NET_TASK_INIT(&queue->cleanup_task, 0, ena_cleanup, queue);
1511 queue->cleanup_tq = taskqueue_create_fast("ena cleanup",
1512 M_WAITOK, taskqueue_thread_enqueue, &queue->cleanup_tq);
1515 cpu_mask = &queue->cpu_mask;
1517 taskqueue_start_threads_cpuset(&queue->cleanup_tq, 1, PI_NET,
1518 cpu_mask, "%s queue %d cleanup",
1519 device_get_nameunit(adapter->pdev), i);
1526 ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
1527 i = adapter->num_io_queues;
1530 ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));
1535 /*********************************************************************
1537 * MSIX & Interrupt Service routine
1539 **********************************************************************/
1542 * ena_handle_msix - MSIX Interrupt Handler for admin/async queue
1543 * @arg: interrupt number
1546 ena_intr_msix_mgmnt(void *arg)
1548 struct ena_adapter *adapter = (struct ena_adapter *)arg;
1550 ena_com_admin_q_comp_intr_handler(adapter->ena_dev);
1551 if (likely(ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter)))
1552 ena_com_aenq_intr_handler(adapter->ena_dev, arg);
1556 * ena_handle_msix - MSIX Interrupt Handler for Tx/Rx
1560 ena_handle_msix(void *arg)
1562 struct ena_que *queue = arg;
1563 struct ena_adapter *adapter = queue->adapter;
1564 if_t ifp = adapter->ifp;
1566 if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0))
1567 return (FILTER_STRAY);
1569 taskqueue_enqueue(queue->cleanup_tq, &queue->cleanup_task);
1571 return (FILTER_HANDLED);
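/*
 * ena_handle_msix() is installed as an interrupt filter (see
 * ena_request_io_irq()), so it does no datapath work itself: it either
 * reports FILTER_STRAY while the interface is not running, or schedules the
 * per-queue cleanup task and lets ena_cleanup() process Tx/Rx completions
 * in taskqueue context.
 */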
1575 ena_enable_msix(struct ena_adapter *adapter)
1577 device_t dev = adapter->pdev;
1578 int msix_vecs, msix_req;
1581 if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) {
1582 ena_log(dev, ERR, "Error, MSI-X is already enabled\n");
	/* Reserve the maximum number of MSI-X vectors we might need */
1587 msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues);
1589 adapter->msix_entries = malloc(msix_vecs * sizeof(struct msix_entry),
1590 M_DEVBUF, M_WAITOK | M_ZERO);
1592 ena_log(dev, DBG, "trying to enable MSI-X, vectors: %d\n", msix_vecs);
1594 for (i = 0; i < msix_vecs; i++) {
1595 adapter->msix_entries[i].entry = i;
1596 /* Vectors must start from 1 */
1597 adapter->msix_entries[i].vector = i + 1;
1600 msix_req = msix_vecs;
1601 rc = pci_alloc_msix(dev, &msix_vecs);
1602 if (unlikely(rc != 0)) {
1603 ena_log(dev, ERR, "Failed to enable MSIX, vectors %d rc %d\n",
1610 if (msix_vecs != msix_req) {
1611 if (msix_vecs == ENA_ADMIN_MSIX_VEC) {
1613 "Not enough number of MSI-x allocated: %d\n",
1615 pci_release_msi(dev);
1620 "Enable only %d MSI-x (out of %d), reduce "
1621 "the number of queues\n",
1622 msix_vecs, msix_req);
1625 adapter->msix_vecs = msix_vecs;
1626 ENA_FLAG_SET_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter);
1631 free(adapter->msix_entries, M_DEVBUF);
1632 adapter->msix_entries = NULL;
1638 ena_setup_mgmnt_intr(struct ena_adapter *adapter)
1640 snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name, ENA_IRQNAME_SIZE,
1641 "ena-mgmnt@pci:%s", device_get_nameunit(adapter->pdev));
1643 * Handler is NULL on purpose, it will be set
1644 * when mgmnt interrupt is acquired
1646 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler = NULL;
1647 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
1648 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
1649 adapter->msix_entries[ENA_MGMNT_IRQ_IDX].vector;
1653 ena_setup_io_intr(struct ena_adapter *adapter)
1656 int num_buckets = rss_getnumbuckets();
1657 static int last_bind = 0;
1663 if (adapter->msix_entries == NULL)
1667 if (adapter->first_bind < 0) {
1668 adapter->first_bind = last_bind;
1669 last_bind = (last_bind + adapter->num_io_queues) % num_buckets;
1671 cur_bind = adapter->first_bind;
1674 for (int i = 0; i < adapter->num_io_queues; i++) {
1675 irq_idx = ENA_IO_IRQ_IDX(i);
1677 snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
1678 "%s-TxRx-%d", device_get_nameunit(adapter->pdev), i);
1679 adapter->irq_tbl[irq_idx].handler = ena_handle_msix;
1680 adapter->irq_tbl[irq_idx].data = &adapter->que[i];
1681 adapter->irq_tbl[irq_idx].vector =
1682 adapter->msix_entries[irq_idx].vector;
1683 ena_log(adapter->pdev, DBG, "ena_setup_io_intr vector: %d\n",
1684 adapter->msix_entries[irq_idx].vector);
1687 adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu =
1688 rss_getcpu(cur_bind);
1689 cur_bind = (cur_bind + 1) % num_buckets;
1690 CPU_SETOF(adapter->que[i].cpu, &adapter->que[i].cpu_mask);
1692 for (idx = 0; idx < MAXMEMDOM; ++idx) {
1693 if (CPU_ISSET(adapter->que[i].cpu, &cpuset_domain[idx]))
1696 adapter->que[i].domain = idx;
1698 adapter->que[i].domain = -1;
1706 ena_request_mgmnt_irq(struct ena_adapter *adapter)
1708 device_t pdev = adapter->pdev;
1709 struct ena_irq *irq;
1710 unsigned long flags;
1713 flags = RF_ACTIVE | RF_SHAREABLE;
1715 irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
1716 irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ,
1717 &irq->vector, flags);
1719 if (unlikely(irq->res == NULL)) {
1720 ena_log(pdev, ERR, "could not allocate irq vector: %d\n",
1725 rc = bus_setup_intr(adapter->pdev, irq->res,
1726 INTR_TYPE_NET | INTR_MPSAFE, NULL, ena_intr_msix_mgmnt, irq->data,
1728 if (unlikely(rc != 0)) {
1730 "failed to register interrupt handler for irq %ju: %d\n",
1731 rman_get_start(irq->res), rc);
1734 irq->requested = true;
1739 ena_log(pdev, INFO, "releasing resource for irq %d\n", irq->vector);
1740 rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, irq->vector,
1742 if (unlikely(rcc != 0))
1744 "dev has no parent while releasing res for irq: %d\n",
1752 ena_request_io_irq(struct ena_adapter *adapter)
1754 device_t pdev = adapter->pdev;
1755 struct ena_irq *irq;
1756 unsigned long flags = 0;
1759 if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter))) {
1761 "failed to request I/O IRQ: MSI-X is not enabled\n");
1764 flags = RF_ACTIVE | RF_SHAREABLE;
1767 for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
1768 irq = &adapter->irq_tbl[i];
1770 if (unlikely(irq->requested))
1773 irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ,
1774 &irq->vector, flags);
1775 if (unlikely(irq->res == NULL)) {
1778 "could not allocate irq vector: %d\n", irq->vector);
1782 rc = bus_setup_intr(adapter->pdev, irq->res,
1783 INTR_TYPE_NET | INTR_MPSAFE, irq->handler, NULL, irq->data,
1785 if (unlikely(rc != 0)) {
1787 "failed to register interrupt handler for irq %ju: %d\n",
1788 rman_get_start(irq->res), rc);
1791 irq->requested = true;
1794 rc = bus_bind_intr(adapter->pdev, irq->res, irq->cpu);
1795 if (unlikely(rc != 0)) {
1797 "failed to bind interrupt handler for irq %ju to cpu %d: %d\n",
1798 rman_get_start(irq->res), irq->cpu, rc);
1802 ena_log(pdev, INFO, "queue %d - cpu %d\n",
1803 i - ENA_IO_IRQ_FIRST_IDX, irq->cpu);
1811 for (; i >= ENA_IO_IRQ_FIRST_IDX; i--) {
1812 irq = &adapter->irq_tbl[i];
		/*
		 * Once we have entered the err: section and irq->requested is
		 * true, we free both the interrupt and its resources.
		 */
1818 rcc = bus_teardown_intr(adapter->pdev, irq->res,
1820 if (unlikely(rcc != 0))
1822 "could not release irq: %d, error: %d\n",
		/*
		 * If we entered the err: section without irq->requested set,
		 * we know it was bus_alloc_resource_any() that needs cleanup,
		 * provided res is not NULL. In case res is NULL, no work is
		 * needed in this iteration.
		 */
1830 if (irq->res != NULL) {
1831 rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
1832 irq->vector, irq->res);
1834 if (unlikely(rcc != 0))
1836 "dev has no parent while releasing res for irq: %d\n",
1838 irq->requested = false;
1846 ena_free_mgmnt_irq(struct ena_adapter *adapter)
1848 device_t pdev = adapter->pdev;
1849 struct ena_irq *irq;
1852 irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
1853 if (irq->requested) {
1854 ena_log(pdev, DBG, "tear down irq: %d\n", irq->vector);
1855 rc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie);
1856 if (unlikely(rc != 0))
1857 ena_log(pdev, ERR, "failed to tear down irq: %d\n",
1862 if (irq->res != NULL) {
1863 ena_log(pdev, DBG, "release resource irq: %d\n", irq->vector);
1864 rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
1865 irq->vector, irq->res);
1867 if (unlikely(rc != 0))
1869 "dev has no parent while releasing res for irq: %d\n",
1875 ena_free_io_irq(struct ena_adapter *adapter)
1877 device_t pdev = adapter->pdev;
1878 struct ena_irq *irq;
1881 for (int i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
1882 irq = &adapter->irq_tbl[i];
1883 if (irq->requested) {
1884 ena_log(pdev, DBG, "tear down irq: %d\n", irq->vector);
1885 rc = bus_teardown_intr(adapter->pdev, irq->res,
1887 if (unlikely(rc != 0)) {
1889 "failed to tear down irq: %d\n",
1895 if (irq->res != NULL) {
1896 ena_log(pdev, DBG, "release resource irq: %d\n",
1898 rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
1899 irq->vector, irq->res);
1901 if (unlikely(rc != 0)) {
1903 "dev has no parent while releasing res for irq: %d\n",
1911 ena_free_irqs(struct ena_adapter *adapter)
1913 ena_free_io_irq(adapter);
1914 ena_free_mgmnt_irq(adapter);
1915 ena_disable_msix(adapter);
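/*
 * Interrupt teardown mirrors setup in reverse: the per-queue I/O IRQs are
 * released first, then the management IRQ, and finally ena_disable_msix()
 * returns the MSI-X vectors to the PCI layer and frees the msix_entries
 * array.
 */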
1919 ena_disable_msix(struct ena_adapter *adapter)
1921 if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) {
1922 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter);
1923 pci_release_msi(adapter->pdev);
1926 adapter->msix_vecs = 0;
1927 free(adapter->msix_entries, M_DEVBUF);
1928 adapter->msix_entries = NULL;
1932 ena_unmask_all_io_irqs(struct ena_adapter *adapter)
1934 struct ena_com_io_cq *io_cq;
1935 struct ena_eth_io_intr_reg intr_reg;
1936 struct ena_ring *tx_ring;
1940 /* Unmask interrupts for all queues */
1941 for (i = 0; i < adapter->num_io_queues; i++) {
1942 ena_qid = ENA_IO_TXQ_IDX(i);
1943 io_cq = &adapter->ena_dev->io_cq_queues[ena_qid];
1944 ena_com_update_intr_reg(&intr_reg, 0, 0, true);
1945 tx_ring = &adapter->tx_ring[i];
1946 counter_u64_add(tx_ring->tx_stats.unmask_interrupt_num, 1);
1947 ena_com_unmask_intr(io_cq, &intr_reg);
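/*
 * The loop above walks every Tx completion queue and unmasks its interrupt
 * via ena_com_unmask_intr().  The two zero arguments to
 * ena_com_update_intr_reg() are assumed here to be the Rx and Tx
 * interrupt-moderation delays, so interrupts come back with no extra delay
 * once the device is brought up.
 */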
1952 ena_up_complete(struct ena_adapter *adapter)
1956 if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter))) {
1957 rc = ena_rss_configure(adapter);
1959 ena_log(adapter->pdev, ERR,
1960 "Failed to configure RSS\n");
1965 rc = ena_change_mtu(adapter->ifp, if_getmtu(adapter->ifp));
1966 if (unlikely(rc != 0))
1969 ena_refill_all_rx_bufs(adapter);
1970 ena_reset_counters((counter_u64_t *)&adapter->hw_stats,
1971 sizeof(adapter->hw_stats));
1977 set_io_rings_size(struct ena_adapter *adapter, int new_tx_size, int new_rx_size)
1981 for (i = 0; i < adapter->num_io_queues; i++) {
1982 adapter->tx_ring[i].ring_size = new_tx_size;
1983 adapter->rx_ring[i].ring_size = new_rx_size;
1988 create_queues_with_size_backoff(struct ena_adapter *adapter)
1990 device_t pdev = adapter->pdev;
1992 uint32_t cur_rx_ring_size, cur_tx_ring_size;
1993 uint32_t new_rx_ring_size, new_tx_ring_size;
1996 * Current queue sizes might be set to smaller than the requested
1997 * ones due to past queue allocation failures.
1999 set_io_rings_size(adapter, adapter->requested_tx_ring_size,
2000 adapter->requested_rx_ring_size);
2003 /* Allocate transmit descriptors */
2004 rc = ena_setup_all_tx_resources(adapter);
2005 if (unlikely(rc != 0)) {
2006 ena_log(pdev, ERR, "err_setup_tx\n");
2010 /* Allocate receive descriptors */
2011 rc = ena_setup_all_rx_resources(adapter);
2012 if (unlikely(rc != 0)) {
2013 ena_log(pdev, ERR, "err_setup_rx\n");
2017 /* Create IO queues for Rx & Tx */
2018 rc = ena_create_io_queues(adapter);
2019 if (unlikely(rc != 0)) {
2020 ena_log(pdev, ERR, "create IO queues failed\n");
2027 ena_free_all_rx_resources(adapter);
2029 ena_free_all_tx_resources(adapter);
2032 * Lower the ring size if ENOMEM. Otherwise, return the
2033 * error straightaway.
2035 if (unlikely(rc != ENOMEM)) {
2037 "Queue creation failed with error code: %d\n", rc);
2041 cur_tx_ring_size = adapter->tx_ring[0].ring_size;
2042 cur_rx_ring_size = adapter->rx_ring[0].ring_size;
2045 "Not enough memory to create queues with sizes TX=%d, RX=%d\n",
2046 cur_tx_ring_size, cur_rx_ring_size);
2048 new_tx_ring_size = cur_tx_ring_size;
2049 new_rx_ring_size = cur_rx_ring_size;
2052 * Decrease the size of a larger queue, or decrease both if they
2053 * are the same size.
2055 if (cur_rx_ring_size <= cur_tx_ring_size)
2056 new_tx_ring_size = cur_tx_ring_size / 2;
2057 if (cur_rx_ring_size >= cur_tx_ring_size)
2058 new_rx_ring_size = cur_rx_ring_size / 2;
2060 if (new_tx_ring_size < ENA_MIN_RING_SIZE ||
2061 new_rx_ring_size < ENA_MIN_RING_SIZE) {
2063 "Queue creation failed with the smallest possible queue size"
2064 "of %d for both queues. Not retrying with smaller queues\n",
2070 "Retrying queue creation with sizes TX=%d, RX=%d\n",
2071 new_tx_ring_size, new_rx_ring_size);
2073 set_io_rings_size(adapter, new_tx_ring_size, new_rx_ring_size);
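/*
 * Illustrative backoff sequence (assuming requested ring sizes of 1024/1024
 * and an ENA_MIN_RING_SIZE of 256): if queue creation fails with ENOMEM the
 * larger ring is halved (both, if they are equal) and creation is retried,
 * e.g. 1024/1024 -> 512/512 -> 256/256; once a ring would drop below
 * ENA_MIN_RING_SIZE the function gives up instead of retrying.
 */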
2078 ena_up(struct ena_adapter *adapter)
2084 if (unlikely(device_is_attached(adapter->pdev) == 0)) {
2085 ena_log(adapter->pdev, ERR, "device is not attached!\n");
2089 if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
2092 ena_log(adapter->pdev, INFO, "device is going UP\n");
2094 /* setup interrupts for IO queues */
2095 rc = ena_setup_io_intr(adapter);
2096 if (unlikely(rc != 0)) {
2097 ena_log(adapter->pdev, ERR, "error setting up IO interrupt\n");
2100 rc = ena_request_io_irq(adapter);
2101 if (unlikely(rc != 0)) {
2102 ena_log(adapter->pdev, ERR, "err_req_irq\n");
2106 ena_log(adapter->pdev, INFO,
2107 "Creating %u IO queues. Rx queue size: %d, Tx queue size: %d, LLQ is %s\n",
2108 adapter->num_io_queues,
2109 adapter->requested_rx_ring_size,
2110 adapter->requested_tx_ring_size,
2111 (adapter->ena_dev->tx_mem_queue_type ==
2112 ENA_ADMIN_PLACEMENT_POLICY_DEV) ? "ENABLED" : "DISABLED");
2114 rc = create_queues_with_size_backoff(adapter);
2115 if (unlikely(rc != 0)) {
2116 ena_log(adapter->pdev, ERR,
2117 "error creating queues with size backoff\n");
2118 goto err_create_queues_with_backoff;
2121 if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter))
2122 if_link_state_change(adapter->ifp, LINK_STATE_UP);
2124 rc = ena_up_complete(adapter);
2125 if (unlikely(rc != 0))
2126 goto err_up_complete;
2128 counter_u64_add(adapter->dev_stats.interface_up, 1);
2130 ena_update_hwassist(adapter);
2132 if_setdrvflagbits(adapter->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
2134 ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP, adapter);
2136 ena_unmask_all_io_irqs(adapter);
2141 ena_destroy_all_io_queues(adapter);
2142 ena_free_all_rx_resources(adapter);
2143 ena_free_all_tx_resources(adapter);
2144 err_create_queues_with_backoff:
2145 ena_free_io_irq(adapter);
2151 ena_get_counter(if_t ifp, ift_counter cnt)
2153 struct ena_adapter *adapter;
2154 struct ena_hw_stats *stats;
2156 adapter = if_getsoftc(ifp);
2157 stats = &adapter->hw_stats;
2160 case IFCOUNTER_IPACKETS:
2161 return (counter_u64_fetch(stats->rx_packets));
2162 case IFCOUNTER_OPACKETS:
2163 return (counter_u64_fetch(stats->tx_packets));
2164 case IFCOUNTER_IBYTES:
2165 return (counter_u64_fetch(stats->rx_bytes));
2166 case IFCOUNTER_OBYTES:
2167 return (counter_u64_fetch(stats->tx_bytes));
2168 case IFCOUNTER_IQDROPS:
2169 return (counter_u64_fetch(stats->rx_drops));
2170 case IFCOUNTER_OQDROPS:
2171 return (counter_u64_fetch(stats->tx_drops));
2173 return (if_get_counter_default(ifp, cnt));
2178 ena_media_change(if_t ifp)
2180 /* Media Change is not supported by firmware */
2185 ena_media_status(if_t ifp, struct ifmediareq *ifmr)
2187 struct ena_adapter *adapter = if_getsoftc(ifp);
2188 ena_log(adapter->pdev, DBG, "Media status update\n");
2192 ifmr->ifm_status = IFM_AVALID;
2193 ifmr->ifm_active = IFM_ETHER;
2195 if (!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)) {
2197 ena_log(adapter->pdev, INFO, "Link is down\n");
2201 ifmr->ifm_status |= IFM_ACTIVE;
2202 ifmr->ifm_active |= IFM_UNKNOWN | IFM_FDX;
2210 struct ena_adapter *adapter = (struct ena_adapter *)arg;
2212 if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) {
2220 ena_ioctl(if_t ifp, u_long command, caddr_t data)
2222 struct ena_adapter *adapter;
2226 adapter = if_getsoftc(ifp);
2227 ifr = (struct ifreq *)data;
 * Acquire the lock to prevent the up and down routines from running in parallel.
2235 if (if_getmtu(ifp) == ifr->ifr_mtu)
2240 ena_change_mtu(ifp, ifr->ifr_mtu);
2242 rc = ena_up(adapter);
2247 if ((if_getflags(ifp) & IFF_UP) != 0) {
2248 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
2249 if ((if_getflags(ifp) & (IFF_PROMISC |
2250 IFF_ALLMULTI)) != 0) {
2251 ena_log(adapter->pdev, INFO,
2252 "ioctl promisc/allmulti\n");
2256 rc = ena_up(adapter);
2260 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
2274 rc = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
2281 if (ifr->ifr_reqcap != if_getcapenable(ifp)) {
2282 if_setcapenable(ifp, ifr->ifr_reqcap);
2286 if ((reinit != 0) &&
2287 ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)) {
2290 rc = ena_up(adapter);
2297 rc = ether_ioctl(ifp, command, data);
2305 ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *feat)
2309 if ((feat->offload.tx &
2310 (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK |
2311 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK |
2312 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK)) != 0)
2313 caps |= IFCAP_TXCSUM;
2315 if ((feat->offload.tx &
2316 (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK |
2317 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)) != 0)
2318 caps |= IFCAP_TXCSUM_IPV6;
2320 if ((feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0)
2323 if ((feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) != 0)
2326 if ((feat->offload.rx_supported &
2327 (ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK |
2328 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK)) != 0)
2329 caps |= IFCAP_RXCSUM;
2331 if ((feat->offload.rx_supported &
2332 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) != 0)
2333 caps |= IFCAP_RXCSUM_IPV6;
2335 caps |= IFCAP_LRO | IFCAP_JUMBO_MTU;
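/*
 * Summary of the mapping above: the Tx/Rx offload feature bits reported by
 * the device are translated into ifnet capabilities (IFCAP_TXCSUM,
 * IFCAP_TXCSUM_IPV6, the TSO and Rx checksum flags), while IFCAP_LRO and
 * IFCAP_JUMBO_MTU are advertised unconditionally.
 */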
2341 ena_update_host_info(struct ena_admin_host_info *host_info, if_t ifp)
2343 host_info->supported_network_features[0] = (uint32_t)if_getcapabilities(ifp);
2347 ena_update_hwassist(struct ena_adapter *adapter)
2349 if_t ifp = adapter->ifp;
2350 uint32_t feat = adapter->tx_offload_cap;
2351 int cap = if_getcapenable(ifp);
2354 if_clearhwassist(ifp);
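/* Translate the enabled interface capabilities into the CSUM_* hwassist flags consumed by the network stack. */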
2356 if ((cap & IFCAP_TXCSUM) != 0) {
2358 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK) != 0)
2361 (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK |
2362 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)) != 0)
2363 flags |= CSUM_IP_UDP | CSUM_IP_TCP;
2366 if ((cap & IFCAP_TXCSUM_IPV6) != 0)
2367 flags |= CSUM_IP6_UDP | CSUM_IP6_TCP;
2369 if ((cap & IFCAP_TSO4) != 0)
2370 flags |= CSUM_IP_TSO;
2372 if ((cap & IFCAP_TSO6) != 0)
2373 flags |= CSUM_IP6_TSO;
2375 if_sethwassistbits(ifp, flags, 0);
2379 ena_setup_ifnet(device_t pdev, struct ena_adapter *adapter,
2380 struct ena_com_dev_get_features_ctx *feat)
2385 ifp = adapter->ifp = if_gethandle(IFT_ETHER);
2386 if (unlikely(ifp == NULL)) {
2387 ena_log(pdev, ERR, "can not allocate ifnet structure\n");
2390 if_initname(ifp, device_get_name(pdev), device_get_unit(pdev));
2391 if_setdev(ifp, pdev);
2392 if_setsoftc(ifp, adapter);
2394 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
2395 if_setinitfn(ifp, ena_init);
2396 if_settransmitfn(ifp, ena_mq_start);
2397 if_setqflushfn(ifp, ena_qflush);
2398 if_setioctlfn(ifp, ena_ioctl);
2399 if_setgetcounterfn(ifp, ena_get_counter);
2401 if_setsendqlen(ifp, adapter->requested_tx_ring_size);
2402 if_setsendqready(ifp);
2403 if_setmtu(ifp, ETHERMTU);
2404 if_setbaudrate(ifp, 0);
2405 /* Zeroize capabilities... */
2406 if_setcapabilities(ifp, 0);
2407 if_setcapenable(ifp, 0);
2408 /* check hardware support */
2409 caps = ena_get_dev_offloads(feat);
2410 /* ... and set them */
2411 if_setcapabilitiesbit(ifp, caps, 0);
2413 /* TSO parameters */
2414 if_sethwtsomax(ifp, ENA_TSO_MAXSIZE -
2415 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
2416 if_sethwtsomaxsegcount(ifp, adapter->max_tx_sgl_size - 1);
2417 if_sethwtsomaxsegsize(ifp, ENA_TSO_MAXSIZE);
2419 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
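/* Enable all supported capabilities by default; individual ones can be toggled later via SIOCSIFCAP. */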
2420 if_setcapenable(ifp, if_getcapabilities(ifp));
2423 * Specify the media types supported by this adapter and register
2424 * callbacks to update media and link information
2426 ifmedia_init(&adapter->media, IFM_IMASK, ena_media_change,
2428 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2429 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2431 ether_ifattach(ifp, adapter->mac_addr);
2437 ena_down(struct ena_adapter *adapter)
2443 if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
2446 ena_log(adapter->pdev, INFO, "device is going DOWN\n");
2448 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP, adapter);
2449 if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
2451 ena_free_io_irq(adapter);
2453 if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) {
2454 rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
2455 if (unlikely(rc != 0))
2456 ena_log(adapter->pdev, ERR, "Device reset failed\n");
2459 ena_destroy_all_io_queues(adapter);
2461 ena_free_all_tx_bufs(adapter);
2462 ena_free_all_rx_bufs(adapter);
2463 ena_free_all_tx_resources(adapter);
2464 ena_free_all_rx_resources(adapter);
2466 counter_u64_add(adapter->dev_stats.interface_down, 1);
2470 ena_calc_max_io_queue_num(device_t pdev, struct ena_com_dev *ena_dev,
2471 struct ena_com_dev_get_features_ctx *get_feat_ctx)
2473 uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
2475 /* Regular queues capabilities */
2476 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
2477 struct ena_admin_queue_ext_feature_fields *max_queue_ext =
2478 &get_feat_ctx->max_queue_ext.max_queue_ext;
2479 io_rx_num = min_t(int, max_queue_ext->max_rx_sq_num,
2480 max_queue_ext->max_rx_cq_num);
2482 io_tx_sq_num = max_queue_ext->max_tx_sq_num;
2483 io_tx_cq_num = max_queue_ext->max_tx_cq_num;
2485 struct ena_admin_queue_feature_desc *max_queues =
2486 &get_feat_ctx->max_queues;
2487 io_tx_sq_num = max_queues->max_sq_num;
2488 io_tx_cq_num = max_queues->max_cq_num;
2489 io_rx_num = min_t(int, io_tx_sq_num, io_tx_cq_num);
2492 /* In case of LLQ use the llq fields for the tx SQ/CQ */
2493 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
2494 io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
2496 max_num_io_queues = min_t(uint32_t, mp_ncpus, ENA_MAX_NUM_IO_QUEUES);
2497 max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_rx_num);
2498 max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_tx_sq_num);
2499 max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_tx_cq_num);
2500 /* 1 IRQ for mgmnt and 1 IRQ for each TX/RX pair */
2501 max_num_io_queues = min_t(uint32_t, max_num_io_queues,
2502 pci_msix_count(pdev) - 1);
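/* Do not create more IO queues than there are RSS buckets. */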
2504 max_num_io_queues = min_t(uint32_t, max_num_io_queues,
2505 rss_getnumbuckets());
2508 return (max_num_io_queues);
2512 ena_enable_wc(device_t pdev, struct resource *res)
2514 #if defined(__i386) || defined(__amd64) || defined(__aarch64__)
2519 va = (vm_offset_t)rman_get_virtual(res);
2520 len = rman_get_size(res);
2521 /* Enable write combining */
2522 rc = pmap_change_attr(va, len, VM_MEMATTR_WRITE_COMBINING);
2523 if (unlikely(rc != 0)) {
2524 ena_log(pdev, ERR, "pmap_change_attr failed, %d\n", rc);
2530 return (EOPNOTSUPP);
2534 ena_set_queues_placement_policy(device_t pdev, struct ena_com_dev *ena_dev,
2535 struct ena_admin_feature_llq_desc *llq,
2536 struct ena_llq_configurations *llq_default_configurations)
2539 uint32_t llq_feature_mask;
2541 llq_feature_mask = 1 << ENA_ADMIN_LLQ;
2542 if (!(ena_dev->supported_features & llq_feature_mask)) {
2544 "LLQ is not supported. Fallback to host mode policy.\n");
2545 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2549 if (ena_dev->mem_bar == NULL) {
2551 "LLQ is advertised as supported but device doesn't expose mem bar.\n");
2552 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2556 rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
2557 if (unlikely(rc != 0)) {
2559 "Failed to configure the device mode. "
2560 "Fallback to host mode policy.\n");
2561 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2568 ena_map_llq_mem_bar(device_t pdev, struct ena_com_dev *ena_dev)
2570 struct ena_adapter *adapter = device_get_softc(pdev);
2573 /* Try to allocate resources for LLQ bar */
2574 rid = PCIR_BAR(ENA_MEM_BAR);
2575 adapter->memory = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, &rid,
2577 if (unlikely(adapter->memory == NULL)) {
2579 "Unable to allocate LLQ bar resource. LLQ mode won't be used.\n");
2583 /* Enable write combining for better LLQ performance */
2584 rc = ena_enable_wc(adapter->pdev, adapter->memory);
2585 if (unlikely(rc != 0)) {
2586 ena_log(pdev, ERR, "failed to enable write combining.\n");
2591 * Save virtual address of the device's memory region
2592 * for the ena_com layer.
2594 ena_dev->mem_bar = rman_get_virtual(adapter->memory);
2600 set_default_llq_configurations(struct ena_llq_configurations *llq_config,
2601 struct ena_admin_feature_llq_desc *llq)
2603 llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
2604 llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
2605 llq_config->llq_num_decs_before_header =
2606 ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
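/* Use 256B LLQ entries only when the device supports them and ena_force_large_llq_header is set; otherwise keep the default 128B entries. */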
2607 if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) !=
2608 0 && ena_force_large_llq_header) {
2609 llq_config->llq_ring_entry_size =
2610 ENA_ADMIN_LIST_ENTRY_SIZE_256B;
2611 llq_config->llq_ring_entry_size_value = 256;
2613 llq_config->llq_ring_entry_size =
2614 ENA_ADMIN_LIST_ENTRY_SIZE_128B;
2615 llq_config->llq_ring_entry_size_value = 128;
2620 ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
2622 struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
2623 struct ena_com_dev *ena_dev = ctx->ena_dev;
2624 uint32_t tx_queue_size = ENA_DEFAULT_RING_SIZE;
2625 uint32_t rx_queue_size = ENA_DEFAULT_RING_SIZE;
2626 uint32_t max_tx_queue_size;
2627 uint32_t max_rx_queue_size;
2629 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
2630 struct ena_admin_queue_ext_feature_fields *max_queue_ext =
2631 &ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
2632 max_rx_queue_size = min_t(uint32_t,
2633 max_queue_ext->max_rx_cq_depth,
2634 max_queue_ext->max_rx_sq_depth);
2635 max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
2637 if (ena_dev->tx_mem_queue_type ==
2638 ENA_ADMIN_PLACEMENT_POLICY_DEV)
2639 max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
2640 llq->max_llq_depth);
2642 max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
2643 max_queue_ext->max_tx_sq_depth);
2645 ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
2646 max_queue_ext->max_per_packet_tx_descs);
2647 ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
2648 max_queue_ext->max_per_packet_rx_descs);
2650 struct ena_admin_queue_feature_desc *max_queues =
2651 &ctx->get_feat_ctx->max_queues;
2652 max_rx_queue_size = min_t(uint32_t, max_queues->max_cq_depth,
2653 max_queues->max_sq_depth);
2654 max_tx_queue_size = max_queues->max_cq_depth;
2656 if (ena_dev->tx_mem_queue_type ==
2657 ENA_ADMIN_PLACEMENT_POLICY_DEV)
2658 max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
2659 llq->max_llq_depth);
2661 max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
2662 max_queues->max_sq_depth);
2664 ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
2665 max_queues->max_packet_tx_descs);
2666 ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
2667 max_queues->max_packet_rx_descs);
2670 /* round down to the nearest power of 2 */
2671 max_tx_queue_size = 1 << (flsl(max_tx_queue_size) - 1);
2672 max_rx_queue_size = 1 << (flsl(max_rx_queue_size) - 1);
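/* E.g. a reported depth of 1000 (flsl() == 10) is rounded down to 1 << 9 == 512. */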
2675 * When forcing large headers, we multiply the entry size by 2,
2676 * and therefore divide the queue size by 2, leaving the amount
2677 * of memory used by the queues unchanged.
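* For example, 1024 entries of 128 bytes and 512 entries of 256 bytes
* both occupy 128 KiB of LLQ memory.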
2679 if (ena_force_large_llq_header) {
2680 if ((llq->entry_size_ctrl_supported &
2681 ENA_ADMIN_LIST_ENTRY_SIZE_256B) != 0 &&
2682 ena_dev->tx_mem_queue_type ==
2683 ENA_ADMIN_PLACEMENT_POLICY_DEV) {
2684 max_tx_queue_size /= 2;
2685 ena_log(ctx->pdev, INFO,
2686 "Forcing large headers and decreasing maximum Tx queue size to %d\n",
2689 ena_log(ctx->pdev, WARN,
2690 "Forcing large headers failed: LLQ is disabled or device does not support large headers\n");
2694 tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
2696 rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
2699 tx_queue_size = 1 << (flsl(tx_queue_size) - 1);
2700 rx_queue_size = 1 << (flsl(rx_queue_size) - 1);
2702 ctx->max_tx_queue_size = max_tx_queue_size;
2703 ctx->max_rx_queue_size = max_rx_queue_size;
2704 ctx->tx_queue_size = tx_queue_size;
2705 ctx->rx_queue_size = rx_queue_size;
2711 ena_config_host_info(struct ena_com_dev *ena_dev, device_t dev)
2713 struct ena_admin_host_info *host_info;
2717 /* Allocate only the host info */
2718 rc = ena_com_allocate_host_info(ena_dev);
2719 if (unlikely(rc != 0)) {
2720 ena_log(dev, ERR, "Cannot allocate host info\n");
2724 host_info = ena_dev->host_attr.host_info;
2726 if (pci_get_id(dev, PCI_ID_RID, &rid) == 0)
2727 host_info->bdf = rid;
2728 host_info->os_type = ENA_ADMIN_OS_FREEBSD;
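/* osreldate is the kernel's __FreeBSD_version; report it both as a number and as a string. */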
2729 host_info->kernel_ver = osreldate;
2731 sprintf(host_info->kernel_ver_str, "%d", osreldate);
2732 host_info->os_dist = 0;
2733 strncpy(host_info->os_dist_str, osrelease,
2734 sizeof(host_info->os_dist_str) - 1);
2736 host_info->driver_version = (ENA_DRV_MODULE_VER_MAJOR) |
2737 (ENA_DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
2738 (ENA_DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
2739 host_info->num_cpus = mp_ncpus;
2740 host_info->driver_supported_features =
2741 ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK |
2742 ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK;
2744 rc = ena_com_set_host_attributes(ena_dev);
2745 if (unlikely(rc != 0)) {
2746 if (rc == EOPNOTSUPP)
2747 ena_log(dev, WARN, "Cannot set host attributes\n");
2749 ena_log(dev, ERR, "Cannot set host attributes\n");
2757 ena_com_delete_host_info(ena_dev);
2761 ena_device_init(struct ena_adapter *adapter, device_t pdev,
2762 struct ena_com_dev_get_features_ctx *get_feat_ctx, int *wd_active)
2764 struct ena_llq_configurations llq_config;
2765 struct ena_com_dev *ena_dev = adapter->ena_dev;
2766 bool readless_supported;
2767 uint32_t aenq_groups;
2771 rc = ena_com_mmio_reg_read_request_init(ena_dev);
2772 if (unlikely(rc != 0)) {
2773 ena_log(pdev, ERR, "failed to init mmio read less\n");
2778 * The PCIe configuration space revision id indicates whether mmio reg
2781 readless_supported = !(pci_get_revid(pdev) & ENA_MMIO_DISABLE_REG_READ);
2782 ena_com_set_mmio_read_mode(ena_dev, readless_supported);
2784 rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
2785 if (unlikely(rc != 0)) {
2786 ena_log(pdev, ERR, "Can not reset device\n");
2787 goto err_mmio_read_less;
2790 rc = ena_com_validate_version(ena_dev);
2791 if (unlikely(rc != 0)) {
2792 ena_log(pdev, ERR, "device version is too low\n");
2793 goto err_mmio_read_less;
2796 dma_width = ena_com_get_dma_width(ena_dev);
2797 if (unlikely(dma_width < 0)) {
2798 ena_log(pdev, ERR, "Invalid dma width value %d", dma_width);
2800 goto err_mmio_read_less;
2802 adapter->dma_width = dma_width;
2804 /* ENA admin level init */
2805 rc = ena_com_admin_init(ena_dev, &aenq_handlers);
2806 if (unlikely(rc != 0)) {
2808 "Can not initialize ena admin queue with device\n");
2809 goto err_mmio_read_less;
2813 * To enable the MSI-X interrupts, the driver needs to know the number
2814 * of queues. So the driver uses polling mode to retrieve this
2817 ena_com_set_admin_polling_mode(ena_dev, true);
2819 ena_config_host_info(ena_dev, pdev);
2821 /* Get Device Attributes */
2822 rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
2823 if (unlikely(rc != 0)) {
2825 "Cannot get attribute for ena device rc: %d\n", rc);
2826 goto err_admin_init;
2829 aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
2830 BIT(ENA_ADMIN_FATAL_ERROR) |
2831 BIT(ENA_ADMIN_WARNING) |
2832 BIT(ENA_ADMIN_NOTIFICATION) |
2833 BIT(ENA_ADMIN_KEEP_ALIVE);
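/* Advertise every AENQ group the driver can handle; mask it below with what the device actually supports. */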
2835 aenq_groups &= get_feat_ctx->aenq.supported_groups;
2836 rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
2837 if (unlikely(rc != 0)) {
2838 ena_log(pdev, ERR, "Cannot configure aenq groups rc: %d\n", rc);
2839 goto err_admin_init;
2842 *wd_active = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
2844 set_default_llq_configurations(&llq_config, &get_feat_ctx->llq);
2846 rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq,
2848 if (unlikely(rc != 0)) {
2849 ena_log(pdev, ERR, "Failed to set placement policy\n");
2850 goto err_admin_init;
2856 ena_com_delete_host_info(ena_dev);
2857 ena_com_admin_destroy(ena_dev);
2859 ena_com_mmio_reg_read_request_destroy(ena_dev);
2865 ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter)
2867 struct ena_com_dev *ena_dev = adapter->ena_dev;
2870 rc = ena_enable_msix(adapter);
2871 if (unlikely(rc != 0)) {
2872 ena_log(adapter->pdev, ERR, "Error with MSI-X enablement\n");
2876 ena_setup_mgmnt_intr(adapter);
2878 rc = ena_request_mgmnt_irq(adapter);
2879 if (unlikely(rc != 0)) {
2880 ena_log(adapter->pdev, ERR, "Cannot setup mgmnt queue intr\n");
2881 goto err_disable_msix;
2884 ena_com_set_admin_polling_mode(ena_dev, false);
2886 ena_com_admin_aenq_enable(ena_dev);
2891 ena_disable_msix(adapter);
2896 /* Function called on ENA_ADMIN_KEEP_ALIVE event */
2898 ena_keep_alive_wd(void *adapter_data, struct ena_admin_aenq_entry *aenq_e)
2900 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
2901 struct ena_admin_aenq_keep_alive_desc *desc;
2906 desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
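/* The keep-alive event carries 64-bit Rx/Tx drop counters split into 32-bit halves; reassemble them and latch the new totals. */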
2908 rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low;
2909 tx_drops = ((uint64_t)desc->tx_drops_high << 32) | desc->tx_drops_low;
2910 counter_u64_zero(adapter->hw_stats.rx_drops);
2911 counter_u64_add(adapter->hw_stats.rx_drops, rx_drops);
2912 counter_u64_zero(adapter->hw_stats.tx_drops);
2913 counter_u64_add(adapter->hw_stats.tx_drops, tx_drops);
2915 stime = getsbinuptime();
2916 atomic_store_rel_64(&adapter->keep_alive_timestamp, stime);
2919 /* Check for keep alive expiration */
2921 check_for_missing_keep_alive(struct ena_adapter *adapter)
2923 sbintime_t timestamp, time;
2925 if (adapter->wd_active == 0)
2928 if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
2931 timestamp = atomic_load_acq_64(&adapter->keep_alive_timestamp);
2932 time = getsbinuptime() - timestamp;
2933 if (unlikely(time > adapter->keep_alive_timeout)) {
2934 ena_log(adapter->pdev, ERR, "Keep alive watchdog timeout.\n");
2935 counter_u64_add(adapter->dev_stats.wd_expired, 1);
2936 ena_trigger_reset(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO);
2940 /* Check if admin queue is enabled */
2942 check_for_admin_com_state(struct ena_adapter *adapter)
2944 if (unlikely(ena_com_get_admin_running_state(adapter->ena_dev) == false)) {
2945 ena_log(adapter->pdev, ERR,
2946 "ENA admin queue is not in running state!\n");
2947 counter_u64_add(adapter->dev_stats.admin_q_pause, 1);
2948 ena_trigger_reset(adapter, ENA_REGS_RESET_ADMIN_TO);
2953 check_for_rx_interrupt_queue(struct ena_adapter *adapter,
2954 struct ena_ring *rx_ring)
2956 if (likely(atomic_load_8(&rx_ring->first_interrupt)))
2959 if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
2962 rx_ring->no_interrupt_event_cnt++;
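/* After ENA_MAX_NO_INTERRUPT_ITERATIONS consecutive checks with completions pending but no interrupt seen, assume the MSI-X vector is faulty and trigger a reset. */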
2964 if (rx_ring->no_interrupt_event_cnt ==
2965 ENA_MAX_NO_INTERRUPT_ITERATIONS) {
2966 ena_log(adapter->pdev, ERR,
2967 "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
2969 ena_trigger_reset(adapter, ENA_REGS_RESET_MISS_INTERRUPT);
2977 check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
2978 struct ena_ring *tx_ring)
2980 device_t pdev = adapter->pdev;
2981 struct bintime curtime, time;
2982 struct ena_tx_buffer *tx_buf;
2983 int time_since_last_cleanup;
2984 int missing_tx_comp_to;
2985 sbintime_t time_offset;
2986 uint32_t missed_tx = 0;
2989 getbinuptime(&curtime);
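/* Walk the Tx ring and check how long each outstanding buffer has been waiting for its completion. */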
2991 for (i = 0; i < tx_ring->ring_size; i++) {
2992 tx_buf = &tx_ring->tx_buffer_info[i];
2994 if (bintime_isset(&tx_buf->timestamp) == 0)
2998 bintime_sub(&time, &tx_buf->timestamp);
2999 time_offset = bttosbt(time);
3001 if (unlikely(!atomic_load_8(&tx_ring->first_interrupt) &&
3002 time_offset > 2 * adapter->missing_tx_timeout)) {
3004 * If the interrupt is still not received after the grace
3005 * period, schedule a reset.
3008 "Potential MSIX issue on Tx side Queue = %d. "
3009 "Reset the device\n",
3011 ena_trigger_reset(adapter,
3012 ENA_REGS_RESET_MISS_INTERRUPT);
3016 /* Check again if packet is still waiting */
3017 if (unlikely(time_offset > adapter->missing_tx_timeout)) {
3019 if (tx_buf->print_once) {
3020 time_since_last_cleanup = TICKS_2_USEC(ticks -
3021 tx_ring->tx_last_cleanup_ticks);
3022 missing_tx_comp_to = sbttoms(
3023 adapter->missing_tx_timeout);
3025 "Found a Tx that wasn't completed on time, qid %d, index %d. "
3026 "%d usecs have passed since last cleanup. Missing Tx timeout value %d msecs.\n",
3027 tx_ring->qid, i, time_since_last_cleanup,
3028 missing_tx_comp_to);
3031 tx_buf->print_once = false;
3036 if (unlikely(missed_tx > adapter->missing_tx_threshold)) {
3038 "The number of lost tx completion is above the threshold "
3039 "(%d > %d). Reset the device\n",
3040 missed_tx, adapter->missing_tx_threshold);
3041 ena_trigger_reset(adapter, ENA_REGS_RESET_MISS_TX_CMPL);
3045 counter_u64_add(tx_ring->tx_stats.missing_tx_comp, missed_tx);
3051 * Check for TX transactions that were not completed on time.
3052 * The timeout is defined by "missing_tx_timeout".
3053 * A reset will be performed if the number of incomplete
3054 * transactions exceeds "missing_tx_threshold".
3057 check_for_missing_completions(struct ena_adapter *adapter)
3059 struct ena_ring *tx_ring;
3060 struct ena_ring *rx_ring;
3063 /* Make sure no other process is turning the device up or down */
3066 if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
3069 if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))
3072 if (adapter->missing_tx_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3075 budget = adapter->missing_tx_max_queues;
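/* Examine at most missing_tx_max_queues queue pairs per call; the scan resumes from next_monitored_tx_qid on the next timer tick. */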
3077 for (i = adapter->next_monitored_tx_qid; i < adapter->num_io_queues; i++) {
3078 tx_ring = &adapter->tx_ring[i];
3079 rx_ring = &adapter->rx_ring[i];
3081 rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
3082 if (unlikely(rc != 0))
3085 rc = check_for_rx_interrupt_queue(adapter, rx_ring);
3086 if (unlikely(rc != 0))
3096 adapter->next_monitored_tx_qid = i % adapter->num_io_queues;
3099 /* trigger rx cleanup after 2 consecutive detections */
3100 #define EMPTY_RX_REFILL 2
3101 /* For the rare case where the device runs out of Rx descriptors and the
3102 * msix handler failed to refill new Rx descriptors (due to a lack of memory
3104 * This case will lead to a deadlock:
3105 * The device won't send interrupts since all the new Rx packets will be dropped,
3106 * and the msix handler won't allocate new Rx descriptors, so the device won't be
3107 * able to deliver new packets to the host.
3109 * When such a situation is detected, execute the rx cleanup task in another thread
3112 check_for_empty_rx_ring(struct ena_adapter *adapter)
3114 struct ena_ring *rx_ring;
3115 int i, refill_required;
3117 if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
3120 if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))
3123 for (i = 0; i < adapter->num_io_queues; i++) {
3124 rx_ring = &adapter->rx_ring[i];
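/* A ring where all but one descriptor are free has run dry and needs an out-of-band refill. */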
3126 refill_required = ena_com_free_q_entries(
3127 rx_ring->ena_com_io_sq);
3128 if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
3129 rx_ring->empty_rx_queue++;
3131 if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
3132 counter_u64_add(rx_ring->rx_stats.empty_rx_ring,
3135 ena_log(adapter->pdev, WARN,
3136 "Rx ring %d is stalled. Triggering the refill function\n",
3139 taskqueue_enqueue(rx_ring->que->cleanup_tq,
3140 &rx_ring->que->cleanup_task);
3141 rx_ring->empty_rx_queue = 0;
3144 rx_ring->empty_rx_queue = 0;
3150 ena_update_hints(struct ena_adapter *adapter,
3151 struct ena_admin_ena_hw_hints *hints)
3153 struct ena_com_dev *ena_dev = adapter->ena_dev;
3155 if (hints->admin_completion_tx_timeout)
3156 ena_dev->admin_queue.completion_timeout =
3157 hints->admin_completion_tx_timeout * 1000;
3159 if (hints->mmio_read_timeout)
3160 /* convert to usec */
3161 ena_dev->mmio_read.reg_read_to = hints->mmio_read_timeout * 1000;
3163 if (hints->missed_tx_completion_count_threshold_to_reset)
3164 adapter->missing_tx_threshold =
3165 hints->missed_tx_completion_count_threshold_to_reset;
3167 if (hints->missing_tx_completion_timeout) {
3168 if (hints->missing_tx_completion_timeout ==
3169 ENA_HW_HINTS_NO_TIMEOUT)
3170 adapter->missing_tx_timeout = ENA_HW_HINTS_NO_TIMEOUT;
3172 adapter->missing_tx_timeout = SBT_1MS *
3173 hints->missing_tx_completion_timeout;
3176 if (hints->driver_watchdog_timeout) {
3177 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3178 adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
3180 adapter->keep_alive_timeout = SBT_1MS *
3181 hints->driver_watchdog_timeout;
3186 * ena_copy_eni_metrics - Get and copy ENI metrics from the HW.
3187 * @adapter: ENA device adapter
3189 * Returns 0 on success, EOPNOTSUPP if current HW doesn't support those metrics
3190 * and other error codes on failure.
3192 * This function can possibly cause a race with other calls to the admin queue.
3193 * Because of that, the caller should either lock this function or make sure
3194 * that there is no race in the current context.
3197 ena_copy_eni_metrics(struct ena_adapter *adapter)
3199 static bool print_once = true;
3202 rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_metrics);
3205 if (rc == ENA_COM_UNSUPPORTED) {
3207 ena_log(adapter->pdev, WARN,
3208 "Retrieving ENI metrics is not supported.\n");
3211 ena_log(adapter->pdev, DBG,
3212 "Retrieving ENI metrics is not supported.\n");
3215 ena_log(adapter->pdev, ERR,
3216 "Failed to get ENI metrics: %d\n", rc);
3224 ena_timer_service(void *data)
3226 struct ena_adapter *adapter = (struct ena_adapter *)data;
3227 struct ena_admin_host_info *host_info =
3228 adapter->ena_dev->host_attr.host_info;
3230 check_for_missing_keep_alive(adapter);
3232 check_for_admin_com_state(adapter);
3234 check_for_missing_completions(adapter);
3236 check_for_empty_rx_ring(adapter);
3239 * User-controlled update of the ENI metrics.
3240 * If the delay was set to 0, then the stats shouldn't be updated at
3242 * Otherwise, wait 'eni_metrics_sample_interval' seconds, before
3244 * As the timer service is executed every second, it's enough to increment
3245 * the appropriate counter each time the timer service is executed.
3247 if ((adapter->eni_metrics_sample_interval != 0) &&
3248 (++adapter->eni_metrics_sample_interval_cnt >=
3249 adapter->eni_metrics_sample_interval)) {
3250 taskqueue_enqueue(adapter->metrics_tq, &adapter->metrics_task);
3251 adapter->eni_metrics_sample_interval_cnt = 0;
3255 if (host_info != NULL)
3256 ena_update_host_info(host_info, adapter->ifp);
3258 if (unlikely(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) {
3260 * Timeout when validating version indicates that the device
3261 * became unresponsive. If that happens, skip the reset and
3262 * reschedule the timer service so the reset can be retried later.
3264 if (ena_com_validate_version(adapter->ena_dev) ==
3265 ENA_COM_TIMER_EXPIRED) {
3266 ena_log(adapter->pdev, WARN,
3267 "FW unresponsive, skipping reset\n");
3268 ENA_TIMER_RESET(adapter);
3271 ena_log(adapter->pdev, WARN, "Trigger reset is on\n");
3272 taskqueue_enqueue(adapter->reset_tq, &adapter->reset_task);
3277 * Schedule another timeout one second from now.
3279 ENA_TIMER_RESET(adapter);
3283 ena_destroy_device(struct ena_adapter *adapter, bool graceful)
3285 if_t ifp = adapter->ifp;
3286 struct ena_com_dev *ena_dev = adapter->ena_dev;
3289 if (!ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter))
3293 if_link_state_change(ifp, LINK_STATE_DOWN);
3295 ENA_TIMER_DRAIN(adapter);
3297 dev_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
3299 ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
3302 ena_com_set_admin_running_state(ena_dev, false);
3304 if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
3308 * Stop the device from sending AENQ events (if the device was up, and
3309 * the trigger reset was on, ena_down already performs device reset)
3311 if (!(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter) && dev_up))
3312 ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
3314 ena_free_mgmnt_irq(adapter);
3316 ena_disable_msix(adapter);
3319 * IO rings resources should be freed because `ena_restore_device()`
3320 * calls (not directly) `ena_enable_msix()`, which re-allocates MSIX
3321 * vectors. The amount of MSIX vectors after destroy-restore may be
3322 * different than before. Therefore, IO rings resources should be
3323 * established from scratch each time.
3325 ena_free_all_io_rings_resources(adapter);
3327 ena_com_abort_admin_commands(ena_dev);
3329 ena_com_wait_for_abort_completion(ena_dev);
3331 ena_com_admin_destroy(ena_dev);
3333 ena_com_mmio_reg_read_request_destroy(ena_dev);
3335 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
3337 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter);
3338 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
3342 ena_device_validate_params(struct ena_adapter *adapter,
3343 struct ena_com_dev_get_features_ctx *get_feat_ctx)
3345 if (memcmp(get_feat_ctx->dev_attr.mac_addr, adapter->mac_addr,
3346 ETHER_ADDR_LEN) != 0) {
3347 ena_log(adapter->pdev, ERR, "Error, mac addresses differ\n");
3351 if (get_feat_ctx->dev_attr.max_mtu < if_getmtu(adapter->ifp)) {
3352 ena_log(adapter->pdev, ERR,
3353 "Error, device max mtu is smaller than ifp MTU\n");
3361 ena_restore_device(struct ena_adapter *adapter)
3363 struct ena_com_dev_get_features_ctx get_feat_ctx;
3364 struct ena_com_dev *ena_dev = adapter->ena_dev;
3365 if_t ifp = adapter->ifp;
3366 device_t dev = adapter->pdev;
3370 ENA_FLAG_SET_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);
3372 rc = ena_device_init(adapter, dev, &get_feat_ctx, &wd_active);
3374 ena_log(dev, ERR, "Cannot initialize device\n");
3378 * Only enable the WD if it was enabled before the reset, so it won't override
3379 * the value set by the user via the sysctl.
3381 if (adapter->wd_active != 0)
3382 adapter->wd_active = wd_active;
3384 rc = ena_device_validate_params(adapter, &get_feat_ctx);
3386 ena_log(dev, ERR, "Validation of device parameters failed\n");
3387 goto err_device_destroy;
3390 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);
3391 /* Make sure we don't have a race with AENQ Links state handler */
3392 if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter))
3393 if_link_state_change(ifp, LINK_STATE_UP);
3395 rc = ena_enable_msix_and_set_admin_interrupts(adapter);
3397 ena_log(dev, ERR, "Enable MSI-X failed\n");
3398 goto err_device_destroy;
3402 * The effective number of used MSIX vectors should be the same as before
3403 * `ena_destroy_device()`, if possible, or as close to it as possible if fewer vectors
3406 if ((adapter->msix_vecs - ENA_ADMIN_MSIX_VEC) < adapter->num_io_queues)
3407 adapter->num_io_queues = adapter->msix_vecs - ENA_ADMIN_MSIX_VEC;
3409 /* Re-initialize rings basic information */
3410 ena_init_io_rings(adapter);
3412 /* If the interface was up before the reset bring it up */
3413 if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter)) {
3414 rc = ena_up(adapter);
3416 ena_log(dev, ERR, "Failed to create I/O queues\n");
3417 goto err_disable_msix;
3421 /* Indicate that device is running again and ready to work */
3422 ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
3425 * As the AENQ handlers weren't executed during reset because
3426 * the flag ENA_FLAG_DEVICE_RUNNING was turned off, the
3427 * timestamp must be updated again. That will prevent the next reset
3428 * caused by a missing keep alive.
3430 adapter->keep_alive_timestamp = getsbinuptime();
3431 ENA_TIMER_RESET(adapter);
3433 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
3438 ena_free_mgmnt_irq(adapter);
3439 ena_disable_msix(adapter);
3441 ena_com_abort_admin_commands(ena_dev);
3442 ena_com_wait_for_abort_completion(ena_dev);
3443 ena_com_admin_destroy(ena_dev);
3444 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
3445 ena_com_mmio_reg_read_request_destroy(ena_dev);
3447 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
3448 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);
3449 ena_log(dev, ERR, "Reset attempt failed. Can not reset the device\n");
3455 ena_metrics_task(void *arg, int pending)
3457 struct ena_adapter *adapter = (struct ena_adapter *)arg;
3460 (void)ena_copy_eni_metrics(adapter);
3465 ena_reset_task(void *arg, int pending)
3467 struct ena_adapter *adapter = (struct ena_adapter *)arg;
3470 if (likely(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) {
3471 ena_destroy_device(adapter, false);
3472 ena_restore_device(adapter);
3474 ena_log(adapter->pdev, INFO,
3475 "Device reset completed successfully, Driver info: %s\n",
3482 ena_free_stats(struct ena_adapter *adapter)
3484 ena_free_counters((counter_u64_t *)&adapter->hw_stats,
3485 sizeof(struct ena_hw_stats));
3486 ena_free_counters((counter_u64_t *)&adapter->dev_stats,
3487 sizeof(struct ena_stats_dev));
3491 * ena_attach - Device Initialization Routine
3492 * @pdev: device information struct
3494 * Returns 0 on success, or an error code on failure.
3496 * ena_attach initializes an adapter identified by a device structure.
3497 * The OS initialization, configuring of the adapter private structure,
3498 * and a hardware reset occur.
3501 ena_attach(device_t pdev)
3503 struct ena_com_dev_get_features_ctx get_feat_ctx;
3504 struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
3505 static int version_printed;
3506 struct ena_adapter *adapter;
3507 struct ena_com_dev *ena_dev = NULL;
3508 uint32_t max_num_io_queues;
3512 adapter = device_get_softc(pdev);
3513 adapter->pdev = pdev;
3514 adapter->first_bind = -1;
3517 * Set up the timer service - the driver is responsible for avoiding
3518 * concurrency, as the callout won't be using any locking inside.
3520 ENA_TIMER_INIT(adapter);
3521 adapter->keep_alive_timeout = ENA_DEFAULT_KEEP_ALIVE_TO;
3522 adapter->missing_tx_timeout = ENA_DEFAULT_TX_CMP_TO;
3523 adapter->missing_tx_max_queues = ENA_DEFAULT_TX_MONITORED_QUEUES;
3524 adapter->missing_tx_threshold = ENA_DEFAULT_TX_CMP_THRESHOLD;
3526 if (version_printed++ == 0)
3527 ena_log(pdev, INFO, "%s\n", ena_version);
3529 /* Allocate memory for ena_dev structure */
3530 ena_dev = malloc(sizeof(struct ena_com_dev), M_DEVBUF,
3533 adapter->ena_dev = ena_dev;
3534 ena_dev->dmadev = pdev;
3536 rid = PCIR_BAR(ENA_REG_BAR);
3537 adapter->memory = NULL;
3538 adapter->registers = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, &rid,
3540 if (unlikely(adapter->registers == NULL)) {
3542 "unable to allocate bus resource: registers!\n");
3547 /* MSIx vector table may reside on BAR0 with registers or on BAR1. */
3548 msix_rid = pci_msix_table_bar(pdev);
3549 if (msix_rid != rid) {
3550 adapter->msix = bus_alloc_resource_any(pdev, SYS_RES_MEMORY,
3551 &msix_rid, RF_ACTIVE);
3552 if (unlikely(adapter->msix == NULL)) {
3554 "unable to allocate bus resource: msix!\n");
3558 adapter->msix_rid = msix_rid;
3561 ena_dev->bus = malloc(sizeof(struct ena_bus), M_DEVBUF,
3564 /* Store register resources */
3565 ((struct ena_bus *)(ena_dev->bus))->reg_bar_t = rman_get_bustag(
3566 adapter->registers);
3567 ((struct ena_bus *)(ena_dev->bus))->reg_bar_h = rman_get_bushandle(
3568 adapter->registers);
3570 if (unlikely(((struct ena_bus *)(ena_dev->bus))->reg_bar_h == 0)) {
3571 ena_log(pdev, ERR, "failed to pmap registers bar\n");
3576 rc = ena_map_llq_mem_bar(pdev, ena_dev);
3577 if (unlikely(rc != 0)) {
3578 ena_log(pdev, ERR, "Failed to map ENA mem bar");
3582 /* Initially clear all the flags */
3583 ENA_FLAG_ZERO(adapter);
3585 /* Device initialization */
3586 rc = ena_device_init(adapter, pdev, &get_feat_ctx, &adapter->wd_active);
3587 if (unlikely(rc != 0)) {
3588 ena_log(pdev, ERR, "ENA device init failed! (err: %d)\n", rc);
3593 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
3594 adapter->disable_meta_caching = !!(
3595 get_feat_ctx.llq.accel_mode.u.get.supported_flags &
3596 BIT(ENA_ADMIN_DISABLE_META_CACHING));
3598 adapter->keep_alive_timestamp = getsbinuptime();
3600 adapter->tx_offload_cap = get_feat_ctx.offload.tx;
3602 memcpy(adapter->mac_addr, get_feat_ctx.dev_attr.mac_addr,
3605 calc_queue_ctx.pdev = pdev;
3606 calc_queue_ctx.ena_dev = ena_dev;
3607 calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
3609 /* Calculate initial and maximum IO queue number and size */
3610 max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev,
3612 rc = ena_calc_io_queue_size(&calc_queue_ctx);
3613 if (unlikely((rc != 0) || (max_num_io_queues <= 0))) {
3618 adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
3619 adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size;
3620 adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
3621 adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
3622 adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
3623 adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
3625 adapter->max_num_io_queues = max_num_io_queues;
3627 adapter->buf_ring_size = ENA_DEFAULT_BUF_RING_SIZE;
3629 adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;
3631 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
3633 /* set up dma tags for rx and tx buffers */
3634 rc = ena_setup_tx_dma_tag(adapter);
3635 if (unlikely(rc != 0)) {
3636 ena_log(pdev, ERR, "Failed to create TX DMA tag\n");
3640 rc = ena_setup_rx_dma_tag(adapter);
3641 if (unlikely(rc != 0)) {
3642 ena_log(pdev, ERR, "Failed to create RX DMA tag\n");
3643 goto err_tx_tag_free;
3647 * The amount of requested MSIX vectors is equal to
3648 * adapter::max_num_io_queues (see `ena_enable_msix()`), plus a constant
3649 * number of admin queue interrupts. The former is initially determined
3650 * by HW capabilities (see `ena_calc_max_io_queue_num()`) but may not be
3651 * achieved if there are not enough system resources. By default, the
3652 * number of effectively used IO queues is the same but later on it can
3653 * be limited by the user via the sysctl interface.
3655 rc = ena_enable_msix_and_set_admin_interrupts(adapter);
3656 if (unlikely(rc != 0)) {
3658 "Failed to enable and set the admin interrupts\n");
3661 /* By default all allocated MSIX vectors are actively used */
3662 adapter->num_io_queues = adapter->msix_vecs - ENA_ADMIN_MSIX_VEC;
3664 /* initialize rings basic information */
3665 ena_init_io_rings(adapter);
3667 /* Initialize statistics */
3668 ena_alloc_counters((counter_u64_t *)&adapter->dev_stats,
3669 sizeof(struct ena_stats_dev));
3670 ena_alloc_counters((counter_u64_t *)&adapter->hw_stats,
3671 sizeof(struct ena_hw_stats));
3672 ena_sysctl_add_nodes(adapter);
3674 /* setup network interface */
3675 rc = ena_setup_ifnet(pdev, adapter, &get_feat_ctx);
3676 if (unlikely(rc != 0)) {
3677 ena_log(pdev, ERR, "Error with network interface setup\n");
3681 /* Initialize reset task queue */
3682 TASK_INIT(&adapter->reset_task, 0, ena_reset_task, adapter);
3683 adapter->reset_tq = taskqueue_create("ena_reset_enqueue",
3684 M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->reset_tq);
3685 taskqueue_start_threads(&adapter->reset_tq, 1, PI_NET, "%s rstq",
3686 device_get_nameunit(adapter->pdev));
3688 /* Initialize metrics task queue */
3689 TASK_INIT(&adapter->metrics_task, 0, ena_metrics_task, adapter);
3690 adapter->metrics_tq = taskqueue_create("ena_metrics_enqueue",
3691 M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->metrics_tq);
3692 taskqueue_start_threads(&adapter->metrics_tq, 1, PI_NET, "%s metricsq",
3693 device_get_nameunit(adapter->pdev));
3696 rc = ena_netmap_attach(adapter);
3698 ena_log(pdev, ERR, "netmap attach failed: %d\n", rc);
3701 #endif /* DEV_NETMAP */
3703 /* Tell the stack that the interface is not active */
3704 if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
3705 ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
3707 /* Run the timer service */
3708 ENA_TIMER_RESET(adapter);
3714 ether_ifdetach(adapter->ifp);
3715 #endif /* DEV_NETMAP */
3717 ena_free_stats(adapter);
3718 ena_com_dev_reset(adapter->ena_dev, ENA_REGS_RESET_INIT_ERR);
3719 ena_free_mgmnt_irq(adapter);
3720 ena_disable_msix(adapter);
3722 ena_free_all_io_rings_resources(adapter);
3723 ena_free_rx_dma_tag(adapter);
3725 ena_free_tx_dma_tag(adapter);
3727 ena_com_admin_destroy(ena_dev);
3728 ena_com_delete_host_info(ena_dev);
3729 ena_com_mmio_reg_read_request_destroy(ena_dev);
3731 free(ena_dev->bus, M_DEVBUF);
3733 ena_free_pci_resources(adapter);
3735 free(ena_dev, M_DEVBUF);
3741 * ena_detach - Device Removal Routine
3742 * @pdev: device information struct
3744 * ena_detach is called by the device subsystem to alert the driver
3745 * that it should release a PCI device.
3748 ena_detach(device_t pdev)
3750 struct ena_adapter *adapter = device_get_softc(pdev);
3751 struct ena_com_dev *ena_dev = adapter->ena_dev;
3754 /* Make sure VLANs are not using the driver */
3755 if (if_vlantrunkinuse(adapter->ifp)) {
3756 ena_log(adapter->pdev, ERR, "VLAN is in use, detach first\n");
3760 ether_ifdetach(adapter->ifp);
3762 /* Stop timer service */
3764 ENA_TIMER_DRAIN(adapter);
3767 /* Release metrics task */
3768 while (taskqueue_cancel(adapter->metrics_tq, &adapter->metrics_task, NULL))
3769 taskqueue_drain(adapter->metrics_tq, &adapter->metrics_task);
3770 taskqueue_free(adapter->metrics_tq);
3772 /* Release reset task */
3773 while (taskqueue_cancel(adapter->reset_tq, &adapter->reset_task, NULL))
3774 taskqueue_drain(adapter->reset_tq, &adapter->reset_task);
3775 taskqueue_free(adapter->reset_tq);
3779 ena_destroy_device(adapter, true);
3782 /* Restore unregistered sysctl queue nodes. */
3783 ena_sysctl_update_queue_node_nb(adapter, adapter->num_io_queues,
3784 adapter->max_num_io_queues);
3787 netmap_detach(adapter->ifp);
3788 #endif /* DEV_NETMAP */
3790 ena_free_stats(adapter);
3792 rc = ena_free_rx_dma_tag(adapter);
3793 if (unlikely(rc != 0))
3794 ena_log(adapter->pdev, WARN,
3795 "Unmapped RX DMA tag associations\n");
3797 rc = ena_free_tx_dma_tag(adapter);
3798 if (unlikely(rc != 0))
3799 ena_log(adapter->pdev, WARN,
3800 "Unmapped TX DMA tag associations\n");
3802 ena_free_irqs(adapter);
3804 ena_free_pci_resources(adapter);
3806 if (adapter->rss_indir != NULL)
3807 free(adapter->rss_indir, M_DEVBUF);
3809 if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter)))
3810 ena_com_rss_destroy(ena_dev);
3812 ena_com_delete_host_info(ena_dev);
3814 if_free(adapter->ifp);
3816 free(ena_dev->bus, M_DEVBUF);
3818 free(ena_dev, M_DEVBUF);
3820 return (bus_generic_detach(pdev));
3823 /******************************************************************************
3824 ******************************** AENQ Handlers *******************************
3825 *****************************************************************************/
3827 * ena_update_on_link_change:
3828 * Notify the network interface about the change in link status
3831 ena_update_on_link_change(void *adapter_data,
3832 struct ena_admin_aenq_entry *aenq_e)
3834 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3835 struct ena_admin_aenq_link_change_desc *aenq_desc;
3839 aenq_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;
3841 status = aenq_desc->flags &
3842 ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
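/* Propagate the new link state, reported in the descriptor flags, to the network stack. */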
3845 ena_log(adapter->pdev, INFO, "link is UP\n");
3846 ENA_FLAG_SET_ATOMIC(ENA_FLAG_LINK_UP, adapter);
3847 if (!ENA_FLAG_ISSET(ENA_FLAG_ONGOING_RESET, adapter))
3848 if_link_state_change(ifp, LINK_STATE_UP);
3850 ena_log(adapter->pdev, INFO, "link is DOWN\n");
3851 if_link_state_change(ifp, LINK_STATE_DOWN);
3852 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_LINK_UP, adapter);
3857 ena_notification(void *adapter_data, struct ena_admin_aenq_entry *aenq_e)
3859 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3860 struct ena_admin_ena_hw_hints *hints;
3862 ENA_WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
3863 adapter->ena_dev, "Invalid group(%x) expected %x\n",
3864 aenq_e->aenq_common_desc.group, ENA_ADMIN_NOTIFICATION);
3866 switch (aenq_e->aenq_common_desc.syndrome) {
3867 case ENA_ADMIN_UPDATE_HINTS:
3869 (struct ena_admin_ena_hw_hints *)(&aenq_e->inline_data_w4);
3870 ena_update_hints(adapter, hints);
3873 ena_log(adapter->pdev, ERR,
3874 "Invalid aenq notification link state %d\n",
3875 aenq_e->aenq_common_desc.syndrome);
3880 ena_lock_init(void *arg)
3884 SYSINIT(ena_lock_init, SI_SUB_LOCK, SI_ORDER_FIRST, ena_lock_init, NULL);
3887 ena_lock_uninit(void *arg)
3891 SYSUNINIT(ena_lock_uninit, SI_SUB_LOCK, SI_ORDER_FIRST, ena_lock_uninit, NULL);
3894 * This handler will be called for an unknown event group or unimplemented handlers
3897 unimplemented_aenq_handler(void *adapter_data,
3898 struct ena_admin_aenq_entry *aenq_e)
3900 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3902 ena_log(adapter->pdev, ERR,
3903 "Unknown event was received or event with unimplemented handler\n");
3906 static struct ena_aenq_handlers aenq_handlers = {
3908 [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
3909 [ENA_ADMIN_NOTIFICATION] = ena_notification,
3910 [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
3912 .unimplemented_handler = unimplemented_aenq_handler
3915 /*********************************************************************
3916 * FreeBSD Device Interface Entry Points
3917 *********************************************************************/
3919 static device_method_t ena_methods[] = { /* Device interface */
3920 DEVMETHOD(device_probe, ena_probe),
3921 DEVMETHOD(device_attach, ena_attach),
3922 DEVMETHOD(device_detach, ena_detach), DEVMETHOD_END
3925 static driver_t ena_driver = {
3928 sizeof(struct ena_adapter),
3931 DRIVER_MODULE(ena, pci, ena_driver, 0, 0);
3932 MODULE_PNP_INFO("U16:vendor;U16:device", pci, ena, ena_vendor_info_array,
3933 nitems(ena_vendor_info_array) - 1);
3934 MODULE_DEPEND(ena, pci, 1, 1, 1);
3935 MODULE_DEPEND(ena, ether, 1, 1, 1);
3937 MODULE_DEPEND(ena, netmap, 1, 1, 1);
3938 #endif /* DEV_NETMAP */
3940 /*********************************************************************/