4 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
35 #include <sys/param.h>
36 #include <sys/systm.h>
38 #include <sys/endian.h>
39 #include <sys/kernel.h>
40 #include <sys/kthread.h>
41 #include <sys/malloc.h>
43 #include <sys/module.h>
46 #include <sys/socket.h>
47 #include <sys/sockio.h>
48 #include <sys/sysctl.h>
49 #include <sys/taskqueue.h>
51 #include <sys/eventhandler.h>
53 #include <machine/bus.h>
54 #include <machine/resource.h>
55 #include <machine/in_cksum.h>
58 #include <net/ethernet.h>
60 #include <net/if_var.h>
61 #include <net/if_arp.h>
62 #include <net/if_dl.h>
63 #include <net/if_media.h>
64 #include <net/if_types.h>
65 #include <net/if_vlan_var.h>
67 #include <net/rss_config.h>
70 #include <netinet/in_systm.h>
71 #include <netinet/in.h>
72 #include <netinet/if_ether.h>
73 #include <netinet/ip.h>
74 #include <netinet/ip6.h>
75 #include <netinet/tcp.h>
76 #include <netinet/udp.h>
78 #include <dev/pci/pcivar.h>
79 #include <dev/pci/pcireg.h>
84 #include "ena_datapath.h"
86 #include "ena_sysctl.h"
89 #include "ena_netmap.h"
90 #endif /* DEV_NETMAP */
92 /*********************************************************
94 *********************************************************/
95 static int ena_probe(device_t);
96 static void ena_intr_msix_mgmnt(void *);
97 static void ena_free_pci_resources(struct ena_adapter *);
98 static int ena_change_mtu(if_t, int);
99 static inline void ena_alloc_counters(counter_u64_t *, int);
100 static inline void ena_free_counters(counter_u64_t *, int);
101 static inline void ena_reset_counters(counter_u64_t *, int);
102 static void ena_init_io_rings_common(struct ena_adapter *,
103 struct ena_ring *, uint16_t);
104 static void ena_init_io_rings_basic(struct ena_adapter *);
105 static void ena_init_io_rings_advanced(struct ena_adapter *);
106 static void ena_init_io_rings(struct ena_adapter *);
107 static void ena_free_io_ring_resources(struct ena_adapter *, unsigned int);
108 static void ena_free_all_io_rings_resources(struct ena_adapter *);
109 static int ena_setup_tx_dma_tag(struct ena_adapter *);
110 static int ena_free_tx_dma_tag(struct ena_adapter *);
111 static int ena_setup_rx_dma_tag(struct ena_adapter *);
112 static int ena_free_rx_dma_tag(struct ena_adapter *);
113 static void ena_release_all_tx_dmamap(struct ena_ring *);
114 static int ena_setup_tx_resources(struct ena_adapter *, int);
115 static void ena_free_tx_resources(struct ena_adapter *, int);
116 static int ena_setup_all_tx_resources(struct ena_adapter *);
117 static void ena_free_all_tx_resources(struct ena_adapter *);
118 static int ena_setup_rx_resources(struct ena_adapter *, unsigned int);
119 static void ena_free_rx_resources(struct ena_adapter *, unsigned int);
120 static int ena_setup_all_rx_resources(struct ena_adapter *);
121 static void ena_free_all_rx_resources(struct ena_adapter *);
122 static inline int ena_alloc_rx_mbuf(struct ena_adapter *, struct ena_ring *,
123 struct ena_rx_buffer *);
124 static void ena_free_rx_mbuf(struct ena_adapter *, struct ena_ring *,
125 struct ena_rx_buffer *);
126 static void ena_free_rx_bufs(struct ena_adapter *, unsigned int);
127 static void ena_refill_all_rx_bufs(struct ena_adapter *);
128 static void ena_free_all_rx_bufs(struct ena_adapter *);
129 static void ena_free_tx_bufs(struct ena_adapter *, unsigned int);
130 static void ena_free_all_tx_bufs(struct ena_adapter *);
131 static void ena_destroy_all_tx_queues(struct ena_adapter *);
132 static void ena_destroy_all_rx_queues(struct ena_adapter *);
133 static void ena_destroy_all_io_queues(struct ena_adapter *);
134 static int ena_create_io_queues(struct ena_adapter *);
135 static int ena_handle_msix(void *);
136 static int ena_enable_msix(struct ena_adapter *);
137 static void ena_setup_mgmnt_intr(struct ena_adapter *);
138 static int ena_setup_io_intr(struct ena_adapter *);
139 static int ena_request_mgmnt_irq(struct ena_adapter *);
140 static int ena_request_io_irq(struct ena_adapter *);
141 static void ena_free_mgmnt_irq(struct ena_adapter *);
142 static void ena_free_io_irq(struct ena_adapter *);
143 static void ena_free_irqs(struct ena_adapter*);
144 static void ena_disable_msix(struct ena_adapter *);
145 static void ena_unmask_all_io_irqs(struct ena_adapter *);
146 static int ena_rss_configure(struct ena_adapter *);
147 static int ena_up_complete(struct ena_adapter *);
148 static uint64_t ena_get_counter(if_t, ift_counter);
149 static int ena_media_change(if_t);
150 static void ena_media_status(if_t, struct ifmediareq *);
151 static void ena_init(void *);
152 static int ena_ioctl(if_t, u_long, caddr_t);
153 static int ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *);
154 static void ena_update_host_info(struct ena_admin_host_info *, if_t);
155 static void ena_update_hwassist(struct ena_adapter *);
156 static int ena_setup_ifnet(device_t, struct ena_adapter *,
157 struct ena_com_dev_get_features_ctx *);
158 static int ena_enable_wc(struct resource *);
159 static int ena_set_queues_placement_policy(device_t, struct ena_com_dev *,
160 struct ena_admin_feature_llq_desc *, struct ena_llq_configurations *);
161 static uint32_t ena_calc_max_io_queue_num(device_t, struct ena_com_dev *,
162 struct ena_com_dev_get_features_ctx *);
163 static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *);
164 static int ena_rss_init_default(struct ena_adapter *);
165 static void ena_rss_init_default_deferred(void *);
166 static void ena_config_host_info(struct ena_com_dev *, device_t);
167 static int ena_attach(device_t);
168 static int ena_detach(device_t);
169 static int ena_device_init(struct ena_adapter *, device_t,
170 struct ena_com_dev_get_features_ctx *, int *);
171 static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *);
172 static void ena_update_on_link_change(void *, struct ena_admin_aenq_entry *);
173 static void unimplemented_aenq_handler(void *,
174 struct ena_admin_aenq_entry *);
175 static void ena_timer_service(void *);
177 static char ena_version[] = DEVICE_NAME DRV_MODULE_NAME " v" DRV_MODULE_VERSION;
179 static ena_vendor_info_t ena_vendor_info_array[] = {
180 { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF, 0},
181 { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_LLQ_PF, 0},
182 { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF, 0},
183 { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_LLQ_VF, 0},
189 * Contains pointers to event handlers, e.g. link state change.
191 static struct ena_aenq_handlers aenq_handlers;
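/*
 * bus_dmamap_load() callback: store the bus address of the first (and only
 * expected) DMA segment in the caller-provided location.
 */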
194 ena_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
198 *(bus_addr_t *) arg = segs[0].ds_addr;
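/**
 * ena_dma_alloc - allocate a DMA-coherent buffer for the device
 *
 * Creates a DMA tag constrained to the adapter's DMA width, allocates zeroed
 * coherent memory and loads the map, so that dma->vaddr and dma->paddr
 * describe the buffer; on failure the partially created resources are freed.
 *
 * Illustrative usage sketch (the parameter values below are assumptions, not
 * taken from a real call site):
 *
 *	ena_mem_handle_t mem;
 *	if (ena_dma_alloc(adapter->pdev, len, &mem, 0, PAGE_SIZE) == 0) {
 *		... mem.vaddr and mem.paddr are now valid ...
 *	}
 */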
202 ena_dma_alloc(device_t dmadev, bus_size_t size,
203 ena_mem_handle_t *dma, int mapflags, bus_size_t alignment)
205 struct ena_adapter* adapter = device_get_softc(dmadev);
207 uint64_t dma_space_addr;
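/* Round the requested size up to a multiple of PAGE_SIZE. */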
210 maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE;
212 dma_space_addr = ENA_DMA_BIT_MASK(adapter->dma_width);
213 if (unlikely(dma_space_addr == 0))
214 dma_space_addr = BUS_SPACE_MAXADDR;
216 error = bus_dma_tag_create(bus_get_dma_tag(dmadev), /* parent */
217 alignment, 0, /* alignment, bounds */
218 dma_space_addr, /* lowaddr of exclusion window */
219 BUS_SPACE_MAXADDR,/* highaddr of exclusion window */
220 NULL, NULL, /* filter, filterarg */
221 maxsize, /* maxsize */
223 maxsize, /* maxsegsize */
224 BUS_DMA_ALLOCNOW, /* flags */
228 if (unlikely(error != 0)) {
229 ena_trace(NULL, ENA_ALERT, "bus_dma_tag_create failed: %d\n", error);
233 error = bus_dmamem_alloc(dma->tag, (void**) &dma->vaddr,
234 BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->map);
235 if (unlikely(error != 0)) {
236 ena_trace(NULL, ENA_ALERT, "bus_dmamem_alloc(%ju) failed: %d\n",
237 (uintmax_t)size, error);
238 goto fail_map_create;
242 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr,
243 size, ena_dmamap_callback, &dma->paddr, mapflags);
244 if (unlikely((error != 0) || (dma->paddr == 0))) {
245 ena_trace(NULL, ENA_ALERT, "bus_dmamap_load failed: %d\n", error);
249 bus_dmamap_sync(dma->tag, dma->map,
250 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
255 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
257 bus_dma_tag_destroy(dma->tag);
267 * This function should generate a unique key for the whole driver.
268 * If the key was already generated in a previous call (for example
269 * for another adapter), then it should be returned instead.
272 ena_rss_key_fill(void *key, size_t size)
274 static bool key_generated;
275 static uint8_t default_key[ENA_HASH_KEY_SIZE];
277 KASSERT(size <= ENA_HASH_KEY_SIZE, ("Requested more bytes than ENA RSS key can hold"));
279 if (!key_generated) {
280 arc4random_buf(default_key, ENA_HASH_KEY_SIZE);
281 key_generated = true;
284 memcpy(key, default_key, size);
288 ena_free_pci_resources(struct ena_adapter *adapter)
290 device_t pdev = adapter->pdev;
292 if (adapter->memory != NULL) {
293 bus_release_resource(pdev, SYS_RES_MEMORY,
294 PCIR_BAR(ENA_MEM_BAR), adapter->memory);
297 if (adapter->registers != NULL) {
298 bus_release_resource(pdev, SYS_RES_MEMORY,
299 PCIR_BAR(ENA_REG_BAR), adapter->registers);
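/**
 * ena_probe - match the PCI device against the table of supported Amazon
 * ENA vendor/device IDs and, on a hit, set the device description.
 */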
304 ena_probe(device_t dev)
306 ena_vendor_info_t *ent;
307 char adapter_name[60];
308 uint16_t pci_vendor_id = 0;
309 uint16_t pci_device_id = 0;
311 pci_vendor_id = pci_get_vendor(dev);
312 pci_device_id = pci_get_device(dev);
314 ent = ena_vendor_info_array;
315 while (ent->vendor_id != 0) {
316 if ((pci_vendor_id == ent->vendor_id) &&
317 (pci_device_id == ent->device_id)) {
318 ena_trace(NULL, ENA_DBG, "vendor=%x device=%x\n",
319 pci_vendor_id, pci_device_id);
321 sprintf(adapter_name, DEVICE_DESC);
322 device_set_desc_copy(dev, adapter_name);
323 return (BUS_PROBE_DEFAULT);
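/**
 * ena_change_mtu - validate the requested MTU against the device limits and
 * program it into the device through the admin queue.
 */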
334 ena_change_mtu(if_t ifp, int new_mtu)
336 struct ena_adapter *adapter = if_getsoftc(ifp);
339 if ((new_mtu > adapter->max_mtu) || (new_mtu < ENA_MIN_MTU)) {
340 device_printf(adapter->pdev, "Invalid MTU setting. "
341 "new_mtu: %d max mtu: %d min mtu: %d\n",
342 new_mtu, adapter->max_mtu, ENA_MIN_MTU);
346 rc = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
347 if (likely(rc == 0)) {
348 ena_trace(NULL, ENA_DBG, "set MTU to %d\n", new_mtu);
349 if_setmtu(ifp, new_mtu);
351 device_printf(adapter->pdev, "Failed to set MTU to %d\n",
359 ena_alloc_counters(counter_u64_t *begin, int size)
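/*
 * The ena stats structures are assumed to consist solely of counter_u64_t
 * fields, so they can be walked as a flat array; 'size' is the size of the
 * whole structure in bytes.
 */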
361 counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
363 for (; begin < end; ++begin)
364 *begin = counter_u64_alloc(M_WAITOK);
368 ena_free_counters(counter_u64_t *begin, int size)
370 counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
372 for (; begin < end; ++begin)
373 counter_u64_free(*begin);
377 ena_reset_counters(counter_u64_t *begin, int size)
379 counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
381 for (; begin < end; ++begin)
382 counter_u64_zero(*begin);
386 ena_init_io_rings_common(struct ena_adapter *adapter, struct ena_ring *ring,
391 ring->adapter = adapter;
392 ring->ena_dev = adapter->ena_dev;
393 ring->first_interrupt = false;
394 ring->no_interrupt_event_cnt = 0;
398 ena_init_io_rings_basic(struct ena_adapter *adapter)
400 struct ena_com_dev *ena_dev;
401 struct ena_ring *txr, *rxr;
405 ena_dev = adapter->ena_dev;
407 for (i = 0; i < adapter->num_io_queues; i++) {
408 txr = &adapter->tx_ring[i];
409 rxr = &adapter->rx_ring[i];
411 /* TX/RX common ring state */
412 ena_init_io_rings_common(adapter, txr, i);
413 ena_init_io_rings_common(adapter, rxr, i);
415 /* TX specific ring state */
416 txr->tx_max_header_size = ena_dev->tx_max_header_size;
417 txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
419 que = &adapter->que[i];
420 que->adapter = adapter;
428 rxr->empty_rx_queue = 0;
429 rxr->rx_mbuf_sz = ena_mbuf_sz;
434 ena_init_io_rings_advanced(struct ena_adapter *adapter)
436 struct ena_ring *txr, *rxr;
439 for (i = 0; i < adapter->num_io_queues; i++) {
440 txr = &adapter->tx_ring[i];
441 rxr = &adapter->rx_ring[i];
443 /* Allocate a buf ring */
444 txr->buf_ring_size = adapter->buf_ring_size;
445 txr->br = buf_ring_alloc(txr->buf_ring_size, M_DEVBUF,
446 M_WAITOK, &txr->ring_mtx);
448 /* Allocate Tx statistics. */
449 ena_alloc_counters((counter_u64_t *)&txr->tx_stats,
450 sizeof(txr->tx_stats));
452 /* Allocate Rx statistics. */
453 ena_alloc_counters((counter_u64_t *)&rxr->rx_stats,
454 sizeof(rxr->rx_stats));
456 /* Initialize locks */
457 snprintf(txr->mtx_name, nitems(txr->mtx_name), "%s:tx(%d)",
458 device_get_nameunit(adapter->pdev), i);
459 snprintf(rxr->mtx_name, nitems(rxr->mtx_name), "%s:rx(%d)",
460 device_get_nameunit(adapter->pdev), i);
462 mtx_init(&txr->ring_mtx, txr->mtx_name, NULL, MTX_DEF);
467 ena_init_io_rings(struct ena_adapter *adapter)
470 * IO rings initialization can be divided into 2 steps:
471 * 1. Initialize variables and fields with initial values and copy
472 * them from adapter/ena_dev (basic)
473 * 2. Allocate mutex, counters and buf_ring (advanced)
475 ena_init_io_rings_basic(adapter);
476 ena_init_io_rings_advanced(adapter);
480 ena_free_io_ring_resources(struct ena_adapter *adapter, unsigned int qid)
482 struct ena_ring *txr = &adapter->tx_ring[qid];
483 struct ena_ring *rxr = &adapter->rx_ring[qid];
485 ena_free_counters((counter_u64_t *)&txr->tx_stats,
486 sizeof(txr->tx_stats));
487 ena_free_counters((counter_u64_t *)&rxr->rx_stats,
488 sizeof(rxr->rx_stats));
490 ENA_RING_MTX_LOCK(txr);
491 drbr_free(txr->br, M_DEVBUF);
492 ENA_RING_MTX_UNLOCK(txr);
494 mtx_destroy(&txr->ring_mtx);
498 ena_free_all_io_rings_resources(struct ena_adapter *adapter)
502 for (i = 0; i < adapter->num_io_queues; i++)
503 ena_free_io_ring_resources(adapter, i);
508 ena_setup_tx_dma_tag(struct ena_adapter *adapter)
512 /* Create DMA tag for Tx buffers */
513 ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev),
514 1, 0, /* alignment, bounds */
515 ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */
516 BUS_SPACE_MAXADDR, /* highaddr of excl window */
517 NULL, NULL, /* filter, filterarg */
518 ENA_TSO_MAXSIZE, /* maxsize */
519 adapter->max_tx_sgl_size - 1, /* nsegments */
520 ENA_TSO_MAXSIZE, /* maxsegsize */
523 NULL, /* lockfuncarg */
524 &adapter->tx_buf_tag);
530 ena_free_tx_dma_tag(struct ena_adapter *adapter)
534 ret = bus_dma_tag_destroy(adapter->tx_buf_tag);
536 if (likely(ret == 0))
537 adapter->tx_buf_tag = NULL;
543 ena_setup_rx_dma_tag(struct ena_adapter *adapter)
547 /* Create DMA tag for Rx buffers */
548 ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), /* parent */
549 1, 0, /* alignment, bounds */
550 ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */
551 BUS_SPACE_MAXADDR, /* highaddr of excl window */
552 NULL, NULL, /* filter, filterarg */
553 ena_mbuf_sz, /* maxsize */
554 adapter->max_rx_sgl_size, /* nsegments */
555 ena_mbuf_sz, /* maxsegsize */
559 &adapter->rx_buf_tag);
565 ena_free_rx_dma_tag(struct ena_adapter *adapter)
569 ret = bus_dma_tag_destroy(adapter->rx_buf_tag);
571 if (likely(ret == 0))
572 adapter->rx_buf_tag = NULL;
578 ena_release_all_tx_dmamap(struct ena_ring *tx_ring)
580 struct ena_adapter *adapter = tx_ring->adapter;
581 struct ena_tx_buffer *tx_info;
582 bus_dma_tag_t tx_tag = adapter->tx_buf_tag;
585 struct ena_netmap_tx_info *nm_info;
587 #endif /* DEV_NETMAP */
589 for (i = 0; i < tx_ring->ring_size; ++i) {
590 tx_info = &tx_ring->tx_buffer_info[i];
592 if (adapter->ifp->if_capenable & IFCAP_NETMAP) {
593 nm_info = &tx_info->nm_info;
594 for (j = 0; j < ENA_PKT_MAX_BUFS; ++j) {
595 if (nm_info->map_seg[j] != NULL) {
596 bus_dmamap_destroy(tx_tag,
597 nm_info->map_seg[j]);
598 nm_info->map_seg[j] = NULL;
602 #endif /* DEV_NETMAP */
603 if (tx_info->dmamap != NULL) {
604 bus_dmamap_destroy(tx_tag, tx_info->dmamap);
605 tx_info->dmamap = NULL;
611 * ena_setup_tx_resources - allocate Tx resources (Descriptors)
612 * @adapter: network interface device structure
615 * Returns 0 on success, an error code otherwise.
618 ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
620 struct ena_que *que = &adapter->que[qid];
621 struct ena_ring *tx_ring = que->tx_ring;
627 ena_netmap_reset_tx_ring(adapter, qid);
628 #endif /* DEV_NETMAP */
630 size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
632 tx_ring->tx_buffer_info = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
633 if (unlikely(tx_ring->tx_buffer_info == NULL))
636 size = sizeof(uint16_t) * tx_ring->ring_size;
637 tx_ring->free_tx_ids = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
638 if (unlikely(tx_ring->free_tx_ids == NULL))
639 goto err_buf_info_free;
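/* Intermediate bounce buffer for packet headers on the LLQ (push) Tx path. */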
641 size = tx_ring->tx_max_header_size;
642 tx_ring->push_buf_intermediate_buf = malloc(size, M_DEVBUF,
644 if (unlikely(tx_ring->push_buf_intermediate_buf == NULL))
645 goto err_tx_ids_free;
647 /* Req id stack for TX OOO completions */
648 for (i = 0; i < tx_ring->ring_size; i++)
649 tx_ring->free_tx_ids[i] = i;
651 /* Reset TX statistics. */
652 ena_reset_counters((counter_u64_t *)&tx_ring->tx_stats,
653 sizeof(tx_ring->tx_stats));
655 tx_ring->next_to_use = 0;
656 tx_ring->next_to_clean = 0;
657 tx_ring->acum_pkts = 0;
659 /* Make sure that drbr is empty */
660 ENA_RING_MTX_LOCK(tx_ring);
661 drbr_flush(adapter->ifp, tx_ring->br);
662 ENA_RING_MTX_UNLOCK(tx_ring);
664 /* ... and create the buffer DMA maps */
665 for (i = 0; i < tx_ring->ring_size; i++) {
666 err = bus_dmamap_create(adapter->tx_buf_tag, 0,
667 &tx_ring->tx_buffer_info[i].dmamap);
668 if (unlikely(err != 0)) {
669 ena_trace(NULL, ENA_ALERT,
670 "Unable to create Tx DMA map for buffer %d\n",
672 goto err_map_release;
676 if (adapter->ifp->if_capenable & IFCAP_NETMAP) {
677 map = tx_ring->tx_buffer_info[i].nm_info.map_seg;
678 for (j = 0; j < ENA_PKT_MAX_BUFS; j++) {
679 err = bus_dmamap_create(adapter->tx_buf_tag, 0,
681 if (unlikely(err != 0)) {
682 ena_trace(NULL, ENA_ALERT, "Unable to create "
683 "Tx DMA for buffer %d %d\n", i, j);
684 goto err_map_release;
688 #endif /* DEV_NETMAP */
691 /* Allocate taskqueues */
692 TASK_INIT(&tx_ring->enqueue_task, 0, ena_deferred_mq_start, tx_ring);
693 tx_ring->enqueue_tq = taskqueue_create_fast("ena_tx_enque", M_NOWAIT,
694 taskqueue_thread_enqueue, &tx_ring->enqueue_tq);
695 if (unlikely(tx_ring->enqueue_tq == NULL)) {
696 ena_trace(NULL, ENA_ALERT,
697 "Unable to create taskqueue for enqueue task\n");
698 i = tx_ring->ring_size;
699 goto err_map_release;
702 tx_ring->running = true;
704 taskqueue_start_threads(&tx_ring->enqueue_tq, 1, PI_NET,
705 "%s txeq %d", device_get_nameunit(adapter->pdev), que->cpu);
710 ena_release_all_tx_dmamap(tx_ring);
712 free(tx_ring->free_tx_ids, M_DEVBUF);
713 tx_ring->free_tx_ids = NULL;
715 free(tx_ring->tx_buffer_info, M_DEVBUF);
716 tx_ring->tx_buffer_info = NULL;
722 * ena_free_tx_resources - Free Tx Resources per Queue
723 * @adapter: network interface device structure
726 * Free all transmit software resources
729 ena_free_tx_resources(struct ena_adapter *adapter, int qid)
731 struct ena_ring *tx_ring = &adapter->tx_ring[qid];
733 struct ena_netmap_tx_info *nm_info;
735 #endif /* DEV_NETMAP */
737 while (taskqueue_cancel(tx_ring->enqueue_tq, &tx_ring->enqueue_task,
739 taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
741 taskqueue_free(tx_ring->enqueue_tq);
743 ENA_RING_MTX_LOCK(tx_ring);
744 /* Flush buffer ring, */
745 drbr_flush(adapter->ifp, tx_ring->br);
747 /* Free buffer DMA maps, */
748 for (int i = 0; i < tx_ring->ring_size; i++) {
749 bus_dmamap_sync(adapter->tx_buf_tag,
750 tx_ring->tx_buffer_info[i].dmamap, BUS_DMASYNC_POSTWRITE);
751 bus_dmamap_unload(adapter->tx_buf_tag,
752 tx_ring->tx_buffer_info[i].dmamap);
753 bus_dmamap_destroy(adapter->tx_buf_tag,
754 tx_ring->tx_buffer_info[i].dmamap);
757 if (adapter->ifp->if_capenable & IFCAP_NETMAP) {
758 nm_info = &tx_ring->tx_buffer_info[i].nm_info;
759 for (j = 0; j < ENA_PKT_MAX_BUFS; j++) {
760 if (nm_info->socket_buf_idx[j] != 0) {
761 bus_dmamap_sync(adapter->tx_buf_tag,
763 BUS_DMASYNC_POSTWRITE);
764 ena_netmap_unload(adapter,
765 nm_info->map_seg[j]);
767 bus_dmamap_destroy(adapter->tx_buf_tag,
768 nm_info->map_seg[j]);
769 nm_info->socket_buf_idx[j] = 0;
772 #endif /* DEV_NETMAP */
774 m_freem(tx_ring->tx_buffer_info[i].mbuf);
775 tx_ring->tx_buffer_info[i].mbuf = NULL;
777 ENA_RING_MTX_UNLOCK(tx_ring);
779 /* And free allocated memory. */
780 free(tx_ring->tx_buffer_info, M_DEVBUF);
781 tx_ring->tx_buffer_info = NULL;
783 free(tx_ring->free_tx_ids, M_DEVBUF);
784 tx_ring->free_tx_ids = NULL;
786 free(tx_ring->push_buf_intermediate_buf, M_DEVBUF);
787 tx_ring->push_buf_intermediate_buf = NULL;
791 * ena_setup_all_tx_resources - allocate all queues Tx resources
792 * @adapter: network interface device structure
794 * Returns 0 on success, an error code otherwise.
797 ena_setup_all_tx_resources(struct ena_adapter *adapter)
801 for (i = 0; i < adapter->num_io_queues; i++) {
802 rc = ena_setup_tx_resources(adapter, i);
804 device_printf(adapter->pdev,
805 "Allocation for Tx Queue %u failed\n", i);
813 /* Rewind the index freeing the rings as we go */
815 ena_free_tx_resources(adapter, i);
820 * ena_free_all_tx_resources - Free Tx Resources for All Queues
821 * @adapter: network interface device structure
823 * Free all transmit software resources
826 ena_free_all_tx_resources(struct ena_adapter *adapter)
830 for (i = 0; i < adapter->num_io_queues; i++)
831 ena_free_tx_resources(adapter, i);
835 * ena_setup_rx_resources - allocate Rx resources (Descriptors)
836 * @adapter: network interface device structure
839 * Returns 0 on success, an error code otherwise.
842 ena_setup_rx_resources(struct ena_adapter *adapter, unsigned int qid)
844 struct ena_que *que = &adapter->que[qid];
845 struct ena_ring *rx_ring = que->rx_ring;
848 size = sizeof(struct ena_rx_buffer) * rx_ring->ring_size;
851 ena_netmap_reset_rx_ring(adapter, qid);
852 rx_ring->initialized = false;
853 #endif /* DEV_NETMAP */
856 * Alloc an extra element so that in the rx path
857 * we can always prefetch rx_info + 1
859 size += sizeof(struct ena_rx_buffer);
861 rx_ring->rx_buffer_info = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
863 size = sizeof(uint16_t) * rx_ring->ring_size;
864 rx_ring->free_rx_ids = malloc(size, M_DEVBUF, M_WAITOK);
866 for (i = 0; i < rx_ring->ring_size; i++)
867 rx_ring->free_rx_ids[i] = i;
869 /* Reset RX statistics. */
870 ena_reset_counters((counter_u64_t *)&rx_ring->rx_stats,
871 sizeof(rx_ring->rx_stats));
873 rx_ring->next_to_clean = 0;
874 rx_ring->next_to_use = 0;
876 /* ... and create the buffer DMA maps */
877 for (i = 0; i < rx_ring->ring_size; i++) {
878 err = bus_dmamap_create(adapter->rx_buf_tag, 0,
879 &(rx_ring->rx_buffer_info[i].map));
881 ena_trace(NULL, ENA_ALERT,
882 "Unable to create Rx DMA map for buffer %d\n", i);
883 goto err_buf_info_unmap;
887 /* Create LRO for the ring */
888 if ((adapter->ifp->if_capenable & IFCAP_LRO) != 0) {
889 int err = tcp_lro_init(&rx_ring->lro);
891 device_printf(adapter->pdev,
892 "LRO[%d] Initialization failed!\n", qid);
894 ena_trace(NULL, ENA_INFO,
895 "RX Soft LRO[%d] Initialized\n", qid);
896 rx_ring->lro.ifp = adapter->ifp;
904 bus_dmamap_destroy(adapter->rx_buf_tag,
905 rx_ring->rx_buffer_info[i].map);
908 free(rx_ring->free_rx_ids, M_DEVBUF);
909 rx_ring->free_rx_ids = NULL;
910 free(rx_ring->rx_buffer_info, M_DEVBUF);
911 rx_ring->rx_buffer_info = NULL;
916 * ena_free_rx_resources - Free Rx Resources
917 * @adapter: network interface device structure
920 * Free all receive software resources
923 ena_free_rx_resources(struct ena_adapter *adapter, unsigned int qid)
925 struct ena_ring *rx_ring = &adapter->rx_ring[qid];
927 /* Free buffer DMA maps, */
928 for (int i = 0; i < rx_ring->ring_size; i++) {
929 bus_dmamap_sync(adapter->rx_buf_tag,
930 rx_ring->rx_buffer_info[i].map, BUS_DMASYNC_POSTREAD);
931 m_freem(rx_ring->rx_buffer_info[i].mbuf);
932 rx_ring->rx_buffer_info[i].mbuf = NULL;
933 bus_dmamap_unload(adapter->rx_buf_tag,
934 rx_ring->rx_buffer_info[i].map);
935 bus_dmamap_destroy(adapter->rx_buf_tag,
936 rx_ring->rx_buffer_info[i].map);
939 /* free LRO resources, */
940 tcp_lro_free(&rx_ring->lro);
942 /* free allocated memory */
943 free(rx_ring->rx_buffer_info, M_DEVBUF);
944 rx_ring->rx_buffer_info = NULL;
946 free(rx_ring->free_rx_ids, M_DEVBUF);
947 rx_ring->free_rx_ids = NULL;
951 * ena_setup_all_rx_resources - allocate all queues Rx resources
952 * @adapter: network interface device structure
954 * Returns 0 on success, an error code otherwise.
957 ena_setup_all_rx_resources(struct ena_adapter *adapter)
961 for (i = 0; i < adapter->num_io_queues; i++) {
962 rc = ena_setup_rx_resources(adapter, i);
964 device_printf(adapter->pdev,
965 "Allocation for Rx Queue %u failed\n", i);
972 /* rewind the index freeing the rings as we go */
974 ena_free_rx_resources(adapter, i);
979 * ena_free_all_rx_resources - Free Rx resources for all queues
980 * @adapter: network interface device structure
982 * Free all receive software resources
985 ena_free_all_rx_resources(struct ena_adapter *adapter)
989 for (i = 0; i < adapter->num_io_queues; i++)
990 ena_free_rx_resources(adapter, i);
994 ena_alloc_rx_mbuf(struct ena_adapter *adapter,
995 struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info)
997 struct ena_com_buf *ena_buf;
998 bus_dma_segment_t segs[1];
1002 /* if previously allocated frag is not used */
1003 if (unlikely(rx_info->mbuf != NULL))
1006 /* Get mbuf using UMA allocator */
1007 rx_info->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
1008 rx_ring->rx_mbuf_sz);
1010 if (unlikely(rx_info->mbuf == NULL)) {
1011 counter_u64_add(rx_ring->rx_stats.mjum_alloc_fail, 1);
1012 rx_info->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1013 if (unlikely(rx_info->mbuf == NULL)) {
1014 counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1);
1019 mlen = rx_ring->rx_mbuf_sz;
1021 /* Set mbuf length */
1022 rx_info->mbuf->m_pkthdr.len = rx_info->mbuf->m_len = mlen;
1024 /* Map packets for DMA */
1025 ena_trace(NULL, ENA_DBG | ENA_RSC | ENA_RXPTH,
1026 "Using tag %p for buffers' DMA mapping, mbuf %p len: %d\n",
1027 adapter->rx_buf_tag, rx_info->mbuf, rx_info->mbuf->m_len);
1028 error = bus_dmamap_load_mbuf_sg(adapter->rx_buf_tag, rx_info->map,
1029 rx_info->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
1030 if (unlikely((error != 0) || (nsegs != 1))) {
1031 ena_trace(NULL, ENA_WARNING, "failed to map mbuf, error: %d, "
1032 "nsegs: %d\n", error, nsegs);
1033 counter_u64_add(rx_ring->rx_stats.dma_mapping_err, 1);
1038 bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD);
1040 ena_buf = &rx_info->ena_buf;
1041 ena_buf->paddr = segs[0].ds_addr;
1042 ena_buf->len = mlen;
1044 ena_trace(NULL, ENA_DBG | ENA_RSC | ENA_RXPTH,
1045 "ALLOC RX BUF: mbuf %p, rx_info %p, len %d, paddr %#jx\n",
1046 rx_info->mbuf, rx_info, ena_buf->len, (uintmax_t)ena_buf->paddr);
1051 m_freem(rx_info->mbuf);
1052 rx_info->mbuf = NULL;
1057 ena_free_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring,
1058 struct ena_rx_buffer *rx_info)
1061 if (rx_info->mbuf == NULL) {
1062 ena_trace(NULL, ENA_WARNING, "Trying to free unallocated buffer\n");
1066 bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map,
1067 BUS_DMASYNC_POSTREAD);
1068 bus_dmamap_unload(adapter->rx_buf_tag, rx_info->map);
1069 m_freem(rx_info->mbuf);
1070 rx_info->mbuf = NULL;
1074 * ena_refill_rx_bufs - Refills ring with descriptors
1075 * @rx_ring: the ring which we want to feed with free descriptors
1076 * @num: number of descriptors to refill
1077 * Refills the ring with newly allocated DMA-mapped mbufs for receiving
1080 ena_refill_rx_bufs(struct ena_ring *rx_ring, uint32_t num)
1082 struct ena_adapter *adapter = rx_ring->adapter;
1083 uint16_t next_to_use, req_id;
1087 ena_trace(NULL, ENA_DBG | ENA_RXPTH | ENA_RSC, "refill qid: %d\n",
1090 next_to_use = rx_ring->next_to_use;
1092 for (i = 0; i < num; i++) {
1093 struct ena_rx_buffer *rx_info;
1095 ena_trace(NULL, ENA_DBG | ENA_RXPTH | ENA_RSC,
1096 "RX buffer - next to use: %d\n", next_to_use);
1098 req_id = rx_ring->free_rx_ids[next_to_use];
1099 rx_info = &rx_ring->rx_buffer_info[req_id];
1101 if (ena_rx_ring_in_netmap(adapter, rx_ring->qid))
1102 rc = ena_netmap_alloc_rx_slot(adapter, rx_ring, rx_info);
1104 #endif /* DEV_NETMAP */
1105 rc = ena_alloc_rx_mbuf(adapter, rx_ring, rx_info);
1106 if (unlikely(rc != 0)) {
1107 ena_trace(NULL, ENA_WARNING,
1108 "failed to alloc buffer for rx queue %d\n",
1112 rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
1113 &rx_info->ena_buf, req_id);
1114 if (unlikely(rc != 0)) {
1115 ena_trace(NULL, ENA_WARNING,
1116 "failed to add buffer for rx queue %d\n",
1120 next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
1121 rx_ring->ring_size);
1124 if (unlikely(i < num)) {
1125 counter_u64_add(rx_ring->rx_stats.refil_partial, 1);
1126 ena_trace(NULL, ENA_WARNING,
1127 "refilled rx qid %d with only %d mbufs (from %d)\n",
1128 rx_ring->qid, i, num);
1132 ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
1134 rx_ring->next_to_use = next_to_use;
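/**
 * ena_update_buf_ring_size - change the drbr (buf_ring) size of all Tx rings
 *
 * The rings are re-created with the new size and, if the interface was up
 * before, it is brought up again. If that fails, the old size is restored
 * and a device reset is triggered.
 */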
1139 ena_update_buf_ring_size(struct ena_adapter *adapter,
1140 uint32_t new_buf_ring_size)
1142 uint32_t old_buf_ring_size;
1146 ENA_LOCK_LOCK(adapter);
1148 old_buf_ring_size = adapter->buf_ring_size;
1149 adapter->buf_ring_size = new_buf_ring_size;
1151 dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
1154 /* Reconfigure buf ring for all Tx rings. */
1155 ena_free_all_io_rings_resources(adapter);
1156 ena_init_io_rings_advanced(adapter);
1159 * If ena_up() fails, it's not because of recent buf_ring size
1160 * changes. Because of that, we just want to revert old drbr
1161 * value and trigger the reset because something else had to
1164 rc = ena_up(adapter);
1165 if (unlikely(rc != 0)) {
1166 device_printf(adapter->pdev,
1167 "Failed to configure device after setting new drbr size: %u. Reverting old value: %u and triggering the reset\n",
1168 new_buf_ring_size, old_buf_ring_size);
1170 /* Revert old size and trigger the reset */
1171 adapter->buf_ring_size = old_buf_ring_size;
1172 ena_free_all_io_rings_resources(adapter);
1173 ena_init_io_rings_advanced(adapter);
1175 ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET,
1177 ena_trigger_reset(adapter, ENA_REGS_RESET_OS_TRIGGER);
1182 ENA_LOCK_UNLOCK(adapter);
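/**
 * ena_update_queue_size - change the requested Tx/Rx ring sizes
 *
 * On failure the previous sizes are restored; if bringing the interface up
 * still fails, a device reset is triggered.
 */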
1188 ena_update_queue_size(struct ena_adapter *adapter, uint32_t new_tx_size,
1189 uint32_t new_rx_size)
1191 uint32_t old_tx_size, old_rx_size;
1195 ENA_LOCK_LOCK(adapter);
1197 old_tx_size = adapter->requested_tx_ring_size;
1198 old_rx_size = adapter->requested_rx_ring_size;
1199 adapter->requested_tx_ring_size = new_tx_size;
1200 adapter->requested_rx_ring_size = new_rx_size;
1202 dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
1205 /* Configure queues with new size. */
1206 ena_init_io_rings_basic(adapter);
1208 rc = ena_up(adapter);
1209 if (unlikely(rc != 0)) {
1210 device_printf(adapter->pdev,
1211 "Failed to configure device with the new sizes - Tx: %u Rx: %u. Reverting old values - Tx: %u Rx: %u\n",
1212 new_tx_size, new_rx_size, old_tx_size, old_rx_size);
1214 /* Revert old size. */
1215 adapter->requested_tx_ring_size = old_tx_size;
1216 adapter->requested_rx_ring_size = old_rx_size;
1217 ena_init_io_rings_basic(adapter);
1219 /* And try again. */
1220 rc = ena_up(adapter);
1221 if (unlikely(rc != 0)) {
1222 device_printf(adapter->pdev,
1223 "Failed to revert old queue sizes. Triggering device reset.\n");
1225 * If we've failed again, something had to go
1226 * wrong. After reset, the device should try to
1229 ENA_FLAG_SET_ATOMIC(
1230 ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
1231 ena_trigger_reset(adapter,
1232 ENA_REGS_RESET_OS_TRIGGER);
1237 ENA_LOCK_UNLOCK(adapter);
1243 ena_update_io_rings(struct ena_adapter *adapter, uint32_t num)
1245 ena_free_all_io_rings_resources(adapter);
1246 /* Force indirection table to be reinitialized */
1247 ena_com_rss_destroy(adapter->ena_dev);
1249 adapter->num_io_queues = num;
1250 ena_init_io_rings(adapter);
1253 /* Caller should sanitize new_num */
1255 ena_update_io_queue_nb(struct ena_adapter *adapter, uint32_t new_num)
1261 ENA_LOCK_LOCK(adapter);
1263 dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
1264 old_num = adapter->num_io_queues;
1267 ena_update_io_rings(adapter, new_num);
1270 rc = ena_up(adapter);
1271 if (unlikely(rc != 0)) {
1272 device_printf(adapter->pdev,
1273 "Failed to configure device with %u IO queues. "
1274 "Reverting to previous value: %u\n",
1277 ena_update_io_rings(adapter, old_num);
1279 rc = ena_up(adapter);
1280 if (unlikely(rc != 0)) {
1281 device_printf(adapter->pdev,
1282 "Failed to revert to previous setup IO "
1283 "queues. Triggering device reset.\n");
1284 ENA_FLAG_SET_ATOMIC(
1285 ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
1286 ena_trigger_reset(adapter,
1287 ENA_REGS_RESET_OS_TRIGGER);
1292 ENA_LOCK_UNLOCK(adapter);
1298 ena_free_rx_bufs(struct ena_adapter *adapter, unsigned int qid)
1300 struct ena_ring *rx_ring = &adapter->rx_ring[qid];
1303 for (i = 0; i < rx_ring->ring_size; i++) {
1304 struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];
1306 if (rx_info->mbuf != NULL)
1307 ena_free_rx_mbuf(adapter, rx_ring, rx_info);
1309 if (((if_getflags(adapter->ifp) & IFF_DYING) == 0) &&
1310 (adapter->ifp->if_capenable & IFCAP_NETMAP)) {
1311 if (rx_info->netmap_buf_idx != 0)
1312 ena_netmap_free_rx_slot(adapter, rx_ring,
1315 #endif /* DEV_NETMAP */
1320 * ena_refill_all_rx_bufs - allocate all queues Rx buffers
1321 * @adapter: network interface device structure
1325 ena_refill_all_rx_bufs(struct ena_adapter *adapter)
1327 struct ena_ring *rx_ring;
1328 int i, rc, bufs_num;
1330 for (i = 0; i < adapter->num_io_queues; i++) {
1331 rx_ring = &adapter->rx_ring[i];
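/*
 * Refill all but one descriptor. Leaving one slot unused is a common ring
 * convention so that a completely full ring cannot be confused with an
 * empty one.
 */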
1332 bufs_num = rx_ring->ring_size - 1;
1333 rc = ena_refill_rx_bufs(rx_ring, bufs_num);
1334 if (unlikely(rc != bufs_num))
1335 ena_trace(NULL, ENA_WARNING, "refilling queue %d failed. "
1336 "Allocated %d buffers out of %d requested\n", i, rc, bufs_num);
1338 rx_ring->initialized = true;
1339 #endif /* DEV_NETMAP */
1344 ena_free_all_rx_bufs(struct ena_adapter *adapter)
1348 for (i = 0; i < adapter->num_io_queues; i++)
1349 ena_free_rx_bufs(adapter, i);
1353 * ena_free_tx_bufs - Free Tx Buffers per Queue
1354 * @adapter: network interface device structure
1358 ena_free_tx_bufs(struct ena_adapter *adapter, unsigned int qid)
1360 bool print_once = true;
1361 struct ena_ring *tx_ring = &adapter->tx_ring[qid];
1363 ENA_RING_MTX_LOCK(tx_ring);
1364 for (int i = 0; i < tx_ring->ring_size; i++) {
1365 struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
1367 if (tx_info->mbuf == NULL)
1371 device_printf(adapter->pdev,
1372 "free uncompleted tx mbuf qid %d idx 0x%x\n",
1376 ena_trace(NULL, ENA_DBG,
1377 "free uncompleted tx mbuf qid %d idx 0x%x\n",
1381 bus_dmamap_sync(adapter->tx_buf_tag, tx_info->dmamap,
1382 BUS_DMASYNC_POSTWRITE);
1383 bus_dmamap_unload(adapter->tx_buf_tag, tx_info->dmamap);
1385 m_free(tx_info->mbuf);
1386 tx_info->mbuf = NULL;
1388 ENA_RING_MTX_UNLOCK(tx_ring);
1392 ena_free_all_tx_bufs(struct ena_adapter *adapter)
1395 for (int i = 0; i < adapter->num_io_queues; i++)
1396 ena_free_tx_bufs(adapter, i);
1400 ena_destroy_all_tx_queues(struct ena_adapter *adapter)
1405 for (i = 0; i < adapter->num_io_queues; i++) {
1406 ena_qid = ENA_IO_TXQ_IDX(i);
1407 ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
1412 ena_destroy_all_rx_queues(struct ena_adapter *adapter)
1417 for (i = 0; i < adapter->num_io_queues; i++) {
1418 ena_qid = ENA_IO_RXQ_IDX(i);
1419 ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
1424 ena_destroy_all_io_queues(struct ena_adapter *adapter)
1426 struct ena_que *queue;
1429 for (i = 0; i < adapter->num_io_queues; i++) {
1430 queue = &adapter->que[i];
1431 while (taskqueue_cancel(queue->cleanup_tq,
1432 &queue->cleanup_task, NULL))
1433 taskqueue_drain(queue->cleanup_tq,
1434 &queue->cleanup_task);
1435 taskqueue_free(queue->cleanup_tq);
1438 ena_destroy_all_tx_queues(adapter);
1439 ena_destroy_all_rx_queues(adapter);
1443 ena_create_io_queues(struct ena_adapter *adapter)
1445 struct ena_com_dev *ena_dev = adapter->ena_dev;
1446 struct ena_com_create_io_ctx ctx;
1447 struct ena_ring *ring;
1448 struct ena_que *queue;
1450 uint32_t msix_vector;
1453 /* Create TX queues */
1454 for (i = 0; i < adapter->num_io_queues; i++) {
1455 msix_vector = ENA_IO_IRQ_IDX(i);
1456 ena_qid = ENA_IO_TXQ_IDX(i);
1457 ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
1458 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
1459 ctx.queue_size = adapter->requested_tx_ring_size;
1460 ctx.msix_vector = msix_vector;
1462 rc = ena_com_create_io_queue(ena_dev, &ctx);
1464 device_printf(adapter->pdev,
1465 "Failed to create io TX queue #%d rc: %d\n", i, rc);
1468 ring = &adapter->tx_ring[i];
1469 rc = ena_com_get_io_handlers(ena_dev, ena_qid,
1470 &ring->ena_com_io_sq,
1471 &ring->ena_com_io_cq);
1473 device_printf(adapter->pdev,
1474 "Failed to get TX queue handlers. TX queue num"
1475 " %d rc: %d\n", i, rc);
1476 ena_com_destroy_io_queue(ena_dev, ena_qid);
1481 /* Create RX queues */
1482 for (i = 0; i < adapter->num_io_queues; i++) {
1483 msix_vector = ENA_IO_IRQ_IDX(i);
1484 ena_qid = ENA_IO_RXQ_IDX(i);
1485 ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
1486 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
1487 ctx.queue_size = adapter->requested_rx_ring_size;
1488 ctx.msix_vector = msix_vector;
1490 rc = ena_com_create_io_queue(ena_dev, &ctx);
1491 if (unlikely(rc != 0)) {
1492 device_printf(adapter->pdev,
1493 "Failed to create io RX queue[%d] rc: %d\n", i, rc);
1497 ring = &adapter->rx_ring[i];
1498 rc = ena_com_get_io_handlers(ena_dev, ena_qid,
1499 &ring->ena_com_io_sq,
1500 &ring->ena_com_io_cq);
1501 if (unlikely(rc != 0)) {
1502 device_printf(adapter->pdev,
1503 "Failed to get RX queue handlers. RX queue num"
1504 " %d rc: %d\n", i, rc);
1505 ena_com_destroy_io_queue(ena_dev, ena_qid);
1510 for (i = 0; i < adapter->num_io_queues; i++) {
1511 queue = &adapter->que[i];
1513 NET_TASK_INIT(&queue->cleanup_task, 0, ena_cleanup, queue);
1514 queue->cleanup_tq = taskqueue_create_fast("ena cleanup",
1515 M_WAITOK, taskqueue_thread_enqueue, &queue->cleanup_tq);
1517 taskqueue_start_threads(&queue->cleanup_tq, 1, PI_NET,
1518 "%s queue %d cleanup",
1519 device_get_nameunit(adapter->pdev), i);
1526 ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
1527 i = adapter->num_io_queues;
1530 ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));
1535 /*********************************************************************
1537 * MSIX & Interrupt Service routine
1539 **********************************************************************/
1542 * ena_intr_msix_mgmnt - MSIX Interrupt Handler for admin/async queue
1543 * @arg: pointer to the ena_adapter passed as the interrupt argument
1546 ena_intr_msix_mgmnt(void *arg)
1548 struct ena_adapter *adapter = (struct ena_adapter *)arg;
1550 ena_com_admin_q_comp_intr_handler(adapter->ena_dev);
1551 if (likely(ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter)))
1552 ena_com_aenq_intr_handler(adapter->ena_dev, arg);
1556 * ena_handle_msix - MSIX Interrupt Handler for Tx/Rx
1560 ena_handle_msix(void *arg)
1562 struct ena_que *queue = arg;
1563 struct ena_adapter *adapter = queue->adapter;
1564 if_t ifp = adapter->ifp;
1566 if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0))
1567 return (FILTER_STRAY);
1569 taskqueue_enqueue(queue->cleanup_tq, &queue->cleanup_task);
1571 return (FILTER_HANDLED);
1575 ena_enable_msix(struct ena_adapter *adapter)
1577 device_t dev = adapter->pdev;
1578 int msix_vecs, msix_req;
1581 if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) {
1582 device_printf(dev, "Error, MSI-X is already enabled\n");
1586 /* Reserve the max msix vectors we might need */
1587 msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues);
1589 adapter->msix_entries = malloc(msix_vecs * sizeof(struct msix_entry),
1590 M_DEVBUF, M_WAITOK | M_ZERO);
1592 ena_trace(NULL, ENA_DBG, "trying to enable MSI-X, vectors: %d\n", msix_vecs);
1594 for (i = 0; i < msix_vecs; i++) {
1595 adapter->msix_entries[i].entry = i;
1596 /* Vectors must start from 1 */
1597 adapter->msix_entries[i].vector = i + 1;
1600 msix_req = msix_vecs;
1601 rc = pci_alloc_msix(dev, &msix_vecs);
1602 if (unlikely(rc != 0)) {
1604 "Failed to enable MSIX, vectors %d rc %d\n", msix_vecs, rc);
1610 if (msix_vecs != msix_req) {
1611 if (msix_vecs == ENA_ADMIN_MSIX_VEC) {
1613 "Not enough number of MSI-x allocated: %d\n",
1615 pci_release_msi(dev);
1619 device_printf(dev, "Enable only %d MSI-x (out of %d), reduce "
1620 "the number of queues\n", msix_vecs, msix_req);
1623 adapter->msix_vecs = msix_vecs;
1624 ENA_FLAG_SET_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter);
1629 free(adapter->msix_entries, M_DEVBUF);
1630 adapter->msix_entries = NULL;
1636 ena_setup_mgmnt_intr(struct ena_adapter *adapter)
1639 snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
1640 ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
1641 device_get_nameunit(adapter->pdev));
1643 * The handler is NULL on purpose; it will be set
1644 * when the mgmnt interrupt is acquired
1646 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler = NULL;
1647 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
1648 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
1649 adapter->msix_entries[ENA_MGMNT_IRQ_IDX].vector;
1653 ena_setup_io_intr(struct ena_adapter *adapter)
1655 static int last_bind_cpu = -1;
1658 if (adapter->msix_entries == NULL)
1661 for (int i = 0; i < adapter->num_io_queues; i++) {
1662 irq_idx = ENA_IO_IRQ_IDX(i);
1664 snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
1665 "%s-TxRx-%d", device_get_nameunit(adapter->pdev), i);
1666 adapter->irq_tbl[irq_idx].handler = ena_handle_msix;
1667 adapter->irq_tbl[irq_idx].data = &adapter->que[i];
1668 adapter->irq_tbl[irq_idx].vector =
1669 adapter->msix_entries[irq_idx].vector;
1670 ena_trace(NULL, ENA_INFO | ENA_IOQ, "ena_setup_io_intr vector: %d\n",
1671 adapter->msix_entries[irq_idx].vector);
1674 * We want to bind rings to the corresponding cpu
1675 * using something similar to the RSS round-robin technique.
1677 if (unlikely(last_bind_cpu < 0))
1678 last_bind_cpu = CPU_FIRST();
1679 adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu =
1681 last_bind_cpu = CPU_NEXT(last_bind_cpu);
1688 ena_request_mgmnt_irq(struct ena_adapter *adapter)
1690 struct ena_irq *irq;
1691 unsigned long flags;
1694 flags = RF_ACTIVE | RF_SHAREABLE;
1696 irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
1697 irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ,
1698 &irq->vector, flags);
1700 if (unlikely(irq->res == NULL)) {
1701 device_printf(adapter->pdev, "could not allocate "
1702 "irq vector: %d\n", irq->vector);
1706 rc = bus_setup_intr(adapter->pdev, irq->res,
1707 INTR_TYPE_NET | INTR_MPSAFE, NULL, ena_intr_msix_mgmnt,
1708 irq->data, &irq->cookie);
1709 if (unlikely(rc != 0)) {
1710 device_printf(adapter->pdev, "failed to register "
1711 "interrupt handler for irq %ju: %d\n",
1712 rman_get_start(irq->res), rc);
1715 irq->requested = true;
1720 ena_trace(NULL, ENA_INFO | ENA_ADMQ, "releasing resource for irq %d\n",
1722 rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
1723 irq->vector, irq->res);
1724 if (unlikely(rcc != 0))
1725 device_printf(adapter->pdev, "dev has no parent while "
1726 "releasing res for irq: %d\n", irq->vector);
1733 ena_request_io_irq(struct ena_adapter *adapter)
1735 struct ena_irq *irq;
1736 unsigned long flags = 0;
1739 if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter))) {
1740 device_printf(adapter->pdev,
1741 "failed to request I/O IRQ: MSI-X is not enabled\n");
1744 flags = RF_ACTIVE | RF_SHAREABLE;
1747 for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
1748 irq = &adapter->irq_tbl[i];
1750 if (unlikely(irq->requested))
1753 irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ,
1754 &irq->vector, flags);
1755 if (unlikely(irq->res == NULL)) {
1757 device_printf(adapter->pdev, "could not allocate "
1758 "irq vector: %d\n", irq->vector);
1762 rc = bus_setup_intr(adapter->pdev, irq->res,
1763 INTR_TYPE_NET | INTR_MPSAFE, irq->handler, NULL,
1764 irq->data, &irq->cookie);
1765 if (unlikely(rc != 0)) {
1766 device_printf(adapter->pdev, "failed to register "
1767 "interrupt handler for irq %ju: %d\n",
1768 rman_get_start(irq->res), rc);
1771 irq->requested = true;
1773 ena_trace(NULL, ENA_INFO, "queue %d - cpu %d\n",
1774 i - ENA_IO_IRQ_FIRST_IDX, irq->cpu);
1781 for (; i >= ENA_IO_IRQ_FIRST_IDX; i--) {
1782 irq = &adapter->irq_tbl[i];
1785 /* Once we have entered the err: section and irq->requested is true, we
1786 free both the interrupt and the resources */
1788 rcc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie);
1789 if (unlikely(rcc != 0))
1790 device_printf(adapter->pdev, "could not release"
1791 " irq: %d, error: %d\n", irq->vector, rcc);
1793 /* If we entered the err: section without irq->requested set, we know
1794 it was bus_alloc_resource_any() that needs cleanup, provided
1795 res is not NULL. In case res is NULL no work is needed in
1798 if (irq->res != NULL) {
1799 rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
1800 irq->vector, irq->res);
1802 if (unlikely(rcc != 0))
1803 device_printf(adapter->pdev, "dev has no parent while "
1804 "releasing res for irq: %d\n", irq->vector);
1805 irq->requested = false;
1813 ena_free_mgmnt_irq(struct ena_adapter *adapter)
1815 struct ena_irq *irq;
1818 irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
1819 if (irq->requested) {
1820 ena_trace(NULL, ENA_INFO | ENA_ADMQ, "tear down irq: %d\n",
1822 rc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie);
1823 if (unlikely(rc != 0))
1824 device_printf(adapter->pdev, "failed to tear "
1825 "down irq: %d\n", irq->vector);
1829 if (irq->res != NULL) {
1830 ena_trace(NULL, ENA_INFO | ENA_ADMQ, "release resource irq: %d\n",
1832 rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
1833 irq->vector, irq->res);
1835 if (unlikely(rc != 0))
1836 device_printf(adapter->pdev, "dev has no parent while "
1837 "releasing res for irq: %d\n", irq->vector);
1842 ena_free_io_irq(struct ena_adapter *adapter)
1844 struct ena_irq *irq;
1847 for (int i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
1848 irq = &adapter->irq_tbl[i];
1849 if (irq->requested) {
1850 ena_trace(NULL, ENA_INFO | ENA_IOQ, "tear down irq: %d\n",
1852 rc = bus_teardown_intr(adapter->pdev, irq->res,
1854 if (unlikely(rc != 0)) {
1855 device_printf(adapter->pdev, "failed to tear "
1856 "down irq: %d\n", irq->vector);
1861 if (irq->res != NULL) {
1862 ena_trace(NULL, ENA_INFO | ENA_IOQ, "release resource irq: %d\n",
1864 rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
1865 irq->vector, irq->res);
1867 if (unlikely(rc != 0)) {
1868 device_printf(adapter->pdev, "dev has no parent"
1869 " while releasing res for irq: %d\n",
1877 ena_free_irqs(struct ena_adapter* adapter)
1880 ena_free_io_irq(adapter);
1881 ena_free_mgmnt_irq(adapter);
1882 ena_disable_msix(adapter);
1886 ena_disable_msix(struct ena_adapter *adapter)
1889 if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) {
1890 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter);
1891 pci_release_msi(adapter->pdev);
1894 adapter->msix_vecs = 0;
1895 if (adapter->msix_entries != NULL)
1896 free(adapter->msix_entries, M_DEVBUF);
1897 adapter->msix_entries = NULL;
1901 ena_unmask_all_io_irqs(struct ena_adapter *adapter)
1903 struct ena_com_io_cq* io_cq;
1904 struct ena_eth_io_intr_reg intr_reg;
1908 /* Unmask interrupts for all queues */
1909 for (i = 0; i < adapter->num_io_queues; i++) {
1910 ena_qid = ENA_IO_TXQ_IDX(i);
1911 io_cq = &adapter->ena_dev->io_cq_queues[ena_qid];
1912 ena_com_update_intr_reg(&intr_reg, 0, 0, true);
1913 ena_com_unmask_intr(io_cq, &intr_reg);
1917 /* Configure the Rx forwarding */
1919 ena_rss_configure(struct ena_adapter *adapter)
1921 struct ena_com_dev *ena_dev = adapter->ena_dev;
1924 /* In case the RSS table was destroyed */
1925 if (!ena_dev->rss.tbl_log_size) {
1926 rc = ena_rss_init_default(adapter);
1927 if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) {
1928 device_printf(adapter->pdev,
1929 "WARNING: RSS was not properly re-initialized,"
1930 " it will affect bandwidth\n");
1931 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_RSS_ACTIVE, adapter);
1936 /* Set indirect table */
1937 rc = ena_com_indirect_table_set(ena_dev);
1938 if (unlikely((rc != 0) && (rc != EOPNOTSUPP)))
1941 /* Configure hash function (if supported) */
1942 rc = ena_com_set_hash_function(ena_dev);
1943 if (unlikely((rc != 0) && (rc != EOPNOTSUPP)))
1946 /* Configure hash inputs (if supported) */
1947 rc = ena_com_set_hash_ctrl(ena_dev);
1948 if (unlikely((rc != 0) && (rc != EOPNOTSUPP)))
1955 ena_up_complete(struct ena_adapter *adapter)
1959 if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter))) {
1960 rc = ena_rss_configure(adapter);
1962 device_printf(adapter->pdev,
1963 "Failed to configure RSS\n");
1968 rc = ena_change_mtu(adapter->ifp, adapter->ifp->if_mtu);
1969 if (unlikely(rc != 0))
1972 ena_refill_all_rx_bufs(adapter);
1973 ena_reset_counters((counter_u64_t *)&adapter->hw_stats,
1974 sizeof(adapter->hw_stats));
1980 set_io_rings_size(struct ena_adapter *adapter, int new_tx_size,
1985 for (i = 0; i < adapter->num_io_queues; i++) {
1986 adapter->tx_ring[i].ring_size = new_tx_size;
1987 adapter->rx_ring[i].ring_size = new_rx_size;
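/**
 * create_queues_with_size_backoff - allocate Tx/Rx resources and create the
 * IO queues, retrying with smaller rings on ENOMEM
 *
 * On an ENOMEM failure the larger of the two ring sizes (or both, if they
 * are equal) is halved and the whole sequence is retried, until the queues
 * are created or ENA_MIN_RING_SIZE is reached.
 */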
1992 create_queues_with_size_backoff(struct ena_adapter *adapter)
1995 uint32_t cur_rx_ring_size, cur_tx_ring_size;
1996 uint32_t new_rx_ring_size, new_tx_ring_size;
1999 * Current queue sizes might be set smaller than the requested
2000 * ones due to past queue allocation failures.
2002 set_io_rings_size(adapter, adapter->requested_tx_ring_size,
2003 adapter->requested_rx_ring_size);
2006 /* Allocate transmit descriptors */
2007 rc = ena_setup_all_tx_resources(adapter);
2008 if (unlikely(rc != 0)) {
2009 ena_trace(NULL, ENA_ALERT, "err_setup_tx\n");
2013 /* Allocate receive descriptors */
2014 rc = ena_setup_all_rx_resources(adapter);
2015 if (unlikely(rc != 0)) {
2016 ena_trace(NULL, ENA_ALERT, "err_setup_rx\n");
2020 /* Create IO queues for Rx & Tx */
2021 rc = ena_create_io_queues(adapter);
2022 if (unlikely(rc != 0)) {
2023 ena_trace(NULL, ENA_ALERT,
2024 "create IO queues failed\n");
2031 ena_free_all_rx_resources(adapter);
2033 ena_free_all_tx_resources(adapter);
2036 * Lower the ring size if ENOMEM. Otherwise, return the
2037 * error straightaway.
2039 if (unlikely(rc != ENOMEM)) {
2040 ena_trace(NULL, ENA_ALERT,
2041 "Queue creation failed with error code: %d\n", rc);
2045 cur_tx_ring_size = adapter->tx_ring[0].ring_size;
2046 cur_rx_ring_size = adapter->rx_ring[0].ring_size;
2048 device_printf(adapter->pdev,
2049 "Not enough memory to create queues with sizes TX=%d, RX=%d\n",
2050 cur_tx_ring_size, cur_rx_ring_size);
2052 new_tx_ring_size = cur_tx_ring_size;
2053 new_rx_ring_size = cur_rx_ring_size;
2056 * Decrease the size of a larger queue, or decrease both if they are the same size.
2059 if (cur_rx_ring_size <= cur_tx_ring_size)
2060 new_tx_ring_size = cur_tx_ring_size / 2;
2061 if (cur_rx_ring_size >= cur_tx_ring_size)
2062 new_rx_ring_size = cur_rx_ring_size / 2;
2064 if (new_tx_ring_size < ENA_MIN_RING_SIZE ||
2065 new_rx_ring_size < ENA_MIN_RING_SIZE) {
2066 device_printf(adapter->pdev,
2067 "Queue creation failed with the smallest possible queue size"
2068 "of %d for both queues. Not retrying with smaller queues\n",
2073 set_io_rings_size(adapter, new_tx_ring_size, new_rx_ring_size);
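/**
 * ena_up - bring the interface up
 *
 * Sets up and requests the IO interrupts, creates the IO queues (with ring
 * size backoff), configures RSS and the MTU, refills the Rx rings, starts
 * the timer service when the device is running and finally unmasks the IO
 * interrupts.
 */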
2078 ena_up(struct ena_adapter *adapter)
2082 if (unlikely(device_is_attached(adapter->pdev) == 0)) {
2083 device_printf(adapter->pdev, "device is not attached!\n");
2087 if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
2090 device_printf(adapter->pdev, "device is going UP\n");
2092 /* setup interrupts for IO queues */
2093 rc = ena_setup_io_intr(adapter);
2094 if (unlikely(rc != 0)) {
2095 ena_trace(NULL, ENA_ALERT, "error setting up IO interrupt\n");
2098 rc = ena_request_io_irq(adapter);
2099 if (unlikely(rc != 0)) {
2100 ena_trace(NULL, ENA_ALERT, "err_req_irq\n");
2104 device_printf(adapter->pdev,
2105 "Creating %u IO queues. Rx queue size: %d, Tx queue size: %d, "
2107 adapter->num_io_queues,
2108 adapter->requested_rx_ring_size,
2109 adapter->requested_tx_ring_size,
2110 (adapter->ena_dev->tx_mem_queue_type ==
2111 ENA_ADMIN_PLACEMENT_POLICY_DEV) ? "ENABLED" : "DISABLED");
2113 rc = create_queues_with_size_backoff(adapter);
2114 if (unlikely(rc != 0)) {
2115 ena_trace(NULL, ENA_ALERT,
2116 "error creating queues with size backoff\n");
2117 goto err_create_queues_with_backoff;
2120 if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter))
2121 if_link_state_change(adapter->ifp, LINK_STATE_UP);
2123 rc = ena_up_complete(adapter);
2124 if (unlikely(rc != 0))
2125 goto err_up_complete;
2127 counter_u64_add(adapter->dev_stats.interface_up, 1);
2129 ena_update_hwassist(adapter);
2131 if_setdrvflagbits(adapter->ifp, IFF_DRV_RUNNING,
2134 /* Activate timer service only if the device is running.
2135 * If this flag is not set, it means that the driver is being
2136 * reset and timer service will be activated afterwards.
2138 if (ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter)) {
2139 callout_reset_sbt(&adapter->timer_service, SBT_1S,
2140 SBT_1S, ena_timer_service, (void *)adapter, 0);
2143 ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP, adapter);
2145 ena_unmask_all_io_irqs(adapter);
2150 ena_destroy_all_io_queues(adapter);
2151 ena_free_all_rx_resources(adapter);
2152 ena_free_all_tx_resources(adapter);
2153 err_create_queues_with_backoff:
2154 ena_free_io_irq(adapter);
2160 ena_get_counter(if_t ifp, ift_counter cnt)
2162 struct ena_adapter *adapter;
2163 struct ena_hw_stats *stats;
2165 adapter = if_getsoftc(ifp);
2166 stats = &adapter->hw_stats;
2169 case IFCOUNTER_IPACKETS:
2170 return (counter_u64_fetch(stats->rx_packets));
2171 case IFCOUNTER_OPACKETS:
2172 return (counter_u64_fetch(stats->tx_packets));
2173 case IFCOUNTER_IBYTES:
2174 return (counter_u64_fetch(stats->rx_bytes));
2175 case IFCOUNTER_OBYTES:
2176 return (counter_u64_fetch(stats->tx_bytes));
2177 case IFCOUNTER_IQDROPS:
2178 return (counter_u64_fetch(stats->rx_drops));
2179 case IFCOUNTER_OQDROPS:
2180 return (counter_u64_fetch(stats->tx_drops));
2182 return (if_get_counter_default(ifp, cnt));
2187 ena_media_change(if_t ifp)
2189 /* Media Change is not supported by firmware */
2194 ena_media_status(if_t ifp, struct ifmediareq *ifmr)
2196 struct ena_adapter *adapter = if_getsoftc(ifp);
2197 ena_trace(NULL, ENA_DBG, "enter\n");
2199 ENA_LOCK_LOCK(adapter);
2201 ifmr->ifm_status = IFM_AVALID;
2202 ifmr->ifm_active = IFM_ETHER;
2204 if (!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)) {
2205 ENA_LOCK_UNLOCK(adapter);
2206 ena_trace(NULL, ENA_INFO, "Link is down\n");
2210 ifmr->ifm_status |= IFM_ACTIVE;
2211 ifmr->ifm_active |= IFM_UNKNOWN | IFM_FDX;
2213 ENA_LOCK_UNLOCK(adapter);
2219 struct ena_adapter *adapter = (struct ena_adapter *)arg;
2221 if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) {
2222 ENA_LOCK_LOCK(adapter);
2224 ENA_LOCK_UNLOCK(adapter);
2229 ena_ioctl(if_t ifp, u_long command, caddr_t data)
2231 struct ena_adapter *adapter;
2235 adapter = ifp->if_softc;
2236 ifr = (struct ifreq *)data;
2239 * Acquire the lock to prevent the up and down routines from running in parallel.
2244 if (ifp->if_mtu == ifr->ifr_mtu)
2246 ENA_LOCK_LOCK(adapter);
2249 ena_change_mtu(ifp, ifr->ifr_mtu);
2251 rc = ena_up(adapter);
2252 ENA_LOCK_UNLOCK(adapter);
2256 if ((ifp->if_flags & IFF_UP) != 0) {
2257 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
2258 if ((ifp->if_flags & (IFF_PROMISC |
2259 IFF_ALLMULTI)) != 0) {
2260 device_printf(adapter->pdev,
2261 "ioctl promisc/allmulti\n");
2264 ENA_LOCK_LOCK(adapter);
2265 rc = ena_up(adapter);
2266 ENA_LOCK_UNLOCK(adapter);
2269 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
2270 ENA_LOCK_LOCK(adapter);
2272 ENA_LOCK_UNLOCK(adapter);
2283 rc = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
2290 if (ifr->ifr_reqcap != ifp->if_capenable) {
2291 ifp->if_capenable = ifr->ifr_reqcap;
2295 if ((reinit != 0) &&
2296 ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)) {
2297 ENA_LOCK_LOCK(adapter);
2299 rc = ena_up(adapter);
2300 ENA_LOCK_UNLOCK(adapter);
2306 rc = ether_ioctl(ifp, command, data);
2314 ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *feat)
2318 if ((feat->offload.tx &
2319 (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK |
2320 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK |
2321 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK)) != 0)
2322 caps |= IFCAP_TXCSUM;
2324 if ((feat->offload.tx &
2325 (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK |
2326 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)) != 0)
2327 caps |= IFCAP_TXCSUM_IPV6;
2329 if ((feat->offload.tx &
2330 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0)
2333 if ((feat->offload.tx &
2334 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) != 0)
2337 if ((feat->offload.rx_supported &
2338 (ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK |
2339 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK)) != 0)
2340 caps |= IFCAP_RXCSUM;
2342 if ((feat->offload.rx_supported &
2343 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) != 0)
2344 caps |= IFCAP_RXCSUM_IPV6;
2346 caps |= IFCAP_LRO | IFCAP_JUMBO_MTU;
2352 ena_update_host_info(struct ena_admin_host_info *host_info, if_t ifp)
2355 host_info->supported_network_features[0] =
2356 (uint32_t)if_getcapabilities(ifp);
2360 ena_update_hwassist(struct ena_adapter *adapter)
2362 if_t ifp = adapter->ifp;
2363 uint32_t feat = adapter->tx_offload_cap;
2364 int cap = if_getcapenable(ifp);
2367 if_clearhwassist(ifp);
2369 if ((cap & IFCAP_TXCSUM) != 0) {
2371 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK) != 0)
2374 (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK |
2375 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)) != 0)
2376 flags |= CSUM_IP_UDP | CSUM_IP_TCP;
2379 if ((cap & IFCAP_TXCSUM_IPV6) != 0)
2380 flags |= CSUM_IP6_UDP | CSUM_IP6_TCP;
2382 if ((cap & IFCAP_TSO4) != 0)
2383 flags |= CSUM_IP_TSO;
2385 if ((cap & IFCAP_TSO6) != 0)
2386 flags |= CSUM_IP6_TSO;
2388 if_sethwassistbits(ifp, flags, 0);
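/*
 * Illustrative example (not from the original source): if the user enables
 * only IFCAP_TXCSUM and the device advertises IPv4 L3 plus full/partial L4
 * checksum offload, the hwassist bits set above would end up containing
 * CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP; enabling IFCAP_TSO4 would add
 * CSUM_IP_TSO on top of that.
 */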
2392 ena_setup_ifnet(device_t pdev, struct ena_adapter *adapter,
2393 struct ena_com_dev_get_features_ctx *feat)
2398 ifp = adapter->ifp = if_gethandle(IFT_ETHER);
2399 if (unlikely(ifp == NULL)) {
2400 ena_trace(NULL, ENA_ALERT, "can not allocate ifnet structure\n");
2403 if_initname(ifp, device_get_name(pdev), device_get_unit(pdev));
2404 if_setdev(ifp, pdev);
2405 if_setsoftc(ifp, adapter);
2407 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST |
2409 if_setinitfn(ifp, ena_init);
2410 if_settransmitfn(ifp, ena_mq_start);
2411 if_setqflushfn(ifp, ena_qflush);
2412 if_setioctlfn(ifp, ena_ioctl);
2413 if_setgetcounterfn(ifp, ena_get_counter);
2415 if_setsendqlen(ifp, adapter->requested_tx_ring_size);
2416 if_setsendqready(ifp);
2417 if_setmtu(ifp, ETHERMTU);
2418 if_setbaudrate(ifp, 0);
2419 /* Zeroize capabilities... */
2420 if_setcapabilities(ifp, 0);
2421 if_setcapenable(ifp, 0);
2422 /* check hardware support */
2423 caps = ena_get_dev_offloads(feat);
2424 /* ... and set them */
2425 if_setcapabilitiesbit(ifp, caps, 0);
2427 /* TSO parameters */
2428 ifp->if_hw_tsomax = ENA_TSO_MAXSIZE -
2429 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
2430 ifp->if_hw_tsomaxsegcount = adapter->max_tx_sgl_size - 1;
2431 ifp->if_hw_tsomaxsegsize = ENA_TSO_MAXSIZE;
2433 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
2434 if_setcapenable(ifp, if_getcapabilities(ifp));
2437 * Specify the media types supported by this adapter and register
2438 * callbacks to update media and link information
2440 ifmedia_init(&adapter->media, IFM_IMASK,
2441 ena_media_change, ena_media_status);
2442 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2443 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2445 ether_ifattach(ifp, adapter->mac_addr);
2451 ena_down(struct ena_adapter *adapter)
2455 if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
2458 device_printf(adapter->pdev, "device is going DOWN\n");
2460 callout_drain(&adapter->timer_service);
2462 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP, adapter);
2463 if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE,
2466 ena_free_io_irq(adapter);
2468 if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) {
2469 rc = ena_com_dev_reset(adapter->ena_dev,
2470 adapter->reset_reason);
2471 if (unlikely(rc != 0))
2472 device_printf(adapter->pdev,
2473 "Device reset failed\n");
2476 ena_destroy_all_io_queues(adapter);
2478 ena_free_all_tx_bufs(adapter);
2479 ena_free_all_rx_bufs(adapter);
2480 ena_free_all_tx_resources(adapter);
2481 ena_free_all_rx_resources(adapter);
2483 counter_u64_add(adapter->dev_stats.interface_down, 1);
2487 ena_calc_max_io_queue_num(device_t pdev, struct ena_com_dev *ena_dev,
2488 struct ena_com_dev_get_features_ctx *get_feat_ctx)
2490 uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
2492 /* Regular queues capabilities */
2493 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
2494 struct ena_admin_queue_ext_feature_fields *max_queue_ext =
2495 &get_feat_ctx->max_queue_ext.max_queue_ext;
2496 io_rx_num = min_t(int, max_queue_ext->max_rx_sq_num,
2497 max_queue_ext->max_rx_cq_num);
2499 io_tx_sq_num = max_queue_ext->max_tx_sq_num;
2500 io_tx_cq_num = max_queue_ext->max_tx_cq_num;
2502 struct ena_admin_queue_feature_desc *max_queues =
2503 &get_feat_ctx->max_queues;
2504 io_tx_sq_num = max_queues->max_sq_num;
2505 io_tx_cq_num = max_queues->max_cq_num;
2506 io_rx_num = min_t(int, io_tx_sq_num, io_tx_cq_num);
2509 /* In case of LLQ use the llq fields for the tx SQ/CQ */
2510 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
2511 io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
2513 max_num_io_queues = min_t(uint32_t, mp_ncpus, ENA_MAX_NUM_IO_QUEUES);
2514 max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_rx_num);
2515 max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_tx_sq_num);
2516 max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_tx_cq_num);
2517 /* 1 IRQ for mgmnt and 1 IRQ for each TX/RX pair */
2518 max_num_io_queues = min_t(uint32_t, max_num_io_queues,
2519 pci_msix_count(pdev) - 1);
2521 return (max_num_io_queues);
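/*
 * Illustrative note (not from the original source): the value returned above
 * is the minimum of the CPU count, the driver cap (ENA_MAX_NUM_IO_QUEUES),
 * the device RX/TX queue limits and the MSI-X vectors left after reserving
 * one for management. For example, on an 8-vCPU instance whose device reports
 * 32 RX/TX queues and 9 MSI-X vectors, this works out to
 * min(8, 32, 32, 32, 9 - 1) == 8 queue pairs.
 */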
2525 ena_enable_wc(struct resource *res)
2527 #if defined(__i386) || defined(__amd64) || defined(__aarch64__)
2532 va = (vm_offset_t)rman_get_virtual(res);
2533 len = rman_get_size(res);
2534 /* Enable write combining */
2535 rc = pmap_change_attr(va, len, VM_MEMATTR_WRITE_COMBINING);
2536 if (unlikely(rc != 0)) {
2537 ena_trace(NULL, ENA_ALERT, "pmap_change_attr failed, %d\n", rc);
2543 return (EOPNOTSUPP);
2547 ena_set_queues_placement_policy(device_t pdev, struct ena_com_dev *ena_dev,
2548 struct ena_admin_feature_llq_desc *llq,
2549 struct ena_llq_configurations *llq_default_configurations)
2551 struct ena_adapter *adapter = device_get_softc(pdev);
2553 uint32_t llq_feature_mask;
2555 llq_feature_mask = 1 << ENA_ADMIN_LLQ;
2556 if (!(ena_dev->supported_features & llq_feature_mask)) {
2558 "LLQ is not supported. Fallback to host mode policy.\n");
2559 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2563 rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
2564 if (unlikely(rc != 0)) {
2565 device_printf(pdev, "Failed to configure the device mode. "
2566 "Fallback to host mode policy.\n");
2567 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2571 /* Nothing to config, exit */
2572 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
2575 /* Try to allocate resources for LLQ bar */
2576 rid = PCIR_BAR(ENA_MEM_BAR);
2577 adapter->memory = bus_alloc_resource_any(pdev, SYS_RES_MEMORY,
2579 if (unlikely(adapter->memory == NULL)) {
2580 device_printf(pdev, "unable to allocate LLQ bar resource. "
2581 "Fallback to host mode policy.\n");
2582 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2586 /* Enable write combining for better LLQ performance */
2587 rc = ena_enable_wc(adapter->memory);
2588 if (unlikely(rc != 0)) {
2589 device_printf(pdev, "failed to enable write combining.\n");
2594 * Save virtual address of the device's memory region
2595 * for the ena_com layer.
2597 ena_dev->mem_bar = rman_get_virtual(adapter->memory);
2603 void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
2605 llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
2606 llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
2607 llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
2608 llq_config->llq_num_decs_before_header =
2609 ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
2610 llq_config->llq_ring_entry_size_value = 128;
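/*
 * Illustrative note (not from the original source): with 128-byte LLQ entries
 * and two descriptors placed before the header, each entry leaves roughly
 * 128 - 2 * 16 == 96 bytes for the pushed packet header, assuming 16-byte TX
 * descriptors. The descriptor size is an assumption here; only the 128-byte
 * entry size and the "2 descs before header" setting come from the code above.
 */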
2614 ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
2616 struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
2617 struct ena_com_dev *ena_dev = ctx->ena_dev;
2618 uint32_t tx_queue_size = ENA_DEFAULT_RING_SIZE;
2619 uint32_t rx_queue_size = ENA_DEFAULT_RING_SIZE;
2620 uint32_t max_tx_queue_size;
2621 uint32_t max_rx_queue_size;
2623 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
2624 struct ena_admin_queue_ext_feature_fields *max_queue_ext =
2625 &ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
2626 max_rx_queue_size = min_t(uint32_t,
2627 max_queue_ext->max_rx_cq_depth,
2628 max_queue_ext->max_rx_sq_depth);
2629 max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
2631 if (ena_dev->tx_mem_queue_type ==
2632 ENA_ADMIN_PLACEMENT_POLICY_DEV)
2633 max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
2634 llq->max_llq_depth);
2636 max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
2637 max_queue_ext->max_tx_sq_depth);
2639 ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
2640 max_queue_ext->max_per_packet_tx_descs);
2641 ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
2642 max_queue_ext->max_per_packet_rx_descs);
2644 struct ena_admin_queue_feature_desc *max_queues =
2645 &ctx->get_feat_ctx->max_queues;
2646 max_rx_queue_size = min_t(uint32_t,
2647 max_queues->max_cq_depth,
2648 max_queues->max_sq_depth);
2649 max_tx_queue_size = max_queues->max_cq_depth;
2651 if (ena_dev->tx_mem_queue_type ==
2652 ENA_ADMIN_PLACEMENT_POLICY_DEV)
2653 max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
2654 llq->max_llq_depth);
2656 max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
2657 max_queues->max_sq_depth);
2659 ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
2660 max_queues->max_packet_tx_descs);
2661 ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
2662 max_queues->max_packet_rx_descs);
2665 /* round down to the nearest power of 2 */
2666 max_tx_queue_size = 1 << (flsl(max_tx_queue_size) - 1);
2667 max_rx_queue_size = 1 << (flsl(max_rx_queue_size) - 1);
2669 tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
2671 rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
2674 tx_queue_size = 1 << (flsl(tx_queue_size) - 1);
2675 rx_queue_size = 1 << (flsl(rx_queue_size) - 1);
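/*
 * Illustrative example (not from the original source): the flsl()-based
 * expression above rounds a value down to the nearest power of two, e.g. a
 * depth of 1000 has its highest set bit at position 9 (flsl(1000) == 10),
 * so it becomes 1 << 9 == 512.
 */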
2677 ctx->max_tx_queue_size = max_tx_queue_size;
2678 ctx->max_rx_queue_size = max_rx_queue_size;
2679 ctx->tx_queue_size = tx_queue_size;
2680 ctx->rx_queue_size = rx_queue_size;
2686 ena_rss_init_default(struct ena_adapter *adapter)
2688 struct ena_com_dev *ena_dev = adapter->ena_dev;
2689 device_t dev = adapter->pdev;
2692 rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
2693 if (unlikely(rc != 0)) {
2694 device_printf(dev, "Cannot init indirect table\n");
2698 for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
2699 qid = i % adapter->num_io_queues;
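/*
 * Illustrative example (not from the original source): the indirection table
 * is filled round-robin, so with e.g. 4 IO queues, entries 0, 4, 8, ... point
 * at RX queue 0, entries 1, 5, 9, ... at queue 1, and so on.
 */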
2700 rc = ena_com_indirect_table_fill_entry(ena_dev, i,
2701 ENA_IO_RXQ_IDX(qid));
2702 if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) {
2703 device_printf(dev, "Cannot fill indirect table\n");
2704 goto err_rss_destroy;
2709 uint8_t rss_algo = rss_gethashalgo();
2710 if (rss_algo == RSS_HASH_TOEPLITZ) {
2711 uint8_t hash_key[RSS_KEYSIZE];
2713 rss_getkey(hash_key);
2714 rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
2715 hash_key, RSS_KEYSIZE, 0xFFFFFFFF);
2718 rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
2719 ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
2720 if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) {
2721 device_printf(dev, "Cannot fill hash function\n");
2722 goto err_rss_destroy;
2725 rc = ena_com_set_default_hash_ctrl(ena_dev);
2726 if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) {
2727 device_printf(dev, "Cannot fill hash control\n");
2728 goto err_rss_destroy;
2734 ena_com_rss_destroy(ena_dev);
2739 ena_rss_init_default_deferred(void *arg)
2741 struct ena_adapter *adapter;
2746 dc = devclass_find("ena");
2747 if (unlikely(dc == NULL)) {
2748 ena_trace(NULL, ENA_ALERT, "No devclass ena\n");
2752 max = devclass_get_maxunit(dc);
2753 while (max-- >= 0) {
2754 adapter = devclass_get_softc(dc, max);
2755 if (adapter != NULL) {
2756 rc = ena_rss_init_default(adapter);
2757 ENA_FLAG_SET_ATOMIC(ENA_FLAG_RSS_ACTIVE, adapter);
2758 if (unlikely(rc != 0)) {
2759 device_printf(adapter->pdev,
2760 "WARNING: RSS was not properly initialized,"
2761 " it will affect bandwidth\n");
2762 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_RSS_ACTIVE, adapter);
2767 SYSINIT(ena_rss_init, SI_SUB_KICK_SCHEDULER, SI_ORDER_SECOND, ena_rss_init_default_deferred, NULL);
2770 ena_config_host_info(struct ena_com_dev *ena_dev, device_t dev)
2772 struct ena_admin_host_info *host_info;
2776 /* Allocate only the host info */
2777 rc = ena_com_allocate_host_info(ena_dev);
2778 if (unlikely(rc != 0)) {
2779 ena_trace(NULL, ENA_ALERT, "Cannot allocate host info\n");
2783 host_info = ena_dev->host_attr.host_info;
2785 if (pci_get_id(dev, PCI_ID_RID, &rid) == 0)
2786 host_info->bdf = rid;
2787 host_info->os_type = ENA_ADMIN_OS_FREEBSD;
2788 host_info->kernel_ver = osreldate;
2790 sprintf(host_info->kernel_ver_str, "%d", osreldate);
2791 host_info->os_dist = 0;
2792 strncpy(host_info->os_dist_str, osrelease,
2793 sizeof(host_info->os_dist_str) - 1);
2795 host_info->driver_version =
2796 (DRV_MODULE_VER_MAJOR) |
2797 (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
2798 (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
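/*
 * Illustrative example (not from the original source): a hypothetical driver
 * version 2.3.0 would be reported to the device as
 * 2 | (3 << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
 * (0 << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT).
 */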
2799 host_info->num_cpus = mp_ncpus;
2801 rc = ena_com_set_host_attributes(ena_dev);
2802 if (unlikely(rc != 0)) {
2803 if (rc == EOPNOTSUPP)
2804 ena_trace(NULL, ENA_WARNING, "Cannot set host attributes\n");
2806 ena_trace(NULL, ENA_ALERT, "Cannot set host attributes\n");
2814 ena_com_delete_host_info(ena_dev);
2818 ena_device_init(struct ena_adapter *adapter, device_t pdev,
2819 struct ena_com_dev_get_features_ctx *get_feat_ctx, int *wd_active)
2821 struct ena_com_dev* ena_dev = adapter->ena_dev;
2822 bool readless_supported;
2823 uint32_t aenq_groups;
2827 rc = ena_com_mmio_reg_read_request_init(ena_dev);
2828 if (unlikely(rc != 0)) {
2829 device_printf(pdev, "failed to init mmio read less\n");
2834 * The PCIe configuration space revision id indicates whether the mmio reg
2837 readless_supported = !(pci_get_revid(pdev) & ENA_MMIO_DISABLE_REG_READ);
2838 ena_com_set_mmio_read_mode(ena_dev, readless_supported);
2840 rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
2841 if (unlikely(rc != 0)) {
2842 device_printf(pdev, "Can not reset device\n");
2843 goto err_mmio_read_less;
2846 rc = ena_com_validate_version(ena_dev);
2847 if (unlikely(rc != 0)) {
2848 device_printf(pdev, "device version is too low\n");
2849 goto err_mmio_read_less;
2852 dma_width = ena_com_get_dma_width(ena_dev);
2853 if (unlikely(dma_width < 0)) {
2854 device_printf(pdev, "Invalid dma width value %d", dma_width);
2856 goto err_mmio_read_less;
2858 adapter->dma_width = dma_width;
2860 /* ENA admin level init */
2861 rc = ena_com_admin_init(ena_dev, &aenq_handlers);
2862 if (unlikely(rc != 0)) {
2864 "Can not initialize ena admin queue with device\n");
2865 goto err_mmio_read_less;
2869 * To enable the msix interrupts the driver needs to know the number
2870 * of queues. So the driver uses polling mode to retrieve this
2873 ena_com_set_admin_polling_mode(ena_dev, true);
2875 ena_config_host_info(ena_dev, pdev);
2877 /* Get Device Attributes */
2878 rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
2879 if (unlikely(rc != 0)) {
2881 "Cannot get attribute for ena device rc: %d\n", rc);
2882 goto err_admin_init;
2885 aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
2886 BIT(ENA_ADMIN_FATAL_ERROR) |
2887 BIT(ENA_ADMIN_WARNING) |
2888 BIT(ENA_ADMIN_NOTIFICATION) |
2889 BIT(ENA_ADMIN_KEEP_ALIVE);
2891 aenq_groups &= get_feat_ctx->aenq.supported_groups;
2892 rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
2893 if (unlikely(rc != 0)) {
2894 device_printf(pdev, "Cannot configure aenq groups rc: %d\n", rc);
2895 goto err_admin_init;
2898 *wd_active = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
2903 ena_com_delete_host_info(ena_dev);
2904 ena_com_admin_destroy(ena_dev);
2906 ena_com_mmio_reg_read_request_destroy(ena_dev);
2911 static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter)
2913 struct ena_com_dev *ena_dev = adapter->ena_dev;
2916 rc = ena_enable_msix(adapter);
2917 if (unlikely(rc != 0)) {
2918 device_printf(adapter->pdev, "Error with MSI-X enablement\n");
2922 ena_setup_mgmnt_intr(adapter);
2924 rc = ena_request_mgmnt_irq(adapter);
2925 if (unlikely(rc != 0)) {
2926 device_printf(adapter->pdev, "Cannot setup mgmnt queue intr\n");
2927 goto err_disable_msix;
2930 ena_com_set_admin_polling_mode(ena_dev, false);
2932 ena_com_admin_aenq_enable(ena_dev);
2937 ena_disable_msix(adapter);
2942 /* Function called on ENA_ADMIN_KEEP_ALIVE event */
2943 static void ena_keep_alive_wd(void *adapter_data,
2944 struct ena_admin_aenq_entry *aenq_e)
2946 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
2947 struct ena_admin_aenq_keep_alive_desc *desc;
2952 desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
2954 rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low;
2955 tx_drops = ((uint64_t)desc->tx_drops_high << 32) | desc->tx_drops_low;
2956 counter_u64_zero(adapter->hw_stats.rx_drops);
2957 counter_u64_add(adapter->hw_stats.rx_drops, rx_drops);
2958 counter_u64_zero(adapter->hw_stats.tx_drops);
2959 counter_u64_add(adapter->hw_stats.tx_drops, tx_drops);
2961 stime = getsbinuptime();
2962 atomic_store_rel_64(&adapter->keep_alive_timestamp, stime);
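/*
 * Illustrative example (not from the original source): the device reports the
 * drop counters as two 32-bit halves, which the code above combines as
 * ((uint64_t)hi << 32) | lo; e.g. hi == 1 and lo == 5 yields 4294967301.
 * The keep_alive_timestamp stored here is later compared against the current
 * sbintime by the watchdog check in the timer service.
 */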
2965 /* Check for keep alive expiration */
2966 static void check_for_missing_keep_alive(struct ena_adapter *adapter)
2968 sbintime_t timestamp, time;
2970 if (adapter->wd_active == 0)
2973 if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
2976 timestamp = atomic_load_acq_64(&adapter->keep_alive_timestamp);
2977 time = getsbinuptime() - timestamp;
2978 if (unlikely(time > adapter->keep_alive_timeout)) {
2979 device_printf(adapter->pdev,
2980 "Keep alive watchdog timeout.\n");
2981 counter_u64_add(adapter->dev_stats.wd_expired, 1);
2982 ena_trigger_reset(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO);
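/*
 * Illustrative note (not from the original source): "time" above is the
 * sbintime elapsed since the last keep-alive AENQ event; if, say, that event
 * arrived 7 seconds ago while keep_alive_timeout is 6 * SBT_1S (an assumed
 * default), the comparison takes the reset path above.
 */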
2986 /* Check if admin queue is enabled */
2987 static void check_for_admin_com_state(struct ena_adapter *adapter)
2989 if (unlikely(ena_com_get_admin_running_state(adapter->ena_dev) ==
2991 device_printf(adapter->pdev,
2992 "ENA admin queue is not in running state!\n");
2993 counter_u64_add(adapter->dev_stats.admin_q_pause, 1);
2994 ena_trigger_reset(adapter, ENA_REGS_RESET_ADMIN_TO);
2999 check_for_rx_interrupt_queue(struct ena_adapter *adapter,
3000 struct ena_ring *rx_ring)
3002 if (likely(rx_ring->first_interrupt))
3005 if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
3008 rx_ring->no_interrupt_event_cnt++;
3010 if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
3011 device_printf(adapter->pdev, "Potential MSIX issue on Rx side "
3012 "Queue = %d. Reset the device\n", rx_ring->qid);
3013 ena_trigger_reset(adapter, ENA_REGS_RESET_MISS_INTERRUPT);
3021 check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
3022 struct ena_ring *tx_ring)
3024 struct bintime curtime, time;
3025 struct ena_tx_buffer *tx_buf;
3026 sbintime_t time_offset;
3027 uint32_t missed_tx = 0;
3030 getbinuptime(&curtime);
3032 for (i = 0; i < tx_ring->ring_size; i++) {
3033 tx_buf = &tx_ring->tx_buffer_info[i];
3035 if (bintime_isset(&tx_buf->timestamp) == 0)
3039 bintime_sub(&time, &tx_buf->timestamp);
3040 time_offset = bttosbt(time);
3042 if (unlikely(!tx_ring->first_interrupt &&
3043 time_offset > 2 * adapter->missing_tx_timeout)) {
3045 * If after graceful period interrupt is still not
3046 * received, we schedule a reset.
3048 device_printf(adapter->pdev,
3049 "Potential MSIX issue on Tx side Queue = %d. "
3050 "Reset the device\n", tx_ring->qid);
3051 ena_trigger_reset(adapter,
3052 ENA_REGS_RESET_MISS_INTERRUPT);
3056 /* Check again if packet is still waiting */
3057 if (unlikely(time_offset > adapter->missing_tx_timeout)) {
3059 if (!tx_buf->print_once)
3060 ena_trace(NULL, ENA_WARNING, "Found a Tx that wasn't "
3061 "completed on time, qid %d, index %d.\n",
3064 tx_buf->print_once = true;
3069 if (unlikely(missed_tx > adapter->missing_tx_threshold)) {
3070 device_printf(adapter->pdev,
3071 "The number of lost tx completion is above the threshold "
3072 "(%d > %d). Reset the device\n",
3073 missed_tx, adapter->missing_tx_threshold);
3074 ena_trigger_reset(adapter, ENA_REGS_RESET_MISS_TX_CMPL);
3078 counter_u64_add(tx_ring->tx_stats.missing_tx_comp, missed_tx);
3084 * Check for TX which were not completed on time.
3085 * Timeout is defined by "missing_tx_timeout".
3086 * Reset will be performed if the number of incomplete
3087 * transactions exceeds "missing_tx_threshold".
3090 check_for_missing_completions(struct ena_adapter *adapter)
3092 struct ena_ring *tx_ring;
3093 struct ena_ring *rx_ring;
3096 /* Make sure another context isn't bringing the device up or down in parallel */
3099 if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
3102 if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))
3105 if (adapter->missing_tx_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3108 budget = adapter->missing_tx_max_queues;
3110 for (i = adapter->next_monitored_tx_qid; i < adapter->num_io_queues; i++) {
3111 tx_ring = &adapter->tx_ring[i];
3112 rx_ring = &adapter->rx_ring[i];
3114 rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
3115 if (unlikely(rc != 0))
3118 rc = check_for_rx_interrupt_queue(adapter, rx_ring);
3119 if (unlikely(rc != 0))
3129 adapter->next_monitored_tx_qid = i % adapter->num_io_queues;
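/*
 * Illustrative note (not from the original source): the loop above inspects
 * at most "budget" queue pairs per timer tick and remembers where it stopped,
 * so with, say, a budget of 4 and 8 IO queues (illustrative values), two
 * consecutive ticks together cover every queue.
 */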
3132 /* trigger rx cleanup after 2 consecutive detections */
3133 #define EMPTY_RX_REFILL 2
3134 /* For the rare case where the device runs out of Rx descriptors and the
3135 * msix handler failed to refill new Rx descriptors (due to a lack of memory
3137 * This case will lead to a deadlock:
3138 * The device won't send interrupts since all the new Rx packets will be dropped.
3139 * The msix handler won't allocate new Rx descriptors so the device won't be
3140 * able to deliver new packets to the host.
3142 * When such a situation is detected, the rx cleanup task is executed in another thread.
3145 check_for_empty_rx_ring(struct ena_adapter *adapter)
3147 struct ena_ring *rx_ring;
3148 int i, refill_required;
3150 if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
3153 if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))
3156 for (i = 0; i < adapter->num_io_queues; i++) {
3157 rx_ring = &adapter->rx_ring[i];
3159 refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
3160 if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
3161 rx_ring->empty_rx_queue++;
3163 if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
3164 counter_u64_add(rx_ring->rx_stats.empty_rx_ring,
3167 device_printf(adapter->pdev,
3168 "trigger refill for ring %d\n", i);
3170 taskqueue_enqueue(rx_ring->que->cleanup_tq,
3171 &rx_ring->que->cleanup_task);
3172 rx_ring->empty_rx_queue = 0;
3175 rx_ring->empty_rx_queue = 0;
3180 static void ena_update_hints(struct ena_adapter *adapter,
3181 struct ena_admin_ena_hw_hints *hints)
3183 struct ena_com_dev *ena_dev = adapter->ena_dev;
3185 if (hints->admin_completion_tx_timeout)
3186 ena_dev->admin_queue.completion_timeout =
3187 hints->admin_completion_tx_timeout * 1000;
3189 if (hints->mmio_read_timeout)
3190 /* convert to usec */
3191 ena_dev->mmio_read.reg_read_to =
3192 hints->mmio_read_timeout * 1000;
3194 if (hints->missed_tx_completion_count_threshold_to_reset)
3195 adapter->missing_tx_threshold =
3196 hints->missed_tx_completion_count_threshold_to_reset;
3198 if (hints->missing_tx_completion_timeout) {
3199 if (hints->missing_tx_completion_timeout ==
3200 ENA_HW_HINTS_NO_TIMEOUT)
3201 adapter->missing_tx_timeout = ENA_HW_HINTS_NO_TIMEOUT;
3203 adapter->missing_tx_timeout =
3204 SBT_1MS * hints->missing_tx_completion_timeout;
3207 if (hints->driver_watchdog_timeout) {
3208 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3209 adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
3211 adapter->keep_alive_timeout =
3212 SBT_1MS * hints->driver_watchdog_timeout;
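/*
 * Illustrative example (not from the original source): the hints arrive in
 * milliseconds; admin/mmio timeouts are converted to microseconds
 * (value * 1000), while the TX-completion and watchdog timeouts become
 * sbintime values (value * SBT_1MS), e.g. a driver_watchdog_timeout hint of
 * 3000 yields a 3 second keep-alive timeout.
 */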
3217 ena_timer_service(void *data)
3219 struct ena_adapter *adapter = (struct ena_adapter *)data;
3220 struct ena_admin_host_info *host_info =
3221 adapter->ena_dev->host_attr.host_info;
3223 check_for_missing_keep_alive(adapter);
3225 check_for_admin_com_state(adapter);
3227 check_for_missing_completions(adapter);
3229 check_for_empty_rx_ring(adapter);
3231 if (host_info != NULL)
3232 ena_update_host_info(host_info, adapter->ifp);
3234 if (unlikely(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) {
3235 device_printf(adapter->pdev, "Trigger reset is on\n");
3236 taskqueue_enqueue(adapter->reset_tq, &adapter->reset_task);
3241 * Schedule another timeout one second from now.
3243 callout_schedule_sbt(&adapter->timer_service, SBT_1S, SBT_1S, 0);
3247 ena_destroy_device(struct ena_adapter *adapter, bool graceful)
3249 if_t ifp = adapter->ifp;
3250 struct ena_com_dev *ena_dev = adapter->ena_dev;
3253 if (!ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter))
3256 if_link_state_change(ifp, LINK_STATE_DOWN);
3258 callout_drain(&adapter->timer_service);
3260 dev_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
3262 ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
3265 ena_com_set_admin_running_state(ena_dev, false);
3267 if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
3271 * Stop the device from sending AENQ events (if the device was up, and
3272 * the trigger reset was on, ena_down already performs device reset)
3274 if (!(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter) && dev_up))
3275 ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
3277 ena_free_mgmnt_irq(adapter);
3279 ena_disable_msix(adapter);
3282 * IO rings resources should be freed because `ena_restore_device()`
3283 * calls (not directly) `ena_enable_msix()`, which re-allocates MSIX
3284 * vectors. The number of MSI-X vectors after destroy-restore may be
3285 * different from before. Therefore, IO ring resources should be
3286 * established from scratch each time.
3288 ena_free_all_io_rings_resources(adapter);
3290 ena_com_abort_admin_commands(ena_dev);
3292 ena_com_wait_for_abort_completion(ena_dev);
3294 ena_com_admin_destroy(ena_dev);
3296 ena_com_mmio_reg_read_request_destroy(ena_dev);
3298 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
3300 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter);
3301 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
3305 ena_device_validate_params(struct ena_adapter *adapter,
3306 struct ena_com_dev_get_features_ctx *get_feat_ctx)
3309 if (memcmp(get_feat_ctx->dev_attr.mac_addr, adapter->mac_addr,
3310 ETHER_ADDR_LEN) != 0) {
3311 device_printf(adapter->pdev,
3312 "Error, mac address are different\n");
3316 if (get_feat_ctx->dev_attr.max_mtu < if_getmtu(adapter->ifp)) {
3317 device_printf(adapter->pdev,
3318 "Error, device max mtu is smaller than ifp MTU\n");
3326 ena_restore_device(struct ena_adapter *adapter)
3328 struct ena_com_dev_get_features_ctx get_feat_ctx;
3329 struct ena_com_dev *ena_dev = adapter->ena_dev;
3330 if_t ifp = adapter->ifp;
3331 device_t dev = adapter->pdev;
3335 ENA_FLAG_SET_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);
3337 rc = ena_device_init(adapter, dev, &get_feat_ctx, &wd_active);
3339 device_printf(dev, "Cannot initialize device\n");
3343 * Only enable WD if it was enabled before reset, so it won't override
3344 * value set by the user by the sysctl.
3346 if (adapter->wd_active != 0)
3347 adapter->wd_active = wd_active;
3349 rc = ena_device_validate_params(adapter, &get_feat_ctx);
3351 device_printf(dev, "Validation of device parameters failed\n");
3352 goto err_device_destroy;
3355 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);
3356 /* Make sure we don't have a race with the AENQ link state handler */
3357 if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter))
3358 if_link_state_change(ifp, LINK_STATE_UP);
3360 rc = ena_enable_msix_and_set_admin_interrupts(adapter);
3362 device_printf(dev, "Enable MSI-X failed\n");
3363 goto err_device_destroy;
3367 * Effective value of used MSIX vectors should be the same as before
3368 * `ena_destroy_device()`, if possible, or closest to it if fewer vectors
3371 if ((adapter->msix_vecs - ENA_ADMIN_MSIX_VEC) < adapter->num_io_queues)
3372 adapter->num_io_queues =
3373 adapter->msix_vecs - ENA_ADMIN_MSIX_VEC;
3375 /* Re-initialize rings basic information */
3376 ena_init_io_rings(adapter);
3378 /* If the interface was up before the reset bring it up */
3379 if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter)) {
3380 rc = ena_up(adapter);
3382 device_printf(dev, "Failed to create I/O queues\n");
3383 goto err_disable_msix;
3387 /* Indicate that device is running again and ready to work */
3388 ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
3390 if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter)) {
3392 * As the AENQ handlers weren't executed during reset because
3393 * the flag ENA_FLAG_DEVICE_RUNNING was turned off, the
3394 * timestamp must be updated again. That will prevent the next reset
3395 * caused by missing keep alive.
3397 adapter->keep_alive_timestamp = getsbinuptime();
3398 callout_reset_sbt(&adapter->timer_service, SBT_1S, SBT_1S,
3399 ena_timer_service, (void *)adapter, 0);
3401 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
3404 "Device reset completed successfully, Driver info: %s\n", ena_version);
3409 ena_free_mgmnt_irq(adapter);
3410 ena_disable_msix(adapter);
3412 ena_com_abort_admin_commands(ena_dev);
3413 ena_com_wait_for_abort_completion(ena_dev);
3414 ena_com_admin_destroy(ena_dev);
3415 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
3416 ena_com_mmio_reg_read_request_destroy(ena_dev);
3418 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
3419 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);
3420 device_printf(dev, "Reset attempt failed. Can not reset the device\n");
3426 ena_reset_task(void *arg, int pending)
3428 struct ena_adapter *adapter = (struct ena_adapter *)arg;
3430 if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) {
3431 device_printf(adapter->pdev,
3432 "device reset scheduled but trigger_reset is off\n");
3436 ENA_LOCK_LOCK(adapter);
3437 ena_destroy_device(adapter, false);
3438 ena_restore_device(adapter);
3439 ENA_LOCK_UNLOCK(adapter);
3443 * ena_attach - Device Initialization Routine
3444 * @pdev: device information struct
3446 * Returns 0 on success, or an error code on failure.
3448 * ena_attach initializes an adapter identified by a device structure.
3449 * The OS initialization, configuring of the adapter private structure,
3450 * and a hardware reset occur.
3453 ena_attach(device_t pdev)
3455 struct ena_com_dev_get_features_ctx get_feat_ctx;
3456 struct ena_llq_configurations llq_config;
3457 struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
3458 static int version_printed;
3459 struct ena_adapter *adapter;
3460 struct ena_com_dev *ena_dev = NULL;
3461 uint32_t max_num_io_queues;
3464 adapter = device_get_softc(pdev);
3465 adapter->pdev = pdev;
3467 ENA_LOCK_INIT(adapter);
3470 * Set up the timer service - driver is responsible for avoiding
3471 * concurrency, as the callout won't be using any locking inside.
3473 callout_init(&adapter->timer_service, true);
3474 adapter->keep_alive_timeout = DEFAULT_KEEP_ALIVE_TO;
3475 adapter->missing_tx_timeout = DEFAULT_TX_CMP_TO;
3476 adapter->missing_tx_max_queues = DEFAULT_TX_MONITORED_QUEUES;
3477 adapter->missing_tx_threshold = DEFAULT_TX_CMP_THRESHOLD;
3479 if (version_printed++ == 0)
3480 device_printf(pdev, "%s\n", ena_version);
3482 /* Allocate memory for ena_dev structure */
3483 ena_dev = malloc(sizeof(struct ena_com_dev), M_DEVBUF,
3486 adapter->ena_dev = ena_dev;
3487 ena_dev->dmadev = pdev;
3489 rid = PCIR_BAR(ENA_REG_BAR);
3490 adapter->memory = NULL;
3491 adapter->registers = bus_alloc_resource_any(pdev, SYS_RES_MEMORY,
3493 if (unlikely(adapter->registers == NULL)) {
3495 "unable to allocate bus resource: registers!\n");
3500 ena_dev->bus = malloc(sizeof(struct ena_bus), M_DEVBUF,
3503 /* Store register resources */
3504 ((struct ena_bus*)(ena_dev->bus))->reg_bar_t =
3505 rman_get_bustag(adapter->registers);
3506 ((struct ena_bus*)(ena_dev->bus))->reg_bar_h =
3507 rman_get_bushandle(adapter->registers);
3509 if (unlikely(((struct ena_bus*)(ena_dev->bus))->reg_bar_h == 0)) {
3510 device_printf(pdev, "failed to pmap registers bar\n");
3515 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3517 /* Initially clear all the flags */
3518 ENA_FLAG_ZERO(adapter);
3520 /* Device initialization */
3521 rc = ena_device_init(adapter, pdev, &get_feat_ctx, &adapter->wd_active);
3522 if (unlikely(rc != 0)) {
3523 device_printf(pdev, "ENA device init failed! (err: %d)\n", rc);
3528 set_default_llq_configurations(&llq_config);
3530 rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq,
3532 if (unlikely(rc != 0)) {
3533 device_printf(pdev, "failed to set placement policy\n");
3537 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
3538 adapter->disable_meta_caching =
3539 !!(get_feat_ctx.llq.accel_mode.u.get.supported_flags &
3540 BIT(ENA_ADMIN_DISABLE_META_CACHING));
3542 adapter->keep_alive_timestamp = getsbinuptime();
3544 adapter->tx_offload_cap = get_feat_ctx.offload.tx;
3546 memcpy(adapter->mac_addr, get_feat_ctx.dev_attr.mac_addr,
3549 calc_queue_ctx.pdev = pdev;
3550 calc_queue_ctx.ena_dev = ena_dev;
3551 calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
3553 /* Calculate initial and maximum IO queue number and size */
3554 max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev,
3556 rc = ena_calc_io_queue_size(&calc_queue_ctx);
3557 if (unlikely((rc != 0) || (max_num_io_queues <= 0))) {
3562 adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
3563 adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size;
3564 adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
3565 adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
3566 adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
3567 adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
3569 adapter->max_num_io_queues = max_num_io_queues;
3571 adapter->buf_ring_size = ENA_DEFAULT_BUF_RING_SIZE;
3573 adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;
3575 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
3577 /* set up dma tags for rx and tx buffers */
3578 rc = ena_setup_tx_dma_tag(adapter);
3579 if (unlikely(rc != 0)) {
3580 device_printf(pdev, "Failed to create TX DMA tag\n");
3584 rc = ena_setup_rx_dma_tag(adapter);
3585 if (unlikely(rc != 0)) {
3586 device_printf(pdev, "Failed to create RX DMA tag\n");
3587 goto err_tx_tag_free;
3591 * The amount of requested MSIX vectors is equal to
3592 * adapter::max_num_io_queues (see `ena_enable_msix()`), plus a constant
3593 * number of admin queue interrupts. The former is initially determined
3594 * by HW capabilities (see `ena_calc_max_io_queue_num()`) but may not be
3595 * achieved if there are not enough system resources. By default, the
3596 * number of effectively used IO queues is the same but later on it can
3597 * be limited by the user using sysctl interface.
3599 rc = ena_enable_msix_and_set_admin_interrupts(adapter);
3600 if (unlikely(rc != 0)) {
3602 "Failed to enable and set the admin interrupts\n");
3605 /* By default all of allocated MSIX vectors are actively used */
3606 adapter->num_io_queues = adapter->msix_vecs - ENA_ADMIN_MSIX_VEC;
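/*
 * Illustrative example (not from the original source): if 9 MSI-X vectors
 * were granted and ENA_ADMIN_MSIX_VEC of them are reserved for the management
 * interrupt (one, under the usual configuration), the driver starts out with
 * 8 active IO queue pairs here.
 */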
3608 /* initialize rings basic information */
3609 ena_init_io_rings(adapter);
3611 /* setup network interface */
3612 rc = ena_setup_ifnet(pdev, adapter, &get_feat_ctx);
3613 if (unlikely(rc != 0)) {
3614 device_printf(pdev, "Error with network interface setup\n");
3618 /* Initialize reset task queue */
3619 TASK_INIT(&adapter->reset_task, 0, ena_reset_task, adapter);
3620 adapter->reset_tq = taskqueue_create("ena_reset_enqueue",
3621 M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->reset_tq);
3622 taskqueue_start_threads(&adapter->reset_tq, 1, PI_NET,
3623 "%s rstq", device_get_nameunit(adapter->pdev));
3625 /* Initialize statistics */
3626 ena_alloc_counters((counter_u64_t *)&adapter->dev_stats,
3627 sizeof(struct ena_stats_dev));
3628 ena_alloc_counters((counter_u64_t *)&adapter->hw_stats,
3629 sizeof(struct ena_hw_stats));
3630 ena_sysctl_add_nodes(adapter);
3633 rc = ena_netmap_attach(adapter);
3635 device_printf(pdev, "netmap attach failed: %d\n", rc);
3638 #endif /* DEV_NETMAP */
3640 /* Tell the stack that the interface is not active */
3641 if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
3642 ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
3648 ether_ifdetach(adapter->ifp);
3649 #endif /* DEV_NETMAP */
3651 ena_com_dev_reset(adapter->ena_dev, ENA_REGS_RESET_INIT_ERR);
3652 ena_free_mgmnt_irq(adapter);
3653 ena_disable_msix(adapter);
3655 ena_free_all_io_rings_resources(adapter);
3656 ena_free_rx_dma_tag(adapter);
3658 ena_free_tx_dma_tag(adapter);
3660 ena_com_admin_destroy(ena_dev);
3661 ena_com_delete_host_info(ena_dev);
3662 ena_com_mmio_reg_read_request_destroy(ena_dev);
3664 free(ena_dev->bus, M_DEVBUF);
3665 ena_free_pci_resources(adapter);
3667 free(ena_dev, M_DEVBUF);
3673 * ena_detach - Device Removal Routine
3674 * @pdev: device information struct
3676 * ena_detach is called by the device subsystem to alert the driver
3677 * that it should release a PCI device.
3680 ena_detach(device_t pdev)
3682 struct ena_adapter *adapter = device_get_softc(pdev);
3683 struct ena_com_dev *ena_dev = adapter->ena_dev;
3686 /* Make sure VLANS are not using driver */
3687 if (adapter->ifp->if_vlantrunk != NULL) {
3688 device_printf(adapter->pdev, "VLAN is in use, detach first\n");
3692 ether_ifdetach(adapter->ifp);
3694 /* Stop timer service */
3695 ENA_LOCK_LOCK(adapter);
3696 callout_drain(&adapter->timer_service);
3697 ENA_LOCK_UNLOCK(adapter);
3699 /* Release reset task */
3700 while (taskqueue_cancel(adapter->reset_tq, &adapter->reset_task, NULL))
3701 taskqueue_drain(adapter->reset_tq, &adapter->reset_task);
3702 taskqueue_free(adapter->reset_tq);
3704 ENA_LOCK_LOCK(adapter);
3706 ena_destroy_device(adapter, true);
3707 ENA_LOCK_UNLOCK(adapter);
3710 netmap_detach(adapter->ifp);
3711 #endif /* DEV_NETMAP */
3713 ena_free_counters((counter_u64_t *)&adapter->hw_stats,
3714 sizeof(struct ena_hw_stats));
3715 ena_free_counters((counter_u64_t *)&adapter->dev_stats,
3716 sizeof(struct ena_stats_dev));
3718 rc = ena_free_rx_dma_tag(adapter);
3719 if (unlikely(rc != 0))
3720 device_printf(adapter->pdev,
3721 "Unmapped RX DMA tag associations\n");
3723 rc = ena_free_tx_dma_tag(adapter);
3724 if (unlikely(rc != 0))
3725 device_printf(adapter->pdev,
3726 "Unmapped TX DMA tag associations\n");
3728 ena_free_irqs(adapter);
3730 ena_free_pci_resources(adapter);
3732 if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter)))
3733 ena_com_rss_destroy(ena_dev);
3735 ena_com_delete_host_info(ena_dev);
3737 ENA_LOCK_DESTROY(adapter);
3739 if_free(adapter->ifp);
3741 if (ena_dev->bus != NULL)
3742 free(ena_dev->bus, M_DEVBUF);
3744 if (ena_dev != NULL)
3745 free(ena_dev, M_DEVBUF);
3747 return (bus_generic_detach(pdev));
3750 /******************************************************************************
3751 ******************************** AENQ Handlers *******************************
3752 *****************************************************************************/
3754 * ena_update_on_link_change:
3755 * Notify the network interface about the change in link status
3758 ena_update_on_link_change(void *adapter_data,
3759 struct ena_admin_aenq_entry *aenq_e)
3761 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3762 struct ena_admin_aenq_link_change_desc *aenq_desc;
3766 aenq_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;
3768 status = aenq_desc->flags &
3769 ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
3772 device_printf(adapter->pdev, "link is UP\n");
3773 ENA_FLAG_SET_ATOMIC(ENA_FLAG_LINK_UP, adapter);
3774 if (!ENA_FLAG_ISSET(ENA_FLAG_ONGOING_RESET, adapter))
3775 if_link_state_change(ifp, LINK_STATE_UP);
3777 device_printf(adapter->pdev, "link is DOWN\n");
3778 if_link_state_change(ifp, LINK_STATE_DOWN);
3779 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_LINK_UP, adapter);
3783 static void ena_notification(void *adapter_data,
3784 struct ena_admin_aenq_entry *aenq_e)
3786 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3787 struct ena_admin_ena_hw_hints *hints;
3789 ENA_WARN(NULL, aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
3790 "Invalid group(%x) expected %x\n", aenq_e->aenq_common_desc.group,
3791 ENA_ADMIN_NOTIFICATION);
3793 switch (aenq_e->aenq_common_desc.syndrome) {
3794 case ENA_ADMIN_UPDATE_HINTS:
3796 (struct ena_admin_ena_hw_hints *)(&aenq_e->inline_data_w4);
3797 ena_update_hints(adapter, hints);
3800 device_printf(adapter->pdev,
3801 "Invalid aenq notification link state %d\n",
3802 aenq_e->aenq_common_desc.syndrome);
3807 * This handler will be called for an unknown event group or unimplemented handlers
3810 unimplemented_aenq_handler(void *adapter_data,
3811 struct ena_admin_aenq_entry *aenq_e)
3813 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3815 device_printf(adapter->pdev,
3816 "Unknown event was received or event with unimplemented handler\n");
3819 static struct ena_aenq_handlers aenq_handlers = {
3821 [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
3822 [ENA_ADMIN_NOTIFICATION] = ena_notification,
3823 [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
3825 .unimplemented_handler = unimplemented_aenq_handler
3828 /*********************************************************************
3829 * FreeBSD Device Interface Entry Points
3830 *********************************************************************/
3832 static device_method_t ena_methods[] = {
3833 /* Device interface */
3834 DEVMETHOD(device_probe, ena_probe),
3835 DEVMETHOD(device_attach, ena_attach),
3836 DEVMETHOD(device_detach, ena_detach),
3840 static driver_t ena_driver = {
3841 "ena", ena_methods, sizeof(struct ena_adapter),
3844 devclass_t ena_devclass;
3845 DRIVER_MODULE(ena, pci, ena_driver, ena_devclass, 0, 0);
3846 MODULE_PNP_INFO("U16:vendor;U16:device", pci, ena, ena_vendor_info_array,
3847 nitems(ena_vendor_info_array) - 1);
3848 MODULE_DEPEND(ena, pci, 1, 1, 1);
3849 MODULE_DEPEND(ena, ether, 1, 1, 1);
3851 MODULE_DEPEND(ena, netmap, 1, 1, 1);
3852 #endif /* DEV_NETMAP */
3854 /*********************************************************************/