/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/eventhandler.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/in_cksum.h>

#include <net/ethernet.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <net/rss_config.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include "ena_datapath.h"
#include "ena_sysctl.h"

#ifdef DEV_NETMAP
#include "ena_netmap.h"
#endif /* DEV_NETMAP */
/*********************************************************
 *  Function prototypes
 *********************************************************/
static int ena_probe(device_t);
static void ena_intr_msix_mgmnt(void *);
static void ena_free_pci_resources(struct ena_adapter *);
static int ena_change_mtu(if_t, int);
static inline void ena_alloc_counters(counter_u64_t *, int);
static inline void ena_free_counters(counter_u64_t *, int);
static inline void ena_reset_counters(counter_u64_t *, int);
static void ena_init_io_rings_common(struct ena_adapter *,
    struct ena_ring *, uint16_t);
static void ena_init_io_rings_basic(struct ena_adapter *);
static void ena_init_io_rings_advanced(struct ena_adapter *);
static void ena_init_io_rings(struct ena_adapter *);
static void ena_free_io_ring_resources(struct ena_adapter *, unsigned int);
static void ena_free_all_io_rings_resources(struct ena_adapter *);
static int ena_setup_tx_dma_tag(struct ena_adapter *);
static int ena_free_tx_dma_tag(struct ena_adapter *);
static int ena_setup_rx_dma_tag(struct ena_adapter *);
static int ena_free_rx_dma_tag(struct ena_adapter *);
static void ena_release_all_tx_dmamap(struct ena_ring *);
static int ena_setup_tx_resources(struct ena_adapter *, int);
static void ena_free_tx_resources(struct ena_adapter *, int);
static int ena_setup_all_tx_resources(struct ena_adapter *);
static void ena_free_all_tx_resources(struct ena_adapter *);
static int ena_setup_rx_resources(struct ena_adapter *, unsigned int);
static void ena_free_rx_resources(struct ena_adapter *, unsigned int);
static int ena_setup_all_rx_resources(struct ena_adapter *);
static void ena_free_all_rx_resources(struct ena_adapter *);
static inline int ena_alloc_rx_mbuf(struct ena_adapter *, struct ena_ring *,
    struct ena_rx_buffer *);
static void ena_free_rx_mbuf(struct ena_adapter *, struct ena_ring *,
    struct ena_rx_buffer *);
static void ena_free_rx_bufs(struct ena_adapter *, unsigned int);
static void ena_refill_all_rx_bufs(struct ena_adapter *);
static void ena_free_all_rx_bufs(struct ena_adapter *);
static void ena_free_tx_bufs(struct ena_adapter *, unsigned int);
static void ena_free_all_tx_bufs(struct ena_adapter *);
static void ena_destroy_all_tx_queues(struct ena_adapter *);
static void ena_destroy_all_rx_queues(struct ena_adapter *);
static void ena_destroy_all_io_queues(struct ena_adapter *);
static int ena_create_io_queues(struct ena_adapter *);
static int ena_handle_msix(void *);
static int ena_enable_msix(struct ena_adapter *);
static void ena_setup_mgmnt_intr(struct ena_adapter *);
static int ena_setup_io_intr(struct ena_adapter *);
static int ena_request_mgmnt_irq(struct ena_adapter *);
static int ena_request_io_irq(struct ena_adapter *);
static void ena_free_mgmnt_irq(struct ena_adapter *);
static void ena_free_io_irq(struct ena_adapter *);
static void ena_free_irqs(struct ena_adapter *);
static void ena_disable_msix(struct ena_adapter *);
static void ena_unmask_all_io_irqs(struct ena_adapter *);
static int ena_rss_configure(struct ena_adapter *);
static int ena_up_complete(struct ena_adapter *);
static uint64_t ena_get_counter(if_t, ift_counter);
static int ena_media_change(if_t);
static void ena_media_status(if_t, struct ifmediareq *);
static void ena_init(void *);
static int ena_ioctl(if_t, u_long, caddr_t);
static int ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *);
static void ena_update_host_info(struct ena_admin_host_info *, if_t);
static void ena_update_hwassist(struct ena_adapter *);
static int ena_setup_ifnet(device_t, struct ena_adapter *,
    struct ena_com_dev_get_features_ctx *);
static int ena_enable_wc(struct resource *);
static int ena_set_queues_placement_policy(device_t, struct ena_com_dev *,
    struct ena_admin_feature_llq_desc *, struct ena_llq_configurations *);
static uint32_t ena_calc_max_io_queue_num(device_t, struct ena_com_dev *,
    struct ena_com_dev_get_features_ctx *);
static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *);
static int ena_rss_init_default(struct ena_adapter *);
static void ena_rss_init_default_deferred(void *);
static void ena_config_host_info(struct ena_com_dev *, device_t);
static int ena_attach(device_t);
static int ena_detach(device_t);
static int ena_device_init(struct ena_adapter *, device_t,
    struct ena_com_dev_get_features_ctx *, int *);
static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *);
static void ena_update_on_link_change(void *, struct ena_admin_aenq_entry *);
static void unimplemented_aenq_handler(void *,
    struct ena_admin_aenq_entry *);
static int ena_copy_eni_metrics(struct ena_adapter *);
static void ena_timer_service(void *);
static char ena_version[] = DEVICE_NAME DRV_MODULE_NAME " v" DRV_MODULE_VERSION;

static ena_vendor_info_t ena_vendor_info_array[] = {
    { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF, 0 },
    { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF_RSERV0, 0 },
    { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF, 0 },
    { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF_RSERV0, 0 },
    /* Last entry */
    { 0, 0, 0 }
};
/*
 * Contains pointers to event handlers, e.g. link state change.
 */
static struct ena_aenq_handlers aenq_handlers;

static void
ena_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    if (error != 0)
        return;
    *(bus_addr_t *)arg = segs[0].ds_addr;
}
int
ena_dma_alloc(device_t dmadev, bus_size_t size,
    ena_mem_handle_t *dma, int mapflags, bus_size_t alignment)
{
    struct ena_adapter *adapter = device_get_softc(dmadev);
    uint32_t maxsize;
    uint64_t dma_space_addr;
    int error;

    maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE;

    dma_space_addr = ENA_DMA_BIT_MASK(adapter->dma_width);
    if (unlikely(dma_space_addr == 0))
        dma_space_addr = BUS_SPACE_MAXADDR;

    error = bus_dma_tag_create(bus_get_dma_tag(dmadev), /* parent */
        alignment, 0,      /* alignment, bounds */
        dma_space_addr,    /* lowaddr of exclusion window */
        BUS_SPACE_MAXADDR, /* highaddr of exclusion window */
        NULL, NULL,        /* filter, filterarg */
        maxsize,           /* maxsize */
        1,                 /* nsegments */
        maxsize,           /* maxsegsize */
        BUS_DMA_ALLOCNOW,  /* flags */
        NULL,              /* lockfunc */
        NULL,              /* lockfuncarg */
        &dma->tag);
    if (unlikely(error != 0)) {
        ena_trace(NULL, ENA_ALERT, "bus_dma_tag_create failed: %d\n", error);
        goto fail_tag;
    }

    error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
        BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->map);
    if (unlikely(error != 0)) {
        ena_trace(NULL, ENA_ALERT, "bus_dmamem_alloc(%ju) failed: %d\n",
            (uintmax_t)size, error);
        goto fail_map_create;
    }

    dma->paddr = 0;
    error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr,
        size, ena_dmamap_callback, &dma->paddr, mapflags);
    if (unlikely((error != 0) || (dma->paddr == 0))) {
        ena_trace(NULL, ENA_ALERT, "bus_dmamap_load failed: %d\n", error);
        goto fail_map_load;
    }

    bus_dmamap_sync(dma->tag, dma->map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    return (0);

fail_map_load:
    bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
fail_map_create:
    bus_dma_tag_destroy(dma->tag);
fail_tag:
    dma->tag = NULL;

    return (error);
}
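/*
 * Illustrative usage sketch (not part of the original file): a caller that
 * needs a coherent DMA area would do something along the lines of
 *
 *	ena_mem_handle_t handle;
 *
 *	if (ena_dma_alloc(adapter->pdev, len, &handle, BUS_DMA_NOWAIT,
 *	    alignment) == 0) {
 *		... use handle.vaddr (CPU) and handle.paddr (device) ...
 *	}
 *
 * where 'handle', 'len' and 'alignment' are hypothetical names. On success
 * the helper has created the tag, allocated zeroed coherent memory and
 * loaded the map, so both addresses stay valid until the area is freed.
 */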
/*
 * This function should generate a unique key for the whole driver.
 * If the key was already generated in a previous call (for example
 * for another adapter), then it should be returned instead.
 */
void
ena_rss_key_fill(void *key, size_t size)
{
    static bool key_generated;
    static uint8_t default_key[ENA_HASH_KEY_SIZE];

    KASSERT(size <= ENA_HASH_KEY_SIZE,
        ("Requested more bytes than ENA RSS key can hold"));

    if (!key_generated) {
        arc4random_buf(default_key, ENA_HASH_KEY_SIZE);
        key_generated = true;
    }

    memcpy(key, default_key, size);
}
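/*
 * Illustrative note (not part of the original source): because the key is
 * kept in function-local static storage, every adapter instance asking for
 * the RSS key receives the same randomly generated bytes, e.g.:
 *
 *	uint8_t k0[ENA_HASH_KEY_SIZE], k1[ENA_HASH_KEY_SIZE];
 *
 *	ena_rss_key_fill(k0, sizeof(k0));
 *	ena_rss_key_fill(k1, sizeof(k1));
 *	... memcmp(k0, k1, sizeof(k0)) == 0 ...
 */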
static void
ena_free_pci_resources(struct ena_adapter *adapter)
{
    device_t pdev = adapter->pdev;

    if (adapter->memory != NULL) {
        bus_release_resource(pdev, SYS_RES_MEMORY,
            PCIR_BAR(ENA_MEM_BAR), adapter->memory);
    }

    if (adapter->registers != NULL) {
        bus_release_resource(pdev, SYS_RES_MEMORY,
            PCIR_BAR(ENA_REG_BAR), adapter->registers);
    }
}

static int
ena_probe(device_t dev)
{
    ena_vendor_info_t *ent;
    char adapter_name[60];
    uint16_t pci_vendor_id = 0;
    uint16_t pci_device_id = 0;

    pci_vendor_id = pci_get_vendor(dev);
    pci_device_id = pci_get_device(dev);

    ent = ena_vendor_info_array;
    while (ent->vendor_id != 0) {
        if ((pci_vendor_id == ent->vendor_id) &&
            (pci_device_id == ent->device_id)) {
            ena_trace(NULL, ENA_DBG, "vendor=%x device=%x\n",
                pci_vendor_id, pci_device_id);

            sprintf(adapter_name, DEVICE_DESC);
            device_set_desc_copy(dev, adapter_name);
            return (BUS_PROBE_DEFAULT);
        }

        ent++;
    }

    return (ENXIO);
}
static int
ena_change_mtu(if_t ifp, int new_mtu)
{
    struct ena_adapter *adapter = if_getsoftc(ifp);
    int rc;

    if ((new_mtu > adapter->max_mtu) || (new_mtu < ENA_MIN_MTU)) {
        device_printf(adapter->pdev, "Invalid MTU setting. "
            "new_mtu: %d max mtu: %d min mtu: %d\n",
            new_mtu, adapter->max_mtu, ENA_MIN_MTU);
        return (EINVAL);
    }

    rc = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
    if (likely(rc == 0)) {
        ena_trace(NULL, ENA_DBG, "set MTU to %d\n", new_mtu);
        if_setmtu(ifp, new_mtu);
    } else {
        device_printf(adapter->pdev, "Failed to set MTU to %d\n",
            new_mtu);
    }

    return (rc);
}
static inline void
ena_alloc_counters(counter_u64_t *begin, int size)
{
    counter_u64_t *end = (counter_u64_t *)((char *)begin + size);

    for (; begin < end; ++begin)
        *begin = counter_u64_alloc(M_WAITOK);
}

static inline void
ena_free_counters(counter_u64_t *begin, int size)
{
    counter_u64_t *end = (counter_u64_t *)((char *)begin + size);

    for (; begin < end; ++begin)
        counter_u64_free(*begin);
}

static inline void
ena_reset_counters(counter_u64_t *begin, int size)
{
    counter_u64_t *end = (counter_u64_t *)((char *)begin + size);

    for (; begin < end; ++begin)
        counter_u64_zero(*begin);
}
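/*
 * Illustrative note (not part of the original source): the helpers above
 * treat a statistics structure as a flat array of counter_u64_t, e.g.:
 *
 *	ena_alloc_counters((counter_u64_t *)&txr->tx_stats,
 *	    sizeof(txr->tx_stats));
 *
 * which is why every member of the *_stats structures must itself be a
 * counter_u64_t, so the pointer walk from 'begin' to 'end' stays valid.
 */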
static void
ena_init_io_rings_common(struct ena_adapter *adapter, struct ena_ring *ring,
    uint16_t qid)
{
    ring->qid = qid;
    ring->adapter = adapter;
    ring->ena_dev = adapter->ena_dev;
    ring->first_interrupt = false;
    ring->no_interrupt_event_cnt = 0;
}

static void
ena_init_io_rings_basic(struct ena_adapter *adapter)
{
    struct ena_com_dev *ena_dev;
    struct ena_ring *txr, *rxr;
    struct ena_que *que;
    int i;

    ena_dev = adapter->ena_dev;

    for (i = 0; i < adapter->num_io_queues; i++) {
        txr = &adapter->tx_ring[i];
        rxr = &adapter->rx_ring[i];

        /* TX/RX common ring state */
        ena_init_io_rings_common(adapter, txr, i);
        ena_init_io_rings_common(adapter, rxr, i);

        /* TX specific ring state */
        txr->tx_max_header_size = ena_dev->tx_max_header_size;
        txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;

        que = &adapter->que[i];
        que->adapter = adapter;
        que->id = i;
        que->tx_ring = txr;
        que->rx_ring = rxr;

        txr->que = que;
        rxr->que = que;

        rxr->empty_rx_queue = 0;
        rxr->rx_mbuf_sz = ena_mbuf_sz;
    }
}

static void
ena_init_io_rings_advanced(struct ena_adapter *adapter)
{
    struct ena_ring *txr, *rxr;
    int i;

    for (i = 0; i < adapter->num_io_queues; i++) {
        txr = &adapter->tx_ring[i];
        rxr = &adapter->rx_ring[i];

        /* Allocate a buf ring */
        txr->buf_ring_size = adapter->buf_ring_size;
        txr->br = buf_ring_alloc(txr->buf_ring_size, M_DEVBUF,
            M_WAITOK, &txr->ring_mtx);

        /* Allocate Tx statistics. */
        ena_alloc_counters((counter_u64_t *)&txr->tx_stats,
            sizeof(txr->tx_stats));

        /* Allocate Rx statistics. */
        ena_alloc_counters((counter_u64_t *)&rxr->rx_stats,
            sizeof(rxr->rx_stats));

        /* Initialize locks */
        snprintf(txr->mtx_name, nitems(txr->mtx_name), "%s:tx(%d)",
            device_get_nameunit(adapter->pdev), i);
        snprintf(rxr->mtx_name, nitems(rxr->mtx_name), "%s:rx(%d)",
            device_get_nameunit(adapter->pdev), i);

        mtx_init(&txr->ring_mtx, txr->mtx_name, NULL, MTX_DEF);
    }
}

static void
ena_init_io_rings(struct ena_adapter *adapter)
{
    /*
     * IO rings initialization can be divided into 2 steps:
     * 1. Initialize variables and fields with initial values and copy
     *    them from adapter/ena_dev (basic)
     * 2. Allocate mutex, counters and buf_ring (advanced)
     */
    ena_init_io_rings_basic(adapter);
    ena_init_io_rings_advanced(adapter);
}
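/*
 * Illustrative note (not part of the original source): the basic/advanced
 * split matters for the reconfiguration paths later in this file. Changing
 * the ring sizes only re-runs ena_init_io_rings_basic(), while changing
 * the drbr size frees and re-runs ena_init_io_rings_advanced(), since only
 * the latter owns the buf_ring, counter and mutex allocations.
 */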
static void
ena_free_io_ring_resources(struct ena_adapter *adapter, unsigned int qid)
{
    struct ena_ring *txr = &adapter->tx_ring[qid];
    struct ena_ring *rxr = &adapter->rx_ring[qid];

    ena_free_counters((counter_u64_t *)&txr->tx_stats,
        sizeof(txr->tx_stats));
    ena_free_counters((counter_u64_t *)&rxr->rx_stats,
        sizeof(rxr->rx_stats));

    ENA_RING_MTX_LOCK(txr);
    drbr_free(txr->br, M_DEVBUF);
    ENA_RING_MTX_UNLOCK(txr);

    mtx_destroy(&txr->ring_mtx);
}

static void
ena_free_all_io_rings_resources(struct ena_adapter *adapter)
{
    int i;

    for (i = 0; i < adapter->num_io_queues; i++)
        ena_free_io_ring_resources(adapter, i);
}

static int
ena_setup_tx_dma_tag(struct ena_adapter *adapter)
{
    int ret;

    /* Create DMA tag for Tx buffers */
    ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev),
        1, 0,                                 /* alignment, bounds */
        ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */
        BUS_SPACE_MAXADDR,                    /* highaddr of excl window */
        NULL, NULL,                           /* filter, filterarg */
        ENA_TSO_MAXSIZE,                      /* maxsize */
        adapter->max_tx_sgl_size - 1,         /* nsegments */
        ENA_TSO_MAXSIZE,                      /* maxsegsize */
        0,                                    /* flags */
        NULL,                                 /* lockfunc */
        NULL,                                 /* lockfuncarg */
        &adapter->tx_buf_tag);

    return (ret);
}

static int
ena_free_tx_dma_tag(struct ena_adapter *adapter)
{
    int ret;

    ret = bus_dma_tag_destroy(adapter->tx_buf_tag);

    if (likely(ret == 0))
        adapter->tx_buf_tag = NULL;

    return (ret);
}

static int
ena_setup_rx_dma_tag(struct ena_adapter *adapter)
{
    int ret;

    /* Create DMA tag for Rx buffers */
    ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), /* parent */
        1, 0,                                 /* alignment, bounds */
        ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */
        BUS_SPACE_MAXADDR,                    /* highaddr of excl window */
        NULL, NULL,                           /* filter, filterarg */
        ena_mbuf_sz,                          /* maxsize */
        adapter->max_rx_sgl_size,             /* nsegments */
        ena_mbuf_sz,                          /* maxsegsize */
        0,                                    /* flags */
        NULL,                                 /* lockfunc */
        NULL,                                 /* lockfuncarg */
        &adapter->rx_buf_tag);

    return (ret);
}

static int
ena_free_rx_dma_tag(struct ena_adapter *adapter)
{
    int ret;

    ret = bus_dma_tag_destroy(adapter->rx_buf_tag);

    if (likely(ret == 0))
        adapter->rx_buf_tag = NULL;

    return (ret);
}

static void
ena_release_all_tx_dmamap(struct ena_ring *tx_ring)
{
    struct ena_adapter *adapter = tx_ring->adapter;
    struct ena_tx_buffer *tx_info;
    bus_dma_tag_t tx_tag = adapter->tx_buf_tag;
    int i;
#ifdef DEV_NETMAP
    struct ena_netmap_tx_info *nm_info;
    int j;
#endif /* DEV_NETMAP */

    for (i = 0; i < tx_ring->ring_size; ++i) {
        tx_info = &tx_ring->tx_buffer_info[i];
#ifdef DEV_NETMAP
        if (adapter->ifp->if_capenable & IFCAP_NETMAP) {
            nm_info = &tx_info->nm_info;
            for (j = 0; j < ENA_PKT_MAX_BUFS; ++j) {
                if (nm_info->map_seg[j] != NULL) {
                    bus_dmamap_destroy(tx_tag,
                        nm_info->map_seg[j]);
                    nm_info->map_seg[j] = NULL;
                }
            }
        }
#endif /* DEV_NETMAP */
        if (tx_info->dmamap != NULL) {
            bus_dmamap_destroy(tx_tag, tx_info->dmamap);
            tx_info->dmamap = NULL;
        }
    }
}
/**
 * ena_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, otherwise on failure.
 **/
static int
ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
{
    struct ena_que *que = &adapter->que[qid];
    struct ena_ring *tx_ring = que->tx_ring;
    int size, i, err;
#ifdef DEV_NETMAP
    bus_dmamap_t *map;
    int j;

    ena_netmap_reset_tx_ring(adapter, qid);
#endif /* DEV_NETMAP */

    size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;

    tx_ring->tx_buffer_info = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
    if (unlikely(tx_ring->tx_buffer_info == NULL))
        return (ENOMEM);

    size = sizeof(uint16_t) * tx_ring->ring_size;
    tx_ring->free_tx_ids = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
    if (unlikely(tx_ring->free_tx_ids == NULL))
        goto err_buf_info_free;

    size = tx_ring->tx_max_header_size;
    tx_ring->push_buf_intermediate_buf = malloc(size, M_DEVBUF,
        M_NOWAIT | M_ZERO);
    if (unlikely(tx_ring->push_buf_intermediate_buf == NULL))
        goto err_tx_ids_free;

    /* Req id stack for TX OOO completions */
    for (i = 0; i < tx_ring->ring_size; i++)
        tx_ring->free_tx_ids[i] = i;

    /* Reset TX statistics. */
    ena_reset_counters((counter_u64_t *)&tx_ring->tx_stats,
        sizeof(tx_ring->tx_stats));

    tx_ring->next_to_use = 0;
    tx_ring->next_to_clean = 0;
    tx_ring->acum_pkts = 0;

    /* Make sure that drbr is empty */
    ENA_RING_MTX_LOCK(tx_ring);
    drbr_flush(adapter->ifp, tx_ring->br);
    ENA_RING_MTX_UNLOCK(tx_ring);

    /* ... and create the buffer DMA maps */
    for (i = 0; i < tx_ring->ring_size; i++) {
        err = bus_dmamap_create(adapter->tx_buf_tag, 0,
            &tx_ring->tx_buffer_info[i].dmamap);
        if (unlikely(err != 0)) {
            ena_trace(NULL, ENA_ALERT,
                "Unable to create Tx DMA map for buffer %d\n",
                i);
            goto err_map_release;
        }

#ifdef DEV_NETMAP
        if (adapter->ifp->if_capenable & IFCAP_NETMAP) {
            map = tx_ring->tx_buffer_info[i].nm_info.map_seg;
            for (j = 0; j < ENA_PKT_MAX_BUFS; j++) {
                err = bus_dmamap_create(adapter->tx_buf_tag, 0,
                    &map[j]);
                if (unlikely(err != 0)) {
                    ena_trace(NULL, ENA_ALERT, "Unable to create "
                        "Tx DMA for buffer %d %d\n", i, j);
                    goto err_map_release;
                }
            }
        }
#endif /* DEV_NETMAP */
    }

    /* Allocate taskqueues */
    TASK_INIT(&tx_ring->enqueue_task, 0, ena_deferred_mq_start, tx_ring);
    tx_ring->enqueue_tq = taskqueue_create_fast("ena_tx_enque", M_NOWAIT,
        taskqueue_thread_enqueue, &tx_ring->enqueue_tq);
    if (unlikely(tx_ring->enqueue_tq == NULL)) {
        ena_trace(NULL, ENA_ALERT,
            "Unable to create taskqueue for enqueue task\n");
        i = tx_ring->ring_size;
        goto err_map_release;
    }

    tx_ring->running = true;

    taskqueue_start_threads(&tx_ring->enqueue_tq, 1, PI_NET,
        "%s txeq %d", device_get_nameunit(adapter->pdev), que->cpu);

    return (0);

err_map_release:
    ena_release_all_tx_dmamap(tx_ring);
err_tx_ids_free:
    free(tx_ring->free_tx_ids, M_DEVBUF);
    tx_ring->free_tx_ids = NULL;
err_buf_info_free:
    free(tx_ring->tx_buffer_info, M_DEVBUF);
    tx_ring->tx_buffer_info = NULL;

    return (ENOMEM);
}
/**
 * ena_free_tx_resources - Free Tx Resources per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all transmit software resources
 **/
static void
ena_free_tx_resources(struct ena_adapter *adapter, int qid)
{
    struct ena_ring *tx_ring = &adapter->tx_ring[qid];
#ifdef DEV_NETMAP
    struct ena_netmap_tx_info *nm_info;
    int j;
#endif /* DEV_NETMAP */

    while (taskqueue_cancel(tx_ring->enqueue_tq, &tx_ring->enqueue_task,
        NULL))
        taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task);

    taskqueue_free(tx_ring->enqueue_tq);

    ENA_RING_MTX_LOCK(tx_ring);
    /* Flush buffer ring, */
    drbr_flush(adapter->ifp, tx_ring->br);

    /* Free buffer DMA maps, */
    for (int i = 0; i < tx_ring->ring_size; i++) {
        bus_dmamap_sync(adapter->tx_buf_tag,
            tx_ring->tx_buffer_info[i].dmamap, BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(adapter->tx_buf_tag,
            tx_ring->tx_buffer_info[i].dmamap);
        bus_dmamap_destroy(adapter->tx_buf_tag,
            tx_ring->tx_buffer_info[i].dmamap);

#ifdef DEV_NETMAP
        if (adapter->ifp->if_capenable & IFCAP_NETMAP) {
            nm_info = &tx_ring->tx_buffer_info[i].nm_info;
            for (j = 0; j < ENA_PKT_MAX_BUFS; j++) {
                if (nm_info->socket_buf_idx[j] != 0) {
                    bus_dmamap_sync(adapter->tx_buf_tag,
                        nm_info->map_seg[j],
                        BUS_DMASYNC_POSTWRITE);
                    ena_netmap_unload(adapter,
                        nm_info->map_seg[j]);
                }
                bus_dmamap_destroy(adapter->tx_buf_tag,
                    nm_info->map_seg[j]);
                nm_info->socket_buf_idx[j] = 0;
            }
        }
#endif /* DEV_NETMAP */

        m_freem(tx_ring->tx_buffer_info[i].mbuf);
        tx_ring->tx_buffer_info[i].mbuf = NULL;
    }
    ENA_RING_MTX_UNLOCK(tx_ring);

    /* And free allocated memory. */
    free(tx_ring->tx_buffer_info, M_DEVBUF);
    tx_ring->tx_buffer_info = NULL;

    free(tx_ring->free_tx_ids, M_DEVBUF);
    tx_ring->free_tx_ids = NULL;

    free(tx_ring->push_buf_intermediate_buf, M_DEVBUF);
    tx_ring->push_buf_intermediate_buf = NULL;
}

/**
 * ena_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: network interface device structure
 *
 * Returns 0 on success, otherwise on failure.
 **/
static int
ena_setup_all_tx_resources(struct ena_adapter *adapter)
{
    int i, rc;

    for (i = 0; i < adapter->num_io_queues; i++) {
        rc = ena_setup_tx_resources(adapter, i);
        if (rc != 0) {
            device_printf(adapter->pdev,
                "Allocation for Tx Queue %u failed\n", i);
            goto err_setup_tx;
        }
    }

    return (0);

err_setup_tx:
    /* Rewind the index freeing the rings as we go */
    while (i--)
        ena_free_tx_resources(adapter, i);
    return (rc);
}

/**
 * ena_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: network interface device structure
 *
 * Free all transmit software resources
 **/
static void
ena_free_all_tx_resources(struct ena_adapter *adapter)
{
    int i;

    for (i = 0; i < adapter->num_io_queues; i++)
        ena_free_tx_resources(adapter, i);
}
/**
 * ena_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, otherwise on failure.
 **/
static int
ena_setup_rx_resources(struct ena_adapter *adapter, unsigned int qid)
{
    struct ena_que *que = &adapter->que[qid];
    struct ena_ring *rx_ring = que->rx_ring;
    int size, err, i;

    size = sizeof(struct ena_rx_buffer) * rx_ring->ring_size;

#ifdef DEV_NETMAP
    ena_netmap_reset_rx_ring(adapter, qid);
    rx_ring->initialized = false;
#endif /* DEV_NETMAP */

    /*
     * Alloc an extra element, so in the Rx path
     * we can always prefetch rx_info + 1.
     */
    size += sizeof(struct ena_rx_buffer);

    rx_ring->rx_buffer_info = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);

    size = sizeof(uint16_t) * rx_ring->ring_size;
    rx_ring->free_rx_ids = malloc(size, M_DEVBUF, M_WAITOK);

    for (i = 0; i < rx_ring->ring_size; i++)
        rx_ring->free_rx_ids[i] = i;

    /* Reset RX statistics. */
    ena_reset_counters((counter_u64_t *)&rx_ring->rx_stats,
        sizeof(rx_ring->rx_stats));

    rx_ring->next_to_clean = 0;
    rx_ring->next_to_use = 0;

    /* ... and create the buffer DMA maps */
    for (i = 0; i < rx_ring->ring_size; i++) {
        err = bus_dmamap_create(adapter->rx_buf_tag, 0,
            &(rx_ring->rx_buffer_info[i].map));
        if (err != 0) {
            ena_trace(NULL, ENA_ALERT,
                "Unable to create Rx DMA map for buffer %d\n", i);
            goto err_buf_info_unmap;
        }
    }

    /* Create LRO for the ring */
    if ((adapter->ifp->if_capenable & IFCAP_LRO) != 0) {
        int err = tcp_lro_init(&rx_ring->lro);
        if (err != 0) {
            device_printf(adapter->pdev,
                "LRO[%d] Initialization failed!\n", qid);
        } else {
            ena_trace(NULL, ENA_INFO,
                "RX Soft LRO[%d] Initialized\n", qid);
            rx_ring->lro.ifp = adapter->ifp;
        }
    }

    return (0);

err_buf_info_unmap:
    while (i--) {
        bus_dmamap_destroy(adapter->rx_buf_tag,
            rx_ring->rx_buffer_info[i].map);
    }

    free(rx_ring->free_rx_ids, M_DEVBUF);
    rx_ring->free_rx_ids = NULL;
    free(rx_ring->rx_buffer_info, M_DEVBUF);
    rx_ring->rx_buffer_info = NULL;
    return (ENOMEM);
}
/**
 * ena_free_rx_resources - Free Rx Resources
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all receive software resources
 **/
static void
ena_free_rx_resources(struct ena_adapter *adapter, unsigned int qid)
{
    struct ena_ring *rx_ring = &adapter->rx_ring[qid];

    /* Free buffer DMA maps, */
    for (int i = 0; i < rx_ring->ring_size; i++) {
        bus_dmamap_sync(adapter->rx_buf_tag,
            rx_ring->rx_buffer_info[i].map, BUS_DMASYNC_POSTREAD);
        m_freem(rx_ring->rx_buffer_info[i].mbuf);
        rx_ring->rx_buffer_info[i].mbuf = NULL;
        bus_dmamap_unload(adapter->rx_buf_tag,
            rx_ring->rx_buffer_info[i].map);
        bus_dmamap_destroy(adapter->rx_buf_tag,
            rx_ring->rx_buffer_info[i].map);
    }

    /* free LRO resources, */
    tcp_lro_free(&rx_ring->lro);

    /* free allocated memory */
    free(rx_ring->rx_buffer_info, M_DEVBUF);
    rx_ring->rx_buffer_info = NULL;

    free(rx_ring->free_rx_ids, M_DEVBUF);
    rx_ring->free_rx_ids = NULL;
}

/**
 * ena_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: network interface device structure
 *
 * Returns 0 on success, otherwise on failure.
 **/
static int
ena_setup_all_rx_resources(struct ena_adapter *adapter)
{
    int i, rc = 0;

    for (i = 0; i < adapter->num_io_queues; i++) {
        rc = ena_setup_rx_resources(adapter, i);
        if (rc != 0) {
            device_printf(adapter->pdev,
                "Allocation for Rx Queue %u failed\n", i);
            goto err_setup_rx;
        }
    }
    return (0);

err_setup_rx:
    /* Rewind the index freeing the rings as we go */
    while (i--)
        ena_free_rx_resources(adapter, i);
    return (rc);
}

/**
 * ena_free_all_rx_resources - Free Rx resources for all queues
 * @adapter: network interface device structure
 *
 * Free all receive software resources
 **/
static void
ena_free_all_rx_resources(struct ena_adapter *adapter)
{
    int i;

    for (i = 0; i < adapter->num_io_queues; i++)
        ena_free_rx_resources(adapter, i);
}
static inline int
ena_alloc_rx_mbuf(struct ena_adapter *adapter,
    struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info)
{
    struct ena_com_buf *ena_buf;
    bus_dma_segment_t segs[1];
    int nsegs, error;
    int mlen;

    /* if previous allocated frag is not used */
    if (unlikely(rx_info->mbuf != NULL))
        return (0);

    /* Get mbuf using UMA allocator */
    rx_info->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
        rx_ring->rx_mbuf_sz);

    if (unlikely(rx_info->mbuf == NULL)) {
        counter_u64_add(rx_ring->rx_stats.mjum_alloc_fail, 1);
        rx_info->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
        if (unlikely(rx_info->mbuf == NULL)) {
            counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1);
            return (ENOMEM);
        }
        mlen = MCLBYTES;
    } else {
        mlen = rx_ring->rx_mbuf_sz;
    }
    /* Set mbuf length */
    rx_info->mbuf->m_pkthdr.len = rx_info->mbuf->m_len = mlen;

    /* Map packets for DMA */
    ena_trace(NULL, ENA_DBG | ENA_RSC | ENA_RXPTH,
        "Using tag %p for buffers' DMA mapping, mbuf %p len: %d\n",
        adapter->rx_buf_tag, rx_info->mbuf, rx_info->mbuf->m_len);
    error = bus_dmamap_load_mbuf_sg(adapter->rx_buf_tag, rx_info->map,
        rx_info->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
    if (unlikely((error != 0) || (nsegs != 1))) {
        ena_trace(NULL, ENA_WARNING, "failed to map mbuf, error: %d, "
            "nsegs: %d\n", error, nsegs);
        counter_u64_add(rx_ring->rx_stats.dma_mapping_err, 1);
        goto exit;
    }

    bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD);

    ena_buf = &rx_info->ena_buf;
    ena_buf->paddr = segs[0].ds_addr;
    ena_buf->len = mlen;

    ena_trace(NULL, ENA_DBG | ENA_RSC | ENA_RXPTH,
        "ALLOC RX BUF: mbuf %p, rx_info %p, len %d, paddr %#jx\n",
        rx_info->mbuf, rx_info, ena_buf->len, (uintmax_t)ena_buf->paddr);

    return (0);

exit:
    m_freem(rx_info->mbuf);
    rx_info->mbuf = NULL;
    return (EFAULT);
}
static void
ena_free_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring,
    struct ena_rx_buffer *rx_info)
{
    if (rx_info->mbuf == NULL) {
        ena_trace(NULL, ENA_WARNING,
            "Trying to free unallocated buffer\n");
        return;
    }

    bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map,
        BUS_DMASYNC_POSTREAD);
    bus_dmamap_unload(adapter->rx_buf_tag, rx_info->map);
    m_freem(rx_info->mbuf);
    rx_info->mbuf = NULL;
}
/**
 * ena_refill_rx_bufs - Refills ring with descriptors
 * @rx_ring: the ring which we want to feed with free descriptors
 * @num: number of descriptors to refill
 * Refills the ring with newly allocated DMA-mapped mbufs for receiving
 **/
static int
ena_refill_rx_bufs(struct ena_ring *rx_ring, uint32_t num)
{
    struct ena_adapter *adapter = rx_ring->adapter;
    uint16_t next_to_use, req_id;
    uint32_t i;
    int rc;

    ena_trace(NULL, ENA_DBG | ENA_RXPTH | ENA_RSC, "refill qid: %d\n",
        rx_ring->qid);

    next_to_use = rx_ring->next_to_use;

    for (i = 0; i < num; i++) {
        struct ena_rx_buffer *rx_info;

        ena_trace(NULL, ENA_DBG | ENA_RXPTH | ENA_RSC,
            "RX buffer - next to use: %d\n", next_to_use);

        req_id = rx_ring->free_rx_ids[next_to_use];
        rx_info = &rx_ring->rx_buffer_info[req_id];
#ifdef DEV_NETMAP
        if (ena_rx_ring_in_netmap(adapter, rx_ring->qid))
            rc = ena_netmap_alloc_rx_slot(adapter, rx_ring, rx_info);
        else
#endif /* DEV_NETMAP */
            rc = ena_alloc_rx_mbuf(adapter, rx_ring, rx_info);
        if (unlikely(rc != 0)) {
            ena_trace(NULL, ENA_WARNING,
                "failed to alloc buffer for rx queue %d\n",
                rx_ring->qid);
            break;
        }
        rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
            &rx_info->ena_buf, req_id);
        if (unlikely(rc != 0)) {
            ena_trace(NULL, ENA_WARNING,
                "failed to add buffer for rx queue %d\n",
                rx_ring->qid);
            break;
        }
        next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
            rx_ring->ring_size);
    }

    if (unlikely(i < num)) {
        counter_u64_add(rx_ring->rx_stats.refil_partial, 1);
        ena_trace(NULL, ENA_WARNING,
            "refilled rx qid %d with only %d mbufs (from %d)\n",
            rx_ring->qid, i, num);
    }

    if (likely(i != 0))
        ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);

    rx_ring->next_to_use = next_to_use;
    return (i);
}
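/*
 * Illustrative note (not from the original source): ENA_RX_RING_IDX_NEXT
 * is assumed here to be the usual power-of-two ring advance, i.e.
 * something equivalent to
 *
 *	next = (cur + 1) & (ring_size - 1);
 *
 * so 'next_to_use' silently wraps back to 0 once it reaches the end of
 * the ring.
 */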
static int
ena_update_buf_ring_size(struct ena_adapter *adapter,
    uint32_t new_buf_ring_size)
{
    uint32_t old_buf_ring_size;
    int rc = 0;
    bool dev_was_up;

    ENA_LOCK_LOCK(adapter);

    old_buf_ring_size = adapter->buf_ring_size;
    adapter->buf_ring_size = new_buf_ring_size;

    dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
    ena_down(adapter);

    /* Reconfigure buf ring for all Tx rings. */
    ena_free_all_io_rings_resources(adapter);
    ena_init_io_rings_advanced(adapter);
    if (dev_was_up) {
        /*
         * If ena_up() fails, it's not because of recent buf_ring size
         * changes. Because of that, we just want to revert the old drbr
         * value and trigger the reset because something else had to
         * go wrong.
         */
        rc = ena_up(adapter);
        if (unlikely(rc != 0)) {
            device_printf(adapter->pdev,
                "Failed to configure device after setting new drbr size: %u. Reverting old value: %u and triggering the reset\n",
                new_buf_ring_size, old_buf_ring_size);

            /* Revert old size and trigger the reset */
            adapter->buf_ring_size = old_buf_ring_size;
            ena_free_all_io_rings_resources(adapter);
            ena_init_io_rings_advanced(adapter);

            ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET,
                adapter);
            ena_trigger_reset(adapter, ENA_REGS_RESET_OS_TRIGGER);
        }
    }

    ENA_LOCK_UNLOCK(adapter);

    return (rc);
}
static int
ena_update_queue_size(struct ena_adapter *adapter, uint32_t new_tx_size,
    uint32_t new_rx_size)
{
    uint32_t old_tx_size, old_rx_size;
    int rc = 0;
    bool dev_was_up;

    ENA_LOCK_LOCK(adapter);

    old_tx_size = adapter->requested_tx_ring_size;
    old_rx_size = adapter->requested_rx_ring_size;
    adapter->requested_tx_ring_size = new_tx_size;
    adapter->requested_rx_ring_size = new_rx_size;

    dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
    ena_down(adapter);

    /* Configure queues with new size. */
    ena_init_io_rings_basic(adapter);
    if (dev_was_up) {
        rc = ena_up(adapter);
        if (unlikely(rc != 0)) {
            device_printf(adapter->pdev,
                "Failed to configure device with the new sizes - Tx: %u Rx: %u. Reverting old values - Tx: %u Rx: %u\n",
                new_tx_size, new_rx_size, old_tx_size, old_rx_size);

            /* Revert old size. */
            adapter->requested_tx_ring_size = old_tx_size;
            adapter->requested_rx_ring_size = old_rx_size;
            ena_init_io_rings_basic(adapter);

            /* And try again. */
            rc = ena_up(adapter);
            if (unlikely(rc != 0)) {
                device_printf(adapter->pdev,
                    "Failed to revert old queue sizes. Triggering device reset.\n");
                /*
                 * If we've failed again, something had to go
                 * wrong. After reset, the device should try to
                 * go up.
                 */
                ENA_FLAG_SET_ATOMIC(
                    ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
                ena_trigger_reset(adapter,
                    ENA_REGS_RESET_OS_TRIGGER);
            }
        }
    }

    ENA_LOCK_UNLOCK(adapter);

    return (rc);
}
static void
ena_update_io_rings(struct ena_adapter *adapter, uint32_t num)
{
    ena_free_all_io_rings_resources(adapter);
    /* Force indirection table to be reinitialized */
    ena_com_rss_destroy(adapter->ena_dev);

    adapter->num_io_queues = num;
    ena_init_io_rings(adapter);
}

/* Caller should sanitize new_num */
static int
ena_update_io_queue_nb(struct ena_adapter *adapter, uint32_t new_num)
{
    uint32_t old_num;
    int rc = 0;
    bool dev_was_up;

    ENA_LOCK_LOCK(adapter);

    dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
    old_num = adapter->num_io_queues;
    ena_down(adapter);

    ena_update_io_rings(adapter, new_num);

    if (dev_was_up) {
        rc = ena_up(adapter);
        if (unlikely(rc != 0)) {
            device_printf(adapter->pdev,
                "Failed to configure device with %u IO queues. "
                "Reverting to previous value: %u\n",
                new_num, old_num);

            ena_update_io_rings(adapter, old_num);

            rc = ena_up(adapter);
            if (unlikely(rc != 0)) {
                device_printf(adapter->pdev,
                    "Failed to revert to previous setup IO "
                    "queues. Triggering device reset.\n");
                ENA_FLAG_SET_ATOMIC(
                    ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
                ena_trigger_reset(adapter,
                    ENA_REGS_RESET_OS_TRIGGER);
            }
        }
    }

    ENA_LOCK_UNLOCK(adapter);

    return (rc);
}
static void
ena_free_rx_bufs(struct ena_adapter *adapter, unsigned int qid)
{
    struct ena_ring *rx_ring = &adapter->rx_ring[qid];
    unsigned int i;

    for (i = 0; i < rx_ring->ring_size; i++) {
        struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];

        if (rx_info->mbuf != NULL)
            ena_free_rx_mbuf(adapter, rx_ring, rx_info);
#ifdef DEV_NETMAP
        if (((if_getflags(adapter->ifp) & IFF_DYING) == 0) &&
            (adapter->ifp->if_capenable & IFCAP_NETMAP)) {
            if (rx_info->netmap_buf_idx != 0)
                ena_netmap_free_rx_slot(adapter, rx_ring,
                    rx_info);
        }
#endif /* DEV_NETMAP */
    }
}

/**
 * ena_refill_all_rx_bufs - allocate all queues Rx buffers
 * @adapter: network interface device structure
 *
 **/
static void
ena_refill_all_rx_bufs(struct ena_adapter *adapter)
{
    struct ena_ring *rx_ring;
    int i, rc, bufs_num;

    for (i = 0; i < adapter->num_io_queues; i++) {
        rx_ring = &adapter->rx_ring[i];
        bufs_num = rx_ring->ring_size - 1;
        rc = ena_refill_rx_bufs(rx_ring, bufs_num);
        if (unlikely(rc != bufs_num))
            ena_trace(NULL, ENA_WARNING,
                "refilling Queue %d failed. "
                "Allocated %d buffers from: %d\n", i, rc, bufs_num);
#ifdef DEV_NETMAP
        rx_ring->initialized = true;
#endif /* DEV_NETMAP */
    }
}

static void
ena_free_all_rx_bufs(struct ena_adapter *adapter)
{
    int i;

    for (i = 0; i < adapter->num_io_queues; i++)
        ena_free_rx_bufs(adapter, i);
}
/**
 * ena_free_tx_bufs - Free Tx Buffers per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 **/
static void
ena_free_tx_bufs(struct ena_adapter *adapter, unsigned int qid)
{
    bool print_once = true;
    struct ena_ring *tx_ring = &adapter->tx_ring[qid];

    ENA_RING_MTX_LOCK(tx_ring);
    for (int i = 0; i < tx_ring->ring_size; i++) {
        struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];

        if (tx_info->mbuf == NULL)
            continue;

        if (print_once) {
            device_printf(adapter->pdev,
                "free uncompleted tx mbuf qid %d idx 0x%x\n",
                qid, i);
            print_once = false;
        } else {
            ena_trace(NULL, ENA_DBG,
                "free uncompleted tx mbuf qid %d idx 0x%x\n",
                qid, i);
        }

        bus_dmamap_sync(adapter->tx_buf_tag, tx_info->dmamap,
            BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(adapter->tx_buf_tag, tx_info->dmamap);

        m_free(tx_info->mbuf);
        tx_info->mbuf = NULL;
    }
    ENA_RING_MTX_UNLOCK(tx_ring);
}

static void
ena_free_all_tx_bufs(struct ena_adapter *adapter)
{
    for (int i = 0; i < adapter->num_io_queues; i++)
        ena_free_tx_bufs(adapter, i);
}
static void
ena_destroy_all_tx_queues(struct ena_adapter *adapter)
{
    uint16_t ena_qid;
    int i;

    for (i = 0; i < adapter->num_io_queues; i++) {
        ena_qid = ENA_IO_TXQ_IDX(i);
        ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
    }
}

static void
ena_destroy_all_rx_queues(struct ena_adapter *adapter)
{
    uint16_t ena_qid;
    int i;

    for (i = 0; i < adapter->num_io_queues; i++) {
        ena_qid = ENA_IO_RXQ_IDX(i);
        ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
    }
}

static void
ena_destroy_all_io_queues(struct ena_adapter *adapter)
{
    struct ena_que *queue;
    int i;

    for (i = 0; i < adapter->num_io_queues; i++) {
        queue = &adapter->que[i];
        while (taskqueue_cancel(queue->cleanup_tq,
            &queue->cleanup_task, NULL))
            taskqueue_drain(queue->cleanup_tq,
                &queue->cleanup_task);
        taskqueue_free(queue->cleanup_tq);
    }

    ena_destroy_all_tx_queues(adapter);
    ena_destroy_all_rx_queues(adapter);
}
static int
ena_create_io_queues(struct ena_adapter *adapter)
{
    struct ena_com_dev *ena_dev = adapter->ena_dev;
    struct ena_com_create_io_ctx ctx;
    struct ena_ring *ring;
    struct ena_que *queue;
    uint16_t ena_qid;
    uint32_t msix_vector;
    int rc, i;

    /* Create TX queues */
    for (i = 0; i < adapter->num_io_queues; i++) {
        msix_vector = ENA_IO_IRQ_IDX(i);
        ena_qid = ENA_IO_TXQ_IDX(i);
        ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
        ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
        ctx.queue_size = adapter->requested_tx_ring_size;
        ctx.msix_vector = msix_vector;
        ctx.qid = ena_qid;
        rc = ena_com_create_io_queue(ena_dev, &ctx);
        if (rc != 0) {
            device_printf(adapter->pdev,
                "Failed to create io TX queue #%d rc: %d\n", i, rc);
            goto err_tx;
        }
        ring = &adapter->tx_ring[i];
        rc = ena_com_get_io_handlers(ena_dev, ena_qid,
            &ring->ena_com_io_sq,
            &ring->ena_com_io_cq);
        if (rc != 0) {
            device_printf(adapter->pdev,
                "Failed to get TX queue handlers. TX queue num"
                " %d rc: %d\n", i, rc);
            ena_com_destroy_io_queue(ena_dev, ena_qid);
            goto err_tx;
        }
    }

    /* Create RX queues */
    for (i = 0; i < adapter->num_io_queues; i++) {
        msix_vector = ENA_IO_IRQ_IDX(i);
        ena_qid = ENA_IO_RXQ_IDX(i);
        ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
        ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
        ctx.queue_size = adapter->requested_rx_ring_size;
        ctx.msix_vector = msix_vector;
        ctx.qid = ena_qid;
        rc = ena_com_create_io_queue(ena_dev, &ctx);
        if (unlikely(rc != 0)) {
            device_printf(adapter->pdev,
                "Failed to create io RX queue[%d] rc: %d\n", i, rc);
            goto err_rx;
        }

        ring = &adapter->rx_ring[i];
        rc = ena_com_get_io_handlers(ena_dev, ena_qid,
            &ring->ena_com_io_sq,
            &ring->ena_com_io_cq);
        if (unlikely(rc != 0)) {
            device_printf(adapter->pdev,
                "Failed to get RX queue handlers. RX queue num"
                " %d rc: %d\n", i, rc);
            ena_com_destroy_io_queue(ena_dev, ena_qid);
            goto err_rx;
        }
    }

    for (i = 0; i < adapter->num_io_queues; i++) {
        queue = &adapter->que[i];

        NET_TASK_INIT(&queue->cleanup_task, 0, ena_cleanup, queue);
        queue->cleanup_tq = taskqueue_create_fast("ena cleanup",
            M_WAITOK, taskqueue_thread_enqueue, &queue->cleanup_tq);

        taskqueue_start_threads(&queue->cleanup_tq, 1, PI_NET,
            "%s queue %d cleanup",
            device_get_nameunit(adapter->pdev), i);
    }

    return (0);

err_rx:
    while (i--)
        ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
    i = adapter->num_io_queues;
err_tx:
    while (i--)
        ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));

    return (ENXIO);
}
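/*
 * Illustrative note (not from the original source): the error paths above
 * use the common "rewind" idiom - on an RX failure at index i, all RX
 * queues created so far are destroyed with while (i--), after which i is
 * reset to num_io_queues so the TX unwind tears down every TX queue that
 * had already been created.
 */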
/*********************************************************************
 *
 *  MSIX & Interrupt Service routine
 *
 **********************************************************************/

/**
 * ena_intr_msix_mgmnt - MSIX Interrupt Handler for admin/async queue
 * @arg: network adapter
 **/
static void
ena_intr_msix_mgmnt(void *arg)
{
    struct ena_adapter *adapter = (struct ena_adapter *)arg;

    ena_com_admin_q_comp_intr_handler(adapter->ena_dev);
    if (likely(ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter)))
        ena_com_aenq_intr_handler(adapter->ena_dev, arg);
}

/**
 * ena_handle_msix - MSIX Interrupt Handler for Tx/Rx
 * @arg: queue
 **/
static int
ena_handle_msix(void *arg)
{
    struct ena_que *queue = arg;
    struct ena_adapter *adapter = queue->adapter;
    if_t ifp = adapter->ifp;

    if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0))
        return (FILTER_STRAY);

    taskqueue_enqueue(queue->cleanup_tq, &queue->cleanup_task);

    return (FILTER_HANDLED);
}
static int
ena_enable_msix(struct ena_adapter *adapter)
{
    device_t dev = adapter->pdev;
    int msix_vecs, msix_req;
    int i, rc = 0;

    if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) {
        device_printf(dev, "Error, MSI-X is already enabled\n");
        return (EINVAL);
    }

    /* Reserve the max MSI-X vectors we might need */
    msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues);

    adapter->msix_entries = malloc(msix_vecs * sizeof(struct msix_entry),
        M_DEVBUF, M_WAITOK | M_ZERO);

    ena_trace(NULL, ENA_DBG, "trying to enable MSI-X, vectors: %d\n",
        msix_vecs);

    for (i = 0; i < msix_vecs; i++) {
        adapter->msix_entries[i].entry = i;
        /* Vectors must start from 1 */
        adapter->msix_entries[i].vector = i + 1;
    }

    msix_req = msix_vecs;
    rc = pci_alloc_msix(dev, &msix_vecs);
    if (unlikely(rc != 0)) {
        device_printf(dev,
            "Failed to enable MSIX, vectors %d rc %d\n", msix_vecs, rc);
        rc = ENOSPC;
        goto err_msix_free;
    }

    if (msix_vecs != msix_req) {
        if (msix_vecs == ENA_ADMIN_MSIX_VEC) {
            device_printf(dev,
                "Not enough number of MSI-x allocated: %d\n",
                msix_vecs);
            pci_release_msi(dev);
            rc = ENOSPC;
            goto err_msix_free;
        }
        device_printf(dev, "Enable only %d MSI-x (out of %d), reduce "
            "the number of queues\n", msix_vecs, msix_req);
    }

    adapter->msix_vecs = msix_vecs;
    ENA_FLAG_SET_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter);

    return (0);

err_msix_free:
    free(adapter->msix_entries, M_DEVBUF);
    adapter->msix_entries = NULL;

    return (rc);
}
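/*
 * Illustrative note (not from the original source): the driver appears to
 * lay out its MSI-X table as one management vector followed by one vector
 * per IO queue pair, roughly:
 *
 *	vector 0      -> admin/AENQ (ENA_MGMNT_IRQ_IDX)
 *	vectors 1..N  -> Tx/Rx queue pairs (ENA_IO_IRQ_IDX(i))
 *
 * which is why ENA_MAX_MSIX_VEC() above is sized from max_num_io_queues.
 */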
static void
ena_setup_mgmnt_intr(struct ena_adapter *adapter)
{
    snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
        ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
        device_get_nameunit(adapter->pdev));

    /*
     * Handler is NULL on purpose, it will be set
     * when mgmnt interrupt is acquired
     */
    adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler = NULL;
    adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
    adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
        adapter->msix_entries[ENA_MGMNT_IRQ_IDX].vector;
}

static int
ena_setup_io_intr(struct ena_adapter *adapter)
{
    static int last_bind_cpu = -1;
    int irq_idx;

    if (adapter->msix_entries == NULL)
        return (EINVAL);

    for (int i = 0; i < adapter->num_io_queues; i++) {
        irq_idx = ENA_IO_IRQ_IDX(i);

        snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
            "%s-TxRx-%d", device_get_nameunit(adapter->pdev), i);
        adapter->irq_tbl[irq_idx].handler = ena_handle_msix;
        adapter->irq_tbl[irq_idx].data = &adapter->que[i];
        adapter->irq_tbl[irq_idx].vector =
            adapter->msix_entries[irq_idx].vector;
        ena_trace(NULL, ENA_INFO | ENA_IOQ,
            "ena_setup_io_intr vector: %d\n",
            adapter->msix_entries[irq_idx].vector);

        /*
         * We want to bind rings to the corresponding cpu
         * using something similar to the RSS round-robin technique.
         */
        if (unlikely(last_bind_cpu < 0))
            last_bind_cpu = CPU_FIRST();
        adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu =
            last_bind_cpu;
        last_bind_cpu = CPU_NEXT(last_bind_cpu);
    }

    return (0);
}
static int
ena_request_mgmnt_irq(struct ena_adapter *adapter)
{
    struct ena_irq *irq;
    unsigned long flags;
    int rc, rcc;

    flags = RF_ACTIVE | RF_SHAREABLE;

    irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
    irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ,
        &irq->vector, flags);

    if (unlikely(irq->res == NULL)) {
        device_printf(adapter->pdev, "could not allocate "
            "irq vector: %d\n", irq->vector);
        return (ENXIO);
    }

    rc = bus_setup_intr(adapter->pdev, irq->res,
        INTR_TYPE_NET | INTR_MPSAFE, NULL, ena_intr_msix_mgmnt,
        irq->data, &irq->cookie);
    if (unlikely(rc != 0)) {
        device_printf(adapter->pdev, "failed to register "
            "interrupt handler for irq %ju: %d\n",
            rman_get_start(irq->res), rc);
        goto err_res_free;
    }
    irq->requested = true;

    return (rc);

err_res_free:
    ena_trace(NULL, ENA_INFO | ENA_ADMQ, "releasing resource for irq %d\n",
        irq->vector);
    rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
        irq->vector, irq->res);
    if (unlikely(rcc != 0))
        device_printf(adapter->pdev, "dev has no parent while "
            "releasing res for irq: %d\n", irq->vector);
    irq->res = NULL;

    return (rc);
}
static int
ena_request_io_irq(struct ena_adapter *adapter)
{
    struct ena_irq *irq;
    unsigned long flags = 0;
    int rc = 0, i, rcc;

    if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter))) {
        device_printf(adapter->pdev,
            "failed to request I/O IRQ: MSI-X is not enabled\n");
        return (EINVAL);
    } else {
        flags = RF_ACTIVE | RF_SHAREABLE;
    }

    for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
        irq = &adapter->irq_tbl[i];

        if (unlikely(irq->requested))
            continue;

        irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ,
            &irq->vector, flags);
        if (unlikely(irq->res == NULL)) {
            rc = ENOMEM;
            device_printf(adapter->pdev, "could not allocate "
                "irq vector: %d\n", irq->vector);
            goto err;
        }

        rc = bus_setup_intr(adapter->pdev, irq->res,
            INTR_TYPE_NET | INTR_MPSAFE, irq->handler, NULL,
            irq->data, &irq->cookie);
        if (unlikely(rc != 0)) {
            device_printf(adapter->pdev, "failed to register "
                "interrupt handler for irq %ju: %d\n",
                rman_get_start(irq->res), rc);
            goto err;
        }
        irq->requested = true;

        ena_trace(NULL, ENA_INFO, "queue %d - cpu %d\n",
            i - ENA_IO_IRQ_FIRST_IDX, irq->cpu);
    }

    return (rc);

err:
    for (; i >= ENA_IO_IRQ_FIRST_IDX; i--) {
        irq = &adapter->irq_tbl[i];
        rcc = 0;

        /*
         * Once we entered the err: section and irq->requested is true,
         * we free both the intr and the resources.
         */
        if (irq->requested)
            rcc = bus_teardown_intr(adapter->pdev, irq->res,
                irq->cookie);
        if (unlikely(rcc != 0))
            device_printf(adapter->pdev, "could not release"
                " irq: %d, error: %d\n", irq->vector, rcc);

        /*
         * If we entered the err: section without irq->requested set, we
         * know it was bus_alloc_resource_any() that needs cleanup,
         * provided res is not NULL. In case res is NULL, no work is
         * needed in this iteration and we can skip it.
         */
        if (irq->res != NULL) {
            rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
                irq->vector, irq->res);
            if (unlikely(rcc != 0))
                device_printf(adapter->pdev, "dev has no parent while "
                    "releasing res for irq: %d\n", irq->vector);
            irq->requested = false;
            irq->res = NULL;
        }
    }

    return (rc);
}
static void
ena_free_mgmnt_irq(struct ena_adapter *adapter)
{
    struct ena_irq *irq;
    int rc;

    irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
    if (irq->requested) {
        ena_trace(NULL, ENA_INFO | ENA_ADMQ, "tear down irq: %d\n",
            irq->vector);
        rc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie);
        if (unlikely(rc != 0))
            device_printf(adapter->pdev, "failed to tear "
                "down irq: %d\n", irq->vector);
        irq->requested = false;
    }

    if (irq->res != NULL) {
        ena_trace(NULL, ENA_INFO | ENA_ADMQ, "release resource irq: %d\n",
            irq->vector);
        rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
            irq->vector, irq->res);
        irq->res = NULL;
        if (unlikely(rc != 0))
            device_printf(adapter->pdev, "dev has no parent while "
                "releasing res for irq: %d\n", irq->vector);
    }
}
static void
ena_free_io_irq(struct ena_adapter *adapter)
{
    struct ena_irq *irq;
    int rc;

    for (int i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
        irq = &adapter->irq_tbl[i];
        if (irq->requested) {
            ena_trace(NULL, ENA_INFO | ENA_IOQ, "tear down irq: %d\n",
                irq->vector);
            rc = bus_teardown_intr(adapter->pdev, irq->res,
                irq->cookie);
            if (unlikely(rc != 0)) {
                device_printf(adapter->pdev, "failed to tear "
                    "down irq: %d\n", irq->vector);
            }
            irq->requested = false;
        }

        if (irq->res != NULL) {
            ena_trace(NULL, ENA_INFO | ENA_IOQ,
                "release resource irq: %d\n", irq->vector);
            rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
                irq->vector, irq->res);
            irq->res = NULL;
            if (unlikely(rc != 0)) {
                device_printf(adapter->pdev, "dev has no parent"
                    " while releasing res for irq: %d\n",
                    irq->vector);
            }
        }
    }
}

static void
ena_free_irqs(struct ena_adapter *adapter)
{
    ena_free_io_irq(adapter);
    ena_free_mgmnt_irq(adapter);
    ena_disable_msix(adapter);
}

static void
ena_disable_msix(struct ena_adapter *adapter)
{
    if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) {
        ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter);
        pci_release_msi(adapter->pdev);
    }

    adapter->msix_vecs = 0;
    if (adapter->msix_entries != NULL)
        free(adapter->msix_entries, M_DEVBUF);
    adapter->msix_entries = NULL;
}
static void
ena_unmask_all_io_irqs(struct ena_adapter *adapter)
{
    struct ena_com_io_cq *io_cq;
    struct ena_eth_io_intr_reg intr_reg;
    uint16_t ena_qid;
    int i;

    /* Unmask interrupts for all queues */
    for (i = 0; i < adapter->num_io_queues; i++) {
        ena_qid = ENA_IO_TXQ_IDX(i);
        io_cq = &adapter->ena_dev->io_cq_queues[ena_qid];
        ena_com_update_intr_reg(&intr_reg, 0, 0, true);
        ena_com_unmask_intr(io_cq, &intr_reg);
    }
}
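/*
 * Illustrative note (not from the original source): in the loop above,
 * ena_com_update_intr_reg(&intr_reg, 0, 0, true) appears to build an
 * interrupt register value with zero Rx/Tx moderation delay and the unmask
 * bit set, so each completion queue starts out with interrupts enabled and
 * no extra coalescing.
 */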
/* Configure the Rx forwarding */
static int
ena_rss_configure(struct ena_adapter *adapter)
{
    struct ena_com_dev *ena_dev = adapter->ena_dev;
    int rc;

    /* In case the RSS table was destroyed */
    if (!ena_dev->rss.tbl_log_size) {
        rc = ena_rss_init_default(adapter);
        if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) {
            device_printf(adapter->pdev,
                "WARNING: RSS was not properly re-initialized,"
                " it will affect bandwidth\n");
            ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_RSS_ACTIVE, adapter);
            return (rc);
        }
    }

    /* Set indirect table */
    rc = ena_com_indirect_table_set(ena_dev);
    if (unlikely((rc != 0) && (rc != EOPNOTSUPP)))
        return (rc);

    /* Configure hash function (if supported) */
    rc = ena_com_set_hash_function(ena_dev);
    if (unlikely((rc != 0) && (rc != EOPNOTSUPP)))
        return (rc);

    /* Configure hash inputs (if supported) */
    rc = ena_com_set_hash_ctrl(ena_dev);
    if (unlikely((rc != 0) && (rc != EOPNOTSUPP)))
        return (rc);

    return (0);
}
static int
ena_up_complete(struct ena_adapter *adapter)
{
    int rc;

    if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter))) {
        rc = ena_rss_configure(adapter);
        if (rc != 0) {
            device_printf(adapter->pdev,
                "Failed to configure RSS\n");
            return (rc);
        }
    }

    rc = ena_change_mtu(adapter->ifp, adapter->ifp->if_mtu);
    if (unlikely(rc != 0))
        return (rc);

    ena_refill_all_rx_bufs(adapter);
    ena_reset_counters((counter_u64_t *)&adapter->hw_stats,
        sizeof(adapter->hw_stats));

    return (0);
}
static void
set_io_rings_size(struct ena_adapter *adapter, int new_tx_size,
    int new_rx_size)
{
    int i;

    for (i = 0; i < adapter->num_io_queues; i++) {
        adapter->tx_ring[i].ring_size = new_tx_size;
        adapter->rx_ring[i].ring_size = new_rx_size;
    }
}

static int
create_queues_with_size_backoff(struct ena_adapter *adapter)
{
    int rc;
    uint32_t cur_rx_ring_size, cur_tx_ring_size;
    uint32_t new_rx_ring_size, new_tx_ring_size;

    /*
     * Current queue sizes might be set to smaller than the requested
     * ones due to past queue allocation failures.
     */
    set_io_rings_size(adapter, adapter->requested_tx_ring_size,
        adapter->requested_rx_ring_size);

    while (1) {
        /* Allocate transmit descriptors */
        rc = ena_setup_all_tx_resources(adapter);
        if (unlikely(rc != 0)) {
            ena_trace(NULL, ENA_ALERT, "err_setup_tx\n");
            goto err_setup_tx;
        }

        /* Allocate receive descriptors */
        rc = ena_setup_all_rx_resources(adapter);
        if (unlikely(rc != 0)) {
            ena_trace(NULL, ENA_ALERT, "err_setup_rx\n");
            goto err_setup_rx;
        }

        /* Create IO queues for Rx & Tx */
        rc = ena_create_io_queues(adapter);
        if (unlikely(rc != 0)) {
            ena_trace(NULL, ENA_ALERT,
                "create IO queues failed\n");
            goto err_io_que;
        }

        return (0);

err_io_que:
        ena_free_all_rx_resources(adapter);
err_setup_rx:
        ena_free_all_tx_resources(adapter);
err_setup_tx:
        /*
         * Lower the ring size if ENOMEM. Otherwise, return the
         * error straightaway.
         */
        if (unlikely(rc != ENOMEM)) {
            ena_trace(NULL, ENA_ALERT,
                "Queue creation failed with error code: %d\n", rc);
            return (rc);
        }

        cur_tx_ring_size = adapter->tx_ring[0].ring_size;
        cur_rx_ring_size = adapter->rx_ring[0].ring_size;

        device_printf(adapter->pdev,
            "Not enough memory to create queues with sizes TX=%d, RX=%d\n",
            cur_tx_ring_size, cur_rx_ring_size);

        new_tx_ring_size = cur_tx_ring_size;
        new_rx_ring_size = cur_rx_ring_size;

        /*
         * Decrease the size of a larger queue, or decrease both if they
         * are the same size.
         */
        if (cur_rx_ring_size <= cur_tx_ring_size)
            new_tx_ring_size = cur_tx_ring_size / 2;
        if (cur_rx_ring_size >= cur_tx_ring_size)
            new_rx_ring_size = cur_rx_ring_size / 2;

        if (new_tx_ring_size < ENA_MIN_RING_SIZE ||
            new_rx_ring_size < ENA_MIN_RING_SIZE) {
            device_printf(adapter->pdev,
                "Queue creation failed with the smallest possible queue size "
                "of %d for both queues. Not retrying with smaller queues\n",
                ENA_MIN_RING_SIZE);
            return (rc);
        }

        set_io_rings_size(adapter, new_tx_ring_size, new_rx_ring_size);
    }
}
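/*
 * Illustrative example (not from the original source) of the backoff
 * above: with requested sizes TX=1024, RX=1024 and repeated ENOMEM
 * failures, the retry sequence halves both sides while they stay equal:
 *
 *	(1024, 1024) -> (512, 512) -> (256, 256) -> ...
 *
 * whereas with TX=1024, RX=256 only the larger ring shrinks first:
 *
 *	(1024, 256) -> (512, 256) -> (256, 256) -> (128, 128) -> ...
 *
 * until either creation succeeds or a ring would drop below
 * ENA_MIN_RING_SIZE.
 */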
int
ena_up(struct ena_adapter *adapter)
{
    int rc = 0;

    if (unlikely(device_is_attached(adapter->pdev) == 0)) {
        device_printf(adapter->pdev, "device is not attached!\n");
        return (ENXIO);
    }

    if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
        return (0);

    device_printf(adapter->pdev, "device is going UP\n");

    /* setup interrupts for IO queues */
    rc = ena_setup_io_intr(adapter);
    if (unlikely(rc != 0)) {
        ena_trace(NULL, ENA_ALERT, "error setting up IO interrupt\n");
        goto error;
    }
    rc = ena_request_io_irq(adapter);
    if (unlikely(rc != 0)) {
        ena_trace(NULL, ENA_ALERT, "err_req_irq\n");
        goto error;
    }

    device_printf(adapter->pdev,
        "Creating %u IO queues. Rx queue size: %d, Tx queue size: %d, "
        "LLQ is %s\n",
        adapter->num_io_queues,
        adapter->requested_rx_ring_size,
        adapter->requested_tx_ring_size,
        (adapter->ena_dev->tx_mem_queue_type ==
            ENA_ADMIN_PLACEMENT_POLICY_DEV) ? "ENABLED" : "DISABLED");

    rc = create_queues_with_size_backoff(adapter);
    if (unlikely(rc != 0)) {
        ena_trace(NULL, ENA_ALERT,
            "error creating queues with size backoff\n");
        goto err_create_queues_with_backoff;
    }

    if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter))
        if_link_state_change(adapter->ifp, LINK_STATE_UP);

    rc = ena_up_complete(adapter);
    if (unlikely(rc != 0))
        goto err_up_complete;

    counter_u64_add(adapter->dev_stats.interface_up, 1);

    ena_update_hwassist(adapter);

    if_setdrvflagbits(adapter->ifp, IFF_DRV_RUNNING,
        IFF_DRV_OACTIVE);

    /*
     * Activate timer service only if the device is running.
     * If this flag is not set, it means that the driver is being
     * reset and timer service will be activated afterwards.
     */
    if (ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter)) {
        callout_reset_sbt(&adapter->timer_service, SBT_1S,
            SBT_1S, ena_timer_service, (void *)adapter, 0);
    }

    ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP, adapter);

    ena_unmask_all_io_irqs(adapter);

    return (0);

err_up_complete:
    ena_destroy_all_io_queues(adapter);
    ena_free_all_rx_resources(adapter);
    ena_free_all_tx_resources(adapter);
err_create_queues_with_backoff:
    ena_free_io_irq(adapter);
error:
    return (rc);
}
static uint64_t
ena_get_counter(if_t ifp, ift_counter cnt)
{
    struct ena_adapter *adapter;
    struct ena_hw_stats *stats;

    adapter = if_getsoftc(ifp);
    stats = &adapter->hw_stats;

    switch (cnt) {
    case IFCOUNTER_IPACKETS:
        return (counter_u64_fetch(stats->rx_packets));
    case IFCOUNTER_OPACKETS:
        return (counter_u64_fetch(stats->tx_packets));
    case IFCOUNTER_IBYTES:
        return (counter_u64_fetch(stats->rx_bytes));
    case IFCOUNTER_OBYTES:
        return (counter_u64_fetch(stats->tx_bytes));
    case IFCOUNTER_IQDROPS:
        return (counter_u64_fetch(stats->rx_drops));
    case IFCOUNTER_OQDROPS:
        return (counter_u64_fetch(stats->tx_drops));
    default:
        return (if_get_counter_default(ifp, cnt));
    }
}

static int
ena_media_change(if_t ifp)
{
    /* Media Change is not supported by firmware */
    return (0);
}

static void
ena_media_status(if_t ifp, struct ifmediareq *ifmr)
{
    struct ena_adapter *adapter = if_getsoftc(ifp);

    ena_trace(NULL, ENA_DBG, "enter\n");

    ENA_LOCK_LOCK(adapter);

    ifmr->ifm_status = IFM_AVALID;
    ifmr->ifm_active = IFM_ETHER;

    if (!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)) {
        ENA_LOCK_UNLOCK(adapter);
        ena_trace(NULL, ENA_INFO, "Link is down\n");
        return;
    }

    ifmr->ifm_status |= IFM_ACTIVE;
    ifmr->ifm_active |= IFM_UNKNOWN | IFM_FDX;

    ENA_LOCK_UNLOCK(adapter);
}

static void
ena_init(void *arg)
{
    struct ena_adapter *adapter = (struct ena_adapter *)arg;

    if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) {
        ENA_LOCK_LOCK(adapter);
        ena_up(adapter);
        ENA_LOCK_UNLOCK(adapter);
    }
}
static int
ena_ioctl(if_t ifp, u_long command, caddr_t data)
{
    struct ena_adapter *adapter;
    struct ifreq *ifr;
    int rc;

    adapter = ifp->if_softc;
    ifr = (struct ifreq *)data;

    /*
     * Acquire the lock to prevent the up and down routines from
     * running in parallel.
     */
    rc = 0;
    switch (command) {
    case SIOCSIFMTU:
        if (ifp->if_mtu == ifr->ifr_mtu)
            break;
        ENA_LOCK_LOCK(adapter);
        ena_down(adapter);

        ena_change_mtu(ifp, ifr->ifr_mtu);

        rc = ena_up(adapter);
        ENA_LOCK_UNLOCK(adapter);
        break;

    case SIOCSIFFLAGS:
        if ((ifp->if_flags & IFF_UP) != 0) {
            if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
                if ((ifp->if_flags & (IFF_PROMISC |
                    IFF_ALLMULTI)) != 0) {
                    device_printf(adapter->pdev,
                        "ioctl promisc/allmulti\n");
                }
            } else {
                ENA_LOCK_LOCK(adapter);
                rc = ena_up(adapter);
                ENA_LOCK_UNLOCK(adapter);
            }
        } else {
            if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
                ENA_LOCK_LOCK(adapter);
                ena_down(adapter);
                ENA_LOCK_UNLOCK(adapter);
            }
        }
        break;

    case SIOCADDMULTI:
    case SIOCDELMULTI:
        break;

    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
        rc = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
        break;

    case SIOCSIFCAP:
        {
            int reinit = 0;

            if (ifr->ifr_reqcap != ifp->if_capenable) {
                ifp->if_capenable = ifr->ifr_reqcap;
                reinit = 1;
            }

            if ((reinit != 0) &&
                ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)) {
                ENA_LOCK_LOCK(adapter);
                ena_down(adapter);
                rc = ena_up(adapter);
                ENA_LOCK_UNLOCK(adapter);
            }
        }
        break;

    default:
        rc = ether_ioctl(ifp, command, data);
        break;
    }

    return (rc);
}
2315 ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *feat)
2319 if ((feat->offload.tx &
2320 (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK |
2321 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK |
2322 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK)) != 0)
2323 caps |= IFCAP_TXCSUM;
2325 if ((feat->offload.tx &
2326 (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK |
2327 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)) != 0)
2328 caps |= IFCAP_TXCSUM_IPV6;
2330 if ((feat->offload.tx &
2331 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0)
2334 if ((feat->offload.tx &
2335 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) != 0)
2338 if ((feat->offload.rx_supported &
2339 (ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK |
2340 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK)) != 0)
2341 caps |= IFCAP_RXCSUM;
2343 if ((feat->offload.rx_supported &
2344 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) != 0)
2345 caps |= IFCAP_RXCSUM_IPV6;
2347 caps |= IFCAP_LRO | IFCAP_JUMBO_MTU;
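/*
 * LRO and jumbo MTU don't depend on a device feature bit (LRO is done
 * in software by the stack's tcp_lro code), so they are always
 * advertised.
 */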
2353 ena_update_host_info(struct ena_admin_host_info *host_info, if_t ifp)
2356 host_info->supported_network_features[0] =
2357 (uint32_t)if_getcapabilities(ifp);
2361 ena_update_hwassist(struct ena_adapter *adapter)
2363 if_t ifp = adapter->ifp;
2364 uint32_t feat = adapter->tx_offload_cap;
2365 int cap = if_getcapenable(ifp);
2368 if_clearhwassist(ifp);
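/*
 * Rebuild the hwassist flags from scratch: each enabled IFCAP_*
 * capability is translated into the per-packet CSUM_* flags the stack
 * may request, e.g. IFCAP_TXCSUM enables CSUM_IP_UDP and CSUM_IP_TCP
 * when the device reports the matching L4 offloads.
 */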
2370 if ((cap & IFCAP_TXCSUM) != 0) {
2372 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK) != 0)
2375 (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK |
2376 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)) != 0)
2377 flags |= CSUM_IP_UDP | CSUM_IP_TCP;
2380 if ((cap & IFCAP_TXCSUM_IPV6) != 0)
2381 flags |= CSUM_IP6_UDP | CSUM_IP6_TCP;
2383 if ((cap & IFCAP_TSO4) != 0)
2384 flags |= CSUM_IP_TSO;
2386 if ((cap & IFCAP_TSO6) != 0)
2387 flags |= CSUM_IP6_TSO;
2389 if_sethwassistbits(ifp, flags, 0);
2393 ena_setup_ifnet(device_t pdev, struct ena_adapter *adapter,
2394 struct ena_com_dev_get_features_ctx *feat)
2399 ifp = adapter->ifp = if_gethandle(IFT_ETHER);
2400 if (unlikely(ifp == NULL)) {
ena_trace(NULL, ENA_ALERT, "cannot allocate ifnet structure\n");
2404 if_initname(ifp, device_get_name(pdev), device_get_unit(pdev));
2405 if_setdev(ifp, pdev);
2406 if_setsoftc(ifp, adapter);
2408 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST |
2410 if_setinitfn(ifp, ena_init);
2411 if_settransmitfn(ifp, ena_mq_start);
2412 if_setqflushfn(ifp, ena_qflush);
2413 if_setioctlfn(ifp, ena_ioctl);
2414 if_setgetcounterfn(ifp, ena_get_counter);
2416 if_setsendqlen(ifp, adapter->requested_tx_ring_size);
2417 if_setsendqready(ifp);
2418 if_setmtu(ifp, ETHERMTU);
2419 if_setbaudrate(ifp, 0);
2420 /* Zeroize capabilities... */
2421 if_setcapabilities(ifp, 0);
2422 if_setcapenable(ifp, 0);
2423 /* check hardware support */
2424 caps = ena_get_dev_offloads(feat);
2425 /* ... and set them */
2426 if_setcapabilitiesbit(ifp, caps, 0);
2428 /* TSO parameters */
2429 ifp->if_hw_tsomax = ENA_TSO_MAXSIZE -
2430 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
2431 ifp->if_hw_tsomaxsegcount = adapter->max_tx_sgl_size - 1;
2432 ifp->if_hw_tsomaxsegsize = ENA_TSO_MAXSIZE;
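/*
 * Note: if_hw_tsomax is reduced by the Ethernet + VLAN header length
 * so a full TSO frame still fits in ENA_TSO_MAXSIZE, and one SGL entry
 * is held back (the "- 1"), leaving room for the header segment
 * (a cautious reading of the limits, not a spec statement).
 */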
2434 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
2435 if_setcapenable(ifp, if_getcapabilities(ifp));
2438 * Specify the media types supported by this adapter and register
2439 * callbacks to update media and link information
2441 ifmedia_init(&adapter->media, IFM_IMASK,
2442 ena_media_change, ena_media_status);
2443 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2444 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2446 ether_ifattach(ifp, adapter->mac_addr);
2452 ena_down(struct ena_adapter *adapter)
2456 if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
2459 device_printf(adapter->pdev, "device is going DOWN\n");
2461 callout_drain(&adapter->timer_service);
2463 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP, adapter);
2464 if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE,
2467 ena_free_io_irq(adapter);
2469 if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) {
2470 rc = ena_com_dev_reset(adapter->ena_dev,
2471 adapter->reset_reason);
2472 if (unlikely(rc != 0))
2473 device_printf(adapter->pdev,
2474 "Device reset failed\n");
2477 ena_destroy_all_io_queues(adapter);
2479 ena_free_all_tx_bufs(adapter);
2480 ena_free_all_rx_bufs(adapter);
2481 ena_free_all_tx_resources(adapter);
2482 ena_free_all_rx_resources(adapter);
2484 counter_u64_add(adapter->dev_stats.interface_down, 1);
2488 ena_calc_max_io_queue_num(device_t pdev, struct ena_com_dev *ena_dev,
2489 struct ena_com_dev_get_features_ctx *get_feat_ctx)
2491 uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
2493 /* Regular queues capabilities */
2494 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
2495 struct ena_admin_queue_ext_feature_fields *max_queue_ext =
2496 &get_feat_ctx->max_queue_ext.max_queue_ext;
2497 io_rx_num = min_t(int, max_queue_ext->max_rx_sq_num,
2498 max_queue_ext->max_rx_cq_num);
2500 io_tx_sq_num = max_queue_ext->max_tx_sq_num;
2501 io_tx_cq_num = max_queue_ext->max_tx_cq_num;
2503 struct ena_admin_queue_feature_desc *max_queues =
2504 &get_feat_ctx->max_queues;
2505 io_tx_sq_num = max_queues->max_sq_num;
2506 io_tx_cq_num = max_queues->max_cq_num;
2507 io_rx_num = min_t(int, io_tx_sq_num, io_tx_cq_num);
2510 /* In case of LLQ use the llq fields for the tx SQ/CQ */
2511 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
2512 io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
2514 max_num_io_queues = min_t(uint32_t, mp_ncpus, ENA_MAX_NUM_IO_QUEUES);
2515 max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_rx_num);
2516 max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_tx_sq_num);
2517 max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_tx_cq_num);
/* 1 IRQ for mgmnt and 1 IRQ for each TX/RX pair */
2519 max_num_io_queues = min_t(uint32_t, max_num_io_queues,
2520 pci_msix_count(pdev) - 1);
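/*
 * Worked example (illustrative numbers only): with 8 CPUs,
 * io_rx_num = 16, io_tx_sq_num = 32, io_tx_cq_num = 16 and
 * pci_msix_count() = 9, the result is
 * min(8, 16, 32, 16, 9 - 1) = 8 IO queues.
 */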
2522 return (max_num_io_queues);
2526 ena_enable_wc(struct resource *res)
2528 #if defined(__i386) || defined(__amd64) || defined(__aarch64__)
2533 va = (vm_offset_t)rman_get_virtual(res);
2534 len = rman_get_size(res);
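/*
 * Write combining lets the CPU batch the LLQ descriptor and header
 * writes into larger bus transactions, which is why it is enabled for
 * this BAR.
 */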
2535 /* Enable write combining */
2536 rc = pmap_change_attr(va, len, VM_MEMATTR_WRITE_COMBINING);
2537 if (unlikely(rc != 0)) {
2538 ena_trace(NULL, ENA_ALERT, "pmap_change_attr failed, %d\n", rc);
2544 return (EOPNOTSUPP);
2548 ena_set_queues_placement_policy(device_t pdev, struct ena_com_dev *ena_dev,
2549 struct ena_admin_feature_llq_desc *llq,
2550 struct ena_llq_configurations *llq_default_configurations)
2552 struct ena_adapter *adapter = device_get_softc(pdev);
2554 uint32_t llq_feature_mask;
2556 llq_feature_mask = 1 << ENA_ADMIN_LLQ;
2557 if (!(ena_dev->supported_features & llq_feature_mask)) {
2559 "LLQ is not supported. Fallback to host mode policy.\n");
2560 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2564 rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
2565 if (unlikely(rc != 0)) {
2566 device_printf(pdev, "Failed to configure the device mode. "
2567 "Fallback to host mode policy.\n");
2568 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2572 /* Nothing to config, exit */
2573 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
2576 /* Try to allocate resources for LLQ bar */
2577 rid = PCIR_BAR(ENA_MEM_BAR);
2578 adapter->memory = bus_alloc_resource_any(pdev, SYS_RES_MEMORY,
2580 if (unlikely(adapter->memory == NULL)) {
2581 device_printf(pdev, "unable to allocate LLQ bar resource. "
2582 "Fallback to host mode policy.\n");
2583 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2587 /* Enable write combining for better LLQ performance */
2588 rc = ena_enable_wc(adapter->memory);
2589 if (unlikely(rc != 0)) {
2590 device_printf(pdev, "failed to enable write combining.\n");
2595 * Save virtual address of the device's memory region
2596 * for the ena_com layer.
2598 ena_dev->mem_bar = rman_get_virtual(adapter->memory);
2604 void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
2606 llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
2607 llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
2608 llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
2609 llq_config->llq_num_decs_before_header =
2610 ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
2611 llq_config->llq_ring_entry_size_value = 128;
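/*
 * Rough arithmetic (illustrative): a 128-byte LLQ entry with two
 * 16-byte descriptors placed before the header leaves about 96 bytes
 * of each entry for pushed packet headers.
 */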
2615 ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
2617 struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
2618 struct ena_com_dev *ena_dev = ctx->ena_dev;
2619 uint32_t tx_queue_size = ENA_DEFAULT_RING_SIZE;
2620 uint32_t rx_queue_size = ENA_DEFAULT_RING_SIZE;
2621 uint32_t max_tx_queue_size;
2622 uint32_t max_rx_queue_size;
2624 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
2625 struct ena_admin_queue_ext_feature_fields *max_queue_ext =
2626 &ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
2627 max_rx_queue_size = min_t(uint32_t,
2628 max_queue_ext->max_rx_cq_depth,
2629 max_queue_ext->max_rx_sq_depth);
2630 max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
2632 if (ena_dev->tx_mem_queue_type ==
2633 ENA_ADMIN_PLACEMENT_POLICY_DEV)
2634 max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
2635 llq->max_llq_depth);
2637 max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
2638 max_queue_ext->max_tx_sq_depth);
2640 ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
2641 max_queue_ext->max_per_packet_tx_descs);
2642 ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
2643 max_queue_ext->max_per_packet_rx_descs);
2645 struct ena_admin_queue_feature_desc *max_queues =
2646 &ctx->get_feat_ctx->max_queues;
2647 max_rx_queue_size = min_t(uint32_t,
2648 max_queues->max_cq_depth,
2649 max_queues->max_sq_depth);
2650 max_tx_queue_size = max_queues->max_cq_depth;
2652 if (ena_dev->tx_mem_queue_type ==
2653 ENA_ADMIN_PLACEMENT_POLICY_DEV)
2654 max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
2655 llq->max_llq_depth);
2657 max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
2658 max_queues->max_sq_depth);
2660 ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
2661 max_queues->max_packet_tx_descs);
2662 ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
2663 max_queues->max_packet_rx_descs);
2666 /* round down to the nearest power of 2 */
2667 max_tx_queue_size = 1 << (flsl(max_tx_queue_size) - 1);
2668 max_rx_queue_size = 1 << (flsl(max_rx_queue_size) - 1);
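/*
 * Example: a device-reported limit of 5000 gives flsl(5000) == 13,
 * so the maximum queue size rounds down to 1 << 12 == 4096.
 */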
2670 tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
2672 rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
2675 tx_queue_size = 1 << (flsl(tx_queue_size) - 1);
2676 rx_queue_size = 1 << (flsl(rx_queue_size) - 1);
2678 ctx->max_tx_queue_size = max_tx_queue_size;
2679 ctx->max_rx_queue_size = max_rx_queue_size;
2680 ctx->tx_queue_size = tx_queue_size;
2681 ctx->rx_queue_size = rx_queue_size;
2687 ena_rss_init_default(struct ena_adapter *adapter)
2689 struct ena_com_dev *ena_dev = adapter->ena_dev;
2690 device_t dev = adapter->pdev;
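/*
 * The indirection table below spreads Rx flows across the IO queues
 * round-robin: with 4 queues, entries map to 0,1,2,3,0,1,... so each
 * queue serves an equal share of the ENA_RX_RSS_TABLE_SIZE hash
 * buckets (illustrative queue count).
 */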
2693 rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
2694 if (unlikely(rc != 0)) {
2695 device_printf(dev, "Cannot init indirect table\n");
2699 for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
2700 qid = i % adapter->num_io_queues;
2701 rc = ena_com_indirect_table_fill_entry(ena_dev, i,
2702 ENA_IO_RXQ_IDX(qid));
2703 if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) {
2704 device_printf(dev, "Cannot fill indirect table\n");
2705 goto err_rss_destroy;
2710 uint8_t rss_algo = rss_gethashalgo();
2711 if (rss_algo == RSS_HASH_TOEPLITZ) {
2712 uint8_t hash_key[RSS_KEYSIZE];
2714 rss_getkey(hash_key);
2715 rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
2716 hash_key, RSS_KEYSIZE, 0xFFFFFFFF);
2719 rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
2720 ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
2721 if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) {
2722 device_printf(dev, "Cannot fill hash function\n");
2723 goto err_rss_destroy;
2726 rc = ena_com_set_default_hash_ctrl(ena_dev);
2727 if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) {
2728 device_printf(dev, "Cannot fill hash control\n");
2729 goto err_rss_destroy;
2735 ena_com_rss_destroy(ena_dev);
2740 ena_rss_init_default_deferred(void *arg)
2742 struct ena_adapter *adapter;
2747 dc = devclass_find("ena");
2748 if (unlikely(dc == NULL)) {
2749 ena_trace(NULL, ENA_ALERT, "No devclass ena\n");
2753 max = devclass_get_maxunit(dc);
2754 while (max-- >= 0) {
2755 adapter = devclass_get_softc(dc, max);
2756 if (adapter != NULL) {
2757 rc = ena_rss_init_default(adapter);
2758 ENA_FLAG_SET_ATOMIC(ENA_FLAG_RSS_ACTIVE, adapter);
2759 if (unlikely(rc != 0)) {
2760 device_printf(adapter->pdev,
2761 "WARNING: RSS was not properly initialized,"
2762 " it will affect bandwidth\n");
2763 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_RSS_ACTIVE, adapter);
2768 SYSINIT(ena_rss_init, SI_SUB_KICK_SCHEDULER, SI_ORDER_SECOND, ena_rss_init_default_deferred, NULL);
2771 ena_config_host_info(struct ena_com_dev *ena_dev, device_t dev)
2773 struct ena_admin_host_info *host_info;
2777 /* Allocate only the host info */
2778 rc = ena_com_allocate_host_info(ena_dev);
2779 if (unlikely(rc != 0)) {
2780 ena_trace(NULL, ENA_ALERT, "Cannot allocate host info\n");
2784 host_info = ena_dev->host_attr.host_info;
2786 if (pci_get_id(dev, PCI_ID_RID, &rid) == 0)
2787 host_info->bdf = rid;
2788 host_info->os_type = ENA_ADMIN_OS_FREEBSD;
2789 host_info->kernel_ver = osreldate;
2791 sprintf(host_info->kernel_ver_str, "%d", osreldate);
2792 host_info->os_dist = 0;
2793 strncpy(host_info->os_dist_str, osrelease,
2794 sizeof(host_info->os_dist_str) - 1);
2796 host_info->driver_version =
2797 (DRV_MODULE_VER_MAJOR) |
2798 (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
2799 (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
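/*
 * The version is bit-packed into a single word; e.g. a 2.3.1 driver
 * would be encoded as 2 | (3 << MINOR_SHIFT) | (1 << SUB_MINOR_SHIFT)
 * (illustrative values).
 */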
2800 host_info->num_cpus = mp_ncpus;
2801 host_info->driver_supported_features =
2802 ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK;
2804 rc = ena_com_set_host_attributes(ena_dev);
2805 if (unlikely(rc != 0)) {
2806 if (rc == EOPNOTSUPP)
2807 ena_trace(NULL, ENA_WARNING, "Cannot set host attributes\n");
2809 ena_trace(NULL, ENA_ALERT, "Cannot set host attributes\n");
2817 ena_com_delete_host_info(ena_dev);
2821 ena_device_init(struct ena_adapter *adapter, device_t pdev,
2822 struct ena_com_dev_get_features_ctx *get_feat_ctx, int *wd_active)
2824 struct ena_com_dev* ena_dev = adapter->ena_dev;
2825 bool readless_supported;
2826 uint32_t aenq_groups;
2830 rc = ena_com_mmio_reg_read_request_init(ena_dev);
2831 if (unlikely(rc != 0)) {
2832 device_printf(pdev, "failed to init mmio read less\n");
* The PCIe configuration space revision ID indicates whether MMIO
* register read (readless) mode is disabled.
2840 readless_supported = !(pci_get_revid(pdev) & ENA_MMIO_DISABLE_REG_READ);
2841 ena_com_set_mmio_read_mode(ena_dev, readless_supported);
2843 rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
2844 if (unlikely(rc != 0)) {
2845 device_printf(pdev, "Can not reset device\n");
2846 goto err_mmio_read_less;
2849 rc = ena_com_validate_version(ena_dev);
2850 if (unlikely(rc != 0)) {
2851 device_printf(pdev, "device version is too low\n");
2852 goto err_mmio_read_less;
2855 dma_width = ena_com_get_dma_width(ena_dev);
2856 if (unlikely(dma_width < 0)) {
2857 device_printf(pdev, "Invalid dma width value %d", dma_width);
2859 goto err_mmio_read_less;
2861 adapter->dma_width = dma_width;
2863 /* ENA admin level init */
2864 rc = ena_com_admin_init(ena_dev, &aenq_handlers);
2865 if (unlikely(rc != 0)) {
2867 "Can not initialize ena admin queue with device\n");
2868 goto err_mmio_read_less;
* To enable the MSI-X interrupts, the driver needs to know the number
* of queues, so it uses polling mode to retrieve this information.
2876 ena_com_set_admin_polling_mode(ena_dev, true);
2878 ena_config_host_info(ena_dev, pdev);
2880 /* Get Device Attributes */
2881 rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
2882 if (unlikely(rc != 0)) {
2884 "Cannot get attribute for ena device rc: %d\n", rc);
2885 goto err_admin_init;
2888 aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
2889 BIT(ENA_ADMIN_FATAL_ERROR) |
2890 BIT(ENA_ADMIN_WARNING) |
2891 BIT(ENA_ADMIN_NOTIFICATION) |
2892 BIT(ENA_ADMIN_KEEP_ALIVE);
2894 aenq_groups &= get_feat_ctx->aenq.supported_groups;
2895 rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
2896 if (unlikely(rc != 0)) {
2897 device_printf(pdev, "Cannot configure aenq groups rc: %d\n", rc);
2898 goto err_admin_init;
2901 *wd_active = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
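/*
 * The watchdog is armed only if the device agreed to deliver
 * KEEP_ALIVE AENQ events; "!!" collapses the bit test to 0 or 1.
 */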
2906 ena_com_delete_host_info(ena_dev);
2907 ena_com_admin_destroy(ena_dev);
2909 ena_com_mmio_reg_read_request_destroy(ena_dev);
2914 static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter)
2916 struct ena_com_dev *ena_dev = adapter->ena_dev;
2919 rc = ena_enable_msix(adapter);
2920 if (unlikely(rc != 0)) {
2921 device_printf(adapter->pdev, "Error with MSI-X enablement\n");
2925 ena_setup_mgmnt_intr(adapter);
2927 rc = ena_request_mgmnt_irq(adapter);
2928 if (unlikely(rc != 0)) {
2929 device_printf(adapter->pdev, "Cannot setup mgmnt queue intr\n");
2930 goto err_disable_msix;
2933 ena_com_set_admin_polling_mode(ena_dev, false);
2935 ena_com_admin_aenq_enable(ena_dev);
2940 ena_disable_msix(adapter);
2945 /* Function called on ENA_ADMIN_KEEP_ALIVE event */
2946 static void ena_keep_alive_wd(void *adapter_data,
2947 struct ena_admin_aenq_entry *aenq_e)
2949 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
2950 struct ena_admin_aenq_keep_alive_desc *desc;
2955 desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
2957 rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low;
2958 tx_drops = ((uint64_t)desc->tx_drops_high << 32) | desc->tx_drops_low;
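/*
 * The device reports cumulative drop totals and counter(9) has no
 * "set" operation, so the counters are zeroed before the absolute
 * values are re-added.
 */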
2959 counter_u64_zero(adapter->hw_stats.rx_drops);
2960 counter_u64_add(adapter->hw_stats.rx_drops, rx_drops);
2961 counter_u64_zero(adapter->hw_stats.tx_drops);
2962 counter_u64_add(adapter->hw_stats.tx_drops, tx_drops);
2964 stime = getsbinuptime();
2965 atomic_store_rel_64(&adapter->keep_alive_timestamp, stime);
2968 /* Check for keep alive expiration */
2969 static void check_for_missing_keep_alive(struct ena_adapter *adapter)
2971 sbintime_t timestamp, time;
2973 if (adapter->wd_active == 0)
2976 if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
2979 timestamp = atomic_load_acq_64(&adapter->keep_alive_timestamp);
2980 time = getsbinuptime() - timestamp;
2981 if (unlikely(time > adapter->keep_alive_timeout)) {
2982 device_printf(adapter->pdev,
2983 "Keep alive watchdog timeout.\n");
2984 counter_u64_add(adapter->dev_stats.wd_expired, 1);
2985 ena_trigger_reset(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO);
2989 /* Check if admin queue is enabled */
2990 static void check_for_admin_com_state(struct ena_adapter *adapter)
2992 if (unlikely(ena_com_get_admin_running_state(adapter->ena_dev) ==
2994 device_printf(adapter->pdev,
2995 "ENA admin queue is not in running state!\n");
2996 counter_u64_add(adapter->dev_stats.admin_q_pause, 1);
2997 ena_trigger_reset(adapter, ENA_REGS_RESET_ADMIN_TO);
3002 check_for_rx_interrupt_queue(struct ena_adapter *adapter,
3003 struct ena_ring *rx_ring)
3005 if (likely(rx_ring->first_interrupt))
3008 if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
3011 rx_ring->no_interrupt_event_cnt++;
3013 if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
device_printf(adapter->pdev, "Potential MSIX issue on Rx side, "
"Queue = %d. Resetting the device\n", rx_ring->qid);
3016 ena_trigger_reset(adapter, ENA_REGS_RESET_MISS_INTERRUPT);
3024 check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
3025 struct ena_ring *tx_ring)
3027 struct bintime curtime, time;
3028 struct ena_tx_buffer *tx_buf;
3029 sbintime_t time_offset;
3030 uint32_t missed_tx = 0;
3033 getbinuptime(&curtime);
3035 for (i = 0; i < tx_ring->ring_size; i++) {
3036 tx_buf = &tx_ring->tx_buffer_info[i];
3038 if (bintime_isset(&tx_buf->timestamp) == 0)
3042 bintime_sub(&time, &tx_buf->timestamp);
3043 time_offset = bttosbt(time);
3045 if (unlikely(!tx_ring->first_interrupt &&
3046 time_offset > 2 * adapter->missing_tx_timeout)) {
* If the interrupt is still not received after the
* grace period, we schedule a reset.
3051 device_printf(adapter->pdev,
3052 "Potential MSIX issue on Tx side Queue = %d. "
3053 "Reset the device\n", tx_ring->qid);
3054 ena_trigger_reset(adapter,
3055 ENA_REGS_RESET_MISS_INTERRUPT);
3059 /* Check again if packet is still waiting */
3060 if (unlikely(time_offset > adapter->missing_tx_timeout)) {
3062 if (!tx_buf->print_once)
3063 ena_trace(NULL, ENA_WARNING, "Found a Tx that wasn't "
3064 "completed on time, qid %d, index %d.\n",
3067 tx_buf->print_once = true;
3072 if (unlikely(missed_tx > adapter->missing_tx_threshold)) {
3073 device_printf(adapter->pdev,
3074 "The number of lost tx completion is above the threshold "
3075 "(%d > %d). Reset the device\n",
3076 missed_tx, adapter->missing_tx_threshold);
3077 ena_trigger_reset(adapter, ENA_REGS_RESET_MISS_TX_CMPL);
3081 counter_u64_add(tx_ring->tx_stats.missing_tx_comp, missed_tx);
* Check for Tx descriptors that were not completed on time.
* The timeout is defined by "missing_tx_timeout".
* A reset will be performed if the number of incomplete
* transactions exceeds "missing_tx_threshold".
3093 check_for_missing_completions(struct ena_adapter *adapter)
3095 struct ena_ring *tx_ring;
3096 struct ena_ring *rx_ring;
/* Make sure another context isn't bringing the device up or down */
3102 if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
3105 if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))
3108 if (adapter->missing_tx_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3111 budget = adapter->missing_tx_max_queues;
3113 for (i = adapter->next_monitored_tx_qid; i < adapter->num_io_queues; i++) {
3114 tx_ring = &adapter->tx_ring[i];
3115 rx_ring = &adapter->rx_ring[i];
3117 rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
3118 if (unlikely(rc != 0))
3121 rc = check_for_rx_interrupt_queue(adapter, rx_ring);
3122 if (unlikely(rc != 0))
3132 adapter->next_monitored_tx_qid = i % adapter->num_io_queues;
3135 /* trigger rx cleanup after 2 consecutive detections */
3136 #define EMPTY_RX_REFILL 2
/* For the rare case where the device runs out of Rx descriptors and the
 * msix handler failed to refill new Rx descriptors (e.g., due to a lack
 * of memory).
 * This case will lead to a deadlock:
 * the device won't send interrupts since all the new Rx packets will be
 * dropped, and the msix handler won't allocate new Rx descriptors, so the
 * device won't be able to send new packets.
 *
 * When such a situation is detected, execute the rx cleanup task in
 * another thread.
3148 check_for_empty_rx_ring(struct ena_adapter *adapter)
3150 struct ena_ring *rx_ring;
3151 int i, refill_required;
3153 if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
3156 if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))
3159 for (i = 0; i < adapter->num_io_queues; i++) {
3160 rx_ring = &adapter->rx_ring[i];
3162 refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
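/*
 * ring_size - 1 free submission-queue entries means the ring holds no
 * Rx buffers at all: one slot is always kept unused to tell a full
 * ring apart from an empty one.
 */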
3163 if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
3164 rx_ring->empty_rx_queue++;
3166 if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
3167 counter_u64_add(rx_ring->rx_stats.empty_rx_ring,
3170 device_printf(adapter->pdev,
3171 "trigger refill for ring %d\n", i);
3173 taskqueue_enqueue(rx_ring->que->cleanup_tq,
3174 &rx_ring->que->cleanup_task);
3175 rx_ring->empty_rx_queue = 0;
3178 rx_ring->empty_rx_queue = 0;
3183 static void ena_update_hints(struct ena_adapter *adapter,
3184 struct ena_admin_ena_hw_hints *hints)
3186 struct ena_com_dev *ena_dev = adapter->ena_dev;
3188 if (hints->admin_completion_tx_timeout)
3189 ena_dev->admin_queue.completion_timeout =
3190 hints->admin_completion_tx_timeout * 1000;
3192 if (hints->mmio_read_timeout)
3193 /* convert to usec */
3194 ena_dev->mmio_read.reg_read_to =
3195 hints->mmio_read_timeout * 1000;
3197 if (hints->missed_tx_completion_count_threshold_to_reset)
3198 adapter->missing_tx_threshold =
3199 hints->missed_tx_completion_count_threshold_to_reset;
3201 if (hints->missing_tx_completion_timeout) {
3202 if (hints->missing_tx_completion_timeout ==
3203 ENA_HW_HINTS_NO_TIMEOUT)
3204 adapter->missing_tx_timeout = ENA_HW_HINTS_NO_TIMEOUT;
3206 adapter->missing_tx_timeout =
3207 SBT_1MS * hints->missing_tx_completion_timeout;
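/*
 * Hint timeouts arrive in milliseconds; multiplying by SBT_1MS
 * converts them into the sbintime_t units the watchdog compares
 * against.
 */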
3210 if (hints->driver_watchdog_timeout) {
3211 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3212 adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
3214 adapter->keep_alive_timeout =
3215 SBT_1MS * hints->driver_watchdog_timeout;
3220 * ena_copy_eni_metrics - Get and copy ENI metrics from the HW.
3221 * @adapter: ENA device adapter
3223 * Returns 0 on success, EOPNOTSUPP if current HW doesn't support those metrics
3224 * and other error codes on failure.
3226 * This function can possibly cause a race with other calls to the admin queue.
3227 * Because of that, the caller should either lock this function or make sure
3228 * that there is no race in the current context.
3231 ena_copy_eni_metrics(struct ena_adapter *adapter)
3233 static bool print_once = true;
3236 rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_metrics);
3239 if (rc == ENA_COM_UNSUPPORTED) {
3241 device_printf(adapter->pdev,
3242 "Retrieving ENI metrics is not supported.\n");
3245 ena_trace(NULL, ENA_DBG,
3246 "Retrieving ENI metrics is not supported.\n");
3249 device_printf(adapter->pdev,
3250 "Failed to get ENI metrics: %d\n", rc);
3258 ena_timer_service(void *data)
3260 struct ena_adapter *adapter = (struct ena_adapter *)data;
3261 struct ena_admin_host_info *host_info =
3262 adapter->ena_dev->host_attr.host_info;
3264 check_for_missing_keep_alive(adapter);
3266 check_for_admin_com_state(adapter);
3268 check_for_missing_completions(adapter);
3270 check_for_empty_rx_ring(adapter);
* User-controlled update of the ENI metrics.
* If the interval was set to 0, the stats shouldn't be updated at
* all. Otherwise, wait 'eni_metrics_sample_interval' seconds before
* updating them. As the timer service is executed every second, it
* is enough to increment the appropriate counter on each tick.
3281 if ((adapter->eni_metrics_sample_interval != 0) &&
3282 (++adapter->eni_metrics_sample_interval_cnt >=
3283 adapter->eni_metrics_sample_interval)) {
3285 * There is no race with other admin queue calls, as:
3286 * - Timer service runs after interface is up, so all
3287 * configuration calls to the admin queue are finished.
3288 * - After interface is up, the driver doesn't use (at least
3289 * for now) other functions writing to the admin queue.
3291 * It may change in the future, so in that situation, the lock
3292 * will be needed. ENA_LOCK_*() cannot be used for that purpose,
3293 * as callout ena_timer_service is protected by them. It could
3294 * lead to the deadlock if callout_drain() would hold the lock
3295 * before ena_copy_eni_metrics() was executed. It's advised to
3296 * use separate lock in that situation which will be used only
3297 * for the admin queue.
3299 (void)ena_copy_eni_metrics(adapter);
3300 adapter->eni_metrics_sample_interval_cnt = 0;
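/*
 * Example: with eni_metrics_sample_interval == 10, the counter
 * reaches the interval on every 10th one-second tick, so the metrics
 * are refreshed roughly every 10 seconds (illustrative value).
 */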
3304 if (host_info != NULL)
3305 ena_update_host_info(host_info, adapter->ifp);
3307 if (unlikely(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) {
3308 device_printf(adapter->pdev, "Trigger reset is on\n");
3309 taskqueue_enqueue(adapter->reset_tq, &adapter->reset_task);
3314 * Schedule another timeout one second from now.
3316 callout_schedule_sbt(&adapter->timer_service, SBT_1S, SBT_1S, 0);
3320 ena_destroy_device(struct ena_adapter *adapter, bool graceful)
3322 if_t ifp = adapter->ifp;
3323 struct ena_com_dev *ena_dev = adapter->ena_dev;
3326 if (!ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter))
3329 if_link_state_change(ifp, LINK_STATE_DOWN);
3331 callout_drain(&adapter->timer_service);
3333 dev_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
3335 ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
3338 ena_com_set_admin_running_state(ena_dev, false);
3340 if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
3344 * Stop the device from sending AENQ events (if the device was up, and
3345 * the trigger reset was on, ena_down already performs device reset)
3347 if (!(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter) && dev_up))
3348 ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
3350 ena_free_mgmnt_irq(adapter);
3352 ena_disable_msix(adapter);
3355 * IO rings resources should be freed because `ena_restore_device()`
3356 * calls (not directly) `ena_enable_msix()`, which re-allocates MSIX
3357 * vectors. The amount of MSIX vectors after destroy-restore may be
3358 * different than before. Therefore, IO rings resources should be
3359 * established from scratch each time.
3361 ena_free_all_io_rings_resources(adapter);
3363 ena_com_abort_admin_commands(ena_dev);
3365 ena_com_wait_for_abort_completion(ena_dev);
3367 ena_com_admin_destroy(ena_dev);
3369 ena_com_mmio_reg_read_request_destroy(ena_dev);
3371 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
3373 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter);
3374 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
3378 ena_device_validate_params(struct ena_adapter *adapter,
3379 struct ena_com_dev_get_features_ctx *get_feat_ctx)
3382 if (memcmp(get_feat_ctx->dev_attr.mac_addr, adapter->mac_addr,
3383 ETHER_ADDR_LEN) != 0) {
3384 device_printf(adapter->pdev,
3385 "Error, mac address are different\n");
3389 if (get_feat_ctx->dev_attr.max_mtu < if_getmtu(adapter->ifp)) {
3390 device_printf(adapter->pdev,
3391 "Error, device max mtu is smaller than ifp MTU\n");
3399 ena_restore_device(struct ena_adapter *adapter)
3401 struct ena_com_dev_get_features_ctx get_feat_ctx;
3402 struct ena_com_dev *ena_dev = adapter->ena_dev;
3403 if_t ifp = adapter->ifp;
3404 device_t dev = adapter->pdev;
3408 ENA_FLAG_SET_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);
3410 rc = ena_device_init(adapter, dev, &get_feat_ctx, &wd_active);
3412 device_printf(dev, "Cannot initialize device\n");
* Only enable the watchdog if it was enabled before the reset, so it
* won't override a value set by the user via sysctl.
3419 if (adapter->wd_active != 0)
3420 adapter->wd_active = wd_active;
3422 rc = ena_device_validate_params(adapter, &get_feat_ctx);
3424 device_printf(dev, "Validation of device parameters failed\n");
3425 goto err_device_destroy;
3428 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);
/* Make sure we don't have a race with the AENQ link state handler */
3430 if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter))
3431 if_link_state_change(ifp, LINK_STATE_UP);
3433 rc = ena_enable_msix_and_set_admin_interrupts(adapter);
3435 device_printf(dev, "Enable MSI-X failed\n");
3436 goto err_device_destroy;
* The effective number of used MSIX vectors should be the same as
* before `ena_destroy_device()`, if possible, or as close to it as
* the number of available vectors allows.
3444 if ((adapter->msix_vecs - ENA_ADMIN_MSIX_VEC) < adapter->num_io_queues)
3445 adapter->num_io_queues =
3446 adapter->msix_vecs - ENA_ADMIN_MSIX_VEC;
3448 /* Re-initialize rings basic information */
3449 ena_init_io_rings(adapter);
3451 /* If the interface was up before the reset bring it up */
3452 if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter)) {
3453 rc = ena_up(adapter);
3455 device_printf(dev, "Failed to create I/O queues\n");
3456 goto err_disable_msix;
3460 /* Indicate that device is running again and ready to work */
3461 ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
3463 if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter)) {
3465 * As the AENQ handlers weren't executed during reset because
3466 * the flag ENA_FLAG_DEVICE_RUNNING was turned off, the
* timestamp must be updated again. That will prevent the next
* reset caused by a missing keep alive.
3470 adapter->keep_alive_timestamp = getsbinuptime();
3471 callout_reset_sbt(&adapter->timer_service, SBT_1S, SBT_1S,
3472 ena_timer_service, (void *)adapter, 0);
3474 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
3477 "Device reset completed successfully, Driver info: %s\n", ena_version);
3482 ena_free_mgmnt_irq(adapter);
3483 ena_disable_msix(adapter);
3485 ena_com_abort_admin_commands(ena_dev);
3486 ena_com_wait_for_abort_completion(ena_dev);
3487 ena_com_admin_destroy(ena_dev);
3488 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
3489 ena_com_mmio_reg_read_request_destroy(ena_dev);
3491 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
3492 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);
device_printf(dev, "Reset attempt failed. Cannot reset the device\n");
3499 ena_reset_task(void *arg, int pending)
3501 struct ena_adapter *adapter = (struct ena_adapter *)arg;
3503 if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) {
3504 device_printf(adapter->pdev,
3505 "device reset scheduled but trigger_reset is off\n");
3509 ENA_LOCK_LOCK(adapter);
3510 ena_destroy_device(adapter, false);
3511 ena_restore_device(adapter);
3512 ENA_LOCK_UNLOCK(adapter);
3516 * ena_attach - Device Initialization Routine
3517 * @pdev: device information struct
* Returns 0 on success, or an error code on failure.
3521 * ena_attach initializes an adapter identified by a device structure.
3522 * The OS initialization, configuring of the adapter private structure,
3523 * and a hardware reset occur.
3526 ena_attach(device_t pdev)
3528 struct ena_com_dev_get_features_ctx get_feat_ctx;
3529 struct ena_llq_configurations llq_config;
3530 struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
3531 static int version_printed;
3532 struct ena_adapter *adapter;
3533 struct ena_com_dev *ena_dev = NULL;
3534 uint32_t max_num_io_queues;
3537 adapter = device_get_softc(pdev);
3538 adapter->pdev = pdev;
3540 ENA_LOCK_INIT(adapter);
3543 * Set up the timer service - driver is responsible for avoiding
3544 * concurrency, as the callout won't be using any locking inside.
3546 callout_init(&adapter->timer_service, true);
3547 adapter->keep_alive_timeout = DEFAULT_KEEP_ALIVE_TO;
3548 adapter->missing_tx_timeout = DEFAULT_TX_CMP_TO;
3549 adapter->missing_tx_max_queues = DEFAULT_TX_MONITORED_QUEUES;
3550 adapter->missing_tx_threshold = DEFAULT_TX_CMP_THRESHOLD;
3552 if (version_printed++ == 0)
3553 device_printf(pdev, "%s\n", ena_version);
3555 /* Allocate memory for ena_dev structure */
3556 ena_dev = malloc(sizeof(struct ena_com_dev), M_DEVBUF,
3559 adapter->ena_dev = ena_dev;
3560 ena_dev->dmadev = pdev;
3562 rid = PCIR_BAR(ENA_REG_BAR);
3563 adapter->memory = NULL;
3564 adapter->registers = bus_alloc_resource_any(pdev, SYS_RES_MEMORY,
3566 if (unlikely(adapter->registers == NULL)) {
3568 "unable to allocate bus resource: registers!\n");
3573 ena_dev->bus = malloc(sizeof(struct ena_bus), M_DEVBUF,
3576 /* Store register resources */
3577 ((struct ena_bus*)(ena_dev->bus))->reg_bar_t =
3578 rman_get_bustag(adapter->registers);
3579 ((struct ena_bus*)(ena_dev->bus))->reg_bar_h =
3580 rman_get_bushandle(adapter->registers);
3582 if (unlikely(((struct ena_bus*)(ena_dev->bus))->reg_bar_h == 0)) {
3583 device_printf(pdev, "failed to pmap registers bar\n");
3588 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3590 /* Initially clear all the flags */
3591 ENA_FLAG_ZERO(adapter);
3593 /* Device initialization */
3594 rc = ena_device_init(adapter, pdev, &get_feat_ctx, &adapter->wd_active);
3595 if (unlikely(rc != 0)) {
3596 device_printf(pdev, "ENA device init failed! (err: %d)\n", rc);
3601 set_default_llq_configurations(&llq_config);
3603 rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq,
3605 if (unlikely(rc != 0)) {
3606 device_printf(pdev, "failed to set placement policy\n");
3610 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
3611 adapter->disable_meta_caching =
3612 !!(get_feat_ctx.llq.accel_mode.u.get.supported_flags &
3613 BIT(ENA_ADMIN_DISABLE_META_CACHING));
3615 adapter->keep_alive_timestamp = getsbinuptime();
3617 adapter->tx_offload_cap = get_feat_ctx.offload.tx;
3619 memcpy(adapter->mac_addr, get_feat_ctx.dev_attr.mac_addr,
3622 calc_queue_ctx.pdev = pdev;
3623 calc_queue_ctx.ena_dev = ena_dev;
3624 calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
3626 /* Calculate initial and maximum IO queue number and size */
3627 max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev,
3629 rc = ena_calc_io_queue_size(&calc_queue_ctx);
3630 if (unlikely((rc != 0) || (max_num_io_queues <= 0))) {
3635 adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
3636 adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size;
3637 adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
3638 adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
3639 adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
3640 adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
3642 adapter->max_num_io_queues = max_num_io_queues;
3644 adapter->buf_ring_size = ENA_DEFAULT_BUF_RING_SIZE;
3646 adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;
3648 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
3650 /* set up dma tags for rx and tx buffers */
3651 rc = ena_setup_tx_dma_tag(adapter);
3652 if (unlikely(rc != 0)) {
3653 device_printf(pdev, "Failed to create TX DMA tag\n");
3657 rc = ena_setup_rx_dma_tag(adapter);
3658 if (unlikely(rc != 0)) {
3659 device_printf(pdev, "Failed to create RX DMA tag\n");
3660 goto err_tx_tag_free;
* The number of requested MSIX vectors is equal to
* adapter::max_num_io_queues (see `ena_enable_msix()`), plus a constant
* number of admin queue interrupts. The former is initially determined
* by HW capabilities (see `ena_calc_max_io_queue_num()`) but may not be
* achieved if there are not enough system resources. By default, the
* number of effectively used IO queues is the same, but later on it can
* be limited by the user via the sysctl interface.
3672 rc = ena_enable_msix_and_set_admin_interrupts(adapter);
3673 if (unlikely(rc != 0)) {
3675 "Failed to enable and set the admin interrupts\n");
/* By default, all allocated MSIX vectors are actively used */
3679 adapter->num_io_queues = adapter->msix_vecs - ENA_ADMIN_MSIX_VEC;
3681 /* initialize rings basic information */
3682 ena_init_io_rings(adapter);
3684 /* setup network interface */
3685 rc = ena_setup_ifnet(pdev, adapter, &get_feat_ctx);
3686 if (unlikely(rc != 0)) {
3687 device_printf(pdev, "Error with network interface setup\n");
3691 /* Initialize reset task queue */
3692 TASK_INIT(&adapter->reset_task, 0, ena_reset_task, adapter);
3693 adapter->reset_tq = taskqueue_create("ena_reset_enqueue",
3694 M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->reset_tq);
3695 taskqueue_start_threads(&adapter->reset_tq, 1, PI_NET,
3696 "%s rstq", device_get_nameunit(adapter->pdev));
3698 /* Initialize statistics */
3699 ena_alloc_counters((counter_u64_t *)&adapter->dev_stats,
3700 sizeof(struct ena_stats_dev));
3701 ena_alloc_counters((counter_u64_t *)&adapter->hw_stats,
3702 sizeof(struct ena_hw_stats));
3703 ena_sysctl_add_nodes(adapter);
3706 rc = ena_netmap_attach(adapter);
3708 device_printf(pdev, "netmap attach failed: %d\n", rc);
3711 #endif /* DEV_NETMAP */
3713 /* Tell the stack that the interface is not active */
3714 if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
3715 ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
3721 ether_ifdetach(adapter->ifp);
3722 #endif /* DEV_NETMAP */
3724 ena_com_dev_reset(adapter->ena_dev, ENA_REGS_RESET_INIT_ERR);
3725 ena_free_mgmnt_irq(adapter);
3726 ena_disable_msix(adapter);
3728 ena_free_all_io_rings_resources(adapter);
3729 ena_free_rx_dma_tag(adapter);
3731 ena_free_tx_dma_tag(adapter);
3733 ena_com_admin_destroy(ena_dev);
3734 ena_com_delete_host_info(ena_dev);
3735 ena_com_mmio_reg_read_request_destroy(ena_dev);
3737 free(ena_dev->bus, M_DEVBUF);
3738 ena_free_pci_resources(adapter);
3740 free(ena_dev, M_DEVBUF);
3746 * ena_detach - Device Removal Routine
3747 * @pdev: device information struct
3749 * ena_detach is called by the device subsystem to alert the driver
3750 * that it should release a PCI device.
3753 ena_detach(device_t pdev)
3755 struct ena_adapter *adapter = device_get_softc(pdev);
3756 struct ena_com_dev *ena_dev = adapter->ena_dev;
/* Make sure VLANs are not using the driver */
3760 if (adapter->ifp->if_vlantrunk != NULL) {
device_printf(adapter->pdev, "VLAN is in use, detach first\n");
3765 ether_ifdetach(adapter->ifp);
3767 /* Stop timer service */
3768 ENA_LOCK_LOCK(adapter);
3769 callout_drain(&adapter->timer_service);
3770 ENA_LOCK_UNLOCK(adapter);
3772 /* Release reset task */
3773 while (taskqueue_cancel(adapter->reset_tq, &adapter->reset_task, NULL))
3774 taskqueue_drain(adapter->reset_tq, &adapter->reset_task);
3775 taskqueue_free(adapter->reset_tq);
3777 ENA_LOCK_LOCK(adapter);
3779 ena_destroy_device(adapter, true);
3780 ENA_LOCK_UNLOCK(adapter);
3783 netmap_detach(adapter->ifp);
3784 #endif /* DEV_NETMAP */
3786 ena_free_counters((counter_u64_t *)&adapter->hw_stats,
3787 sizeof(struct ena_hw_stats));
3788 ena_free_counters((counter_u64_t *)&adapter->dev_stats,
3789 sizeof(struct ena_stats_dev));
3791 rc = ena_free_rx_dma_tag(adapter);
3792 if (unlikely(rc != 0))
3793 device_printf(adapter->pdev,
3794 "Unmapped RX DMA tag associations\n");
3796 rc = ena_free_tx_dma_tag(adapter);
3797 if (unlikely(rc != 0))
3798 device_printf(adapter->pdev,
3799 "Unmapped TX DMA tag associations\n");
3801 ena_free_irqs(adapter);
3803 ena_free_pci_resources(adapter);
3805 if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter)))
3806 ena_com_rss_destroy(ena_dev);
3808 ena_com_delete_host_info(ena_dev);
3810 ENA_LOCK_DESTROY(adapter);
3812 if_free(adapter->ifp);
3814 if (ena_dev->bus != NULL)
3815 free(ena_dev->bus, M_DEVBUF);
3817 if (ena_dev != NULL)
3818 free(ena_dev, M_DEVBUF);
3820 return (bus_generic_detach(pdev));
3823 /******************************************************************************
3824 ******************************** AENQ Handlers *******************************
3825 *****************************************************************************/
3827 * ena_update_on_link_change:
3828 * Notify the network interface about the change in link status
3831 ena_update_on_link_change(void *adapter_data,
3832 struct ena_admin_aenq_entry *aenq_e)
3834 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3835 struct ena_admin_aenq_link_change_desc *aenq_desc;
3839 aenq_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;
3841 status = aenq_desc->flags &
3842 ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
3845 device_printf(adapter->pdev, "link is UP\n");
3846 ENA_FLAG_SET_ATOMIC(ENA_FLAG_LINK_UP, adapter);
3847 if (!ENA_FLAG_ISSET(ENA_FLAG_ONGOING_RESET, adapter))
3848 if_link_state_change(ifp, LINK_STATE_UP);
3850 device_printf(adapter->pdev, "link is DOWN\n");
3851 if_link_state_change(ifp, LINK_STATE_DOWN);
3852 ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_LINK_UP, adapter);
3856 static void ena_notification(void *adapter_data,
3857 struct ena_admin_aenq_entry *aenq_e)
3859 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3860 struct ena_admin_ena_hw_hints *hints;
3862 ENA_WARN(NULL, aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
3863 "Invalid group(%x) expected %x\n", aenq_e->aenq_common_desc.group,
3864 ENA_ADMIN_NOTIFICATION);
3866 switch (aenq_e->aenq_common_desc.syndrome) {
3867 case ENA_ADMIN_UPDATE_HINTS:
3869 (struct ena_admin_ena_hw_hints *)(&aenq_e->inline_data_w4);
3870 ena_update_hints(adapter, hints);
3873 device_printf(adapter->pdev,
3874 "Invalid aenq notification link state %d\n",
3875 aenq_e->aenq_common_desc.syndrome);
* This handler will be called for an unknown event group or for events
* with unimplemented handlers.
3883 unimplemented_aenq_handler(void *adapter_data,
3884 struct ena_admin_aenq_entry *aenq_e)
3886 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3888 device_printf(adapter->pdev,
3889 "Unknown event was received or event with unimplemented handler\n");
3892 static struct ena_aenq_handlers aenq_handlers = {
3894 [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
3895 [ENA_ADMIN_NOTIFICATION] = ena_notification,
3896 [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
3898 .unimplemented_handler = unimplemented_aenq_handler
3901 /*********************************************************************
3902 * FreeBSD Device Interface Entry Points
3903 *********************************************************************/
3905 static device_method_t ena_methods[] = {
3906 /* Device interface */
3907 DEVMETHOD(device_probe, ena_probe),
3908 DEVMETHOD(device_attach, ena_attach),
3909 DEVMETHOD(device_detach, ena_detach),
3913 static driver_t ena_driver = {
3914 "ena", ena_methods, sizeof(struct ena_adapter),
3917 devclass_t ena_devclass;
3918 DRIVER_MODULE(ena, pci, ena_driver, ena_devclass, 0, 0);
3919 MODULE_PNP_INFO("U16:vendor;U16:device", pci, ena, ena_vendor_info_array,
3920 nitems(ena_vendor_info_array) - 1);
3921 MODULE_DEPEND(ena, pci, 1, 1, 1);
3922 MODULE_DEPEND(ena, ether, 1, 1, 1);
3924 MODULE_DEPEND(ena, netmap, 1, 1, 1);
3925 #endif /* DEV_NETMAP */
3927 /*********************************************************************/