1 /*-
2  * BSD LICENSE
3  *
4  * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  *
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/bus.h>
36 #include <sys/endian.h>
37 #include <sys/kernel.h>
38 #include <sys/kthread.h>
39 #include <sys/malloc.h>
40 #include <sys/mbuf.h>
41 #include <sys/module.h>
42 #include <sys/rman.h>
43 #include <sys/smp.h>
44 #include <sys/socket.h>
45 #include <sys/sockio.h>
46 #include <sys/sysctl.h>
47 #include <sys/taskqueue.h>
48 #include <sys/time.h>
49 #include <sys/eventhandler.h>
50
51 #include <machine/bus.h>
52 #include <machine/resource.h>
53 #include <machine/in_cksum.h>
54
55 #include <net/bpf.h>
56 #include <net/ethernet.h>
57 #include <net/if.h>
58 #include <net/if_var.h>
59 #include <net/if_arp.h>
60 #include <net/if_dl.h>
61 #include <net/if_media.h>
62 #include <net/rss_config.h>
63 #include <net/if_types.h>
64 #include <net/if_vlan_var.h>
65
66 #include <netinet/in_rss.h>
67 #include <netinet/in_systm.h>
68 #include <netinet/in.h>
69 #include <netinet/if_ether.h>
70 #include <netinet/ip.h>
71 #include <netinet/ip6.h>
72 #include <netinet/tcp.h>
73 #include <netinet/udp.h>
74
75 #include <dev/pci/pcivar.h>
76 #include <dev/pci/pcireg.h>
77
78 #include "ena.h"
79 #include "ena_sysctl.h"
80
81 /*********************************************************
82  *  Function prototypes
83  *********************************************************/
84 static int      ena_probe(device_t);
85 static void     ena_intr_msix_mgmnt(void *);
86 static int      ena_allocate_pci_resources(struct ena_adapter*);
87 static void     ena_free_pci_resources(struct ena_adapter *);
88 static int      ena_change_mtu(if_t, int);
89 static inline void ena_alloc_counters(counter_u64_t *, int);
90 static inline void ena_free_counters(counter_u64_t *, int);
91 static inline void ena_reset_counters(counter_u64_t *, int);
92 static void     ena_init_io_rings_common(struct ena_adapter *,
93     struct ena_ring *, uint16_t);
94 static void     ena_init_io_rings(struct ena_adapter *);
95 static void     ena_free_io_ring_resources(struct ena_adapter *, unsigned int);
96 static void     ena_free_all_io_rings_resources(struct ena_adapter *);
97 static int      ena_setup_tx_dma_tag(struct ena_adapter *);
98 static int      ena_free_tx_dma_tag(struct ena_adapter *);
99 static int      ena_setup_rx_dma_tag(struct ena_adapter *);
100 static int      ena_free_rx_dma_tag(struct ena_adapter *);
101 static int      ena_setup_tx_resources(struct ena_adapter *, int);
102 static void     ena_free_tx_resources(struct ena_adapter *, int);
103 static int      ena_setup_all_tx_resources(struct ena_adapter *);
104 static void     ena_free_all_tx_resources(struct ena_adapter *);
105 static inline int validate_rx_req_id(struct ena_ring *, uint16_t);
106 static int      ena_setup_rx_resources(struct ena_adapter *, unsigned int);
107 static void     ena_free_rx_resources(struct ena_adapter *, unsigned int);
108 static int      ena_setup_all_rx_resources(struct ena_adapter *);
109 static void     ena_free_all_rx_resources(struct ena_adapter *);
110 static inline int ena_alloc_rx_mbuf(struct ena_adapter *, struct ena_ring *,
111     struct ena_rx_buffer *);
112 static void     ena_free_rx_mbuf(struct ena_adapter *, struct ena_ring *,
113     struct ena_rx_buffer *);
114 static int      ena_refill_rx_bufs(struct ena_ring *, uint32_t);
115 static void     ena_free_rx_bufs(struct ena_adapter *, unsigned int);
116 static void     ena_refill_all_rx_bufs(struct ena_adapter *);
117 static void     ena_free_all_rx_bufs(struct ena_adapter *);
118 static void     ena_free_tx_bufs(struct ena_adapter *, unsigned int);
119 static void     ena_free_all_tx_bufs(struct ena_adapter *);
120 static void     ena_destroy_all_tx_queues(struct ena_adapter *);
121 static void     ena_destroy_all_rx_queues(struct ena_adapter *);
122 static void     ena_destroy_all_io_queues(struct ena_adapter *);
123 static int      ena_create_io_queues(struct ena_adapter *);
124 static int      ena_tx_cleanup(struct ena_ring *);
125 static void     ena_deferred_rx_cleanup(void *, int);
126 static int      ena_rx_cleanup(struct ena_ring *);
127 static inline int validate_tx_req_id(struct ena_ring *, uint16_t);
128 static void     ena_rx_hash_mbuf(struct ena_ring *, struct ena_com_rx_ctx *,
129     struct mbuf *);
130 static struct mbuf* ena_rx_mbuf(struct ena_ring *, struct ena_com_rx_buf_info *,
131     struct ena_com_rx_ctx *, uint16_t *);
132 static inline void ena_rx_checksum(struct ena_ring *, struct ena_com_rx_ctx *,
133     struct mbuf *);
134 static void     ena_handle_msix(void *);
135 static int      ena_enable_msix(struct ena_adapter *);
136 static void     ena_setup_mgmnt_intr(struct ena_adapter *);
137 static void     ena_setup_io_intr(struct ena_adapter *);
138 static int      ena_request_mgmnt_irq(struct ena_adapter *);
139 static int      ena_request_io_irq(struct ena_adapter *);
140 static void     ena_free_mgmnt_irq(struct ena_adapter *);
141 static void     ena_free_io_irq(struct ena_adapter *);
142 static void     ena_free_irqs(struct ena_adapter*);
143 static void     ena_disable_msix(struct ena_adapter *);
144 static void     ena_unmask_all_io_irqs(struct ena_adapter *);
145 static int      ena_rss_configure(struct ena_adapter *);
146 static int      ena_up_complete(struct ena_adapter *);
147 static int      ena_up(struct ena_adapter *);
148 static void     ena_down(struct ena_adapter *);
149 static uint64_t ena_get_counter(if_t, ift_counter);
150 static int      ena_media_change(if_t);
151 static void     ena_media_status(if_t, struct ifmediareq *);
152 static void     ena_init(void *);
153 static int      ena_ioctl(if_t, u_long, caddr_t);
154 static int      ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *);
155 static void     ena_update_host_info(struct ena_admin_host_info *, if_t);
156 static void     ena_update_hwassist(struct ena_adapter *);
157 static int      ena_setup_ifnet(device_t, struct ena_adapter *,
158     struct ena_com_dev_get_features_ctx *);
159 static void     ena_tx_csum(struct ena_com_tx_ctx *, struct mbuf *);
160 static int      ena_check_and_collapse_mbuf(struct ena_ring *tx_ring,
161     struct mbuf **mbuf);
162 static int      ena_xmit_mbuf(struct ena_ring *, struct mbuf **);
163 static void     ena_start_xmit(struct ena_ring *);
164 static int      ena_mq_start(if_t, struct mbuf *);
165 static void     ena_deferred_mq_start(void *, int);
166 static void     ena_qflush(if_t);
167 static int      ena_calc_io_queue_num(struct ena_adapter *,
168     struct ena_com_dev_get_features_ctx *);
169 static int      ena_calc_queue_size(struct ena_adapter *, uint16_t *,
170     uint16_t *, struct ena_com_dev_get_features_ctx *);
171 static int      ena_rss_init_default(struct ena_adapter *);
172 static void     ena_rss_init_default_deferred(void *);
173 static void     ena_config_host_info(struct ena_com_dev *);
174 static int      ena_attach(device_t);
175 static int      ena_detach(device_t);
176 static int      ena_device_init(struct ena_adapter *, device_t,
177     struct ena_com_dev_get_features_ctx *, int *);
178 static int      ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *,
179     int);
180 static void ena_update_on_link_change(void *, struct ena_admin_aenq_entry *);
181 static void     unimplemented_aenq_handler(void *,
182     struct ena_admin_aenq_entry *);
183 static void     ena_timer_service(void *);
184
185 static char ena_version[] = DEVICE_NAME DRV_MODULE_NAME " v" DRV_MODULE_VERSION;
186
187 static SYSCTL_NODE(_hw, OID_AUTO, ena, CTLFLAG_RD, 0, "ENA driver parameters");
188
189 /*
190  * Tuneable number of buffers in the buf-ring (drbr)
191  */
192 static int ena_buf_ring_size = 4096;
193 SYSCTL_INT(_hw_ena, OID_AUTO, buf_ring_size, CTLFLAG_RWTUN,
194     &ena_buf_ring_size, 0, "Size of the bufring");
195
196 /*
197  * Logging level for changing verbosity of the output
198  */
199 int ena_log_level = ENA_ALERT | ENA_WARNING;
200 SYSCTL_INT(_hw_ena, OID_AUTO, log_level, CTLFLAG_RWTUN,
201     &ena_log_level, 0, "Logging level indicating verbosity of the logs");
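/*
 * Both knobs above are CTLFLAG_RWTUN, so they can be set as loader
 * tunables (for example hw.ena.buf_ring_size="2048" in /boot/loader.conf)
 * or adjusted at runtime with sysctl(8), e.g. "sysctl hw.ena.log_level=...".
 * The values shown here are illustrative only.
 */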
202
203 static ena_vendor_info_t ena_vendor_info_array[] = {
204     { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF, 0},
205     { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_LLQ_PF, 0},
206     { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF, 0},
207     { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_LLQ_VF, 0},
208     /* Last entry */
209     { 0, 0, 0 }
210 };
211
212 /*
213  * Contains pointers to event handlers, e.g. link state change.
214  */
215 static struct ena_aenq_handlers aenq_handlers;
216
217 void
218 ena_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
219 {
220         if (error != 0)
221                 return;
222         *(bus_addr_t *) arg = segs[0].ds_addr;
223 }
224
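/*
 * ena_dma_alloc - allocate a DMA-coherent buffer of at least 'size' bytes.
 * The request is rounded up to a whole number of pages (e.g. a 100-byte
 * request becomes PAGE_SIZE bytes), a single-segment tag limited to the
 * device's addressable range is created, zeroed memory is allocated and
 * loaded, and the resulting bus address is recorded in dma->paddr via
 * ena_dmamap_callback().
 */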
225 int
226 ena_dma_alloc(device_t dmadev, bus_size_t size,
227     ena_mem_handle_t *dma , int mapflags)
228 {
229         struct ena_adapter* adapter = device_get_softc(dmadev);
230         uint32_t maxsize;
231         uint64_t dma_space_addr;
232         int error;
233
234         maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE;
235
236         dma_space_addr = ENA_DMA_BIT_MASK(adapter->dma_width);
237         if (unlikely(dma_space_addr == 0))
238                 dma_space_addr = BUS_SPACE_MAXADDR;
239
240         error = bus_dma_tag_create(bus_get_dma_tag(dmadev), /* parent */
241             8, 0,             /* alignment, bounds              */
242             dma_space_addr,   /* lowaddr of exclusion window    */
243             BUS_SPACE_MAXADDR,/* highaddr of exclusion window   */
244             NULL, NULL,       /* filter, filterarg              */
245             maxsize,          /* maxsize                        */
246             1,                /* nsegments                      */
247             maxsize,          /* maxsegsize                     */
248             BUS_DMA_ALLOCNOW, /* flags                          */
249             NULL,             /* lockfunc                       */
250             NULL,             /* lockarg                        */
251             &dma->tag);
252         if (unlikely(error != 0)) {
253                 ena_trace(ENA_ALERT, "bus_dma_tag_create failed: %d\n", error);
254                 goto fail_tag;
255         }
256
257         error = bus_dmamem_alloc(dma->tag, (void**) &dma->vaddr,
258             BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->map);
259         if (unlikely(error != 0)) {
260                 ena_trace(ENA_ALERT, "bus_dmamem_alloc(%ju) failed: %d\n",
261                     (uintmax_t)size, error);
262                 goto fail_map_create;
263         }
264
265         dma->paddr = 0;
266         error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr,
267             size, ena_dmamap_callback, &dma->paddr, mapflags);
268         if (unlikely((error != 0) || (dma->paddr == 0))) {
269                 ena_trace(ENA_ALERT, ": bus_dmamap_load failed: %d\n", error);
270                 goto fail_map_load;
271         }
272
273         return (0);
274
275 fail_map_load:
276         bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
277 fail_map_create:
278         bus_dma_tag_destroy(dma->tag);
279 fail_tag:
280         dma->tag = NULL;
281
282         return (error);
283 }
284
285 static int
286 ena_allocate_pci_resources(struct ena_adapter* adapter)
287 {
288         device_t pdev = adapter->pdev;
289         int rid;
290
291         rid = PCIR_BAR(ENA_REG_BAR);
292         adapter->memory = NULL;
293         adapter->registers = bus_alloc_resource_any(pdev, SYS_RES_MEMORY,
294             &rid, RF_ACTIVE);
295         if (unlikely(adapter->registers == NULL)) {
296                 device_printf(pdev, "Unable to allocate bus resource: "
297                     "registers\n");
298                 return (ENXIO);
299         }
300
301         return (0);
302 }
303
304 static void
305 ena_free_pci_resources(struct ena_adapter *adapter)
306 {
307         device_t pdev = adapter->pdev;
308
309         if (adapter->memory != NULL) {
310                 bus_release_resource(pdev, SYS_RES_MEMORY,
311                     PCIR_BAR(ENA_MEM_BAR), adapter->memory);
312         }
313
314         if (adapter->registers != NULL) {
315                 bus_release_resource(pdev, SYS_RES_MEMORY,
316                     PCIR_BAR(ENA_REG_BAR), adapter->registers);
317         }
318 }
319
320 static int
321 ena_probe(device_t dev)
322 {
323         ena_vendor_info_t *ent;
324         char            adapter_name[60];
325         uint16_t        pci_vendor_id = 0;
326         uint16_t        pci_device_id = 0;
327
328         pci_vendor_id = pci_get_vendor(dev);
329         pci_device_id = pci_get_device(dev);
330
331         ent = ena_vendor_info_array;
332         while (ent->vendor_id != 0) {
333                 if ((pci_vendor_id == ent->vendor_id) &&
334                     (pci_device_id == ent->device_id)) {
335                         ena_trace(ENA_DBG, "vendor=%x device=%x ",
336                             pci_vendor_id, pci_device_id);
337
338                         sprintf(adapter_name, DEVICE_DESC);
339                         device_set_desc_copy(dev, adapter_name);
340                         return (BUS_PROBE_DEFAULT);
341                 }
342
343                 ent++;
344
345         }
346
347         return (ENXIO);
348 }
349
350 static int
351 ena_change_mtu(if_t ifp, int new_mtu)
352 {
353         struct ena_adapter *adapter = if_getsoftc(ifp);
354         int rc;
355
356         if ((new_mtu > adapter->max_mtu) || (new_mtu < ENA_MIN_MTU)) {
357                 device_printf(adapter->pdev, "Invalid MTU setting. "
358                     "new_mtu: %d max mtu: %d min mtu: %d\n",
359                     new_mtu, adapter->max_mtu, ENA_MIN_MTU);
360                 return (EINVAL);
361         }
362
363         rc = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
364         if (likely(rc == 0)) {
365                 ena_trace(ENA_DBG, "set MTU to %d\n", new_mtu);
366                 if_setmtu(ifp, new_mtu);
367         } else {
368                 device_printf(adapter->pdev, "Failed to set MTU to %d\n",
369                     new_mtu);
370         }
371
372         return (rc);
373 }
374
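/*
 * The three helpers below treat a statistics structure as a flat array of
 * counter_u64_t and walk it from 'begin' for 'size' bytes; they therefore
 * assume the structure contains nothing but counter_u64_t fields (see the
 * tx_stats/rx_stats usage in ena_init_io_rings()).
 */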
375 static inline void
376 ena_alloc_counters(counter_u64_t *begin, int size)
377 {
378         counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
379
380         for (; begin < end; ++begin)
381                 *begin = counter_u64_alloc(M_WAITOK);
382 }
383
384 static inline void
385 ena_free_counters(counter_u64_t *begin, int size)
386 {
387         counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
388
389         for (; begin < end; ++begin)
390                 counter_u64_free(*begin);
391 }
392
393 static inline void
394 ena_reset_counters(counter_u64_t *begin, int size)
395 {
396         counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
397
398         for (; begin < end; ++begin)
399                 counter_u64_zero(*begin);
400 }
401
402 static void
403 ena_init_io_rings_common(struct ena_adapter *adapter, struct ena_ring *ring,
404     uint16_t qid)
405 {
406
407         ring->qid = qid;
408         ring->adapter = adapter;
409         ring->ena_dev = adapter->ena_dev;
410 }
411
412 static void
413 ena_init_io_rings(struct ena_adapter *adapter)
414 {
415         struct ena_com_dev *ena_dev;
416         struct ena_ring *txr, *rxr;
417         struct ena_que *que;
418         int i;
419
420         ena_dev = adapter->ena_dev;
421
422         for (i = 0; i < adapter->num_queues; i++) {
423                 txr = &adapter->tx_ring[i];
424                 rxr = &adapter->rx_ring[i];
425
426                 /* TX/RX common ring state */
427                 ena_init_io_rings_common(adapter, txr, i);
428                 ena_init_io_rings_common(adapter, rxr, i);
429
430                 /* TX specific ring state */
431                 txr->ring_size = adapter->tx_ring_size;
432                 txr->tx_max_header_size = ena_dev->tx_max_header_size;
433                 txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
434                 txr->smoothed_interval =
435                     ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);
436
437                 /* Allocate a buf ring */
438                 txr->br = buf_ring_alloc(ena_buf_ring_size, M_DEVBUF,
439                     M_WAITOK, &txr->ring_mtx);
440
441                 /* Alloc TX statistics. */
442                 ena_alloc_counters((counter_u64_t *)&txr->tx_stats,
443                     sizeof(txr->tx_stats));
444
445                 /* RX specific ring state */
446                 rxr->ring_size = adapter->rx_ring_size;
447                 rxr->smoothed_interval =
448                     ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
449
450                 /* Alloc RX statistics. */
451                 ena_alloc_counters((counter_u64_t *)&rxr->rx_stats,
452                     sizeof(rxr->rx_stats));
453
454                 /* Initialize locks */
455                 snprintf(txr->mtx_name, nitems(txr->mtx_name), "%s:tx(%d)",
456                     device_get_nameunit(adapter->pdev), i);
457                 snprintf(rxr->mtx_name, nitems(rxr->mtx_name), "%s:rx(%d)",
458                     device_get_nameunit(adapter->pdev), i);
459
460                 mtx_init(&txr->ring_mtx, txr->mtx_name, NULL, MTX_DEF);
461                 mtx_init(&rxr->ring_mtx, rxr->mtx_name, NULL, MTX_DEF);
462
463                 que = &adapter->que[i];
464                 que->adapter = adapter;
465                 que->id = i;
466                 que->tx_ring = txr;
467                 que->rx_ring = rxr;
468
469                 txr->que = que;
470                 rxr->que = que;
471
472                 rxr->empty_rx_queue = 0;
473         }
474 }
475
476 static void
477 ena_free_io_ring_resources(struct ena_adapter *adapter, unsigned int qid)
478 {
479         struct ena_ring *txr = &adapter->tx_ring[qid];
480         struct ena_ring *rxr = &adapter->rx_ring[qid];
481
482         ena_free_counters((counter_u64_t *)&txr->tx_stats,
483             sizeof(txr->tx_stats));
484         ena_free_counters((counter_u64_t *)&rxr->rx_stats,
485             sizeof(rxr->rx_stats));
486
487         ENA_RING_MTX_LOCK(txr);
488         drbr_free(txr->br, M_DEVBUF);
489         ENA_RING_MTX_UNLOCK(txr);
490
491         mtx_destroy(&txr->ring_mtx);
492         mtx_destroy(&rxr->ring_mtx);
493 }
494
495 static void
496 ena_free_all_io_rings_resources(struct ena_adapter *adapter)
497 {
498         int i;
499
500         for (i = 0; i < adapter->num_queues; i++)
501                 ena_free_io_ring_resources(adapter, i);
502
503 }
504
505 static int
506 ena_setup_tx_dma_tag(struct ena_adapter *adapter)
507 {
508         int ret;
509
510         /* Create DMA tag for Tx buffers */
511         ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev),
512             1, 0,                                 /* alignment, bounds       */
513             ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window  */
514             BUS_SPACE_MAXADDR,                    /* highaddr of excl window */
515             NULL, NULL,                           /* filter, filterarg       */
516             ENA_TSO_MAXSIZE,                      /* maxsize                 */
517             adapter->max_tx_sgl_size - 1,         /* nsegments               */
518             ENA_TSO_MAXSIZE,                      /* maxsegsize              */
519             0,                                    /* flags                   */
520             NULL,                                 /* lockfunc                */
521             NULL,                                 /* lockfuncarg             */
522             &adapter->tx_buf_tag);
523
524         return (ret);
525 }
526
527 static int
528 ena_free_tx_dma_tag(struct ena_adapter *adapter)
529 {
530         int ret;
531
532         ret = bus_dma_tag_destroy(adapter->tx_buf_tag);
533
534         if (likely(ret == 0))
535                 adapter->tx_buf_tag = NULL;
536
537         return (ret);
538 }
539
540 static int
541 ena_setup_rx_dma_tag(struct ena_adapter *adapter)
542 {
543         int ret;
544
545         /* Create DMA tag for Rx buffers*/
546         ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), /* parent   */
547             1, 0,                                 /* alignment, bounds       */
548             ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window  */
549             BUS_SPACE_MAXADDR,                    /* highaddr of excl window */
550             NULL, NULL,                           /* filter, filterarg       */
551             MJUM16BYTES,                          /* maxsize                 */
552             adapter->max_rx_sgl_size,             /* nsegments               */
553             MJUM16BYTES,                          /* maxsegsize              */
554             0,                                    /* flags                   */
555             NULL,                                 /* lockfunc                */
556             NULL,                                 /* lockarg                 */
557             &adapter->rx_buf_tag);
558
559         return (ret);
560 }
561
562 static int
563 ena_free_rx_dma_tag(struct ena_adapter *adapter)
564 {
565         int ret;
566
567         ret = bus_dma_tag_destroy(adapter->rx_buf_tag);
568
569         if (likely(ret == 0))
570                 adapter->rx_buf_tag = NULL;
571
572         return (ret);
573 }
574
575 /**
576  * ena_setup_tx_resources - allocate Tx resources (Descriptors)
577  * @adapter: network interface device structure
578  * @qid: queue index
579  *
580  * Returns 0 on success, or an error code on failure.
581  **/
582 static int
583 ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
584 {
585         struct ena_que *que = &adapter->que[qid];
586         struct ena_ring *tx_ring = que->tx_ring;
587         int size, i, err;
588 #ifdef  RSS
589         cpuset_t cpu_mask;
590 #endif
591
592         size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
593
594         tx_ring->tx_buffer_info = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
595         if (unlikely(tx_ring->tx_buffer_info == NULL))
596                 return (ENOMEM);
597
598         size = sizeof(uint16_t) * tx_ring->ring_size;
599         tx_ring->free_tx_ids = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
600         if (unlikely(tx_ring->free_tx_ids == NULL))
601                 goto err_buf_info_free;
602
603         /* Req id stack for TX OOO completions */
604         for (i = 0; i < tx_ring->ring_size; i++)
605                 tx_ring->free_tx_ids[i] = i;
606
607         /* Reset TX statistics. */
608         ena_reset_counters((counter_u64_t *)&tx_ring->tx_stats,
609             sizeof(tx_ring->tx_stats));
610
611         tx_ring->next_to_use = 0;
612         tx_ring->next_to_clean = 0;
613
614         /* Make sure that drbr is empty */
615         ENA_RING_MTX_LOCK(tx_ring);
616         drbr_flush(adapter->ifp, tx_ring->br);
617         ENA_RING_MTX_UNLOCK(tx_ring);
618
619         /* ... and create the buffer DMA maps */
620         for (i = 0; i < tx_ring->ring_size; i++) {
621                 err = bus_dmamap_create(adapter->tx_buf_tag, 0,
622                     &tx_ring->tx_buffer_info[i].map);
623                 if (unlikely(err != 0)) {
624                         ena_trace(ENA_ALERT,
625                              "Unable to create Tx DMA map for buffer %d\n", i);
626                         goto err_buf_info_unmap;
627                 }
628         }
629
630         /* Allocate taskqueues */
631         TASK_INIT(&tx_ring->enqueue_task, 0, ena_deferred_mq_start, tx_ring);
632         tx_ring->enqueue_tq = taskqueue_create_fast("ena_tx_enque", M_NOWAIT,
633             taskqueue_thread_enqueue, &tx_ring->enqueue_tq);
634         if (unlikely(tx_ring->enqueue_tq == NULL)) {
635                 ena_trace(ENA_ALERT,
636                     "Unable to create taskqueue for enqueue task\n");
637                 i = tx_ring->ring_size;
638                 goto err_buf_info_unmap;
639         }
640
641         /* RSS set cpu for thread */
642 #ifdef RSS
643         CPU_SETOF(que->cpu, &cpu_mask);
644         taskqueue_start_threads_cpuset(&tx_ring->enqueue_tq, 1, PI_NET,
645             &cpu_mask, "%s tx_ring enq (bucket %d)",
646             device_get_nameunit(adapter->pdev), que->cpu);
647 #else /* RSS */
648         taskqueue_start_threads(&tx_ring->enqueue_tq, 1, PI_NET,
649             "%s txeq %d", device_get_nameunit(adapter->pdev), que->cpu);
650 #endif /* RSS */
651
652         return (0);
653
654 err_buf_info_unmap:
655         while (i--) {
656                 bus_dmamap_destroy(adapter->tx_buf_tag,
657                     tx_ring->tx_buffer_info[i].map);
658         }
659         free(tx_ring->free_tx_ids, M_DEVBUF);
660         tx_ring->free_tx_ids = NULL;
661 err_buf_info_free:
662         free(tx_ring->tx_buffer_info, M_DEVBUF);
663         tx_ring->tx_buffer_info = NULL;
664
665         return (ENOMEM);
666 }
667
668 /**
669  * ena_free_tx_resources - Free Tx Resources per Queue
670  * @adapter: network interface device structure
671  * @qid: queue index
672  *
673  * Free all transmit software resources
674  **/
675 static void
676 ena_free_tx_resources(struct ena_adapter *adapter, int qid)
677 {
678         struct ena_ring *tx_ring = &adapter->tx_ring[qid];
679
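        /*
         * taskqueue_cancel() keeps returning non-zero while the task is
         * still running; drain and retry until it can be cancelled.
         */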
680         while (taskqueue_cancel(tx_ring->enqueue_tq, &tx_ring->enqueue_task,
681             NULL))
682                 taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
683
684         taskqueue_free(tx_ring->enqueue_tq);
685
686         ENA_RING_MTX_LOCK(tx_ring);
687         /* Flush buffer ring, */
688         drbr_flush(adapter->ifp, tx_ring->br);
689
690         /* Free buffer DMA maps, */
691         for (int i = 0; i < tx_ring->ring_size; i++) {
692                 m_freem(tx_ring->tx_buffer_info[i].mbuf);
693                 tx_ring->tx_buffer_info[i].mbuf = NULL;
694                 bus_dmamap_unload(adapter->tx_buf_tag,
695                     tx_ring->tx_buffer_info[i].map);
696                 bus_dmamap_destroy(adapter->tx_buf_tag,
697                     tx_ring->tx_buffer_info[i].map);
698         }
699         ENA_RING_MTX_UNLOCK(tx_ring);
700
701         /* And free allocated memory. */
702         free(tx_ring->tx_buffer_info, M_DEVBUF);
703         tx_ring->tx_buffer_info = NULL;
704
705         free(tx_ring->free_tx_ids, M_DEVBUF);
706         tx_ring->free_tx_ids = NULL;
707 }
708
709 /**
710  * ena_setup_all_tx_resources - allocate all queues Tx resources
711  * @adapter: network interface device structure
712  *
713  * Returns 0 on success, or an error code on failure.
714  **/
715 static int
716 ena_setup_all_tx_resources(struct ena_adapter *adapter)
717 {
718         int i, rc;
719
720         for (i = 0; i < adapter->num_queues; i++) {
721                 rc = ena_setup_tx_resources(adapter, i);
722                 if (rc != 0) {
723                         device_printf(adapter->pdev,
724                             "Allocation for Tx Queue %u failed\n", i);
725                         goto err_setup_tx;
726                 }
727         }
728
729         return (0);
730
731 err_setup_tx:
732         /* Rewind the index freeing the rings as we go */
733         while (i--)
734                 ena_free_tx_resources(adapter, i);
735         return (rc);
736 }
737
738 /**
739  * ena_free_all_tx_resources - Free Tx Resources for All Queues
740  * @adapter: network interface device structure
741  *
742  * Free all transmit software resources
743  **/
744 static void
745 ena_free_all_tx_resources(struct ena_adapter *adapter)
746 {
747         int i;
748
749         for (i = 0; i < adapter->num_queues; i++)
750                 ena_free_tx_resources(adapter, i);
751 }
752
753 static inline int
754 validate_rx_req_id(struct ena_ring *rx_ring, uint16_t req_id)
755 {
756         if (likely(req_id < rx_ring->ring_size))
757                 return (0);
758
759         device_printf(rx_ring->adapter->pdev, "Invalid rx req_id: %hu\n",
760             req_id);
761         counter_u64_add(rx_ring->rx_stats.bad_req_id, 1);
762
763         /* Trigger device reset */
764         rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
765         rx_ring->adapter->trigger_reset = true;
766
767         return (EFAULT);
768 }
769
770 /**
771  * ena_setup_rx_resources - allocate Rx resources (Descriptors)
772  * @adapter: network interface device structure
773  * @qid: queue index
774  *
775  * Returns 0 on success, or an error code on failure.
776  **/
777 static int
778 ena_setup_rx_resources(struct ena_adapter *adapter, unsigned int qid)
779 {
780         struct ena_que *que = &adapter->que[qid];
781         struct ena_ring *rx_ring = que->rx_ring;
782         int size, err, i;
783 #ifdef  RSS
784         cpuset_t cpu_mask;
785 #endif
786
787         size = sizeof(struct ena_rx_buffer) * rx_ring->ring_size;
788
789         /*
790          * Alloc an extra element so that in the rx path
791          * we can always prefetch rx_info + 1
792          */
793         size += sizeof(struct ena_rx_buffer);
794
795         rx_ring->rx_buffer_info = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
796
797         size = sizeof(uint16_t) * rx_ring->ring_size;
798         rx_ring->free_rx_ids = malloc(size, M_DEVBUF, M_WAITOK);
799
800         for (i = 0; i < rx_ring->ring_size; i++)
801                 rx_ring->free_rx_ids[i] = i;
802
803         /* Reset RX statistics. */
804         ena_reset_counters((counter_u64_t *)&rx_ring->rx_stats,
805             sizeof(rx_ring->rx_stats));
806
807         rx_ring->next_to_clean = 0;
808         rx_ring->next_to_use = 0;
809
810         /* ... and create the buffer DMA maps */
811         for (i = 0; i < rx_ring->ring_size; i++) {
812                 err = bus_dmamap_create(adapter->rx_buf_tag, 0,
813                     &(rx_ring->rx_buffer_info[i].map));
814                 if (err != 0) {
815                         ena_trace(ENA_ALERT,
816                             "Unable to create Rx DMA map for buffer %d\n", i);
817                         goto err_buf_info_unmap;
818                 }
819         }
820
821         /* Create LRO for the ring */
822         if ((adapter->ifp->if_capenable & IFCAP_LRO) != 0) {
823                 int err = tcp_lro_init(&rx_ring->lro);
824                 if (err != 0) {
825                         device_printf(adapter->pdev,
826                             "LRO[%d] Initialization failed!\n", qid);
827                 } else {
828                         ena_trace(ENA_INFO,
829                             "RX Soft LRO[%d] Initialized\n", qid);
830                         rx_ring->lro.ifp = adapter->ifp;
831                 }
832         }
833
834         /* Allocate taskqueues */
835         TASK_INIT(&rx_ring->cmpl_task, 0, ena_deferred_rx_cleanup, rx_ring);
836         rx_ring->cmpl_tq = taskqueue_create_fast("ena RX completion", M_WAITOK,
837             taskqueue_thread_enqueue, &rx_ring->cmpl_tq);
838
839         /* RSS set cpu for thread */
840 #ifdef RSS
841         CPU_SETOF(que->cpu, &cpu_mask);
842         taskqueue_start_threads_cpuset(&rx_ring->cmpl_tq, 1, PI_NET, &cpu_mask,
843             "%s rx_ring cmpl (bucket %d)",
844             device_get_nameunit(adapter->pdev), que->cpu);
845 #else
846         taskqueue_start_threads(&rx_ring->cmpl_tq, 1, PI_NET,
847             "%s rx_ring cmpl %d", device_get_nameunit(adapter->pdev), que->cpu);
848 #endif
849
850         return (0);
851
852 err_buf_info_unmap:
853         while (i--) {
854                 bus_dmamap_destroy(adapter->rx_buf_tag,
855                     rx_ring->rx_buffer_info[i].map);
856         }
857
858         free(rx_ring->free_rx_ids, M_DEVBUF);
859         rx_ring->free_rx_ids = NULL;
860         free(rx_ring->rx_buffer_info, M_DEVBUF);
861         rx_ring->rx_buffer_info = NULL;
862         return (ENOMEM);
863 }
864
865 /**
866  * ena_free_rx_resources - Free Rx Resources
867  * @adapter: network interface device structure
868  * @qid: queue index
869  *
870  * Free all receive software resources
871  **/
872 static void
873 ena_free_rx_resources(struct ena_adapter *adapter, unsigned int qid)
874 {
875         struct ena_ring *rx_ring = &adapter->rx_ring[qid];
876
877         while (taskqueue_cancel(rx_ring->cmpl_tq, &rx_ring->cmpl_task, NULL) != 0)
878                 taskqueue_drain(rx_ring->cmpl_tq, &rx_ring->cmpl_task);
879
880         taskqueue_free(rx_ring->cmpl_tq);
881
882         /* Free buffer DMA maps, */
883         for (int i = 0; i < rx_ring->ring_size; i++) {
884                 m_freem(rx_ring->rx_buffer_info[i].mbuf);
885                 rx_ring->rx_buffer_info[i].mbuf = NULL;
886                 bus_dmamap_unload(adapter->rx_buf_tag,
887                     rx_ring->rx_buffer_info[i].map);
888                 bus_dmamap_destroy(adapter->rx_buf_tag,
889                     rx_ring->rx_buffer_info[i].map);
890         }
891
892         /* free LRO resources, */
893         tcp_lro_free(&rx_ring->lro);
894
895         /* free allocated memory */
896         free(rx_ring->rx_buffer_info, M_DEVBUF);
897         rx_ring->rx_buffer_info = NULL;
898
899         free(rx_ring->free_rx_ids, M_DEVBUF);
900         rx_ring->free_rx_ids = NULL;
901 }
902
903 /**
904  * ena_setup_all_rx_resources - allocate all queues Rx resources
905  * @adapter: network interface device structure
906  *
907  * Returns 0 on success, or an error code on failure.
908  **/
909 static int
910 ena_setup_all_rx_resources(struct ena_adapter *adapter)
911 {
912         int i, rc = 0;
913
914         for (i = 0; i < adapter->num_queues; i++) {
915                 rc = ena_setup_rx_resources(adapter, i);
916                 if (rc != 0) {
917                         device_printf(adapter->pdev,
918                             "Allocation for Rx Queue %u failed\n", i);
919                         goto err_setup_rx;
920                 }
921         }
922         return (0);
923
924 err_setup_rx:
925         /* rewind the index freeing the rings as we go */
926         while (i--)
927                 ena_free_rx_resources(adapter, i);
928         return (rc);
929 }
930
931 /**
932  * ena_free_all_rx_resources - Free Rx resources for all queues
933  * @adapter: network interface device structure
934  *
935  * Free all receive software resources
936  **/
937 static void
938 ena_free_all_rx_resources(struct ena_adapter *adapter)
939 {
940         int i;
941
942         for (i = 0; i < adapter->num_queues; i++)
943                 ena_free_rx_resources(adapter, i);
944 }
945
946 static inline int
947 ena_alloc_rx_mbuf(struct ena_adapter *adapter,
948     struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info)
949 {
950         struct ena_com_buf *ena_buf;
951         bus_dma_segment_t segs[1];
952         int nsegs, error;
953         int mlen;
954
955         /* if the previously allocated frag is not used, keep it */
956         if (unlikely(rx_info->mbuf != NULL))
957                 return (0);
958
959         /* Get mbuf using UMA allocator */
960         rx_info->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM16BYTES);
961
962         if (unlikely(rx_info->mbuf == NULL)) {
963                 counter_u64_add(rx_ring->rx_stats.mjum_alloc_fail, 1);
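                /*
                 * Fall back to a standard cluster (MCLBYTES) when a 16KB
                 * jumbo cluster cannot be allocated.
                 */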
964                 rx_info->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
965                 if (unlikely(rx_info->mbuf == NULL)) {
966                         counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1);
967                         return (ENOMEM);
968                 }
969                 mlen = MCLBYTES;
970         } else {
971                 mlen = MJUM16BYTES;
972         }
973         /* Set mbuf length */
974         rx_info->mbuf->m_pkthdr.len = rx_info->mbuf->m_len = mlen;
975
976         /* Map packets for DMA */
977         ena_trace(ENA_DBG | ENA_RSC | ENA_RXPTH,
978             "Using tag %p for buffers' DMA mapping, mbuf %p len: %d",
979             adapter->rx_buf_tag, rx_info->mbuf, rx_info->mbuf->m_len);
980         error = bus_dmamap_load_mbuf_sg(adapter->rx_buf_tag, rx_info->map,
981             rx_info->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
982         if (unlikely((error != 0) || (nsegs != 1))) {
983                 ena_trace(ENA_WARNING, "failed to map mbuf, error: %d, "
984                     "nsegs: %d\n", error, nsegs);
985                 counter_u64_add(rx_ring->rx_stats.dma_mapping_err, 1);
986                 goto exit;
987
988         }
989
990         bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD);
991
992         ena_buf = &rx_info->ena_buf;
993         ena_buf->paddr = segs[0].ds_addr;
994         ena_buf->len = mlen;
995
996         ena_trace(ENA_DBG | ENA_RSC | ENA_RXPTH,
997             "ALLOC RX BUF: mbuf %p, rx_info %p, len %d, paddr %#jx\n",
998             rx_info->mbuf, rx_info, ena_buf->len, (uintmax_t)ena_buf->paddr);
999
1000         return (0);
1001
1002 exit:
1003         m_freem(rx_info->mbuf);
1004         rx_info->mbuf = NULL;
1005         return (EFAULT);
1006 }
1007
1008 static void
1009 ena_free_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring,
1010     struct ena_rx_buffer *rx_info)
1011 {
1012
1013         if (rx_info->mbuf == NULL) {
1014                 ena_trace(ENA_WARNING, "Trying to free unallocated buffer\n");
1015                 return;
1016         }
1017
1018         bus_dmamap_unload(adapter->rx_buf_tag, rx_info->map);
1019         m_freem(rx_info->mbuf);
1020         rx_info->mbuf = NULL;
1021 }
1022
1023 /**
1024  * ena_refill_rx_bufs - Refills ring with descriptors
1025  * @rx_ring: the ring which we want to feed with free descriptors
1026  * @num: number of descriptors to refill
1027  * Refills the ring with newly allocated DMA-mapped mbufs for receiving
1028  **/
1029 static int
1030 ena_refill_rx_bufs(struct ena_ring *rx_ring, uint32_t num)
1031 {
1032         struct ena_adapter *adapter = rx_ring->adapter;
1033         uint16_t next_to_use, req_id;
1034         uint32_t i;
1035         int rc;
1036
1037         ena_trace(ENA_DBG | ENA_RXPTH | ENA_RSC, "refill qid: %d",
1038             rx_ring->qid);
1039
1040         next_to_use = rx_ring->next_to_use;
1041
1042         for (i = 0; i < num; i++) {
1043                 struct ena_rx_buffer *rx_info;
1044
1045                 ena_trace(ENA_DBG | ENA_RXPTH | ENA_RSC,
1046                     "RX buffer - next to use: %d", next_to_use);
1047
1048                 req_id = rx_ring->free_rx_ids[next_to_use];
1049                 rx_info = &rx_ring->rx_buffer_info[req_id];
1050
1051                 rc = ena_alloc_rx_mbuf(adapter, rx_ring, rx_info);
1052                 if (unlikely(rc != 0)) {
1053                         ena_trace(ENA_WARNING,
1054                             "failed to alloc buffer for rx queue %d\n",
1055                             rx_ring->qid);
1056                         break;
1057                 }
1058                 rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
1059                     &rx_info->ena_buf, req_id);
1060                 if (unlikely(rc != 0)) {
1061                         ena_trace(ENA_WARNING,
1062                             "failed to add buffer for rx queue %d\n",
1063                             rx_ring->qid);
1064                         break;
1065                 }
1066                 next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
1067                     rx_ring->ring_size);
1068         }
1069
1070         if (unlikely(i < num)) {
1071                 counter_u64_add(rx_ring->rx_stats.refil_partial, 1);
1072                 ena_trace(ENA_WARNING,
1073                      "refilled rx qid %d with only %d mbufs (from %d)\n",
1074                      rx_ring->qid, i, num);
1075         }
1076
1077         if (likely(i != 0)) {
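                /*
                 * Make sure the descriptor writes above are visible to the
                 * device before the doorbell is rung.
                 */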
1078                 wmb();
1079                 ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
1080         }
1081         rx_ring->next_to_use = next_to_use;
1082         return (i);
1083 }
1084
1085 static void
1086 ena_free_rx_bufs(struct ena_adapter *adapter, unsigned int qid)
1087 {
1088         struct ena_ring *rx_ring = &adapter->rx_ring[qid];
1089         unsigned int i;
1090
1091         for (i = 0; i < rx_ring->ring_size; i++) {
1092                 struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];
1093
1094                 if (rx_info->mbuf != NULL)
1095                         ena_free_rx_mbuf(adapter, rx_ring, rx_info);
1096         }
1097 }
1098
1099 /**
1100  * ena_refill_all_rx_bufs - allocate all queues Rx buffers
1101  * @adapter: network interface device structure
1102  *
1103  */
1104 static void
1105 ena_refill_all_rx_bufs(struct ena_adapter *adapter)
1106 {
1107         struct ena_ring *rx_ring;
1108         int i, rc, bufs_num;
1109
1110         for (i = 0; i < adapter->num_queues; i++) {
1111                 rx_ring = &adapter->rx_ring[i];
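                /*
                 * Refill all but one descriptor; presumably one slot is kept
                 * unused so a completely full ring can be told apart from an
                 * empty one.
                 */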
1112                 bufs_num = rx_ring->ring_size - 1;
1113                 rc = ena_refill_rx_bufs(rx_ring, bufs_num);
1114
1115                 if (unlikely(rc != bufs_num))
1116                         ena_trace(ENA_WARNING, "refilling Queue %d failed. "
1117                             "Allocated %d buffers from: %d\n", i, rc, bufs_num);
1118         }
1119 }
1120
1121 static void
1122 ena_free_all_rx_bufs(struct ena_adapter *adapter)
1123 {
1124         int i;
1125
1126         for (i = 0; i < adapter->num_queues; i++)
1127                 ena_free_rx_bufs(adapter, i);
1128 }
1129
1130 /**
1131  * ena_free_tx_bufs - Free Tx Buffers per Queue
1132  * @adapter: network interface device structure
1133  * @qid: queue index
1134  **/
1135 static void
1136 ena_free_tx_bufs(struct ena_adapter *adapter, unsigned int qid)
1137 {
1138         bool print_once = true;
1139         struct ena_ring *tx_ring = &adapter->tx_ring[qid];
1140
1141         ENA_RING_MTX_LOCK(tx_ring);
1142         for (int i = 0; i < tx_ring->ring_size; i++) {
1143                 struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
1144
1145                 if (tx_info->mbuf == NULL)
1146                         continue;
1147
1148                 if (print_once) {
1149                         device_printf(adapter->pdev,
1150                             "free uncompleted tx mbuf qid %d idx 0x%x",
1151                             qid, i);
1152                         print_once = false;
1153                 } else {
1154                         ena_trace(ENA_DBG,
1155                             "free uncompleted tx mbuf qid %d idx 0x%x",
1156                              qid, i);
1157                 }
1158
1159                 bus_dmamap_unload(adapter->tx_buf_tag, tx_info->map);
1160                 m_free(tx_info->mbuf);
1161                 tx_info->mbuf = NULL;
1162         }
1163         ENA_RING_MTX_UNLOCK(tx_ring);
1164 }
1165
1166 static void
1167 ena_free_all_tx_bufs(struct ena_adapter *adapter)
1168 {
1169
1170         for (int i = 0; i < adapter->num_queues; i++)
1171                 ena_free_tx_bufs(adapter, i);
1172 }
1173
1174 static void
1175 ena_destroy_all_tx_queues(struct ena_adapter *adapter)
1176 {
1177         uint16_t ena_qid;
1178         int i;
1179
1180         for (i = 0; i < adapter->num_queues; i++) {
1181                 ena_qid = ENA_IO_TXQ_IDX(i);
1182                 ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
1183         }
1184 }
1185
1186 static void
1187 ena_destroy_all_rx_queues(struct ena_adapter *adapter)
1188 {
1189         uint16_t ena_qid;
1190         int i;
1191
1192         for (i = 0; i < adapter->num_queues; i++) {
1193                 ena_qid = ENA_IO_RXQ_IDX(i);
1194                 ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
1195         }
1196 }
1197
1198 static void
1199 ena_destroy_all_io_queues(struct ena_adapter *adapter)
1200 {
1201         ena_destroy_all_tx_queues(adapter);
1202         ena_destroy_all_rx_queues(adapter);
1203 }
1204
1205 static inline int
1206 validate_tx_req_id(struct ena_ring *tx_ring, uint16_t req_id)
1207 {
1208         struct ena_adapter *adapter = tx_ring->adapter;
1209         struct ena_tx_buffer *tx_info = NULL;
1210
1211         if (likely(req_id < tx_ring->ring_size)) {
1212                 tx_info = &tx_ring->tx_buffer_info[req_id];
1213                 if (tx_info->mbuf != NULL)
1214                         return (0);
1215         }
1216
1217         if (tx_info == NULL)
1218                 device_printf(adapter->pdev, "Invalid req_id: %hu\n", req_id);
1219         else
1220                 device_printf(adapter->pdev,
1221                     "tx_info doesn't have valid mbuf\n");
1222
1223         counter_u64_add(tx_ring->tx_stats.bad_req_id, 1);
1224
1225         return (EFAULT);
1226 }
1227
1228 static int
1229 ena_create_io_queues(struct ena_adapter *adapter)
1230 {
1231         struct ena_com_dev *ena_dev = adapter->ena_dev;
1232         struct ena_com_create_io_ctx ctx;
1233         struct ena_ring *ring;
1234         uint16_t ena_qid;
1235         uint32_t msix_vector;
1236         int rc, i;
1237
1238         /* Create TX queues */
1239         for (i = 0; i < adapter->num_queues; i++) {
1240                 msix_vector = ENA_IO_IRQ_IDX(i);
1241                 ena_qid = ENA_IO_TXQ_IDX(i);
1242                 ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
1243                 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
1244                 ctx.queue_size = adapter->tx_ring_size;
1245                 ctx.msix_vector = msix_vector;
1246                 ctx.qid = ena_qid;
1247                 rc = ena_com_create_io_queue(ena_dev, &ctx);
1248                 if (rc != 0) {
1249                         device_printf(adapter->pdev,
1250                             "Failed to create io TX queue #%d rc: %d\n", i, rc);
1251                         goto err_tx;
1252                 }
1253                 ring = &adapter->tx_ring[i];
1254                 rc = ena_com_get_io_handlers(ena_dev, ena_qid,
1255                     &ring->ena_com_io_sq,
1256                     &ring->ena_com_io_cq);
1257                 if (rc != 0) {
1258                         device_printf(adapter->pdev,
1259                             "Failed to get TX queue handlers. TX queue num"
1260                             " %d rc: %d\n", i, rc);
1261                         ena_com_destroy_io_queue(ena_dev, ena_qid);
1262                         goto err_tx;
1263                 }
1264         }
1265
1266         /* Create RX queues */
1267         for (i = 0; i < adapter->num_queues; i++) {
1268                 msix_vector = ENA_IO_IRQ_IDX(i);
1269                 ena_qid = ENA_IO_RXQ_IDX(i);
1270                 ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
1271                 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
1272                 ctx.queue_size = adapter->rx_ring_size;
1273                 ctx.msix_vector = msix_vector;
1274                 ctx.qid = ena_qid;
1275                 rc = ena_com_create_io_queue(ena_dev, &ctx);
1276                 if (unlikely(rc != 0)) {
1277                         device_printf(adapter->pdev,
1278                             "Failed to create io RX queue[%d] rc: %d\n", i, rc);
1279                         goto err_rx;
1280                 }
1281
1282                 ring = &adapter->rx_ring[i];
1283                 rc = ena_com_get_io_handlers(ena_dev, ena_qid,
1284                     &ring->ena_com_io_sq,
1285                     &ring->ena_com_io_cq);
1286                 if (unlikely(rc != 0)) {
1287                         device_printf(adapter->pdev,
1288                             "Failed to get RX queue handlers. RX queue num"
1289                             " %d rc: %d\n", i, rc);
1290                         ena_com_destroy_io_queue(ena_dev, ena_qid);
1291                         goto err_rx;
1292                 }
1293         }
1294
1295         return (0);
1296
1297 err_rx:
1298         while (i--)
1299                 ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
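        /*
         * All RX queues created so far have been destroyed; reset 'i' so the
         * fall-through into err_tx tears down every TX queue as well.
         */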
1300         i = adapter->num_queues;
1301 err_tx:
1302         while (i--)
1303                 ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));
1304
1305         return (ENXIO);
1306 }
1307
1308 /**
1309  * ena_tx_cleanup - clear sent packets and corresponding descriptors
1310  * @tx_ring: ring for which we want to clean packets
1311  *
1312  * Once packets are sent, we ask the device in a loop for no longer used
1313  * descriptors. We find the related mbuf chain in a map (index in an array)
1314  * and free it, then update ring state.
1315  * This is performed in an "endless" loop, updating the ring pointers every
1316  * TX_COMMIT descriptors. The first check for free descriptors is performed
1317  * before the actual loop and is then repeated at the end of the loop.
1318  **/
1319 static int
1320 ena_tx_cleanup(struct ena_ring *tx_ring)
1321 {
1322         struct ena_adapter *adapter;
1323         struct ena_com_io_cq* io_cq;
1324         uint16_t next_to_clean;
1325         uint16_t req_id;
1326         uint16_t ena_qid;
1327         unsigned int total_done = 0;
1328         int rc;
1329         int commit = TX_COMMIT;
1330         int budget = TX_BUDGET;
1331         int work_done;
1332
1333         adapter = tx_ring->que->adapter;
1334         ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id);
1335         io_cq = &adapter->ena_dev->io_cq_queues[ena_qid];
1336         next_to_clean = tx_ring->next_to_clean;
1337
1338         do {
1339                 struct ena_tx_buffer *tx_info;
1340                 struct mbuf *mbuf;
1341
1342                 rc = ena_com_tx_comp_req_id_get(io_cq, &req_id);
1343                 if (unlikely(rc != 0))
1344                         break;
1345
1346                 rc = validate_tx_req_id(tx_ring, req_id);
1347                 if (unlikely(rc != 0))
1348                         break;
1349
1350                 tx_info = &tx_ring->tx_buffer_info[req_id];
1351
1352                 mbuf = tx_info->mbuf;
1353
1354                 tx_info->mbuf = NULL;
1355                 bintime_clear(&tx_info->timestamp);
1356
1357                 if (likely(tx_info->num_of_bufs != 0)) {
1358                         /* Map is no longer required */
1359                         bus_dmamap_unload(adapter->tx_buf_tag, tx_info->map);
1360                 }
1361
1362                 ena_trace(ENA_DBG | ENA_TXPTH, "tx: q %d mbuf %p completed",
1363                     tx_ring->qid, mbuf);
1364
1365                 m_freem(mbuf);
1366
1367                 total_done += tx_info->tx_descs;
1368
1369                 tx_ring->free_tx_ids[next_to_clean] = req_id;
1370                 next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
1371                     tx_ring->ring_size);
1372
1373                 if (unlikely(--commit == 0)) {
1374                         commit = TX_COMMIT;
1375                         /* update ring state every TX_COMMIT descriptor */
1376                         tx_ring->next_to_clean = next_to_clean;
1377                         ena_com_comp_ack(
1378                             &adapter->ena_dev->io_sq_queues[ena_qid],
1379                             total_done);
1380                         ena_com_update_dev_comp_head(io_cq);
1381                         total_done = 0;
1382                 }
1383         } while (likely(--budget));
1384
1385         work_done = TX_BUDGET - budget;
1386
1387         ena_trace(ENA_DBG | ENA_TXPTH, "tx: q %d done. total pkts: %d",
1388             tx_ring->qid, work_done);
1389
1390         /* If there is still something to commit update ring state */
1391         if (likely(commit != TX_COMMIT)) {
1392                 tx_ring->next_to_clean = next_to_clean;
1393                 ena_com_comp_ack(&adapter->ena_dev->io_sq_queues[ena_qid],
1394                     total_done);
1395                 ena_com_update_dev_comp_head(io_cq);
1396         }
1397
1398         taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
1399
1400         return (work_done);
1401 }
1402
1403 static void
1404 ena_rx_hash_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_ctx *ena_rx_ctx,
1405     struct mbuf *mbuf)
1406 {
1407         struct ena_adapter *adapter = rx_ring->adapter;
1408
1409         if (likely(adapter->rss_support)) {
1410                 mbuf->m_pkthdr.flowid = ena_rx_ctx->hash;
1411
1412                 if (ena_rx_ctx->frag &&
1413                     (ena_rx_ctx->l3_proto != ENA_ETH_IO_L3_PROTO_UNKNOWN)) {
1414                         M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH);
1415                         return;
1416                 }
1417
1418                 switch (ena_rx_ctx->l3_proto) {
1419                 case ENA_ETH_IO_L3_PROTO_IPV4:
1420                         switch (ena_rx_ctx->l4_proto) {
1421                         case ENA_ETH_IO_L4_PROTO_TCP:
1422                                 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV4);
1423                                 break;
1424                         case ENA_ETH_IO_L4_PROTO_UDP:
1425                                 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV4);
1426                                 break;
1427                         default:
1428                                 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV4);
1429                         }
1430                         break;
1431                 case ENA_ETH_IO_L3_PROTO_IPV6:
1432                         switch (ena_rx_ctx->l4_proto) {
1433                         case ENA_ETH_IO_L4_PROTO_TCP:
1434                                 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV6);
1435                                 break;
1436                         case ENA_ETH_IO_L4_PROTO_UDP:
1437                                 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV6);
1438                                 break;
1439                         default:
1440                                 M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV6);
1441                         }
1442                         break;
1443                 case ENA_ETH_IO_L3_PROTO_UNKNOWN:
1444                         M_HASHTYPE_SET(mbuf, M_HASHTYPE_NONE);
1445                         break;
1446                 default:
1447                         M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH);
1448                 }
1449         } else {
1450                 mbuf->m_pkthdr.flowid = rx_ring->qid;
1451                 M_HASHTYPE_SET(mbuf, M_HASHTYPE_NONE);
1452         }
1453 }
1454
1455 /**
1456  * ena_rx_mbuf - assemble mbuf from descriptors
1457  * @rx_ring: ring for which we want to clean packets
1458  * @ena_bufs: buffer info
1459  * @ena_rx_ctx: metadata for this packet(s)
1460  * @next_to_clean: ring pointer, will be updated only upon success
1461  *
1462  **/
1463 static struct mbuf*
1464 ena_rx_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_buf_info *ena_bufs,
1465     struct ena_com_rx_ctx *ena_rx_ctx, uint16_t *next_to_clean)
1466 {
1467         struct mbuf *mbuf;
1468         struct ena_rx_buffer *rx_info;
1469         struct ena_adapter *adapter;
1470         unsigned int descs = ena_rx_ctx->descs;
1471         int rc;
1472         uint16_t ntc, len, req_id, buf = 0;
1473
1474         ntc = *next_to_clean;
1475         adapter = rx_ring->adapter;
1476
1477         len = ena_bufs[buf].len;
1478         req_id = ena_bufs[buf].req_id;
1479         rc = validate_rx_req_id(rx_ring, req_id);
1480         if (unlikely(rc != 0))
1481                 return (NULL);
1482
1483         rx_info = &rx_ring->rx_buffer_info[req_id];
1484         if (unlikely(rx_info->mbuf == NULL)) {
1485                 device_printf(adapter->pdev, "NULL mbuf in rx_info\n");
1486                 return (NULL);
1487         }
1488
1489         ena_trace(ENA_DBG | ENA_RXPTH, "rx_info %p, mbuf %p, paddr %jx",
1490             rx_info, rx_info->mbuf, (uintmax_t)rx_info->ena_buf.paddr);
1491
1492         mbuf = rx_info->mbuf;
1493         mbuf->m_flags |= M_PKTHDR;
1494         mbuf->m_pkthdr.len = len;
1495         mbuf->m_len = len;
1496         mbuf->m_pkthdr.rcvif = rx_ring->que->adapter->ifp;
1497
1498         /* Fill mbuf with hash key and its interpretation for optimization */
1499         ena_rx_hash_mbuf(rx_ring, ena_rx_ctx, mbuf);
1500
1501         ena_trace(ENA_DBG | ENA_RXPTH, "rx mbuf 0x%p, flags=0x%x, len: %d",
1502             mbuf, mbuf->m_flags, mbuf->m_pkthdr.len);
1503
1504         /* DMA address is not needed anymore, unmap it */
1505         bus_dmamap_unload(rx_ring->adapter->rx_buf_tag, rx_info->map);
1506
1507         rx_info->mbuf = NULL;
1508         rx_ring->free_rx_ids[ntc] = req_id;
1509         ntc = ENA_RX_RING_IDX_NEXT(ntc, rx_ring->ring_size);
1510
1511         /*
1512          * While there is more than one descriptor for the received packet,
1513          * append the remaining mbufs to the main one.
1514          */
1515         while (--descs) {
1516                 ++buf;
1517                 len = ena_bufs[buf].len;
1518                 req_id = ena_bufs[buf].req_id;
1519                 rc = validate_rx_req_id(rx_ring, req_id);
1520                 if (unlikely(rc != 0)) {
1521                         /*
1522                          * If the req_id is invalid, then the device will be
1523                          * reset. In that case we must free all mbufs that
1524                          * were already gathered.
1525                          */
1526                         m_freem(mbuf);
1527                         return (NULL);
1528                 }
1529                 rx_info = &rx_ring->rx_buffer_info[req_id];
1530
1531                 if (unlikely(rx_info->mbuf == NULL)) {
1532                         device_printf(adapter->pdev, "NULL mbuf in rx_info\n");
1533                         /*
1534                          * If one of the required mbufs was not allocated yet,
1535                          * bail out here.
1536                          * All descriptors used so far will be reallocated
1537                          * later and the unused mbufs can be reused.
1538                          * The next_to_clean pointer is not updated on error,
1539                          * so the caller must advance it manually in its error
1540                          * handling routine to keep it in sync with the hw
1541                          * ring.
1542                          */
1543                         m_freem(mbuf);
1544                         return (NULL);
1545                 }
1546
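                /*
                 * m_append() copies the fragment's data into the existing
                 * chain (allocating additional mbufs if needed); the source
                 * mbuf is unloaded and freed right below.
                 */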
1547                 if (unlikely(m_append(mbuf, len, rx_info->mbuf->m_data) == 0)) {
1548                         counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1);
1549                         ena_trace(ENA_WARNING, "Failed to append Rx mbuf %p",
1550                             mbuf);
1551                 }
1552
1553                 ena_trace(ENA_DBG | ENA_RXPTH,
1554                     "rx mbuf updated. len %d", mbuf->m_pkthdr.len);
1555
1556                 /* Free the already-appended mbuf; it won't be needed anymore */
1557                 bus_dmamap_unload(rx_ring->adapter->rx_buf_tag, rx_info->map);
1558                 m_freem(rx_info->mbuf);
1559                 rx_info->mbuf = NULL;
1560
1561                 rx_ring->free_rx_ids[ntc] = req_id;
1562                 ntc = ENA_RX_RING_IDX_NEXT(ntc, rx_ring->ring_size);
1563         }
1564
1565         *next_to_clean = ntc;
1566
1567         return (mbuf);
1568 }
1569
1570 /**
1571  * ena_rx_checksum - indicate in mbuf if hw indicated a good cksum
1572  **/
1573 static inline void
1574 ena_rx_checksum(struct ena_ring *rx_ring, struct ena_com_rx_ctx *ena_rx_ctx,
1575     struct mbuf *mbuf)
1576 {
1577
1578         /* if IP and error */
1579         if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
1580             ena_rx_ctx->l3_csum_err)) {
1581                 /* ipv4 checksum error */
1582                 mbuf->m_pkthdr.csum_flags = 0;
1583                 counter_u64_add(rx_ring->rx_stats.bad_csum, 1);
1584                 ena_trace(ENA_DBG, "RX IPv4 header checksum error");
1585                 return;
1586         }
1587
1588         /* if TCP/UDP */
1589         if ((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
1590             (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)) {
1591                 if (ena_rx_ctx->l4_csum_err) {
1592                         /* TCP/UDP checksum error */
1593                         mbuf->m_pkthdr.csum_flags = 0;
1594                         counter_u64_add(rx_ring->rx_stats.bad_csum, 1);
1595                         ena_trace(ENA_DBG, "RX L4 checksum error");
1596                 } else {
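                        /*
                         * No L4 checksum error was reported by the hardware;
                         * mark the IP header checksum as checked and valid.
                         */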
1597                         mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
1598                         mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1599                 }
1600         }
1601 }
1602
1603 static void
1604 ena_deferred_rx_cleanup(void *arg, int pending)
1605 {
1606         struct ena_ring *rx_ring = arg;
1607         int budget = CLEAN_BUDGET;
1608
1609         ENA_RING_MTX_LOCK(rx_ring);
1610         /*
1611          * When the deferred task is executed, clean up all awaiting descs
1612          * (or stop once the given budget is depleted, to avoid an infinite loop).
1613          */
1614         while (likely(budget--)) {
1615                 if (ena_rx_cleanup(rx_ring) == 0)
1616                         break;
1617         }
1618         ENA_RING_MTX_UNLOCK(rx_ring);
1619 }
1620
1621 /**
1622  * ena_rx_cleanup - handle rx irq
1623  * @rx_ring: ring for which the irq is being handled
1624  **/
1625 static int
1626 ena_rx_cleanup(struct ena_ring *rx_ring)
1627 {
1628         struct ena_adapter *adapter;
1629         struct mbuf *mbuf;
1630         struct ena_com_rx_ctx ena_rx_ctx;
1631         struct ena_com_io_cq* io_cq;
1632         struct ena_com_io_sq* io_sq;
1633         if_t ifp;
1634         uint16_t ena_qid;
1635         uint16_t next_to_clean;
1636         uint32_t refill_required;
1637         uint32_t refill_threshold;
1638         uint32_t do_if_input = 0;
1639         unsigned int qid;
1640         int rc, i;
1641         int budget = RX_BUDGET;
1642
1643         adapter = rx_ring->que->adapter;
1644         ifp = adapter->ifp;
1645         qid = rx_ring->que->id;
1646         ena_qid = ENA_IO_RXQ_IDX(qid);
1647         io_cq = &adapter->ena_dev->io_cq_queues[ena_qid];
1648         io_sq = &adapter->ena_dev->io_sq_queues[ena_qid];
1649         next_to_clean = rx_ring->next_to_clean;
1650
1651         ena_trace(ENA_DBG, "rx: qid %d", qid);
1652
1653         do {
1654                 ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
1655                 ena_rx_ctx.max_bufs = adapter->max_rx_sgl_size;
1656                 ena_rx_ctx.descs = 0;
1657                 rc = ena_com_rx_pkt(io_cq, io_sq, &ena_rx_ctx);
1658
1659                 if (unlikely(rc != 0))
1660                         goto error;
1661
1662                 if (unlikely(ena_rx_ctx.descs == 0))
1663                         break;
1664
1665                 ena_trace(ENA_DBG | ENA_RXPTH, "rx: q %d got packet from ena. "
1666                     "descs #: %d l3 proto %d l4 proto %d hash: %x",
1667                     rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
1668                     ena_rx_ctx.l4_proto, ena_rx_ctx.hash);
1669
1670                 /* Receive mbuf from the ring */
1671                 mbuf = ena_rx_mbuf(rx_ring, rx_ring->ena_bufs,
1672                     &ena_rx_ctx, &next_to_clean);
1673
1674                 /* Exit if we failed to retrieve a buffer */
1675                 if (unlikely(mbuf == NULL)) {
1676                         for (i = 0; i < ena_rx_ctx.descs; ++i) {
1677                                 rx_ring->free_rx_ids[next_to_clean] =
1678                                     rx_ring->ena_bufs[i].req_id;
1679                                 next_to_clean =
1680                                     ENA_RX_RING_IDX_NEXT(next_to_clean,
1681                                     rx_ring->ring_size);
1682
1683                         }
1684                         break;
1685                 }
1686
1687                 if (((ifp->if_capenable & IFCAP_RXCSUM) != 0) ||
1688                     ((ifp->if_capenable & IFCAP_RXCSUM_IPV6) != 0)) {
1689                         ena_rx_checksum(rx_ring, &ena_rx_ctx, mbuf);
1690                 }
1691
1692                 counter_enter();
1693                 counter_u64_add_protected(rx_ring->rx_stats.bytes,
1694                     mbuf->m_pkthdr.len);
1695                 counter_u64_add_protected(adapter->hw_stats.rx_bytes,
1696                     mbuf->m_pkthdr.len);
1697                 counter_exit();
1698                 /*
1699                  * LRO is only for IP/TCP packets, and the TCP checksum of the
1700                  * packet must have been computed by the hardware.
1701                  */
1702                 do_if_input = 1;
1703                 if (((ifp->if_capenable & IFCAP_LRO) != 0)  &&
1704                     ((mbuf->m_pkthdr.csum_flags & CSUM_IP_VALID) != 0) &&
1705                     (ena_rx_ctx.l4_proto == ENA_ETH_IO_L4_PROTO_TCP)) {
1706                         /*
1707                          * Send to the stack if:
1708                          *  - LRO not enabled, or
1709                          *  - no LRO resources, or
1710                          *  - lro enqueue fails
1711                          */
1712                         if ((rx_ring->lro.lro_cnt != 0) &&
1713                             (tcp_lro_rx(&rx_ring->lro, mbuf, 0) == 0))
1714                                 do_if_input = 0;
1715                 }
1716                 if (do_if_input != 0) {
1717                         ena_trace(ENA_DBG | ENA_RXPTH,
1718                             "calling if_input() with mbuf %p", mbuf);
1719                         (*ifp->if_input)(ifp, mbuf);
1720                 }
1721
1722                 counter_enter();
1723                 counter_u64_add_protected(rx_ring->rx_stats.cnt, 1);
1724                 counter_u64_add_protected(adapter->hw_stats.rx_packets, 1);
1725                 counter_exit();
1726         } while (--budget);
1727
1728         rx_ring->next_to_clean = next_to_clean;
1729
1730         refill_required = ena_com_free_desc(io_sq);
1731         refill_threshold = rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER;
1732
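        /*
         * Refill the ring only once enough descriptors have been consumed;
         * batching the refill (and the completion-head update) avoids doing
         * this work for every received packet.
         */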
1733         if (refill_required > refill_threshold) {
1734                 ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
1735                 ena_refill_rx_bufs(rx_ring, refill_required);
1736         }
1737
1738         tcp_lro_flush_all(&rx_ring->lro);
1739
1740         return (RX_BUDGET - budget);
1741
1742 error:
1743         counter_u64_add(rx_ring->rx_stats.bad_desc_num, 1);
1744         return (RX_BUDGET - budget);
1745 }
1746
1747 /*********************************************************************
1748  *
1749  *  MSIX & Interrupt Service routine
1750  *
1751  **********************************************************************/
1752
1753 /**
1754  * ena_intr_msix_mgmnt - MSI-X Interrupt Handler for admin/async queue
1755  * @arg: pointer to the ena_adapter structure
1756  **/
1757 static void
1758 ena_intr_msix_mgmnt(void *arg)
1759 {
1760         struct ena_adapter *adapter = (struct ena_adapter *)arg;
1761
1762         ena_com_admin_q_comp_intr_handler(adapter->ena_dev);
1763         if (likely(adapter->running))
1764                 ena_com_aenq_intr_handler(adapter->ena_dev, arg);
1765 }
1766
1767 /**
1768  * ena_handle_msix - MSI-X Interrupt Handler for Tx/Rx
1769  * @arg: queue (struct ena_que) for which the interrupt is being handled
1770  **/
1771 static void
1772 ena_handle_msix(void *arg)
1773 {
1774         struct ena_que  *que = arg;
1775         struct ena_adapter *adapter = que->adapter;
1776         if_t ifp = adapter->ifp;
1777         struct ena_ring *tx_ring;
1778         struct ena_ring *rx_ring;
1779         struct ena_com_io_cq* io_cq;
1780         struct ena_eth_io_intr_reg intr_reg;
1781         int qid, ena_qid;
1782         int txc, rxc, i;
1783
1784         if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0))
1785                 return;
1786
1787         ena_trace(ENA_DBG, "MSI-X TX/RX routine");
1788
1789         tx_ring = que->tx_ring;
1790         rx_ring = que->rx_ring;
1791         qid = que->id;
1792         ena_qid = ENA_IO_TXQ_IDX(qid);
1793         io_cq = &adapter->ena_dev->io_cq_queues[ena_qid];
1794
1795         for (i = 0; i < CLEAN_BUDGET; ++i) {
1796                 /*
1797                  * If lock cannot be acquired, then deferred cleanup task was
1798                  * being executed and rx ring is being cleaned up in
1799                  * another thread.
1800                  */
1801                 if (likely(ENA_RING_MTX_TRYLOCK(rx_ring) != 0)) {
1802                         rxc = ena_rx_cleanup(rx_ring);
1803                         ENA_RING_MTX_UNLOCK(rx_ring);
1804                 } else {
1805                         rxc = 0;
1806                 }
1807
1808                 /* Protection from calling ena_tx_cleanup from ena_start_xmit */
1809                 ENA_RING_MTX_LOCK(tx_ring);
1810                 txc = ena_tx_cleanup(tx_ring);
1811                 ENA_RING_MTX_UNLOCK(tx_ring);
1812
1813                 if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0))
1814                         return;
1815
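                /*
                 * If neither cleanup consumed its full budget, both rings are
                 * drained and polling can stop; otherwise iterate again (up to
                 * CLEAN_BUDGET passes) before re-enabling the interrupt.
                 */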
1816                 if ((txc != TX_BUDGET) && (rxc != RX_BUDGET))
1817                         break;
1818         }
1819
1820         /* Signal that work is done and unmask interrupt */
1821         ena_com_update_intr_reg(&intr_reg,
1822             RX_IRQ_INTERVAL,
1823             TX_IRQ_INTERVAL,
1824             true);
1825         ena_com_unmask_intr(io_cq, &intr_reg);
1826 }
1827
1828 static int
1829 ena_enable_msix(struct ena_adapter *adapter)
1830 {
1831         device_t dev = adapter->pdev;
1832         int msix_vecs, msix_req;
1833         int i, rc = 0;
1834
1835         /* Reserve the maximum number of MSI-X vectors we might need */
1836         msix_vecs = ENA_MAX_MSIX_VEC(adapter->num_queues);
1837
1838         adapter->msix_entries = malloc(msix_vecs * sizeof(struct msix_entry),
1839             M_DEVBUF, M_WAITOK | M_ZERO);
1840
1841         ena_trace(ENA_DBG, "trying to enable MSI-X, vectors: %d", msix_vecs);
1842
1843         for (i = 0; i < msix_vecs; i++) {
1844                 adapter->msix_entries[i].entry = i;
1845                 /* Vectors must start from 1 */
1846                 adapter->msix_entries[i].vector = i + 1;
1847         }
1848
1849         msix_req = msix_vecs;
1850         rc = pci_alloc_msix(dev, &msix_vecs);
1851         if (unlikely(rc != 0)) {
1852                 device_printf(dev,
1853                     "Failed to enable MSIX, vectors %d rc %d\n", msix_vecs, rc);
1854
1855                 rc = ENOSPC;
1856                 goto err_msix_free;
1857         }
1858
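        /*
         * pci_alloc_msix() may grant fewer vectors than requested; keep the
         * admin/mgmnt vector and use the remainder for the I/O queues.
         */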
1859         if (msix_vecs != msix_req) {
1860                 device_printf(dev, "Enabled only %d MSI-X vectors (out of %d), "
1861                     "reducing the number of queues\n", msix_vecs, msix_req);
1862                 adapter->num_queues = msix_vecs - ENA_ADMIN_MSIX_VEC;
1863         }
1864
1865         adapter->msix_vecs = msix_vecs;
1866         adapter->msix_enabled = true;
1867
1868         return (0);
1869
1870 err_msix_free:
1871         free(adapter->msix_entries, M_DEVBUF);
1872         adapter->msix_entries = NULL;
1873
1874         return (rc);
1875 }
1876
1877 static void
1878 ena_setup_mgmnt_intr(struct ena_adapter *adapter)
1879 {
1880
1881         snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
1882             ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
1883             device_get_nameunit(adapter->pdev));
1884         /*
1885          * The handler is NULL on purpose; it will be set
1886          * when the mgmnt interrupt is acquired.
1887          */
1888         adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler = NULL;
1889         adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
1890         adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
1891             adapter->msix_entries[ENA_MGMNT_IRQ_IDX].vector;
1892 }
1893
1894 static void
1895 ena_setup_io_intr(struct ena_adapter *adapter)
1896 {
1897         static int last_bind_cpu = -1;
1898         int irq_idx;
1899
1900         for (int i = 0; i < adapter->num_queues; i++) {
1901                 irq_idx = ENA_IO_IRQ_IDX(i);
1902
1903                 snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
1904                     "%s-TxRx-%d", device_get_nameunit(adapter->pdev), i);
1905                 adapter->irq_tbl[irq_idx].handler = ena_handle_msix;
1906                 adapter->irq_tbl[irq_idx].data = &adapter->que[i];
1907                 adapter->irq_tbl[irq_idx].vector =
1908                     adapter->msix_entries[irq_idx].vector;
1909                 ena_trace(ENA_INFO | ENA_IOQ, "ena_setup_io_intr vector: %d\n",
1910                     adapter->msix_entries[irq_idx].vector);
1911 #ifdef  RSS
1912                 adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu =
1913                     rss_getcpu(i % rss_getnumbuckets());
1914 #else
1915                 /*
1916                  * We still want to bind rings to the corresponding cpu
1917                  * using something similar to the RSS round-robin technique.
1918                  */
1919                 if (unlikely(last_bind_cpu < 0))
1920                         last_bind_cpu = CPU_FIRST();
1921                 adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu =
1922                     last_bind_cpu;
1923                 last_bind_cpu = CPU_NEXT(last_bind_cpu);
1924 #endif
1925         }
1926 }
1927
1928 static int
1929 ena_request_mgmnt_irq(struct ena_adapter *adapter)
1930 {
1931         struct ena_irq *irq;
1932         unsigned long flags;
1933         int rc, rcc;
1934
1935         flags = RF_ACTIVE | RF_SHAREABLE;
1936
1937         irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
1938         irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ,
1939             &irq->vector, flags);
1940
1941         if (unlikely(irq->res == NULL)) {
1942                 device_printf(adapter->pdev, "could not allocate "
1943                     "irq vector: %d\n", irq->vector);
1944                 return (ENXIO);
1945         }
1946
1947         rc = bus_setup_intr(adapter->pdev, irq->res,
1948             INTR_TYPE_NET | INTR_MPSAFE, NULL, ena_intr_msix_mgmnt,
1949             irq->data, &irq->cookie);
1950         if (unlikely(rc != 0)) {
1951                 device_printf(adapter->pdev, "failed to register "
1952                     "interrupt handler for irq %ju: %d\n",
1953                     rman_get_start(irq->res), rc);
1954                 goto err_res_free;
1955         }
1956         irq->requested = true;
1957
1958         return (rc);
1959
1960 err_res_free:
1961         ena_trace(ENA_INFO | ENA_ADMQ, "releasing resource for irq %d\n",
1962             irq->vector);
1963         rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
1964             irq->vector, irq->res);
1965         if (unlikely(rcc != 0))
1966                 device_printf(adapter->pdev, "dev has no parent while "
1967                     "releasing res for irq: %d\n", irq->vector);
1968         irq->res = NULL;
1969
1970         return (rc);
1971 }
1972
1973 static int
1974 ena_request_io_irq(struct ena_adapter *adapter)
1975 {
1976         struct ena_irq *irq;
1977         unsigned long flags = 0;
1978         int rc = 0, i, rcc;
1979
1980         if (unlikely(adapter->msix_enabled == 0)) {
1981                 device_printf(adapter->pdev,
1982                     "failed to request I/O IRQ: MSI-X is not enabled\n");
1983                 return (EINVAL);
1984         } else {
1985                 flags = RF_ACTIVE | RF_SHAREABLE;
1986         }
1987
1988         for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
1989                 irq = &adapter->irq_tbl[i];
1990
1991                 if (unlikely(irq->requested))
1992                         continue;
1993
1994                 irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ,
1995                     &irq->vector, flags);
1996                 if (unlikely(irq->res == NULL)) {
1997                         device_printf(adapter->pdev, "could not allocate "
1998                             "irq vector: %d\n", irq->vector);
1999                         goto err;
2000                 }
2001
2002                 rc = bus_setup_intr(adapter->pdev, irq->res,
2003                     INTR_TYPE_NET | INTR_MPSAFE, NULL,
2004                     irq->handler, irq->data, &irq->cookie);
2005                 if (unlikely(rc != 0)) {
2006                         device_printf(adapter->pdev, "failed to register "
2007                             "interrupt handler for irq %ju: %d\n",
2008                             rman_get_start(irq->res), rc);
2009                         goto err;
2010                 }
2011                 irq->requested = true;
2012
2013 #ifdef  RSS
2014                 ena_trace(ENA_INFO, "queue %d - RSS bucket %d\n",
2015                     i - ENA_IO_IRQ_FIRST_IDX, irq->cpu);
2016 #else
2017                 ena_trace(ENA_INFO, "queue %d - cpu %d\n",
2018                     i - ENA_IO_IRQ_FIRST_IDX, irq->cpu);
2019 #endif
2020         }
2021
2022         return (rc);
2023
2024 err:
2025
2026         for (; i >= ENA_IO_IRQ_FIRST_IDX; i--) {
2027                 irq = &adapter->irq_tbl[i];
2028                 rcc = 0;
2029
2030                 /* Once we entered the err: section and irq->requested is true,
2031                    we free both the interrupt and its resources */
2032                 if (irq->requested)
2033                         rcc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie);
2034                 if (unlikely(rcc != 0))
2035                         device_printf(adapter->pdev, "could not release"
2036                             " irq: %d, error: %d\n", irq->vector, rcc);
2037
2038                 /* If we entered the err: section without irq->requested set,
2039                    we know it was bus_alloc_resource_any() that needs cleanup,
2040                    provided res is not NULL. If res is NULL, no work is needed
2041                    in this iteration */
2042                 rcc = 0;
2043                 if (irq->res != NULL) {
2044                         rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
2045                             irq->vector, irq->res);
2046                 }
2047                 if (unlikely(rcc != 0))
2048                         device_printf(adapter->pdev, "dev has no parent while "
2049                             "releasing res for irq: %d\n", irq->vector);
2050                 irq->requested = false;
2051                 irq->res = NULL;
2052         }
2053
2054         return (rc);
2055 }
2056
2057 static void
2058 ena_free_mgmnt_irq(struct ena_adapter *adapter)
2059 {
2060         struct ena_irq *irq;
2061         int rc;
2062
2063         irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
2064         if (irq->requested) {
2065                 ena_trace(ENA_INFO | ENA_ADMQ, "tear down irq: %d\n",
2066                     irq->vector);
2067                 rc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie);
2068                 if (unlikely(rc != 0))
2069                         device_printf(adapter->pdev, "failed to tear "
2070                             "down irq: %d\n", irq->vector);
2071                 irq->requested = false;
2072         }
2073
2074         if (irq->res != NULL) {
2075                 ena_trace(ENA_INFO | ENA_ADMQ, "release resource irq: %d\n",
2076                     irq->vector);
2077                 rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
2078                     irq->vector, irq->res);
2079                 irq->res = NULL;
2080                 if (unlikely(rc != 0))
2081                         device_printf(adapter->pdev, "dev has no parent while "
2082                             "releasing res for irq: %d\n", irq->vector);
2083         }
2084 }
2085
2086 static void
2087 ena_free_io_irq(struct ena_adapter *adapter)
2088 {
2089         struct ena_irq *irq;
2090         int rc;
2091
2092         for (int i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
2093                 irq = &adapter->irq_tbl[i];
2094                 if (irq->requested) {
2095                         ena_trace(ENA_INFO | ENA_IOQ, "tear down irq: %d\n",
2096                             irq->vector);
2097                         rc = bus_teardown_intr(adapter->pdev, irq->res,
2098                             irq->cookie);
2099                         if (unlikely(rc != 0)) {
2100                                 device_printf(adapter->pdev, "failed to tear "
2101                                     "down irq: %d\n", irq->vector);
2102                         }
2103                         irq->requested = false;
2104                 }
2105
2106                 if (irq->res != NULL) {
2107                         ena_trace(ENA_INFO | ENA_IOQ, "release resource irq: %d\n",
2108                             irq->vector);
2109                         rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
2110                             irq->vector, irq->res);
2111                         irq->res = NULL;
2112                         if (unlikely(rc != 0)) {
2113                                 device_printf(adapter->pdev, "dev has no parent"
2114                                     " while releasing res for irq: %d\n",
2115                                     irq->vector);
2116                         }
2117                 }
2118         }
2119 }
2120
2121 static void
2122 ena_free_irqs(struct ena_adapter* adapter)
2123 {
2124
2125         ena_free_io_irq(adapter);
2126         ena_free_mgmnt_irq(adapter);
2127         ena_disable_msix(adapter);
2128 }
2129
2130 static void
2131 ena_disable_msix(struct ena_adapter *adapter)
2132 {
2133
2134         pci_release_msi(adapter->pdev);
2135
2136         adapter->msix_vecs = 0;
2137         free(adapter->msix_entries, M_DEVBUF);
2138         adapter->msix_entries = NULL;
2139 }
2140
2141 static void
2142 ena_unmask_all_io_irqs(struct ena_adapter *adapter)
2143 {
2144         struct ena_com_io_cq* io_cq;
2145         struct ena_eth_io_intr_reg intr_reg;
2146         uint16_t ena_qid;
2147         int i;
2148
2149         /* Unmask interrupts for all queues */
2150         for (i = 0; i < adapter->num_queues; i++) {
2151                 ena_qid = ENA_IO_TXQ_IDX(i);
2152                 io_cq = &adapter->ena_dev->io_cq_queues[ena_qid];
2153                 ena_com_update_intr_reg(&intr_reg, 0, 0, true);
2154                 ena_com_unmask_intr(io_cq, &intr_reg);
2155         }
2156 }
2157
2158 /* Configure the Rx forwarding (RSS) */
2159 static int
2160 ena_rss_configure(struct ena_adapter *adapter)
2161 {
2162         struct ena_com_dev *ena_dev = adapter->ena_dev;
2163         int rc;
2164
2165         /* Set indirect table */
2166         rc = ena_com_indirect_table_set(ena_dev);
2167         if (unlikely((rc != 0) && (rc != EOPNOTSUPP)))
2168                 return (rc);
2169
2170         /* Configure hash function (if supported) */
2171         rc = ena_com_set_hash_function(ena_dev);
2172         if (unlikely((rc != 0) && (rc != EOPNOTSUPP)))
2173                 return (rc);
2174
2175         /* Configure hash inputs (if supported) */
2176         rc = ena_com_set_hash_ctrl(ena_dev);
2177         if (unlikely((rc != 0) && (rc != EOPNOTSUPP)))
2178                 return (rc);
2179
2180         return (0);
2181 }
2182
2183 static int
2184 ena_up_complete(struct ena_adapter *adapter)
2185 {
2186         int rc;
2187
2188         if (likely(adapter->rss_support)) {
2189                 rc = ena_rss_configure(adapter);
2190                 if (rc != 0)
2191                         return (rc);
2192         }
2193
2194         rc = ena_change_mtu(adapter->ifp, adapter->ifp->if_mtu);
2195         if (unlikely(rc != 0))
2196                 return (rc);
2197
2198         ena_refill_all_rx_bufs(adapter);
2199         ena_reset_counters((counter_u64_t *)&adapter->hw_stats,
2200             sizeof(adapter->hw_stats));
2201
2202         return (0);
2203 }
2204
2205 static int
2206 ena_up(struct ena_adapter *adapter)
2207 {
2208         int rc = 0;
2209
2210         if (unlikely(device_is_attached(adapter->pdev) == 0)) {
2211                 device_printf(adapter->pdev, "device is not attached!\n");
2212                 return (ENXIO);
2213         }
2214
2215         if (unlikely(!adapter->running)) {
2216                 device_printf(adapter->pdev, "device is not running!\n");
2217                 return (ENXIO);
2218         }
2219
2220         if (!adapter->up) {
2221                 device_printf(adapter->pdev, "device is going UP\n");
2222
2223                 /* setup interrupts for IO queues */
2224                 ena_setup_io_intr(adapter);
2225                 rc = ena_request_io_irq(adapter);
2226                 if (unlikely(rc != 0)) {
2227                         ena_trace(ENA_ALERT, "err_req_irq");
2228                         goto err_req_irq;
2229                 }
2230
2231                 /* allocate transmit descriptors */
2232                 rc = ena_setup_all_tx_resources(adapter);
2233                 if (unlikely(rc != 0)) {
2234                         ena_trace(ENA_ALERT, "err_setup_tx");
2235                         goto err_setup_tx;
2236                 }
2237
2238                 /* allocate receive descriptors */
2239                 rc = ena_setup_all_rx_resources(adapter);
2240                 if (unlikely(rc != 0)) {
2241                         ena_trace(ENA_ALERT, "err_setup_rx");
2242                         goto err_setup_rx;
2243                 }
2244
2245                 /* create IO queues for Rx & Tx */
2246                 rc = ena_create_io_queues(adapter);
2247                 if (unlikely(rc != 0)) {
2248                         ena_trace(ENA_ALERT,
2249                             "create IO queues failed");
2250                         goto err_io_que;
2251                 }
2252
2253                 if (unlikely(adapter->link_status))
2254                         if_link_state_change(adapter->ifp, LINK_STATE_UP);
2255
2256                 rc = ena_up_complete(adapter);
2257                 if (unlikely(rc != 0))
2258                         goto err_up_complete;
2259
2260                 counter_u64_add(adapter->dev_stats.interface_up, 1);
2261
2262                 ena_update_hwassist(adapter);
2263
2264                 if_setdrvflagbits(adapter->ifp, IFF_DRV_RUNNING,
2265                     IFF_DRV_OACTIVE);
2266
2267                 callout_reset_sbt(&adapter->timer_service, SBT_1S, SBT_1S,
2268                     ena_timer_service, (void *)adapter, 0);
2269
2270                 adapter->up = true;
2271
2272                 ena_unmask_all_io_irqs(adapter);
2273         }
2274
2275         return (0);
2276
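/*
 * Error unwinding: release everything acquired so far, in reverse order.
 */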
2277 err_up_complete:
2278         ena_destroy_all_io_queues(adapter);
2279 err_io_que:
2280         ena_free_all_rx_resources(adapter);
2281 err_setup_rx:
2282         ena_free_all_tx_resources(adapter);
2283 err_setup_tx:
2284         ena_free_io_irq(adapter);
2285 err_req_irq:
2286         return (rc);
2287 }
2288
2289 static uint64_t
2290 ena_get_counter(if_t ifp, ift_counter cnt)
2291 {
2292         struct ena_adapter *adapter;
2293         struct ena_hw_stats *stats;
2294
2295         adapter = if_getsoftc(ifp);
2296         stats = &adapter->hw_stats;
2297
2298         switch (cnt) {
2299         case IFCOUNTER_IPACKETS:
2300                 return (counter_u64_fetch(stats->rx_packets));
2301         case IFCOUNTER_OPACKETS:
2302                 return (counter_u64_fetch(stats->tx_packets));
2303         case IFCOUNTER_IBYTES:
2304                 return (counter_u64_fetch(stats->rx_bytes));
2305         case IFCOUNTER_OBYTES:
2306                 return (counter_u64_fetch(stats->tx_bytes));
2307         case IFCOUNTER_IQDROPS:
2308                 return (counter_u64_fetch(stats->rx_drops));
2309         default:
2310                 return (if_get_counter_default(ifp, cnt));
2311         }
2312 }
2313
2314 static int
2315 ena_media_change(if_t ifp)
2316 {
2317         /* Media Change is not supported by firmware */
2318         return (0);
2319 }
2320
2321 static void
2322 ena_media_status(if_t ifp, struct ifmediareq *ifmr)
2323 {
2324         struct ena_adapter *adapter = if_getsoftc(ifp);
2325         ena_trace(ENA_DBG, "enter");
2326
2327         mtx_lock(&adapter->global_mtx);
2328
2329         ifmr->ifm_status = IFM_AVALID;
2330         ifmr->ifm_active = IFM_ETHER;
2331
2332         if (!adapter->link_status) {
2333                 mtx_unlock(&adapter->global_mtx);
2334                 ena_trace(ENA_INFO, "link_status = false");
2335                 return;
2336         }
2337
2338         ifmr->ifm_status |= IFM_ACTIVE;
2339         ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
2340
2341         mtx_unlock(&adapter->global_mtx);
2342 }
2343
2344 static void
2345 ena_init(void *arg)
2346 {
2347         struct ena_adapter *adapter = (struct ena_adapter *)arg;
2348
2349         if (!adapter->up) {
2350                 sx_xlock(&adapter->ioctl_sx);
2351                 ena_up(adapter);
2352                 sx_unlock(&adapter->ioctl_sx);
2353         }
2354 }
2355
2356 static int
2357 ena_ioctl(if_t ifp, u_long command, caddr_t data)
2358 {
2359         struct ena_adapter *adapter;
2360         struct ifreq *ifr;
2361         int rc;
2362
2363         adapter = ifp->if_softc;
2364         ifr = (struct ifreq *)data;
2365
2366         /*
2367          * Acquire the lock to prevent the up and down routines from running in parallel.
2368          */
2369         rc = 0;
2370         switch (command) {
2371         case SIOCSIFMTU:
2372                 if (ifp->if_mtu == ifr->ifr_mtu)
2373                         break;
2374                 sx_xlock(&adapter->ioctl_sx);
2375                 ena_down(adapter);
2376
2377                 ena_change_mtu(ifp, ifr->ifr_mtu);
2378
2379                 rc = ena_up(adapter);
2380                 sx_unlock(&adapter->ioctl_sx);
2381                 break;
2382
2383         case SIOCSIFFLAGS:
2384                 if ((ifp->if_flags & IFF_UP) != 0) {
2385                         if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
2386                                 if ((ifp->if_flags & (IFF_PROMISC |
2387                                     IFF_ALLMULTI)) != 0) {
2388                                         device_printf(adapter->pdev,
2389                                             "ioctl promisc/allmulti\n");
2390                                 }
2391                         } else {
2392                                 sx_xlock(&adapter->ioctl_sx);
2393                                 rc = ena_up(adapter);
2394                                 sx_unlock(&adapter->ioctl_sx);
2395                         }
2396                 } else {
2397                         if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
2398                                 sx_xlock(&adapter->ioctl_sx);
2399                                 ena_down(adapter);
2400                                 sx_unlock(&adapter->ioctl_sx);
2401                         }
2402                 }
2403                 break;
2404
2405         case SIOCADDMULTI:
2406         case SIOCDELMULTI:
2407                 break;
2408
2409         case SIOCSIFMEDIA:
2410         case SIOCGIFMEDIA:
2411                 rc = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
2412                 break;
2413
2414         case SIOCSIFCAP:
2415                 {
2416                         int reinit = 0;
2417
2418                         if (ifr->ifr_reqcap != ifp->if_capenable) {
2419                                 ifp->if_capenable = ifr->ifr_reqcap;
2420                                 reinit = 1;
2421                         }
2422
2423                         if ((reinit != 0) &&
2424                             ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)) {
2425                                 sx_xlock(&adapter->ioctl_sx);
2426                                 ena_down(adapter);
2427                                 rc = ena_up(adapter);
2428                                 sx_unlock(&adapter->ioctl_sx);
2429                         }
2430                 }
2431
2432                 break;
2433         default:
2434                 rc = ether_ioctl(ifp, command, data);
2435                 break;
2436         }
2437
2438         return (rc);
2439 }
2440
2441 static int
2442 ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *feat)
2443 {
2444         int caps = 0;
2445
2446         if ((feat->offload.tx &
2447             (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK |
2448             ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK |
2449                 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK)) != 0)
2450                 caps |= IFCAP_TXCSUM;
2451
2452         if ((feat->offload.tx &
2453             (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK |
2454             ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)) != 0)
2455                 caps |= IFCAP_TXCSUM_IPV6;
2456
2457         if ((feat->offload.tx &
2458             ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0)
2459                 caps |= IFCAP_TSO4;
2460
2461         if ((feat->offload.tx &
2462             ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) != 0)
2463                 caps |= IFCAP_TSO6;
2464
2465         if ((feat->offload.rx_supported &
2466             (ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK |
2467             ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK)) != 0)
2468                 caps |= IFCAP_RXCSUM;
2469
2470         if ((feat->offload.rx_supported &
2471             ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) != 0)
2472                 caps |= IFCAP_RXCSUM_IPV6;
2473
2474         caps |= IFCAP_LRO | IFCAP_JUMBO_MTU;
2475
2476         return (caps);
2477 }
2478
2479 static void
2480 ena_update_host_info(struct ena_admin_host_info *host_info, if_t ifp)
2481 {
2482
2483         host_info->supported_network_features[0] =
2484             (uint32_t)if_getcapabilities(ifp);
2485 }
2486
2487 static void
2488 ena_update_hwassist(struct ena_adapter *adapter)
2489 {
2490         if_t ifp = adapter->ifp;
2491         uint32_t feat = adapter->tx_offload_cap;
2492         int cap = if_getcapenable(ifp);
2493         int flags = 0;
2494
2495         if_clearhwassist(ifp);
2496
2497         if ((cap & IFCAP_TXCSUM) != 0) {
2498                 if ((feat &
2499                     ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK) != 0)
2500                         flags |= CSUM_IP;
2501                 if ((feat &
2502                     (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK |
2503                     ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)) != 0)
2504                         flags |= CSUM_IP_UDP | CSUM_IP_TCP;
2505         }
2506
2507         if ((cap & IFCAP_TXCSUM_IPV6) != 0)
2508                 flags |= CSUM_IP6_UDP | CSUM_IP6_TCP;
2509
2510         if ((cap & IFCAP_TSO4) != 0)
2511                 flags |= CSUM_IP_TSO;
2512
2513         if ((cap & IFCAP_TSO6) != 0)
2514                 flags |= CSUM_IP6_TSO;
2515
2516         if_sethwassistbits(ifp, flags, 0);
2517 }
2518
2519 static int
2520 ena_setup_ifnet(device_t pdev, struct ena_adapter *adapter,
2521     struct ena_com_dev_get_features_ctx *feat)
2522 {
2523         if_t ifp;
2524         int caps = 0;
2525
2526         ifp = adapter->ifp = if_gethandle(IFT_ETHER);
2527         if (unlikely(ifp == NULL)) {
2528                 ena_trace(ENA_ALERT, "can not allocate ifnet structure\n");
2529                 return (ENXIO);
2530         }
2531         if_initname(ifp, device_get_name(pdev), device_get_unit(pdev));
2532         if_setdev(ifp, pdev);
2533         if_setsoftc(ifp, adapter);
2534
2535         if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
2536         if_setinitfn(ifp, ena_init);
2537         if_settransmitfn(ifp, ena_mq_start);
2538         if_setqflushfn(ifp, ena_qflush);
2539         if_setioctlfn(ifp, ena_ioctl);
2540         if_setgetcounterfn(ifp, ena_get_counter);
2541
2542         if_setsendqlen(ifp, adapter->tx_ring_size);
2543         if_setsendqready(ifp);
2544         if_setmtu(ifp, ETHERMTU);
2545         if_setbaudrate(ifp, 0);
2546         /* Zeroize capabilities... */
2547         if_setcapabilities(ifp, 0);
2548         if_setcapenable(ifp, 0);
2549         /* check hardware support */
2550         caps = ena_get_dev_offloads(feat);
2551         /* ... and set them */
2552         if_setcapabilitiesbit(ifp, caps, 0);
2553
2554         /* TSO parameters */
2555         ifp->if_hw_tsomax = ENA_TSO_MAXSIZE -
2556             (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
2557         ifp->if_hw_tsomaxsegcount = adapter->max_tx_sgl_size - 1;
2558         ifp->if_hw_tsomaxsegsize = ENA_TSO_MAXSIZE;
2559
2560         if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
2561         if_setcapenable(ifp, if_getcapabilities(ifp));
2562
2563         /*
2564          * Specify the media types supported by this adapter and register
2565          * callbacks to update media and link information
2566          */
2567         ifmedia_init(&adapter->media, IFM_IMASK,
2568             ena_media_change, ena_media_status);
2569         ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2570         ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2571
2572         ether_ifattach(ifp, adapter->mac_addr);
2573
2574         return (0);
2575 }
2576
2577 static void
2578 ena_down(struct ena_adapter *adapter)
2579 {
2580         int rc;
2581
2582         if (adapter->up) {
2583                 device_printf(adapter->pdev, "device is going DOWN\n");
2584
2585                 callout_drain(&adapter->timer_service);
2586
2587                 adapter->up = false;
2588                 if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE,
2589                     IFF_DRV_RUNNING);
2590
2591                 ena_free_io_irq(adapter);
2592
2593                 if (adapter->trigger_reset) {
2594                         rc = ena_com_dev_reset(adapter->ena_dev,
2595                             adapter->reset_reason);
2596                         if (unlikely(rc != 0))
2597                                 device_printf(adapter->pdev,
2598                                     "Device reset failed\n");
2599                 }
2600
2601                 ena_destroy_all_io_queues(adapter);
2602
2603                 ena_free_all_tx_bufs(adapter);
2604                 ena_free_all_rx_bufs(adapter);
2605                 ena_free_all_tx_resources(adapter);
2606                 ena_free_all_rx_resources(adapter);
2607
2608                 counter_u64_add(adapter->dev_stats.interface_down, 1);
2609         }
2610 }
2611
2612 static void
2613 ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct mbuf *mbuf)
2614 {
2615         struct ena_com_tx_meta *ena_meta;
2616         struct ether_vlan_header *eh;
2617         u32 mss;
2618         bool offload;
2619         uint16_t etype;
2620         int ehdrlen;
2621         struct ip *ip;
2622         int iphlen;
2623         struct tcphdr *th;
2624
2625         offload = false;
2626         ena_meta = &ena_tx_ctx->ena_meta;
2627         mss = mbuf->m_pkthdr.tso_segsz;
2628
2629         if (mss != 0)
2630                 offload = true;
2631
2632         if ((mbuf->m_pkthdr.csum_flags & CSUM_TSO) != 0)
2633                 offload = true;
2634
2635         if ((mbuf->m_pkthdr.csum_flags & CSUM_OFFLOAD) != 0)
2636                 offload = true;
2637
2638         if (!offload) {
2639                 ena_tx_ctx->meta_valid = 0;
2640                 return;
2641         }
2642
2643         /* Determine where frame payload starts. */
2644         eh = mtod(mbuf, struct ether_vlan_header *);
2645         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2646                 etype = ntohs(eh->evl_proto);
2647                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2648         } else {
2649                 etype = ntohs(eh->evl_encap_proto);
2650                 ehdrlen = ETHER_HDR_LEN;
2651         }
2652
2653         ip = (struct ip *)(mbuf->m_data + ehdrlen);
2654         iphlen = ip->ip_hl << 2;
2655         th = (struct tcphdr *)((caddr_t)ip + iphlen);
2656
2657         if ((mbuf->m_pkthdr.csum_flags & CSUM_IP) != 0) {
2658                 ena_tx_ctx->l3_csum_enable = 1;
2659         }
2660         if ((mbuf->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2661                 ena_tx_ctx->tso_enable = 1;
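                /* th_off holds the TCP header length in 32-bit words. */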
2662                 ena_meta->l4_hdr_len = (th->th_off);
2663         }
2664
2665         switch (etype) {
2666         case ETHERTYPE_IP:
2667                 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
2668                 if ((ip->ip_off & htons(IP_DF)) != 0)
2669                         ena_tx_ctx->df = 1;
2670                 break;
2671         case ETHERTYPE_IPV6:
2672                 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
2673                 break;
2674         default:
2675                 break;
2676         }
2677
2678         if (ip->ip_p == IPPROTO_TCP) {
2679                 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
2680                 if ((mbuf->m_pkthdr.csum_flags &
2681                     (CSUM_IP_TCP | CSUM_IP6_TCP)) != 0)
2682                         ena_tx_ctx->l4_csum_enable = 1;
2683                 else
2684                         ena_tx_ctx->l4_csum_enable = 0;
2685         } else if (ip->ip_p == IPPROTO_UDP) {
2686                 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
2687                 if ((mbuf->m_pkthdr.csum_flags &
2688                     (CSUM_IP_UDP | CSUM_IP6_UDP)) != 0)
2689                         ena_tx_ctx->l4_csum_enable = 1;
2690                 else
2691                         ena_tx_ctx->l4_csum_enable = 0;
2692         } else {
2693                 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN;
2694                 ena_tx_ctx->l4_csum_enable = 0;
2695         }
2696
2697         ena_meta->mss = mss;
2698         ena_meta->l3_hdr_len = iphlen;
2699         ena_meta->l3_hdr_offset = ehdrlen;
2700         ena_tx_ctx->meta_valid = 1;
2701 }
2702
2703 static int
2704 ena_check_and_collapse_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf)
2705 {
2706         struct ena_adapter *adapter;
2707         struct mbuf *collapsed_mbuf;
2708         int num_frags;
2709
2710         adapter = tx_ring->adapter;
2711         num_frags = ena_mbuf_count(*mbuf);
2712
2713         /* One segment must be reserved for configuration descriptor. */
2714         if (num_frags < adapter->max_tx_sgl_size)
2715                 return (0);
2716         counter_u64_add(tx_ring->tx_stats.collapse, 1);
2717
2718         collapsed_mbuf = m_collapse(*mbuf, M_NOWAIT,
2719             adapter->max_tx_sgl_size - 1);
2720         if (unlikely(collapsed_mbuf == NULL)) {
2721                 counter_u64_add(tx_ring->tx_stats.collapse_err, 1);
2722                 return (ENOMEM);
2723         }
2724
2725         /* If the mbuf was collapsed successfully, the original mbuf was released. */
2726         *mbuf = collapsed_mbuf;
2727
2728         return (0);
2729 }
2730
2731 static int
2732 ena_xmit_mbuf(struct ena_ring *tx_ring, struct mbuf **mbuf)
2733 {
2734         struct ena_adapter *adapter;
2735         struct ena_tx_buffer *tx_info;
2736         struct ena_com_tx_ctx ena_tx_ctx;
2737         struct ena_com_dev *ena_dev;
2738         struct ena_com_buf *ena_buf;
2739         struct ena_com_io_sq* io_sq;
2740         bus_dma_segment_t segs[ENA_BUS_DMA_SEGS];
2741         void *push_hdr;
2742         uint16_t next_to_use;
2743         uint16_t req_id;
2744         uint16_t push_len;
2745         uint16_t ena_qid;
2746         uint32_t nsegs, header_len;
2747         int i, rc;
2748         int nb_hw_desc;
2749
2750         ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id);
2751         adapter = tx_ring->que->adapter;
2752         ena_dev = adapter->ena_dev;
2753         io_sq = &ena_dev->io_sq_queues[ena_qid];
2754
2755         rc = ena_check_and_collapse_mbuf(tx_ring, mbuf);
2756         if (unlikely(rc != 0)) {
2757                 ena_trace(ENA_WARNING,
2758                     "Failed to collapse mbuf! err: %d", rc);
2759                 return (rc);
2760         }
2761
2762         next_to_use = tx_ring->next_to_use;
2763         req_id = tx_ring->free_tx_ids[next_to_use];
2764         tx_info = &tx_ring->tx_buffer_info[req_id];
2765
2766         tx_info->mbuf = *mbuf;
2767         tx_info->num_of_bufs = 0;
2768
2769         ena_buf = tx_info->bufs;
2770
2771         ena_trace(ENA_DBG | ENA_TXPTH, "Tx: %d bytes", (*mbuf)->m_pkthdr.len);
2772
2773         push_len = 0;
2774         /*
2775          * header_len is just a hint for the device. Because FreeBSD does not
2776          * provide the packet header length and it is not guaranteed that all
2777          * packet headers will be in the 1st mbuf, setting header_len to 0
2778          * makes the device ignore this value and resolve the header on its own.
2780          */
2781         header_len = 0;
2782         push_hdr = NULL;
2783
2784         rc = bus_dmamap_load_mbuf_sg(adapter->tx_buf_tag, tx_info->map,
2785             *mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
2786
2787         if (unlikely((rc != 0) || (nsegs == 0))) {
2788                 ena_trace(ENA_WARNING,
2789                     "dmamap load failed! err: %d nsegs: %d", rc, nsegs);
2790                 counter_u64_add(tx_ring->tx_stats.dma_mapping_err, 1);
2791                 tx_info->mbuf = NULL;
2792                 if (rc == ENOMEM)
2793                         return (ENA_COM_NO_MEM);
2794                 else
2795                         return (ENA_COM_INVAL);
2796         }
2797
2798         for (i = 0; i < nsegs; i++) {
2799                 ena_buf->len = segs[i].ds_len;
2800                 ena_buf->paddr = segs[i].ds_addr;
2801                 ena_buf++;
2802         }
2803         tx_info->num_of_bufs = nsegs;
2804
2805         memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
2806         ena_tx_ctx.ena_bufs = tx_info->bufs;
2807         ena_tx_ctx.push_header = push_hdr;
2808         ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
2809         ena_tx_ctx.req_id = req_id;
2810         ena_tx_ctx.header_len = header_len;
2811
2812         /* Set flags and meta data */
2813         ena_tx_csum(&ena_tx_ctx, *mbuf);
2814         /* Prepare the packet's descriptors and send them to device */
2815         rc = ena_com_prepare_tx(io_sq, &ena_tx_ctx, &nb_hw_desc);
2816         if (unlikely(rc != 0)) {
2817                 ena_trace(ENA_DBG | ENA_TXPTH, "failed to prepare tx bufs\n");
2818                 counter_u64_add(tx_ring->tx_stats.prepare_ctx_err, 1);
2819                 goto dma_error;
2820         }
2821
2822         counter_enter();
2823         counter_u64_add_protected(tx_ring->tx_stats.cnt, 1);
2824         counter_u64_add_protected(tx_ring->tx_stats.bytes,
2825             (*mbuf)->m_pkthdr.len);
2826
2827         counter_u64_add_protected(adapter->hw_stats.tx_packets, 1);
2828         counter_u64_add_protected(adapter->hw_stats.tx_bytes,
2829             (*mbuf)->m_pkthdr.len);
2830         counter_exit();
2831
2832         tx_info->tx_descs = nb_hw_desc;
2833         getbinuptime(&tx_info->timestamp);
2834         tx_info->print_once = true;
2835
2836         tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
2837             tx_ring->ring_size);
2838
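        /*
         * Make sure the mbuf data is visible to the device before the
         * doorbell is written by the caller.
         */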
2839         bus_dmamap_sync(adapter->tx_buf_tag, tx_info->map,
2840             BUS_DMASYNC_PREWRITE);
2841
2842         return (0);
2843
2844 dma_error:
2845         tx_info->mbuf = NULL;
2846         bus_dmamap_unload(adapter->tx_buf_tag, tx_info->map);
2847
2848         return (rc);
2849 }
2850
2851 static void
2852 ena_start_xmit(struct ena_ring *tx_ring)
2853 {
2854         struct mbuf *mbuf;
2855         struct ena_adapter *adapter = tx_ring->adapter;
2856         struct ena_com_io_sq* io_sq;
2857         int ena_qid;
2858         int acum_pkts = 0;
2859         int ret = 0;
2860
2861         if (unlikely((if_getdrvflags(adapter->ifp) & IFF_DRV_RUNNING) == 0))
2862                 return;
2863
2864         if (unlikely(!adapter->link_status))
2865                 return;
2866
2867         ena_qid = ENA_IO_TXQ_IDX(tx_ring->que->id);
2868         io_sq = &adapter->ena_dev->io_sq_queues[ena_qid];
2869
2870         while ((mbuf = drbr_peek(adapter->ifp, tx_ring->br)) != NULL) {
2871                 ena_trace(ENA_DBG | ENA_TXPTH, "\ndequeued mbuf %p with flags %#x and"
2872                     " header csum flags %#jx",
2873                     mbuf, mbuf->m_flags, (uint64_t)mbuf->m_pkthdr.csum_flags);
2874
2875                 if (unlikely(!ena_com_sq_have_enough_space(io_sq,
2876                     ENA_TX_CLEANUP_THRESHOLD)))
2877                         ena_tx_cleanup(tx_ring);
2878
2879                 if (unlikely((ret = ena_xmit_mbuf(tx_ring, &mbuf)) != 0)) {
2880                         if (ret == ENA_COM_NO_MEM) {
2881                                 drbr_putback(adapter->ifp, tx_ring->br, mbuf);
2882                         } else if (ret == ENA_COM_NO_SPACE) {
2883                                 drbr_putback(adapter->ifp, tx_ring->br, mbuf);
2884                         } else {
2885                                 m_freem(mbuf);
2886                                 drbr_advance(adapter->ifp, tx_ring->br);
2887                         }
2888
2889                         break;
2890                 }
2891
2892                 drbr_advance(adapter->ifp, tx_ring->br);
2893
2894                 if (unlikely((if_getdrvflags(adapter->ifp) &
2895                     IFF_DRV_RUNNING) == 0))
2896                         return;
2897
2898                 acum_pkts++;
2899
2900                 BPF_MTAP(adapter->ifp, mbuf);
2901
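                /*
                 * Batch doorbell writes: notify the device only once every
                 * DB_THRESHOLD packets; any remainder is flushed after the
                 * loop.
                 */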
2902                 if (unlikely(acum_pkts == DB_THRESHOLD)) {
2903                         acum_pkts = 0;
2904                         wmb();
2905                         /* Trigger the dma engine */
2906                         ena_com_write_sq_doorbell(io_sq);
2907                         counter_u64_add(tx_ring->tx_stats.doorbells, 1);
2908                 }
2909
2910         }
2911
2912         if (likely(acum_pkts != 0)) {
2913                 wmb();
2914                 /* Trigger the dma engine */
2915                 ena_com_write_sq_doorbell(io_sq);
2916                 counter_u64_add(tx_ring->tx_stats.doorbells, 1);
2917         }
2918
2919         if (!ena_com_sq_have_enough_space(io_sq, ENA_TX_CLEANUP_THRESHOLD))
2920                 ena_tx_cleanup(tx_ring);
2921 }
2922
2923 static void
2924 ena_deferred_mq_start(void *arg, int pending)
2925 {
2926         struct ena_ring *tx_ring = (struct ena_ring *)arg;
2927         struct ifnet *ifp = tx_ring->adapter->ifp;
2928
2929         while (!drbr_empty(ifp, tx_ring->br) &&
2930             (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
2931                 ENA_RING_MTX_LOCK(tx_ring);
2932                 ena_start_xmit(tx_ring);
2933                 ENA_RING_MTX_UNLOCK(tx_ring);
2934         }
2935 }
2936
2937 static int
2938 ena_mq_start(if_t ifp, struct mbuf *m)
2939 {
2940         struct ena_adapter *adapter = ifp->if_softc;
2941         struct ena_ring *tx_ring;
2942         int ret, is_drbr_empty;
2943         uint32_t i;
2944
2945         if (unlikely((if_getdrvflags(adapter->ifp) & IFF_DRV_RUNNING) == 0))
2946                 return (ENODEV);
2947
2948         /* Which queue to use */
2949         /*
2950          * If everything is set up correctly, the selected bucket should
2951          * match the one assigned to the CPU we are currently running on,
2952          * which should improve performance.
2953          */
2954         if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
2955 #ifdef  RSS
2956                 if (rss_hash2bucket(m->m_pkthdr.flowid,
2957                     M_HASHTYPE_GET(m), &i) == 0) {
2958                         i = i % adapter->num_queues;
2959
2960                 } else
2961 #endif
2962                 {
2963                         i = m->m_pkthdr.flowid % adapter->num_queues;
2964                 }
2965         } else {
2966                 i = curcpu % adapter->num_queues;
2967         }
2968         tx_ring = &adapter->tx_ring[i];
2969
2970         /* Check if drbr is empty before putting packet */
2971         is_drbr_empty = drbr_empty(ifp, tx_ring->br);
2972         ret = drbr_enqueue(ifp, tx_ring->br, m);
2973         if (unlikely(ret != 0)) {
2974                 taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
2975                 return (ret);
2976         }
2977
2978         if ((is_drbr_empty != 0) && (ENA_RING_MTX_TRYLOCK(tx_ring) != 0)) {
2979                 ena_start_xmit(tx_ring);
2980                 ENA_RING_MTX_UNLOCK(tx_ring);
2981         } else {
2982                 taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
2983         }
2984
2985         return (0);
2986 }
2987
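/*
 * if_qflush handler: drops all packets pending in the per-queue buf_rings.
 */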
2988 static void
2989 ena_qflush(if_t ifp)
2990 {
2991         struct ena_adapter *adapter = ifp->if_softc;
2992         struct ena_ring *tx_ring = adapter->tx_ring;
2993         int i;
2994
2995         for (i = 0; i < adapter->num_queues; ++i, ++tx_ring)
2996                 if (!drbr_empty(ifp, tx_ring->br)) {
2997                         ENA_RING_MTX_LOCK(tx_ring);
2998                         drbr_flush(ifp, tx_ring->br);
2999                         ENA_RING_MTX_UNLOCK(tx_ring);
3000                 }
3001
3002         if_qflush(ifp);
3003 }
3004
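/*
 * The number of IO queues is limited by the number of CPUs, the device SQ/CQ
 * limits, the available MSI-X vectors (one is reserved for management) and,
 * when RSS is compiled in, the number of RSS buckets.
 */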
3005 static int
3006 ena_calc_io_queue_num(struct ena_adapter *adapter,
3007     struct ena_com_dev_get_features_ctx *get_feat_ctx)
3008 {
3009         int io_sq_num, io_cq_num, io_queue_num;
3010
3011         io_sq_num = get_feat_ctx->max_queues.max_sq_num;
3012         io_cq_num = get_feat_ctx->max_queues.max_cq_num;
3013
3014         io_queue_num = min_t(int, mp_ncpus, ENA_MAX_NUM_IO_QUEUES);
3015         io_queue_num = min_t(int, io_queue_num, io_sq_num);
3016         io_queue_num = min_t(int, io_queue_num, io_cq_num);
3017         /* 1 IRQ for mgmnt and 1 IRQ for each TX/RX pair */
3018         io_queue_num = min_t(int, io_queue_num,
3019             pci_msix_count(adapter->pdev) - 1);
3020 #ifdef  RSS
3021         io_queue_num = min_t(int, io_queue_num, rss_getnumbuckets());
3022 #endif
3023
3024         return (io_queue_num);
3025 }
3026
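/*
 * Clamp the default ring size to the device CQ/SQ depth limits, force it to a
 * power of 2 and report the maximum TX/RX SGL sizes supported by the device.
 */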
3027 static int
3028 ena_calc_queue_size(struct ena_adapter *adapter, uint16_t *max_tx_sgl_size,
3029     uint16_t *max_rx_sgl_size, struct ena_com_dev_get_features_ctx *feat)
3030 {
3031         uint32_t queue_size = ENA_DEFAULT_RING_SIZE;
3032
3033         queue_size = min_t(uint32_t, queue_size,
3034             feat->max_queues.max_cq_depth);
3035         queue_size = min_t(uint32_t, queue_size,
3036             feat->max_queues.max_sq_depth);
3037
3038         /* round down to the nearest power of 2 */
3039         if (powerof2(queue_size) == 0)
3040                 queue_size = 1U << (flsl(queue_size) - 1);
3052
3053         if (unlikely(queue_size == 0)) {
3054                 device_printf(adapter->pdev, "Invalid queue size\n");
3055                 return (ENA_COM_FAULT);
3056         }
3057
3058         *max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
3059             feat->max_queues.max_packet_tx_descs);
3060         *max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
3061             feat->max_queues.max_packet_rx_descs);
3062
3063         return (queue_size);
3064 }
3065
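/*
 * Program a default RSS configuration: an indirection table spread over the
 * IO queues, a CRC32 hash function and the default hash control settings.
 */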
3066 static int
3067 ena_rss_init_default(struct ena_adapter *adapter)
3068 {
3069         struct ena_com_dev *ena_dev = adapter->ena_dev;
3070         device_t dev = adapter->pdev;
3071         int qid, rc, i;
3072
3073         rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
3074         if (unlikely(rc != 0)) {
3075                 device_printf(dev, "Cannot init indirect table\n");
3076                 return (rc);
3077         }
3078
3079         for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
3080 #ifdef  RSS
3081                 qid = rss_get_indirection_to_bucket(i);
3082                 qid = qid % adapter->num_queues;
3083 #else
3084                 qid = i % adapter->num_queues;
3085 #endif
3086                 rc = ena_com_indirect_table_fill_entry(ena_dev, i,
3087                     ENA_IO_RXQ_IDX(qid));
3088                 if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) {
3089                         device_printf(dev, "Cannot fill indirect table\n");
3090                         goto err_rss_destroy;
3091                 }
3092         }
3093
3094         rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
3095             ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
3096         if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) {
3097                 device_printf(dev, "Cannot fill hash function\n");
3098                 goto err_rss_destroy;
3099         }
3100
3101         rc = ena_com_set_default_hash_ctrl(ena_dev);
3102         if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) {
3103                 device_printf(dev, "Cannot fill hash control\n");
3104                 goto err_rss_destroy;
3105         }
3106
3107         return (0);
3108
3109 err_rss_destroy:
3110         ena_com_rss_destroy(ena_dev);
3111         return (rc);
3112 }
3113
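/*
 * Deferred RSS initialization, run once from SYSINIT after the scheduler is
 * up: walks every attached ena device and applies the default RSS setup.
 */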
3114 static void
3115 ena_rss_init_default_deferred(void *arg)
3116 {
3117         struct ena_adapter *adapter;
3118         devclass_t dc;
3119         int max;
3120         int rc;
3121
3122         dc = devclass_find("ena");
3123         if (unlikely(dc == NULL)) {
3124                 ena_trace(ENA_ALERT, "No devclass ena\n");
3125                 return;
3126         }
3127
3128         max = devclass_get_maxunit(dc);
3129         while (max-- >= 0) {
3130                 adapter = devclass_get_softc(dc, max);
3131                 if (adapter != NULL) {
3132                         rc = ena_rss_init_default(adapter);
3133                         adapter->rss_support = true;
3134                         if (unlikely(rc != 0)) {
3135                                 device_printf(adapter->pdev,
3136                                     "WARNING: RSS was not properly initialized,"
3137                                     " it will affect bandwidth\n");
3138                                 adapter->rss_support = false;
3139                         }
3140                 }
3141         }
3142 }
3143 SYSINIT(ena_rss_init, SI_SUB_KICK_SCHEDULER, SI_ORDER_SECOND, ena_rss_init_default_deferred, NULL);
3144
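/*
 * Fill in the host info structure (OS type, kernel and driver versions) and
 * push it to the device; the allocation is released again on failure.
 */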
3145 static void
3146 ena_config_host_info(struct ena_com_dev *ena_dev)
3147 {
3148         struct ena_admin_host_info *host_info;
3149         int rc;
3150
3151         /* Allocate only the host info */
3152         rc = ena_com_allocate_host_info(ena_dev);
3153         if (unlikely(rc != 0)) {
3154                 ena_trace(ENA_ALERT, "Cannot allocate host info\n");
3155                 return;
3156         }
3157
3158         host_info = ena_dev->host_attr.host_info;
3159
3160         host_info->os_type = ENA_ADMIN_OS_FREEBSD;
3161         host_info->kernel_ver = osreldate;
3162
3163         snprintf(host_info->kernel_ver_str, sizeof(host_info->kernel_ver_str), "%d", osreldate);
3164         host_info->os_dist = 0;
3165         strncpy(host_info->os_dist_str, osrelease,
3166             sizeof(host_info->os_dist_str) - 1);
3167
3168         host_info->driver_version =
3169                 (DRV_MODULE_VER_MAJOR) |
3170                 (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
3171                 (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
3172
3173         rc = ena_com_set_host_attributes(ena_dev);
3174         if (unlikely(rc != 0)) {
3175                 if (rc == EOPNOTSUPP)
3176                         ena_trace(ENA_WARNING, "Cannot set host attributes\n");
3177                 else
3178                         ena_trace(ENA_ALERT, "Cannot set host attributes\n");
3179
3180                 goto err;
3181         }
3182
3183         return;
3184
3185 err:
3186         ena_com_delete_host_info(ena_dev);
3187 }
3188
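/*
 * Bring up the device's admin plane: mmio register read setup, device reset,
 * version and DMA width checks, admin queue init (in polling mode), host info
 * and feature query, and AENQ group configuration.
 */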
3189 static int
3190 ena_device_init(struct ena_adapter *adapter, device_t pdev,
3191     struct ena_com_dev_get_features_ctx *get_feat_ctx, int *wd_active)
3192 {
3193         struct ena_com_dev* ena_dev = adapter->ena_dev;
3194         bool readless_supported;
3195         uint32_t aenq_groups;
3196         int dma_width;
3197         int rc;
3198
3199         rc = ena_com_mmio_reg_read_request_init(ena_dev);
3200         if (unlikely(rc != 0)) {
3201                 device_printf(pdev, "failed to init mmio read less\n");
3202                 return (rc);
3203         }
3204
3205         /*
3206          * The PCIe configuration space revision id indicates whether mmio
3207          * register read is disabled.
3208          */
3209         readless_supported = !(pci_get_revid(pdev) & ENA_MMIO_DISABLE_REG_READ);
3210         ena_com_set_mmio_read_mode(ena_dev, readless_supported);
3211
3212         rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
3213         if (unlikely(rc != 0)) {
3214                 device_printf(pdev, "Can not reset device\n");
3215                 goto err_mmio_read_less;
3216         }
3217
3218         rc = ena_com_validate_version(ena_dev);
3219         if (unlikely(rc != 0)) {
3220                 device_printf(pdev, "device version is too low\n");
3221                 goto err_mmio_read_less;
3222         }
3223
3224         dma_width = ena_com_get_dma_width(ena_dev);
3225         if (unlikely(dma_width < 0)) {
3226                 device_printf(pdev, "Invalid dma width value %d", dma_width);
3227                 rc = dma_width;
3228                 goto err_mmio_read_less;
3229         }
3230         adapter->dma_width = dma_width;
3231
3232         /* ENA admin level init */
3233         rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
3234         if (unlikely(rc != 0)) {
3235                 device_printf(pdev,
3236                     "Can not initialize ena admin queue with device\n");
3237                 goto err_mmio_read_less;
3238         }
3239
3240         /*
3241          * To enable the msix interrupts the driver needs to know the number
3242          * of queues, so the driver uses polling mode to retrieve this
3243          * information.
3244          */
3245         ena_com_set_admin_polling_mode(ena_dev, true);
3246
3247         ena_config_host_info(ena_dev);
3248
3249         /* Get Device Attributes */
3250         rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
3251         if (unlikely(rc != 0)) {
3252                 device_printf(pdev,
3253                     "Cannot get attribute for ena device rc: %d\n", rc);
3254                 goto err_admin_init;
3255         }
3256
3257         aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | BIT(ENA_ADMIN_KEEP_ALIVE);
3258
3259         aenq_groups &= get_feat_ctx->aenq.supported_groups;
3260         rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
3261         if (unlikely(rc != 0)) {
3262                 device_printf(pdev, "Cannot configure aenq groups rc: %d\n", rc);
3263                 goto err_admin_init;
3264         }
3265
3266         *wd_active = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
3267
3268         return (0);
3269
3270 err_admin_init:
3271         ena_com_delete_host_info(ena_dev);
3272         ena_com_admin_destroy(ena_dev);
3273 err_mmio_read_less:
3274         ena_com_mmio_reg_read_request_destroy(ena_dev);
3275
3276         return (rc);
3277 }
3278
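/*
 * Enable MSI-X and hook up the management interrupt, then switch the admin
 * queue from polling to interrupt mode and enable AENQ notifications.
 */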
3279 static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter,
3280     int io_vectors)
3281 {
3282         struct ena_com_dev *ena_dev = adapter->ena_dev;
3283         int rc;
3284
3285         rc = ena_enable_msix(adapter);
3286         if (unlikely(rc != 0)) {
3287                 device_printf(adapter->pdev, "Error with MSI-X enablement\n");
3288                 return (rc);
3289         }
3290
3291         ena_setup_mgmnt_intr(adapter);
3292
3293         rc = ena_request_mgmnt_irq(adapter);
3294         if (unlikely(rc != 0)) {
3295                 device_printf(adapter->pdev, "Cannot setup mgmnt queue intr\n");
3296                 goto err_disable_msix;
3297         }
3298
3299         ena_com_set_admin_polling_mode(ena_dev, false);
3300
3301         ena_com_admin_aenq_enable(ena_dev);
3302
3303         return (0);
3304
3305 err_disable_msix:
3306         ena_disable_msix(adapter);
3307
3308         return (rc);
3309 }
3310
3311 /* Function called on ENA_ADMIN_KEEP_ALIVE event */
3312 static void ena_keep_alive_wd(void *adapter_data,
3313     struct ena_admin_aenq_entry *aenq_e)
3314 {
3315         struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3316         struct ena_admin_aenq_keep_alive_desc *desc;
3317         sbintime_t stime;
3318         uint64_t rx_drops;
3319
3320         desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
3321
3322         rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low;
3323         counter_u64_zero(adapter->hw_stats.rx_drops);
3324         counter_u64_add(adapter->hw_stats.rx_drops, rx_drops);
3325
3326         stime = getsbinuptime();
3327         atomic_store_rel_64(&adapter->keep_alive_timestamp, stime);
3328 }
3329
3330 /* Check for keep alive expiration */
3331 static void check_for_missing_keep_alive(struct ena_adapter *adapter)
3332 {
3333         sbintime_t timestamp, time;
3334
3335         if (adapter->wd_active == 0)
3336                 return;
3337
3338         if (adapter->keep_alive_timeout == 0)
3339                 return;
3340
3341         timestamp = atomic_load_acq_64(&adapter->keep_alive_timestamp);
3342         time = getsbinuptime() - timestamp;
3343         if (unlikely(time > adapter->keep_alive_timeout)) {
3344                 device_printf(adapter->pdev,
3345                     "Keep alive watchdog timeout.\n");
3346                 counter_u64_add(adapter->dev_stats.wd_expired, 1);
3347                 adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
3348                 adapter->trigger_reset = true;
3349         }
3350 }
3351
3352 /* Check if admin queue is enabled */
3353 static void check_for_admin_com_state(struct ena_adapter *adapter)
3354 {
3355         if (unlikely(ena_com_get_admin_running_state(adapter->ena_dev) ==
3356             false)) {
3357                 device_printf(adapter->pdev,
3358                     "ENA admin queue is not in running state!\n");
3359                 counter_u64_add(adapter->dev_stats.admin_q_pause, 1);
3360                 adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
3361                 adapter->trigger_reset = true;
3362         }
3363 }
3364
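/*
 * Scan a single TX ring for buffers whose completion is overdue and request
 * a device reset once the count exceeds "missing_tx_threshold".
 */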
3365 static int
3366 check_missing_comp_in_queue(struct ena_adapter *adapter,
3367     struct ena_ring *tx_ring)
3368 {
3369         struct bintime curtime, time;
3370         struct ena_tx_buffer *tx_buf;
3371         uint32_t missed_tx = 0;
3372         int i;
3373
3374         getbinuptime(&curtime);
3375
3376         for (i = 0; i < tx_ring->ring_size; i++) {
3377                 tx_buf = &tx_ring->tx_buffer_info[i];
3378
3379                 if (bintime_isset(&tx_buf->timestamp) == 0)
3380                         continue;
3381
3382                 time = curtime;
3383                 bintime_sub(&time, &tx_buf->timestamp);
3384
3385                 /* Check again if packet is still waiting */
3386                 if (unlikely(bttosbt(time) > adapter->missing_tx_timeout)) {
3387
3388                         if (!tx_buf->print_once)
3389                                 ena_trace(ENA_WARNING, "Found a Tx that wasn't "
3390                                     "completed on time, qid %d, index %d.\n",
3391                                     tx_ring->qid, i);
3392
3393                         tx_buf->print_once = true;
3394                         missed_tx++;
3395                         counter_u64_add(tx_ring->tx_stats.missing_tx_comp, 1);
3396
3397                         if (unlikely(missed_tx >
3398                             adapter->missing_tx_threshold)) {
3399                                 device_printf(adapter->pdev,
3400                                     "The number of lost tx completion "
3401                                     "is above the threshold (%d > %d). "
3402                                     "Reset the device\n",
3403                                     missed_tx, adapter->missing_tx_threshold);
3404                                 adapter->reset_reason =
3405                                     ENA_REGS_RESET_MISS_TX_CMPL;
3406                                 adapter->trigger_reset = true;
3407                                 return (EIO);
3408                         }
3409                 }
3410         }
3411
3412         return (0);
3413 }
3414
3415 /*
3416  * Check for TX buffers that were not completed on time.
3417  * The timeout is defined by "missing_tx_timeout".
3418  * A reset will be performed if the number of incomplete
3419  * transactions exceeds "missing_tx_threshold".
3420  */
3421 static void
3422 check_for_missing_tx_completions(struct ena_adapter *adapter)
3423 {
3424         struct ena_ring *tx_ring;
3425         int i, budget, rc;
3426
3427         /* Make sure we see the latest device state set by other contexts. */
3428         rmb();
3429
3430         if (!adapter->up)
3431                 return;
3432
3433         if (adapter->trigger_reset)
3434                 return;
3435
3436         if (adapter->missing_tx_timeout == 0)
3437                 return;
3438
3439         budget = adapter->missing_tx_max_queues;
3440
3441         for (i = adapter->next_monitored_tx_qid; i < adapter->num_queues; i++) {
3442                 tx_ring = &adapter->tx_ring[i];
3443
3444                 rc = check_missing_comp_in_queue(adapter, tx_ring);
3445                 if (unlikely(rc != 0))
3446                         return;
3447
3448                 budget--;
3449                 if (budget == 0) {
3450                         i++;
3451                         break;
3452                 }
3453         }
3454
3455         adapter->next_monitored_tx_qid = i % adapter->num_queues;
3456 }
3457
3458 /* trigger deferred rx cleanup after 2 consecutive detections */
3459 #define EMPTY_RX_REFILL 2
3460 /* For the rare case where the device runs out of Rx descriptors and the
3461  * msix handler failed to refill new Rx descriptors (due to a lack of memory,
3462  * for example).
3463  * This case will lead to a deadlock: the device won't send interrupts since
3464  * all the new Rx packets will be dropped, and the msix handler won't allocate
3465  * new Rx descriptors, so the device won't be able to send new packets.
3466  *
3467  * When such a situation is detected - execute the rx cleanup task in another
3468  * thread.
3469  */
3470 static void
3471 check_for_empty_rx_ring(struct ena_adapter *adapter)
3472 {
3473         struct ena_ring *rx_ring;
3474         int i, refill_required;
3475
3476         if (!adapter->up)
3477                 return;
3478
3479         if (adapter->trigger_reset)
3480                 return;
3481
3482         for (i = 0; i < adapter->num_queues; i++) {
3483                 rx_ring = &adapter->rx_ring[i];
3484
3485                 refill_required = ena_com_free_desc(rx_ring->ena_com_io_sq);
3486                 if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
3487                         rx_ring->empty_rx_queue++;
3488
3489                         if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
3490                                 counter_u64_add(rx_ring->rx_stats.empty_rx_ring,
3491                                     1);
3492
3493                                 device_printf(adapter->pdev,
3494                                     "trigger refill for ring %d\n", i);
3495
3496                                 taskqueue_enqueue(rx_ring->cmpl_tq,
3497                                     &rx_ring->cmpl_task);
3498                                 rx_ring->empty_rx_queue = 0;
3499                         }
3500                 } else {
3501                         rx_ring->empty_rx_queue = 0;
3502                 }
3503         }
3504 }
3505
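/*
 * Watchdog callout, run once per second: performs the keep alive, admin queue,
 * TX completion and RX ring checks, refreshes the host info and either
 * reschedules itself or hands control over to the reset task.
 */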
3506 static void
3507 ena_timer_service(void *data)
3508 {
3509         struct ena_adapter *adapter = (struct ena_adapter *)data;
3510         struct ena_admin_host_info *host_info =
3511             adapter->ena_dev->host_attr.host_info;
3512
3513         check_for_missing_keep_alive(adapter);
3514
3515         check_for_admin_com_state(adapter);
3516
3517         check_for_missing_tx_completions(adapter);
3518
3519         check_for_empty_rx_ring(adapter);
3520
3521         if (host_info != NULL)
3522                 ena_update_host_info(host_info, adapter->ifp);
3523
3524         if (unlikely(adapter->trigger_reset)) {
3525                 device_printf(adapter->pdev, "Trigger reset is on\n");
3526                 taskqueue_enqueue(adapter->reset_tq, &adapter->reset_task);
3527                 return;
3528         }
3529
3530         /*
3531          * Schedule another timeout one second from now.
3532          */
3533         callout_schedule_sbt(&adapter->timer_service, SBT_1S, SBT_1S, 0);
3534 }
3535
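/*
 * Reset task, scheduled by the watchdog when trigger_reset is set: tears down
 * the IO path and the admin plane under the ioctl lock, reinitializes the
 * device and brings the interface back up if it was up before the reset.
 */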
3536 static void
3537 ena_reset_task(void *arg, int pending)
3538 {
3539         struct ena_com_dev_get_features_ctx get_feat_ctx;
3540         struct ena_adapter *adapter = (struct ena_adapter *)arg;
3541         struct ena_com_dev *ena_dev = adapter->ena_dev;
3542         bool dev_up;
3543         int rc;
3544
3545         if (unlikely(!adapter->trigger_reset)) {
3546                 device_printf(adapter->pdev,
3547                     "device reset scheduled but trigger_reset is off\n");
3548                 return;
3549         }
3550
3551         sx_xlock(&adapter->ioctl_sx);
3552
3553         callout_drain(&adapter->timer_service);
3554
3555         dev_up = adapter->up;
3556
3557         ena_com_set_admin_running_state(ena_dev, false);
3558         ena_down(adapter);
3559         ena_free_mgmnt_irq(adapter);
3560         ena_disable_msix(adapter);
3561         ena_com_abort_admin_commands(ena_dev);
3562         ena_com_wait_for_abort_completion(ena_dev);
3563         ena_com_admin_destroy(ena_dev);
3564         ena_com_mmio_reg_read_request_destroy(ena_dev);
3565
3566         adapter->reset_reason = ENA_REGS_RESET_NORMAL;
3567         adapter->trigger_reset = false;
3568
3569         /* Finished destroy part. Restart the device */
3570         rc = ena_device_init(adapter, adapter->pdev, &get_feat_ctx,
3571             &adapter->wd_active);
3572         if (unlikely(rc != 0)) {
3573                 device_printf(adapter->pdev,
3574                     "ENA device init failed! (err: %d)\n", rc);
3575                 goto err_dev_free;
3576         }
3577
3578         rc = ena_enable_msix_and_set_admin_interrupts(adapter,
3579             adapter->num_queues);
3580         if (unlikely(rc != 0)) {
3581                 device_printf(adapter->pdev, "Enable MSI-X failed\n");
3582                 goto err_com_free;
3583         }
3584
3585         /* If the interface was up before the reset bring it up */
3586         if (dev_up) {
3587                 rc = ena_up(adapter);
3588                 if (unlikely(rc != 0)) {
3589                         device_printf(adapter->pdev,
3590                             "Failed to create I/O queues\n");
3591                         goto err_msix_free;
3592                 }
3593         }
3594
3595         callout_reset_sbt(&adapter->timer_service, SBT_1S, SBT_1S,
3596             ena_timer_service, (void *)adapter, 0);
3597
3598         sx_unlock(&adapter->ioctl_sx);
3599
3600         return;
3601
3602 err_msix_free:
3603         ena_free_mgmnt_irq(adapter);
3604         ena_disable_msix(adapter);
3605 err_com_free:
3606         ena_com_admin_destroy(ena_dev);
3607 err_dev_free:
3608         device_printf(adapter->pdev, "ENA reset failed!\n");
3609         adapter->running = false;
3610         sx_unlock(&adapter->ioctl_sx);
3611 }
3612
3613 /**
3614  * ena_attach - Device Initialization Routine
3615  * @pdev: device information struct
3616  *
3617  * Returns 0 on success, otherwise an error code on failure.
3618  *
3619  * ena_attach initializes an adapter identified by a device structure.
3620  * The OS initialization, configuring of the adapter private structure,
3621  * and a hardware reset occur.
3622  **/
3623 static int
3624 ena_attach(device_t pdev)
3625 {
3626         struct ena_com_dev_get_features_ctx get_feat_ctx;
3627         static int version_printed;
3628         struct ena_adapter *adapter;
3629         struct ena_com_dev *ena_dev = NULL;
3630         uint16_t tx_sgl_size = 0;
3631         uint16_t rx_sgl_size = 0;
3632         int io_queue_num;
3633         int queue_size;
3634         int rc;
3635         adapter = device_get_softc(pdev);
3636         adapter->pdev = pdev;
3637
3638         mtx_init(&adapter->global_mtx, "ENA global mtx", NULL, MTX_DEF);
3639         sx_init(&adapter->ioctl_sx, "ENA ioctl sx");
3640
3641         /* Set up the timer service */
3642         callout_init_mtx(&adapter->timer_service, &adapter->global_mtx, 0);
3643         adapter->keep_alive_timeout = DEFAULT_KEEP_ALIVE_TO;
3644         adapter->missing_tx_timeout = DEFAULT_TX_CMP_TO;
3645         adapter->missing_tx_max_queues = DEFAULT_TX_MONITORED_QUEUES;
3646         adapter->missing_tx_threshold = DEFAULT_TX_CMP_THRESHOLD;
3647
3648         if (version_printed++ == 0)
3649                 device_printf(pdev, "%s\n", ena_version);
3650
3651         rc = ena_allocate_pci_resources(adapter);
3652         if (unlikely(rc != 0)) {
3653                 device_printf(pdev, "PCI resource allocation failed!\n");
3654                 ena_free_pci_resources(adapter);
3655                 return (rc);
3656         }
3657
3658         /* Allocate memory for ena_dev structure */
3659         ena_dev = malloc(sizeof(struct ena_com_dev), M_DEVBUF,
3660             M_WAITOK | M_ZERO);
3661
3662         adapter->ena_dev = ena_dev;
3663         ena_dev->dmadev = pdev;
3664         ena_dev->bus = malloc(sizeof(struct ena_bus), M_DEVBUF,
3665             M_WAITOK | M_ZERO);
3666
3667         /* Store register resources */
3668         ((struct ena_bus*)(ena_dev->bus))->reg_bar_t =
3669             rman_get_bustag(adapter->registers);
3670         ((struct ena_bus*)(ena_dev->bus))->reg_bar_h =
3671             rman_get_bushandle(adapter->registers);
3672
3673         if (unlikely(((struct ena_bus*)(ena_dev->bus))->reg_bar_h == 0)) {
3674                 device_printf(pdev, "failed to map registers bar\n");
3675                 rc = ENXIO;
3676                 goto err_bus_free;
3677         }
3678
3679         ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3680
3681         /* Device initialization */
3682         rc = ena_device_init(adapter, pdev, &get_feat_ctx, &adapter->wd_active);
3683         if (unlikely(rc != 0)) {
3684                 device_printf(pdev, "ENA device init failed! (err: %d)\n", rc);
3685                 rc = ENXIO;
3686                 goto err_bus_free;
3687         }
3688
3689         adapter->keep_alive_timestamp = getsbinuptime();
3690
3691         adapter->tx_offload_cap = get_feat_ctx.offload.tx;
3692
3693         /* Set for sure that interface is not up */
3694         adapter->up = false;
3695
3696         memcpy(adapter->mac_addr, get_feat_ctx.dev_attr.mac_addr,
3697             ETHER_ADDR_LEN);
3698
3699         /* calculate IO queue number to create */
3700         io_queue_num = ena_calc_io_queue_num(adapter, &get_feat_ctx);
3701
3702         ENA_ASSERT(io_queue_num > 0, "Invalid queue number: %d\n",
3703             io_queue_num);
3704         adapter->num_queues = io_queue_num;
3705
3706         adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;
3707
3708         /* calculate ring sizes */
3709         queue_size = ena_calc_queue_size(adapter, &tx_sgl_size,
3710             &rx_sgl_size, &get_feat_ctx);
3711         if (unlikely((queue_size <= 0) || (io_queue_num <= 0))) {
3712                 rc = ENA_COM_FAULT;
3713                 goto err_com_free;
3714         }
3715
3716         adapter->reset_reason = ENA_REGS_RESET_NORMAL;
3717
3718         adapter->tx_ring_size = queue_size;
3719         adapter->rx_ring_size = queue_size;
3720
3721         adapter->max_tx_sgl_size = tx_sgl_size;
3722         adapter->max_rx_sgl_size = rx_sgl_size;
3723
3724         /* set up dma tags for rx and tx buffers */
3725         rc = ena_setup_tx_dma_tag(adapter);
3726         if (unlikely(rc != 0)) {
3727                 device_printf(pdev, "Failed to create TX DMA tag\n");
3728                 goto err_com_free;
3729         }
3730
3731         rc = ena_setup_rx_dma_tag(adapter);
3732         if (unlikely(rc != 0)) {
3733                 device_printf(pdev, "Failed to create RX DMA tag\n");
3734                 goto err_tx_tag_free;
3735         }
3736
3737         /* initialize rings basic information */
3738         device_printf(pdev, "initialize %d io queues\n", io_queue_num);
3739         ena_init_io_rings(adapter);
3740
3741         /* setup network interface */
3742         rc = ena_setup_ifnet(pdev, adapter, &get_feat_ctx);
3743         if (unlikely(rc != 0)) {
3744                 device_printf(pdev, "Error with network interface setup\n");
3745                 goto err_io_free;
3746         }
3747
3748         rc = ena_enable_msix_and_set_admin_interrupts(adapter, io_queue_num);
3749         if (unlikely(rc != 0)) {
3750                 device_printf(pdev,
3751                     "Failed to enable and set the admin interrupts\n");
3752                 goto err_ifp_free;
3753         }
3754
3755         /* Initialize reset task queue */
3756         TASK_INIT(&adapter->reset_task, 0, ena_reset_task, adapter);
3757         adapter->reset_tq = taskqueue_create("ena_reset_enqueue",
3758             M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->reset_tq);
3759         taskqueue_start_threads(&adapter->reset_tq, 1, PI_NET,
3760             "%s rstq", device_get_nameunit(adapter->pdev));
3761
3762         /* Initialize statistics */
3763         ena_alloc_counters((counter_u64_t *)&adapter->dev_stats,
3764             sizeof(struct ena_stats_dev));
3765         ena_alloc_counters((counter_u64_t *)&adapter->hw_stats,
3766             sizeof(struct ena_hw_stats));
3767         ena_sysctl_add_nodes(adapter);
3768
3769         /* Tell the stack that the interface is not active */
3770         if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
3771
3772         adapter->running = true;
3773         return (0);
3774
3775 err_ifp_free:
3776         if_detach(adapter->ifp);
3777         if_free(adapter->ifp);
3778 err_io_free:
3779         ena_free_all_io_rings_resources(adapter);
3780         ena_free_rx_dma_tag(adapter);
3781 err_tx_tag_free:
3782         ena_free_tx_dma_tag(adapter);
3783 err_com_free:
3784         ena_com_admin_destroy(ena_dev);
3785         ena_com_delete_host_info(ena_dev);
3786         ena_com_mmio_reg_read_request_destroy(ena_dev);
3787 err_bus_free:
3788         free(ena_dev->bus, M_DEVBUF);
3789         free(ena_dev, M_DEVBUF);
3790         ena_free_pci_resources(adapter);
3791
3792         return (rc);
3793 }
3794
3795 /**
3796  * ena_detach - Device Removal Routine
3797  * @pdev: device information struct
3798  *
3799  * ena_detach is called by the device subsystem to alert the driver
3800  * that it should release a PCI device.
3801  **/
3802 static int
3803 ena_detach(device_t pdev)
3804 {
3805         struct ena_adapter *adapter = device_get_softc(pdev);
3806         struct ena_com_dev *ena_dev = adapter->ena_dev;
3807         int rc;
3808
3809         /* Make sure VLANS are not using driver */
3810         if (adapter->ifp->if_vlantrunk != NULL) {
3811                 device_printf(adapter->pdev, "VLAN is in use, detach first\n");
3812                 return (EBUSY);
3813         }
3814
3815         /* Free reset task and callout */
3816         callout_drain(&adapter->timer_service);
3817         while (taskqueue_cancel(adapter->reset_tq, &adapter->reset_task, NULL))
3818                 taskqueue_drain(adapter->reset_tq, &adapter->reset_task);
3819         taskqueue_free(adapter->reset_tq);
3820
3821         sx_xlock(&adapter->ioctl_sx);
3822         ena_down(adapter);
3823         sx_unlock(&adapter->ioctl_sx);
3824
3825         if (adapter->ifp != NULL) {
3826                 ether_ifdetach(adapter->ifp);
3827                 if_free(adapter->ifp);
3828         }
3829
3830         ena_free_all_io_rings_resources(adapter);
3831
3832         ena_free_counters((counter_u64_t *)&adapter->hw_stats,
3833             sizeof(struct ena_hw_stats));
3834         ena_free_counters((counter_u64_t *)&adapter->dev_stats,
3835             sizeof(struct ena_stats_dev));
3836
3837         if (likely(adapter->rss_support))
3838                 ena_com_rss_destroy(ena_dev);
3839
3840         rc = ena_free_rx_dma_tag(adapter);
3841         if (unlikely(rc != 0))
3842                 device_printf(adapter->pdev,
3843                     "Unmapped RX DMA tag associations\n");
3844
3845         rc = ena_free_tx_dma_tag(adapter);
3846         if (unlikely(rc != 0))
3847                 device_printf(adapter->pdev,
3848                     "Unmapped TX DMA tag associations\n");
3849
3850         /* Reset the device only if the device is running. */
3851         if (adapter->running)
3852                 ena_com_dev_reset(ena_dev, adapter->reset_reason);
3853
3854         ena_com_delete_host_info(ena_dev);
3855
3856         ena_free_irqs(adapter);
3857
3858         ena_com_abort_admin_commands(ena_dev);
3859
3860         ena_com_wait_for_abort_completion(ena_dev);
3861
3862         ena_com_admin_destroy(ena_dev);
3863
3864         ena_com_mmio_reg_read_request_destroy(ena_dev);
3865
3866         ena_free_pci_resources(adapter);
3867
3868         mtx_destroy(&adapter->global_mtx);
3869         sx_destroy(&adapter->ioctl_sx);
3870
3871         if (ena_dev->bus != NULL)
3872                 free(ena_dev->bus, M_DEVBUF);
3873
3874         if (ena_dev != NULL)
3875                 free(ena_dev, M_DEVBUF);
3876
3877         return (bus_generic_detach(pdev));
3878 }
3879
3880 /******************************************************************************
3881  ******************************** AENQ Handlers *******************************
3882  *****************************************************************************/
3883 /**
3884  * ena_update_on_link_change:
3885  * Notify the network interface about the change in link status
3886  **/
3887 static void
3888 ena_update_on_link_change(void *adapter_data,
3889     struct ena_admin_aenq_entry *aenq_e)
3890 {
3891         struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3892         struct ena_admin_aenq_link_change_desc *aenq_desc;
3893         int status;
3894         if_t ifp;
3895
3896         aenq_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;
3897         ifp = adapter->ifp;
3898         status = aenq_desc->flags &
3899             ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
3900
3901         /* status holds a single masked bit, so it is either set or clear. */
3902         if (status != 0) {
3903                 device_printf(adapter->pdev, "link is UP\n");
3904                 if_link_state_change(ifp, LINK_STATE_UP);
3905         } else {
3906                 device_printf(adapter->pdev, "link is DOWN\n");
3907                 if_link_state_change(ifp, LINK_STATE_DOWN);
3908         }
3911
3912         adapter->link_status = status;
3913 }
3914
3915 /**
3916  * This handler will be called for an unknown event group or unimplemented handlers.
3917  **/
3918 static void
3919 unimplemented_aenq_handler(void *data,
3920     struct ena_admin_aenq_entry *aenq_e)
3921 {
3922         return;
3923 }
3924
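/*
 * Dispatch table mapping AENQ event groups to their handlers; events from any
 * other group are routed to the unimplemented handler stub.
 */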
3925 static struct ena_aenq_handlers aenq_handlers = {
3926     .handlers = {
3927             [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
3928             [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
3929     },
3930     .unimplemented_handler = unimplemented_aenq_handler
3931 };
3932
3933 /*********************************************************************
3934  *  FreeBSD Device Interface Entry Points
3935  *********************************************************************/
3936
3937 static device_method_t ena_methods[] = {
3938     /* Device interface */
3939     DEVMETHOD(device_probe, ena_probe),
3940     DEVMETHOD(device_attach, ena_attach),
3941     DEVMETHOD(device_detach, ena_detach),
3942     DEVMETHOD_END
3943 };
3944
3945 static driver_t ena_driver = {
3946     "ena", ena_methods, sizeof(struct ena_adapter),
3947 };
3948
3949 devclass_t ena_devclass;
3950 DRIVER_MODULE(ena, pci, ena_driver, ena_devclass, 0, 0);
3951 MODULE_PNP_INFO("U16:vendor;U16:device", pci, ena, ena_vendor_info_array,
3952     nitems(ena_vendor_info_array) - 1);
3953 MODULE_DEPEND(ena, pci, 1, 1, 1);
3954 MODULE_DEPEND(ena, ether, 1, 1, 1);
3955
3956 /*********************************************************************/