/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */

#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_stats.h"

#define VNIC_MAX_RES_HDR_SIZE \
	(sizeof(struct vnic_resource_header) + \
	sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE	128

#define VNIC_MAX_FLOW_COUNTERS 2048

void *vnic_dev_priv(struct vnic_dev *vdev)
{
	return vdev->priv;
}

void vnic_register_cbacks(struct vnic_dev *vdev,
    void *(*alloc_consistent)(void *priv, size_t size,
	bus_addr_t *dma_handle, struct iflib_dma_info *res, u8 *name),
    void (*free_consistent)(void *priv, size_t size, void *vaddr,
	bus_addr_t dma_handle, struct iflib_dma_info *res))
{
	vdev->alloc_consistent = alloc_consistent;
	vdev->free_consistent = free_consistent;
}

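/*
 * Usage sketch (illustrative; "enic_alloc_consistent" and
 * "enic_free_consistent" are hypothetical caller-supplied wrappers
 * around iflib_dma_alloc()/iflib_dma_free()):
 *
 *	vnic_register_cbacks(vdev, enic_alloc_consistent,
 *	    enic_free_consistent);
 *
 * All later coherent-DMA allocations for this vdev go through these
 * callbacks.
 */
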
static int vnic_dev_discover_res(struct vnic_dev *vdev,
    struct vnic_dev_bar *bar, unsigned int num_bars)
{
	struct enic_softc *softc = vdev->softc;
	struct vnic_resource_header __iomem *rh;
	struct mgmt_barmap_hdr __iomem *mrh;
	struct vnic_resource __iomem *r;
	u8 type;
	u32 r_offset;

	rh = malloc(sizeof(*rh), M_DEVBUF, M_NOWAIT | M_ZERO);
	mrh = malloc(sizeof(*mrh), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (rh == NULL || mrh == NULL) {
		pr_err("vNIC BAR0 res hdr not mem-mapped\n");
		return -EINVAL;
	}

	/* Check for mgmt vnic in addition to normal vnic */
	ENIC_BUS_READ_REGION_4(softc, mem, 0, (void *)rh, sizeof(*rh) / 4);
	ENIC_BUS_READ_REGION_4(softc, mem, 0, (void *)mrh, sizeof(*mrh) / 4);
	if ((rh->magic != VNIC_RES_MAGIC) ||
	    (rh->version != VNIC_RES_VERSION)) {
		if ((mrh->magic != MGMTVNIC_MAGIC) ||
		    mrh->version != MGMTVNIC_VERSION) {
			pr_err("vNIC BAR0 res magic/version error "
			    "exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
			    VNIC_RES_MAGIC, VNIC_RES_VERSION,
			    MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
			    rh->magic, rh->version);
	if (mrh->magic == MGMTVNIC_MAGIC)
		r_offset = sizeof(*mrh);
	else
		r_offset = sizeof(*rh);

	r = malloc(sizeof(*r), M_DEVBUF, M_NOWAIT | M_ZERO);
	ENIC_BUS_READ_REGION_4(softc, mem, r_offset, (void *)r, sizeof(*r) / 4);
	while ((type = r->type) != RES_TYPE_EOL) {
		u32 bar_offset = r->bar_offset;

		r_offset += sizeof(*r);

		if (bar_num >= num_bars)
			continue;

		switch (type) {
		case RES_TYPE_INTR_CTRL:
		case RES_TYPE_INTR_PBA_LEGACY:
			ENIC_BUS_READ_REGION_4(softc, mem, r_offset, (void *)r, sizeof(*r) / 4);
			break;
		}
		vdev->res[type].count = count;
		bcopy(&softc->mem, &vdev->res[type].bar, sizeof(softc->mem));
		vdev->res[type].bar.offset = bar_offset;
		ENIC_BUS_READ_REGION_4(softc, mem, r_offset, (void *)r, sizeof(*r) / 4);
	}

	return 0;
}

unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
    enum vnic_res_type type)
{
	return vdev->res[type].count;
}

void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
    unsigned int index)
{
	struct vnic_res *res;

	if (!vdev->res[type].bar.tag)
		return NULL;

	res = malloc(sizeof(*res), M_DEVBUF, M_NOWAIT | M_ZERO);
	bcopy(&vdev->res[type], res, sizeof(*res));

	switch (type) {
	case RES_TYPE_INTR_CTRL:
		/* each count is stride bytes long */
		res->bar.offset += index * VNIC_RES_STRIDE;
		break;
	default:
		res->bar.offset += 0;
		break;
	}

	return res;
}

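/*
 * Example (sketch): interrupt control blocks are VNIC_RES_STRIDE (128)
 * bytes apart, so vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, 2) yields
 * a mapping at the resource's base bar.offset + 256.
 */
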
unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
    unsigned int desc_count, unsigned int desc_size)
{
	/* The base address of the desc rings must be 512 byte aligned.
	 * Descriptor count is aligned to groups of 32 descriptors. A
	 * count of 0 means the maximum 4096 descriptors. Descriptor
	 * size is aligned to 16 bytes.
	 */
	unsigned int count_align = 32;
	unsigned int desc_align = 16;

	ring->base_align = 512;

	if (desc_count == 0)
		desc_count = 4096;

	ring->desc_count = VNIC_ALIGN(desc_count, count_align);

	ring->desc_size = VNIC_ALIGN(desc_size, desc_align);

	ring->size = ring->desc_count * ring->desc_size;
	ring->size_unaligned = ring->size + ring->base_align;

	return ring->size_unaligned;
}

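/*
 * Worked example (sketch): desc_count = 100, desc_size = 16
 *	ring->desc_count     = VNIC_ALIGN(100, 32) = 128
 *	ring->desc_size      = VNIC_ALIGN(16, 16)  = 16
 *	ring->size           = 128 * 16            = 2048
 *	ring->size_unaligned = 2048 + 512          = 2560 bytes
 * The caller allocates size_unaligned bytes so the descriptor area can
 * be pushed up to the next 512-byte boundary.
 */
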
void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
	memset(ring->descs, 0, ring->size);
}

static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
    int wait)
{
	struct vnic_res __iomem *devcmd = vdev->devcmd;
	int delay, err;
	u32 status;

	status = ENIC_BUS_READ_4(devcmd, DEVCMD_STATUS);
	if (status == 0xFFFFFFFF) {
		/* PCI-e target device is gone */
		return -ENODEV;
	}
	if (status & STAT_BUSY) {
		pr_err("Busy devcmd %d\n", _CMD_N(cmd));
		return -EBUSY;
	}

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		ENIC_BUS_WRITE_REGION_4(devcmd, bar, DEVCMD_ARGS(0), (void *)&vdev->args[0], VNIC_DEVCMD_NARGS * 2);
	}

	ENIC_BUS_WRITE_4(devcmd, DEVCMD_CMD, cmd);

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		return 0;

	for (delay = 0; delay < wait; delay++) {
		udelay(100);
		status = ENIC_BUS_READ_4(devcmd, DEVCMD_STATUS);
		if (status == 0xFFFFFFFF) {
			/* PCI-e target device is gone */
			return -ENODEV;
		}
		if (!(status & STAT_BUSY)) {
			if (status & STAT_ERROR) {
				err = -(int)ENIC_BUS_READ_8(devcmd, DEVCMD_ARGS(0));
				if (cmd != CMD_CAPABILITY)
					pr_err("Devcmd %d failed "
					    "with error code %d\n",
					    _CMD_N(cmd), err);
				return err;
			}
			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				ENIC_BUS_READ_REGION_4(devcmd, bar, DEVCMD_ARGS(0), (void *)&vdev->args[0], VNIC_DEVCMD_NARGS * 2);
			}
			return 0;
		}
	}

	pr_err("Timed out devcmd %d\n", _CMD_N(cmd));
	return -ETIMEDOUT;
}

static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
    enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd,
    u64 *args, int nargs, int wait)
{
	u32 status;
	int err;

	/*
	 * A proxy command consumes two argument slots: one for the
	 * proxy index, the other for the command being proxied.
	 */
	if (nargs > VNIC_DEVCMD_NARGS - 2) {
		pr_err("number of args %d exceeds the maximum\n", nargs);
		return -EINVAL;
	}
	memset(vdev->args, 0, sizeof(vdev->args));

	vdev->args[0] = vdev->proxy_index;
	vdev->args[1] = cmd;
	memcpy(&vdev->args[2], args, nargs * sizeof(args[0]));

	err = _vnic_dev_cmd(vdev, proxy_cmd, wait);
	if (err)
		return err;

	status = (u32)vdev->args[0];
	if (status & STAT_ERROR) {
		err = (int)vdev->args[1];
		if (err != ERR_ECMDUNKNOWN ||
		    cmd != CMD_CAPABILITY)
			pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd));
		return err;
	}

	memcpy(args, &vdev->args[1], nargs * sizeof(args[0]));

	return 0;
}

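/*
 * Illustrative slot layout for a proxied command (sketch): with
 * vdev->proxy == PROXY_BY_INDEX, a caller's CMD_GET_MAC_ADDR becomes
 *
 *	vdev->args[0]   = vdev->proxy_index;
 *	vdev->args[1]   = CMD_GET_MAC_ADDR;
 *	vdev->args[2..] = the proxied command's own arguments;
 *
 * posted as CMD_PROXY_BY_INDEX; the proxied status and results come
 * back in args[0] and args[1..] respectively.
 */
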
static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
    enum vnic_devcmd_cmd cmd, u64 *args, int nargs, int wait)
{
	int err;

	if (nargs > VNIC_DEVCMD_NARGS) {
		pr_err("number of args %d exceeds the maximum\n", nargs);
		return -EINVAL;
	}
	memset(vdev->args, 0, sizeof(vdev->args));
	memcpy(vdev->args, args, nargs * sizeof(args[0]));

	err = _vnic_dev_cmd(vdev, cmd, wait);

	memcpy(args, vdev->args, nargs * sizeof(args[0]));

	return err;
}

int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
    u64 *a0, u64 *a1, int wait)
{
	u64 args[2];
	int err;

	args[0] = *a0;
	args[1] = *a1;

	memset(vdev->args, 0, sizeof(vdev->args));

	switch (vdev->proxy) {
	case PROXY_BY_INDEX:
		err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
		    args, ARRAY_SIZE(args), wait);
		break;
	case PROXY_BY_BDF:
		err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
		    args, ARRAY_SIZE(args), wait);
		break;
	default:
		err = vnic_dev_cmd_no_proxy(vdev, cmd, args, 2, wait);
		break;
	}

	if (err == 0) {
		*a0 = args[0];
		*a1 = args[1];
	}

	return err;
}

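/*
 * Typical devcmd invocation (sketch; mirrors vnic_dev_capable() below):
 *
 *	u64 a0 = (u32)CMD_OPEN, a1 = 0;
 *	int err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, 1000);
 *
 * a0/a1 carry arguments in and results out; wait bounds the busy-poll
 * loop in _vnic_dev_cmd().
 */
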
int vnic_dev_cmd_args(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
    u64 *args, int nargs, int wait)
{
	switch (vdev->proxy) {
	case PROXY_BY_INDEX:
		return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
		    args, nargs, wait);
	case PROXY_BY_BDF:
		return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
		    args, nargs, wait);
	default:
		return vnic_dev_cmd_no_proxy(vdev, cmd, args, nargs, wait);
	}
}

static int vnic_dev_advanced_filters_cap(struct vnic_dev *vdev, u64 *args,
    int nargs)
{
	memset(args, 0, nargs * sizeof(*args));
	args[0] = CMD_ADD_ADV_FILTER;
	args[1] = FILTER_CAP_MODE_V1_FLAG;
	return vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, nargs, 1000);
}

int vnic_dev_capable_adv_filters(struct vnic_dev *vdev)
{
	u64 a0 = CMD_ADD_ADV_FILTER, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
	if (err)
		return 0;
	return (a1 >= (u32)FILTER_DPDK_1);
}

/* Determine the "best" filtering mode the VIC is capable of. Returns one of
 * three values, or 0 on error:
 *	FILTER_DPDK_1 - advanced filters available
 *	FILTER_USNIC_IP_FLAG - advanced filters, but with the restriction that
 *		the IP layer must be explicitly specified, i.e. cannot have a
 *		UDP filter that matches both IPv4 and IPv6.
 *	FILTER_IPV4_5TUPLE - fallback if neither of the above is available;
 *		all other filter types are not available.
 * Returns true in filter_tags if supported.
 */
int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode,
    u8 *filter_actions)
{
	u64 args[4];
	u32 max_level = 0;
	int err;

	err = vnic_dev_advanced_filters_cap(vdev, args, 4);

	/* determine supported filter actions */
	*filter_actions = FILTER_ACTION_RQ_STEERING_FLAG; /* always available */
	if (args[2] == FILTER_CAP_MODE_V1)
		*filter_actions = args[3];

	if (err || ((args[0] == 1) && (args[1] == 0))) {
		/* Adv filter Command not supported or adv filters available
		 * but not enabled. Try the normal filter capability command.
		 */
		args[0] = CMD_ADD_FILTER;
		args[1] = 0;
		err = vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, 2, 1000);
		if (err)
			return err;
		max_level = args[1];
		goto parse_max_level;
	} else if (args[2] == FILTER_CAP_MODE_V1) {
		/* parse filter capability mask in args[1] */
		if (args[1] & FILTER_DPDK_1_FLAG)
			*mode = FILTER_DPDK_1;
		else if (args[1] & FILTER_USNIC_IP_FLAG)
			*mode = FILTER_USNIC_IP;
		else if (args[1] & FILTER_IPV4_5TUPLE_FLAG)
			*mode = FILTER_IPV4_5TUPLE;
		return 0;
	}
	max_level = args[1];
parse_max_level:
	if (max_level >= (u32)FILTER_USNIC_IP)
		*mode = FILTER_USNIC_IP;
	else
		*mode = FILTER_IPV4_5TUPLE;
	return 0;
}

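/*
 * Example (sketch): a VIC advertising FILTER_DPDK_1_FLAG |
 * FILTER_USNIC_IP_FLAG in args[1] ends up with *mode = FILTER_DPDK_1,
 * the most capable of the advertised modes.
 */
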
void vnic_dev_capable_udp_rss_weak(struct vnic_dev *vdev, bool *cfg_chk,
    bool *weak)
{
	u64 a0 = CMD_NIC_CFG, a1 = 0;
	int wait = 1000;
	int err;

	*cfg_chk = false;
	*weak = false;
	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
	if (err == 0 && a0 != 0 && a1 != 0) {
		*cfg_chk = true;
		*weak = !!((a1 >> 32) & CMD_NIC_CFG_CAPF_UDP_WEAK);
	}
}

int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
{
	u64 a0 = (u32)cmd, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);

	return !(err || a0);
}

int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size,
    void *value)
{
	u64 a0 = offset, a1 = size;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);

	switch (size) {
	case 1:
		*(u8 *)value = (u8)a0;
		break;
	case 2:
		*(u16 *)value = (u16)a0;
		break;
	case 4:
		*(u32 *)value = (u32)a0;
		break;
	}

	return err;
}

int vnic_dev_stats_clear(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
}

int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
	u64 a0, a1;
	int wait = 1000;
	int rc;

	*stats = vdev->stats;
	a0 = vdev->stats_res.idi_paddr;
	a1 = sizeof(struct vnic_stats);

	bus_dmamap_sync(vdev->stats_res.idi_tag,
	    vdev->stats_res.idi_map,
	    BUS_DMASYNC_POSTREAD);
	rc = vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
	bus_dmamap_sync(vdev->stats_res.idi_tag,
	    vdev->stats_res.idi_map,
	    BUS_DMASYNC_PREREAD);

	return rc;
}

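/*
 * Note: CMD_STATS_DUMP makes the adapter DMA the stats block into
 * stats_res; the surrounding bus_dmamap_sync() calls keep the CPU's
 * view of that buffer coherent with the device write before callers
 * read through *stats.
 */
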
/*
 * Configure counter DMA
 */
int vnic_dev_counter_dma_cfg(struct vnic_dev *vdev, u32 period,
    u32 num_counters)
{
	u64 args[3];
	int wait = 1000;
	int err;

	if (num_counters > VNIC_MAX_FLOW_COUNTERS)
		return -EINVAL;
	if (period > 0 && (period < VNIC_COUNTER_DMA_MIN_PERIOD ||
	    num_counters == 0))
		return -EINVAL;

	args[0] = num_counters;
	args[1] = vdev->flow_counters_res.idi_paddr;
	args[2] = period;
	bus_dmamap_sync(vdev->flow_counters_res.idi_tag,
	    vdev->flow_counters_res.idi_map,
	    BUS_DMASYNC_POSTREAD);
	err = vnic_dev_cmd_args(vdev, CMD_COUNTER_DMA_CONFIG, args, 3, wait);
	bus_dmamap_sync(vdev->flow_counters_res.idi_tag,
	    vdev->flow_counters_res.idi_map,
	    BUS_DMASYNC_PREREAD);

	/* record if DMAs need to be stopped on close */
	if (!err)
		vdev->flow_counters_dma_active = (num_counters != 0 &&
		    period != 0);

	return err;
}

int vnic_dev_close(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}

int vnic_dev_enable_wait(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	if (vnic_dev_capable(vdev, CMD_ENABLE_WAIT))
		return vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
	else
		return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
}

int vnic_dev_disable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}

int vnic_dev_open(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}

int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}

int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err, i;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = 0;

	err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
	if (err)
		return err;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = ((u8 *)&a0)[i];

	return 0;
}

int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
    int broadcast, int promisc, int allmulti)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
	    (multicast ? CMD_PFILTER_MULTICAST : 0) |
	    (broadcast ? CMD_PFILTER_BROADCAST : 0) |
	    (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
	    (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);

	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
	if (err)
		pr_err("Can't set packet filter\n");

	return err;
}

int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err, i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
	if (err)
		pr_err("Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
		    addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
		    err);

	return err;
}

int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err, i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
	if (err)
		pr_err("Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
		    addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
		    err);

	return err;
}

int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
    u8 ig_vlan_rewrite_mode)
{
	u64 a0 = ig_vlan_rewrite_mode, a1 = 0;
	int wait = 1000;

	if (vnic_dev_capable(vdev, CMD_IG_VLAN_REWRITE_MODE))
		return vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE,
		    &a0, &a1, wait);
	else
		return 0;
}

void vnic_dev_set_reset_flag(struct vnic_dev *vdev, int state)
{
	vdev->in_reset = state;
}

static inline int vnic_dev_in_reset(struct vnic_dev *vdev)
{
	return vdev->in_reset;
}

int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
    void *notify_addr, bus_addr_t notify_pa, u16 intr)
{
	u64 a0, a1;
	int wait = 1000;
	int r;

	bus_dmamap_sync(vdev->notify_res.idi_tag,
	    vdev->notify_res.idi_map,
	    BUS_DMASYNC_PREWRITE);
	memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify));
	bus_dmamap_sync(vdev->notify_res.idi_tag,
	    vdev->notify_res.idi_map,
	    BUS_DMASYNC_POSTWRITE);
	if (!vnic_dev_in_reset(vdev)) {
		vdev->notify = notify_addr;
		vdev->notify_pa = notify_pa;
	}

	a0 = (u64)notify_pa;
	a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
	a1 += sizeof(struct vnic_devcmd_notify);

	r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	if (!vnic_dev_in_reset(vdev))
		vdev->notify_sz = (r == 0) ? (u32)a1 : 0;

	return r;
}

int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
	void *notify_addr = NULL;
	bus_addr_t notify_pa = 0;
	u8 name[NAME_MAX];
	static u32 instance;

	if (vdev->notify || vdev->notify_pa) {
		return vnic_dev_notify_setcmd(vdev, vdev->notify,
		    vdev->notify_pa, intr);
	}
	if (!vnic_dev_in_reset(vdev)) {
		snprintf((char *)name, sizeof(name),
		    "vnic_notify-%u", instance++);
		iflib_dma_alloc(vdev->softc->ctx,
		    sizeof(struct vnic_devcmd_notify),
		    &vdev->notify_res, BUS_DMA_NOWAIT);
		notify_pa = vdev->notify_res.idi_paddr;
		notify_addr = vdev->notify_res.idi_vaddr;
	}

	return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
}

int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = 0;  /* paddr = 0 to unset notify buffer */
	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
	a1 += sizeof(struct vnic_devcmd_notify);

	err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	if (!vnic_dev_in_reset(vdev)) {
		vdev->notify = NULL;
		vdev->notify_pa = 0;
		vdev->notify_sz = 0;
	}

	return err;
}

int vnic_dev_notify_unset(struct vnic_dev *vdev)
{
	if (vdev->notify && !vnic_dev_in_reset(vdev)) {
		iflib_dma_free(&vdev->notify_res);
	}

	return vnic_dev_notify_unsetcmd(vdev);
}

static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	u32 *words;
	unsigned int nwords = vdev->notify_sz / 4;
	unsigned int i;
	u32 csum;

	if (!vdev->notify || !vdev->notify_sz)
		return 0;

	/*
	 * words[0] holds the checksum the adapter writes over the rest
	 * of the notify block; re-copy until the sum matches so we
	 * never act on a block caught mid-update.
	 */
	do {
		csum = 0;
		bus_dmamap_sync(vdev->notify_res.idi_tag,
		    vdev->notify_res.idi_map,
		    BUS_DMASYNC_PREREAD);
		memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz);
		bus_dmamap_sync(vdev->notify_res.idi_tag,
		    vdev->notify_res.idi_map,
		    BUS_DMASYNC_POSTREAD);
		words = (u32 *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return 1;
}

int vnic_dev_init(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	int r = 0;

	if (vnic_dev_capable(vdev, CMD_INIT))
		r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
	else {
		vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
		if (a0 & CMD_INITF_DEFAULT_MAC) {
			/* Emulate these for old CMD_INIT_v1 which
			 * didn't pass a0 so no CMD_INITF_*.
			 */
			vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
			vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
		}
	}

	return r;
}

void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev)
{
	/* Default: hardware intr coal timer is in units of 1.5 usecs */
	vdev->intr_coal_timer_info.mul = 2;
	vdev->intr_coal_timer_info.div = 3;
	vdev->intr_coal_timer_info.max_usec =
	    vnic_dev_intr_coal_timer_hw_to_usec(vdev, 0xffff);
}

int vnic_dev_link_status(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_state;
}

u32 vnic_dev_port_speed(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.port_speed;
}

u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec)
{
	return (usec * vdev->intr_coal_timer_info.mul) /
	    vdev->intr_coal_timer_info.div;
}

u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles)
{
	return (hw_cycles * vdev->intr_coal_timer_info.div) /
	    vdev->intr_coal_timer_info.mul;
}

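/*
 * Worked example (sketch): with the default mul/div of 2/3 above,
 * usec_to_hw(30) = 30 * 2 / 3 = 20 hw cycles, and hw_to_usec(20) =
 * 20 * 3 / 2 = 30 usec, i.e. one hw cycle per 1.5 usec.
 */
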
u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev)
{
	return vdev->intr_coal_timer_info.max_usec;
}

u32 vnic_dev_mtu(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.mtu;
}

void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
    enum vnic_dev_intr_mode intr_mode)
{
	vdev->intr_mode = intr_mode;
}

enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev)
{
	return vdev->intr_mode;
}

int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev)
{
	u8 name[NAME_MAX];
	static u32 instance;
	struct enic_softc *softc;

	softc = vdev->softc;

	snprintf((char *)name, sizeof(name), "vnic_stats-%u", instance++);
	iflib_dma_alloc(softc->ctx, sizeof(struct vnic_stats),
	    &vdev->stats_res, 0);
	vdev->stats = (struct vnic_stats *)vdev->stats_res.idi_vaddr;
	return vdev->stats == NULL ? -ENOMEM : 0;
}

/*
 * Initialize for up to VNIC_MAX_FLOW_COUNTERS
 */
int vnic_dev_alloc_counter_mem(struct vnic_dev *vdev)
{
	u8 name[NAME_MAX];
	static u32 instance;
	struct enic_softc *softc;

	softc = vdev->softc;

	snprintf((char *)name, sizeof(name), "vnic_flow_ctrs-%u", instance++);
	iflib_dma_alloc(softc->ctx,
	    sizeof(struct vnic_counter_counts) * VNIC_MAX_FLOW_COUNTERS,
	    &vdev->flow_counters_res, 0);
	vdev->flow_counters =
	    (struct vnic_counter_counts *)vdev->flow_counters_res.idi_vaddr;
	vdev->flow_counters_dma_active = 0;
	return vdev->flow_counters == NULL ? -ENOMEM : 0;
}

struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
    struct enic_bar_info *mem, unsigned int num_bars)
{
	if (vnic_dev_discover_res(vdev, NULL, num_bars))
		return NULL;

	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
	if (!vdev->devcmd)
		return NULL;

	return vdev;
}

/*
 * vnic_dev_classifier: Add/Delete classifier entries
 * @vdev: vdev of the device
 * @cmd: CLSF_ADD for Add filter
 *       CLSF_DEL for Delete filter
 * @entry: In case of ADD filter, the caller passes the RQ number in this
 *         variable. This function stores the filter_id returned by the
 *         firmware in the same variable before returning.
 *
 *         In case of DEL filter, the caller passes the RQ number. Return
 *         value is irrelevant.
 * @action: action data
 */

int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config)
{
	u64 a0 = overlay;
	u64 a1 = config;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CTRL, &a0, &a1, wait);
}

int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay,
    u16 vxlan_udp_port_number)
{
	u64 a1 = vxlan_udp_port_number;
	u64 a0 = overlay;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CFG, &a0, &a1, wait);
}

int vnic_dev_capable_vxlan(struct vnic_dev *vdev)
{
	u64 a0 = VIC_FEATURE_VXLAN;
	u64 a1 = 0;
	int wait = 1000;
	int ret;

	ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, &a1, wait);
	/* 1 if the NIC can do VXLAN for both IPv4 and IPv6 with multiple WQs */
	return ret == 0 &&
	    (a1 & (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ)) ==
	    (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ);
}

bool vnic_dev_counter_alloc(struct vnic_dev *vdev, uint32_t *idx)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	if (vnic_dev_cmd(vdev, CMD_COUNTER_ALLOC, &a0, &a1, wait))
		return false;
	*idx = (uint32_t)a0;
	return true;
}

bool vnic_dev_counter_free(struct vnic_dev *vdev, uint32_t idx)
{
	u64 a0 = idx, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_COUNTER_FREE, &a0, &a1,
	    wait) == 0;
}

bool vnic_dev_counter_query(struct vnic_dev *vdev, uint32_t idx,
    bool reset, uint64_t *packets, uint64_t *bytes)
{
	u64 a0 = idx;
	u64 a1 = reset ? 1 : 0;
	int wait = 1000;

	if (reset) {
		/* query/reset returns updated counters */
		if (vnic_dev_cmd(vdev, CMD_COUNTER_QUERY, &a0, &a1, wait))
			return false;
		*packets = a0;
		*bytes = a1;
	} else {
		/* Get values DMA'd from the adapter */
		*packets = vdev->flow_counters[idx].vcc_packets;
		*bytes = vdev->flow_counters[idx].vcc_bytes;
	}

	return true;
}

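/*
 * Typical flow-counter lifecycle (sketch):
 *
 *	uint32_t idx;
 *	uint64_t packets, bytes;
 *
 *	if (vnic_dev_counter_alloc(vdev, &idx)) {
 *		vnic_dev_counter_query(vdev, idx, false, &packets, &bytes);
 *		...
 *		vnic_dev_counter_free(vdev, idx);
 *	}
 *
 * With reset = false the values come from the adapter's periodic DMA
 * into flow_counters_res (see vnic_dev_counter_dma_cfg() above).
 */
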
device_t dev_from_vnic_dev(struct vnic_dev *vdev)
{
	return (vdev->softc->dev);
}