1 /******************************************************************************
3 Copyright (c) 2001-2017, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
37 #include "opt_inet6.h"
42 #include <net/netmap.h>
43 #include <dev/netmap/netmap_kern.h>
45 /************************************************************************
47 ************************************************************************/
/* Driver version string; exported to iflib via isc_driver_version below. */
48 char ixv_driver_version[] = "2.0.1-k";
50 /************************************************************************
53 * Used by probe to select devices to load on
54 * Last field stores an index into ixv_strings
55 * Last entry must be all 0s
57 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
58 ************************************************************************/
/*
 * PCI ID table consumed by iflib_device_probe(); one PVID() entry per
 * supported VF device ID.  NOTE(review): the opening brace and the
 * required all-zeros terminating entry are not visible in this chunk.
 */
59 static pci_vendor_info_t ixv_vendor_info_array[] =
61 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
62 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
63 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
64 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
65 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
66 /* required last entry */
70 /************************************************************************
72 ************************************************************************/
/* Forward declarations: iflib device-interface (ifdi_*) entry points. */
73 static void *ixv_register(device_t dev);
74 static int ixv_if_attach_pre(if_ctx_t ctx);
75 static int ixv_if_attach_post(if_ctx_t ctx);
76 static int ixv_if_detach(if_ctx_t ctx);
78 static int ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
79 static int ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
80 static int ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
81 static void ixv_if_queues_free(if_ctx_t ctx);
82 static void ixv_identify_hardware(if_ctx_t ctx);
83 static void ixv_init_device_features(struct adapter *);
84 static int ixv_allocate_pci_resources(if_ctx_t ctx);
85 static void ixv_free_pci_resources(if_ctx_t ctx);
86 static int ixv_setup_interface(if_ctx_t ctx);
87 static void ixv_if_media_status(if_ctx_t , struct ifmediareq *);
88 static int ixv_if_media_change(if_ctx_t ctx);
89 static void ixv_if_update_admin_status(if_ctx_t ctx);
90 static int ixv_if_msix_intr_assign(if_ctx_t ctx, int msix);
92 static int ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
93 static void ixv_if_init(if_ctx_t ctx);
94 static void ixv_if_local_timer(if_ctx_t ctx, uint16_t qid);
95 static void ixv_if_stop(if_ctx_t ctx);
96 static int ixv_negotiate_api(struct adapter *);
/* Hardware programming helpers (TX/RX units, RSS, VLAN, IVAR routing). */
98 static void ixv_initialize_transmit_units(if_ctx_t ctx);
99 static void ixv_initialize_receive_units(if_ctx_t ctx);
100 static void ixv_initialize_rss_mapping(struct adapter *);
102 static void ixv_setup_vlan_support(if_ctx_t ctx);
103 static void ixv_configure_ivars(struct adapter *);
104 static void ixv_if_enable_intr(if_ctx_t ctx);
105 static void ixv_if_disable_intr(if_ctx_t ctx);
106 static void ixv_if_multi_set(if_ctx_t ctx);
108 static void ixv_if_register_vlan(if_ctx_t, u16);
109 static void ixv_if_unregister_vlan(if_ctx_t, u16);
111 static uint64_t ixv_if_get_counter(if_ctx_t, ift_counter);
/* Statistics bookkeeping and sysctl plumbing. */
113 static void ixv_save_stats(struct adapter *);
114 static void ixv_init_stats(struct adapter *);
115 static void ixv_update_stats(struct adapter *);
116 static void ixv_add_stats_sysctls(struct adapter *adapter);
118 static int ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
119 static void ixv_set_ivar(struct adapter *, u8, u8, s8);
121 static u8 *ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
123 /* The MSI-X Interrupt handlers */
124 static int ixv_msix_que(void *);
125 static int ixv_msix_mbx(void *);
127 /************************************************************************
128 * FreeBSD Device Interface Entry Points
129 ************************************************************************/
/*
 * Newbus method table: everything except device_register is delegated
 * to the generic iflib implementations.  NOTE(review): DEVMETHOD_END
 * and the closing brace are not visible in this chunk.
 */
130 static device_method_t ixv_methods[] = {
131 /* Device interface */
132 DEVMETHOD(device_register, ixv_register),
133 DEVMETHOD(device_probe, iflib_device_probe),
134 DEVMETHOD(device_attach, iflib_device_attach),
135 DEVMETHOD(device_detach, iflib_device_detach),
136 DEVMETHOD(device_shutdown, iflib_device_shutdown),
/* Newbus driver glue: softc is the shared struct adapter. */
140 static driver_t ixv_driver = {
141 "ixv", ixv_methods, sizeof(struct adapter),
144 devclass_t ixv_devclass;
/* Register on the pci bus; export PNP info so devmatch can autoload us. */
145 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
146 IFLIB_PNP_INFO(pci, ixv_driver, ixv_vendor_info_array);
147 MODULE_DEPEND(ixv, iflib, 1, 1, 1);
148 MODULE_DEPEND(ixv, pci, 1, 1, 1);
149 MODULE_DEPEND(ixv, ether, 1, 1, 1);
/* iflib driver-interface method table mapping ifdi_* to ixv handlers. */
151 static device_method_t ixv_if_methods[] = {
152 DEVMETHOD(ifdi_attach_pre, ixv_if_attach_pre),
153 DEVMETHOD(ifdi_attach_post, ixv_if_attach_post),
154 DEVMETHOD(ifdi_detach, ixv_if_detach),
155 DEVMETHOD(ifdi_init, ixv_if_init),
156 DEVMETHOD(ifdi_stop, ixv_if_stop),
157 DEVMETHOD(ifdi_msix_intr_assign, ixv_if_msix_intr_assign),
158 DEVMETHOD(ifdi_intr_enable, ixv_if_enable_intr),
159 DEVMETHOD(ifdi_intr_disable, ixv_if_disable_intr),
/*
 * NOTE(review): both the TX and RX queue-interrupt-enable hooks point at
 * ixv_if_rx_queue_intr_enable — presumably intentional (one VTEIMS bit
 * covers both directions for a vector), but confirm against upstream.
 */
160 DEVMETHOD(ifdi_tx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
161 DEVMETHOD(ifdi_rx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
162 DEVMETHOD(ifdi_tx_queues_alloc, ixv_if_tx_queues_alloc),
163 DEVMETHOD(ifdi_rx_queues_alloc, ixv_if_rx_queues_alloc),
164 DEVMETHOD(ifdi_queues_free, ixv_if_queues_free),
165 DEVMETHOD(ifdi_update_admin_status, ixv_if_update_admin_status),
166 DEVMETHOD(ifdi_multi_set, ixv_if_multi_set),
167 DEVMETHOD(ifdi_mtu_set, ixv_if_mtu_set),
168 DEVMETHOD(ifdi_media_status, ixv_if_media_status),
169 DEVMETHOD(ifdi_media_change, ixv_if_media_change),
170 DEVMETHOD(ifdi_timer, ixv_if_local_timer),
171 DEVMETHOD(ifdi_vlan_register, ixv_if_register_vlan),
172 DEVMETHOD(ifdi_vlan_unregister, ixv_if_unregister_vlan),
173 DEVMETHOD(ifdi_get_counter, ixv_if_get_counter),
/* iflib-side driver object referenced from ixv_sctx_init.isc_driver. */
177 static driver_t ixv_if_driver = {
178 "ixv_if", ixv_if_methods, sizeof(struct adapter)
182 * TUNEABLE PARAMETERS:
185 /* Flow control setting, default to full */
186 static int ixv_flow_control = ixgbe_fc_full;
187 TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
190 * Header split: this causes the hardware to DMA
191 * the header into a separate mbuf from the payload,
192 * it can be a performance win in some workloads, but
193 * in others it actually hurts, its off by default.
195 static int ixv_header_split = FALSE;
196 TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
199 * Shadow VFTA table, this is needed because
200 * the real filter table gets cleared during
201 * a soft reset and we need to repopulate it.
203 static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
/* TX/RX fast-path ops shared with the PF (ix) driver. */
204 extern struct if_txrx ixgbe_txrx;
/*
 * Shared-context template handed to iflib via ixv_register(): descriptor
 * ring geometry, DMA segment limits, and driver identity.  Single
 * descriptor-queue type (index 0) for both TX and RX.
 */
206 static struct if_shared_ctx ixv_sctx_init = {
207 .isc_magic = IFLIB_MAGIC,
208 .isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */
209 .isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
210 .isc_tx_maxsegsize = PAGE_SIZE,
211 .isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
212 .isc_tso_maxsegsize = PAGE_SIZE,
213 .isc_rx_maxsize = MJUM16BYTES,
214 .isc_rx_nsegments = 1,
215 .isc_rx_maxsegsize = MJUM16BYTES,
/* One vector reserved for the mailbox/link ("admin") interrupt. */
219 .isc_admin_intrcnt = 1,
220 .isc_vendor_info = ixv_vendor_info_array,
221 .isc_driver_version = ixv_driver_version,
222 .isc_driver = &ixv_if_driver,
/* VF flag lets iflib skip PF-only setup; TSO needs IP header init. */
223 .isc_flags = IFLIB_IS_VF | IFLIB_TSO_INIT_IP,
225 .isc_nrxd_min = {MIN_RXD},
226 .isc_ntxd_min = {MIN_TXD},
227 .isc_nrxd_max = {MAX_RXD},
228 .isc_ntxd_max = {MAX_TXD},
229 .isc_nrxd_default = {DEFAULT_RXD},
230 .isc_ntxd_default = {DEFAULT_TXD},
233 if_shared_ctx_t ixv_sctx = &ixv_sctx_init;
/*
 * device_register method.  Body not visible in this chunk — presumably
 * returns &ixv_sctx_init so iflib can drive probe/attach; confirm.
 */
236 ixv_register(device_t dev)
241 /************************************************************************
242 * ixv_if_tx_queues_alloc
 *
 *   iflib callback: allocate per-TX-queue software state and bind each
 *   queue to the descriptor memory iflib already DMA-allocated (vaddrs/
 *   paddrs).  Returns 0 on success or an errno; the failure path frees
 *   everything via ixv_if_queues_free().
243 ************************************************************************/
245 ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
246     int ntxqs, int ntxqsets)
248 struct adapter *adapter = iflib_get_softc(ctx);
249 if_softc_ctx_t scctx = adapter->shared;
250 struct ix_tx_queue *que;
253 MPASS(adapter->num_tx_queues == ntxqsets);
256 /* Allocate queue structure memory */
258 (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
259 M_DEVBUF, M_NOWAIT | M_ZERO);
260 if (!adapter->tx_queues) {
261 device_printf(iflib_get_dev(ctx),
262 "Unable to allocate TX ring memory\n");
/* NOTE(review): error assignment and goto-fail lines not visible here. */
266 for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) {
267 struct tx_ring *txr = &que->txr;
270 txr->adapter = que->adapter = adapter;
272 /* Allocate report status array */
273 if (!(txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
/* Mark every report-status slot invalid until descriptors are used. */
277 for (j = 0; j < scctx->isc_ntxd[0]; j++)
278 txr->tx_rsq[j] = QIDX_INVALID;
279 /* get the virtual and physical address of the hardware queues */
280 txr->tail = IXGBE_VFTDT(txr->me);
281 txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i*ntxqs];
282 txr->tx_paddr = paddrs[i*ntxqs];
285 txr->total_packets = 0;
289 device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
290 adapter->num_tx_queues);
/* fail path: undo any partial allocation. */
295 ixv_if_queues_free(ctx);
298 } /* ixv_if_tx_queues_alloc */
300 /************************************************************************
301 * ixv_if_rx_queues_alloc
302 ************************************************************************/
304 ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
305 int nrxqs, int nrxqsets)
307 struct adapter *adapter = iflib_get_softc(ctx);
308 struct ix_rx_queue *que;
311 MPASS(adapter->num_rx_queues == nrxqsets);
314 /* Allocate queue structure memory */
316 (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) * nrxqsets,
317 M_DEVBUF, M_NOWAIT | M_ZERO);
318 if (!adapter->rx_queues) {
319 device_printf(iflib_get_dev(ctx),
320 "Unable to allocate TX ring memory\n");
325 for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
326 struct rx_ring *rxr = &que->rxr;
328 rxr->adapter = que->adapter = adapter;
331 /* get the virtual and physical address of the hw queues */
332 rxr->tail = IXGBE_VFRDT(rxr->me);
333 rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
334 rxr->rx_paddr = paddrs[i*nrxqs];
339 device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
340 adapter->num_rx_queues);
345 ixv_if_queues_free(ctx);
348 } /* ixv_if_rx_queues_alloc */
350 /************************************************************************
 * ixv_if_queues_free
 *
 *   Release all per-queue software state: each TX ring's report-status
 *   array, then the TX and RX queue arrays themselves.  Safe to call on
 *   partially-initialized state (both alloc fail paths land here).
352 ************************************************************************/
354 ixv_if_queues_free(if_ctx_t ctx)
356 struct adapter *adapter = iflib_get_softc(ctx);
357 struct ix_tx_queue *que = adapter->tx_queues;
363 for (i = 0; i < adapter->num_tx_queues; i++, que++) {
364 struct tx_ring *txr = &que->txr;
/* Skip queues whose report-status array was never allocated. */
365 if (txr->tx_rsq == NULL)
368 free(txr->tx_rsq, M_DEVBUF);
371 if (adapter->tx_queues != NULL)
372 free(adapter->tx_queues, M_DEVBUF);
374 if (adapter->rx_queues != NULL)
375 free(adapter->rx_queues, M_DEVBUF);
/* Null out so a second call (or later teardown) is a no-op. */
376 adapter->tx_queues = NULL;
377 adapter->rx_queues = NULL;
378 } /* ixv_if_queues_free */
380 /************************************************************************
381 * ixv_if_attach_pre - Device initialization routine
383 * Called when the driver is being loaded.
384 * Identifies the type of hardware, allocates all resources
385 * and initializes the hardware.
387 * return 0 on success, positive on failure
388 ************************************************************************/
390 ixv_if_attach_pre(if_ctx_t ctx)
392 struct adapter *adapter;
394 if_softc_ctx_t scctx;
398 INIT_DEBUGOUT("ixv_attach: begin");
400 /* Allocate, clear, and link in our adapter structure */
401 dev = iflib_get_dev(ctx);
402 adapter = iflib_get_softc(ctx);
405 adapter->hw.back = adapter;
406 scctx = adapter->shared = iflib_get_softc_ctx(ctx);
407 adapter->media = iflib_get_media(ctx);
410 /* Do base PCI setup - map BAR0 */
411 if (ixv_allocate_pci_resources(ctx)) {
412 device_printf(dev, "ixv_allocate_pci_resources() failed!\n");
/* Debug sysctl: dev.ixv.N.debug, handled by ixv_sysctl_debug(). */
418 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
419 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
420 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixv_sysctl_debug, "I",
423 /* Determine hardware revision */
424 ixv_identify_hardware(ctx);
425 ixv_init_device_features(adapter);
427 /* Initialize the shared code */
428 error = ixgbe_init_ops_vf(hw);
430 device_printf(dev, "ixgbe_init_ops_vf() failed!\n");
435 /* Setup the mailbox */
436 ixgbe_init_mbx_params_vf(hw);
/* Reset then init through the shared-code function pointers. */
438 error = hw->mac.ops.reset_hw(hw);
439 if (error == IXGBE_ERR_RESET_FAILED)
440 device_printf(dev, "...reset_hw() failure: Reset Failed!\n");
442 device_printf(dev, "...reset_hw() failed with error %d\n",
449 error = hw->mac.ops.init_hw(hw);
451 device_printf(dev, "...init_hw() failed with error %d\n",
457 /* Negotiate mailbox API version */
458 error = ixv_negotiate_api(adapter);
461 "Mailbox API negotiation failed during attach!\n");
465 /* If no mac address was assigned, make a random one */
466 if (!ixv_check_ether_addr(hw->mac.addr)) {
467 u8 addr[ETHER_ADDR_LEN];
468 arc4rand(&addr, sizeof(addr), 0);
471 bcopy(addr, hw->mac.addr, sizeof(addr));
472 bcopy(addr, hw->mac.perm_addr, sizeof(addr));
475 /* Most of the iflib initialization... */
477 iflib_set_mac(ctx, hw->mac.addr);
478 switch (adapter->hw.mac.type) {
/* X550-family VFs support 2 queue pairs; older VFs get 1. */
479 case ixgbe_mac_X550_vf:
480 case ixgbe_mac_X550EM_x_vf:
481 case ixgbe_mac_X550EM_a_vf:
482 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 2;
485 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
487 scctx->isc_txqsizes[0] =
488 roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
489 sizeof(u32), DBA_ALIGN);
490 scctx->isc_rxqsizes[0] =
491 roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
494 scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
495 CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
496 scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
497 scctx->isc_msix_bar = pci_msix_table_bar(dev);
498 scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
499 scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
500 scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
502 scctx->isc_txrx = &ixgbe_txrx;
505 * Tell the upper layer(s) we support everything the PF
506 * driver does except...
509 scctx->isc_capabilities = IXGBE_CAPS;
/*
 * NOTE(review): XOR only removes WOL if IXGBE_CAPS includes it;
 * `&= ~IFCAP_WOL` would be the robust form — confirm IXGBE_CAPS.
 */
510 scctx->isc_capabilities ^= IFCAP_WOL;
511 scctx->isc_capenable = scctx->isc_capabilities;
513 INIT_DEBUGOUT("ixv_if_attach_pre: end");
/* Common error exit: release BAR0 mapping before failing attach. */
518 ixv_free_pci_resources(ctx);
521 } /* ixv_if_attach_pre */
/*
 * ixv_if_attach_post - second attach phase, after iflib has created the
 * ifnet.  Finishes interface setup and wires up statistics sysctls.
 */
524 ixv_if_attach_post(if_ctx_t ctx)
526 struct adapter *adapter = iflib_get_softc(ctx);
527 device_t dev = iflib_get_dev(ctx);
530 /* Setup OS specific network interface */
531 error = ixv_setup_interface(ctx);
533 device_printf(dev, "Interface setup failed: %d\n", error);
537 /* Do the stats setup */
538 ixv_save_stats(adapter);
539 ixv_init_stats(adapter);
540 ixv_add_stats_sysctls(adapter);
544 } /* ixv_if_attach_post */
546 /************************************************************************
547 * ixv_detach - Device removal routine
549 * Called when the driver is being removed.
550 * Stops the adapter and deallocates all the resources
551 * that were allocated for driver operation.
553 * return 0 on success, positive on failure
554 ************************************************************************/
556 ixv_if_detach(if_ctx_t ctx)
558 INIT_DEBUGOUT("ixv_detach: begin");
/* iflib handles stop/queue teardown; we only release PCI resources. */
560 ixv_free_pci_resources(ctx);
563 } /* ixv_if_detach */
565 /************************************************************************
 * ixv_if_mtu_set - validate and apply a new MTU.
 *
 *   Rejects MTUs whose resulting frame would exceed IXGBE_MAX_FRAME_SIZE;
 *   otherwise recomputes max_frame_size.  NOTE(review): the line updating
 *   ifp->if_mtu to the new value is not visible in this chunk — confirm
 *   it precedes the max_frame_size computation.
567 ************************************************************************/
569 ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
571 struct adapter *adapter = iflib_get_softc(ctx);
572 struct ifnet *ifp = iflib_get_ifp(ctx);
575 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
576 if (mtu > IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) {
580 adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
584 } /* ixv_if_mtu_set */
586 /************************************************************************
587 * ixv_if_init - Init entry point
589 * Used in two ways: It is used by the stack as an init entry
590 * point in network interface structure. It is also used
591 * by the driver as a hw/sw initialization routine to get
592 * to a consistent state.
594 * return 0 on success, positive on failure
595 ************************************************************************/
597 ixv_if_init(if_ctx_t ctx)
599 struct adapter *adapter = iflib_get_softc(ctx);
600 struct ifnet *ifp = iflib_get_ifp(ctx);
601 device_t dev = iflib_get_dev(ctx);
602 struct ixgbe_hw *hw = &adapter->hw;
605 INIT_DEBUGOUT("ixv_if_init: begin");
/* Clear the stopped flag first so stop_adapter() actually runs. */
606 hw->adapter_stopped = FALSE;
607 hw->mac.ops.stop_adapter(hw);
609 /* reprogram the RAR[0] in case user changed it. */
610 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
612 /* Get the latest mac address, User can use a LAA */
613 bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
614 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);
616 /* Reset VF and renegotiate mailbox API version */
617 hw->mac.ops.reset_hw(hw);
618 hw->mac.ops.start_hw(hw);
619 error = ixv_negotiate_api(adapter);
622 "Mailbox API negotiation failed in if_init!\n");
/* Program TX descriptor rings and tails. */
626 ixv_initialize_transmit_units(ctx);
628 /* Setup Multicast table */
629 ixv_if_multi_set(ctx);
/* Cache the cluster size iflib chose for RX buffers. */
631 adapter->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
633 /* Configure RX settings */
634 ixv_initialize_receive_units(ctx);
636 /* Set up VLAN offload and filter */
637 ixv_setup_vlan_support(ctx);
639 /* Set up MSI-X routing */
640 ixv_configure_ivars(adapter);
642 /* Set up auto-mask */
643 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
645 /* Set moderation on the Link interrupt */
646 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
649 ixv_init_stats(adapter);
651 /* Config/Enable Link */
652 hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
655 /* And now turn on interrupts */
656 ixv_if_enable_intr(ctx);
661 /************************************************************************
663 ************************************************************************/
665 ixv_enable_queue(struct adapter *adapter, u32 vector)
667 struct ixgbe_hw *hw = &adapter->hw;
668 u32 queue = 1 << vector;
671 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
672 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
673 } /* ixv_enable_queue */
675 /************************************************************************
677 ************************************************************************/
679 ixv_disable_queue(struct adapter *adapter, u32 vector)
681 struct ixgbe_hw *hw = &adapter->hw;
682 u64 queue = (u64)(1 << vector);
685 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
686 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
687 } /* ixv_disable_queue */
690 /************************************************************************
691 * ixv_msix_que - MSI-X Queue Interrupt Service routine
 *
 *   Filter-level handler: mask this queue's vector and hand the real
 *   work to the iflib taskqueue thread.
692 ************************************************************************/
694 ixv_msix_que(void *arg)
696 struct ix_rx_queue *que = arg;
697 struct adapter *adapter = que->adapter;
/* Mask until the queue task re-enables via ifdi_rx_queue_intr_enable. */
699 ixv_disable_queue(adapter, que->msix);
702 return (FILTER_SCHEDULE_THREAD);
705 /************************************************************************
 * ixv_msix_mbx - MSI-X mailbox/link interrupt service routine.
 *
 *   Reads and clears the interrupt cause; on link-status change defers
 *   to the iflib admin task, then re-enables the "other" interrupt.
707 ************************************************************************/
709 ixv_msix_mbx(void *arg)
711 struct adapter *adapter = arg;
712 struct ixgbe_hw *hw = &adapter->hw;
717 /* First get the cause */
718 reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
719 /* Clear interrupt with write */
720 IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
722 /* Link status change */
723 if (reg & IXGBE_EICR_LSC)
724 iflib_admin_intr_deferred(adapter->ctx);
/* Re-arm the non-queue (mailbox/link) interrupt. */
726 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
728 return (FILTER_HANDLED);
731 /************************************************************************
732 * ixv_media_status - Media Ioctl callback
734 * Called whenever the user queries the status of
735 * the interface using ifconfig.
736 ************************************************************************/
738 ixv_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
740 struct adapter *adapter = iflib_get_softc(ctx);
742 INIT_DEBUGOUT("ixv_media_status: begin");
/* Kick the admin task so cached link state gets refreshed. */
744 iflib_admin_intr_deferred(ctx);
746 ifmr->ifm_status = IFM_AVALID;
747 ifmr->ifm_active = IFM_ETHER;
/* No link: report media-valid only (early return path not visible). */
749 if (!adapter->link_active)
752 ifmr->ifm_status |= IFM_ACTIVE;
/* Map cached link speed to an ifmedia subtype.
 * NOTE(review): per-case break statements are not visible in this chunk. */
754 switch (adapter->link_speed) {
755 case IXGBE_LINK_SPEED_1GB_FULL:
756 ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
758 case IXGBE_LINK_SPEED_10GB_FULL:
759 ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
761 case IXGBE_LINK_SPEED_100_FULL:
762 ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
764 case IXGBE_LINK_SPEED_10_FULL:
765 ifmr->ifm_active |= IFM_10_T | IFM_FDX;
768 } /* ixv_if_media_status */
770 /************************************************************************
771 * ixv_if_media_change - Media Ioctl callback
773 * Called when the user changes speed/duplex using
774 * media/mediopt option with ifconfig.
775 ************************************************************************/
777 ixv_if_media_change(if_ctx_t ctx)
779 struct adapter *adapter = iflib_get_softc(ctx);
780 struct ifmedia *ifm = iflib_get_media(ctx);
782 INIT_DEBUGOUT("ixv_media_change: begin");
784 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
/* VF link speed is dictated by the PF: only IFM_AUTO is accepted. */
787 switch (IFM_SUBTYPE(ifm->ifm_media)) {
791 device_printf(adapter->dev, "Only auto media type\n");
796 } /* ixv_if_media_change */
799 /************************************************************************
802 * Negotiate the Mailbox API with the PF;
803 * start with the most featured API first.
804 ************************************************************************/
806 ixv_negotiate_api(struct adapter *adapter)
808 struct ixgbe_hw *hw = &adapter->hw;
/* Candidate versions in preference order; list ends with _unknown. */
809 int mbx_api[] = { ixgbe_mbox_api_11,
811 ixgbe_mbox_api_unknown };
814 while (mbx_api[i] != ixgbe_mbox_api_unknown) {
/* First version the PF accepts wins. */
815 if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
821 } /* ixv_negotiate_api */
824 /************************************************************************
825 * ixv_if_multi_set - Multicast Update
827 * Called whenever multicast address list is updated.
828 ************************************************************************/
830 ixv_if_multi_set(if_ctx_t ctx)
/* Flat array of packed 6-byte MACs handed to the shared code. */
832 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
833 struct adapter *adapter = iflib_get_softc(ctx);
835 struct ifmultiaddr *ifma;
836 if_t ifp = iflib_get_ifp(ctx);
839 IOCTL_DEBUGOUT("ixv_if_multi_set: begin");
841 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
842 if (ifma->ifma_addr->sa_family != AF_LINK)
844 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
845 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
846 IXGBE_ETH_LENGTH_OF_ADDRESS);
/* Shared code walks the array via the ixv_mc_array_itr callback. */
852 adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
853 ixv_mc_array_itr, TRUE);
854 } /* ixv_if_multi_set */
856 /************************************************************************
859 * An iterator function needed by the multicast shared code.
860 * It feeds the shared code routine the addresses in the
861 * array of ixv_set_multi() one by one.
862 ************************************************************************/
864 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
866 u8 *addr = *update_ptr;
/* Advance the cursor one 6-byte MAC and return the current address. */
871 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
872 *update_ptr = newptr;
875 } /* ixv_mc_array_itr */
877 /************************************************************************
878 * ixv_if_local_timer - Timer routine
880 * Checks for link status, updates statistics,
881 * and runs the watchdog check.
 *
 *   NOTE(review): a qid guard (run once per tick, not per queue) is
 *   presumably in the non-visible lines — confirm.
882 ************************************************************************/
884 ixv_if_local_timer(if_ctx_t ctx, uint16_t qid)
889 /* Fire off the adminq task */
890 iflib_admin_intr_deferred(ctx);
891 } /* ixv_if_local_timer */
893 /************************************************************************
894 * ixv_if_update_admin_status - Update OS on link state
896 * Note: Only updates the OS on the cached link state.
897 * The real check of the hardware only happens with
899 ************************************************************************/
901 ixv_if_update_admin_status(if_ctx_t ctx)
903 struct adapter *adapter = iflib_get_softc(ctx);
904 device_t dev = iflib_get_dev(ctx);
/* Force the shared code to query the PF for current link status. */
907 adapter->hw.mac.get_link_status = TRUE;
909 status = ixgbe_check_link(&adapter->hw, &adapter->link_speed,
910 &adapter->link_up, FALSE);
912 if (status != IXGBE_SUCCESS && adapter->hw.adapter_stopped == FALSE) {
913 /* Mailbox's Clear To Send status is lost or timeout occurred.
914 * We need reinitialization. */
/* if_init's softc argument is the iflib ctx, hence passing ctx here. */
915 iflib_get_ifp(ctx)->if_init(ctx);
918 if (adapter->link_up) {
919 if (adapter->link_active == FALSE) {
/* link_speed == 128 corresponds to the 10G shared-code constant. */
921 device_printf(dev, "Link is up %d Gbps %s \n",
922 ((adapter->link_speed == 128) ? 10 : 1),
924 adapter->link_active = TRUE;
925 iflib_link_state_change(ctx, LINK_STATE_UP,
928 } else { /* Link down */
929 if (adapter->link_active == TRUE) {
931 device_printf(dev, "Link is Down\n");
932 iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
933 adapter->link_active = FALSE;
938 ixv_update_stats(adapter);
939 } /* ixv_if_update_admin_status */
942 /************************************************************************
943 * ixv_if_stop - Stop the hardware
945 * Disables all traffic on the adapter by issuing a
946 * global reset on the MAC and deallocates TX/RX buffers.
947 ************************************************************************/
949 ixv_if_stop(if_ctx_t ctx)
951 struct adapter *adapter = iflib_get_softc(ctx);
952 struct ixgbe_hw *hw = &adapter->hw;
954 INIT_DEBUGOUT("ixv_stop: begin\n");
956 ixv_if_disable_intr(ctx);
958 hw->mac.ops.reset_hw(hw);
/* Clear the stopped flag so the stop_adapter() call below is not
 * short-circuited by the shared code — presumably intentional; confirm. */
959 adapter->hw.adapter_stopped = FALSE;
960 hw->mac.ops.stop_adapter(hw);
962 /* Update the stack */
963 adapter->link_up = FALSE;
964 ixv_if_update_admin_status(ctx);
966 /* reprogram the RAR[0] in case user changed it. */
967 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
971 /************************************************************************
972 * ixv_identify_hardware - Determine hardware revision.
973 ************************************************************************/
975 ixv_identify_hardware(if_ctx_t ctx)
977 struct adapter *adapter = iflib_get_softc(ctx);
978 device_t dev = iflib_get_dev(ctx);
979 struct ixgbe_hw *hw = &adapter->hw;
981 /* Save off the information about this board */
982 hw->vendor_id = pci_get_vendor(dev);
983 hw->device_id = pci_get_device(dev);
984 hw->revision_id = pci_get_revid(dev);
985 hw->subsystem_vendor_id = pci_get_subvendor(dev);
986 hw->subsystem_device_id = pci_get_subdevice(dev);
988 /* A subset of set_mac_type */
/* NOTE(review): per-case break statements are not visible in this chunk. */
989 switch (hw->device_id) {
990 case IXGBE_DEV_ID_82599_VF:
991 hw->mac.type = ixgbe_mac_82599_vf;
993 case IXGBE_DEV_ID_X540_VF:
994 hw->mac.type = ixgbe_mac_X540_vf;
996 case IXGBE_DEV_ID_X550_VF:
997 hw->mac.type = ixgbe_mac_X550_vf;
999 case IXGBE_DEV_ID_X550EM_X_VF:
1000 hw->mac.type = ixgbe_mac_X550EM_x_vf;
1002 case IXGBE_DEV_ID_X550EM_A_VF:
1003 hw->mac.type = ixgbe_mac_X550EM_a_vf;
1006 device_printf(dev, "unknown mac type\n");
1007 hw->mac.type = ixgbe_mac_unknown;
1010 } /* ixv_identify_hardware */
1012 /************************************************************************
1013 * ixv_if_msix_intr_assign - Setup MSI-X Interrupt resources and handlers
1014 ************************************************************************/
1016 ixv_if_msix_intr_assign(if_ctx_t ctx, int msix)
1018 struct adapter *adapter = iflib_get_softc(ctx);
1019 device_t dev = iflib_get_dev(ctx);
1020 struct ix_rx_queue *rx_que = adapter->rx_queues;
1021 struct ix_tx_queue *tx_que;
1022 int error, rid, vector = 0;
/* One hard interrupt per RX queue, handled by ixv_msix_que(). */
1025 for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) {
1028 snprintf(buf, sizeof(buf), "rxq%d", i);
1029 error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
1030 IFLIB_INTR_RX, ixv_msix_que, rx_que, rx_que->rxr.me, buf);
1033 device_printf(iflib_get_dev(ctx),
1034 "Failed to allocate que int %d err: %d", i, error);
/* Shrink the count so the fail path frees only what exists —
 * NOTE(review): i + 1 includes the queue that just failed; confirm
 * this matches the irq-free loop's expectations. */
1035 adapter->num_rx_queues = i + 1;
1039 rx_que->msix = vector;
/* TX queues ride on RX vectors as softirqs (no extra hard IRQs). */
1042 for (int i = 0; i < adapter->num_tx_queues; i++) {
1043 snprintf(buf, sizeof(buf), "txq%d", i);
1044 tx_que = &adapter->tx_queues[i];
1045 tx_que->msix = i % adapter->num_rx_queues;
1046 iflib_softirq_alloc_generic(ctx,
1047 &adapter->rx_queues[tx_que->msix].que_irq,
1048 IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
/* Final vector: admin (mailbox/link) interrupt. */
1051 error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid,
1052 IFLIB_INTR_ADMIN, ixv_msix_mbx, adapter, 0, "aq");
1054 device_printf(iflib_get_dev(ctx),
1055 "Failed to register admin handler");
1059 adapter->vector = vector;
1061 * Due to a broken design QEMU will fail to properly
1062 * enable the guest for MSIX unless the vectors in
1063 * the table are all set up, so we must rewrite the
1064 * ENABLE in the MSIX control register again at this
1065 * point to cause it to successfully initialize us.
1067 if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
1069 pci_find_cap(dev, PCIY_MSIX, &rid);
1070 rid += PCIR_MSIX_CTRL;
1071 msix_ctrl = pci_read_config(dev, rid, 2);
1072 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1073 pci_write_config(dev, rid, msix_ctrl, 2);
/* fail path: release the admin IRQ and any queue IRQs obtained. */
1079 iflib_irq_free(ctx, &adapter->irq);
1080 rx_que = adapter->rx_queues;
1081 for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++)
1082 iflib_irq_free(ctx, &rx_que->que_irq);
1085 } /* ixv_if_msix_intr_assign */
1087 /************************************************************************
1088 * ixv_allocate_pci_resources
 *
 *   Map BAR0 (register space) and record the bus tag/handle so the
 *   IXGBE_READ/WRITE_REG macros can reach the device.
1089 ************************************************************************/
1091 ixv_allocate_pci_resources(if_ctx_t ctx)
1093 struct adapter *adapter = iflib_get_softc(ctx);
1094 device_t dev = iflib_get_dev(ctx);
1098 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1101 if (!(adapter->pci_mem)) {
1102 device_printf(dev, "Unable to allocate bus resource: memory\n");
1106 adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
1107 adapter->osdep.mem_bus_space_handle =
1108 rman_get_bushandle(adapter->pci_mem);
/* Shared code dereferences hw_addr through the osdep handle. */
1109 adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
1112 } /* ixv_allocate_pci_resources */
1114 /************************************************************************
1115 * ixv_free_pci_resources
1116 ************************************************************************/
/*
 * Undo ixv_allocate_pci_resources()/interrupt setup: free the admin
 * IRQ (MSI-X only), each RX queue IRQ, then release the BAR mapping.
 */
1118 ixv_free_pci_resources(if_ctx_t ctx)
1120 struct adapter *adapter = iflib_get_softc(ctx);
1121 struct ix_rx_queue *que = adapter->rx_queues;
1122 device_t dev = iflib_get_dev(ctx);
1124 /* Release all MSI-X queue resources */
1125 if (adapter->intr_type == IFLIB_INTR_MSIX)
1126 iflib_irq_free(ctx, &adapter->irq);
1129 for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
1130 iflib_irq_free(ctx, &que->que_irq);
/* Release the mapped register BAR last, after all IRQs are gone. */
1134 if (adapter->pci_mem != NULL)
1135 bus_release_resource(dev, SYS_RES_MEMORY,
1136 rman_get_rid(adapter->pci_mem), adapter->pci_mem);
1137 } /* ixv_free_pci_resources */
1139 /************************************************************************
1140 * ixv_setup_interface
1142 * Setup networking device structure and register an interface.
1143 ************************************************************************/
1145 ixv_setup_interface(if_ctx_t ctx)
1147 struct adapter *adapter = iflib_get_softc(ctx);
1148 if_softc_ctx_t scctx = adapter->shared;
1149 struct ifnet *ifp = iflib_get_ifp(ctx);
1151 INIT_DEBUGOUT("ixv_setup_interface: begin");
1153 if_setbaudrate(ifp, IF_Gbps(10));
/* Leave two descriptors of slack so the software send queue can never
 * completely fill the hardware TX ring. */
1154 ifp->if_snd.ifq_maxlen = scctx->isc_ntxd[0] - 2;
/* Max frame = MTU plus Ethernet header overhead (IXGBE_MTU_HDR). */
1157 adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
/* A VF has no PHY visibility; advertise autoselect media only. */
1158 ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1159 ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
1162 } /* ixv_setup_interface */
1164 /************************************************************************
1165 * ixv_if_get_counter
1166 ************************************************************************/
/*
 * Report the counters the VF driver maintains itself (packet/byte and
 * multicast totals); any other counter falls through to the stack's
 * if_get_counter_default().
 */
1168 ixv_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1170 struct adapter *adapter = iflib_get_softc(ctx);
1171 if_t ifp = iflib_get_ifp(ctx);
1174 case IFCOUNTER_IPACKETS:
1175 return (adapter->ipackets);
1176 case IFCOUNTER_OPACKETS:
1177 return (adapter->opackets);
1178 case IFCOUNTER_IBYTES:
1179 return (adapter->ibytes);
1180 case IFCOUNTER_OBYTES:
1181 return (adapter->obytes);
1182 case IFCOUNTER_IMCASTS:
1183 return (adapter->imcasts);
/* All remaining counters are handled generically by the stack. */
1185 return (if_get_counter_default(ifp, cnt));
1187 } /* ixv_if_get_counter */
1189 /************************************************************************
1190 * ixv_initialize_transmit_units - Enable transmit unit.
1191 ************************************************************************/
/*
 * Program every TX ring's descriptor base/length, head/tail indices,
 * and write-back behavior, then enable the queue.
 * NOTE(review): 'j' is assigned on an elided line — presumably the
 * ring's hardware index (txr->me); confirm against the full source.
 */
1193 ixv_initialize_transmit_units(if_ctx_t ctx)
1195 struct adapter *adapter = iflib_get_softc(ctx);
1196 struct ixgbe_hw *hw = &adapter->hw;
1197 if_softc_ctx_t scctx = adapter->shared;
1198 struct ix_tx_queue *que = adapter->tx_queues;
1201 for (i = 0; i < adapter->num_tx_queues; i++, que++) {
1202 struct tx_ring *txr = &que->txr;
1203 u64 tdba = txr->tx_paddr;
1207 /* Set WTHRESH to 8, burst writeback */
1208 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1209 txdctl |= (8 << 16);
1210 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1212 /* Set the HW Tx Head and Tail indices */
1213 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(j), 0);
1214 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(j), 0);
1216 /* Set Tx Tail register */
1217 txr->tail = IXGBE_VFTDT(j);
/* Reset report-status bookkeeping for a freshly initialized ring. */
1219 txr->tx_rs_cidx = txr->tx_rs_pidx;
1220 /* Initialize the last processed descriptor to be the end of
1221 * the ring, rather than the start, so that we avoid an
1222 * off-by-one error when calculating how many descriptors are
1223 * done in the credits_update function.
1225 txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
1226 for (int k = 0; k < scctx->isc_ntxd[0]; k++)
1227 txr->tx_rsq[k] = QIDX_INVALID;
1229 /* Set Ring parameters */
1230 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
1231 (tdba & 0x00000000ffffffffULL));
1232 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
1233 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
1234 scctx->isc_ntxd[0] * sizeof(struct ixgbe_legacy_tx_desc));
/* Turn off relaxed (write) ordering for descriptor write-back. */
1235 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
1236 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
1237 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
/* Finally, enable the queue. */
1240 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1241 txdctl |= IXGBE_TXDCTL_ENABLE;
1242 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1246 } /* ixv_initialize_transmit_units */
1248 /************************************************************************
1249 * ixv_initialize_rss_mapping
1250 ************************************************************************/
/*
 * Program the VF RSS key, redirection table (VFRETA), and hash-field
 * selection (VFMRQC).  With IXGBE_FEATURE_RSS enabled the key and
 * bucket mapping come from the kernel RSS framework; otherwise random
 * key material and a round-robin queue mapping are used.
 */
1252 ixv_initialize_rss_mapping(struct adapter *adapter)
1254 struct ixgbe_hw *hw = &adapter->hw;
1255 u32 reta = 0, mrqc, rss_key[10];
1258 u32 rss_hash_config;
1260 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
1261 /* Fetch the configured RSS key */
1262 rss_getkey((uint8_t *)&rss_key);
1264 /* set up random bits */
1265 arc4rand(&rss_key, sizeof(rss_key), 0);
1268 /* Now fill out hash function seeds */
1269 for (i = 0; i < 10; i++)
1270 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
1272 /* Set up the redirection table */
1273 for (i = 0, j = 0; i < 64; i++, j++) {
/* Wrap the round-robin queue index when it reaches queue count. */
1274 if (j == adapter->num_rx_queues)
1277 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
1279 * Fetch the RSS bucket id for the given indirection
1280 * entry. Cap it at the number of configured buckets
1281 * (which is num_rx_queues.)
1283 queue_id = rss_get_indirection_to_bucket(i);
1284 queue_id = queue_id % adapter->num_rx_queues;
1289 * The low 8 bits are for hash value (n+0);
1290 * The next 8 bits are for hash value (n+1), etc.
/* Shift the new entry into the top byte; the accumulated 32-bit
 * word is flushed to VFRETA once every four entries. */
1293 reta |= ((uint32_t)queue_id) << 24;
1295 IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
1300 /* Perform hash on these packet types */
1301 if (adapter->feat_en & IXGBE_FEATURE_RSS)
1302 rss_hash_config = rss_gethashconfig();
1305 * Disable UDP - IP fragments aren't currently being handled
1306 * and so we end up with a mix of 2-tuple and 4-tuple
/* Default hash set when the kernel RSS framework is not in use. */
1309 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
1310 | RSS_HASHTYPE_RSS_TCP_IPV4
1311 | RSS_HASHTYPE_RSS_IPV6
1312 | RSS_HASHTYPE_RSS_TCP_IPV6;
/* Translate the requested hash types into VFMRQC field-enable bits,
 * warning about types this hardware cannot hash on. */
1315 mrqc = IXGBE_MRQC_RSSEN;
1316 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
1317 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
1318 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
1319 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
1320 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1321 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
1322 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1323 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
1324 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1325 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
1327 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
1328 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
1330 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1331 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
1332 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1333 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
1334 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
1335 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
1337 IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
1338 } /* ixv_initialize_rss_mapping */
1341 /************************************************************************
1342 * ixv_initialize_receive_units - Setup receive registers and features.
1343 ************************************************************************/
/*
 * Program packet-split type, per-ring descriptor base/length, SRRCTL
 * buffer sizing, and tail pointers, then enable each RX queue and
 * finally set up RSS.  NOTE(review): 'j' is assigned on an elided
 * line — presumably rxr->me; confirm against the full source.
 */
1345 ixv_initialize_receive_units(if_ctx_t ctx)
1347 struct adapter *adapter = iflib_get_softc(ctx);
1348 if_softc_ctx_t scctx;
1349 struct ixgbe_hw *hw = &adapter->hw;
1350 struct ifnet *ifp = iflib_get_ifp(ctx);
1351 struct ix_rx_queue *que = adapter->rx_queues;
/* Choose 4K RX buffers for jumbo frames, 2K otherwise; SRRCTL encodes
 * the size in 1KB units via IXGBE_SRRCTL_BSIZEPKT_SHIFT. */
1354 if (ifp->if_mtu > ETHERMTU)
1355 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1357 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1359 psrtype = IXGBE_PSRTYPE_TCPHDR
1360 | IXGBE_PSRTYPE_UDPHDR
1361 | IXGBE_PSRTYPE_IPV4HDR
1362 | IXGBE_PSRTYPE_IPV6HDR
1363 | IXGBE_PSRTYPE_L2HDR;
1365 if (adapter->num_rx_queues > 1)
1368 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1370 /* Tell PF our max_frame size */
1371 if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
1372 device_printf(adapter->dev, "There is a problem with the PF setup. It is likely the receive unit for this VF will not function correctly.\n");
1374 scctx = adapter->shared;
1376 for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
1377 struct rx_ring *rxr = &que->rxr;
1378 u64 rdba = rxr->rx_paddr;
1382 /* Disable the queue */
1383 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
1384 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1385 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
/* Poll briefly (up to 10 reads) for the disable to take effect. */
1386 for (int k = 0; k < 10; k++) {
1387 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
1388 IXGBE_RXDCTL_ENABLE)
1394 /* Setup the Base and Length of the Rx Descriptor Ring */
1395 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
1396 (rdba & 0x00000000ffffffffULL));
1397 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
1398 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
1399 scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));
1401 /* Reset the ring indices */
1402 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
1403 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
1405 /* Set up the SRRCTL register */
1406 reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
1407 reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1408 reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
1410 reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1411 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);
1413 /* Capture Rx Tail index */
1414 rxr->tail = IXGBE_VFRDT(rxr->me);
1416 /* Do the queue enabling last */
1417 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1418 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
/* Poll briefly (up to 10 reads) for the enable to take effect. */
1419 for (int l = 0; l < 10; l++) {
1420 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
1421 IXGBE_RXDCTL_ENABLE)
1427 /* Set the Tail Pointer */
1430 * In netmap mode, we must preserve the buffers made
1431 * available to userspace before the if_init()
1432 * (this is true by default on the TX side, because
1433 * init makes all buffers available to userspace).
1435 * netmap_reset() and the device specific routines
1436 * (e.g. ixgbe_setup_receive_rings()) map these
1437 * buffers at the end of the NIC ring, so here we
1438 * must set the RDT (tail) register to make sure
1439 * they are not overwritten.
1441 * In this driver the NIC ring starts at RDH = 0,
1442 * RDT points to the last slot available for reception (?),
1443 * so RDT = num_rx_desc - 1 means the whole ring is available.
1445 if (ifp->if_capenable & IFCAP_NETMAP) {
1446 struct netmap_adapter *na = NA(ifp);
1447 struct netmap_kring *kring = na->rx_rings[j];
1448 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1450 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
1452 #endif /* DEV_NETMAP */
/* Non-netmap path: make the entire ring available for reception. */
1453 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
1454 scctx->isc_nrxd[0] - 1);
1457 ixv_initialize_rss_mapping(adapter);
1458 } /* ixv_initialize_receive_units */
1460 /************************************************************************
1461 * ixv_setup_vlan_support
1462 ************************************************************************/
/*
 * Re-apply VLAN state after a (soft) reset: re-enable per-queue tag
 * stripping if requested, and repopulate the hardware VFTA from the
 * software shadow copy via the PF mailbox.
 */
1464 ixv_setup_vlan_support(if_ctx_t ctx)
1466 struct ifnet *ifp = iflib_get_ifp(ctx);
1467 struct adapter *adapter = iflib_get_softc(ctx);
1468 struct ixgbe_hw *hw = &adapter->hw;
1469 u32 ctrl, vid, vfta, retry;
1472 * We get here thru if_init, meaning
1473 * a soft reset, this has already cleared
1474 * the VFTA and other state, so if there
1475 * have been no vlan's registered do nothing.
1477 if (adapter->num_vlans == 0)
1480 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1481 /* Enable the queues */
1482 for (int i = 0; i < adapter->num_rx_queues; i++) {
1483 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
1484 ctrl |= IXGBE_RXDCTL_VME;
1485 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
1487 * Let Rx path know that it needs to store VLAN tag
1488 * as part of extra mbuf info.
1490 adapter->rx_queues[i].rxr.vtag_strip = TRUE;
1495 * If filtering VLAN tags is disabled,
1496 * there is no need to fill VLAN Filter Table Array (VFTA).
1498 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1502 * A soft reset zero's out the VFTA, so
1503 * we need to repopulate it now.
1505 for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
1506 if (ixv_shadow_vfta[i] == 0)
1508 vfta = ixv_shadow_vfta[i];
1510 * Reconstruct the vlan id's
1511 * based on the bits set in each
1512 * of the array ints.
1514 for (int j = 0; j < 32; j++) {
1516 if ((vfta & (1 << j)) == 0)
1519 /* Call the shared code mailbox routine */
/* NOTE(review): retries on mailbox failure; the retry limit/backoff
 * is on elided lines — confirm against the full source. */
1520 while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
1526 } /* ixv_setup_vlan_support */
1528 /************************************************************************
1529 * ixv_if_register_vlan
1531 * Run via a vlan config EVENT, it enables us to use the
1532 * HW Filter table since we can get the vlan id. This just
1533 * creates the entry in the soft version of the VFTA, init
1534 * will repopulate the real table.
1535 ************************************************************************/
1537 ixv_if_register_vlan(if_ctx_t ctx, u16 vtag)
1539 struct adapter *adapter = iflib_get_softc(ctx);
/* Upper bits of vtag select a 32-bit word in the shadow VFTA.
 * NOTE(review): 'bit' is computed on an elided line — presumably
 * vtag & 0x1F selecting the bit within the word. */
1542 index = (vtag >> 5) & 0x7F;
1544 ixv_shadow_vfta[index] |= (1 << bit);
1545 ++adapter->num_vlans;
1546 } /* ixv_if_register_vlan */
1548 /************************************************************************
1549 * ixv_if_unregister_vlan
1551 * Run via a vlan unconfig EVENT, remove our entry
1553 ************************************************************************/
/* Mirror of ixv_if_register_vlan(): clear the shadow-VFTA bit and
 * decrement the registered-VLAN count. */
1555 ixv_if_unregister_vlan(if_ctx_t ctx, u16 vtag)
1557 struct adapter *adapter = iflib_get_softc(ctx);
1560 index = (vtag >> 5) & 0x7F;
1562 ixv_shadow_vfta[index] &= ~(1 << bit);
1563 --adapter->num_vlans;
1564 } /* ixv_if_unregister_vlan */
1566 /************************************************************************
1567 * ixv_if_enable_intr
1568 ************************************************************************/
/*
 * Unmask VF interrupts: set the interrupt mask (VTEIMS), program
 * auto-clear (VTEIAC) for queue vectors only, then enable each RX
 * queue's vector and flush the register writes.
 */
1570 ixv_if_enable_intr(if_ctx_t ctx)
1572 struct adapter *adapter = iflib_get_softc(ctx);
1573 struct ixgbe_hw *hw = &adapter->hw;
1574 struct ix_rx_queue *que = adapter->rx_queues;
1575 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
1577 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
/* Auto-clear queue interrupts, but not the admin/link causes. */
1579 mask = IXGBE_EIMS_ENABLE_MASK;
1580 mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
1581 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
1583 for (int i = 0; i < adapter->num_rx_queues; i++, que++)
1584 ixv_enable_queue(adapter, que->msix);
1586 IXGBE_WRITE_FLUSH(hw);
1587 } /* ixv_if_enable_intr */
1589 /************************************************************************
1590 * ixv_if_disable_intr
1591 ************************************************************************/
/* Mask all VF interrupts: clear auto-clear (VTEIAC), set every bit in
 * the mask-clear register (VTEIMC), and flush the writes. */
1593 ixv_if_disable_intr(if_ctx_t ctx)
1595 struct adapter *adapter = iflib_get_softc(ctx);
1596 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
1597 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
1598 IXGBE_WRITE_FLUSH(&adapter->hw);
1599 } /* ixv_if_disable_intr */
1601 /************************************************************************
1602 * ixv_if_rx_queue_intr_enable
1603 ************************************************************************/
/* iflib callback: re-enable the MSI-X vector for one RX queue after
 * its interrupt has been serviced. */
1605 ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
1607 struct adapter *adapter = iflib_get_softc(ctx);
1608 struct ix_rx_queue *que = &adapter->rx_queues[rxqid];
1610 ixv_enable_queue(adapter, que->rxr.me);
1613 } /* ixv_if_rx_queue_intr_enable */
1615 /************************************************************************
1618 * Setup the correct IVAR register for a particular MSI-X interrupt
1619 * - entry is the register array entry
1620 * - vector is the MSI-X vector for this queue
1621 * - type is RX/TX/MISC
1622 ************************************************************************/
1624 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
1626 struct ixgbe_hw *hw = &adapter->hw;
/* Mark the vector as valid so the hardware honors the mapping. */
1629 vector |= IXGBE_IVAR_ALLOC_VAL;
1631 if (type == -1) { /* MISC IVAR */
1632 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
1635 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
1636 } else { /* RX/TX IVARS */
/* Each VTIVAR register packs four 8-bit fields; entry parity picks
 * the 16-bit half, type (0=RX,1=TX) picks the byte within it. */
1637 index = (16 * (entry & 1)) + (8 * type);
1638 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
1639 ivar &= ~(0xFF << index);
1640 ivar |= (vector << index);
1641 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
1643 } /* ixv_set_ivar */
1645 /************************************************************************
1646 * ixv_configure_ivars
1647 ************************************************************************/
/*
 * Bind every queue's RX and TX causes to its MSI-X vector, seed the
 * per-vector throttle rate (EITR), and route the mailbox/admin cause
 * to the admin vector.  Assumes a 1:1 RX/TX queue pairing.
 */
1649 ixv_configure_ivars(struct adapter *adapter)
1651 struct ix_rx_queue *que = adapter->rx_queues;
1653 MPASS(adapter->num_rx_queues == adapter->num_tx_queues);
1655 for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
1656 /* First the RX queue entry */
1657 ixv_set_ivar(adapter, i, que->msix, 0);
1658 /* ... and the TX */
1659 ixv_set_ivar(adapter, i, que->msix, 1);
1660 /* Set an initial value in EITR */
1661 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
1662 IXGBE_EITR_DEFAULT);
1665 /* For the mailbox interrupt */
1666 ixv_set_ivar(adapter, 1, adapter->vector, -1);
1667 } /* ixv_configure_ivars */
1669 /************************************************************************
1672 * The VF stats registers never have a truly virgin
1673 * starting point, so this routine tries to make an
1674 * artificial one, marking ground zero on attach as
1676 ************************************************************************/
/* Accumulate the deltas since the recorded base values into the
 * saved_reset_* fields so totals survive a device reset. */
1678 ixv_save_stats(struct adapter *adapter)
1680 if (adapter->stats.vf.vfgprc || adapter->stats.vf.vfgptc) {
1681 adapter->stats.vf.saved_reset_vfgprc +=
1682 adapter->stats.vf.vfgprc - adapter->stats.vf.base_vfgprc;
1683 adapter->stats.vf.saved_reset_vfgptc +=
1684 adapter->stats.vf.vfgptc - adapter->stats.vf.base_vfgptc;
1685 adapter->stats.vf.saved_reset_vfgorc +=
1686 adapter->stats.vf.vfgorc - adapter->stats.vf.base_vfgorc;
1687 adapter->stats.vf.saved_reset_vfgotc +=
1688 adapter->stats.vf.vfgotc - adapter->stats.vf.base_vfgotc;
1689 adapter->stats.vf.saved_reset_vfmprc +=
1690 adapter->stats.vf.vfmprc - adapter->stats.vf.base_vfmprc;
1692 } /* ixv_save_stats */
1694 /************************************************************************
1696 ************************************************************************/
/*
 * Snapshot the current hardware counter values as both the "last seen"
 * and "base" readings; later updates measure growth relative to these.
 * The 36-bit octet counters are assembled from LSB/MSB register pairs.
 */
1698 ixv_init_stats(struct adapter *adapter)
1700 struct ixgbe_hw *hw = &adapter->hw;
1702 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1703 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1704 adapter->stats.vf.last_vfgorc |=
1705 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1707 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1708 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1709 adapter->stats.vf.last_vfgotc |=
1710 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1712 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
1714 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
1715 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
1716 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
1717 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
1718 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
1719 } /* ixv_init_stats */
/*
 * Fold a 32-bit hardware counter into a wider running total, handling
 * register wraparound (if the new reading is below the last one, the
 * counter rolled over, so bump the upper bits).  Expects 'hw' in scope.
 */
1721 #define UPDATE_STAT_32(reg, last, count) \
1723 u32 current = IXGBE_READ_REG(hw, reg); \
1724 if (current < last) \
1725 count += 0x100000000LL; \
1727 count &= 0xFFFFFFFF00000000LL; \
/* Same idea for the 36-bit octet counters, read as an LSB/MSB pair. */
1731 #define UPDATE_STAT_36(lsb, msb, last, count) \
1733 u64 cur_lsb = IXGBE_READ_REG(hw, lsb); \
1734 u64 cur_msb = IXGBE_READ_REG(hw, msb); \
1735 u64 current = ((cur_msb << 32) | cur_lsb); \
1736 if (current < last) \
1737 count += 0x1000000000LL; \
1739 count &= 0xFFFFFFF000000000LL; \
1743 /************************************************************************
1744 * ixv_update_stats - Update the board statistics counters.
1745 ************************************************************************/
/* Refresh the wrap-corrected VF counters from hardware, then copy them
 * into the fields ixv_if_get_counter() reports to the stack. */
1747 ixv_update_stats(struct adapter *adapter)
1749 struct ixgbe_hw *hw = &adapter->hw;
1750 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
1752 UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
1753 adapter->stats.vf.vfgprc);
1754 UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
1755 adapter->stats.vf.vfgptc);
1756 UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
1757 adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
1758 UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
1759 adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
1760 UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
1761 adapter->stats.vf.vfmprc);
1763 /* Fill out the OS statistics structure */
1764 IXGBE_SET_IPACKETS(adapter, stats->vfgprc);
1765 IXGBE_SET_OPACKETS(adapter, stats->vfgptc);
1766 IXGBE_SET_IBYTES(adapter, stats->vfgorc);
1767 IXGBE_SET_OBYTES(adapter, stats->vfgotc);
1768 IXGBE_SET_IMCASTS(adapter, stats->vfmprc);
1769 } /* ixv_update_stats */
1771 /************************************************************************
1772 * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
1773 ************************************************************************/
/*
 * Build the sysctl tree under the device node: driver-level counters,
 * a per-queue subtree for TX and RX rings, and a "mac" subtree for the
 * hardware-register-derived VF statistics.
 */
1775 ixv_add_stats_sysctls(struct adapter *adapter)
1777 device_t dev = adapter->dev;
1778 struct ix_tx_queue *tx_que = adapter->tx_queues;
1779 struct ix_rx_queue *rx_que = adapter->rx_queues;
1780 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1781 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
1782 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1783 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
1784 struct sysctl_oid *stat_node, *queue_node;
1785 struct sysctl_oid_list *stat_list, *queue_list;
1787 #define QUEUE_NAME_LEN 32
1788 char namebuf[QUEUE_NAME_LEN];
1790 /* Driver Statistics */
1791 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1792 CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1793 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1794 CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
/* Per-TX-queue nodes: queueN/{tso_tx,tx_packets}. */
1796 for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
1797 struct tx_ring *txr = &tx_que->txr;
1798 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1799 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1800 CTLFLAG_RD, NULL, "Queue Name");
1801 queue_list = SYSCTL_CHILDREN(queue_node);
1803 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1804 CTLFLAG_RD, &(txr->tso_tx), "TSO Packets");
1805 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1806 CTLFLAG_RD, &(txr->total_packets), "TX Packets");
/* Per-RX-queue nodes: queueN/{irqs,rx_packets,rx_bytes,rx_discarded}. */
1809 for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) {
1810 struct rx_ring *rxr = &rx_que->rxr;
1811 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1812 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1813 CTLFLAG_RD, NULL, "Queue Name");
1814 queue_list = SYSCTL_CHILDREN(queue_node);
1816 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1817 CTLFLAG_RD, &(rx_que->irqs), "IRQs on queue");
1818 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1819 CTLFLAG_RD, &(rxr->rx_packets), "RX packets");
1820 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1821 CTLFLAG_RD, &(rxr->rx_bytes), "RX bytes");
1822 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1823 CTLFLAG_RD, &(rxr->rx_discarded), "Discarded RX packets");
/* Hardware-register-derived totals live under the "mac" node. */
1826 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
1827 CTLFLAG_RD, NULL, "VF Statistics (read from HW registers)");
1828 stat_list = SYSCTL_CHILDREN(stat_node);
1830 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1831 CTLFLAG_RD, &stats->vfgprc, "Good Packets Received");
1832 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1833 CTLFLAG_RD, &stats->vfgorc, "Good Octets Received");
1834 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1835 CTLFLAG_RD, &stats->vfmprc, "Multicast Packets Received");
1836 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1837 CTLFLAG_RD, &stats->vfgptc, "Good Packets Transmitted");
1838 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1839 CTLFLAG_RD, &stats->vfgotc, "Good Octets Transmitted");
1840 } /* ixv_add_stats_sysctls */
1842 /************************************************************************
1843 * ixv_print_debug_info
1845 * Called only when em_display_debug_stats is enabled.
1846 * Provides a way to take a look at important statistics
1847 * maintained by the driver and hardware.
1848 ************************************************************************/
/* Dump the error byte count register and the mailbox IRQ tally to the
 * console; reached via the debug sysctl handler below. */
1850 ixv_print_debug_info(struct adapter *adapter)
1852 device_t dev = adapter->dev;
1853 struct ixgbe_hw *hw = &adapter->hw;
1855 device_printf(dev, "Error Byte Count = %u \n",
1856 IXGBE_READ_REG(hw, IXGBE_ERRBC));
1858 device_printf(dev, "MBX IRQ Handled: %lu\n", (long)adapter->link_irq);
1859 } /* ixv_print_debug_info */
1861 /************************************************************************
1863 ************************************************************************/
/* Sysctl handler: when a new value is written, print the debug info.
 * Reads of the node return without side effects. */
1865 ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
1867 struct adapter *adapter;
1871 error = sysctl_handle_int(oidp, &result, 0, req);
/* No new value supplied (plain read) or handler error: bail early. */
1873 if (error || !req->newptr)
1877 adapter = (struct adapter *)arg1;
1878 ixv_print_debug_info(adapter);
1882 } /* ixv_sysctl_debug */
1884 /************************************************************************
1885 * ixv_init_device_features
1886 ************************************************************************/
/*
 * Populate feat_cap (what the hardware/driver can do) based on the MAC
 * type, then copy the default-on subset into feat_en (what is actually
 * enabled).
 */
1888 ixv_init_device_features(struct adapter *adapter)
1890 adapter->feat_cap = IXGBE_FEATURE_NETMAP
1893 | IXGBE_FEATURE_LEGACY_TX;
1895 /* A tad short on feature flags for VFs, atm. */
1896 switch (adapter->hw.mac.type) {
1897 case ixgbe_mac_82599_vf:
1899 case ixgbe_mac_X540_vf:
1901 case ixgbe_mac_X550_vf:
1902 case ixgbe_mac_X550EM_x_vf:
1903 case ixgbe_mac_X550EM_a_vf:
/* X550-class VFs require an advanced context descriptor per packet. */
1904 adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
1910 /* Enabled by default... */
1911 /* Is a virtual function (VF) */
1912 if (adapter->feat_cap & IXGBE_FEATURE_VF)
1913 adapter->feat_en |= IXGBE_FEATURE_VF;
1915 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
1916 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
1917 /* Receive-Side Scaling (RSS) */
1918 if (adapter->feat_cap & IXGBE_FEATURE_RSS)
1919 adapter->feat_en |= IXGBE_FEATURE_RSS;
1920 /* Needs advanced context descriptor regardless of offloads req'd */
1921 if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
1922 adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
1923 } /* ixv_init_device_features */