1 /******************************************************************************
3 Copyright (c) 2001-2017, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
37 #include "opt_inet6.h"
42 #include <net/netmap.h>
43 #include <dev/netmap/netmap_kern.h>
45 /************************************************************************
47 ************************************************************************/
48 char ixv_driver_version[] = "2.0.1-k";
50 /************************************************************************
53 * Used by probe to select devices to load on
54 * Last field stores an index into ixv_strings
55 * Last entry must be all 0s
57 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
58 ************************************************************************/
/* All supported VF device IDs share one human-readable description string. */
static pci_vendor_info_t ixv_vendor_info_array[] =
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
	/* required last entry */
70 /************************************************************************
72 ************************************************************************/
/* Attach/detach lifecycle (iflib ifdi_* entry points) */
static void *ixv_register(device_t dev);
static int ixv_if_attach_pre(if_ctx_t ctx);
static int ixv_if_attach_post(if_ctx_t ctx);
static int ixv_if_detach(if_ctx_t ctx);
/* Queue setup/teardown and interrupt plumbing */
static int ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
static int ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
static int ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
static void ixv_if_queues_free(if_ctx_t ctx);
static void ixv_identify_hardware(if_ctx_t ctx);
static void ixv_init_device_features(struct adapter *);
static int ixv_allocate_pci_resources(if_ctx_t ctx);
static void ixv_free_pci_resources(if_ctx_t ctx);
static int ixv_setup_interface(if_ctx_t ctx);
static void ixv_if_media_status(if_ctx_t , struct ifmediareq *);
static int ixv_if_media_change(if_ctx_t ctx);
static void ixv_if_update_admin_status(if_ctx_t ctx);
static int ixv_if_msix_intr_assign(if_ctx_t ctx, int msix);
/* Runtime control */
static int ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
static void ixv_if_init(if_ctx_t ctx);
static void ixv_if_local_timer(if_ctx_t ctx, uint16_t qid);
static void ixv_if_stop(if_ctx_t ctx);
static int ixv_negotiate_api(struct adapter *);
/* Hardware programming helpers */
static void ixv_initialize_transmit_units(if_ctx_t ctx);
static void ixv_initialize_receive_units(if_ctx_t ctx);
static void ixv_initialize_rss_mapping(struct adapter *);
static void ixv_setup_vlan_support(if_ctx_t ctx);
static void ixv_configure_ivars(struct adapter *);
static void ixv_if_enable_intr(if_ctx_t ctx);
static void ixv_if_disable_intr(if_ctx_t ctx);
static void ixv_if_multi_set(if_ctx_t ctx);
static void ixv_if_register_vlan(if_ctx_t, u16);
static void ixv_if_unregister_vlan(if_ctx_t, u16);
static uint64_t ixv_if_get_counter(if_ctx_t, ift_counter);
/* Statistics */
static void ixv_save_stats(struct adapter *);
static void ixv_init_stats(struct adapter *);
static void ixv_update_stats(struct adapter *);
static void ixv_add_stats_sysctls(struct adapter *adapter);
static int ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
static void ixv_set_ivar(struct adapter *, u8, u8, s8);
static u8 *ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
/* The MSI-X Interrupt handlers */
static int ixv_msix_que(void *);
static int ixv_msix_mbx(void *);
127 /************************************************************************
128 * FreeBSD Device Interface Entry Points
129 ************************************************************************/
static device_method_t ixv_methods[] = {
	/* Device interface: all newbus entry points are delegated to iflib
	 * except device_register, which supplies the driver's shared ctx. */
	DEVMETHOD(device_register, ixv_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
static driver_t ixv_driver = {
	"ixv", ixv_methods, sizeof(struct adapter),
devclass_t ixv_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
IFLIB_PNP_INFO(pci, ixv_driver, ixv_vendor_info_array);
MODULE_DEPEND(ixv, iflib, 1, 1, 1);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
/* iflib device-interface method table (ifdi_* callbacks for this driver). */
static device_method_t ixv_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixv_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixv_if_attach_post),
	DEVMETHOD(ifdi_detach, ixv_if_detach),
	DEVMETHOD(ifdi_init, ixv_if_init),
	DEVMETHOD(ifdi_stop, ixv_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixv_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixv_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixv_if_disable_intr),
	/* NOTE(review): TX queue intr enable is mapped to the RX handler —
	 * presumably intentional since TX interrupts ride the RX vector;
	 * confirm against ixv_if_msix_intr_assign. */
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixv_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixv_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixv_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixv_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixv_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixv_if_mtu_set),
	DEVMETHOD(ifdi_media_status, ixv_if_media_status),
	DEVMETHOD(ifdi_media_change, ixv_if_media_change),
	DEVMETHOD(ifdi_timer, ixv_if_local_timer),
	DEVMETHOD(ifdi_vlan_register, ixv_if_register_vlan),
	DEVMETHOD(ifdi_vlan_unregister, ixv_if_unregister_vlan),
	DEVMETHOD(ifdi_get_counter, ixv_if_get_counter),
static driver_t ixv_if_driver = {
	"ixv_if", ixv_if_methods, sizeof(struct adapter)
182 * TUNEABLE PARAMETERS:
/* Flow control setting, default to full */
static int ixv_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload,
 * it can be a performance win in some workloads, but
 * in others it actually hurts; it's off by default.
 */
static int ixv_header_split = FALSE;
TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
/*
 * Shadow VFTA table, this is needed because
 * the real filter table gets cleared during
 * a soft reset and we need to repopulate it.
 */
static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
/* TX/RX descriptor handling routines shared with the PF (ixgbe) driver. */
extern struct if_txrx ixgbe_txrx;
/* Static iflib shared-context template: queue geometry, DMA limits and
 * capabilities handed to iflib before attach. */
static struct if_shared_ctx ixv_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */
	.isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = PAGE_SIZE,
	.isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = PAGE_SIZE,
	.isc_rx_maxsize = MJUM16BYTES,
	.isc_rx_nsegments = 1,
	.isc_rx_maxsegsize = MJUM16BYTES,
	/* one admin (mailbox/link) interrupt in addition to the queue vectors */
	.isc_admin_intrcnt = 1,
	.isc_vendor_info = ixv_vendor_info_array,
	.isc_driver_version = ixv_driver_version,
	.isc_driver = &ixv_if_driver,
	.isc_flags = IFLIB_IS_VF | IFLIB_TSO_INIT_IP,
	.isc_nrxd_min = {MIN_RXD},
	.isc_ntxd_min = {MIN_TXD},
	.isc_nrxd_max = {MAX_RXD},
	.isc_ntxd_max = {MAX_TXD},
	.isc_nrxd_default = {DEFAULT_RXD},
	.isc_ntxd_default = {DEFAULT_TXD},
if_shared_ctx_t ixv_sctx = &ixv_sctx_init;
/* device_register hook — presumably returns ixv_sctx for iflib; body not
 * visible in this extract, confirm against the full source. */
ixv_register(device_t dev)
241 /************************************************************************
242 * ixv_if_tx_queues_alloc
243 ************************************************************************/
ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int ntxqs, int ntxqsets)
	struct adapter *adapter = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = adapter->shared;
	struct ix_tx_queue *que;
	/* iflib must hand us exactly the queue count negotiated at attach */
	MPASS(adapter->num_tx_queues == ntxqsets);
	/* Allocate queue structure memory */
	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!adapter->tx_queues) {
		device_printf(iflib_get_dev(ctx),
		    "Unable to allocate TX ring memory\n");
	for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) {
		struct tx_ring *txr = &que->txr;
		/* cross-link ring/queue back to the softc */
		txr->adapter = que->adapter = adapter;
		adapter->active_queues |= (u64)1 << txr->me;
		/* Allocate report status array */
		if (!(txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
		/* no descriptor has a completion reported yet */
		for (j = 0; j < scctx->isc_ntxd[0]; j++)
			txr->tx_rsq[j] = QIDX_INVALID;
		/* get the virtual and physical address of the hardware queues */
		txr->tail = IXGBE_VFTDT(txr->me);
		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i*ntxqs];
		txr->tx_paddr = paddrs[i*ntxqs];
		txr->total_packets = 0;
	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
	    adapter->num_tx_queues);
	/* error path: release anything allocated so far */
	ixv_if_queues_free(ctx);
} /* ixv_if_tx_queues_alloc */
301 /************************************************************************
302 * ixv_if_rx_queues_alloc
303 ************************************************************************/
305 ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
306 int nrxqs, int nrxqsets)
308 struct adapter *adapter = iflib_get_softc(ctx);
309 struct ix_rx_queue *que;
312 MPASS(adapter->num_rx_queues == nrxqsets);
315 /* Allocate queue structure memory */
317 (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) * nrxqsets,
318 M_DEVBUF, M_NOWAIT | M_ZERO);
319 if (!adapter->rx_queues) {
320 device_printf(iflib_get_dev(ctx),
321 "Unable to allocate TX ring memory\n");
326 for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
327 struct rx_ring *rxr = &que->rxr;
329 rxr->adapter = que->adapter = adapter;
332 /* get the virtual and physical address of the hw queues */
333 rxr->tail = IXGBE_VFRDT(rxr->me);
334 rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
335 rxr->rx_paddr = paddrs[i*nrxqs];
340 device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
341 adapter->num_rx_queues);
346 ixv_if_queues_free(ctx);
349 } /* ixv_if_rx_queues_alloc */
351 /************************************************************************
353 ************************************************************************/
/* Release per-queue software state (report-status arrays and the TX/RX
 * queue arrays themselves).  Safe to call on a partially-built set. */
ixv_if_queues_free(if_ctx_t ctx)
	struct adapter *adapter = iflib_get_softc(ctx);
	struct ix_tx_queue *que = adapter->tx_queues;
	for (i = 0; i < adapter->num_tx_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;
		/* queues past the failure point have no tx_rsq yet */
		if (txr->tx_rsq == NULL)
		free(txr->tx_rsq, M_DEVBUF);
	if (adapter->tx_queues != NULL)
		free(adapter->tx_queues, M_DEVBUF);
	if (adapter->rx_queues != NULL)
		free(adapter->rx_queues, M_DEVBUF);
	/* NULL the pointers so a repeated call is harmless */
	adapter->tx_queues = NULL;
	adapter->rx_queues = NULL;
} /* ixv_if_queues_free */
381 /************************************************************************
382 * ixv_if_attach_pre - Device initialization routine
384 * Called when the driver is being loaded.
385 * Identifies the type of hardware, allocates all resources
386 * and initializes the hardware.
388 * return 0 on success, positive on failure
389 ************************************************************************/
ixv_if_attach_pre(if_ctx_t ctx)
	struct adapter *adapter;
	if_softc_ctx_t scctx;
	INIT_DEBUGOUT("ixv_attach: begin");
	/* Allocate, clear, and link in our adapter structure */
	dev = iflib_get_dev(ctx);
	adapter = iflib_get_softc(ctx);
	adapter->hw.back = adapter;
	scctx = adapter->shared = iflib_get_softc_ctx(ctx);
	adapter->media = iflib_get_media(ctx);
	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(ctx)) {
		device_printf(dev, "ixv_allocate_pci_resources() failed!\n");
	/* per-device "debug" sysctl for dumping driver state */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixv_sysctl_debug, "I",
	/* Determine hardware revision */
	ixv_identify_hardware(ctx);
	ixv_init_device_features(adapter);
	/* Initialize the shared code */
	error = ixgbe_init_ops_vf(hw);
		device_printf(dev, "ixgbe_init_ops_vf() failed!\n");
	/* Setup the mailbox */
	ixgbe_init_mbx_params_vf(hw);
	error = hw->mac.ops.reset_hw(hw);
	if (error == IXGBE_ERR_RESET_FAILED)
		device_printf(dev, "...reset_hw() failure: Reset Failed!\n");
		device_printf(dev, "...reset_hw() failed with error %d\n",
	error = hw->mac.ops.init_hw(hw);
		device_printf(dev, "...init_hw() failed with error %d\n",
	/* Negotiate mailbox API version */
	error = ixv_negotiate_api(adapter);
	    "Mailbox API negotiation failed during attach!\n");
	/* If no mac address was assigned, make a random one */
	if (!ixv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		arc4rand(&addr, sizeof(addr), 0);
		bcopy(addr, hw->mac.addr, sizeof(addr));
		bcopy(addr, hw->mac.perm_addr, sizeof(addr));
	/* Most of the iflib initialization... */
	iflib_set_mac(ctx, hw->mac.addr);
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550_vf:
	case ixgbe_mac_X550EM_x_vf:
	case ixgbe_mac_X550EM_a_vf:
		/* X550-class VFs support two queue pairs */
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 2;
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
	/* descriptor ring byte sizes, rounded for DMA alignment */
	scctx->isc_txqsizes[0] =
	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
	    sizeof(u32), DBA_ALIGN);
	scctx->isc_rxqsizes[0] =
	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
	scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
	scctx->isc_msix_bar = pci_msix_table_bar(dev);
	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
	scctx->isc_txrx = &ixgbe_txrx;
	/*
	 * Tell the upper layer(s) we support everything the PF
	 * driver does except...
	 */
	scctx->isc_capabilities = IXGBE_CAPS;
	/* NOTE(review): XOR only clears WOL if IXGBE_CAPS has it set;
	 * `&= ~IFCAP_WOL` would state the intent unambiguously — confirm. */
	scctx->isc_capabilities ^= IFCAP_WOL;
	scctx->isc_capenable = scctx->isc_capabilities;
	INIT_DEBUGOUT("ixv_if_attach_pre: end");
	/* error path: unmap BAR0 */
	ixv_free_pci_resources(ctx);
} /* ixv_if_attach_pre */
/* Second-stage attach: create the ifnet-facing interface state and the
 * statistics sysctl tree.  Runs after iflib has allocated queues. */
ixv_if_attach_post(if_ctx_t ctx)
	struct adapter *adapter = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);
	/* Setup OS specific network interface */
	error = ixv_setup_interface(ctx);
		device_printf(dev, "Interface setup failed: %d\n", error);
	/* Do the stats setup */
	ixv_save_stats(adapter);
	ixv_init_stats(adapter);
	ixv_add_stats_sysctls(adapter);
} /* ixv_if_attach_post */
547 /************************************************************************
548 * ixv_detach - Device removal routine
550 * Called when the driver is being removed.
551 * Stops the adapter and deallocates all the resources
552 * that were allocated for driver operation.
554 * return 0 on success, positive on failure
555 ************************************************************************/
/* Teardown: only PCI resources are released here; queue memory is freed
 * separately via the ifdi_queues_free callback. */
ixv_if_detach(if_ctx_t ctx)
	INIT_DEBUGOUT("ixv_detach: begin");
	ixv_free_pci_resources(ctx);
} /* ixv_if_detach */
566 /************************************************************************
568 ************************************************************************/
570 ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
572 struct adapter *adapter = iflib_get_softc(ctx);
573 struct ifnet *ifp = iflib_get_ifp(ctx);
576 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
577 if (mtu > IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) {
581 adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
585 } /* ixv_if_mtu_set */
587 /************************************************************************
588 * ixv_if_init - Init entry point
590 * Used in two ways: It is used by the stack as an init entry
591 * point in network interface structure. It is also used
592 * by the driver as a hw/sw initialization routine to get
593 * to a consistent state.
595 * return 0 on success, positive on failure
596 ************************************************************************/
ixv_if_init(if_ctx_t ctx)
	struct adapter *adapter = iflib_get_softc(ctx);
	struct ifnet *ifp = iflib_get_ifp(ctx);
	device_t dev = iflib_get_dev(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	INIT_DEBUGOUT("ixv_if_init: begin");
	/* clear the stopped flag first so stop_adapter actually runs */
	hw->adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);
	/* Reset VF and renegotiate mailbox API version */
	hw->mac.ops.reset_hw(hw);
	hw->mac.ops.start_hw(hw);
	error = ixv_negotiate_api(adapter);
	    "Mailbox API negotiation failed in if_init!\n");
	/* Configure TX rings */
	ixv_initialize_transmit_units(ctx);
	/* Setup Multicast table */
	ixv_if_multi_set(ctx);
	/* cache cluster size chosen by iflib for this MTU */
	adapter->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
	/* Configure RX settings */
	ixv_initialize_receive_units(ctx);
	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(ctx);
	/* Set up MSI-X routing */
	ixv_configure_ivars(adapter);
	/* Set up auto-mask */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
	/* re-baseline statistics after the reset */
	ixv_init_stats(adapter);
	/* Config/Enable Link */
	hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
	/* And now turn on interrupts */
	ixv_if_enable_intr(ctx);
662 /************************************************************************
664 ************************************************************************/
/* Unmask the MSI-X interrupt for `vector` by writing its bit to VTEIMS. */
ixv_enable_queue(struct adapter *adapter, u32 vector)
	struct ixgbe_hw *hw = &adapter->hw;
	u32 queue = 1 << vector;
	/* restrict to bits valid for RX/TX queue causes */
	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
} /* ixv_enable_queue */
676 /************************************************************************
678 ************************************************************************/
680 ixv_disable_queue(struct adapter *adapter, u32 vector)
682 struct ixgbe_hw *hw = &adapter->hw;
683 u64 queue = (u64)(1 << vector);
686 mask = (IXGBE_EIMS_RTX_QUEUE & queue);
687 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
688 } /* ixv_disable_queue */
691 /************************************************************************
692 * ixv_msix_que - MSI-X Queue Interrupt Service routine
693 ************************************************************************/
/* Per-queue MSI-X filter: mask this queue's interrupt and hand the rest of
 * the work to the iflib task thread. */
ixv_msix_que(void *arg)
	struct ix_rx_queue *que = arg;
	struct adapter *adapter = que->adapter;
	/* masked until the task re-enables it via ifdi_*_queue_intr_enable */
	ixv_disable_queue(adapter, que->msix);
	return (FILTER_SCHEDULE_THREAD);
706 /************************************************************************
708 ************************************************************************/
/* Mailbox/link MSI-X filter: acknowledge the cause and defer link-state
 * processing to the iflib admin task. */
ixv_msix_mbx(void *arg)
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	/* First get the cause */
	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
	/* Link status change */
	if (reg & IXGBE_EICR_LSC)
		iflib_admin_intr_deferred(adapter->ctx);
	/* re-arm the "other" (mailbox/link) cause */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
	return (FILTER_HANDLED);
732 /************************************************************************
733 * ixv_media_status - Media Ioctl callback
735 * Called whenever the user queries the status of
736 * the interface using ifconfig.
737 ************************************************************************/
ixv_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
	struct adapter *adapter = iflib_get_softc(ctx);
	INIT_DEBUGOUT("ixv_media_status: begin");
	/* kick the admin task so cached link state gets refreshed */
	iflib_admin_intr_deferred(ctx);
	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;
	/* report inactive media while link is down */
	if (!adapter->link_active)
	ifmr->ifm_status |= IFM_ACTIVE;
	/* translate cached link speed into an ifmedia subtype */
	switch (adapter->link_speed) {
	case IXGBE_LINK_SPEED_1GB_FULL:
		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
	case IXGBE_LINK_SPEED_10GB_FULL:
		ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
	case IXGBE_LINK_SPEED_100_FULL:
		ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
	case IXGBE_LINK_SPEED_10_FULL:
		ifmr->ifm_active |= IFM_10_T | IFM_FDX;
} /* ixv_if_media_status */
771 /************************************************************************
772 * ixv_if_media_change - Media Ioctl callback
774 * Called when the user changes speed/duplex using
775 * media/mediopt option with ifconfig.
776 ************************************************************************/
/* VFs cannot select media: only IFM_AUTO is accepted, anything else is
 * rejected with a diagnostic. */
ixv_if_media_change(if_ctx_t ctx)
	struct adapter *adapter = iflib_get_softc(ctx);
	struct ifmedia *ifm = iflib_get_media(ctx);
	INIT_DEBUGOUT("ixv_media_change: begin");
	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
		device_printf(adapter->dev, "Only auto media type\n");
} /* ixv_if_media_change */
800 /************************************************************************
803 * Negotiate the Mailbox API with the PF;
804 * start with the most featured API first.
805 ************************************************************************/
/* Walk the mailbox API list from most- to least-featured and settle on the
 * first version the PF accepts. */
ixv_negotiate_api(struct adapter *adapter)
	struct ixgbe_hw *hw = &adapter->hw;
	int mbx_api[] = { ixgbe_mbox_api_11,
	                  ixgbe_mbox_api_unknown };
	while (mbx_api[i] != ixgbe_mbox_api_unknown) {
		if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
} /* ixv_negotiate_api */
825 /************************************************************************
826 * ixv_if_multi_set - Multicast Update
828 * Called whenever multicast address list is updated.
829 ************************************************************************/
831 ixv_if_multi_set(if_ctx_t ctx)
833 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
834 struct adapter *adapter = iflib_get_softc(ctx);
836 struct ifmultiaddr *ifma;
837 if_t ifp = iflib_get_ifp(ctx);
840 IOCTL_DEBUGOUT("ixv_if_multi_set: begin");
842 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
843 if (ifma->ifma_addr->sa_family != AF_LINK)
845 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
846 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
847 IXGBE_ETH_LENGTH_OF_ADDRESS);
853 adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
854 ixv_mc_array_itr, TRUE);
855 } /* ixv_if_multi_set */
857 /************************************************************************
860 * An iterator function needed by the multicast shared code.
861 * It feeds the shared code routine the addresses in the
862 * array of ixv_set_multi() one by one.
863 ************************************************************************/
/* Iterator for update_mc_addr_list: returns the current address from the
 * flat mta[] array and advances *update_ptr to the next entry. */
ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
	u8 *addr = *update_ptr;
	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
	*update_ptr = newptr;
} /* ixv_mc_array_itr */
878 /************************************************************************
879 * ixv_if_local_timer - Timer routine
881 * Checks for link status, updates statistics,
882 * and runs the watchdog check.
883 ************************************************************************/
ixv_if_local_timer(if_ctx_t ctx, uint16_t qid)
	/* Fire off the adminq task */
	iflib_admin_intr_deferred(ctx);
} /* ixv_if_local_timer */
894 /************************************************************************
895 * ixv_if_update_admin_status - Update OS on link state
897 * Note: Only updates the OS on the cached link state.
898 * The real check of the hardware only happens with
900 ************************************************************************/
ixv_if_update_admin_status(if_ctx_t ctx)
	struct adapter *adapter = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);
	/* force a mailbox query of the real link state */
	adapter->hw.mac.get_link_status = TRUE;
	status = ixgbe_check_link(&adapter->hw, &adapter->link_speed,
	    &adapter->link_up, FALSE);
	if (status != IXGBE_SUCCESS && adapter->hw.adapter_stopped == FALSE) {
		/* Mailbox's Clear To Send status is lost or timeout occurred.
		 * We need reinitialization. */
		iflib_get_ifp(ctx)->if_init(ctx);
	if (adapter->link_up) {
		if (adapter->link_active == FALSE) {
			/* link_speed 128 is the 10Gb mailbox encoding */
			device_printf(dev, "Link is up %d Gbps %s \n",
			    ((adapter->link_speed == 128) ? 10 : 1),
			adapter->link_active = TRUE;
			iflib_link_state_change(ctx, LINK_STATE_UP,
	} else { /* Link down */
		if (adapter->link_active == TRUE) {
			device_printf(dev, "Link is Down\n");
			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
			adapter->link_active = FALSE;
	/* refresh counters on every admin pass */
	ixv_update_stats(adapter);
} /* ixv_if_update_admin_status */
943 /************************************************************************
944 * ixv_if_stop - Stop the hardware
946 * Disables all traffic on the adapter by issuing a
947 * global reset on the MAC and deallocates TX/RX buffers.
948 ************************************************************************/
ixv_if_stop(if_ctx_t ctx)
	struct adapter *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	INIT_DEBUGOUT("ixv_stop: begin\n");
	ixv_if_disable_intr(ctx);
	hw->mac.ops.reset_hw(hw);
	/* clear the stopped flag so stop_adapter does its work (the shared
	 * code early-exits when adapter_stopped is already TRUE; same idiom
	 * as in ixv_if_init) */
	adapter->hw.adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);
	/* Update the stack */
	adapter->link_up = FALSE;
	ixv_if_update_admin_status(ctx);
	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
972 /************************************************************************
973 * ixv_identify_hardware - Determine hardware revision.
974 ************************************************************************/
/* Read PCI IDs and map the device ID onto an ixgbe_mac_type. */
ixv_identify_hardware(if_ctx_t ctx)
	struct adapter *adapter = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	/* Save off the information about this board */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_get_revid(dev);
	hw->subsystem_vendor_id = pci_get_subvendor(dev);
	hw->subsystem_device_id = pci_get_subdevice(dev);
	/* A subset of set_mac_type */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_VF:
		hw->mac.type = ixgbe_mac_82599_vf;
	case IXGBE_DEV_ID_X540_VF:
		hw->mac.type = ixgbe_mac_X540_vf;
	case IXGBE_DEV_ID_X550_VF:
		hw->mac.type = ixgbe_mac_X550_vf;
	case IXGBE_DEV_ID_X550EM_X_VF:
		hw->mac.type = ixgbe_mac_X550EM_x_vf;
	case IXGBE_DEV_ID_X550EM_A_VF:
		hw->mac.type = ixgbe_mac_X550EM_a_vf;
		device_printf(dev, "unknown mac type\n");
		hw->mac.type = ixgbe_mac_unknown;
} /* ixv_identify_hardware */
1013 /************************************************************************
1014 * ixv_if_msix_intr_assign - Setup MSI-X Interrupt resources and handlers
1015 ************************************************************************/
ixv_if_msix_intr_assign(if_ctx_t ctx, int msix)
	struct adapter *adapter = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);
	struct ix_rx_queue *rx_que = adapter->rx_queues;
	struct ix_tx_queue *tx_que;
	int error, rid, vector = 0;
	/* one hard interrupt per RX queue */
	for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) {
		snprintf(buf, sizeof(buf), "rxq%d", i);
		error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
		    IFLIB_INTR_RX, ixv_msix_que, rx_que, rx_que->rxr.me, buf);
			device_printf(iflib_get_dev(ctx),
			    "Failed to allocate que int %d err: %d", i, error);
			/* include the failed queue so the cleanup loop below
			 * frees everything allocated up to this point */
			adapter->num_rx_queues = i + 1;
		rx_que->msix = vector;
		adapter->active_queues |= (u64)(1 << rx_que->msix);
	/* TX queues share the RX vectors: each TX queue is a softirq riding
	 * the IRQ of the RX queue it is paired with */
	for (int i = 0; i < adapter->num_tx_queues; i++) {
		snprintf(buf, sizeof(buf), "txq%d", i);
		tx_que = &adapter->tx_queues[i];
		tx_que->msix = i % adapter->num_rx_queues;
		iflib_softirq_alloc_generic(ctx,
		    &adapter->rx_queues[tx_que->msix].que_irq,
		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
	/* final vector: admin (mailbox/link) interrupt */
	error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid,
	    IFLIB_INTR_ADMIN, ixv_msix_mbx, adapter, 0, "aq");
		device_printf(iflib_get_dev(ctx),
		    "Failed to register admin handler");
	adapter->vector = vector;
	/*
	 * Due to a broken design QEMU will fail to properly
	 * enable the guest for MSIX unless the vectors in
	 * the table are all set up, so we must rewrite the
	 * ENABLE in the MSIX control register again at this
	 * point to cause it to successfully initialize us.
	 */
	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
		pci_find_cap(dev, PCIY_MSIX, &rid);
		rid += PCIR_MSIX_CTRL;
		msix_ctrl = pci_read_config(dev, rid, 2);
		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
		pci_write_config(dev, rid, msix_ctrl, 2);
	/* error path: release everything allocated so far */
	iflib_irq_free(ctx, &adapter->irq);
	rx_que = adapter->rx_queues;
	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++)
		iflib_irq_free(ctx, &rx_que->que_irq);
} /* ixv_if_msix_intr_assign */
1090 /************************************************************************
1091 * ixv_allocate_pci_resources
1092 ************************************************************************/
/* Map BAR0 and record the bus-space handles the register macros use. */
ixv_allocate_pci_resources(if_ctx_t ctx)
	struct adapter *adapter = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);
	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	if (!(adapter->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
	adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
	adapter->osdep.mem_bus_space_handle =
	    rman_get_bushandle(adapter->pci_mem);
	/* hw_addr is the opaque cookie IXGBE_READ/WRITE_REG dereference */
	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
} /* ixv_allocate_pci_resources */
1117 /************************************************************************
1118 * ixv_free_pci_resources
1119 ************************************************************************/
ixv_free_pci_resources(if_ctx_t ctx)
	struct adapter *adapter = iflib_get_softc(ctx);
	struct ix_rx_queue *que = adapter->rx_queues;
	device_t dev = iflib_get_dev(ctx);

	/* Release all MSI-X queue resources */
	if (adapter->intr_type == IFLIB_INTR_MSIX)
		iflib_irq_free(ctx, &adapter->irq);

	/* Free each RX queue's interrupt resource. */
	for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
		iflib_irq_free(ctx, &que->que_irq);

	/* Finally release the mapped register BAR itself. */
	if (adapter->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(adapter->pci_mem), adapter->pci_mem);
} /* ixv_free_pci_resources */
1142 /************************************************************************
1143 * ixv_setup_interface
1145 * Setup networking device structure and register an interface.
1146 ************************************************************************/
ixv_setup_interface(if_ctx_t ctx)
	struct adapter *adapter = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = adapter->shared;
	struct ifnet *ifp = iflib_get_ifp(ctx);

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	/* Report a fixed 10G baudrate; a VF has no direct PHY control. */
	if_setbaudrate(ifp, IF_Gbps(10));
	/* Size the send queue just below the TX descriptor count. */
	ifp->if_snd.ifq_maxlen = scctx->isc_ntxd[0] - 2;

	/* Max frame = MTU plus Ethernet header overhead. */
	adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
	/* Only "autoselect" media is advertised for the VF. */
	ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
} /* ixv_setup_interface */
1167 /************************************************************************
1168 * ixv_if_get_counter
1169 ************************************************************************/
ixv_if_get_counter(if_ctx_t ctx, ift_counter cnt)
	struct adapter *adapter = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);

	/* Serve the counters this driver maintains itself... */
	case IFCOUNTER_IPACKETS:
		return (adapter->ipackets);
	case IFCOUNTER_OPACKETS:
		return (adapter->opackets);
	case IFCOUNTER_IBYTES:
		return (adapter->ibytes);
	case IFCOUNTER_OBYTES:
		return (adapter->obytes);
	case IFCOUNTER_IMCASTS:
		return (adapter->imcasts);
	/* ...and defer everything else to the stack's default accounting. */
		return (if_get_counter_default(ifp, cnt));
} /* ixv_if_get_counter */
1192 /************************************************************************
1193 * ixv_initialize_transmit_units - Enable transmit unit.
1194 ************************************************************************/
ixv_initialize_transmit_units(if_ctx_t ctx)
	struct adapter *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	if_softc_ctx_t scctx = adapter->shared;
	struct ix_tx_queue *que = adapter->tx_queues;

	/* Program every TX queue: thresholds, indices, ring base, enable. */
	for (i = 0; i < adapter->num_tx_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;
		u64 tdba = txr->tx_paddr;	/* DMA address of the descriptor ring */

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= (8 << 16);	/* WTHRESH field starts at bit 16 */
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(j), 0);

		/* Set Tx Tail register */
		txr->tail = IXGBE_VFTDT(j);

		/* No report-status entries are outstanding yet. */
		txr->tx_rs_cidx = txr->tx_rs_pidx;
		/* Initialize the last processed descriptor to be the end of
		 * the ring, rather than the start, so that we avoid an
		 * off-by-one error when calculating how many descriptors are
		 * done in the credits_update function.
		 */
		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
			txr->tx_rsq[k] = QIDX_INVALID;

		/* Set Ring parameters */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
		    scctx->isc_ntxd[0] * sizeof(struct ixgbe_legacy_tx_desc));
		/* Disable relaxed ordering for descriptor writeback. */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);

		/* Enable the queue last, after it is fully configured. */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
} /* ixv_initialize_transmit_units */
1251 /************************************************************************
1252 * ixv_initialize_rss_mapping
1253 ************************************************************************/
ixv_initialize_rss_mapping(struct adapter *adapter)
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reta = 0, mrqc, rss_key[10];
	u32 rss_hash_config;

	/* Use the stack's RSS key when RSS is enabled, otherwise random. */
	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
		/* set up random bits */
		arc4rand(&rss_key, sizeof(rss_key), 0);

	/* Now fill out hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);

	/* Set up the redirection table */
	for (i = 0, j = 0; i < 64; i++, j++) {
		/* Wrap the queue index once every RX queue has been used. */
		if (j == adapter->num_rx_queues)
		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_rx_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_rx_queues;
		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta |= ((uint32_t)queue_id) << 24;
		/* Write four packed 8-bit entries per VFRETA register. */
		IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	/*
	 * Disable UDP - IP fragments aren't currently being handled
	 * and so we end up with a mix of 2-tuple and 4-tuple
	 */
	rss_hash_config = RSS_HASHTYPE_RSS_IPV4
	    | RSS_HASHTYPE_RSS_TCP_IPV4
	    | RSS_HASHTYPE_RSS_IPV6
	    | RSS_HASHTYPE_RSS_TCP_IPV6;

	/* Translate the stack's hash types into MRQC field-enable bits. */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
} /* ixv_initialize_rss_mapping */
1344 /************************************************************************
1345 * ixv_initialize_receive_units - Setup receive registers and features.
1346 ************************************************************************/
ixv_initialize_receive_units(if_ctx_t ctx)
	struct adapter *adapter = iflib_get_softc(ctx);
	if_softc_ctx_t scctx;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ifnet *ifp = iflib_get_ifp(ctx);
	struct ix_rx_queue *que = adapter->rx_queues;

	/* Choose the receive buffer size (in SRRCTL units) from the MTU. */
	if (ifp->if_mtu > ETHERMTU)
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	/* Packet-split header types programmed into VFPSRTYPE. */
	psrtype = IXGBE_PSRTYPE_TCPHDR
	    | IXGBE_PSRTYPE_UDPHDR
	    | IXGBE_PSRTYPE_IPV4HDR
	    | IXGBE_PSRTYPE_IPV6HDR
	    | IXGBE_PSRTYPE_L2HDR;

	if (adapter->num_rx_queues > 1)

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

	/* Tell PF our max_frame size */
	if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
		device_printf(adapter->dev, "There is a problem with the PF setup. It is likely the receive unit for this VF will not function correctly.\n");

	scctx = adapter->shared;

	/* Program every RX queue: disable, configure ring, re-enable. */
	for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
		struct rx_ring *rxr = &que->rxr;
		u64 rdba = rxr->rx_paddr;

		/* Disable the queue */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		/* Bounded poll for the disable to take effect. */
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
			    IXGBE_RXDCTL_ENABLE)

		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
		    scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));

		/* Reset the ring indices */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

		/* Set up the SRRCTL register */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);

		/* Capture Rx Tail index */
		rxr->tail = IXGBE_VFRDT(rxr->me);

		/* Do the queue enabling last */
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		/* Bounded poll for the enable to take effect. */
		for (int l = 0; l < 10; l++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
			    IXGBE_RXDCTL_ENABLE)

		/* Set the Tail Pointer */
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
		if (ifp->if_capenable & IFCAP_NETMAP) {
			struct netmap_adapter *na = NA(ifp);
			struct netmap_kring *kring = na->rx_rings[j];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
			    scctx->isc_nrxd[0] - 1);

	/* Finish RX setup by programming the RSS redirection/hashing. */
	ixv_initialize_rss_mapping(adapter);
} /* ixv_initialize_receive_units */
1463 /************************************************************************
1464 * ixv_setup_vlan_support
1465 ************************************************************************/
ixv_setup_vlan_support(if_ctx_t ctx)
	struct ifnet *ifp = iflib_get_ifp(ctx);
	struct adapter *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ctrl, vid, vfta, retry;

	/*
	 * We get here thru if_init, meaning
	 * a soft reset, this has already cleared
	 * the VFTA and other state, so if there
	 * have been no vlan's registered do nothing.
	 */
	if (adapter->num_vlans == 0)

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		/* Enable the queues */
		for (int i = 0; i < adapter->num_rx_queues; i++) {
			ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
			ctrl |= IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
			/*
			 * Let Rx path know that it needs to store VLAN tag
			 * as part of extra mbuf info.
			 */
			adapter->rx_queues[i].rxr.vtag_strip = TRUE;

	/*
	 * If filtering VLAN tags is disabled,
	 * there is no need to fill VLAN Filter Table Array (VFTA).
	 */
	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)

	/*
	 * A soft reset zero's out the VFTA, so
	 * we need to repopulate it now.
	 */
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
		if (ixv_shadow_vfta[i] == 0)
		vfta = ixv_shadow_vfta[i];
		/*
		 * Reconstruct the vlan id's
		 * based on the bits set in each
		 * of the array ints.
		 */
		for (int j = 0; j < 32; j++) {
			if ((vfta & (1 << j)) == 0)
			/* Call the shared code mailbox routine */
			while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
} /* ixv_setup_vlan_support */
1531 /************************************************************************
1532 * ixv_if_register_vlan
1534 * Run via a vlan config EVENT, it enables us to use the
1535 * HW Filter table since we can get the vlan id. This just
1536 * creates the entry in the soft version of the VFTA, init
1537 * will repopulate the real table.
1538 ************************************************************************/
ixv_if_register_vlan(if_ctx_t ctx, u16 vtag)
	struct adapter *adapter = iflib_get_softc(ctx);

	/* The high bits of the vtag pick a 32-bit word in the shadow VFTA. */
	index = (vtag >> 5) & 0x7F;
	ixv_shadow_vfta[index] |= (1 << bit);
	++adapter->num_vlans;
} /* ixv_if_register_vlan */
1551 /************************************************************************
1552 * ixv_if_unregister_vlan
1554 * Run via a vlan unconfig EVENT, remove our entry
1556 ************************************************************************/
ixv_if_unregister_vlan(if_ctx_t ctx, u16 vtag)
	struct adapter *adapter = iflib_get_softc(ctx);

	/* Clear the corresponding bit in the shadow VFTA word. */
	index = (vtag >> 5) & 0x7F;
	ixv_shadow_vfta[index] &= ~(1 << bit);
	--adapter->num_vlans;
} /* ixv_if_unregister_vlan */
1569 /************************************************************************
1570 * ixv_if_enable_intr
1571 ************************************************************************/
ixv_if_enable_intr(if_ctx_t ctx)
	struct adapter *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ix_rx_queue *que = adapter->rx_queues;
	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

	/* Auto-clear everything except the "other" and link-change causes. */
	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);

	/* Unmask each RX queue's MSI-X vector. */
	for (int i = 0; i < adapter->num_rx_queues; i++, que++)
		ixv_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);
} /* ixv_if_enable_intr */
1592 /************************************************************************
1593 * ixv_if_disable_intr
1594 ************************************************************************/
ixv_if_disable_intr(if_ctx_t ctx)
	struct adapter *adapter = iflib_get_softc(ctx);
	/* Clear auto-clear, then mask every interrupt cause. */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_FLUSH(&adapter->hw);
} /* ixv_if_disable_intr */
1604 /************************************************************************
1605 * ixv_if_rx_queue_intr_enable
1606 ************************************************************************/
ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
	struct adapter *adapter = iflib_get_softc(ctx);
	struct ix_rx_queue *que = &adapter->rx_queues[rxqid];

	/* Re-arm just this queue's interrupt after an iflib rxeof pass. */
	ixv_enable_queue(adapter, que->rxr.me);
} /* ixv_if_rx_queue_intr_enable */
1618 /************************************************************************
1621 * Setup the correct IVAR register for a particular MSI-X interrupt
1622 * - entry is the register array entry
1623 * - vector is the MSI-X vector for this queue
1624 * - type is RX/TX/MISC
1625 ************************************************************************/
ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
	struct ixgbe_hw *hw = &adapter->hw;

	/* The hardware requires the "allocation valid" bit with the vector. */
	vector |= IXGBE_IVAR_ALLOC_VAL;

	if (type == -1) { /* MISC IVAR */
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else { /* RX/TX IVARS */
		/* Each 32-bit IVAR packs four 8-bit entries; pick the byte slot. */
		index = (16 * (entry & 1)) + (8 * type);
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
} /* ixv_set_ivar */
1648 /************************************************************************
1649 * ixv_configure_ivars
1650 ************************************************************************/
ixv_configure_ivars(struct adapter *adapter)
	struct ix_rx_queue *que = adapter->rx_queues;

	/* RX and TX queue i share the same MSI-X vector (que->msix). */
	MPASS(adapter->num_rx_queues == adapter->num_tx_queues);

	for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
		/* First the RX queue entry */
		ixv_set_ivar(adapter, i, que->msix, 0);
		/* ... and the TX */
		ixv_set_ivar(adapter, i, que->msix, 1);
		/* Set an initial value in EITR */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
		    IXGBE_EITR_DEFAULT);

	/* For the mailbox interrupt */
	ixv_set_ivar(adapter, 1, adapter->vector, -1);
} /* ixv_configure_ivars */
1672 /************************************************************************
1675 * The VF stats registers never have a truly virgin
1676 * starting point, so this routine tries to make an
1677 * artificial one, marking ground zero on attach as
1679 ************************************************************************/
ixv_save_stats(struct adapter *adapter)
	/* Only fold in deltas once the counters have actually moved. */
	if (adapter->stats.vf.vfgprc || adapter->stats.vf.vfgptc) {
		adapter->stats.vf.saved_reset_vfgprc +=
		    adapter->stats.vf.vfgprc - adapter->stats.vf.base_vfgprc;
		adapter->stats.vf.saved_reset_vfgptc +=
		    adapter->stats.vf.vfgptc - adapter->stats.vf.base_vfgptc;
		adapter->stats.vf.saved_reset_vfgorc +=
		    adapter->stats.vf.vfgorc - adapter->stats.vf.base_vfgorc;
		adapter->stats.vf.saved_reset_vfgotc +=
		    adapter->stats.vf.vfgotc - adapter->stats.vf.base_vfgotc;
		adapter->stats.vf.saved_reset_vfmprc +=
		    adapter->stats.vf.vfmprc - adapter->stats.vf.base_vfmprc;
} /* ixv_save_stats */
1697 /************************************************************************
1699 ************************************************************************/
ixv_init_stats(struct adapter *adapter)
	struct ixgbe_hw *hw = &adapter->hw;

	/* Snapshot the current hardware counter values... */
	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.vf.last_vfgorc |=
	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);

	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.vf.last_vfgotc |=
	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);

	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	/* ...and make them the baseline for all future deltas. */
	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
} /* ixv_init_stats */
/*
 * Accumulate a rolling 32-bit hardware counter into a 64-bit software
 * total, compensating for the register wrapping past 2^32 since the
 * last read (count < last implies exactly one wrap).
 */
#define UPDATE_STAT_32(reg, last, count) \
	u32 current = IXGBE_READ_REG(hw, reg); \
	if (current < last) \
		count += 0x100000000LL; \
	count &= 0xFFFFFFFF00000000LL; \
/*
 * Like UPDATE_STAT_32, but for the 36-bit octet counters that are split
 * across separate LSB and MSB registers; wrap occurs at 2^36.
 */
#define UPDATE_STAT_36(lsb, msb, last, count) \
	u64 cur_lsb = IXGBE_READ_REG(hw, lsb); \
	u64 cur_msb = IXGBE_READ_REG(hw, msb); \
	u64 current = ((cur_msb << 32) | cur_lsb); \
	if (current < last) \
		count += 0x1000000000LL; \
	count &= 0xFFFFFFF000000000LL; \
1746 /************************************************************************
1747 * ixv_update_stats - Update the board statistics counters.
1748 ************************************************************************/
ixv_update_stats(struct adapter *adapter)
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;

	/* Pull the wrap-corrected counters from hardware. */
	UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
	    adapter->stats.vf.vfgprc);
	UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
	    adapter->stats.vf.vfgptc);
	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
	    adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
	    adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
	UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
	    adapter->stats.vf.vfmprc);

	/* Fill out the OS statistics structure */
	IXGBE_SET_IPACKETS(adapter, stats->vfgprc);
	IXGBE_SET_OPACKETS(adapter, stats->vfgptc);
	IXGBE_SET_IBYTES(adapter, stats->vfgorc);
	IXGBE_SET_OBYTES(adapter, stats->vfgotc);
	IXGBE_SET_IMCASTS(adapter, stats->vfmprc);
} /* ixv_update_stats */
1774 /************************************************************************
1775 * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
1776 ************************************************************************/
ixv_add_stats_sysctls(struct adapter *adapter)
	device_t dev = adapter->dev;
	struct ix_tx_queue *tx_que = adapter->tx_queues;
	struct ix_rx_queue *rx_que = adapter->rx_queues;
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
	struct sysctl_oid *stat_node, *queue_node;
	struct sysctl_oid_list *stat_list, *queue_list;

#define QUEUE_NAME_LEN 32
	char namebuf[QUEUE_NAME_LEN];

	/* Driver Statistics */
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
	    CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
	    CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");

	/* Per-TX-queue counters under a "queueN" node each. */
	for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
		struct tx_ring *txr = &tx_que->txr;
		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
		    CTLFLAG_RD, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);

		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
		    CTLFLAG_RD, &(txr->tso_tx), "TSO Packets");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
		    CTLFLAG_RD, &(txr->total_packets), "TX Packets");

	/* Per-RX-queue counters. */
	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) {
		struct rx_ring *rxr = &rx_que->rxr;
		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
		    CTLFLAG_RD, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);

		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
		    CTLFLAG_RD, &(rx_que->irqs), "IRQs on queue");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
		    CTLFLAG_RD, &(rxr->rx_packets), "RX packets");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
		    CTLFLAG_RD, &(rxr->rx_bytes), "RX bytes");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
		    CTLFLAG_RD, &(rxr->rx_discarded), "Discarded RX packets");

	/* MAC-level statistics sourced from hardware registers. */
	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
	    CTLFLAG_RD, NULL, "VF Statistics (read from HW registers)");
	stat_list = SYSCTL_CHILDREN(stat_node);

	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
	    CTLFLAG_RD, &stats->vfgprc, "Good Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
	    CTLFLAG_RD, &stats->vfgorc, "Good Octets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
	    CTLFLAG_RD, &stats->vfmprc, "Multicast Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
	    CTLFLAG_RD, &stats->vfgptc, "Good Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
	    CTLFLAG_RD, &stats->vfgotc, "Good Octets Transmitted");
} /* ixv_add_stats_sysctls */
1845 /************************************************************************
1846 * ixv_print_debug_info
 * Called from the driver's "debug" sysctl handler (ixv_sysctl_debug).
1849 * Provides a way to take a look at important statistics
1850 * maintained by the driver and hardware.
1851 ************************************************************************/
ixv_print_debug_info(struct adapter *adapter)
	device_t dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;

	/* Dump the hardware error-byte counter and mailbox IRQ tally. */
	device_printf(dev, "Error Byte Count = %u \n",
	    IXGBE_READ_REG(hw, IXGBE_ERRBC));

	device_printf(dev, "MBX IRQ Handled: %lu\n", (long)adapter->link_irq);
} /* ixv_print_debug_info */
1864 /************************************************************************
1866 ************************************************************************/
ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
	struct adapter *adapter;

	error = sysctl_handle_int(oidp, &result, 0, req);

	/* Do nothing unless a new value was actually written. */
	if (error || !req->newptr)

	adapter = (struct adapter *)arg1;
	ixv_print_debug_info(adapter);
} /* ixv_sysctl_debug */
1887 /************************************************************************
1888 * ixv_init_device_features
1889 ************************************************************************/
ixv_init_device_features(struct adapter *adapter)
	/* Capabilities common to all supported VF MACs. */
	adapter->feat_cap = IXGBE_FEATURE_NETMAP
	    | IXGBE_FEATURE_LEGACY_TX;

	/* A tad short on feature flags for VFs, atm. */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599_vf:
	case ixgbe_mac_X540_vf:
	case ixgbe_mac_X550_vf:
	case ixgbe_mac_X550EM_x_vf:
	case ixgbe_mac_X550EM_a_vf:
		adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;

	/* Enabled by default... */
	/* Is a virtual function (VF) */
	if (adapter->feat_cap & IXGBE_FEATURE_VF)
		adapter->feat_en |= IXGBE_FEATURE_VF;
	/* Netmap support */
	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
	/* Receive-Side Scaling (RSS) */
	if (adapter->feat_cap & IXGBE_FEATURE_RSS)
		adapter->feat_en |= IXGBE_FEATURE_RSS;
	/* Needs advanced context descriptor regardless of offloads req'd */
	if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
		adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
} /* ixv_init_device_features */