1 /******************************************************************************
3 Copyright (c) 2001-2017, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
37 #include "opt_inet6.h"
42 #include <net/netmap.h>
43 #include <dev/netmap/netmap_kern.h>
45 /************************************************************************
47 ************************************************************************/
48 char ixv_driver_version[] = "2.0.1-k";
50 /************************************************************************
53 * Used by probe to select devices to load on
54 * Last field stores an index into ixv_strings
55 * Last entry must be all 0s
57 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
58 ************************************************************************/
/*
 * PCI match table consumed by iflib probe (registered below through
 * isc_vendor_info).  One entry per supported Intel 10GbE VF device ID.
 * NOTE(review): opening brace and the all-zero terminator entry are not
 * visible in this fragment.
 */
static pci_vendor_info_t ixv_vendor_info_array[] =
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
/* required last entry */
70 /************************************************************************
72 ************************************************************************/
/* iflib device-interface (ifdi_*) entry points */
static void *ixv_register(device_t dev);
static int ixv_if_attach_pre(if_ctx_t ctx);
static int ixv_if_attach_post(if_ctx_t ctx);
static int ixv_if_detach(if_ctx_t ctx);
static int ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
static int ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
static int ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
static void ixv_if_queues_free(if_ctx_t ctx);
/* Attach-time hardware identification / PCI resource helpers */
static void ixv_identify_hardware(if_ctx_t ctx);
static void ixv_init_device_features(struct adapter *);
static int ixv_allocate_pci_resources(if_ctx_t ctx);
static void ixv_free_pci_resources(if_ctx_t ctx);
static int ixv_setup_interface(if_ctx_t ctx);
static void ixv_if_media_status(if_ctx_t , struct ifmediareq *);
static int ixv_if_media_change(if_ctx_t ctx);
static void ixv_if_update_admin_status(if_ctx_t ctx);
static int ixv_if_msix_intr_assign(if_ctx_t ctx, int msix);
static int ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
static void ixv_if_init(if_ctx_t ctx);
static void ixv_if_local_timer(if_ctx_t ctx, uint16_t qid);
static void ixv_if_stop(if_ctx_t ctx);
/* Mailbox / hardware programming helpers */
static int ixv_negotiate_api(struct adapter *);
static void ixv_initialize_transmit_units(if_ctx_t ctx);
static void ixv_initialize_receive_units(if_ctx_t ctx);
static void ixv_initialize_rss_mapping(struct adapter *);
static void ixv_setup_vlan_support(if_ctx_t ctx);
static void ixv_configure_ivars(struct adapter *);
static void ixv_if_enable_intr(if_ctx_t ctx);
static void ixv_if_disable_intr(if_ctx_t ctx);
static void ixv_if_multi_set(if_ctx_t ctx);
static void ixv_if_register_vlan(if_ctx_t, u16);
static void ixv_if_unregister_vlan(if_ctx_t, u16);
static uint64_t ixv_if_get_counter(if_ctx_t, ift_counter);
/* Statistics bookkeeping and sysctl plumbing */
static void ixv_save_stats(struct adapter *);
static void ixv_init_stats(struct adapter *);
static void ixv_update_stats(struct adapter *);
static void ixv_add_stats_sysctls(struct adapter *adapter);
static int ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
static void ixv_set_ivar(struct adapter *, u8, u8, s8);
static u8 *ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
/* The MSI-X Interrupt handlers */
static int ixv_msix_que(void *);
static int ixv_msix_mbx(void *);
127 /************************************************************************
128 * FreeBSD Device Interface Entry Points
129 ************************************************************************/
/*
 * newbus method table: probe/attach/detach/shutdown are all delegated to
 * the generic iflib entry points; only device_register is driver-specific.
 * NOTE(review): the DEVMETHOD_END terminator and closing braces are not
 * visible in this fragment.
 */
static device_method_t ixv_methods[] = {
/* Device interface */
DEVMETHOD(device_register, ixv_register),
DEVMETHOD(device_probe, iflib_device_probe),
DEVMETHOD(device_attach, iflib_device_attach),
DEVMETHOD(device_detach, iflib_device_detach),
DEVMETHOD(device_shutdown, iflib_device_shutdown),
static driver_t ixv_driver = {
"ixv", ixv_methods, sizeof(struct adapter),
/* Register the driver on the pci bus and declare load-order deps. */
devclass_t ixv_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
IFLIB_PNP_INFO(pci, ixv_driver, ixv_vendor_info_array);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
/* netmap dependency — presumably guarded by #ifdef DEV_NETMAP (opening
 * directive not visible in this fragment). */
MODULE_DEPEND(ixv, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */
/*
 * iflib driver-interface method table: maps the generic ifdi_* operations
 * onto the ixv_if_* handlers declared above.
 */
static device_method_t ixv_if_methods[] = {
DEVMETHOD(ifdi_attach_pre, ixv_if_attach_pre),
DEVMETHOD(ifdi_attach_post, ixv_if_attach_post),
DEVMETHOD(ifdi_detach, ixv_if_detach),
DEVMETHOD(ifdi_init, ixv_if_init),
DEVMETHOD(ifdi_stop, ixv_if_stop),
DEVMETHOD(ifdi_msix_intr_assign, ixv_if_msix_intr_assign),
DEVMETHOD(ifdi_intr_enable, ixv_if_enable_intr),
DEVMETHOD(ifdi_intr_disable, ixv_if_disable_intr),
/* TX and RX queue interrupt enable intentionally share one handler:
 * TX completions are signalled on the paired RX vector. */
DEVMETHOD(ifdi_tx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
DEVMETHOD(ifdi_rx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
DEVMETHOD(ifdi_tx_queues_alloc, ixv_if_tx_queues_alloc),
DEVMETHOD(ifdi_rx_queues_alloc, ixv_if_rx_queues_alloc),
DEVMETHOD(ifdi_queues_free, ixv_if_queues_free),
DEVMETHOD(ifdi_update_admin_status, ixv_if_update_admin_status),
DEVMETHOD(ifdi_multi_set, ixv_if_multi_set),
DEVMETHOD(ifdi_mtu_set, ixv_if_mtu_set),
DEVMETHOD(ifdi_media_status, ixv_if_media_status),
DEVMETHOD(ifdi_media_change, ixv_if_media_change),
DEVMETHOD(ifdi_timer, ixv_if_local_timer),
DEVMETHOD(ifdi_vlan_register, ixv_if_register_vlan),
DEVMETHOD(ifdi_vlan_unregister, ixv_if_unregister_vlan),
DEVMETHOD(ifdi_get_counter, ixv_if_get_counter),
static driver_t ixv_if_driver = {
"ixv_if", ixv_if_methods, sizeof(struct adapter)
/*
 * TUNEABLE PARAMETERS: loader.conf knobs read at module load time.
 */
/* Flow control setting, default to full */
static int ixv_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload,
 * it can be a performance win in some workloads, but
 * in others it actually hurts, its off by default.
 */
static int ixv_header_split = FALSE;
TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
/*
 * Shadow VFTA table, this is needed because
 * the real filter table gets cleared during
 * a soft reset and we need to repopulate it.
 */
static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
/* TX/RX fast-path method table, defined in the shared ix_txrx code. */
extern struct if_txrx ixgbe_txrx;
/*
 * Shared context template handed to iflib: descriptor ring sizing and
 * alignment, DMA segment limits, and the pointers back to this driver's
 * vendor table, version string and method table.
 */
static struct if_shared_ctx ixv_sctx_init = {
.isc_magic = IFLIB_MAGIC,
.isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */
.isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
.isc_tx_maxsegsize = PAGE_SIZE,
.isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
.isc_tso_maxsegsize = PAGE_SIZE,
/* Largest supported RX buffer is a 16k jumbo cluster, one segment. */
.isc_rx_maxsize = MJUM16BYTES,
.isc_rx_nsegments = 1,
.isc_rx_maxsegsize = MJUM16BYTES,
/* One extra vector beyond the queues, used for the mailbox/link IRQ. */
.isc_admin_intrcnt = 1,
.isc_vendor_info = ixv_vendor_info_array,
.isc_driver_version = ixv_driver_version,
.isc_driver = &ixv_if_driver,
.isc_nrxd_min = {MIN_RXD},
.isc_ntxd_min = {MIN_TXD},
.isc_nrxd_max = {MAX_RXD},
.isc_ntxd_max = {MAX_TXD},
.isc_nrxd_default = {DEFAULT_RXD},
.isc_ntxd_default = {DEFAULT_TXD},
237 ixv_register(device_t dev)
242 /************************************************************************
243 * ixv_if_tx_queues_alloc
244 ************************************************************************/
/*
 * iflib callback: allocate per-TX-queue software state for 'ntxqsets'
 * queue sets.  The descriptor rings themselves were DMA-allocated by
 * iflib and arrive via vaddrs/paddrs.  Returns 0 on success; on
 * allocation failure frees everything via ixv_if_queues_free().
 */
ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
int ntxqs, int ntxqsets)
struct adapter *adapter = iflib_get_softc(ctx);
if_softc_ctx_t scctx = adapter->shared;
struct ix_tx_queue *que;
MPASS(adapter->num_tx_queues == ntxqsets);
/* Allocate queue structure memory (result stored in adapter->tx_queues,
 * as shown by the NULL check below; assignment line elided here). */
(struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
M_DEVBUF, M_NOWAIT | M_ZERO);
if (!adapter->tx_queues) {
device_printf(iflib_get_dev(ctx),
"Unable to allocate TX ring memory\n");
for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) {
struct tx_ring *txr = &que->txr;
txr->adapter = que->adapter = adapter;
adapter->active_queues |= (u64)1 << txr->me;
/* Allocate report status array (tracks completed descriptors). */
if (!(txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
/* Mark every slot unused until the TX path fills it in. */
for (j = 0; j < scctx->isc_ntxd[0]; j++)
txr->tx_rsq[j] = QIDX_INVALID;
/* get the virtual and physical address of the hardware queues */
txr->tail = IXGBE_VFTDT(txr->me);
txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i*ntxqs];
txr->tx_paddr = paddrs[i*ntxqs];
txr->total_packets = 0;
device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
adapter->num_tx_queues);
/* Error path: release anything allocated so far. */
ixv_if_queues_free(ctx);
} /* ixv_if_tx_queues_alloc */
302 /************************************************************************
303 * ixv_if_rx_queues_alloc
304 ************************************************************************/
306 ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
307 int nrxqs, int nrxqsets)
309 struct adapter *adapter = iflib_get_softc(ctx);
310 struct ix_rx_queue *que;
313 MPASS(adapter->num_rx_queues == nrxqsets);
316 /* Allocate queue structure memory */
318 (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) * nrxqsets,
319 M_DEVBUF, M_NOWAIT | M_ZERO);
320 if (!adapter->rx_queues) {
321 device_printf(iflib_get_dev(ctx),
322 "Unable to allocate TX ring memory\n");
327 for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
328 struct rx_ring *rxr = &que->rxr;
330 rxr->adapter = que->adapter = adapter;
333 /* get the virtual and physical address of the hw queues */
334 rxr->tail = IXGBE_VFRDT(rxr->me);
335 rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
336 rxr->rx_paddr = paddrs[i*nrxqs];
341 device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
342 adapter->num_rx_queues);
347 ixv_if_queues_free(ctx);
350 } /* ixv_if_rx_queues_alloc */
352 /************************************************************************
354 ************************************************************************/
/*
 * iflib callback: release all software queue state allocated by the
 * two allocators above.  Safe to call from their error paths: every
 * free is guarded, and the pointers are NULLed afterwards so a second
 * invocation is harmless.
 */
ixv_if_queues_free(if_ctx_t ctx)
struct adapter *adapter = iflib_get_softc(ctx);
struct ix_tx_queue *que = adapter->tx_queues;
/* Free each TX ring's report-status array first. */
for (i = 0; i < adapter->num_tx_queues; i++, que++) {
struct tx_ring *txr = &que->txr;
if (txr->tx_rsq == NULL)
free(txr->tx_rsq, M_DEVBUF);
if (adapter->tx_queues != NULL)
free(adapter->tx_queues, M_DEVBUF);
if (adapter->rx_queues != NULL)
free(adapter->rx_queues, M_DEVBUF);
adapter->tx_queues = NULL;
adapter->rx_queues = NULL;
} /* ixv_if_queues_free */
382 /************************************************************************
383 * ixv_if_attach_pre - Device initialization routine
385 * Called when the driver is being loaded.
386 * Identifies the type of hardware, allocates all resources
387 * and initializes the hardware.
389 * return 0 on success, positive on failure
390 ************************************************************************/
ixv_if_attach_pre(if_ctx_t ctx)
struct adapter *adapter;
if_softc_ctx_t scctx;
INIT_DEBUGOUT("ixv_attach: begin");
/* Allocate, clear, and link in our adapter structure */
dev = iflib_get_dev(ctx);
adapter = iflib_get_softc(ctx);
adapter->hw.back = adapter;
scctx = adapter->shared = iflib_get_softc_ctx(ctx);
adapter->media = iflib_get_media(ctx);
/* Do base PCI setup - map BAR0 */
if (ixv_allocate_pci_resources(ctx)) {
device_printf(dev, "ixv_allocate_pci_resources() failed!\n");
/* Read/write "debug" sysctl under this device's tree. */
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixv_sysctl_debug, "I",
/* Determine hardware revision */
ixv_identify_hardware(ctx);
ixv_init_device_features(adapter);
/* Initialize the shared code */
error = ixgbe_init_ops_vf(hw);
device_printf(dev, "ixgbe_init_ops_vf() failed!\n");
/* Setup the mailbox */
ixgbe_init_mbx_params_vf(hw);
/* Reset then init the VF hardware; warn (distinctly) on each failure. */
error = hw->mac.ops.reset_hw(hw);
if (error == IXGBE_ERR_RESET_FAILED)
device_printf(dev, "...reset_hw() failure: Reset Failed!\n");
device_printf(dev, "...reset_hw() failed with error %d\n",
error = hw->mac.ops.init_hw(hw);
device_printf(dev, "...init_hw() failed with error %d\n",
/* Negotiate mailbox API version */
error = ixv_negotiate_api(adapter);
"Mailbox API negotiation failed during attach!\n");
/* If no mac address was assigned, make a random one */
if (!ixv_check_ether_addr(hw->mac.addr)) {
u8 addr[ETHER_ADDR_LEN];
arc4rand(&addr, sizeof(addr), 0);
bcopy(addr, hw->mac.addr, sizeof(addr));
bcopy(addr, hw->mac.perm_addr, sizeof(addr));
/* Most of the iflib initialization... */
iflib_set_mac(ctx, hw->mac.addr);
/* X550-class VFs support two queue pairs, everything else one. */
switch (adapter->hw.mac.type) {
case ixgbe_mac_X550_vf:
case ixgbe_mac_X550EM_x_vf:
case ixgbe_mac_X550EM_a_vf:
scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 2;
scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
/* Ring byte sizes, rounded up to the DMA alignment requirement;
 * TX adds a u32 (head write-back area). */
scctx->isc_txqsizes[0] =
roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
sizeof(u32), DBA_ALIGN);
scctx->isc_rxqsizes[0] =
roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
scctx->isc_msix_bar = PCIR_BAR(MSIX_82598_BAR);
scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
scctx->isc_txrx = &ixgbe_txrx;
/*
 * Tell the upper layer(s) we support everything the PF
 * driver does except...
 */
scctx->isc_capabilities = IXGBE_CAPS;
/* XOR strips WOL — assumes IXGBE_CAPS includes IFCAP_WOL; TODO confirm. */
scctx->isc_capabilities ^= IFCAP_WOL;
scctx->isc_capenable = scctx->isc_capabilities;
INIT_DEBUGOUT("ixv_if_attach_pre: end");
/* Error path: undo the BAR mapping done above. */
ixv_free_pci_resources(ctx);
} /* ixv_if_attach_pre */
/*
 * Second-stage attach, run after iflib has created the ifnet: set up
 * the OS-facing interface, then initialize the statistics machinery.
 * Returns 0 on success, an errno from ixv_setup_interface() otherwise.
 */
ixv_if_attach_post(if_ctx_t ctx)
struct adapter *adapter = iflib_get_softc(ctx);
device_t dev = iflib_get_dev(ctx);
/* Setup OS specific network interface */
error = ixv_setup_interface(ctx);
device_printf(dev, "Interface setup failed: %d\n", error);
/* Do the stats setup */
ixv_save_stats(adapter);
ixv_init_stats(adapter);
ixv_add_stats_sysctls(adapter);
} /* ixv_if_attach_post */
548 /************************************************************************
549 * ixv_detach - Device removal routine
551 * Called when the driver is being removed.
552 * Stops the adapter and deallocates all the resources
553 * that were allocated for driver operation.
555 * return 0 on success, positive on failure
556 ************************************************************************/
/*
 * iflib detach callback: only the PCI resources need explicit release
 * here — queue memory is torn down separately via ifdi_queues_free.
 */
ixv_if_detach(if_ctx_t ctx)
INIT_DEBUGOUT("ixv_detach: begin");
ixv_free_pci_resources(ctx);
} /* ixv_if_detach */
567 /************************************************************************
569 ************************************************************************/
/*
 * iflib MTU-change callback: reject MTUs whose resulting frame would
 * exceed the hardware maximum, otherwise record the new max frame size.
 */
ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
struct adapter *adapter = iflib_get_softc(ctx);
struct ifnet *ifp = iflib_get_ifp(ctx);
IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
if (mtu > IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) {
/* NOTE(review): reads ifp->if_mtu rather than the 'mtu' argument —
 * presumably an elided "ifp->if_mtu = mtu;" precedes this; confirm. */
adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
} /* ixv_if_mtu_set */
588 /************************************************************************
589 * ixv_if_init - Init entry point
591 * Used in two ways: It is used by the stack as an init entry
592 * point in network interface structure. It is also used
593 * by the driver as a hw/sw initialization routine to get
594 * to a consistent state.
596 * return 0 on success, positive on failure
597 ************************************************************************/
ixv_if_init(if_ctx_t ctx)
struct adapter *adapter = iflib_get_softc(ctx);
struct ifnet *ifp = iflib_get_ifp(ctx);
device_t dev = iflib_get_dev(ctx);
struct ixgbe_hw *hw = &adapter->hw;
INIT_DEBUGOUT("ixv_if_init: begin");
/* Clear the stopped flag so stop_adapter() actually runs. */
hw->adapter_stopped = FALSE;
hw->mac.ops.stop_adapter(hw);
/* reprogram the RAR[0] in case user changed it. */
hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
/* Get the latest mac address, User can use a LAA */
bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);
/* Reset VF and renegotiate mailbox API version */
hw->mac.ops.reset_hw(hw);
hw->mac.ops.start_hw(hw);
error = ixv_negotiate_api(adapter);
"Mailbox API negotiation failed in if_init!\n");
/* Program the TX side of the hardware. */
ixv_initialize_transmit_units(ctx);
/* Setup Multicast table */
ixv_if_multi_set(ctx);
/*
 * Determine the correct mbuf pool
 * for doing jumbo/headersplit
 */
if (ifp->if_mtu > ETHERMTU)
adapter->rx_mbuf_sz = MJUMPAGESIZE;
adapter->rx_mbuf_sz = MCLBYTES;
/* Configure RX settings */
ixv_initialize_receive_units(ctx);
/* Set up VLAN offload and filter */
ixv_setup_vlan_support(ctx);
/* Set up MSI-X routing */
ixv_configure_ivars(adapter);
/* Set up auto-mask */
IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
/* Set moderation on the Link interrupt */
IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
/* Re-baseline statistics after the reset above. */
ixv_init_stats(adapter);
/* Config/Enable Link */
hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
/* And now turn on interrupts */
ixv_if_enable_intr(ctx);
670 /************************************************************************
672 ************************************************************************/
/*
 * Unmask the interrupt for one queue by setting its bit in VTEIMS.
 * NOTE(review): '1 << vector' is an int shift — UB if vector >= 31;
 * fine for the 1-2 queue vectors this VF driver uses.
 */
ixv_enable_queue(struct adapter *adapter, u32 vector)
struct ixgbe_hw *hw = &adapter->hw;
u32 queue = 1 << vector;
mask = (IXGBE_EIMS_RTX_QUEUE & queue);
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
} /* ixv_enable_queue */
684 /************************************************************************
686 ************************************************************************/
/*
 * Mask the interrupt for one queue by setting its bit in VTEIMC.
 * NOTE(review): the (u64) cast happens AFTER the int shift, so it does
 * not prevent UB for vector >= 31; harmless at this driver's queue
 * counts but worth tightening to (u64)1 << vector.
 */
ixv_disable_queue(struct adapter *adapter, u32 vector)
struct ixgbe_hw *hw = &adapter->hw;
u64 queue = (u64)(1 << vector);
mask = (IXGBE_EIMS_RTX_QUEUE & queue);
IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
} /* ixv_disable_queue */
699 /************************************************************************
700 * ixv_msix_que - MSI-X Queue Interrupt Service routine
701 ************************************************************************/
/*
 * Per-queue MSI-X filter: mask this queue's interrupt and hand the rest
 * of the work to the iflib thread context (FILTER_SCHEDULE_THREAD).
 * The queue is re-enabled later via ifdi_rx_queue_intr_enable.
 */
ixv_msix_que(void *arg)
struct ix_rx_queue *que = arg;
struct adapter *adapter = que->adapter;
ixv_disable_queue(adapter, que->msix);
return (FILTER_SCHEDULE_THREAD);
714 /************************************************************************
716 ************************************************************************/
/*
 * Admin/mailbox MSI-X filter: read and clear the cause register, defer
 * link-state work to the iflib admin task, then re-arm the "other"
 * interrupt.  Runs entirely in filter (interrupt) context.
 */
ixv_msix_mbx(void *arg)
struct adapter *adapter = arg;
struct ixgbe_hw *hw = &adapter->hw;
/* First get the cause */
reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
/* Clear interrupt with write */
IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
/* Link status change */
if (reg & IXGBE_EICR_LSC)
iflib_admin_intr_deferred(adapter->ctx);
/* Re-enable the mailbox/link interrupt before returning. */
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
return (FILTER_HANDLED);
740 /************************************************************************
741 * ixv_media_status - Media Ioctl callback
743 * Called whenever the user queries the status of
744 * the interface using ifconfig.
745 ************************************************************************/
/*
 * ifconfig media query: report cached link state/speed.  Also kicks the
 * admin task so the cached state gets refreshed for the next query.
 */
ixv_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
struct adapter *adapter = iflib_get_softc(ctx);
INIT_DEBUGOUT("ixv_media_status: begin");
iflib_admin_intr_deferred(ctx);
ifmr->ifm_status = IFM_AVALID;
ifmr->ifm_active = IFM_ETHER;
/* No link: leave IFM_ACTIVE clear and report media as-is. */
if (!adapter->link_active)
ifmr->ifm_status |= IFM_ACTIVE;
/* Map the cached ixgbe link speed onto an ifmedia subtype. */
switch (adapter->link_speed) {
case IXGBE_LINK_SPEED_1GB_FULL:
ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
case IXGBE_LINK_SPEED_10GB_FULL:
ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
case IXGBE_LINK_SPEED_100_FULL:
ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
case IXGBE_LINK_SPEED_10_FULL:
ifmr->ifm_active |= IFM_10_T | IFM_FDX;
} /* ixv_if_media_status */
779 /************************************************************************
780 * ixv_if_media_change - Media Ioctl callback
782 * Called when the user changes speed/duplex using
783 * media/mediopt option with ifconfig.
784 ************************************************************************/
/*
 * ifconfig media change: a VF cannot control link speed, so only the
 * auto media type is accepted; anything else is rejected with a
 * diagnostic.
 */
ixv_if_media_change(if_ctx_t ctx)
struct adapter *adapter = iflib_get_softc(ctx);
struct ifmedia *ifm = iflib_get_media(ctx);
INIT_DEBUGOUT("ixv_media_change: begin");
if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
switch (IFM_SUBTYPE(ifm->ifm_media)) {
device_printf(adapter->dev, "Only auto media type\n");
} /* ixv_if_media_change */
808 /************************************************************************
811 * Negotiate the Mailbox API with the PF;
812 * start with the most featured API first.
813 ************************************************************************/
/*
 * Walk the mbx_api list from most- to least-featured, stopping at the
 * first version the PF accepts (ixgbevf_negotiate_api_version() == 0).
 * ixgbe_mbox_api_unknown terminates the list.
 */
ixv_negotiate_api(struct adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
int mbx_api[] = { ixgbe_mbox_api_11,
ixgbe_mbox_api_unknown };
while (mbx_api[i] != ixgbe_mbox_api_unknown) {
if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
} /* ixv_negotiate_api */
833 /************************************************************************
834 * ixv_if_multi_set - Multicast Update
836 * Called whenever multicast address list is updated.
837 ************************************************************************/
/*
 * Rebuild the multicast filter: copy every AF_LINK address from the
 * ifnet's multicast list into a flat byte array, then push it to the PF
 * through update_mc_addr_list() using ixv_mc_array_itr as the cursor.
 * NOTE(review): no bound check on mcnt against
 * MAX_NUM_MULTICAST_ADDRESSES is visible in this fragment — confirm one
 * exists, else a long list overruns mta[].
 */
ixv_if_multi_set(if_ctx_t ctx)
u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
struct adapter *adapter = iflib_get_softc(ctx);
struct ifmultiaddr *ifma;
if_t ifp = iflib_get_ifp(ctx);
IOCTL_DEBUGOUT("ixv_if_multi_set: begin");
CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
if (ifma->ifma_addr->sa_family != AF_LINK)
bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
&mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
IXGBE_ETH_LENGTH_OF_ADDRESS);
adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
ixv_mc_array_itr, TRUE);
} /* ixv_if_multi_set */
865 /************************************************************************
868 * An iterator function needed by the multicast shared code.
869 * It feeds the shared code routine the addresses in the
870 * array of ixv_set_multi() one by one.
871 ************************************************************************/
/*
 * Iterator callback for update_mc_addr_list(): returns the current
 * 6-byte address and advances *update_ptr to the next entry in the flat
 * array built by ixv_if_multi_set().
 */
ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
u8 *addr = *update_ptr;
newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
*update_ptr = newptr;
} /* ixv_mc_array_itr */
886 /************************************************************************
887 * ixv_if_local_timer - Timer routine
889 * Checks for link status, updates statistics,
890 * and runs the watchdog check.
891 ************************************************************************/
/*
 * Periodic iflib timer callback: all it does is defer to the admin
 * task, which performs the actual link check and statistics update in
 * ixv_if_update_admin_status().
 */
ixv_if_local_timer(if_ctx_t ctx, uint16_t qid)
/* Fire off the adminq task */
iflib_admin_intr_deferred(ctx);
} /* ixv_if_local_timer */
902 /************************************************************************
903 * ixv_if_update_admin_status - Update OS on link state
905 * Note: Only updates the OS on the cached link state.
906 * The real check of the hardware only happens with
908 ************************************************************************/
ixv_if_update_admin_status(if_ctx_t ctx)
struct adapter *adapter = iflib_get_softc(ctx);
device_t dev = iflib_get_dev(ctx);
/* Force a real query of the PF rather than using the cached state. */
adapter->hw.mac.get_link_status = TRUE;
status = ixgbe_check_link(&adapter->hw, &adapter->link_speed,
&adapter->link_up, FALSE);
if (status != IXGBE_SUCCESS && adapter->hw.adapter_stopped == FALSE) {
/* Mailbox's Clear To Send status is lost or timeout occurred.
 * We need reinitialization. */
iflib_get_ifp(ctx)->if_init(ctx);
/* Propagate link transitions to the stack, logging each edge once. */
if (adapter->link_up) {
if (adapter->link_active == FALSE) {
/* link_speed == 128 is the 10Gb encoding; everything else 1Gb here. */
device_printf(dev, "Link is up %d Gbps %s \n",
((adapter->link_speed == 128) ? 10 : 1),
adapter->link_active = TRUE;
iflib_link_state_change(ctx, LINK_STATE_UP,
} else { /* Link down */
if (adapter->link_active == TRUE) {
device_printf(dev, "Link is Down\n");
iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
adapter->link_active = FALSE;
/* Stats refresh piggybacks on every admin pass. */
ixv_update_stats(adapter);
} /* ixv_if_update_admin_status */
951 /************************************************************************
952 * ixv_if_stop - Stop the hardware
954 * Disables all traffic on the adapter by issuing a
955 * global reset on the MAC and deallocates TX/RX buffers.
956 ************************************************************************/
ixv_if_stop(if_ctx_t ctx)
struct adapter *adapter = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &adapter->hw;
INIT_DEBUGOUT("ixv_stop: begin\n");
ixv_if_disable_intr(ctx);
hw->mac.ops.reset_hw(hw);
/* Clear the stopped flag so stop_adapter() performs its work. */
adapter->hw.adapter_stopped = FALSE;
hw->mac.ops.stop_adapter(hw);
/* Update the stack */
adapter->link_up = FALSE;
ixv_if_update_admin_status(ctx);
/* reprogram the RAR[0] in case user changed it. */
hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
980 /************************************************************************
981 * ixv_identify_hardware - Determine hardware revision.
982 ************************************************************************/
ixv_identify_hardware(if_ctx_t ctx)
struct adapter *adapter = iflib_get_softc(ctx);
device_t dev = iflib_get_dev(ctx);
struct ixgbe_hw *hw = &adapter->hw;
/* Save off the information about this board */
hw->vendor_id = pci_get_vendor(dev);
hw->device_id = pci_get_device(dev);
hw->revision_id = pci_get_revid(dev);
hw->subsystem_vendor_id = pci_get_subvendor(dev);
hw->subsystem_device_id = pci_get_subdevice(dev);
/* A subset of set_mac_type: only the VF device IDs this driver probes. */
switch (hw->device_id) {
case IXGBE_DEV_ID_82599_VF:
hw->mac.type = ixgbe_mac_82599_vf;
case IXGBE_DEV_ID_X540_VF:
hw->mac.type = ixgbe_mac_X540_vf;
case IXGBE_DEV_ID_X550_VF:
hw->mac.type = ixgbe_mac_X550_vf;
case IXGBE_DEV_ID_X550EM_X_VF:
hw->mac.type = ixgbe_mac_X550EM_x_vf;
case IXGBE_DEV_ID_X550EM_A_VF:
hw->mac.type = ixgbe_mac_X550EM_a_vf;
/* Should be unreachable: probe already filtered on these IDs. */
device_printf(dev, "unknown mac type\n");
hw->mac.type = ixgbe_mac_unknown;
} /* ixv_identify_hardware */
1021 /************************************************************************
1022 * ixv_if_msix_intr_assign - Setup MSI-X Interrupt resources and handlers
1023 ************************************************************************/
ixv_if_msix_intr_assign(if_ctx_t ctx, int msix)
struct adapter *adapter = iflib_get_softc(ctx);
device_t dev = iflib_get_dev(ctx);
struct ix_rx_queue *rx_que = adapter->rx_queues;
struct ix_tx_queue *tx_que;
int error, rid, vector = 0;
/* One hard IRQ per RX queue, each running ixv_msix_que as its filter. */
for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) {
snprintf(buf, sizeof(buf), "rxq%d", i);
error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
IFLIB_INTR_RX, ixv_msix_que, rx_que, rx_que->rxr.me, buf);
device_printf(iflib_get_dev(ctx),
"Failed to allocate que int %d err: %d", i, error);
/* Shrink the queue count so the error path frees only what exists. */
adapter->num_rx_queues = i + 1;
rx_que->msix = vector;
/* NOTE(review): int shift before the (u64) cast — UB if msix >= 31. */
adapter->active_queues |= (u64)(1 << rx_que->msix);
/* TX queues get softirqs piggybacked on the paired RX queue's vector. */
for (int i = 0; i < adapter->num_tx_queues; i++) {
snprintf(buf, sizeof(buf), "txq%d", i);
tx_que = &adapter->tx_queues[i];
tx_que->msix = i % adapter->num_rx_queues;
iflib_softirq_alloc_generic(ctx,
&adapter->rx_queues[tx_que->msix].que_irq,
IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
/* Final vector: the admin (mailbox/link) interrupt. */
error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid,
IFLIB_INTR_ADMIN, ixv_msix_mbx, adapter, 0, "aq");
device_printf(iflib_get_dev(ctx),
"Failed to register admin handler");
adapter->vector = vector;
/*
 * Due to a broken design QEMU will fail to properly
 * enable the guest for MSIX unless the vectors in
 * the table are all set up, so we must rewrite the
 * ENABLE in the MSIX control register again at this
 * point to cause it to successfully initialize us.
 */
if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
pci_find_cap(dev, PCIY_MSIX, &rid);
rid += PCIR_MSIX_CTRL;
msix_ctrl = pci_read_config(dev, rid, 2);
msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
pci_write_config(dev, rid, msix_ctrl, 2);
/* Error path: release the admin IRQ and every queue IRQ taken so far. */
iflib_irq_free(ctx, &adapter->irq);
rx_que = adapter->rx_queues;
for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++)
iflib_irq_free(ctx, &rx_que->que_irq);
} /* ixv_if_msix_intr_assign */
1098 /************************************************************************
1099 * ixv_allocate_pci_resources
1100 ************************************************************************/
/*
 * Map BAR0 and record the bus tag/handle so register access macros can
 * reach the device.  Returns 0 on success; the only failure mode is the
 * bus_alloc_resource_any() NULL return handled below.
 */
ixv_allocate_pci_resources(if_ctx_t ctx)
struct adapter *adapter = iflib_get_softc(ctx);
device_t dev = iflib_get_dev(ctx);
adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
if (!(adapter->pci_mem)) {
device_printf(dev, "Unable to allocate bus resource: memory\n");
adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
adapter->osdep.mem_bus_space_handle =
rman_get_bushandle(adapter->pci_mem);
/* hw_addr stores a pointer to the handle, not the mapping itself. */
adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
} /* ixv_allocate_pci_resources */
1125 /************************************************************************
1126 * ixv_free_pci_resources
1127 *
1128 * Undo ixv_allocate_pci_resources and the IRQ allocations: release the
1129 * admin interrupt (MSI-X only), every RX queue interrupt, and the
1130 * register BAR resource.
1131 ************************************************************************/
1129 ixv_free_pci_resources(if_ctx_t ctx)
1131 struct adapter *adapter = iflib_get_softc(ctx);
1132 struct ix_rx_queue *que = adapter->rx_queues;
1133 device_t dev = iflib_get_dev(ctx);
1135 /* Release all MSI-X queue resources */
1136 if (adapter->intr_type == IFLIB_INTR_MSIX)
1137 iflib_irq_free(ctx, &adapter->irq)_;
1140 for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
1141 iflib_irq_free(ctx, &que->que_irq);
/* Finally give back the memory BAR mapped at attach time. */
1145 if (adapter->pci_mem != NULL)
1146 bus_release_resource(dev, SYS_RES_MEMORY,
1147 rman_get_rid(adapter->pci_mem), adapter->pci_mem);
1148 } /* ixv_free_pci_resources */
1150 /************************************************************************
1151 * ixv_setup_interface
1153 * Setup networking device structure and register an interface.
1154 ************************************************************************/
1156 ixv_setup_interface(if_ctx_t ctx)
1158 struct adapter *adapter = iflib_get_softc(ctx);
1159 if_softc_ctx_t scctx = adapter->shared;
1160 struct ifnet *ifp = iflib_get_ifp(ctx);
1162 INIT_DEBUGOUT("ixv_setup_interface: begin");
/* The VF link is always reported as 10 Gb/s. */
1164 if_setbaudrate(ifp, IF_Gbps(10));
/* Leave two descriptors of slack in the send queue depth. */
1165 ifp->if_snd.ifq_maxlen = scctx->isc_ntxd[0] - 2;
/* Max frame = MTU plus Ethernet header/CRC overhead. */
1168 adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
/* VFs have no PHY control; advertise autoselect only. */
1169 ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1170 ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
1173 } /* ixv_setup_interface */
1175 /************************************************************************
1176 * ixv_if_get_counter
1177 *
1178 * Return driver-maintained statistics for the counters this driver
1179 * tracks itself; anything else falls through to the stack's defaults.
1180 ************************************************************************/
1179 ixv_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1181 struct adapter *adapter = iflib_get_softc(ctx);
1182 if_t ifp = iflib_get_ifp(ctx);
1185 case IFCOUNTER_IPACKETS:
1186 return (adapter->ipackets);
1187 case IFCOUNTER_OPACKETS:
1188 return (adapter->opackets);
1189 case IFCOUNTER_IBYTES:
1190 return (adapter->ibytes);
1191 case IFCOUNTER_OBYTES:
1192 return (adapter->obytes);
1193 case IFCOUNTER_IMCASTS:
1194 return (adapter->imcasts);
/* Unhandled counters: defer to the generic ifnet accounting. */
1196 return (if_get_counter_default(ifp, cnt));
1198 } /* ixv_if_get_counter */
1200 /************************************************************************
1201 * ixv_initialize_transmit_units - Enable transmit unit.
1202 *
1203 * Program each TX ring's descriptor base/length, reset head/tail,
1204 * initialize the software report-status bookkeeping, and enable the
1205 * queue.  NOTE(review): `j` appears to be the hardware ring index for
1206 * queue i -- its assignment is on an elided line; confirm against the
1207 * full source.
1208 ************************************************************************/
1204 ixv_initialize_transmit_units(if_ctx_t ctx)
1206 struct adapter *adapter = iflib_get_softc(ctx);
1207 struct ixgbe_hw *hw = &adapter->hw;
1208 if_softc_ctx_t scctx = adapter->shared;
1209 struct ix_tx_queue *que = adapter->tx_queues;
1212 for (i = 0; i < adapter->num_tx_queues; i++, que++) {
1213 struct tx_ring *txr = &que->txr;
1214 u64 tdba = txr->tx_paddr;
1218 /* Set WTHRESH to 8, burst writeback */
1219 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1220 txdctl |= (8 << 16);
1221 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1223 /* Set the HW Tx Head and Tail indices */
1224 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(j), 0);
1225 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(j), 0);
1227 /* Set Tx Tail register */
1228 txr->tail = IXGBE_VFTDT(j);
/* Start the RS (report status) ring consumer where the producer is. */
1230 txr->tx_rs_cidx = txr->tx_rs_pidx;
1231 /* Initialize the last processed descriptor to be the end of
1232 * the ring, rather than the start, so that we avoid an
1233 * off-by-one error when calculating how many descriptors are
1234 * done in the credits_update function.
1235 */
1236 txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
/* No RS entries outstanding yet. */
1237 for (int k = 0; k < scctx->isc_ntxd[0]; k++)
1238 txr->tx_rsq[k] = QIDX_INVALID;
1240 /* Set Ring parameters */
1241 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
1242 (tdba & 0x00000000ffffffffULL));
1243 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
1244 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
1245 scctx->isc_ntxd[0] * sizeof(struct ixgbe_legacy_tx_desc));
/* Disable relaxed-ordering for descriptor writebacks. */
1246 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
1247 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
1248 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
/* Enable the queue last, after all ring parameters are set. */
1251 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1252 txdctl |= IXGBE_TXDCTL_ENABLE;
1253 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1257 } /* ixv_initialize_transmit_units */
1259 /************************************************************************
1260 * ixv_initialize_rss_mapping
1261 *
1262 * Program the VF RSS key (VFRSSRK), the 64-entry redirection table
1263 * (VFRETA), and the hash-type enable bits (VFMRQC).  With the RSS
1264 * feature enabled the kernel's rss(9) key/indirection are used;
1265 * otherwise a random key and round-robin queue mapping are used.
1266 ************************************************************************/
1263 ixv_initialize_rss_mapping(struct adapter *adapter)
1265 struct ixgbe_hw *hw = &adapter->hw;
1266 u32 reta = 0, mrqc, rss_key[10];
1269 u32 rss_hash_config;
1271 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
1272 /* Fetch the configured RSS key */
1273 rss_getkey((uint8_t *)&rss_key);
1275 /* set up random bits */
1276 arc4rand(&rss_key, sizeof(rss_key), 0);
1279 /* Now fill out hash function seeds */
1280 for (i = 0; i < 10; i++)
1281 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
1283 /* Set up the redirection table */
1284 for (i = 0, j = 0; i < 64; i++, j++) {
/* Wrap the round-robin queue index. */
1285 if (j == adapter->num_rx_queues)
1288 if (adapter->feat_en & IXGBE_FEATURE_RSS) {
1290 * Fetch the RSS bucket id for the given indirection
1291 * entry. Cap it at the number of configured buckets
1292 * (which is num_rx_queues.)
1294 queue_id = rss_get_indirection_to_bucket(i);
1295 queue_id = queue_id % adapter->num_rx_queues;
1300 * The low 8 bits are for hash value (n+0);
1301 * The next 8 bits are for hash value (n+1), etc.
/* Four 8-bit entries are packed per VFRETA register. */
1304 reta |= ((uint32_t)queue_id) << 24;
1306 IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
1311 /* Perform hash on these packet types */
1312 if (adapter->feat_en & IXGBE_FEATURE_RSS)
1313 rss_hash_config = rss_gethashconfig();
1316 * Disable UDP - IP fragments aren't currently being handled
1317 * and so we end up with a mix of 2-tuple and 4-tuple
/* Default (non-rss(9)) hash set: IPv4/IPv6 and TCP over each. */
1320 rss_hash_config = RSS_HASHTYPE_RSS_IPV4
1321 | RSS_HASHTYPE_RSS_TCP_IPV4
1322 | RSS_HASHTYPE_RSS_IPV6
1323 | RSS_HASHTYPE_RSS_TCP_IPV6;
/* Translate the hash config into MRQC field-enable bits. */
1326 mrqc = IXGBE_MRQC_RSSEN;
1327 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
1328 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
1329 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
1330 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
1331 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1332 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
1333 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1334 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
/* The *_EX variants have no VF MRQC bit; warn instead of enabling. */
1335 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1336 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
1338 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
1339 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
1341 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1342 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
1343 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1344 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
1345 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
1346 device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
1348 IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
1349 } /* ixv_initialize_rss_mapping */
1352 /************************************************************************
1353 * ixv_initialize_receive_units - Setup receive registers and features.
1354 *
1355 * For each RX ring: disable the queue, program descriptor base/length,
1356 * reset head/tail, set buffer sizing via SRRCTL, re-enable the queue
1357 * and set the tail pointer.  Finishes by programming the RSS mapping.
1358 ************************************************************************/
1356 ixv_initialize_receive_units(if_ctx_t ctx)
1358 struct adapter *adapter = iflib_get_softc(ctx);
1359 if_softc_ctx_t scctx;
1360 struct ixgbe_hw *hw = &adapter->hw;
1361 struct ifnet *ifp = iflib_get_ifp(ctx);
1362 struct ix_rx_queue *que = adapter->rx_queues;
/* Pick a 4K receive buffer for jumbo MTU, 2K otherwise. */
1365 if (ifp->if_mtu > ETHERMTU)
1366 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1368 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
/* Header-split candidate types (used by PSRTYPE). */
1370 psrtype = IXGBE_PSRTYPE_TCPHDR
1371 | IXGBE_PSRTYPE_UDPHDR
1372 | IXGBE_PSRTYPE_IPV4HDR
1373 | IXGBE_PSRTYPE_IPV6HDR
1374 | IXGBE_PSRTYPE_L2HDR;
1376 if (adapter->num_rx_queues > 1)
1379 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1381 /* Tell PF our max_frame size */
1382 if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
1383 device_printf(adapter->dev, "There is a problem with the PF setup. It is likely the receive unit for this VF will not function correctly.\n");
1385 scctx = adapter->shared;
1387 for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
1388 struct rx_ring *rxr = &que->rxr;
1389 u64 rdba = rxr->rx_paddr;
1393 /* Disable the queue */
1394 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
1395 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1396 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
/* Poll (bounded to 10 tries) for the disable to take effect. */
1397 for (int k = 0; k < 10; k++) {
1398 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
1399 IXGBE_RXDCTL_ENABLE)
1405 /* Setup the Base and Length of the Rx Descriptor Ring */
1406 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
1407 (rdba & 0x00000000ffffffffULL));
1408 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
1409 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
1410 scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));
1412 /* Reset the ring indices */
1413 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
1414 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
1416 /* Set up the SRRCTL register */
1417 reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
1418 reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1419 reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
/* Single-buffer advanced descriptors (no header split). */
1421 reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1422 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);
1424 /* Capture Rx Tail index */
1425 rxr->tail = IXGBE_VFRDT(rxr->me);
1427 /* Do the queue enabling last */
1428 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1429 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
/* Poll (bounded to 10 tries) for the enable to take effect. */
1430 for (int l = 0; l < 10; l++) {
1431 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
1432 IXGBE_RXDCTL_ENABLE)
1438 /* Set the Tail Pointer */
1441 * In netmap mode, we must preserve the buffers made
1442 * available to userspace before the if_init()
1443 * (this is true by default on the TX side, because
1444 * init makes all buffers available to userspace).
1446 * netmap_reset() and the device specific routines
1447 * (e.g. ixgbe_setup_receive_rings()) map these
1448 * buffers at the end of the NIC ring, so here we
1449 * must set the RDT (tail) register to make sure
1450 * they are not overwritten.
1452 * In this driver the NIC ring starts at RDH = 0,
1453 * RDT points to the last slot available for reception (?),
1454 * so RDT = num_rx_desc - 1 means the whole ring is available.
1456 if (ifp->if_capenable & IFCAP_NETMAP) {
1457 struct netmap_adapter *na = NA(ifp);
1458 struct netmap_kring *kring = na->rx_rings[j];
1459 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1461 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
1463 #endif /* DEV_NETMAP */
/* Non-netmap: make the whole ring available to the NIC. */
1464 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
1465 scctx->isc_nrxd[0] - 1);
1468 ixv_initialize_rss_mapping(adapter);
1469 } /* ixv_initialize_receive_units */
1471 /************************************************************************
1472 * ixv_setup_vlan_support
1473 *
1474 * Re-establish VLAN state after a reset: re-enable tag stripping on
1475 * each RX queue if HWTAGGING is on, and replay the shadow VFTA into
1476 * hardware via the PF mailbox if HWFILTER is on.
1477 ************************************************************************/
1475 ixv_setup_vlan_support(if_ctx_t ctx)
1477 struct ifnet *ifp = iflib_get_ifp(ctx);
1478 struct adapter *adapter = iflib_get_softc(ctx);
1479 struct ixgbe_hw *hw = &adapter->hw;
1480 u32 ctrl, vid, vfta, retry;
1483 * We get here thru if_init, meaning
1484 * a soft reset, this has already cleared
1485 * the VFTA and other state, so if there
1486 * have been no vlan's registered do nothing.
1488 if (adapter->num_vlans == 0)
1491 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1492 /* Enable the queues */
1493 for (int i = 0; i < adapter->num_rx_queues; i++) {
1494 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
1495 ctrl |= IXGBE_RXDCTL_VME;
1496 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
1498 * Let Rx path know that it needs to store VLAN tag
1499 * as part of extra mbuf info.
1501 adapter->rx_queues[i].rxr.vtag_strip = TRUE;
1506 * If filtering VLAN tags is disabled,
1507 * there is no need to fill VLAN Filter Table Array (VFTA).
1509 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1513 * A soft reset zero's out the VFTA, so
1514 * we need to repopulate it now.
1516 for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
/* Skip empty 32-bit words of the shadow table. */
1517 if (ixv_shadow_vfta[i] == 0)
1519 vfta = ixv_shadow_vfta[i];
1521 * Reconstruct the vlan id's
1522 * based on the bits set in each
1523 * of the array ints.
1525 for (int j = 0; j < 32; j++) {
1527 if ((vfta & (1 << j)) == 0)
1530 /* Call the shared code mailbox routine */
1531 while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
1537 } /* ixv_setup_vlan_support */
1539 /************************************************************************
1540 * ixv_if_register_vlan
1542 * Run via a vlan config EVENT, it enables us to use the
1543 * HW Filter table since we can get the vlan id. This just
1544 * creates the entry in the soft version of the VFTA, init
1545 * will repopulate the real table.
1546 ************************************************************************/
1548 ixv_if_register_vlan(if_ctx_t ctx, u16 vtag)
1550 struct adapter *adapter = iflib_get_softc(ctx);
/* vtag selects word `index` (high bits) and a bit within it. */
1553 index = (vtag >> 5) & 0x7F;
1555 ixv_shadow_vfta[index] |= (1 << bit);
/* Track the count so setup_vlan_support knows there is work to do. */
1556 ++adapter->num_vlans;
1557 } /* ixv_if_register_vlan */
1559 /************************************************************************
1560 * ixv_if_unregister_vlan
1562 * Run via a vlan unconfig EVENT, remove our entry
1563 * in the soft vfta.
1564 ************************************************************************/
1566 ixv_if_unregister_vlan(if_ctx_t ctx, u16 vtag)
1568 struct adapter *adapter = iflib_get_softc(ctx);
/* Mirror of register_vlan: clear the shadow bit for this tag. */
1571 index = (vtag >> 5) & 0x7F;
1573 ixv_shadow_vfta[index] &= ~(1 << bit);
1574 --adapter->num_vlans;
1575 } /* ixv_if_unregister_vlan */
1577 /************************************************************************
1578 * ixv_if_enable_intr
1579 *
1580 * Unmask the VF interrupt causes (VTEIMS), set auto-clear for the
1581 * queue vectors (VTEIAC), and enable each RX queue's vector.
1582 ************************************************************************/
1581 ixv_if_enable_intr(if_ctx_t ctx)
1583 struct adapter *adapter = iflib_get_softc(ctx);
1584 struct ixgbe_hw *hw = &adapter->hw;
1585 struct ix_rx_queue *que = adapter->rx_queues;
1586 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
1588 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
/* Auto-clear everything except the OTHER/LSC (mailbox) causes. */
1590 mask = IXGBE_EIMS_ENABLE_MASK;
1591 mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
1592 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
1594 for (int i = 0; i < adapter->num_rx_queues; i++, que++)
1595 ixv_enable_queue(adapter, que->msix);
/* Flush posted writes so the enables take effect immediately. */
1597 IXGBE_WRITE_FLUSH(hw);
1598 } /* ixv_if_enable_intr */
1600 /************************************************************************
1601 * ixv_if_disable_intr
1602 *
1603 * Mask all VF interrupt causes: clear auto-clear (VTEIAC) and set
1604 * every bit in the interrupt mask-clear register (VTEIMC).
1605 ************************************************************************/
1604 ixv_if_disable_intr(if_ctx_t ctx)
1606 struct adapter *adapter = iflib_get_softc(ctx);
1607 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
1608 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
1609 IXGBE_WRITE_FLUSH(&adapter->hw);
1610 } /* ixv_if_disable_intr */
1612 /************************************************************************
1613 * ixv_if_rx_queue_intr_enable
1614 *
1615 * iflib callback: re-arm the interrupt vector for one RX queue after
1616 * its work has been processed.
1617 ************************************************************************/
1616 ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
1618 struct adapter *adapter = iflib_get_softc(ctx);
1619 struct ix_rx_queue *que = &adapter->rx_queues[rxqid];
1621 ixv_enable_queue(adapter, que->rxr.me);
1624 } /* ixv_if_rx_queue_intr_enable */
1626 /************************************************************************
1627 * ixv_set_ivar
1629 * Setup the correct IVAR register for a particular MSI-X interrupt
1630 * - entry is the register array entry
1631 * - vector is the MSI-X vector for this queue
1632 * - type is RX/TX/MISC
1633 ************************************************************************/
1635 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
1637 struct ixgbe_hw *hw = &adapter->hw;
/* Mark the IVAR entry valid per the datasheet's ALLOC bit. */
1640 vector |= IXGBE_IVAR_ALLOC_VAL;
1642 if (type == -1) { /* MISC IVAR */
1643 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
1646 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
1647 } else { /* RX/TX IVARS */
/*
 * Each VTIVAR register holds four 8-bit entries: two queues
 * (entry & 1) times two types (RX=0, TX=1).
 */
1648 index = (16 * (entry & 1)) + (8 * type);
1649 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
1650 ivar &= ~(0xFF << index);
1651 ivar |= (vector << index);
1652 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
1654 } /* ixv_set_ivar */
1656 /************************************************************************
1657 * ixv_configure_ivars
1658 *
1659 * Map every RX/TX queue pair to its MSI-X vector via the IVAR table
1660 * and seed the per-vector interrupt throttle (EITR); finally map the
1661 * mailbox interrupt to the admin vector.
1662 ************************************************************************/
1660 ixv_configure_ivars(struct adapter *adapter)
1662 struct ix_rx_queue *que = adapter->rx_queues;
/* TX queue i shares the vector of RX queue i; counts must match. */
1664 MPASS(adapter->num_rx_queues == adapter->num_tx_queues);
1666 for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
1667 /* First the RX queue entry */
1668 ixv_set_ivar(adapter, i, que->msix, 0);
1669 /* ... and the TX */
1670 ixv_set_ivar(adapter, i, que->msix, 1);
1671 /* Set an initial value in EITR */
1672 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
1673 IXGBE_EITR_DEFAULT);
1676 /* For the mailbox interrupt */
1677 ixv_set_ivar(adapter, 1, adapter->vector, -1);
1678 } /* ixv_configure_ivars */
1680 /************************************************************************
1681 * ixv_save_stats
1683 * The VF stats registers never have a truly virgin
1684 * starting point, so this routine tries to make an
1685 * artificial one, marking ground zero on attach as
1686 * it were.  Accumulates (current - base) into the saved_reset_*
1687 * fields so totals survive a reset.
1687 ************************************************************************/
1689 ixv_save_stats(struct adapter *adapter)
/* Only accumulate if any traffic has actually been counted. */
1691 if (adapter->stats.vf.vfgprc || adapter->stats.vf.vfgptc) {
1692 adapter->stats.vf.saved_reset_vfgprc +=
1693 adapter->stats.vf.vfgprc - adapter->stats.vf.base_vfgprc;
1694 adapter->stats.vf.saved_reset_vfgptc +=
1695 adapter->stats.vf.vfgptc - adapter->stats.vf.base_vfgptc;
1696 adapter->stats.vf.saved_reset_vfgorc +=
1697 adapter->stats.vf.vfgorc - adapter->stats.vf.base_vfgorc;
1698 adapter->stats.vf.saved_reset_vfgotc +=
1699 adapter->stats.vf.vfgotc - adapter->stats.vf.base_vfgotc;
1700 adapter->stats.vf.saved_reset_vfmprc +=
1701 adapter->stats.vf.vfmprc - adapter->stats.vf.base_vfmprc;
1703 } /* ixv_save_stats */
1705 /************************************************************************
1706 * ixv_init_stats
1707 *
1708 * Snapshot the current hardware counter values as both the "last"
1709 * (for delta tracking in ixv_update_stats) and "base" (ground zero)
1710 * values.  The 36-bit octet counters are read as LSB + MSB halves.
1707 ************************************************************************/
1709 ixv_init_stats(struct adapter *adapter)
1711 struct ixgbe_hw *hw = &adapter->hw;
1713 adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1714 adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1715 adapter->stats.vf.last_vfgorc |=
1716 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1718 adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1719 adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1720 adapter->stats.vf.last_vfgotc |=
1721 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1723 adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
/* The attach-time snapshot becomes the baseline for save_stats. */
1725 adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
1726 adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
1727 adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
1728 adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
1729 adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
1730 } /* ixv_init_stats */
/*
 * UPDATE_STAT_32: read a 32-bit HW counter and fold it into a 64-bit
 * software total, adding 2^32 when the raw register has wrapped since
 * the last read (current < last).
 */
1732 #define UPDATE_STAT_32(reg, last, count) \
1734 u32 current = IXGBE_READ_REG(hw, reg); \
1735 if (current < last) \
1736 count += 0x100000000LL; \
1738 count &= 0xFFFFFFFF00000000LL; \
/*
 * UPDATE_STAT_36: same idea for the 36-bit octet counters that are
 * split across LSB/MSB registers; wrap quantum is 2^36.
 */
1742 #define UPDATE_STAT_36(lsb, msb, last, count) \
1744 u64 cur_lsb = IXGBE_READ_REG(hw, lsb); \
1745 u64 cur_msb = IXGBE_READ_REG(hw, msb); \
1746 u64 current = ((cur_msb << 32) | cur_lsb); \
1747 if (current < last) \
1748 count += 0x1000000000LL; \
1750 count &= 0xFFFFFFF000000000LL; \
1754 /************************************************************************
1755 * ixv_update_stats - Update the board statistics counters.
1756 *
1757 * Apply the wrap-aware UPDATE_STAT_* macros to each VF counter, then
1758 * copy the totals into the OS-visible adapter counters.
1759 ************************************************************************/
1758 ixv_update_stats(struct adapter *adapter)
1760 struct ixgbe_hw *hw = &adapter->hw;
1761 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
1763 UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
1764 adapter->stats.vf.vfgprc);
1765 UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
1766 adapter->stats.vf.vfgptc);
1767 UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
1768 adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
1769 UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
1770 adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
1771 UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
1772 adapter->stats.vf.vfmprc);
1774 /* Fill out the OS statistics structure */
1775 IXGBE_SET_IPACKETS(adapter, stats->vfgprc);
1776 IXGBE_SET_OPACKETS(adapter, stats->vfgptc);
1777 IXGBE_SET_IBYTES(adapter, stats->vfgorc);
1778 IXGBE_SET_OBYTES(adapter, stats->vfgotc);
1779 IXGBE_SET_IMCASTS(adapter, stats->vfmprc);
1780 } /* ixv_update_stats */
1782 /************************************************************************
1783 * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
1784 *
1785 * Exposes driver counters, per-TX-queue and per-RX-queue counters,
1786 * and the HW register-backed "mac" statistics under the device's
1787 * sysctl tree.
1788 ************************************************************************/
1786 ixv_add_stats_sysctls(struct adapter *adapter)
1788 device_t dev = adapter->dev;
1789 struct ix_tx_queue *tx_que = adapter->tx_queues;
1790 struct ix_rx_queue *rx_que = adapter->rx_queues;
1791 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1792 struct sysctl_oid *tree = device_get_sysctl_tree(dev);
1793 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1794 struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
1795 struct sysctl_oid *stat_node, *queue_node;
1796 struct sysctl_oid_list *stat_list, *queue_list;
1798 #define QUEUE_NAME_LEN 32
1799 char namebuf[QUEUE_NAME_LEN];
1801 /* Driver Statistics */
1802 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1803 CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1804 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1805 CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
/* Per-TX-queue nodes: dev.<unit>.queueN.{tso_tx,tx_packets} */
1807 for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
1808 struct tx_ring *txr = &tx_que->txr;
1809 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1810 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1811 CTLFLAG_RD, NULL, "Queue Name");
1812 queue_list = SYSCTL_CHILDREN(queue_node);
1814 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1815 CTLFLAG_RD, &(txr->tso_tx), "TSO Packets");
1816 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1817 CTLFLAG_RD, &(txr->total_packets), "TX Packets");
/* Per-RX-queue nodes: irqs, packets, bytes, discards. */
1820 for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) {
1821 struct rx_ring *rxr = &rx_que->rxr;
1822 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1823 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1824 CTLFLAG_RD, NULL, "Queue Name");
1825 queue_list = SYSCTL_CHILDREN(queue_node);
1827 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1828 CTLFLAG_RD, &(rx_que->irqs), "IRQs on queue");
1829 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1830 CTLFLAG_RD, &(rxr->rx_packets), "RX packets");
1831 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1832 CTLFLAG_RD, &(rxr->rx_bytes), "RX bytes");
1833 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1834 CTLFLAG_RD, &(rxr->rx_discarded), "Discarded RX packets");
/* Register-backed MAC statistics maintained by ixv_update_stats. */
1837 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
1838 CTLFLAG_RD, NULL, "VF Statistics (read from HW registers)");
1839 stat_list = SYSCTL_CHILDREN(stat_node);
1841 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1842 CTLFLAG_RD, &stats->vfgprc, "Good Packets Received");
1843 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1844 CTLFLAG_RD, &stats->vfgorc, "Good Octets Received");
1845 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1846 CTLFLAG_RD, &stats->vfmprc, "Multicast Packets Received");
1847 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1848 CTLFLAG_RD, &stats->vfgptc, "Good Packets Transmitted");
1849 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1850 CTLFLAG_RD, &stats->vfgotc, "Good Octets Transmitted");
1851 } /* ixv_add_stats_sysctls */
1853 /************************************************************************
1854 * ixv_print_debug_info
1856 * Called only when em_display_debug_stats is enabled.
1857 * Provides a way to take a look at important statistics
1858 * maintained by the driver and hardware.
1859 ************************************************************************/
1861 ixv_print_debug_info(struct adapter *adapter)
1863 device_t dev = adapter->dev;
1864 struct ixgbe_hw *hw = &adapter->hw;
1866 device_printf(dev, "Error Byte Count = %u \n",
1867 IXGBE_READ_REG(hw, IXGBE_ERRBC));
/* link_irq counts mailbox/admin vector interrupts serviced. */
1869 device_printf(dev, "MBX IRQ Handled: %lu\n", (long)adapter->link_irq);
1870 } /* ixv_print_debug_info */
1872 /************************************************************************
1873 * ixv_sysctl_debug
1874 *
1875 * Sysctl handler: writing any value triggers a dump of debug info
1876 * via ixv_print_debug_info; plain reads do nothing.
1874 ************************************************************************/
1876 ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
1878 struct adapter *adapter;
1882 error = sysctl_handle_int(oidp, &result, 0, req);
/* No new value supplied (plain read) or error: nothing to do. */
1884 if (error || !req->newptr)
1888 adapter = (struct adapter *)arg1;
1889 ixv_print_debug_info(adapter);
1893 } /* ixv_sysctl_debug */
1895 /************************************************************************
1896 * ixv_init_device_features
1897 *
1898 * Determine the feature set this VF supports (feat_cap) based on the
1899 * MAC type, then enable the default subset (feat_en).
1900 ************************************************************************/
1899 ixv_init_device_features(struct adapter *adapter)
1901 adapter->feat_cap = IXGBE_FEATURE_NETMAP
1904 | IXGBE_FEATURE_LEGACY_TX;
1906 /* A tad short on feature flags for VFs, atm. */
1907 switch (adapter->hw.mac.type) {
1908 case ixgbe_mac_82599_vf:
1910 case ixgbe_mac_X540_vf:
1912 case ixgbe_mac_X550_vf:
1913 case ixgbe_mac_X550EM_x_vf:
1914 case ixgbe_mac_X550EM_a_vf:
/* X550-family VFs require an advanced context descriptor. */
1915 adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
1921 /* Enabled by default... */
1922 /* Is a virtual function (VF) */
1923 if (adapter->feat_cap & IXGBE_FEATURE_VF)
1924 adapter->feat_en |= IXGBE_FEATURE_VF;
1925 /* Netmap support */
1926 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
1927 adapter->feat_en |= IXGBE_FEATURE_NETMAP;
1928 /* Receive-Side Scaling (RSS) */
1929 if (adapter->feat_cap & IXGBE_FEATURE_RSS)
1930 adapter->feat_en |= IXGBE_FEATURE_RSS;
1931 /* Needs advanced context descriptor regardless of offloads req'd */
1932 if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
1933 adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
1934 } /* ixv_init_device_features */