1 /******************************************************************************
3 Copyright (c) 2013-2018, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 ** Virtual Channel support
37 ** These are support functions for communication
38 ** between the VF and PF drivers.
44 /* busy wait delay in msec */
45 #define IAVF_BUSY_WAIT_DELAY 10
/* maximum number of busy-wait iterations before giving up */
46 #define IAVF_BUSY_WAIT_COUNT 50
51 ** Send message to PF and print status if failure.
/*
 * Validate a virtchnl message and hand it to the PF over the admin send
 * queue.  Failures for VIRTCHNL_OP_GET_STATS are deliberately not logged:
 * stats requests are sent periodically and would spam the console.
 */
54 iavf_send_pf_msg(struct iavf_sc *sc,
55 enum virtchnl_ops op, u8 *msg, u16 len)
57 struct i40e_hw *hw = &sc->hw;
58 device_t dev = sc->dev;
62 /* Validating message before sending it to the PF */
63 val_err = virtchnl_vc_validate_vf_msg(&sc->version, op, msg, len);
/* Validation failure is logged but does not itself abort the send here */
65 device_printf(dev, "Error validating msg to PF for op %d,"
66 " msglen %d: error %d\n", op, len, val_err);
/* If the admin send queue is not alive we cannot reach the PF at all */
68 if (!i40e_check_asq_alive(hw)) {
69 if (op != VIRTCHNL_OP_GET_STATS)
70 device_printf(dev, "Unable to send opcode %s to PF, "
71 "ASQ is not alive\n", ixl_vc_opcode_str(op));
75 if (op != VIRTCHNL_OP_GET_STATS)
77 "Sending msg (op=%s[%d]) to PF\n",
78 ixl_vc_opcode_str(op), op);
/* Fire-and-forget: the PF's reply arrives later on the ARQ */
80 status = i40e_aq_send_msg_to_pf(hw, op, I40E_SUCCESS, msg, len, NULL);
81 if (status && op != VIRTCHNL_OP_GET_STATS)
82 device_printf(dev, "Unable to send opcode %s to PF, "
83 "status %s, aq error %s\n",
84 ixl_vc_opcode_str(op),
85 i40e_stat_str(hw, status),
86 i40e_aq_str(hw, hw->aq.asq_last_status));
94 ** Send API version admin queue message to the PF. The reply is not checked
95 ** in this function. Returns 0 if the message was successfully
96 ** sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
99 iavf_send_api_ver(struct iavf_sc *sc)
101 struct virtchnl_version_info vvi;
/* Advertise the virtchnl version this VF driver was built against */
103 vvi.major = VIRTCHNL_VERSION_MAJOR;
104 vvi.minor = VIRTCHNL_VERSION_MINOR;
106 return iavf_send_pf_msg(sc, VIRTCHNL_OP_VERSION,
107 (u8 *)&vvi, sizeof(vvi));
111 ** iavf_verify_api_ver
113 ** Compare API versions with the PF. Must be called after admin queue is
114 ** initialized. Returns 0 if API versions match, EIO if
115 ** they do not, or I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty.
118 iavf_verify_api_ver(struct iavf_sc *sc)
120 struct virtchnl_version_info *pf_vvi;
121 struct i40e_hw *hw = &sc->hw;
122 struct i40e_arq_event_info event;
123 device_t dev = sc->dev;
/* Receive buffer for the PF's reply; M_WAITOK so this cannot fail */
127 event.buf_len = IXL_AQ_BUF_SZ;
128 event.msg_buf = malloc(event.buf_len, M_IAVF, M_WAITOK);
/* Poll the ARQ for the VERSION reply, bounded by IAVF_AQ_MAX_ERR tries */
131 if (++retries > IAVF_AQ_MAX_ERR)
134 /* Initial delay here is necessary */
135 i40e_msec_pause(100);
136 err = i40e_clean_arq_element(hw, &event, NULL);
137 if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
/* The reply opcode travels in cookie_high of the descriptor */
144 if ((enum virtchnl_ops)le32toh(event.desc.cookie_high) !=
145 VIRTCHNL_OP_VERSION) {
146 DDPRINTF(dev, "Received unexpected op response: %d\n",
147 le32toh(event.desc.cookie_high));
148 /* Don't stop looking for expected response */
/* cookie_low carries the PF's status code for the request */
152 err = (i40e_status)le32toh(event.desc.cookie_low);
/* Reject a PF whose virtchnl API is newer than what we support */
160 pf_vvi = (struct virtchnl_version_info *)event.msg_buf;
161 if ((pf_vvi->major > VIRTCHNL_VERSION_MAJOR) ||
162 ((pf_vvi->major == VIRTCHNL_VERSION_MAJOR) &&
163 (pf_vvi->minor > VIRTCHNL_VERSION_MINOR))) {
164 device_printf(dev, "Critical PF/VF API version mismatch!\n");
/* Remember the negotiated version for later message validation */
167 sc->version.major = pf_vvi->major;
168 sc->version.minor = pf_vvi->minor;
171 /* Log PF/VF api versions */
172 device_printf(dev, "PF API %d.%d / VF API %d.%d\n",
173 pf_vvi->major, pf_vvi->minor,
174 VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR);
177 free(event.msg_buf, M_IAVF);
182 ** iavf_send_vf_config_msg
184 ** Send VF configuration request admin queue message to the PF. The reply
185 ** is not checked in this function. Returns 0 if the message was
186 ** successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
189 iavf_send_vf_config_msg(struct iavf_sc *sc)
/* Offload capabilities this VF requests from the PF */
193 caps = VIRTCHNL_VF_OFFLOAD_L2 |
194 VIRTCHNL_VF_OFFLOAD_RSS_PF |
195 VIRTCHNL_VF_OFFLOAD_VLAN;
197 iavf_dbg_info(sc, "Sending offload flags: 0x%b\n",
198 caps, IAVF_PRINTF_VF_OFFLOAD_FLAGS);
/* Old PF API minor versions do not accept a capability payload */
200 if (sc->version.minor == VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
201 return iavf_send_pf_msg(sc, VIRTCHNL_OP_GET_VF_RESOURCES,
204 return iavf_send_pf_msg(sc, VIRTCHNL_OP_GET_VF_RESOURCES,
205 (u8 *)&caps, sizeof(caps));
209 ** iavf_get_vf_config
211 ** Get VF configuration from PF and populate hw structure. Must be called after
212 ** admin queue is initialized. Busy waits until response is received from PF,
213 ** with maximum timeout. Response from PF is returned in the buffer for further
214 ** processing by the caller.
217 iavf_get_vf_config(struct iavf_sc *sc)
219 struct i40e_hw *hw = &sc->hw;
220 device_t dev = sc->dev;
221 struct i40e_arq_event_info event;
226 /* Note this assumes a single VSI */
227 len = sizeof(struct virtchnl_vf_resource) +
228 sizeof(struct virtchnl_vsi_resource);
230 event.msg_buf = malloc(event.buf_len, M_IAVF, M_WAITOK);
/* Poll the ARQ for the GET_VF_RESOURCES reply, up to IAVF_AQ_MAX_ERR tries */
233 err = i40e_clean_arq_element(hw, &event, NULL);
234 if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
235 if (++retries <= IAVF_AQ_MAX_ERR)
/* A reply arrived but for a different opcode; keep waiting for ours */
237 } else if ((enum virtchnl_ops)le32toh(event.desc.cookie_high) !=
238 VIRTCHNL_OP_GET_VF_RESOURCES) {
239 DDPRINTF(dev, "Received a response from PF,"
240 " opcode %d, error %d",
241 le32toh(event.desc.cookie_high),
242 le32toh(event.desc.cookie_low));
/* Our reply arrived; cookie_low carries the PF's status for it */
246 err = (i40e_status)le32toh(event.desc.cookie_low);
248 device_printf(dev, "%s: Error returned from PF,"
249 " opcode %d, error %d\n", __func__,
250 le32toh(event.desc.cookie_high),
251 le32toh(event.desc.cookie_low));
255 /* We retrieved the config message, with no errors */
259 if (retries > IAVF_AQ_MAX_ERR) {
260 INIT_DBG_DEV(dev, "Did not receive response after %d tries.",
/* Copy out no more than we allocated for, then parse into hw caps */
267 memcpy(sc->vf_res, event.msg_buf, min(event.msg_len, len));
268 i40e_vf_parse_hw_config(hw, sc->vf_res);
271 free(event.msg_buf, M_IAVF);
276 ** iavf_configure_queues
278 ** Request that the PF set up our queues.
281 iavf_configure_queues(struct iavf_sc *sc)
283 device_t dev = sc->dev;
284 struct ixl_vsi *vsi = &sc->vsi;
285 if_softc_ctx_t scctx = iflib_get_softc_ctx(vsi->ctx);
286 struct ixl_tx_queue *tx_que = vsi->tx_queues;
287 struct ixl_rx_queue *rx_que = vsi->rx_queues;
292 struct virtchnl_vsi_queue_config_info *vqci;
293 struct virtchnl_queue_pair_info *vqpi;
295 /* XXX: Linux PF driver wants matching ids in each tx/rx struct, so both TX/RX
296 * queues of a pair need to be configured */
297 pairs = max(vsi->num_tx_queues, vsi->num_rx_queues);
/* One header plus one queue_pair_info per pair */
298 len = sizeof(struct virtchnl_vsi_queue_config_info) +
299 (sizeof(struct virtchnl_queue_pair_info) * pairs);
300 vqci = malloc(len, M_IAVF, M_NOWAIT | M_ZERO);
302 device_printf(dev, "%s: unable to allocate memory\n", __func__);
305 vqci->vsi_id = sc->vsi_res->vsi_id;
306 vqci->num_queue_pairs = pairs;
308 /* Size check is not needed here - HW max is 16 queue pairs, and we
309 * can fit info for 31 of them into the AQ buffer before it overflows.
311 // TODO: the above is wrong now; X722 VFs can have 256 queues
/* Fill in TX then RX parameters for each queue pair */
312 for (int i = 0; i < pairs; i++, tx_que++, rx_que++, vqpi++) {
316 vqpi->txq.vsi_id = vqci->vsi_id;
317 vqpi->txq.queue_id = i;
318 vqpi->txq.ring_len = scctx->isc_ntxd[0];
319 vqpi->txq.dma_ring_addr = txr->tx_paddr;
320 /* Enable Head writeback */
321 if (!vsi->enable_head_writeback) {
322 vqpi->txq.headwb_enabled = 0;
323 vqpi->txq.dma_headwb_addr = 0;
325 vqpi->txq.headwb_enabled = 1;
/* Head writeback area lives immediately past the descriptor ring */
326 vqpi->txq.dma_headwb_addr = txr->tx_paddr +
327 sizeof(struct i40e_tx_desc) * scctx->isc_ntxd[0];
330 vqpi->rxq.vsi_id = vqci->vsi_id;
331 vqpi->rxq.queue_id = i;
332 vqpi->rxq.ring_len = scctx->isc_nrxd[0];
333 vqpi->rxq.dma_ring_addr = rxr->rx_paddr;
334 vqpi->rxq.max_pkt_size = scctx->isc_max_frame_size;
335 vqpi->rxq.databuffer_size = rxr->mbuf_sz;
336 vqpi->rxq.splithdr_enabled = 0;
339 iavf_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
347 ** iavf_enable_queues
349 ** Request that the PF enable all of our queues.
352 iavf_enable_queues(struct iavf_sc *sc)
354 struct virtchnl_queue_select vqs;
356 vqs.vsi_id = sc->vsi_res->vsi_id;
357 /* XXX: In Linux PF, as long as neither of these is 0,
358 * every queue in VF VSI is enabled. */
/* NOTE(review): (1 << n) - 1 shifts into UB if num_tx_queues >= 32 —
 * confirm queue count is bounded well below that upstream. */
359 vqs.tx_queues = (1 << sc->vsi.num_tx_queues) - 1;
360 vqs.rx_queues = vqs.tx_queues;
361 iavf_send_pf_msg(sc, VIRTCHNL_OP_ENABLE_QUEUES,
362 (u8 *)&vqs, sizeof(vqs));
367 ** iavf_disable_queues
369 ** Request that the PF disable all of our queues.
372 iavf_disable_queues(struct iavf_sc *sc)
374 struct virtchnl_queue_select vqs;
376 vqs.vsi_id = sc->vsi_res->vsi_id;
377 /* XXX: In Linux PF, as long as neither of these is 0,
378 * every queue in VF VSI is disabled. */
/* NOTE(review): same (1 << n) - 1 shift-overflow hazard as in
 * iavf_enable_queues if num_tx_queues >= 32. */
379 vqs.tx_queues = (1 << sc->vsi.num_tx_queues) - 1;
380 vqs.rx_queues = vqs.tx_queues;
381 iavf_send_pf_msg(sc, VIRTCHNL_OP_DISABLE_QUEUES,
382 (u8 *)&vqs, sizeof(vqs));
389 ** Request that the PF map queues to interrupt vectors. Misc causes, including
390 ** admin queue, are always mapped to vector 0.
393 iavf_map_queues(struct iavf_sc *sc)
395 struct virtchnl_irq_map_info *vm;
397 struct ixl_vsi *vsi = &sc->vsi;
398 struct ixl_rx_queue *rx_que = vsi->rx_queues;
399 if_softc_ctx_t scctx = vsi->shared;
400 device_t dev = sc->dev;
402 // XXX: What happens if we only get 1 MSI-X vector?
403 MPASS(scctx->isc_vectors > 1);
405 /* How many queue vectors, adminq uses one */
406 // XXX: How do we know how many interrupt vectors we have?
407 q = scctx->isc_vectors - 1;
/* One vector_map entry per MSI-X vector, including the adminq vector */
409 len = sizeof(struct virtchnl_irq_map_info) +
410 (scctx->isc_vectors * sizeof(struct virtchnl_vector_map));
411 vm = malloc(len, M_IAVF, M_NOWAIT);
413 device_printf(dev, "%s: unable to allocate memory\n", __func__);
417 vm->num_vectors = scctx->isc_vectors;
418 /* Queue vectors first */
419 for (i = 0; i < q; i++, rx_que++) {
420 vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id;
421 vm->vecmap[i].vector_id = i + 1; /* first is adminq */
422 // TODO: Re-examine this
/* TX and RX of the same pair share a vector; maps keyed by RX queue id */
423 vm->vecmap[i].txq_map = (1 << rx_que->rxr.me);
424 vm->vecmap[i].rxq_map = (1 << rx_que->rxr.me);
/* RX uses ITR index 0, TX uses ITR index 1 */
425 vm->vecmap[i].rxitr_idx = 0;
426 vm->vecmap[i].txitr_idx = 1;
429 /* Misc vector last - this is only for AdminQ messages */
430 vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id;
431 vm->vecmap[i].vector_id = 0;
432 vm->vecmap[i].txq_map = 0;
433 vm->vecmap[i].rxq_map = 0;
434 vm->vecmap[i].rxitr_idx = 0;
435 vm->vecmap[i].txitr_idx = 0;
437 iavf_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_IRQ_MAP,
445 ** Scan the Filter List looking for vlans that need
446 ** to be added, then create the data to hand to the AQ
450 iavf_add_vlans(struct iavf_sc *sc)
452 struct virtchnl_vlan_filter_list *v;
453 struct iavf_vlan_filter *f, *ftmp;
454 device_t dev = sc->dev;
455 int len, i = 0, cnt = 0;
457 /* Get count of VLAN filters to add */
458 SLIST_FOREACH(f, sc->vlan_filters, next) {
459 if (f->flags & IXL_FILTER_ADD)
463 if (!cnt) /* no work... */
/* Message must fit in one AQ buffer */
466 len = sizeof(struct virtchnl_vlan_filter_list) +
469 if (len > IXL_AQ_BUF_SZ) {
470 device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
475 v = malloc(len, M_IAVF, M_NOWAIT);
477 device_printf(dev, "%s: unable to allocate memory\n",
482 v->vsi_id = sc->vsi_res->vsi_id;
483 v->num_elements = cnt;
485 /* Scan the filter array */
486 SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) {
487 if (f->flags & IXL_FILTER_ADD) {
488 bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16));
/* Mark filter as now programmed into hardware */
489 f->flags = IXL_FILTER_USED;
496 iavf_send_pf_msg(sc, VIRTCHNL_OP_ADD_VLAN, (u8 *)v, len);
503 ** Scan the Filter Table looking for vlans that need
504 ** to be removed, then create the data to hand to the AQ
508 iavf_del_vlans(struct iavf_sc *sc)
510 struct virtchnl_vlan_filter_list *v;
511 struct iavf_vlan_filter *f, *ftmp;
512 device_t dev = sc->dev;
513 int len, i = 0, cnt = 0;
515 /* Get count of VLAN filters to delete */
516 SLIST_FOREACH(f, sc->vlan_filters, next) {
517 if (f->flags & IXL_FILTER_DEL)
521 if (!cnt) /* no work... */
/* Message must fit in one AQ buffer */
524 len = sizeof(struct virtchnl_vlan_filter_list) +
527 if (len > IXL_AQ_BUF_SZ) {
528 device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
533 v = malloc(len, M_IAVF, M_NOWAIT | M_ZERO);
535 device_printf(dev, "%s: unable to allocate memory\n",
540 v->vsi_id = sc->vsi_res->vsi_id;
541 v->num_elements = cnt;
543 /* Scan the filter array */
/* SAFE variant required: entries are unlinked while iterating */
544 SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) {
545 if (f->flags & IXL_FILTER_DEL) {
546 bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16));
548 SLIST_REMOVE(sc->vlan_filters, f, iavf_vlan_filter, next);
555 iavf_send_pf_msg(sc, VIRTCHNL_OP_DEL_VLAN, (u8 *)v, len);
563 ** This routine takes additions to the vsi filter
564 ** table and creates an Admin Queue call to create
565 ** the filters in the hardware.
568 iavf_add_ether_filters(struct iavf_sc *sc)
570 struct virtchnl_ether_addr_list *a;
571 struct iavf_mac_filter *f;
572 device_t dev = sc->dev;
573 int len, j = 0, cnt = 0;
574 enum i40e_status_code status;
576 /* Get count of MAC addresses to add */
577 SLIST_FOREACH(f, sc->mac_filters, next) {
578 if (f->flags & IXL_FILTER_ADD)
581 if (cnt == 0) { /* Should not happen... */
582 iavf_dbg_vc(sc, "%s: cnt == 0, exiting...\n", __func__);
586 len = sizeof(struct virtchnl_ether_addr_list) +
587 (cnt * sizeof(struct virtchnl_ether_addr));
589 a = malloc(len, M_IAVF, M_NOWAIT | M_ZERO);
591 device_printf(dev, "%s: Failed to get memory for "
592 "virtchnl_ether_addr_list\n", __func__);
595 a->vsi_id = sc->vsi.id;
596 a->num_elements = cnt;
598 /* Scan the filter array */
599 SLIST_FOREACH(f, sc->mac_filters, next) {
600 if (f->flags & IXL_FILTER_ADD) {
601 bcopy(f->macaddr, a->list[j].addr, ETHER_ADDR_LEN);
/* Clear the pending-add flag now that it is being programmed */
602 f->flags &= ~IXL_FILTER_ADD;
605 iavf_dbg_vc(sc, "ADD: " MAC_FORMAT "\n",
606 MAC_FORMAT_ARGS(f->macaddr));
611 DDPRINTF(dev, "len %d, j %d, cnt %d",
614 status = iavf_send_pf_msg(sc,
615 VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)a, len);
622 ** This routine takes filters flagged for deletion in the
623 ** sc MAC filter list and creates an Admin Queue call
624 ** to delete those filters in the hardware.
627 iavf_del_ether_filters(struct iavf_sc *sc)
629 struct virtchnl_ether_addr_list *d;
630 struct iavf_mac_filter *f, *f_temp;
631 device_t dev = sc->dev;
632 int len, j = 0, cnt = 0;
634 /* Get count of MAC addresses to delete */
635 SLIST_FOREACH(f, sc->mac_filters, next) {
636 if (f->flags & IXL_FILTER_DEL)
640 iavf_dbg_vc(sc, "%s: cnt == 0, exiting...\n", __func__);
644 len = sizeof(struct virtchnl_ether_addr_list) +
645 (cnt * sizeof(struct virtchnl_ether_addr));
647 d = malloc(len, M_IAVF, M_NOWAIT | M_ZERO);
649 device_printf(dev, "%s: Failed to get memory for "
650 "virtchnl_ether_addr_list\n", __func__);
653 d->vsi_id = sc->vsi.id;
654 d->num_elements = cnt;
656 /* Scan the filter array */
/* SAFE variant required: entries are unlinked while iterating */
657 SLIST_FOREACH_SAFE(f, sc->mac_filters, next, f_temp) {
658 if (f->flags & IXL_FILTER_DEL) {
659 bcopy(f->macaddr, d->list[j].addr, ETHER_ADDR_LEN);
660 iavf_dbg_vc(sc, "DEL: " MAC_FORMAT "\n",
661 MAC_FORMAT_ARGS(f->macaddr));
663 SLIST_REMOVE(sc->mac_filters, f, iavf_mac_filter, next);
670 VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)d, len);
677 ** iavf_request_reset
678 ** Request that the PF reset this VF. No response is expected.
681 iavf_request_reset(struct iavf_sc *sc)
684 ** Set the reset status to "in progress" before
685 ** the request, this avoids any possibility of
686 ** a mistaken early detection of completion.
/* Pre-mark VFGEN_RSTAT so later polling can't see a stale "done" value */
688 wr32(&sc->hw, I40E_VFGEN_RSTAT, VIRTCHNL_VFR_INPROGRESS);
689 iavf_send_pf_msg(sc, VIRTCHNL_OP_RESET_VF, NULL, 0);
694 ** iavf_request_stats
695 ** Request the statistics for this VF's VSI from PF.
698 iavf_request_stats(struct iavf_sc *sc)
700 struct virtchnl_queue_select vqs;
703 vqs.vsi_id = sc->vsi_res->vsi_id;
704 /* Low priority, we don't need to error check */
/* Reply is consumed asynchronously in iavf_vc_completion() */
705 error = iavf_send_pf_msg(sc, VIRTCHNL_OP_GET_STATS,
706 (u8 *)&vqs, sizeof(vqs));
708 device_printf(sc->dev, "Error sending stats request to PF: %d\n", error);
714 ** Updates driver's stats counters with VSI stats returned from PF.
717 iavf_update_stats_counters(struct iavf_sc *sc, struct i40e_eth_stats *es)
719 struct ixl_vsi *vsi = &sc->vsi;
720 uint64_t tx_discards;
/* Fold per-queue software buf_ring drops into the PF-reported discards.
 * NOTE(review): this indexes vsi->num_queues and txr.br, unlike the
 * num_tx_queues/iflib style used elsewhere in this file — confirm these
 * fields still exist and are populated in this driver configuration. */
722 tx_discards = es->tx_discards;
724 for (int i = 0; i < vsi->num_queues; i++)
725 tx_discards += sc->vsi.queues[i].txr.br->br_drops;
728 /* Update ifnet stats */
729 IXL_SET_IPACKETS(vsi, es->rx_unicast +
732 IXL_SET_OPACKETS(vsi, es->tx_unicast +
735 IXL_SET_IBYTES(vsi, es->rx_bytes);
736 IXL_SET_OBYTES(vsi, es->tx_bytes);
737 IXL_SET_IMCASTS(vsi, es->rx_multicast);
738 IXL_SET_OMCASTS(vsi, es->tx_multicast);
740 IXL_SET_OERRORS(vsi, es->tx_errors);
741 IXL_SET_IQDROPS(vsi, es->rx_discards);
742 IXL_SET_OQDROPS(vsi, tx_discards);
743 IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
744 IXL_SET_COLLISIONS(vsi, 0);
/* Keep a raw copy for sysctl reporting */
746 vsi->eth_stats = *es;
/*
 * Send the RSS hash key to the PF for this VF's VSI.
 * The key comes from the kernel RSS subsystem when available, otherwise
 * from the driver's default key.
 */
750 iavf_config_rss_key(struct iavf_sc *sc)
752 struct virtchnl_rss_key *rss_key_msg;
753 int msg_len, key_length;
754 u8 rss_seed[IXL_RSS_KEY_SIZE];
757 /* Fetch the configured RSS key */
758 rss_getkey((uint8_t *) &rss_seed);
760 ixl_get_default_rss_key((u32 *)rss_seed);
763 /* Send the fetched key */
764 key_length = IXL_RSS_KEY_SIZE;
/* struct has a 1-byte key[] placeholder, hence the "- 1" */
765 msg_len = sizeof(struct virtchnl_rss_key) + (sizeof(u8) * key_length) - 1;
766 rss_key_msg = malloc(msg_len, M_IAVF, M_NOWAIT | M_ZERO);
767 if (rss_key_msg == NULL) {
768 device_printf(sc->dev, "Unable to allocate msg memory for RSS key msg.\n");
772 rss_key_msg->vsi_id = sc->vsi_res->vsi_id;
773 rss_key_msg->key_len = key_length;
774 bcopy(rss_seed, &rss_key_msg->key[0], key_length);
776 iavf_dbg_vc(sc, "config_rss: vsi_id %d, key_len %d\n",
777 rss_key_msg->vsi_id, rss_key_msg->key_len);
779 iavf_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_RSS_KEY,
780 (u8 *)rss_key_msg, msg_len);
782 free(rss_key_msg, M_IAVF);
/*
 * Tell the PF which packet types to hash on (HENA bits), using the
 * MAC-generation-specific default set.
 */
787 iavf_set_rss_hena(struct iavf_sc *sc)
789 struct virtchnl_rss_hena hena;
790 struct i40e_hw *hw = &sc->hw;
/* X722 VFs support a different default hash-enable set than XL710 */
792 if (hw->mac.type == I40E_MAC_X722_VF)
793 hena.hena = IXL_DEFAULT_RSS_HENA_X722;
795 hena.hena = IXL_DEFAULT_RSS_HENA_XL710;
797 iavf_send_pf_msg(sc, VIRTCHNL_OP_SET_RSS_HENA,
798 (u8 *)&hena, sizeof(hena));
/*
 * Build and send the RSS lookup table for this VF's VSI, distributing
 * LUT entries across the RX queues in round-robin fashion.
 */
803 iavf_config_rss_lut(struct iavf_sc *sc)
805 struct virtchnl_rss_lut *rss_lut_msg;
811 lut_length = IXL_RSS_VSI_LUT_SIZE;
/* struct has a 1-byte lut[] placeholder, hence the "- 1" */
812 msg_len = sizeof(struct virtchnl_rss_lut) + (lut_length * sizeof(u8)) - 1;
813 rss_lut_msg = malloc(msg_len, M_IAVF, M_NOWAIT | M_ZERO);
814 if (rss_lut_msg == NULL) {
815 device_printf(sc->dev, "Unable to allocate msg memory for RSS lut msg.\n");
819 rss_lut_msg->vsi_id = sc->vsi_res->vsi_id;
820 /* Each LUT entry is a max of 1 byte, so this is easy */
821 rss_lut_msg->lut_entries = lut_length;
823 /* Populate the LUT with max no. of queues in round robin fashion */
824 for (i = 0; i < lut_length; i++) {
827 * Fetch the RSS bucket id for the given indirection entry.
828 * Cap it at the number of configured buckets (which is
831 que_id = rss_get_indirection_to_bucket(i);
832 que_id = que_id % sc->vsi.num_rx_queues;
/* Fallback when kernel RSS is unavailable: simple modulo spread */
834 que_id = i % sc->vsi.num_rx_queues;
836 lut = que_id & IXL_RSS_VSI_LUT_ENTRY_MASK;
837 rss_lut_msg->lut[i] = lut;
840 iavf_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_RSS_LUT,
841 (u8 *)rss_lut_msg, msg_len);
843 free(rss_lut_msg, M_IAVF);
/*
 * Push the currently-requested promiscuous flags (sc->promisc_flags)
 * to the PF for this VF's VSI.
 */
848 iavf_config_promisc_mode(struct iavf_sc *sc)
850 struct virtchnl_promisc_info pinfo;
852 pinfo.vsi_id = sc->vsi_res->vsi_id;
853 pinfo.flags = sc->promisc_flags;
855 iavf_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
856 (u8 *)&pinfo, sizeof(pinfo));
861 ** iavf_vc_completion
863 ** Asynchronous completion function for admin queue messages. Rather than busy
864 ** wait, we fire off our requests and assume that no errors will be returned.
865 ** This function handles the reply messages.
868 iavf_vc_completion(struct iavf_sc *sc,
869 enum virtchnl_ops v_opcode,
870 enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
872 device_t dev = sc->dev;
/* Suppress per-reply logging for the frequent GET_STATS opcode */
874 if (v_opcode != VIRTCHNL_OP_GET_STATS)
875 iavf_dbg_vc(sc, "%s: opcode %s\n", __func__,
876 ixl_vc_opcode_str(v_opcode));
/* Unsolicited PF events (link change, reset) arrive as VIRTCHNL_OP_EVENT */
878 if (v_opcode == VIRTCHNL_OP_EVENT) {
879 struct virtchnl_pf_event *vpe =
880 (struct virtchnl_pf_event *)msg;
882 switch (vpe->event) {
883 case VIRTCHNL_EVENT_LINK_CHANGE:
884 iavf_dbg_vc(sc, "Link change: status %d, speed %s\n",
885 vpe->event_data.link_event.link_status,
886 iavf_vc_speed_to_string(vpe->event_data.link_event.link_speed));
888 vpe->event_data.link_event.link_status;
890 vpe->event_data.link_event.link_speed;
891 iavf_update_link_status(sc);
893 case VIRTCHNL_EVENT_RESET_IMPENDING:
/* PF is about to reset us; reinitialize via iflib */
894 device_printf(dev, "PF initiated reset!\n");
895 sc->init_state = IAVF_RESET_PENDING;
896 iavf_if_init(sc->vsi.ctx);
899 iavf_dbg_vc(sc, "Unknown event %d from AQ\n",
907 /* Catch-all error response */
910 "%s: AQ returned error %s to our request %s!\n",
911 __func__, i40e_vc_stat_str(&sc->hw, v_retval), ixl_vc_opcode_str(v_opcode));
/* Per-opcode completion handling for our own requests */
915 case VIRTCHNL_OP_GET_STATS:
916 iavf_update_stats_counters(sc, (struct i40e_eth_stats *)msg);
918 case VIRTCHNL_OP_ADD_ETH_ADDR:
920 device_printf(dev, "WARNING: Error adding VF mac filter!\n");
921 device_printf(dev, "WARNING: Device may not receive traffic!\n");
924 case VIRTCHNL_OP_DEL_ETH_ADDR:
926 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
928 case VIRTCHNL_OP_ADD_VLAN:
930 case VIRTCHNL_OP_DEL_VLAN:
/* Queue state changes wake any thread sleeping on the matching channel */
932 case VIRTCHNL_OP_ENABLE_QUEUES:
933 atomic_store_rel_32(&sc->queues_enabled, 1);
934 wakeup_one(&sc->enable_queues_chan);
936 case VIRTCHNL_OP_DISABLE_QUEUES:
937 atomic_store_rel_32(&sc->queues_enabled, 0);
938 wakeup_one(&sc->disable_queues_chan);
940 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
942 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
944 case VIRTCHNL_OP_CONFIG_RSS_KEY:
946 case VIRTCHNL_OP_SET_RSS_HENA:
948 case VIRTCHNL_OP_CONFIG_RSS_LUT:
952 "Received unexpected message %s from PF.\n",
953 ixl_vc_opcode_str(v_opcode));
/*
 * Dispatch a pending AQ request flag to the corresponding virtchnl
 * send routine.
 */
959 ixl_vc_send_cmd(struct iavf_sc *sc, uint32_t request)
963 case IAVF_FLAG_AQ_MAP_VECTORS:
964 return iavf_map_queues(sc);
966 case IAVF_FLAG_AQ_ADD_MAC_FILTER:
967 return iavf_add_ether_filters(sc);
969 case IAVF_FLAG_AQ_ADD_VLAN_FILTER:
970 return iavf_add_vlans(sc);
972 case IAVF_FLAG_AQ_DEL_MAC_FILTER:
973 return iavf_del_ether_filters(sc);
975 case IAVF_FLAG_AQ_DEL_VLAN_FILTER:
976 return iavf_del_vlans(sc);
978 case IAVF_FLAG_AQ_CONFIGURE_QUEUES:
979 return iavf_configure_queues(sc);
981 case IAVF_FLAG_AQ_DISABLE_QUEUES:
982 return iavf_disable_queues(sc);
984 case IAVF_FLAG_AQ_ENABLE_QUEUES:
985 return iavf_enable_queues(sc);
987 case IAVF_FLAG_AQ_CONFIG_RSS_KEY:
988 return iavf_config_rss_key(sc);
990 case IAVF_FLAG_AQ_SET_RSS_HENA:
991 return iavf_set_rss_hena(sc);
993 case IAVF_FLAG_AQ_CONFIG_RSS_LUT:
994 return iavf_config_rss_lut(sc);
996 case IAVF_FLAG_AQ_CONFIGURE_PROMISC:
997 return iavf_config_promisc_mode(sc);
1004 ixl_vc_get_op_chan(struct iavf_sc *sc, uint32_t request)
1007 case IAVF_FLAG_AQ_ENABLE_QUEUES:
1008 return (&sc->enable_queues_chan);
1009 case IAVF_FLAG_AQ_DISABLE_QUEUES:
1010 return (&sc->disable_queues_chan);