1 /******************************************************************************
3 Copyright (c) 2013-2017, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
** Virtual Channel support
** These are support functions for communication
** between the VF and PF drivers.
43 #include "i40e_prototype.h"
46 /* busy wait delay in msec */
47 #define IXLV_BUSY_WAIT_DELAY 10
48 #define IXLV_BUSY_WAIT_COUNT 50
50 static void ixl_vc_process_resp(struct ixl_vc_mgr *, uint32_t,
51 enum virtchnl_status_code);
52 static void ixl_vc_process_next(struct ixl_vc_mgr *mgr);
53 static void ixl_vc_schedule_retry(struct ixl_vc_mgr *mgr);
54 static void ixl_vc_send_current(struct ixl_vc_mgr *mgr);
58 ** Validate VF messages
60 static int ixl_vc_validate_vf_msg(struct ixlv_sc *sc, u32 v_opcode,
63 bool err_msg_format = false;
66 /* Validate message length. */
68 case VIRTCHNL_OP_VERSION:
69 valid_len = sizeof(struct virtchnl_version_info);
71 case VIRTCHNL_OP_RESET_VF:
74 case VIRTCHNL_OP_GET_VF_RESOURCES:
75 /* Valid length in api v1.0 is 0, v1.1 is 4 */
78 case VIRTCHNL_OP_CONFIG_TX_QUEUE:
79 valid_len = sizeof(struct virtchnl_txq_info);
81 case VIRTCHNL_OP_CONFIG_RX_QUEUE:
82 valid_len = sizeof(struct virtchnl_rxq_info);
84 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
85 valid_len = sizeof(struct virtchnl_vsi_queue_config_info);
86 if (msglen >= valid_len) {
87 struct virtchnl_vsi_queue_config_info *vqc =
88 (struct virtchnl_vsi_queue_config_info *)msg;
89 valid_len += (vqc->num_queue_pairs *
91 virtchnl_queue_pair_info));
92 if (vqc->num_queue_pairs == 0)
93 err_msg_format = true;
96 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
97 valid_len = sizeof(struct virtchnl_irq_map_info);
98 if (msglen >= valid_len) {
99 struct virtchnl_irq_map_info *vimi =
100 (struct virtchnl_irq_map_info *)msg;
101 valid_len += (vimi->num_vectors *
102 sizeof(struct virtchnl_vector_map));
103 if (vimi->num_vectors == 0)
104 err_msg_format = true;
107 case VIRTCHNL_OP_ENABLE_QUEUES:
108 case VIRTCHNL_OP_DISABLE_QUEUES:
109 valid_len = sizeof(struct virtchnl_queue_select);
111 case VIRTCHNL_OP_ADD_ETH_ADDR:
112 case VIRTCHNL_OP_DEL_ETH_ADDR:
113 valid_len = sizeof(struct virtchnl_ether_addr_list);
114 if (msglen >= valid_len) {
115 struct virtchnl_ether_addr_list *veal =
116 (struct virtchnl_ether_addr_list *)msg;
117 valid_len += veal->num_elements *
118 sizeof(struct virtchnl_ether_addr);
119 if (veal->num_elements == 0)
120 err_msg_format = true;
123 case VIRTCHNL_OP_ADD_VLAN:
124 case VIRTCHNL_OP_DEL_VLAN:
125 valid_len = sizeof(struct virtchnl_vlan_filter_list);
126 if (msglen >= valid_len) {
127 struct virtchnl_vlan_filter_list *vfl =
128 (struct virtchnl_vlan_filter_list *)msg;
129 valid_len += vfl->num_elements * sizeof(u16);
130 if (vfl->num_elements == 0)
131 err_msg_format = true;
134 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
135 valid_len = sizeof(struct virtchnl_promisc_info);
137 case VIRTCHNL_OP_GET_STATS:
138 valid_len = sizeof(struct virtchnl_queue_select);
140 /* These are always errors coming from the VF. */
141 case VIRTCHNL_OP_EVENT:
142 case VIRTCHNL_OP_UNKNOWN:
147 /* few more checks */
148 if ((valid_len != msglen) || (err_msg_format))
158 ** Send message to PF and print status if failure.
161 ixlv_send_pf_msg(struct ixlv_sc *sc,
162 enum virtchnl_ops op, u8 *msg, u16 len)
164 struct i40e_hw *hw = &sc->hw;
165 device_t dev = sc->dev;
170 ** Pre-validating messages to the PF
173 val_err = ixl_vc_validate_vf_msg(sc, op, msg, len);
175 device_printf(dev, "Error validating msg to PF for op %d,"
176 " msglen %d: error %d\n", op, len, val_err);
179 err = i40e_aq_send_msg_to_pf(hw, op, I40E_SUCCESS, msg, len, NULL);
181 device_printf(dev, "Unable to send opcode %s to PF, "
182 "status %s, aq error %s\n",
183 ixl_vc_opcode_str(op),
184 i40e_stat_str(hw, err),
185 i40e_aq_str(hw, hw->aq.asq_last_status));
193 ** Send API version admin queue message to the PF. The reply is not checked
194 ** in this function. Returns 0 if the message was successfully
195 ** sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
198 ixlv_send_api_ver(struct ixlv_sc *sc)
200 struct virtchnl_version_info vvi;
202 vvi.major = VIRTCHNL_VERSION_MAJOR;
203 vvi.minor = VIRTCHNL_VERSION_MINOR;
205 return ixlv_send_pf_msg(sc, VIRTCHNL_OP_VERSION,
206 (u8 *)&vvi, sizeof(vvi));
210 ** ixlv_verify_api_ver
212 ** Compare API versions with the PF. Must be called after admin queue is
213 ** initialized. Returns 0 if API versions match, EIO if
214 ** they do not, or I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty.
217 ixlv_verify_api_ver(struct ixlv_sc *sc)
219 struct virtchnl_version_info *pf_vvi;
220 struct i40e_hw *hw = &sc->hw;
221 struct i40e_arq_event_info event;
222 device_t dev = sc->dev;
226 event.buf_len = IXL_AQ_BUF_SZ;
227 event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT);
228 if (!event.msg_buf) {
234 if (++retries > IXLV_AQ_MAX_ERR)
237 /* Initial delay here is necessary */
238 i40e_msec_pause(100);
239 err = i40e_clean_arq_element(hw, &event, NULL);
240 if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
247 if ((enum virtchnl_ops)le32toh(event.desc.cookie_high) !=
248 VIRTCHNL_OP_VERSION) {
249 DDPRINTF(dev, "Received unexpected op response: %d\n",
250 le32toh(event.desc.cookie_high));
251 /* Don't stop looking for expected response */
255 err = (i40e_status)le32toh(event.desc.cookie_low);
263 pf_vvi = (struct virtchnl_version_info *)event.msg_buf;
264 if ((pf_vvi->major > VIRTCHNL_VERSION_MAJOR) ||
265 ((pf_vvi->major == VIRTCHNL_VERSION_MAJOR) &&
266 (pf_vvi->minor > VIRTCHNL_VERSION_MINOR))) {
267 device_printf(dev, "Critical PF/VF API version mismatch!\n");
270 sc->pf_version = pf_vvi->minor;
272 /* Log PF/VF api versions */
273 device_printf(dev, "PF API %d.%d / VF API %d.%d\n",
274 pf_vvi->major, pf_vvi->minor,
275 VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR);
278 free(event.msg_buf, M_DEVBUF);
284 ** ixlv_send_vf_config_msg
286 ** Send VF configuration request admin queue message to the PF. The reply
287 ** is not checked in this function. Returns 0 if the message was
288 ** successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
291 ixlv_send_vf_config_msg(struct ixlv_sc *sc)
295 caps = VIRTCHNL_VF_OFFLOAD_L2 |
296 VIRTCHNL_VF_OFFLOAD_RSS_PF |
297 VIRTCHNL_VF_OFFLOAD_VLAN;
299 if (sc->pf_version == VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
300 return ixlv_send_pf_msg(sc, VIRTCHNL_OP_GET_VF_RESOURCES,
303 return ixlv_send_pf_msg(sc, VIRTCHNL_OP_GET_VF_RESOURCES,
304 (u8 *)&caps, sizeof(caps));
308 ** ixlv_get_vf_config
310 ** Get VF configuration from PF and populate hw structure. Must be called after
311 ** admin queue is initialized. Busy waits until response is received from PF,
312 ** with maximum timeout. Response from PF is returned in the buffer for further
313 ** processing by the caller.
316 ixlv_get_vf_config(struct ixlv_sc *sc)
318 struct i40e_hw *hw = &sc->hw;
319 device_t dev = sc->dev;
320 struct i40e_arq_event_info event;
325 /* Note this assumes a single VSI */
326 len = sizeof(struct virtchnl_vf_resource) +
327 sizeof(struct virtchnl_vsi_resource);
329 event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT);
330 if (!event.msg_buf) {
336 err = i40e_clean_arq_element(hw, &event, NULL);
337 if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
338 if (++retries <= IXLV_AQ_MAX_ERR)
340 } else if ((enum virtchnl_ops)le32toh(event.desc.cookie_high) !=
341 VIRTCHNL_OP_GET_VF_RESOURCES) {
342 DDPRINTF(dev, "Received a response from PF,"
343 " opcode %d, error %d",
344 le32toh(event.desc.cookie_high),
345 le32toh(event.desc.cookie_low));
349 err = (i40e_status)le32toh(event.desc.cookie_low);
351 device_printf(dev, "%s: Error returned from PF,"
352 " opcode %d, error %d\n", __func__,
353 le32toh(event.desc.cookie_high),
354 le32toh(event.desc.cookie_low));
358 /* We retrieved the config message, with no errors */
362 if (retries > IXLV_AQ_MAX_ERR) {
363 INIT_DBG_DEV(dev, "Did not receive response after %d tries.",
370 memcpy(sc->vf_res, event.msg_buf, min(event.msg_len, len));
371 i40e_vf_parse_hw_config(hw, sc->vf_res);
374 free(event.msg_buf, M_DEVBUF);
380 ** ixlv_configure_queues
382 ** Request that the PF set up our queues.
385 ixlv_configure_queues(struct ixlv_sc *sc)
387 device_t dev = sc->dev;
388 struct ixl_vsi *vsi = &sc->vsi;
389 struct ixl_queue *que = vsi->queues;
394 struct virtchnl_vsi_queue_config_info *vqci;
395 struct virtchnl_queue_pair_info *vqpi;
397 pairs = vsi->num_queues;
398 len = sizeof(struct virtchnl_vsi_queue_config_info) +
399 (sizeof(struct virtchnl_queue_pair_info) * pairs);
400 vqci = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
402 device_printf(dev, "%s: unable to allocate memory\n", __func__);
403 ixl_vc_schedule_retry(&sc->vc_mgr);
406 vqci->vsi_id = sc->vsi_res->vsi_id;
407 vqci->num_queue_pairs = pairs;
409 /* Size check is not needed here - HW max is 16 queue pairs, and we
410 * can fit info for 31 of them into the AQ buffer before it overflows.
412 for (int i = 0; i < pairs; i++, que++, vqpi++) {
415 vqpi->txq.vsi_id = vqci->vsi_id;
416 vqpi->txq.queue_id = i;
417 vqpi->txq.ring_len = que->num_tx_desc;
418 vqpi->txq.dma_ring_addr = txr->dma.pa;
419 /* Enable Head writeback */
420 if (vsi->enable_head_writeback) {
421 vqpi->txq.headwb_enabled = 1;
422 vqpi->txq.dma_headwb_addr = txr->dma.pa +
423 (que->num_tx_desc * sizeof(struct i40e_tx_desc));
426 vqpi->rxq.vsi_id = vqci->vsi_id;
427 vqpi->rxq.queue_id = i;
428 vqpi->rxq.ring_len = que->num_rx_desc;
429 vqpi->rxq.dma_ring_addr = rxr->dma.pa;
430 vqpi->rxq.max_pkt_size = vsi->max_frame_size;
431 vqpi->rxq.databuffer_size = rxr->mbuf_sz;
432 vqpi->rxq.splithdr_enabled = 0;
435 ixlv_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
437 free(vqci, M_DEVBUF);
441 ** ixlv_enable_queues
443 ** Request that the PF enable all of our queues.
446 ixlv_enable_queues(struct ixlv_sc *sc)
448 struct virtchnl_queue_select vqs;
450 vqs.vsi_id = sc->vsi_res->vsi_id;
451 vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
452 vqs.rx_queues = vqs.tx_queues;
453 ixlv_send_pf_msg(sc, VIRTCHNL_OP_ENABLE_QUEUES,
454 (u8 *)&vqs, sizeof(vqs));
458 ** ixlv_disable_queues
460 ** Request that the PF disable all of our queues.
463 ixlv_disable_queues(struct ixlv_sc *sc)
465 struct virtchnl_queue_select vqs;
467 vqs.vsi_id = sc->vsi_res->vsi_id;
468 vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
469 vqs.rx_queues = vqs.tx_queues;
470 ixlv_send_pf_msg(sc, VIRTCHNL_OP_DISABLE_QUEUES,
471 (u8 *)&vqs, sizeof(vqs));
477 ** Request that the PF map queues to interrupt vectors. Misc causes, including
478 ** admin queue, are always mapped to vector 0.
481 ixlv_map_queues(struct ixlv_sc *sc)
483 struct virtchnl_irq_map_info *vm;
485 struct ixl_vsi *vsi = &sc->vsi;
486 struct ixl_queue *que = vsi->queues;
488 /* How many queue vectors, adminq uses one */
491 len = sizeof(struct virtchnl_irq_map_info) +
492 (sc->msix * sizeof(struct virtchnl_vector_map));
493 vm = malloc(len, M_DEVBUF, M_NOWAIT);
495 printf("%s: unable to allocate memory\n", __func__);
496 ixl_vc_schedule_retry(&sc->vc_mgr);
500 vm->num_vectors = sc->msix;
501 /* Queue vectors first */
502 for (i = 0; i < q; i++, que++) {
503 vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id;
504 vm->vecmap[i].vector_id = i + 1; /* first is adminq */
505 vm->vecmap[i].txq_map = (1 << que->me);
506 vm->vecmap[i].rxq_map = (1 << que->me);
507 vm->vecmap[i].rxitr_idx = 0;
508 vm->vecmap[i].txitr_idx = 1;
511 /* Misc vector last - this is only for AdminQ messages */
512 vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id;
513 vm->vecmap[i].vector_id = 0;
514 vm->vecmap[i].txq_map = 0;
515 vm->vecmap[i].rxq_map = 0;
516 vm->vecmap[i].rxitr_idx = 0;
517 vm->vecmap[i].txitr_idx = 0;
519 ixlv_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_IRQ_MAP,
525 ** Scan the Filter List looking for vlans that need
526 ** to be added, then create the data to hand to the AQ
530 ixlv_add_vlans(struct ixlv_sc *sc)
532 struct virtchnl_vlan_filter_list *v;
533 struct ixlv_vlan_filter *f, *ftmp;
534 device_t dev = sc->dev;
535 int len, i = 0, cnt = 0;
537 /* Get count of VLAN filters to add */
538 SLIST_FOREACH(f, sc->vlan_filters, next) {
539 if (f->flags & IXL_FILTER_ADD)
543 if (!cnt) { /* no work... */
544 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
545 VIRTCHNL_STATUS_SUCCESS);
549 len = sizeof(struct virtchnl_vlan_filter_list) +
552 if (len > IXL_AQ_BUF_SZ) {
553 device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
555 ixl_vc_schedule_retry(&sc->vc_mgr);
559 v = malloc(len, M_DEVBUF, M_NOWAIT);
561 device_printf(dev, "%s: unable to allocate memory\n",
563 ixl_vc_schedule_retry(&sc->vc_mgr);
567 v->vsi_id = sc->vsi_res->vsi_id;
568 v->num_elements = cnt;
570 /* Scan the filter array */
571 SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) {
572 if (f->flags & IXL_FILTER_ADD) {
573 bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16));
574 f->flags = IXL_FILTER_USED;
581 ixlv_send_pf_msg(sc, VIRTCHNL_OP_ADD_VLAN, (u8 *)v, len);
587 ** Scan the Filter Table looking for vlans that need
588 ** to be removed, then create the data to hand to the AQ
592 ixlv_del_vlans(struct ixlv_sc *sc)
594 device_t dev = sc->dev;
595 struct virtchnl_vlan_filter_list *v;
596 struct ixlv_vlan_filter *f, *ftmp;
597 int len, i = 0, cnt = 0;
599 /* Get count of VLAN filters to delete */
600 SLIST_FOREACH(f, sc->vlan_filters, next) {
601 if (f->flags & IXL_FILTER_DEL)
605 if (!cnt) { /* no work... */
606 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
607 VIRTCHNL_STATUS_SUCCESS);
611 len = sizeof(struct virtchnl_vlan_filter_list) +
614 if (len > IXL_AQ_BUF_SZ) {
615 device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
617 ixl_vc_schedule_retry(&sc->vc_mgr);
621 v = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
623 device_printf(dev, "%s: unable to allocate memory\n",
625 ixl_vc_schedule_retry(&sc->vc_mgr);
629 v->vsi_id = sc->vsi_res->vsi_id;
630 v->num_elements = cnt;
632 /* Scan the filter array */
633 SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) {
634 if (f->flags & IXL_FILTER_DEL) {
635 bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16));
637 SLIST_REMOVE(sc->vlan_filters, f, ixlv_vlan_filter, next);
644 ixlv_send_pf_msg(sc, VIRTCHNL_OP_DEL_VLAN, (u8 *)v, len);
651 ** This routine takes additions to the vsi filter
652 ** table and creates an Admin Queue call to create
653 ** the filters in the hardware.
656 ixlv_add_ether_filters(struct ixlv_sc *sc)
658 struct virtchnl_ether_addr_list *a;
659 struct ixlv_mac_filter *f;
660 device_t dev = sc->dev;
661 int len, j = 0, cnt = 0;
663 /* Get count of MAC addresses to add */
664 SLIST_FOREACH(f, sc->mac_filters, next) {
665 if (f->flags & IXL_FILTER_ADD)
668 if (cnt == 0) { /* Should not happen... */
669 DDPRINTF(dev, "cnt == 0, exiting...");
670 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER,
671 VIRTCHNL_STATUS_SUCCESS);
675 len = sizeof(struct virtchnl_ether_addr_list) +
676 (cnt * sizeof(struct virtchnl_ether_addr));
678 a = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
680 device_printf(dev, "%s: Failed to get memory for "
681 "virtchnl_ether_addr_list\n", __func__);
682 ixl_vc_schedule_retry(&sc->vc_mgr);
685 a->vsi_id = sc->vsi.id;
686 a->num_elements = cnt;
688 /* Scan the filter array */
689 SLIST_FOREACH(f, sc->mac_filters, next) {
690 if (f->flags & IXL_FILTER_ADD) {
691 bcopy(f->macaddr, a->list[j].addr, ETHER_ADDR_LEN);
692 f->flags &= ~IXL_FILTER_ADD;
695 DDPRINTF(dev, "ADD: " MAC_FORMAT,
696 MAC_FORMAT_ARGS(f->macaddr));
701 DDPRINTF(dev, "len %d, j %d, cnt %d",
704 VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)a, len);
711 ** This routine takes filters flagged for deletion in the
712 ** sc MAC filter list and creates an Admin Queue call
713 ** to delete those filters in the hardware.
716 ixlv_del_ether_filters(struct ixlv_sc *sc)
718 struct virtchnl_ether_addr_list *d;
719 device_t dev = sc->dev;
720 struct ixlv_mac_filter *f, *f_temp;
721 int len, j = 0, cnt = 0;
723 /* Get count of MAC addresses to delete */
724 SLIST_FOREACH(f, sc->mac_filters, next) {
725 if (f->flags & IXL_FILTER_DEL)
729 DDPRINTF(dev, "cnt == 0, exiting...");
730 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER,
731 VIRTCHNL_STATUS_SUCCESS);
735 len = sizeof(struct virtchnl_ether_addr_list) +
736 (cnt * sizeof(struct virtchnl_ether_addr));
738 d = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
740 device_printf(dev, "%s: Failed to get memory for "
741 "virtchnl_ether_addr_list\n", __func__);
742 ixl_vc_schedule_retry(&sc->vc_mgr);
745 d->vsi_id = sc->vsi.id;
746 d->num_elements = cnt;
748 /* Scan the filter array */
749 SLIST_FOREACH_SAFE(f, sc->mac_filters, next, f_temp) {
750 if (f->flags & IXL_FILTER_DEL) {
751 bcopy(f->macaddr, d->list[j].addr, ETHER_ADDR_LEN);
752 DDPRINTF(dev, "DEL: " MAC_FORMAT,
753 MAC_FORMAT_ARGS(f->macaddr));
755 SLIST_REMOVE(sc->mac_filters, f, ixlv_mac_filter, next);
762 VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)d, len);
769 ** ixlv_request_reset
770 ** Request that the PF reset this VF. No response is expected.
773 ixlv_request_reset(struct ixlv_sc *sc)
776 ** Set the reset status to "in progress" before
777 ** the request, this avoids any possibility of
778 ** a mistaken early detection of completion.
780 wr32(&sc->hw, I40E_VFGEN_RSTAT, VIRTCHNL_VFR_INPROGRESS);
781 ixlv_send_pf_msg(sc, VIRTCHNL_OP_RESET_VF, NULL, 0);
785 ** ixlv_request_stats
786 ** Request the statistics for this VF's VSI from PF.
789 ixlv_request_stats(struct ixlv_sc *sc)
791 struct virtchnl_queue_select vqs;
794 vqs.vsi_id = sc->vsi_res->vsi_id;
795 /* Low priority, we don't need to error check */
796 error = ixlv_send_pf_msg(sc, VIRTCHNL_OP_GET_STATS,
797 (u8 *)&vqs, sizeof(vqs));
800 device_printf(sc->dev, "Error sending stats request to PF: %d\n", error);
805 ** Updates driver's stats counters with VSI stats returned from PF.
808 ixlv_update_stats_counters(struct ixlv_sc *sc, struct i40e_eth_stats *es)
810 struct ixl_vsi *vsi = &sc->vsi;
811 uint64_t tx_discards;
813 tx_discards = es->tx_discards;
814 for (int i = 0; i < vsi->num_queues; i++)
815 tx_discards += sc->vsi.queues[i].txr.br->br_drops;
817 /* Update ifnet stats */
818 IXL_SET_IPACKETS(vsi, es->rx_unicast +
821 IXL_SET_OPACKETS(vsi, es->tx_unicast +
824 IXL_SET_IBYTES(vsi, es->rx_bytes);
825 IXL_SET_OBYTES(vsi, es->tx_bytes);
826 IXL_SET_IMCASTS(vsi, es->rx_multicast);
827 IXL_SET_OMCASTS(vsi, es->tx_multicast);
829 IXL_SET_OERRORS(vsi, es->tx_errors);
830 IXL_SET_IQDROPS(vsi, es->rx_discards);
831 IXL_SET_OQDROPS(vsi, tx_discards);
832 IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
833 IXL_SET_COLLISIONS(vsi, 0);
835 vsi->eth_stats = *es;
839 ixlv_config_rss_key(struct ixlv_sc *sc)
841 struct virtchnl_rss_key *rss_key_msg;
842 int msg_len, key_length;
843 u8 rss_seed[IXL_RSS_KEY_SIZE];
846 /* Fetch the configured RSS key */
847 rss_getkey((uint8_t *) &rss_seed);
849 ixl_get_default_rss_key((u32 *)rss_seed);
852 /* Send the fetched key */
853 key_length = IXL_RSS_KEY_SIZE;
854 msg_len = sizeof(struct virtchnl_rss_key) + (sizeof(u8) * key_length) - 1;
855 rss_key_msg = malloc(msg_len, M_DEVBUF, M_NOWAIT | M_ZERO);
856 if (rss_key_msg == NULL) {
857 device_printf(sc->dev, "Unable to allocate msg memory for RSS key msg.\n");
861 rss_key_msg->vsi_id = sc->vsi_res->vsi_id;
862 rss_key_msg->key_len = key_length;
863 bcopy(rss_seed, &rss_key_msg->key[0], key_length);
865 DDPRINTF(sc->dev, "config_rss: vsi_id %d, key_len %d",
866 rss_key_msg->vsi_id, rss_key_msg->key_len);
868 ixlv_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_RSS_KEY,
869 (u8 *)rss_key_msg, msg_len);
871 free(rss_key_msg, M_DEVBUF);
875 ixlv_set_rss_hena(struct ixlv_sc *sc)
877 struct virtchnl_rss_hena hena;
879 hena.hena = IXL_DEFAULT_RSS_HENA_X722;
881 ixlv_send_pf_msg(sc, VIRTCHNL_OP_SET_RSS_HENA,
882 (u8 *)&hena, sizeof(hena));
886 ixlv_config_rss_lut(struct ixlv_sc *sc)
888 struct virtchnl_rss_lut *rss_lut_msg;
894 lut_length = IXL_RSS_VSI_LUT_SIZE;
895 msg_len = sizeof(struct virtchnl_rss_lut) + (lut_length * sizeof(u8)) - 1;
896 rss_lut_msg = malloc(msg_len, M_DEVBUF, M_NOWAIT | M_ZERO);
897 if (rss_lut_msg == NULL) {
898 device_printf(sc->dev, "Unable to allocate msg memory for RSS lut msg.\n");
902 rss_lut_msg->vsi_id = sc->vsi_res->vsi_id;
903 /* Each LUT entry is a max of 1 byte, so this is easy */
904 rss_lut_msg->lut_entries = lut_length;
906 /* Populate the LUT with max no. of queues in round robin fashion */
907 for (i = 0; i < lut_length; i++) {
910 * Fetch the RSS bucket id for the given indirection entry.
911 * Cap it at the number of configured buckets (which is
914 que_id = rss_get_indirection_to_bucket(i);
915 que_id = que_id % sc->vsi.num_queues;
917 que_id = i % sc->vsi.num_queues;
919 lut = que_id & IXL_RSS_VSI_LUT_ENTRY_MASK;
920 rss_lut_msg->lut[i] = lut;
923 ixlv_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_RSS_LUT,
924 (u8 *)rss_lut_msg, msg_len);
926 free(rss_lut_msg, M_DEVBUF);
930 ** ixlv_vc_completion
932 ** Asynchronous completion function for admin queue messages. Rather than busy
933 ** wait, we fire off our requests and assume that no errors will be returned.
934 ** This function handles the reply messages.
937 ixlv_vc_completion(struct ixlv_sc *sc,
938 enum virtchnl_ops v_opcode,
939 enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
941 device_t dev = sc->dev;
942 struct ixl_vsi *vsi = &sc->vsi;
944 if (v_opcode == VIRTCHNL_OP_EVENT) {
945 struct virtchnl_pf_event *vpe =
946 (struct virtchnl_pf_event *)msg;
948 switch (vpe->event) {
949 case VIRTCHNL_EVENT_LINK_CHANGE:
951 device_printf(dev, "Link change: status %d, speed %d\n",
952 vpe->event_data.link_event.link_status,
953 vpe->event_data.link_event.link_speed);
956 vpe->event_data.link_event.link_status;
958 vpe->event_data.link_event.link_speed;
959 ixlv_update_link_status(sc);
961 case VIRTCHNL_EVENT_RESET_IMPENDING:
962 device_printf(dev, "PF initiated reset!\n");
963 sc->init_state = IXLV_RESET_PENDING;
964 mtx_unlock(&sc->mtx);
969 device_printf(dev, "%s: Unknown event %d from AQ\n",
970 __func__, vpe->event);
977 /* Catch-all error response */
980 "%s: AQ returned error %s to our request %s!\n",
981 __func__, i40e_vc_stat_str(&sc->hw, v_retval), ixl_vc_opcode_str(v_opcode));
985 if (v_opcode != VIRTCHNL_OP_GET_STATS)
986 DDPRINTF(dev, "opcode %d", v_opcode);
990 case VIRTCHNL_OP_GET_STATS:
991 ixlv_update_stats_counters(sc, (struct i40e_eth_stats *)msg);
993 case VIRTCHNL_OP_ADD_ETH_ADDR:
994 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER,
997 device_printf(dev, "WARNING: Error adding VF mac filter!\n");
998 device_printf(dev, "WARNING: Device may not receive traffic!\n");
1001 case VIRTCHNL_OP_DEL_ETH_ADDR:
1002 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER,
1005 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
1006 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIGURE_PROMISC,
1009 case VIRTCHNL_OP_ADD_VLAN:
1010 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
1013 case VIRTCHNL_OP_DEL_VLAN:
1014 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
1017 case VIRTCHNL_OP_ENABLE_QUEUES:
1018 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ENABLE_QUEUES,
1020 if (v_retval == 0) {
1021 /* Update link status */
1022 ixlv_update_link_status(sc);
1023 /* Turn on all interrupts */
1024 ixlv_enable_intr(vsi);
1025 /* And inform the stack we're ready */
1026 vsi->ifp->if_drv_flags |= IFF_DRV_RUNNING;
1027 /* TODO: Clear a state flag, so we know we're ready to run init again */
1030 case VIRTCHNL_OP_DISABLE_QUEUES:
1031 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DISABLE_QUEUES,
1033 if (v_retval == 0) {
1034 /* Turn off all interrupts */
1035 ixlv_disable_intr(vsi);
1036 /* Tell the stack that the interface is no longer active */
1037 vsi->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING);
1040 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
1041 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIGURE_QUEUES,
1044 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
1045 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_MAP_VECTORS,
1048 case VIRTCHNL_OP_CONFIG_RSS_KEY:
1049 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIG_RSS_KEY,
1052 case VIRTCHNL_OP_SET_RSS_HENA:
1053 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_SET_RSS_HENA,
1056 case VIRTCHNL_OP_CONFIG_RSS_LUT:
1057 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIG_RSS_LUT,
1063 "%s: Received unexpected message %s from PF.\n",
1064 __func__, ixl_vc_opcode_str(v_opcode));
1072 ixl_vc_send_cmd(struct ixlv_sc *sc, uint32_t request)
1076 case IXLV_FLAG_AQ_MAP_VECTORS:
1077 ixlv_map_queues(sc);
1080 case IXLV_FLAG_AQ_ADD_MAC_FILTER:
1081 ixlv_add_ether_filters(sc);
1084 case IXLV_FLAG_AQ_ADD_VLAN_FILTER:
1088 case IXLV_FLAG_AQ_DEL_MAC_FILTER:
1089 ixlv_del_ether_filters(sc);
1092 case IXLV_FLAG_AQ_DEL_VLAN_FILTER:
1096 case IXLV_FLAG_AQ_CONFIGURE_QUEUES:
1097 ixlv_configure_queues(sc);
1100 case IXLV_FLAG_AQ_DISABLE_QUEUES:
1101 ixlv_disable_queues(sc);
1104 case IXLV_FLAG_AQ_ENABLE_QUEUES:
1105 ixlv_enable_queues(sc);
1108 case IXLV_FLAG_AQ_CONFIG_RSS_KEY:
1109 ixlv_config_rss_key(sc);
1112 case IXLV_FLAG_AQ_SET_RSS_HENA:
1113 ixlv_set_rss_hena(sc);
1116 case IXLV_FLAG_AQ_CONFIG_RSS_LUT:
1117 ixlv_config_rss_lut(sc);
1123 ixl_vc_init_mgr(struct ixlv_sc *sc, struct ixl_vc_mgr *mgr)
1126 mgr->current = NULL;
1127 TAILQ_INIT(&mgr->pending);
1128 callout_init_mtx(&mgr->callout, &sc->mtx, 0);
1132 ixl_vc_process_completion(struct ixl_vc_mgr *mgr, enum i40e_status_code err)
1134 struct ixl_vc_cmd *cmd;
1137 mgr->current = NULL;
1138 cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY;
1140 cmd->callback(cmd, cmd->arg, err);
1141 ixl_vc_process_next(mgr);
1145 ixl_vc_process_resp(struct ixl_vc_mgr *mgr, uint32_t request,
1146 enum virtchnl_status_code err)
1148 struct ixl_vc_cmd *cmd;
1151 if (cmd == NULL || cmd->request != request)
1154 callout_stop(&mgr->callout);
1155 /* ATM, the virtchnl codes map to i40e ones directly */
1156 ixl_vc_process_completion(mgr, (enum i40e_status_code)err);
1160 ixl_vc_cmd_timeout(void *arg)
1162 struct ixl_vc_mgr *mgr = (struct ixl_vc_mgr *)arg;
1164 IXLV_CORE_LOCK_ASSERT(mgr->sc);
1165 ixl_vc_process_completion(mgr, I40E_ERR_TIMEOUT);
1169 ixl_vc_cmd_retry(void *arg)
1171 struct ixl_vc_mgr *mgr = (struct ixl_vc_mgr *)arg;
1173 IXLV_CORE_LOCK_ASSERT(mgr->sc);
1174 ixl_vc_send_current(mgr);
1178 ixl_vc_send_current(struct ixl_vc_mgr *mgr)
1180 struct ixl_vc_cmd *cmd;
1183 ixl_vc_send_cmd(mgr->sc, cmd->request);
1184 callout_reset(&mgr->callout, IXLV_VC_TIMEOUT, ixl_vc_cmd_timeout, mgr);
1188 ixl_vc_process_next(struct ixl_vc_mgr *mgr)
1190 struct ixl_vc_cmd *cmd;
1192 if (mgr->current != NULL)
1195 if (TAILQ_EMPTY(&mgr->pending))
1198 cmd = TAILQ_FIRST(&mgr->pending);
1199 TAILQ_REMOVE(&mgr->pending, cmd, next);
1202 ixl_vc_send_current(mgr);
1206 ixl_vc_schedule_retry(struct ixl_vc_mgr *mgr)
1209 callout_reset(&mgr->callout, howmany(hz, 100), ixl_vc_cmd_retry, mgr);
1213 ixl_vc_enqueue(struct ixl_vc_mgr *mgr, struct ixl_vc_cmd *cmd,
1214 uint32_t req, ixl_vc_callback_t *callback, void *arg)
1216 IXLV_CORE_LOCK_ASSERT(mgr->sc);
1218 if (cmd->flags & IXLV_VC_CMD_FLAG_BUSY) {
1219 if (mgr->current == cmd)
1220 mgr->current = NULL;
1222 TAILQ_REMOVE(&mgr->pending, cmd, next);
1226 cmd->callback = callback;
1228 cmd->flags |= IXLV_VC_CMD_FLAG_BUSY;
1229 TAILQ_INSERT_TAIL(&mgr->pending, cmd, next);
1231 ixl_vc_process_next(mgr);
1235 ixl_vc_flush(struct ixl_vc_mgr *mgr)
1237 struct ixl_vc_cmd *cmd;
1239 IXLV_CORE_LOCK_ASSERT(mgr->sc);
1240 KASSERT(TAILQ_EMPTY(&mgr->pending) || mgr->current != NULL,
1241 ("ixlv: pending commands waiting but no command in progress"));
1245 mgr->current = NULL;
1246 cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY;
1247 cmd->callback(cmd, cmd->arg, I40E_ERR_ADAPTER_STOPPED);
1250 while ((cmd = TAILQ_FIRST(&mgr->pending)) != NULL) {
1251 TAILQ_REMOVE(&mgr->pending, cmd, next);
1252 cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY;
1253 cmd->callback(cmd, cmd->arg, I40E_ERR_ADAPTER_STOPPED);
1256 callout_stop(&mgr->callout);