/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

/*
** Virtual Channel support
** These are support functions for communication
** between the VF and PF drivers.
*/

#include "ixl.h"
#include "ixlv.h"
#include "i40e_prototype.h"

/* busy wait delay in msec */
#define IXLV_BUSY_WAIT_DELAY 10
#define IXLV_BUSY_WAIT_COUNT 50

static void	ixl_vc_process_resp(struct ixl_vc_mgr *, uint32_t,
		    enum i40e_status_code);
static void	ixl_vc_process_next(struct ixl_vc_mgr *mgr);
static void	ixl_vc_schedule_retry(struct ixl_vc_mgr *mgr);
static void	ixl_vc_send_current(struct ixl_vc_mgr *mgr);

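/*
** Overview: requests to the PF flow through the small command manager at the
** bottom of this file. Callers queue work with ixl_vc_enqueue();
** ixl_vc_process_next() pulls one command at a time and ixl_vc_send_current()
** issues it via ixlv_send_pf_msg(), arming a timeout callout. When the PF
** answers on the admin queue, ixlv_vc_completion() matches the reply to the
** outstanding request through ixl_vc_process_resp(), which runs the caller's
** callback and starts the next pending command.
*/
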
/*
** ixl_vc_validate_vf_msg
** Validate VF messages
*/
static int ixl_vc_validate_vf_msg(struct ixlv_sc *sc, u32 v_opcode,
    u8 *msg, u16 msglen)
{
	bool err_msg_format = false;
	int valid_len;

	/* Validate message length. */
	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_VERSION:
		valid_len = sizeof(struct i40e_virtchnl_version_info);
		break;
	case I40E_VIRTCHNL_OP_RESET_VF:
		valid_len = 0;
		break;
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
		/* Valid length in api v1.0 is 0, v1.1 is 4 */
		valid_len = 4;
		break;
	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
		valid_len = sizeof(struct i40e_virtchnl_txq_info);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
		valid_len = sizeof(struct i40e_virtchnl_rxq_info);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_vsi_queue_config_info *vqc =
			    (struct i40e_virtchnl_vsi_queue_config_info *)msg;
			valid_len += (vqc->num_queue_pairs *
			    sizeof(struct i40e_virtchnl_queue_pair_info));
			if (vqc->num_queue_pairs == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_irq_map_info *vimi =
			    (struct i40e_virtchnl_irq_map_info *)msg;
			valid_len += (vimi->num_vectors *
			    sizeof(struct i40e_virtchnl_vector_map));
			if (vimi->num_vectors == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		valid_len = sizeof(struct i40e_virtchnl_queue_select);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_ether_addr_list *veal =
			    (struct i40e_virtchnl_ether_addr_list *)msg;
			valid_len += veal->num_elements *
			    sizeof(struct i40e_virtchnl_ether_addr);
			if (veal->num_elements == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_vlan_filter_list *vfl =
			    (struct i40e_virtchnl_vlan_filter_list *)msg;
			valid_len += vfl->num_elements * sizeof(u16);
			if (vfl->num_elements == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		valid_len = sizeof(struct i40e_virtchnl_promisc_info);
		break;
	case I40E_VIRTCHNL_OP_GET_STATS:
		valid_len = sizeof(struct i40e_virtchnl_queue_select);
		break;
	/* These are always errors coming from the VF. */
	case I40E_VIRTCHNL_OP_EVENT:
	case I40E_VIRTCHNL_OP_UNKNOWN:
	default:
		return (EPERM);
	}
	/* few more checks */
	if ((valid_len != msglen) || (err_msg_format))
		return (EINVAL);
	else
		return (0);
}

/*
** ixlv_send_pf_msg
**
** Send message to PF and print status if failure.
*/
static int
ixlv_send_pf_msg(struct ixlv_sc *sc,
    enum i40e_virtchnl_ops op, u8 *msg, u16 len)
{
	struct i40e_hw	*hw = &sc->hw;
	device_t	dev = sc->dev;
	i40e_status	err;
	int		val_err;

	/*
	** Pre-validating messages to the PF
	*/
	val_err = ixl_vc_validate_vf_msg(sc, op, msg, len);
	if (val_err)
		device_printf(dev, "Error validating msg to PF for op %d,"
		    " msglen %d: error %d\n", op, len, val_err);

	err = i40e_aq_send_msg_to_pf(hw, op, I40E_SUCCESS, msg, len, NULL);
	if (err)
		device_printf(dev, "Unable to send opcode %s to PF, "
		    "status %s, aq error %s\n",
		    ixl_vc_opcode_str(op),
		    i40e_stat_str(hw, err),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
	return (err);
}

/*
** ixlv_send_api_ver
**
** Send API version admin queue message to the PF. The reply is not checked
** in this function. Returns 0 if the message was successfully
** sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
*/
int
ixlv_send_api_ver(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_version_info vvi;

	vvi.major = I40E_VIRTCHNL_VERSION_MAJOR;
	vvi.minor = I40E_VIRTCHNL_VERSION_MINOR;

	return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_VERSION,
	    (u8 *)&vvi, sizeof(vvi));
}

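/*
** Typical init-time sequence (sketch only; the actual call sites live in the
** attach/init path outside this file, so treat the ordering below as an
** illustration rather than the driver's exact code):
**
**	error = ixlv_send_api_ver(sc);
**	if (error == 0)
**		error = ixlv_verify_api_ver(sc);
**	if (error == 0)
**		error = ixlv_send_vf_config_msg(sc);
**	if (error == 0)
**		error = ixlv_get_vf_config(sc);
*/
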
/*
** ixlv_verify_api_ver
**
** Compare API versions with the PF. Must be called after admin queue is
** initialized. Returns 0 if API versions match, EIO if
** they do not, or I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty.
*/
int
ixlv_verify_api_ver(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_version_info *pf_vvi;
	struct i40e_hw *hw = &sc->hw;
	struct i40e_arq_event_info event;
	device_t dev = sc->dev;
	i40e_status err;
	int retries = 0;

	event.buf_len = IXL_AQ_BUF_SZ;
	event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT);
	if (!event.msg_buf) {
		err = ENOMEM;
		goto out;
	}

	for (;;) {
		if (++retries > IXLV_AQ_MAX_ERR)
			goto out_alloc;

		/* Initial delay here is necessary */
		i40e_msec_pause(100);
		err = i40e_clean_arq_element(hw, &event, NULL);
		if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
			continue;
		else if (err) {
			err = EIO;
			goto out_alloc;
		}

		if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
		    I40E_VIRTCHNL_OP_VERSION) {
			DDPRINTF(dev, "Received unexpected op response: %d\n",
			    le32toh(event.desc.cookie_high));
			/* Don't stop looking for expected response */
			continue;
		}

		err = (i40e_status)le32toh(event.desc.cookie_low);
		if (err) {
			err = EIO;
			goto out_alloc;
		} else
			break;
	}

	pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
	if ((pf_vvi->major > I40E_VIRTCHNL_VERSION_MAJOR) ||
	    ((pf_vvi->major == I40E_VIRTCHNL_VERSION_MAJOR) &&
	    (pf_vvi->minor > I40E_VIRTCHNL_VERSION_MINOR))) {
		device_printf(dev, "Critical PF/VF API version mismatch!\n");
		err = EIO;
	} else
		sc->pf_version = pf_vvi->minor;

	/* Log PF/VF api versions */
	device_printf(dev, "PF API %d.%d / VF API %d.%d\n",
	    pf_vvi->major, pf_vvi->minor,
	    I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR);

out_alloc:
	free(event.msg_buf, M_DEVBUF);
out:
	return (err);
}

/*
** ixlv_send_vf_config_msg
**
** Send VF configuration request admin queue message to the PF. The reply
** is not checked in this function. Returns 0 if the message was
** successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
*/
int
ixlv_send_vf_config_msg(struct ixlv_sc *sc)
{
	u32	caps;

	caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
	    I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF |
	    I40E_VIRTCHNL_VF_OFFLOAD_VLAN;

	if (sc->pf_version == I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
		return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
		    NULL, 0);
	else
		return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
		    (u8 *)&caps, sizeof(caps));
}

/*
** ixlv_get_vf_config
**
** Get VF configuration from PF and populate hw structure. Must be called after
** admin queue is initialized. Busy waits until response is received from PF,
** with maximum timeout. Response from PF is returned in the buffer for further
** processing by the caller.
*/
int
ixlv_get_vf_config(struct ixlv_sc *sc)
{
	struct i40e_hw	*hw = &sc->hw;
	device_t	dev = sc->dev;
	struct i40e_arq_event_info event;
	u16 len;
	i40e_status err = I40E_SUCCESS;
	u32 retries = 0;

	/* Note this assumes a single VSI */
	len = sizeof(struct i40e_virtchnl_vf_resource) +
	    sizeof(struct i40e_virtchnl_vsi_resource);
	event.buf_len = len;
	event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT);
	if (!event.msg_buf) {
		err = ENOMEM;
		goto out;
	}

	for (;;) {
		err = i40e_clean_arq_element(hw, &event, NULL);
		if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
			if (++retries <= IXLV_AQ_MAX_ERR)
				i40e_msec_pause(10);
		} else if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
		    I40E_VIRTCHNL_OP_GET_VF_RESOURCES) {
			DDPRINTF(dev, "Received a response from PF,"
			    " opcode %d, error %d",
			    le32toh(event.desc.cookie_high),
			    le32toh(event.desc.cookie_low));
			retries++;
			continue;
		} else {
			err = (i40e_status)le32toh(event.desc.cookie_low);
			if (err) {
				device_printf(dev, "%s: Error returned from PF,"
				    " opcode %d, error %d\n", __func__,
				    le32toh(event.desc.cookie_high),
				    le32toh(event.desc.cookie_low));
				err = EIO;
				goto out_alloc;
			}
			/* We retrieved the config message, with no errors */
			break;
		}

		if (retries > IXLV_AQ_MAX_ERR) {
			INIT_DBG_DEV(dev, "Did not receive response after %d tries.",
			    retries);
			err = ETIMEDOUT;
			goto out_alloc;
		}
	}

	memcpy(sc->vf_res, event.msg_buf, min(event.msg_len, len));
	i40e_vf_parse_hw_config(hw, sc->vf_res);

out_alloc:
	free(event.msg_buf, M_DEVBUF);
out:
	return (err);
}

/*
** ixlv_configure_queues
**
** Request that the PF set up our queues.
*/
void
ixlv_configure_queues(struct ixlv_sc *sc)
{
	device_t		dev = sc->dev;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;
	struct tx_ring		*txr;
	struct rx_ring		*rxr;
	int			len, pairs;

	struct i40e_virtchnl_vsi_queue_config_info *vqci;
	struct i40e_virtchnl_queue_pair_info *vqpi;

	pairs = vsi->num_queues;
	len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
	    (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
	vqci = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!vqci) {
		device_printf(dev, "%s: unable to allocate memory\n", __func__);
		ixl_vc_schedule_retry(&sc->vc_mgr);
		return;
	}
	vqci->vsi_id = sc->vsi_res->vsi_id;
	vqci->num_queue_pairs = pairs;
	vqpi = vqci->qpair;
	/* Size check is not needed here - HW max is 16 queue pairs, and we
	 * can fit info for 31 of them into the AQ buffer before it overflows.
	 */
	for (int i = 0; i < pairs; i++, que++, vqpi++) {
		txr = &que->txr;
		rxr = &que->rxr;
		vqpi->txq.vsi_id = vqci->vsi_id;
		vqpi->txq.queue_id = i;
		vqpi->txq.ring_len = que->num_desc;
		vqpi->txq.dma_ring_addr = txr->dma.pa;
		/* Enable Head writeback */
		vqpi->txq.headwb_enabled = 1;
		vqpi->txq.dma_headwb_addr = txr->dma.pa +
		    (que->num_desc * sizeof(struct i40e_tx_desc));

		vqpi->rxq.vsi_id = vqci->vsi_id;
		vqpi->rxq.queue_id = i;
		vqpi->rxq.ring_len = que->num_desc;
		vqpi->rxq.dma_ring_addr = rxr->dma.pa;
		vqpi->rxq.max_pkt_size = vsi->max_frame_size;
		vqpi->rxq.databuffer_size = rxr->mbuf_sz;
		vqpi->rxq.splithdr_enabled = 0;
	}

	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
	    (u8 *)vqci, len);
	free(vqci, M_DEVBUF);
}

/*
** ixlv_enable_queues
**
** Request that the PF enable all of our queues.
*/
void
ixlv_enable_queues(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_queue_select vqs;

	vqs.vsi_id = sc->vsi_res->vsi_id;
	/* (1 << n) - 1 sets one bit for each of the n queue pairs */
	vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
	vqs.rx_queues = vqs.tx_queues;
	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
	    (u8 *)&vqs, sizeof(vqs));
}

/*
** ixlv_disable_queues
**
** Request that the PF disable all of our queues.
*/
void
ixlv_disable_queues(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_queue_select vqs;

	vqs.vsi_id = sc->vsi_res->vsi_id;
	vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
	vqs.rx_queues = vqs.tx_queues;
	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
	    (u8 *)&vqs, sizeof(vqs));
}

/*
** ixlv_map_queues
**
** Request that the PF map queues to interrupt vectors. Misc causes, including
** admin queue, are always mapped to vector 0.
*/
void
ixlv_map_queues(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_irq_map_info *vm;
	int			i, q, len;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;

	/* How many queue vectors, adminq uses one */
	q = sc->msix - 1;

	len = sizeof(struct i40e_virtchnl_irq_map_info) +
	    (sc->msix * sizeof(struct i40e_virtchnl_vector_map));
	vm = malloc(len, M_DEVBUF, M_NOWAIT);
	if (!vm) {
		printf("%s: unable to allocate memory\n", __func__);
		ixl_vc_schedule_retry(&sc->vc_mgr);
		return;
	}

	vm->num_vectors = sc->msix;
	/* Queue vectors first */
	for (i = 0; i < q; i++, que++) {
		vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id;
		vm->vecmap[i].vector_id = i + 1; /* first is adminq */
		vm->vecmap[i].txq_map = (1 << que->me);
		vm->vecmap[i].rxq_map = (1 << que->me);
		vm->vecmap[i].rxitr_idx = 0;
		vm->vecmap[i].txitr_idx = 1;
	}

	/* Misc vector last - this is only for AdminQ messages */
	vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id;
	vm->vecmap[i].vector_id = 0;
	vm->vecmap[i].txq_map = 0;
	vm->vecmap[i].rxq_map = 0;
	vm->vecmap[i].rxitr_idx = 0;
	vm->vecmap[i].txitr_idx = 0;

	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
	    (u8 *)vm, len);
	free(vm, M_DEVBUF);
}

/*
** Scan the Filter List looking for vlans that need
** to be added, then create the data to hand to the AQ
** for handling.
*/
void
ixlv_add_vlans(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_vlan_filter_list *v;
	struct ixlv_vlan_filter *f, *ftmp;
	device_t	dev = sc->dev;
	int		len, i = 0, cnt = 0;

	/* Get count of VLAN filters to add */
	SLIST_FOREACH(f, sc->vlan_filters, next) {
		if (f->flags & IXL_FILTER_ADD)
			cnt++;
	}

	if (!cnt) { /* no work... */
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
		    I40E_SUCCESS);
		return;
	}

	len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
	    (cnt * sizeof(u16));

	if (len > IXL_AQ_BUF_SZ) {
		device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
		    __func__);
		ixl_vc_schedule_retry(&sc->vc_mgr);
		return;
	}

	v = malloc(len, M_DEVBUF, M_NOWAIT);
	if (!v) {
		device_printf(dev, "%s: unable to allocate memory\n",
		    __func__);
		ixl_vc_schedule_retry(&sc->vc_mgr);
		return;
	}

	v->vsi_id = sc->vsi_res->vsi_id;
	v->num_elements = cnt;

	/* Scan the filter array */
	SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) {
		if (f->flags & IXL_FILTER_ADD) {
			bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16));
			f->flags = IXL_FILTER_USED;
			i++;
		}
		if (i == cnt)
			break;
	}

	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)v, len);
	free(v, M_DEVBUF);
}

/*
** Scan the Filter Table looking for vlans that need
** to be removed, then create the data to hand to the AQ
** for handling.
*/
void
ixlv_del_vlans(struct ixlv_sc *sc)
{
	device_t	dev = sc->dev;
	struct i40e_virtchnl_vlan_filter_list *v;
	struct ixlv_vlan_filter *f, *ftmp;
	int len, i = 0, cnt = 0;

	/* Get count of VLAN filters to delete */
	SLIST_FOREACH(f, sc->vlan_filters, next) {
		if (f->flags & IXL_FILTER_DEL)
			cnt++;
	}

	if (!cnt) { /* no work... */
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
		    I40E_SUCCESS);
		return;
	}

	len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
	    (cnt * sizeof(u16));

	if (len > IXL_AQ_BUF_SZ) {
		device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
		    __func__);
		ixl_vc_schedule_retry(&sc->vc_mgr);
		return;
	}

	v = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!v) {
		device_printf(dev, "%s: unable to allocate memory\n",
		    __func__);
		ixl_vc_schedule_retry(&sc->vc_mgr);
		return;
	}

	v->vsi_id = sc->vsi_res->vsi_id;
	v->num_elements = cnt;

	/* Scan the filter array */
	SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) {
		if (f->flags & IXL_FILTER_DEL) {
			bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16));
			i++;
			SLIST_REMOVE(sc->vlan_filters, f, ixlv_vlan_filter, next);
			free(f, M_DEVBUF);
		}
		if (i == cnt)
			break;
	}

	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)v, len);
	free(v, M_DEVBUF);
}

/*
** ixlv_add_ether_filters
**
** This routine takes additions to the vsi filter
** table and creates an Admin Queue call to create
** the filters in the hardware.
*/
void
ixlv_add_ether_filters(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_ether_addr_list *a;
	struct ixlv_mac_filter	*f;
	device_t dev = sc->dev;
	int len, j = 0, cnt = 0;

	/* Get count of MAC addresses to add */
	SLIST_FOREACH(f, sc->mac_filters, next) {
		if (f->flags & IXL_FILTER_ADD)
			cnt++;
	}
	if (cnt == 0) { /* Should not happen... */
		DDPRINTF(dev, "cnt == 0, exiting...");
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER,
		    I40E_SUCCESS);
		return;
	}

	len = sizeof(struct i40e_virtchnl_ether_addr_list) +
	    (cnt * sizeof(struct i40e_virtchnl_ether_addr));

	a = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (a == NULL) {
		device_printf(dev, "%s: Failed to get memory for "
		    "virtchnl_ether_addr_list\n", __func__);
		ixl_vc_schedule_retry(&sc->vc_mgr);
		return;
	}
	a->vsi_id = sc->vsi.id;
	a->num_elements = cnt;

	/* Scan the filter array */
	SLIST_FOREACH(f, sc->mac_filters, next) {
		if (f->flags & IXL_FILTER_ADD) {
			bcopy(f->macaddr, a->list[j].addr, ETHER_ADDR_LEN);
			f->flags &= ~IXL_FILTER_ADD;
			j++;

			DDPRINTF(dev, "ADD: " MAC_FORMAT,
			    MAC_FORMAT_ARGS(f->macaddr));
		}
		if (j == cnt)
			break;
	}
	DDPRINTF(dev, "len %d, j %d, cnt %d",
	    len, j, cnt);
	ixlv_send_pf_msg(sc,
	    I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, (u8 *)a, len);
	free(a, M_DEVBUF);
}

/*
** ixlv_del_ether_filters
**
** This routine takes filters flagged for deletion in the
** sc MAC filter list and creates an Admin Queue call
** to delete those filters in the hardware.
*/
void
ixlv_del_ether_filters(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_ether_addr_list *d;
	device_t dev = sc->dev;
	struct ixlv_mac_filter *f, *f_temp;
	int len, j = 0, cnt = 0;

	/* Get count of MAC addresses to delete */
	SLIST_FOREACH(f, sc->mac_filters, next) {
		if (f->flags & IXL_FILTER_DEL)
			cnt++;
	}
	if (cnt == 0) {
		DDPRINTF(dev, "cnt == 0, exiting...");
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER,
		    I40E_SUCCESS);
		return;
	}

	len = sizeof(struct i40e_virtchnl_ether_addr_list) +
	    (cnt * sizeof(struct i40e_virtchnl_ether_addr));

	d = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (d == NULL) {
		device_printf(dev, "%s: Failed to get memory for "
		    "virtchnl_ether_addr_list\n", __func__);
		ixl_vc_schedule_retry(&sc->vc_mgr);
		return;
	}
	d->vsi_id = sc->vsi.id;
	d->num_elements = cnt;

	/* Scan the filter array */
	SLIST_FOREACH_SAFE(f, sc->mac_filters, next, f_temp) {
		if (f->flags & IXL_FILTER_DEL) {
			bcopy(f->macaddr, d->list[j].addr, ETHER_ADDR_LEN);
			DDPRINTF(dev, "DEL: " MAC_FORMAT,
			    MAC_FORMAT_ARGS(f->macaddr));
			j++;
			SLIST_REMOVE(sc->mac_filters, f, ixlv_mac_filter, next);
			free(f, M_DEVBUF);
		}
		if (j == cnt)
			break;
	}
	ixlv_send_pf_msg(sc,
	    I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, (u8 *)d, len);
	free(d, M_DEVBUF);
}

/*
** ixlv_request_reset
** Request that the PF reset this VF. No response is expected.
*/
void
ixlv_request_reset(struct ixlv_sc *sc)
{
	/*
	** Set the reset status to "in progress" before
	** the request, this avoids any possibility of
	** a mistaken early detection of completion.
	*/
	wr32(&sc->hw, I40E_VFGEN_RSTAT, I40E_VFR_INPROGRESS);
	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0);
}

/*
** ixlv_request_stats
** Request the statistics for this VF's VSI from PF.
*/
void
ixlv_request_stats(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_queue_select vqs;
	int error = 0;

	vqs.vsi_id = sc->vsi_res->vsi_id;
	/* Low priority, we don't need to error check */
	error = ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_STATS,
	    (u8 *)&vqs, sizeof(vqs));

	if (error)
		device_printf(sc->dev, "Error sending stats request to PF: %d\n", error);
}

/*
** ixlv_update_stats_counters
**
** Updates driver's stats counters with VSI stats returned from PF.
*/
void
ixlv_update_stats_counters(struct ixlv_sc *sc, struct i40e_eth_stats *es)
{
	struct ixl_vsi *vsi = &sc->vsi;
	uint64_t tx_discards;

	tx_discards = es->tx_discards;
	for (int i = 0; i < vsi->num_queues; i++)
		tx_discards += sc->vsi.queues[i].txr.br->br_drops;

	/* Update ifnet stats */
	IXL_SET_IPACKETS(vsi, es->rx_unicast +
	    es->rx_multicast +
	    es->rx_broadcast);
	IXL_SET_OPACKETS(vsi, es->tx_unicast +
	    es->tx_multicast +
	    es->tx_broadcast);
	IXL_SET_IBYTES(vsi, es->rx_bytes);
	IXL_SET_OBYTES(vsi, es->tx_bytes);
	IXL_SET_IMCASTS(vsi, es->rx_multicast);
	IXL_SET_OMCASTS(vsi, es->tx_multicast);

	IXL_SET_OERRORS(vsi, es->tx_errors);
	IXL_SET_IQDROPS(vsi, es->rx_discards);
	IXL_SET_OQDROPS(vsi, tx_discards);
	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
	IXL_SET_COLLISIONS(vsi, 0);

	vsi->eth_stats = *es;
}

void
ixlv_config_rss_key(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_rss_key *rss_key_msg;
	int msg_len, key_length;
	u8 rss_seed[IXL_RSS_KEY_SIZE];

#ifdef RSS
	/* Fetch the configured RSS key */
	rss_getkey((uint8_t *)&rss_seed);
#else
	ixl_get_default_rss_key((u32 *)rss_seed);
#endif

	/* Send the fetched key */
	key_length = IXL_RSS_KEY_SIZE;
	/* The struct already declares one key byte (key[1]), hence the - 1 */
	msg_len = sizeof(struct i40e_virtchnl_rss_key) + (sizeof(u8) * key_length) - 1;
	rss_key_msg = malloc(msg_len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (rss_key_msg == NULL) {
		device_printf(sc->dev, "Unable to allocate msg memory for RSS key msg.\n");
		return;
	}

	rss_key_msg->vsi_id = sc->vsi_res->vsi_id;
	rss_key_msg->key_len = key_length;
	bcopy(rss_seed, &rss_key_msg->key[0], key_length);

	DDPRINTF(sc->dev, "config_rss: vsi_id %d, key_len %d",
	    rss_key_msg->vsi_id, rss_key_msg->key_len);

	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
	    (u8 *)rss_key_msg, msg_len);

	free(rss_key_msg, M_DEVBUF);
}

void
ixlv_set_rss_hena(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_rss_hena hena;

	hena.hena = IXL_DEFAULT_RSS_HENA_X722;

	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_SET_RSS_HENA,
	    (u8 *)&hena, sizeof(hena));
}

void
ixlv_config_rss_lut(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_rss_lut *rss_lut_msg;
	int msg_len;
	u16 lut_length;
	u32 lut;
	int i, que_id;

	lut_length = IXL_RSS_VSI_LUT_SIZE;
	msg_len = sizeof(struct i40e_virtchnl_rss_lut) + (lut_length * sizeof(u8)) - 1;
	rss_lut_msg = malloc(msg_len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (rss_lut_msg == NULL) {
		device_printf(sc->dev, "Unable to allocate msg memory for RSS lut msg.\n");
		return;
	}

	rss_lut_msg->vsi_id = sc->vsi_res->vsi_id;
	/* Each LUT entry is a max of 1 byte, so this is easy */
	rss_lut_msg->lut_entries = lut_length;

	/* Populate the LUT with max no. of queues in round robin fashion */
	for (i = 0; i < lut_length; i++) {
#ifdef RSS
		/*
		 * Fetch the RSS bucket id for the given indirection entry.
		 * Cap it at the number of configured buckets (which is
		 * num_queues.)
		 */
		que_id = rss_get_indirection_to_bucket(i);
		que_id = que_id % sc->vsi.num_queues;
#else
		que_id = i % sc->vsi.num_queues;
#endif
		lut = que_id & IXL_RSS_VSI_LUT_ENTRY_MASK;
		rss_lut_msg->lut[i] = lut;
	}

	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
	    (u8 *)rss_lut_msg, msg_len);

	free(rss_lut_msg, M_DEVBUF);
}

/*
** ixlv_vc_completion
**
** Asynchronous completion function for admin queue messages. Rather than busy
** wait, we fire off our requests and assume that no errors will be returned.
** This function handles the reply messages.
*/
void
ixlv_vc_completion(struct ixlv_sc *sc,
    enum i40e_virtchnl_ops v_opcode,
    i40e_status v_retval, u8 *msg, u16 msglen)
{
	device_t	dev = sc->dev;
	struct ixl_vsi	*vsi = &sc->vsi;

	if (v_opcode == I40E_VIRTCHNL_OP_EVENT) {
		struct i40e_virtchnl_pf_event *vpe =
		    (struct i40e_virtchnl_pf_event *)msg;

		switch (vpe->event) {
		case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
#ifdef IXL_DEBUG
			device_printf(dev, "Link change: status %d, speed %d\n",
			    vpe->event_data.link_event.link_status,
			    vpe->event_data.link_event.link_speed);
#endif
			sc->link_up =
			    vpe->event_data.link_event.link_status;
			sc->link_speed =
			    vpe->event_data.link_event.link_speed;
			ixlv_update_link_status(sc);
			break;
		case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
			device_printf(dev, "PF initiated reset!\n");
			sc->init_state = IXLV_RESET_PENDING;
			mtx_unlock(&sc->mtx);
			ixlv_init(sc);
			mtx_lock(&sc->mtx);
			break;
		default:
			device_printf(dev, "%s: Unknown event %d from AQ\n",
			    __func__, vpe->event);
			break;
		}

		return;
	}

	/* Catch-all error response */
	if (v_retval) {
		device_printf(dev,
		    "%s: AQ returned error %s to our request %s!\n",
		    __func__, i40e_stat_str(&sc->hw, v_retval), ixl_vc_opcode_str(v_opcode));
	}

#ifdef IXL_DEBUG
	if (v_opcode != I40E_VIRTCHNL_OP_GET_STATS)
		DDPRINTF(dev, "opcode %d", v_opcode);
#endif

	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_GET_STATS:
		ixlv_update_stats_counters(sc, (struct i40e_eth_stats *)msg);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER,
		    v_retval);
		if (v_retval) {
			device_printf(dev, "WARNING: Error adding VF mac filter!\n");
			device_printf(dev, "WARNING: Device may not receive traffic!\n");
		}
		break;
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER,
		    v_retval);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIGURE_PROMISC,
		    v_retval);
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
		    v_retval);
		break;
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
		    v_retval);
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ENABLE_QUEUES,
		    v_retval);
		if (v_retval == 0) {
			/* Update link status */
			ixlv_update_link_status(sc);
			/* Turn on all interrupts */
			ixlv_enable_intr(vsi);
			/* And inform the stack we're ready */
			vsi->ifp->if_drv_flags |= IFF_DRV_RUNNING;
			/* TODO: Clear a state flag, so we know we're ready to run init again */
		}
		break;
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DISABLE_QUEUES,
		    v_retval);
		if (v_retval == 0) {
			/* Turn off all interrupts */
			ixlv_disable_intr(vsi);
			/* Tell the stack that the interface is no longer active */
			vsi->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING);
		}
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIGURE_QUEUES,
		    v_retval);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_MAP_VECTORS,
		    v_retval);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIG_RSS_KEY,
		    v_retval);
		break;
	case I40E_VIRTCHNL_OP_SET_RSS_HENA:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_SET_RSS_HENA,
		    v_retval);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIG_RSS_LUT,
		    v_retval);
		break;
	default:
#ifdef IXL_DEBUG
		device_printf(dev,
		    "%s: Received unexpected message %s from PF.\n",
		    __func__, ixl_vc_opcode_str(v_opcode));
#endif
		break;
	}
}

static void
ixl_vc_send_cmd(struct ixlv_sc *sc, uint32_t request)
{

	switch (request) {
	case IXLV_FLAG_AQ_MAP_VECTORS:
		ixlv_map_queues(sc);
		break;

	case IXLV_FLAG_AQ_ADD_MAC_FILTER:
		ixlv_add_ether_filters(sc);
		break;

	case IXLV_FLAG_AQ_ADD_VLAN_FILTER:
		ixlv_add_vlans(sc);
		break;

	case IXLV_FLAG_AQ_DEL_MAC_FILTER:
		ixlv_del_ether_filters(sc);
		break;

	case IXLV_FLAG_AQ_DEL_VLAN_FILTER:
		ixlv_del_vlans(sc);
		break;

	case IXLV_FLAG_AQ_CONFIGURE_QUEUES:
		ixlv_configure_queues(sc);
		break;

	case IXLV_FLAG_AQ_DISABLE_QUEUES:
		ixlv_disable_queues(sc);
		break;

	case IXLV_FLAG_AQ_ENABLE_QUEUES:
		ixlv_enable_queues(sc);
		break;

	case IXLV_FLAG_AQ_CONFIG_RSS_KEY:
		ixlv_config_rss_key(sc);
		break;

	case IXLV_FLAG_AQ_SET_RSS_HENA:
		ixlv_set_rss_hena(sc);
		break;

	case IXLV_FLAG_AQ_CONFIG_RSS_LUT:
		ixlv_config_rss_lut(sc);
		break;
	}
}

void
ixl_vc_init_mgr(struct ixlv_sc *sc, struct ixl_vc_mgr *mgr)
{
	mgr->sc = sc;
	mgr->current = NULL;
	TAILQ_INIT(&mgr->pending);
	callout_init_mtx(&mgr->callout, &sc->mtx, 0);
}

static void
ixl_vc_process_completion(struct ixl_vc_mgr *mgr, enum i40e_status_code err)
{
	struct ixl_vc_cmd *cmd;

	cmd = mgr->current;
	mgr->current = NULL;
	cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY;

	cmd->callback(cmd, cmd->arg, err);
	ixl_vc_process_next(mgr);
}

static void
ixl_vc_process_resp(struct ixl_vc_mgr *mgr, uint32_t request,
    enum i40e_status_code err)
{
	struct ixl_vc_cmd *cmd;

	cmd = mgr->current;
	if (cmd == NULL || cmd->request != request)
		return;

	callout_stop(&mgr->callout);
	ixl_vc_process_completion(mgr, err);
}

static void
ixl_vc_cmd_timeout(void *arg)
{
	struct ixl_vc_mgr *mgr = (struct ixl_vc_mgr *)arg;

	IXLV_CORE_LOCK_ASSERT(mgr->sc);
	ixl_vc_process_completion(mgr, I40E_ERR_TIMEOUT);
}

static void
ixl_vc_cmd_retry(void *arg)
{
	struct ixl_vc_mgr *mgr = (struct ixl_vc_mgr *)arg;

	IXLV_CORE_LOCK_ASSERT(mgr->sc);
	ixl_vc_send_current(mgr);
}

static void
ixl_vc_send_current(struct ixl_vc_mgr *mgr)
{
	struct ixl_vc_cmd *cmd;

	cmd = mgr->current;
	ixl_vc_send_cmd(mgr->sc, cmd->request);
	callout_reset(&mgr->callout, IXLV_VC_TIMEOUT, ixl_vc_cmd_timeout, mgr);
}

static void
ixl_vc_process_next(struct ixl_vc_mgr *mgr)
{
	struct ixl_vc_cmd *cmd;

	if (mgr->current != NULL)
		return;

	if (TAILQ_EMPTY(&mgr->pending))
		return;

	cmd = TAILQ_FIRST(&mgr->pending);
	TAILQ_REMOVE(&mgr->pending, cmd, next);

	mgr->current = cmd;
	ixl_vc_send_current(mgr);
}

static void
ixl_vc_schedule_retry(struct ixl_vc_mgr *mgr)
{

	callout_reset(&mgr->callout, howmany(hz, 100), ixl_vc_cmd_retry, mgr);
}

void
ixl_vc_enqueue(struct ixl_vc_mgr *mgr, struct ixl_vc_cmd *cmd,
    uint32_t req, ixl_vc_callback_t *callback, void *arg)
{
	IXLV_CORE_LOCK_ASSERT(mgr->sc);

	if (cmd->flags & IXLV_VC_CMD_FLAG_BUSY) {
		if (mgr->current == cmd)
			mgr->current = NULL;
		else
			TAILQ_REMOVE(&mgr->pending, cmd, next);
	}

	cmd->request = req;
	cmd->callback = callback;
	cmd->arg = arg;
	cmd->flags |= IXLV_VC_CMD_FLAG_BUSY;
	TAILQ_INSERT_TAIL(&mgr->pending, cmd, next);

	ixl_vc_process_next(mgr);
}

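/*
** Usage sketch (illustrative only; the callback and the command storage
** below are hypothetical, but the signatures match ixl_vc_callback_t and
** ixl_vc_enqueue() above):
**
**	static void
**	example_cmd_done(struct ixl_vc_cmd *cmd, void *arg,
**	    enum i40e_status_code code)
**	{
**		struct ixlv_sc *sc = arg;
**
**		if (code != I40E_SUCCESS)
**			device_printf(sc->dev,
**			    "enable queues request failed: %d\n", code);
**	}
**
**	ixl_vc_enqueue(&sc->vc_mgr, &sc->enable_queues_cmd,
**	    IXLV_FLAG_AQ_ENABLE_QUEUES, example_cmd_done, sc);
**
** The manager serializes commands, so additional ixl_vc_enqueue() calls made
** while one request is outstanding simply wait on the pending queue.
*/
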
void
ixl_vc_flush(struct ixl_vc_mgr *mgr)
{
	struct ixl_vc_cmd *cmd;

	IXLV_CORE_LOCK_ASSERT(mgr->sc);
	KASSERT(TAILQ_EMPTY(&mgr->pending) || mgr->current != NULL,
	    ("ixlv: pending commands waiting but no command in progress"));

	cmd = mgr->current;
	if (cmd != NULL) {
		mgr->current = NULL;
		cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY;
		cmd->callback(cmd, cmd->arg, I40E_ERR_ADAPTER_STOPPED);
	}

	while ((cmd = TAILQ_FIRST(&mgr->pending)) != NULL) {
		TAILQ_REMOVE(&mgr->pending, cmd, next);
		cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY;
		cmd->callback(cmd, cmd->arg, I40E_ERR_ADAPTER_STOPPED);
	}

	callout_stop(&mgr->callout);
}