/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*
** Virtual Channel support
** These are support functions for communication
** between the VF and PF drivers.
*/

#include "ixl.h"
#include "ixlv.h"
#include "i40e_prototype.h"
/* busy wait delay in msec */
#define IXLV_BUSY_WAIT_DELAY 10
#define IXLV_BUSY_WAIT_COUNT 50
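/*
** e.g. a poll loop bounded by these constants busy waits for at most
** IXLV_BUSY_WAIT_COUNT * IXLV_BUSY_WAIT_DELAY = 50 * 10 = 500 msec.
*/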
static void	ixl_vc_process_resp(struct ixl_vc_mgr *, uint32_t,
		    enum i40e_status_code);
static void	ixl_vc_process_next(struct ixl_vc_mgr *mgr);
static void	ixl_vc_schedule_retry(struct ixl_vc_mgr *mgr);
static void	ixl_vc_send_current(struct ixl_vc_mgr *mgr);
/*
** Validate VF messages
*/
static int ixl_vc_validate_vf_msg(struct ixlv_sc *sc, u32 v_opcode,
    u8 *msg, u16 msglen)
{
	bool err_msg_format = false;
	int valid_len;
	/* Validate message length. */
	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_VERSION:
		valid_len = sizeof(struct i40e_virtchnl_version_info);
		break;
	case I40E_VIRTCHNL_OP_RESET_VF:
	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
		valid_len = 0;
		break;
	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
		valid_len = sizeof(struct i40e_virtchnl_txq_info);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
		valid_len = sizeof(struct i40e_virtchnl_rxq_info);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_vsi_queue_config_info *vqc =
			    (struct i40e_virtchnl_vsi_queue_config_info *)msg;
			valid_len += (vqc->num_queue_pairs *
			    sizeof(struct i40e_virtchnl_queue_pair_info));
			if (vqc->num_queue_pairs == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_irq_map_info *vimi =
			    (struct i40e_virtchnl_irq_map_info *)msg;
			valid_len += (vimi->num_vectors *
			    sizeof(struct i40e_virtchnl_vector_map));
			if (vimi->num_vectors == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		valid_len = sizeof(struct i40e_virtchnl_queue_select);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_ether_addr_list *veal =
			    (struct i40e_virtchnl_ether_addr_list *)msg;
			valid_len += veal->num_elements *
			    sizeof(struct i40e_virtchnl_ether_addr);
			if (veal->num_elements == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
		if (msglen >= valid_len) {
			struct i40e_virtchnl_vlan_filter_list *vfl =
			    (struct i40e_virtchnl_vlan_filter_list *)msg;
			valid_len += vfl->num_elements * sizeof(u16);
			if (vfl->num_elements == 0)
				err_msg_format = true;
		}
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		valid_len = sizeof(struct i40e_virtchnl_promisc_info);
		break;
	case I40E_VIRTCHNL_OP_GET_STATS:
		valid_len = sizeof(struct i40e_virtchnl_queue_select);
		break;
	/* These are always errors coming from the VF. */
	case I40E_VIRTCHNL_OP_EVENT:
	case I40E_VIRTCHNL_OP_UNKNOWN:
	default:
		return EPERM;
	}
	/* Check the computed length against the actual length,
	 * and the message format flag. */
	if ((valid_len != msglen) || (err_msg_format))
		return EINVAL;

	return 0;
}
/*
** ixlv_send_pf_msg
**
** Send a message to the PF and print its status on failure.
*/
static int
ixlv_send_pf_msg(struct ixlv_sc *sc,
	enum i40e_virtchnl_ops op, u8 *msg, u16 len)
{
	struct i40e_hw	*hw = &sc->hw;
	device_t	dev = sc->dev;
	i40e_status	err;
	int		val_err;

	/*
	** Pre-validate messages to the PF
	*/
	val_err = ixl_vc_validate_vf_msg(sc, op, msg, len);
	if (val_err)
		device_printf(dev, "Error validating msg to PF for op %d,"
		    " msglen %d: error %d\n", op, len, val_err);

	err = i40e_aq_send_msg_to_pf(hw, op, I40E_SUCCESS, msg, len, NULL);
	if (err)
		device_printf(dev, "Unable to send opcode %d to PF, "
		    "error %d, aq status %d\n", op, err,
		    hw->aq.asq_last_status);

	return err;
}
/*
** ixlv_send_api_ver
**
** Send an API version admin queue message to the PF. The reply is not checked
** in this function. Returns 0 if the message was successfully
** sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
*/
int
ixlv_send_api_ver(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_version_info vvi;

	vvi.major = I40E_VIRTCHNL_VERSION_MAJOR;
	vvi.minor = I40E_VIRTCHNL_VERSION_MINOR;

	return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_VERSION,
	    (u8 *)&vvi, sizeof(vvi));
}
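/*
** Example handshake at init time (sketch only; assumes the admin queue has
** been initialized and the caller handles cleanup on failure):
**
**	error = ixlv_send_api_ver(sc);
**	if (error == 0)
**		error = ixlv_verify_api_ver(sc);
**	if (error != 0)
**		return error;	// PF speaks an incompatible API
*/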
/*
** ixlv_verify_api_ver
**
** Compare API versions with the PF. Must be called after the admin queue is
** initialized. Returns 0 if the API versions match, EIO if
** they do not, or I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty.
*/
int
ixlv_verify_api_ver(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_version_info *pf_vvi;
	struct i40e_hw *hw = &sc->hw;
	struct i40e_arq_event_info event;
	i40e_status err;
	int retries = 0;

	event.buf_len = IXL_AQ_BUFSZ;
	event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT);
	if (!event.msg_buf) {
		err = ENOMEM;
		goto out;
	}

	do {
		if (++retries > IXLV_AQ_MAX_ERR)
			goto out_alloc;

		/* NOTE: initial delay is necessary */
		i40e_msec_delay(100);
		err = i40e_clean_arq_element(hw, &event, NULL);
	} while (err == I40E_ERR_ADMIN_QUEUE_NO_WORK);
	if (err)
		goto out_alloc;

	err = (i40e_status)le32toh(event.desc.cookie_low);
	if (err) {
		err = EIO;
		goto out_alloc;
	}

	if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
	    I40E_VIRTCHNL_OP_VERSION) {
		DDPRINTF(sc->dev, "Received unexpected op response: %d\n",
		    le32toh(event.desc.cookie_high));
		err = EIO;
		goto out_alloc;
	}

	pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
	if ((pf_vvi->major > I40E_VIRTCHNL_VERSION_MAJOR) ||
	    ((pf_vvi->major == I40E_VIRTCHNL_VERSION_MAJOR) &&
	    (pf_vvi->minor > I40E_VIRTCHNL_VERSION_MINOR)))
		err = EIO;
	else
		sc->pf_version = pf_vvi->minor;

out_alloc:
	free(event.msg_buf, M_DEVBUF);
out:
	return err;
}
/*
** ixlv_send_vf_config_msg
**
** Send a VF configuration request admin queue message to the PF. The reply
** is not checked in this function. Returns 0 if the message was
** successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
*/
int
ixlv_send_vf_config_msg(struct ixlv_sc *sc)
{
	u32	caps;

	caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
	    I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ |
	    I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
	    I40E_VIRTCHNL_VF_OFFLOAD_VLAN;

	/* A v1.1 PF negotiates offload capabilities; a v1.0 PF does not. */
	if (sc->pf_version)
		return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
		    (u8 *)&caps, sizeof(caps));
	else
		return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
		    NULL, 0);
}
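/*
** Against a v1.1 PF the requested caps come back in the VF resource reply;
** a caller might test the negotiated result like this (sketch only, after
** ixlv_get_vf_config() has filled in sc->vf_res):
**
**	if (sc->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN)
**		... enable VLAN hardware offload paths ...
*/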
/*
** ixlv_get_vf_config
**
** Get the VF configuration from the PF and populate the hw structure. Must be
** called after the admin queue is initialized. Busy waits until the response
** is received from the PF, up to a maximum timeout. The response from the PF
** is returned in the buffer for further processing by the caller.
*/
int
ixlv_get_vf_config(struct ixlv_sc *sc)
{
	struct i40e_hw	*hw = &sc->hw;
	device_t	dev = sc->dev;
	struct i40e_arq_event_info event;
	u16 len;
	i40e_status err = I40E_SUCCESS;
	u32 retries = 0;

	/* Note this assumes a single VSI */
	len = sizeof(struct i40e_virtchnl_vf_resource) +
	    sizeof(struct i40e_virtchnl_vsi_resource);
	event.buf_len = len;
	event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT);
	if (!event.msg_buf) {
		err = ENOMEM;
		goto out;
	}

	for (;;) {
		err = i40e_clean_arq_element(hw, &event, NULL);
		if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
			if (++retries <= IXLV_AQ_MAX_ERR)
				i40e_msec_delay(10);
		} else if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
		    I40E_VIRTCHNL_OP_GET_VF_RESOURCES) {
			DDPRINTF(dev, "Received a response from PF,"
			    " opcode %d, error %d",
			    le32toh(event.desc.cookie_high),
			    le32toh(event.desc.cookie_low));
			retries++;
			continue;
		} else {
			err = (i40e_status)le32toh(event.desc.cookie_low);
			if (err) {
				device_printf(dev, "%s: Error returned from PF,"
				    " opcode %d, error %d\n", __func__,
				    le32toh(event.desc.cookie_high),
				    le32toh(event.desc.cookie_low));
				err = EIO;
				goto out_alloc;
			}
			/* We retrieved the config message, with no errors */
			break;
		}

		if (retries > IXLV_AQ_MAX_ERR) {
			INIT_DBG_DEV(dev, "Did not receive response after %d tries.",
			    retries);
			err = ETIMEDOUT;
			goto out_alloc;
		}
	}

	memcpy(sc->vf_res, event.msg_buf, min(event.msg_len, len));
	i40e_vf_parse_hw_config(hw, sc->vf_res);

out_alloc:
	free(event.msg_buf, M_DEVBUF);
out:
	return err;
}
/*
** ixlv_configure_queues
**
** Request that the PF set up our queues.
*/
void
ixlv_configure_queues(struct ixlv_sc *sc)
{
	device_t	dev = sc->dev;
	struct ixl_vsi	*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;
	struct tx_ring	*txr;
	struct rx_ring	*rxr;
	int		len, pairs;

	struct i40e_virtchnl_vsi_queue_config_info *vqci;
	struct i40e_virtchnl_queue_pair_info *vqpi;

	pairs = vsi->num_queues;
	len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
	    (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
	vqci = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!vqci) {
		device_printf(dev, "%s: unable to allocate memory\n", __func__);
		ixl_vc_schedule_retry(&sc->vc_mgr);
		return;
	}
	vqci->vsi_id = sc->vsi_res->vsi_id;
	vqci->num_queue_pairs = pairs;
	vqpi = vqci->qpair;
	/* Size check is not needed here - HW max is 16 queue pairs, and we
	 * can fit info for 31 of them into the AQ buffer before it overflows.
	 */
	for (int i = 0; i < pairs; i++, que++, vqpi++) {
		txr = &que->txr;
		rxr = &que->rxr;
		vqpi->txq.vsi_id = vqci->vsi_id;
		vqpi->txq.queue_id = i;
		vqpi->txq.ring_len = que->num_desc;
		vqpi->txq.dma_ring_addr = txr->dma.pa;
		/* Enable Head writeback */
		vqpi->txq.headwb_enabled = 1;
		vqpi->txq.dma_headwb_addr = txr->dma.pa +
		    (que->num_desc * sizeof(struct i40e_tx_desc));

		vqpi->rxq.vsi_id = vqci->vsi_id;
		vqpi->rxq.queue_id = i;
		vqpi->rxq.ring_len = que->num_desc;
		vqpi->rxq.dma_ring_addr = rxr->dma.pa;
		vqpi->rxq.max_pkt_size = vsi->max_frame_size;
		vqpi->rxq.databuffer_size = rxr->mbuf_sz;
		vqpi->rxq.splithdr_enabled = 0;
	}

	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
	    (u8 *)vqci, len);
	free(vqci, M_DEVBUF);
}
/*
** ixlv_enable_queues
**
** Request that the PF enable all of our queues.
*/
void
ixlv_enable_queues(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_queue_select vqs;

	vqs.vsi_id = sc->vsi_res->vsi_id;
	vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
	vqs.rx_queues = vqs.tx_queues;
	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
	    (u8 *)&vqs, sizeof(vqs));
}
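/*
** The masks above select every queue pair the PF gave us, e.g. with
** num_queue_pairs == 4, (1 << 4) - 1 == 0xF selects queues 0-3.
*/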
/*
** ixlv_disable_queues
**
** Request that the PF disable all of our queues.
*/
void
ixlv_disable_queues(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_queue_select vqs;

	vqs.vsi_id = sc->vsi_res->vsi_id;
	vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
	vqs.rx_queues = vqs.tx_queues;
	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
	    (u8 *)&vqs, sizeof(vqs));
}
/*
** ixlv_map_queues
**
** Request that the PF map queues to interrupt vectors. Misc causes, including
** the admin queue, are always mapped to vector 0.
*/
void
ixlv_map_queues(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_irq_map_info *vm;
	int			i, q, len;
	struct ixl_vsi		*vsi = &sc->vsi;
	struct ixl_queue	*que = vsi->queues;

	/* How many queue vectors? The adminq uses one. */
	q = sc->msix - 1;

	len = sizeof(struct i40e_virtchnl_irq_map_info) +
	    (sc->msix * sizeof(struct i40e_virtchnl_vector_map));
	vm = malloc(len, M_DEVBUF, M_NOWAIT);
	if (!vm) {
		printf("%s: unable to allocate memory\n", __func__);
		ixl_vc_schedule_retry(&sc->vc_mgr);
		return;
	}

	vm->num_vectors = sc->msix;
	/* Queue vectors first */
	for (i = 0; i < q; i++, que++) {
		vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id;
		vm->vecmap[i].vector_id = i + 1; /* first is adminq */
		vm->vecmap[i].txq_map = (1 << que->me);
		vm->vecmap[i].rxq_map = (1 << que->me);
		vm->vecmap[i].rxitr_idx = 0;
		vm->vecmap[i].txitr_idx = 0;
	}

	/* Misc vector last - this is only for AdminQ messages */
	vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id;
	vm->vecmap[i].vector_id = 0;
	vm->vecmap[i].txq_map = 0;
	vm->vecmap[i].rxq_map = 0;
	vm->vecmap[i].rxitr_idx = 0;
	vm->vecmap[i].txitr_idx = 0;

	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
	    (u8 *)vm, len);
	free(vm, M_DEVBUF);
}
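/*
** The resulting layout: vector 0 carries only admin queue interrupts, and
** vectors 1..(sc->msix - 1) each carry one tx/rx queue pair. For example,
** with sc->msix == 5, queues 0-3 land on vectors 1-4.
*/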
/*
** ixlv_add_vlans
**
** Scan the Filter List looking for vlans that need
** to be added, then create the data to hand to the AQ
** for handling.
*/
void
ixlv_add_vlans(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_vlan_filter_list *v;
	struct ixlv_vlan_filter *f, *ftmp;
	device_t	dev = sc->dev;
	int		len, i = 0, cnt = 0;

	/* Get count of VLAN filters to add */
	SLIST_FOREACH(f, sc->vlan_filters, next) {
		if (f->flags & IXL_FILTER_ADD)
			cnt++;
	}
	if (!cnt) { /* no work... */
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
		    I40E_SUCCESS);
		return;
	}

	len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
	    (cnt * sizeof(u16));
	if (len > IXL_AQ_BUF_SZ) {
		device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
		    __func__);
		ixl_vc_schedule_retry(&sc->vc_mgr);
		return;
	}

	v = malloc(len, M_DEVBUF, M_NOWAIT);
	if (!v) {
		device_printf(dev, "%s: unable to allocate memory\n",
		    __func__);
		ixl_vc_schedule_retry(&sc->vc_mgr);
		return;
	}

	v->vsi_id = sc->vsi_res->vsi_id;
	v->num_elements = cnt;

	/* Scan the filter array */
	SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) {
		if (f->flags & IXL_FILTER_ADD) {
			bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16));
			f->flags = IXL_FILTER_USED;
			i++;
		}
		if (i == cnt)
			break;
	}
	// ERJ: Should this be taken out?
	if (i == 0) { /* Should not happen... */
		device_printf(dev, "%s: i == 0?\n", __func__);
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
		    I40E_SUCCESS);
		free(v, M_DEVBUF); /* don't leak the AQ buffer */
		return;
	}

	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)v, len);
	free(v, M_DEVBUF);
}
/*
** ixlv_del_vlans
**
** Scan the Filter Table looking for vlans that need
** to be removed, then create the data to hand to the AQ
** for handling.
*/
void
ixlv_del_vlans(struct ixlv_sc *sc)
{
	device_t	dev = sc->dev;
	struct i40e_virtchnl_vlan_filter_list *v;
	struct ixlv_vlan_filter *f, *ftmp;
	int len, i = 0, cnt = 0;

	/* Get count of VLAN filters to delete */
	SLIST_FOREACH(f, sc->vlan_filters, next) {
		if (f->flags & IXL_FILTER_DEL)
			cnt++;
	}
	if (!cnt) { /* no work... */
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
		    I40E_SUCCESS);
		return;
	}

	len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
	    (cnt * sizeof(u16));
	if (len > IXL_AQ_BUF_SZ) {
		device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
		    __func__);
		ixl_vc_schedule_retry(&sc->vc_mgr);
		return;
	}

	v = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!v) {
		device_printf(dev, "%s: unable to allocate memory\n",
		    __func__);
		ixl_vc_schedule_retry(&sc->vc_mgr);
		return;
	}

	v->vsi_id = sc->vsi_res->vsi_id;
	v->num_elements = cnt;

	/* Scan the filter array */
	SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) {
		if (f->flags & IXL_FILTER_DEL) {
			bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16));
			i++;
			SLIST_REMOVE(sc->vlan_filters, f, ixlv_vlan_filter, next);
			free(f, M_DEVBUF);
		}
		if (i == cnt)
			break;
	}
	// ERJ: Take this out?
	if (i == 0) { /* Should not happen... */
		device_printf(dev, "%s: i == 0?\n", __func__);
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
		    I40E_SUCCESS);
		free(v, M_DEVBUF); /* don't leak the AQ buffer */
		return;
	}

	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)v, len);
	free(v, M_DEVBUF);
}
/*
** ixlv_add_ether_filters
**
** This routine takes additions to the vsi filter
** table and creates an Admin Queue call to create
** the filters in the hardware.
*/
void
ixlv_add_ether_filters(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_ether_addr_list *a;
	struct ixlv_mac_filter	*f;
	device_t	dev = sc->dev;
	int		len, j = 0, cnt = 0;

	/* Get count of MAC addresses to add */
	SLIST_FOREACH(f, sc->mac_filters, next) {
		if (f->flags & IXL_FILTER_ADD)
			cnt++;
	}
	if (cnt == 0) { /* Should not happen... */
		DDPRINTF(dev, "cnt == 0, exiting...");
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER,
		    I40E_SUCCESS);
		return;
	}

	len = sizeof(struct i40e_virtchnl_ether_addr_list) +
	    (cnt * sizeof(struct i40e_virtchnl_ether_addr));

	a = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (a == NULL) {
		device_printf(dev, "%s: Failed to get memory for "
		    "virtchnl_ether_addr_list\n", __func__);
		ixl_vc_schedule_retry(&sc->vc_mgr);
		return;
	}
	a->vsi_id = sc->vsi.id;
	a->num_elements = cnt;

	/* Scan the filter array */
	SLIST_FOREACH(f, sc->mac_filters, next) {
		if (f->flags & IXL_FILTER_ADD) {
			bcopy(f->macaddr, a->list[j].addr, ETHER_ADDR_LEN);
			f->flags &= ~IXL_FILTER_ADD;
			j++;

			DDPRINTF(dev, "ADD: " MAC_FORMAT,
			    MAC_FORMAT_ARGS(f->macaddr));
		}
		if (j == cnt)
			break;
	}
	DDPRINTF(dev, "len %d, j %d, cnt %d",
	    len, j, cnt);
	ixlv_send_pf_msg(sc,
	    I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, (u8 *)a, len);
	free(a, M_DEVBUF);
}
/*
** ixlv_del_ether_filters
**
** This routine takes filters flagged for deletion in the
** sc MAC filter list and creates an Admin Queue call
** to delete those filters in the hardware.
*/
void
ixlv_del_ether_filters(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_ether_addr_list *d;
	device_t	dev = sc->dev;
	struct ixlv_mac_filter	*f, *f_temp;
	int		len, j = 0, cnt = 0;

	/* Get count of MAC addresses to delete */
	SLIST_FOREACH(f, sc->mac_filters, next) {
		if (f->flags & IXL_FILTER_DEL)
			cnt++;
	}
	if (cnt == 0) {
		DDPRINTF(dev, "cnt == 0, exiting...");
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER,
		    I40E_SUCCESS);
		return;
	}

	len = sizeof(struct i40e_virtchnl_ether_addr_list) +
	    (cnt * sizeof(struct i40e_virtchnl_ether_addr));

	d = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (d == NULL) {
		device_printf(dev, "%s: Failed to get memory for "
		    "virtchnl_ether_addr_list\n", __func__);
		ixl_vc_schedule_retry(&sc->vc_mgr);
		return;
	}
	d->vsi_id = sc->vsi.id;
	d->num_elements = cnt;

	/* Scan the filter array */
	SLIST_FOREACH_SAFE(f, sc->mac_filters, next, f_temp) {
		if (f->flags & IXL_FILTER_DEL) {
			bcopy(f->macaddr, d->list[j].addr, ETHER_ADDR_LEN);
			DDPRINTF(dev, "DEL: " MAC_FORMAT,
			    MAC_FORMAT_ARGS(f->macaddr));
			j++;
			SLIST_REMOVE(sc->mac_filters, f, ixlv_mac_filter, next);
			free(f, M_DEVBUF);
		}
		if (j == cnt)
			break;
	}
	ixlv_send_pf_msg(sc,
	    I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, (u8 *)d, len);
	free(d, M_DEVBUF);
}
/*
** ixlv_request_reset
** Request that the PF reset this VF. No response is expected.
*/
void
ixlv_request_reset(struct ixlv_sc *sc)
{
	/*
	** Set the reset status to "in progress" before
	** the request; this avoids any possibility of
	** a mistaken early detection of completion.
	*/
	wr32(&sc->hw, I40E_VFGEN_RSTAT, I40E_VFR_INPROGRESS);
	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0);
}
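/*
** Completion is observed out of band. A sketch of how a caller might poll
** for it (register and state names are from the shared i40e code):
**
**	u32 reg;
**	for (int i = 0; i < 100; i++) {
**		reg = rd32(&sc->hw, I40E_VFGEN_RSTAT) &
**		    I40E_VFGEN_RSTAT_VFR_STATE_MASK;
**		if (reg == I40E_VFR_VFACTIVE || reg == I40E_VFR_COMPLETED)
**			break;
**		i40e_msec_delay(10);
**	}
*/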
/*
** ixlv_request_stats
** Request the statistics for this VF's VSI from the PF.
*/
void
ixlv_request_stats(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_queue_select vqs;
	int error = 0;

	vqs.vsi_id = sc->vsi_res->vsi_id;
	/* Low priority: log a failure, but take no further action */
	error = ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_STATS,
	    (u8 *)&vqs, sizeof(vqs));
	if (error)
		device_printf(sc->dev,
		    "Error sending stats request to PF: %d\n", error);
}
/*
** ixlv_update_stats_counters
**
** Updates the driver's stats counters with the VSI stats returned from the PF.
*/
void
ixlv_update_stats_counters(struct ixlv_sc *sc, struct i40e_eth_stats *es)
{
	struct ixl_vsi *vsi = &sc->vsi;
	uint64_t tx_discards;

	tx_discards = es->tx_discards;
	for (int i = 0; i < vsi->num_queues; i++)
		tx_discards += sc->vsi.queues[i].txr.br->br_drops;

	/* Update ifnet stats */
	IXL_SET_IPACKETS(vsi, es->rx_unicast +
	    es->rx_multicast +
	    es->rx_broadcast);
	IXL_SET_OPACKETS(vsi, es->tx_unicast +
	    es->tx_multicast +
	    es->tx_broadcast);
	IXL_SET_IBYTES(vsi, es->rx_bytes);
	IXL_SET_OBYTES(vsi, es->tx_bytes);
	IXL_SET_IMCASTS(vsi, es->rx_multicast);
	IXL_SET_OMCASTS(vsi, es->tx_multicast);

	IXL_SET_OERRORS(vsi, es->tx_errors);
	IXL_SET_IQDROPS(vsi, es->rx_discards);
	IXL_SET_OQDROPS(vsi, tx_discards);
	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
	IXL_SET_COLLISIONS(vsi, 0);

	vsi->eth_stats = *es;
}
/*
** ixlv_vc_completion
**
** Asynchronous completion function for admin queue messages. Rather than busy
** wait, we fire off our requests and assume that no errors will be returned.
** This function handles the reply messages.
*/
void
ixlv_vc_completion(struct ixlv_sc *sc,
    enum i40e_virtchnl_ops v_opcode,
    i40e_status v_retval, u8 *msg, u16 msglen)
{
	device_t	dev = sc->dev;
	struct ixl_vsi	*vsi = &sc->vsi;

	if (v_opcode == I40E_VIRTCHNL_OP_EVENT) {
		struct i40e_virtchnl_pf_event *vpe =
		    (struct i40e_virtchnl_pf_event *)msg;

		switch (vpe->event) {
		case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
#ifdef IXL_DEBUG
			device_printf(dev, "Link change: status %d, speed %d\n",
			    vpe->event_data.link_event.link_status,
			    vpe->event_data.link_event.link_speed);
#endif
			sc->link_up =
			    vpe->event_data.link_event.link_status;
			sc->link_speed =
			    vpe->event_data.link_event.link_speed;
			ixlv_update_link_status(sc);
			break;
		case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
			device_printf(dev, "PF initiated reset!\n");
			sc->init_state = IXLV_RESET_PENDING;
			break;
		default:
			device_printf(dev, "%s: Unknown event %d from AQ\n",
			    __func__, vpe->event);
			break;
		}

		return;
	}

	/* Catch-all error response */
	if (v_retval) {
		device_printf(dev,
		    "%s: AQ returned error %d to our request %d!\n",
		    __func__, v_retval, v_opcode);
	}

#ifdef IXL_DEBUG
	if (v_opcode != I40E_VIRTCHNL_OP_GET_STATS)
		DDPRINTF(dev, "opcode %d", v_opcode);
#endif

	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_GET_STATS:
		ixlv_update_stats_counters(sc, (struct i40e_eth_stats *)msg);
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER,
		    v_retval);
		if (v_retval) {
			device_printf(dev, "WARNING: Error adding VF mac filter!\n");
			device_printf(dev, "WARNING: Device may not receive traffic!\n");
		}
		break;
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER,
		    v_retval);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIGURE_PROMISC,
		    v_retval);
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
		    v_retval);
		break;
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
		    v_retval);
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ENABLE_QUEUES,
		    v_retval);
		if (v_retval == 0) {
			/* Update link status */
			ixlv_update_link_status(sc);
			/* Turn on all interrupts */
			ixlv_enable_intr(vsi);
			/* And inform the stack we're ready */
			vsi->ifp->if_drv_flags |= IFF_DRV_RUNNING;
			vsi->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		}
		break;
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DISABLE_QUEUES,
		    v_retval);
		if (v_retval == 0) {
			/* Turn off all interrupts */
			ixlv_disable_intr(vsi);
			/* Tell the stack that the interface is no longer active */
			vsi->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
		}
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIGURE_QUEUES,
		    v_retval);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_MAP_VECTORS,
		    v_retval);
		break;
	default:
		device_printf(dev,
		    "%s: Received unexpected message %d from PF.\n",
		    __func__, v_opcode);
		break;
	}
}
static void
ixl_vc_send_cmd(struct ixlv_sc *sc, uint32_t request)
{

	switch (request) {
	case IXLV_FLAG_AQ_MAP_VECTORS:
		ixlv_map_queues(sc);
		break;
	case IXLV_FLAG_AQ_ADD_MAC_FILTER:
		ixlv_add_ether_filters(sc);
		break;
	case IXLV_FLAG_AQ_ADD_VLAN_FILTER:
		ixlv_add_vlans(sc);
		break;
	case IXLV_FLAG_AQ_DEL_MAC_FILTER:
		ixlv_del_ether_filters(sc);
		break;
	case IXLV_FLAG_AQ_DEL_VLAN_FILTER:
		ixlv_del_vlans(sc);
		break;
	case IXLV_FLAG_AQ_CONFIGURE_QUEUES:
		ixlv_configure_queues(sc);
		break;
	case IXLV_FLAG_AQ_DISABLE_QUEUES:
		ixlv_disable_queues(sc);
		break;
	case IXLV_FLAG_AQ_ENABLE_QUEUES:
		ixlv_enable_queues(sc);
		break;
	}
}
void
ixl_vc_init_mgr(struct ixlv_sc *sc, struct ixl_vc_mgr *mgr)
{
	mgr->sc = sc;
	mgr->current = NULL;
	TAILQ_INIT(&mgr->pending);
	callout_init_mtx(&mgr->callout, &sc->mtx, 0);
}
static void
ixl_vc_process_completion(struct ixl_vc_mgr *mgr, enum i40e_status_code err)
{
	struct ixl_vc_cmd *cmd;

	cmd = mgr->current;
	mgr->current = NULL;
	cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY;

	cmd->callback(cmd, cmd->arg, err);
	ixl_vc_process_next(mgr);
}
static void
ixl_vc_process_resp(struct ixl_vc_mgr *mgr, uint32_t request,
    enum i40e_status_code err)
{
	struct ixl_vc_cmd *cmd;

	cmd = mgr->current;
	if (cmd == NULL || cmd->request != request)
		return;

	callout_stop(&mgr->callout);
	ixl_vc_process_completion(mgr, err);
}
static void
ixl_vc_cmd_timeout(void *arg)
{
	struct ixl_vc_mgr *mgr = (struct ixl_vc_mgr *)arg;

	IXLV_CORE_LOCK_ASSERT(mgr->sc);
	ixl_vc_process_completion(mgr, I40E_ERR_TIMEOUT);
}
static void
ixl_vc_cmd_retry(void *arg)
{
	struct ixl_vc_mgr *mgr = (struct ixl_vc_mgr *)arg;

	IXLV_CORE_LOCK_ASSERT(mgr->sc);
	ixl_vc_send_current(mgr);
}
static void
ixl_vc_send_current(struct ixl_vc_mgr *mgr)
{
	struct ixl_vc_cmd *cmd;

	cmd = mgr->current;
	ixl_vc_send_cmd(mgr->sc, cmd->request);
	callout_reset(&mgr->callout, IXLV_VC_TIMEOUT, ixl_vc_cmd_timeout, mgr);
}
static void
ixl_vc_process_next(struct ixl_vc_mgr *mgr)
{
	struct ixl_vc_cmd *cmd;

	if (mgr->current != NULL)
		return;

	if (TAILQ_EMPTY(&mgr->pending))
		return;

	cmd = TAILQ_FIRST(&mgr->pending);
	TAILQ_REMOVE(&mgr->pending, cmd, next);

	mgr->current = cmd;
	ixl_vc_send_current(mgr);
}
static void
ixl_vc_schedule_retry(struct ixl_vc_mgr *mgr)
{

	/* Retry in roughly 10 msec: howmany(hz, 100) ticks */
	callout_reset(&mgr->callout, howmany(hz, 100), ixl_vc_cmd_retry, mgr);
}
void
ixl_vc_enqueue(struct ixl_vc_mgr *mgr, struct ixl_vc_cmd *cmd,
	    uint32_t req, ixl_vc_callback_t *callback, void *arg)
{
	IXLV_CORE_LOCK_ASSERT(mgr->sc);

	if (cmd->flags & IXLV_VC_CMD_FLAG_BUSY) {
		if (mgr->current == cmd)
			mgr->current = NULL;
		else
			TAILQ_REMOVE(&mgr->pending, cmd, next);
	}

	cmd->request = req;
	cmd->callback = callback;
	cmd->arg = arg;
	cmd->flags |= IXLV_VC_CMD_FLAG_BUSY;
	TAILQ_INSERT_TAIL(&mgr->pending, cmd, next);

	ixl_vc_process_next(mgr);
}
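/*
** Typical usage (sketch only; the command storage and callback below are
** hypothetical, while the flag and the ixl_vc_callback_t type come from the
** driver headers):
**
**	static void
**	my_enable_done(struct ixl_vc_cmd *cmd, void *arg,
**	    enum i40e_status_code code)
**	{
**		if (code != I40E_SUCCESS)
**			; // e.g. log the error and re-enqueue
**	}
**
**	ixl_vc_enqueue(&sc->vc_mgr, &my_cmd,
**	    IXLV_FLAG_AQ_ENABLE_QUEUES, my_enable_done, sc);
*/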
void
ixl_vc_flush(struct ixl_vc_mgr *mgr)
{
	struct ixl_vc_cmd *cmd;

	IXLV_CORE_LOCK_ASSERT(mgr->sc);
	KASSERT(TAILQ_EMPTY(&mgr->pending) || mgr->current != NULL,
	    ("ixlv: pending commands waiting but no command in progress"));

	cmd = mgr->current;
	if (cmd != NULL) {
		mgr->current = NULL;
		cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY;
		cmd->callback(cmd, cmd->arg, I40E_ERR_ADAPTER_STOPPED);
	}

	while ((cmd = TAILQ_FIRST(&mgr->pending)) != NULL) {
		TAILQ_REMOVE(&mgr->pending, cmd, next);
		cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY;
		cmd->callback(cmd, cmd->arg, I40E_ERR_ADAPTER_STOPPED);
	}

	callout_stop(&mgr->callout);
}