1 /******************************************************************************
3 Copyright (c) 2013-2014, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 ** Virtual Channel support
37 ** These are support functions for communication
38 ** between the VF and PF drivers.
43 #include "i40e_prototype.h"
46 /* busy wait delay in msec */
47 #define IXLV_BUSY_WAIT_DELAY 10
/* Maximum number of busy-wait iterations (50 * 10 ms = ~500 ms total). */
48 #define IXLV_BUSY_WAIT_COUNT 50
51 ** Validate VF messages
/*
 * ixl_vc_validate_vf_msg
 *
 * Sanity-check an outgoing virtchnl message before it is sent to the PF.
 * For each opcode the expected payload length is computed: a fixed struct
 * size, plus — for list-carrying opcodes — a per-element contribution read
 * from the element count in the message header.  A zero-element list is
 * flagged as a malformed message via err_msg_format.
 *
 * NOTE(review): this excerpt omits several lines (switch header, break
 * statements, closing braces); comments below describe only what is shown.
 */
53 static int ixl_vc_validate_vf_msg(struct ixlv_sc *sc, u32 v_opcode,
56 bool err_msg_format = false;
59 /* Validate message length. */
61 case I40E_VIRTCHNL_OP_VERSION:
62 valid_len = sizeof(struct i40e_virtchnl_version_info);
/* Opcodes with no payload fall through with valid_len left as-is. */
64 case I40E_VIRTCHNL_OP_RESET_VF:
65 case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
68 case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
69 valid_len = sizeof(struct i40e_virtchnl_txq_info);
71 case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
72 valid_len = sizeof(struct i40e_virtchnl_rxq_info);
74 case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
75 valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
/* Variable-length message: add one queue_pair_info per declared pair. */
76 if (msglen >= valid_len) {
77 struct i40e_virtchnl_vsi_queue_config_info *vqc =
78 (struct i40e_virtchnl_vsi_queue_config_info *)msg;
79 valid_len += (vqc->num_queue_pairs *
81 i40e_virtchnl_queue_pair_info));
82 if (vqc->num_queue_pairs == 0)
83 err_msg_format = true;
86 case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
87 valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
/* Variable-length message: add one vector_map per declared vector. */
88 if (msglen >= valid_len) {
89 struct i40e_virtchnl_irq_map_info *vimi =
90 (struct i40e_virtchnl_irq_map_info *)msg;
91 valid_len += (vimi->num_vectors *
92 sizeof(struct i40e_virtchnl_vector_map));
93 if (vimi->num_vectors == 0)
94 err_msg_format = true;
97 case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
98 case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
99 valid_len = sizeof(struct i40e_virtchnl_queue_select);
101 case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
102 case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
103 valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
/* Variable-length message: add one ether_addr per declared element. */
104 if (msglen >= valid_len) {
105 struct i40e_virtchnl_ether_addr_list *veal =
106 (struct i40e_virtchnl_ether_addr_list *)msg;
107 valid_len += veal->num_elements *
108 sizeof(struct i40e_virtchnl_ether_addr);
109 if (veal->num_elements == 0)
110 err_msg_format = true;
113 case I40E_VIRTCHNL_OP_ADD_VLAN:
114 case I40E_VIRTCHNL_OP_DEL_VLAN:
115 valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
/* Variable-length message: each VLAN ID is a bare u16 in the list. */
116 if (msglen >= valid_len) {
117 struct i40e_virtchnl_vlan_filter_list *vfl =
118 (struct i40e_virtchnl_vlan_filter_list *)msg;
119 valid_len += vfl->num_elements * sizeof(u16);
120 if (vfl->num_elements == 0)
121 err_msg_format = true;
124 case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
125 valid_len = sizeof(struct i40e_virtchnl_promisc_info);
127 case I40E_VIRTCHNL_OP_GET_STATS:
128 valid_len = sizeof(struct i40e_virtchnl_queue_select);
130 /* These are always errors coming from the VF. */
131 case I40E_VIRTCHNL_OP_EVENT:
132 case I40E_VIRTCHNL_OP_UNKNOWN:
137 /* few more checks */
/* Fail if the declared length disagrees with the computed one, or the
 * message contained an empty list. */
138 if ((valid_len != msglen) || (err_msg_format))
147 ** Send message to PF and print status if failure.
/*
 * ixlv_send_pf_msg
 *
 * Common transmit path for all VF->PF virtchnl messages.  Pre-validates
 * the message with ixl_vc_validate_vf_msg() (validation failure is only
 * logged here — see comment below), then hands the buffer to the admin
 * queue via i40e_aq_send_msg_to_pf().  Send failures are logged with the
 * last AQ status for diagnosis.
 */
150 ixlv_send_pf_msg(struct ixlv_sc *sc,
151 enum i40e_virtchnl_ops op, u8 *msg, u16 len)
153 struct i40e_hw *hw = &sc->hw;
154 device_t dev = sc->dev;
159 ** Pre-validating messages to the PF, this might be
160 ** removed for performance later?
162 val_err = ixl_vc_validate_vf_msg(sc, op, msg, len);
/* Validation is advisory: the message is still sent below even when
 * val_err is nonzero — TODO(review): confirm this is intentional. */
164 device_printf(dev, "Error validating msg to PF for op %d,"
165 " msglen %d: error %d\n", op, len, val_err);
167 err = i40e_aq_send_msg_to_pf(hw, op, I40E_SUCCESS, msg, len, NULL);
169 device_printf(dev, "Unable to send opcode %d to PF, "
170 "error %d, aq status %d\n", op, err, hw->aq.asq_last_status);
178 ** Send API version admin queue message to the PF. The reply is not checked
179 ** in this function. Returns 0 if the message was successfully
180 ** sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
183 ixlv_send_api_ver(struct ixlv_sc *sc)
185 struct i40e_virtchnl_version_info vvi;
/* Advertise the virtchnl API version this VF driver was built against;
 * the PF's reply is consumed later by ixlv_verify_api_ver(). */
187 vvi.major = I40E_VIRTCHNL_VERSION_MAJOR;
188 vvi.minor = I40E_VIRTCHNL_VERSION_MINOR;
190 return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_VERSION,
191 (u8 *)&vvi, sizeof(vvi));
195 ** ixlv_verify_api_ver
197 ** Compare API versions with the PF. Must be called after admin queue is
198 ** initialized. Returns 0 if API versions match, EIO if
199 ** they do not, or I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty.
201 int ixlv_verify_api_ver(struct ixlv_sc *sc)
203 struct i40e_virtchnl_version_info *pf_vvi;
204 struct i40e_hw *hw = &sc->hw;
205 struct i40e_arq_event_info event;
/* Allocate a receive buffer for the ARQ event holding the PF's reply. */
209 event.buf_len = IXL_AQ_BUFSZ;
210 event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT);
211 if (!event.msg_buf) {
/* Poll the ARQ until the reply arrives, delaying 100 ms per attempt and
 * giving up after IXLV_AQ_MAX_ERR retries. */
217 if (++retries > IXLV_AQ_MAX_ERR)
220 /* NOTE: initial delay is necessary */
221 i40e_msec_delay(100);
222 err = i40e_clean_arq_element(hw, &event, NULL);
223 } while (err == I40E_ERR_ADMIN_QUEUE_NO_WORK);
/* The PF's return status travels in cookie_low of the descriptor. */
227 err = (i40e_status)le32toh(event.desc.cookie_low);
/* cookie_high carries the virtchnl opcode; anything other than
 * OP_VERSION here is an unexpected reply. */
233 if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
234 I40E_VIRTCHNL_OP_VERSION) {
/* Require an exact major/minor match with the PF's advertised version. */
239 pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
240 if ((pf_vvi->major != I40E_VIRTCHNL_VERSION_MAJOR) ||
241 (pf_vvi->minor != I40E_VIRTCHNL_VERSION_MINOR))
245 free(event.msg_buf, M_DEVBUF);
251 ** ixlv_send_vf_config_msg
253 ** Send VF configuration request admin queue message to the PF. The reply
254 ** is not checked in this function. Returns 0 if the message was
255 ** successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
258 ixlv_send_vf_config_msg(struct ixlv_sc *sc)
/* Fire-and-forget request; the resource reply is collected later by
 * ixlv_get_vf_config(). */
260 return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
265 ** ixlv_get_vf_config
267 ** Get VF configuration from PF and populate hw structure. Must be called after
268 ** admin queue is initialized. Busy waits until response is received from PF,
269 ** with maximum timeout. Response from PF is returned in the buffer for further
270 ** processing by the caller.
273 ixlv_get_vf_config(struct ixlv_sc *sc)
275 struct i40e_hw *hw = &sc->hw;
276 device_t dev = sc->dev;
277 struct i40e_arq_event_info event;
282 /* Note this assumes a single VSI */
283 len = sizeof(struct i40e_virtchnl_vf_resource) +
284 sizeof(struct i40e_virtchnl_vsi_resource);
286 event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT);
287 if (!event.msg_buf) {
/* Poll the ARQ for the GET_VF_RESOURCES reply: empty queue -> delay
 * 100 ms and retry (bounded by IXLV_AQ_MAX_ERR); a reply with any other
 * opcode is logged and treated as unexpected. */
293 err = i40e_clean_arq_element(hw, &event, NULL);
294 if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
295 if (++retries <= IXLV_AQ_MAX_ERR)
296 i40e_msec_delay(100);
297 } else if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
298 I40E_VIRTCHNL_OP_GET_VF_RESOURCES) {
299 device_printf(dev, "%s: Received a response from PF,"
300 " opcode %d, error %d\n", __func__,
301 le32toh(event.desc.cookie_high),
302 le32toh(event.desc.cookie_low));
/* PF return status is carried in cookie_low. */
306 err = (i40e_status)le32toh(event.desc.cookie_low);
308 device_printf(dev, "%s: Error returned from PF,"
309 " opcode %d, error %d\n", __func__,
310 le32toh(event.desc.cookie_high),
311 le32toh(event.desc.cookie_low));
318 if (retries > IXLV_AQ_MAX_ERR) {
319 INIT_DBG_DEV(dev, "Did not receive response after %d tries.",
/* Copy no more than the single-VSI sized buffer computed above, then let
 * shared code populate hw capabilities from the resource message. */
326 memcpy(sc->vf_res, event.msg_buf, min(event.msg_len, len));
327 i40e_vf_parse_hw_config(hw, sc->vf_res);
330 free(event.msg_buf, M_DEVBUF);
336 ** ixlv_configure_queues
338 ** Request that the PF set up our queues.
341 ixlv_configure_queues(struct ixlv_sc *sc)
343 device_t dev = sc->dev;
344 struct ixl_vsi *vsi = &sc->vsi;
345 struct ixl_queue *que = vsi->queues;
350 struct i40e_virtchnl_vsi_queue_config_info *vqci;
351 struct i40e_virtchnl_queue_pair_info *vqpi;
/* Only one virtchnl command may be outstanding at a time; bail if the
 * previous one has not completed (cleared in ixlv_vc_completion()). */
354 if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
355 /* bail because we already have a command pending */
357 device_printf(dev, "%s: command %d pending\n",
358 __func__, sc->current_op);
/* Build a variable-length config message: header plus one
 * queue_pair_info per queue pair. */
363 pairs = vsi->num_queues;
364 sc->current_op = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
365 len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
366 (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
367 vqci = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
369 device_printf(dev, "%s: unable to allocate memory\n", __func__);
372 vqci->vsi_id = sc->vsi_res->vsi_id;
373 vqci->num_queue_pairs = pairs;
375 /* Size check is not needed here - HW max is 16 queue pairs, and we
376 * can fit info for 31 of them into the AQ buffer before it overflows.
378 for (int i = 0; i < pairs; i++, que++) {
381 vqpi->txq.vsi_id = vqci->vsi_id;
382 vqpi->txq.queue_id = i;
383 vqpi->txq.ring_len = que->num_desc;
384 vqpi->txq.dma_ring_addr = txr->dma.pa;
385 /* Enable Head writeback */
/* Head writeback area lives immediately after the TX descriptor ring. */
386 vqpi->txq.headwb_enabled = 1;
387 vqpi->txq.dma_headwb_addr = txr->dma.pa +
388 (que->num_desc * sizeof(struct i40e_tx_desc));
390 vqpi->rxq.vsi_id = vqci->vsi_id;
391 vqpi->rxq.queue_id = i;
392 vqpi->rxq.ring_len = que->num_desc;
393 vqpi->rxq.dma_ring_addr = rxr->dma.pa;
394 vqpi->rxq.max_pkt_size = vsi->max_frame_size;
395 vqpi->rxq.databuffer_size = rxr->mbuf_sz;
399 ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
401 free(vqci, M_DEVBUF);
/* Mark the request in-flight and drop it from the required set; the
 * completion handler clears the pending bit. */
402 sc->aq_pending |= IXLV_FLAG_AQ_CONFIGURE_QUEUES;
403 sc->aq_required &= ~IXLV_FLAG_AQ_CONFIGURE_QUEUES;
407 ** ixlv_enable_queues
409 ** Request that the PF enable all of our queues.
412 ixlv_enable_queues(struct ixlv_sc *sc)
414 struct i40e_virtchnl_queue_select vqs;
/* Only one virtchnl command may be outstanding at a time. */
416 if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
417 /* we already have a command pending */
419 device_printf(sc->dev, "%s: command %d pending\n",
420 __func__, sc->current_op);
424 sc->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
425 vqs.vsi_id = sc->vsi_res->vsi_id;
/* Bitmask selecting every queue pair: (1 << n) - 1 sets bits 0..n-1.
 * NOTE(review): shifts by >= 31 pairs would overflow a 32-bit shift,
 * but HW caps queue pairs well below that. */
426 vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
427 vqs.rx_queues = vqs.tx_queues;
428 ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
429 (u8 *)&vqs, sizeof(vqs));
430 sc->aq_pending |= IXLV_FLAG_AQ_ENABLE_QUEUES;
431 sc->aq_required &= ~IXLV_FLAG_AQ_ENABLE_QUEUES;
435 ** ixlv_disable_queues
437 ** Request that the PF disable all of our queues.
440 ixlv_disable_queues(struct ixlv_sc *sc)
442 struct i40e_virtchnl_queue_select vqs;
/* Only one virtchnl command may be outstanding at a time. */
444 if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
445 /* we already have a command pending */
447 device_printf(sc->dev, "%s: command %d pending\n",
448 __func__, sc->current_op);
452 sc->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
453 vqs.vsi_id = sc->vsi_res->vsi_id;
/* Select every queue pair, mirroring ixlv_enable_queues(). */
454 vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
455 vqs.rx_queues = vqs.tx_queues;
456 ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
457 (u8 *)&vqs, sizeof(vqs));
458 sc->aq_pending |= IXLV_FLAG_AQ_DISABLE_QUEUES;
459 sc->aq_required &= ~IXLV_FLAG_AQ_DISABLE_QUEUES;
465 ** Request that the PF map queues to interrupt vectors. Misc causes, including
466 ** admin queue, are always mapped to vector 0.
469 ixlv_map_queues(struct ixlv_sc *sc)
471 struct i40e_virtchnl_irq_map_info *vm;
473 struct ixl_vsi *vsi = &sc->vsi;
474 struct ixl_queue *que = vsi->queues;
/* Only one virtchnl command may be outstanding at a time. */
476 if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
477 /* we already have a command pending */
479 device_printf(sc->dev, "%s: command %d pending\n",
480 __func__, sc->current_op);
484 sc->current_op = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;
486 /* How many queue vectors, adminq uses one */
/* Variable-length message: header plus one vector_map per MSI-X vector.
 * NOTE(review): allocated with M_NOWAIT but not M_ZERO here — any fields
 * not explicitly set below retain garbage; verify against full source. */
489 len = sizeof(struct i40e_virtchnl_irq_map_info) +
490 (sc->msix * sizeof(struct i40e_virtchnl_vector_map));
491 vm = malloc(len, M_DEVBUF, M_NOWAIT);
493 printf("%s: unable to allocate memory\n", __func__);
497 vm->num_vectors = sc->msix;
498 /* Queue vectors first */
499 for (i = 0; i < q; i++, que++) {
500 vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id;
501 vm->vecmap[i].vector_id = i + 1; /* first is adminq */
502 vm->vecmap[i].txq_map = (1 << que->me);
503 vm->vecmap[i].rxq_map = (1 << que->me);
506 /* Misc vector last - this is only for AdminQ messages */
507 vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id;
508 vm->vecmap[i].vector_id = 0;
509 vm->vecmap[i].txq_map = 0;
510 vm->vecmap[i].rxq_map = 0;
512 ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
515 sc->aq_pending |= IXLV_FLAG_AQ_MAP_VECTORS;
516 sc->aq_required &= ~IXLV_FLAG_AQ_MAP_VECTORS;
520 ** Scan the Filter List looking for vlans that need
521 ** to be added, then create the data to hand to the AQ
525 ixlv_add_vlans(struct ixlv_sc *sc)
527 struct i40e_virtchnl_vlan_filter_list *v;
528 struct ixlv_vlan_filter *f, *ftmp;
529 device_t dev = sc->dev;
530 int len, i = 0, cnt = 0;
/* Only one virtchnl command may be outstanding at a time. */
532 if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN)
535 sc->current_op = I40E_VIRTCHNL_OP_ADD_VLAN;
537 /* Get count of VLAN filters to add */
538 SLIST_FOREACH(f, sc->vlan_filters, next) {
539 if (f->flags & IXL_FILTER_ADD)
/* Nothing flagged for addition: clear the request and back out. */
543 if (!cnt) { /* no work... */
544 sc->aq_required &= ~IXLV_FLAG_AQ_ADD_VLAN_FILTER;
545 sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
549 len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
/* The whole message must fit in one AQ buffer. */
552 if (len > IXL_AQ_BUF_SZ) {
553 device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
/* NOTE(review): allocated without M_ZERO here, unlike the M_ZERO used by
 * ixlv_del_vlans() — confirm whether this asymmetry is intended. */
558 v = malloc(len, M_DEVBUF, M_NOWAIT);
560 device_printf(dev, "%s: unable to allocate memory\n",
565 v->vsi_id = sc->vsi_res->vsi_id;
566 v->num_elements = cnt;
568 /* Scan the filter array */
/* Copy each flagged VLAN ID into the message and mark it in-use. */
569 SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) {
570 if (f->flags & IXL_FILTER_ADD) {
571 bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16));
572 f->flags = IXL_FILTER_USED;
578 if (i == 0) { /* Should not happen... */
579 device_printf(dev, "%s: i == 0?\n", __func__);
583 ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)v, len);
586 sc->aq_pending |= IXLV_FLAG_AQ_ADD_VLAN_FILTER;
587 sc->aq_required &= ~IXLV_FLAG_AQ_ADD_VLAN_FILTER;
591 ** Scan the Filter Table looking for vlans that need
592 ** to be removed, then create the data to hand to the AQ
596 ixlv_del_vlans(struct ixlv_sc *sc)
598 device_t dev = sc->dev;
599 struct i40e_virtchnl_vlan_filter_list *v;
600 struct ixlv_vlan_filter *f, *ftmp;
601 int len, i = 0, cnt = 0;
/* Only one virtchnl command may be outstanding at a time. */
603 if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN)
606 sc->current_op = I40E_VIRTCHNL_OP_DEL_VLAN;
608 /* Get count of VLAN filters to delete */
609 SLIST_FOREACH(f, sc->vlan_filters, next) {
610 if (f->flags & IXL_FILTER_DEL)
/* Nothing flagged for deletion: clear the request and back out. */
614 if (!cnt) { /* no work... */
615 sc->aq_required &= ~IXLV_FLAG_AQ_DEL_VLAN_FILTER;
616 sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
620 len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
/* The whole message must fit in one AQ buffer. */
623 if (len > IXL_AQ_BUF_SZ) {
624 device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
629 v = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
631 device_printf(dev, "%s: unable to allocate memory\n",
636 v->vsi_id = sc->vsi_res->vsi_id;
637 v->num_elements = cnt;
639 /* Scan the filter array */
/* Copy each flagged VLAN ID into the message and drop the software
 * filter entry; SLIST_FOREACH_SAFE allows removal mid-iteration. */
640 SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) {
641 if (f->flags & IXL_FILTER_DEL) {
642 bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16));
644 SLIST_REMOVE(sc->vlan_filters, f, ixlv_vlan_filter, next);
650 if (i == 0) { /* Should not happen... */
651 device_printf(dev, "%s: i == 0?\n", __func__);
655 ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)v, len);
658 sc->aq_pending |= IXLV_FLAG_AQ_DEL_VLAN_FILTER;
659 sc->aq_required &= ~IXLV_FLAG_AQ_DEL_VLAN_FILTER;
664 ** This routine takes additions to the vsi filter
665 ** table and creates an Admin Queue call to create
666 ** the filters in the hardware.
669 ixlv_add_ether_filters(struct ixlv_sc *sc)
671 struct i40e_virtchnl_ether_addr_list *a;
672 struct ixlv_mac_filter *f;
673 device_t dev = sc->dev;
674 int len, j = 0, cnt = 0;
/* Only one virtchnl command may be outstanding at a time. */
676 if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN)
679 sc->current_op = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS;
681 /* Get count of MAC addresses to add */
682 SLIST_FOREACH(f, sc->mac_filters, next) {
683 if (f->flags & IXL_FILTER_ADD)
/* Nothing to add: clear state and wake any thread sleeping on the
 * add_ether_done channel. */
686 if (cnt == 0) { /* Should not happen... */
687 DDPRINTF(dev, "cnt == 0, exiting...");
688 sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
689 sc->aq_required &= ~IXLV_FLAG_AQ_ADD_MAC_FILTER;
690 wakeup(&sc->add_ether_done);
/* Variable-length message: header plus one ether_addr per filter. */
694 len = sizeof(struct i40e_virtchnl_ether_addr_list) +
695 (cnt * sizeof(struct i40e_virtchnl_ether_addr));
697 a = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
699 device_printf(dev, "%s: Failed to get memory for "
700 "virtchnl_ether_addr_list\n", __func__);
703 a->vsi_id = sc->vsi.id;
704 a->num_elements = cnt;
706 /* Scan the filter array */
/* Copy each flagged MAC into the message and clear its ADD flag so it
 * is not re-sent on the next pass. */
707 SLIST_FOREACH(f, sc->mac_filters, next) {
708 if (f->flags & IXL_FILTER_ADD) {
709 bcopy(f->macaddr, a->list[j].addr, ETHER_ADDR_LEN);
710 f->flags &= ~IXL_FILTER_ADD;
713 DDPRINTF(dev, "ADD: " MAC_FORMAT,
714 MAC_FORMAT_ARGS(f->macaddr));
719 DDPRINTF(dev, "len %d, j %d, cnt %d",
722 I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, (u8 *)a, len);
725 sc->aq_pending |= IXLV_FLAG_AQ_ADD_MAC_FILTER;
726 sc->aq_required &= ~IXLV_FLAG_AQ_ADD_MAC_FILTER;
731 ** This routine takes filters flagged for deletion in the
732 ** sc MAC filter list and creates an Admin Queue call
733 ** to delete those filters in the hardware.
736 ixlv_del_ether_filters(struct ixlv_sc *sc)
738 struct i40e_virtchnl_ether_addr_list *d;
739 device_t dev = sc->dev;
740 struct ixlv_mac_filter *f, *f_temp;
741 int len, j = 0, cnt = 0;
/* Only one virtchnl command may be outstanding at a time. */
743 if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN)
746 sc->current_op = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;
748 /* Get count of MAC addresses to delete */
749 SLIST_FOREACH(f, sc->mac_filters, next) {
750 if (f->flags & IXL_FILTER_DEL)
/* Nothing to delete: clear state and wake any thread sleeping on the
 * del_ether_done channel. */
754 DDPRINTF(dev, "cnt == 0, exiting...");
755 sc->aq_required &= ~IXLV_FLAG_AQ_DEL_MAC_FILTER;
756 sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
757 wakeup(&sc->del_ether_done);
/* Variable-length message: header plus one ether_addr per filter. */
761 len = sizeof(struct i40e_virtchnl_ether_addr_list) +
762 (cnt * sizeof(struct i40e_virtchnl_ether_addr));
764 d = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
766 device_printf(dev, "%s: Failed to get memory for "
767 "virtchnl_ether_addr_list\n", __func__);
770 d->vsi_id = sc->vsi.id;
771 d->num_elements = cnt;
773 /* Scan the filter array */
/* Copy each flagged MAC into the message and drop the software filter;
 * SLIST_FOREACH_SAFE allows removal mid-iteration. */
774 SLIST_FOREACH_SAFE(f, sc->mac_filters, next, f_temp) {
775 if (f->flags & IXL_FILTER_DEL) {
776 bcopy(f->macaddr, d->list[j].addr, ETHER_ADDR_LEN);
777 DDPRINTF(dev, "DEL: " MAC_FORMAT,
778 MAC_FORMAT_ARGS(f->macaddr));
780 SLIST_REMOVE(sc->mac_filters, f, ixlv_mac_filter, next);
787 I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, (u8 *)d, len);
790 sc->aq_pending |= IXLV_FLAG_AQ_DEL_MAC_FILTER;
791 sc->aq_required &= ~IXLV_FLAG_AQ_DEL_MAC_FILTER;
796 ** ixlv_request_reset
797 ** Request that the PF reset this VF. No response is expected.
800 ixlv_request_reset(struct ixlv_sc *sc)
803 ** Set the reset status to "in progress" before
804 ** the request, this avoids any possibility of
805 ** a mistaken early detection of completion.
807 wr32(&sc->hw, I40E_VFGEN_RSTAT, I40E_VFR_INPROGRESS);
808 ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0);
/* No reply is expected, so no op is left pending. */
809 sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
813 ** ixlv_request_stats
814 ** Request the statistics for this VF's VSI from PF.
817 ixlv_request_stats(struct ixlv_sc *sc)
819 struct i40e_virtchnl_queue_select vqs;
/* Only one virtchnl command may be outstanding at a time; stats are
 * low priority, so just skip this round if something is pending. */
822 if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN)
825 sc->current_op = I40E_VIRTCHNL_OP_GET_STATS;
826 vqs.vsi_id = sc->vsi_res->vsi_id;
827 error = ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_STATS,
828 (u8 *)&vqs, sizeof(vqs));
829 /* Low priority, ok if it fails */
/* Reset current_op so a failed send does not block later commands. */
831 sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
835 ** Updates driver's stats counters with VSI stats returned from PF.
838 ixlv_update_stats_counters(struct ixlv_sc *sc, struct i40e_eth_stats *es)
840 struct ifnet *ifp = sc->vsi.ifp;
/* Map the PF-reported ethernet stats onto the ifnet counters;
 * packet totals aggregate unicast plus (per the visible lines)
 * additional cast types continued on the elided lines. */
842 ifp->if_ipackets = es->rx_unicast +
845 ifp->if_opackets = es->tx_unicast +
848 ifp->if_ibytes = es->rx_bytes;
849 ifp->if_obytes = es->tx_bytes;
850 ifp->if_imcasts = es->rx_multicast;
851 ifp->if_omcasts = es->tx_multicast;
853 ifp->if_oerrors = es->tx_errors;
854 ifp->if_iqdrops = es->rx_discards;
855 ifp->if_noproto = es->rx_unknown_protocol;
/* Keep a full copy of the raw stats for sysctl/reporting use. */
857 sc->vsi.eth_stats = *es;
861 ** ixlv_vc_completion
863 ** Asynchronous completion function for admin queue messages. Rather than busy
864 ** wait, we fire off our requests and assume that no errors will be returned.
865 ** This function handles the reply messages.
868 ixlv_vc_completion(struct ixlv_sc *sc,
869 enum i40e_virtchnl_ops v_opcode,
870 i40e_status v_retval, u8 *msg, u16 msglen)
872 device_t dev = sc->dev;
873 struct ixl_vsi *vsi = &sc->vsi;
/* Unsolicited PF events (link change, reset warning) are handled first
 * and do not correspond to any pending VF request. */
875 if (v_opcode == I40E_VIRTCHNL_OP_EVENT) {
876 struct i40e_virtchnl_pf_event *vpe =
877 (struct i40e_virtchnl_pf_event *)msg;
879 switch (vpe->event) {
880 case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
882 vpe->event_data.link_event.link_status;
884 vpe->event_data.link_event.link_speed;
886 case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
887 device_printf(dev, "PF initiated reset!\n");
888 sc->init_state = IXLV_RESET_PENDING;
892 device_printf(dev, "%s: Unknown event %d from AQ\n",
893 __func__, vpe->event);
/* A reply for an op we did not issue (GET_STATS excepted, since it is
 * fire-and-forget) indicates the driver and PF are out of sync. */
900 if (v_opcode != sc->current_op
901 && sc->current_op != I40E_VIRTCHNL_OP_GET_STATS) {
902 device_printf(dev, "%s: Pending op is %d, received %d.\n",
903 __func__, sc->current_op, v_opcode);
904 sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
908 /* Catch-all error response */
911 "%s: AQ returned error %d to our request %d!\n",
912 __func__, v_retval, v_opcode);
/* GET_STATS replies are frequent; keep them out of the debug log. */
916 if (v_opcode != I40E_VIRTCHNL_OP_GET_STATS)
917 DDPRINTF(dev, "opcode %d", v_opcode);
/* Per-opcode completion handling: clear the matching aq_pending bit so
 * the next queued command may be issued. */
921 case I40E_VIRTCHNL_OP_GET_STATS:
922 ixlv_update_stats_counters(sc, (struct i40e_eth_stats *)msg);
924 case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
925 sc->aq_pending &= ~(IXLV_FLAG_AQ_ADD_MAC_FILTER);
927 device_printf(dev, "WARNING: Error adding VF mac filter!\n");
928 device_printf(dev, "WARNING: Device may not receive traffic!\n");
931 case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
932 sc->aq_pending &= ~(IXLV_FLAG_AQ_DEL_MAC_FILTER);
934 case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
935 sc->aq_pending &= ~(IXLV_FLAG_AQ_CONFIGURE_PROMISC);
937 case I40E_VIRTCHNL_OP_ADD_VLAN:
938 sc->aq_pending &= ~(IXLV_FLAG_AQ_ADD_VLAN_FILTER);
940 case I40E_VIRTCHNL_OP_DEL_VLAN:
941 sc->aq_pending &= ~(IXLV_FLAG_AQ_DEL_VLAN_FILTER);
943 case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
944 sc->aq_pending &= ~(IXLV_FLAG_AQ_ENABLE_QUEUES);
946 /* Turn on all interrupts */
947 ixlv_enable_intr(vsi);
948 /* And inform the stack we're ready */
949 vsi->ifp->if_drv_flags |= IFF_DRV_RUNNING;
950 vsi->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
953 case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
954 sc->aq_pending &= ~(IXLV_FLAG_AQ_DISABLE_QUEUES);
956 /* Turn off all interrupts */
957 ixlv_disable_intr(vsi);
958 /* Tell the stack that the interface is no longer active */
959 vsi->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
962 case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
963 sc->aq_pending &= ~(IXLV_FLAG_AQ_CONFIGURE_QUEUES);
965 case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
966 sc->aq_pending &= ~(IXLV_FLAG_AQ_MAP_VECTORS);
970 "%s: Received unexpected message %d from PF.\n",
/* Completion processed; allow the next command to be issued. */
974 sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN;