1 /******************************************************************************
3 Copyright (c) 2013-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 ** Virtual Channel support
37 ** These are support functions for communication
38 ** between the VF and PF drivers.
43 #include "i40e_prototype.h"
46 /* busy wait delay in msec */
47 #define IXLV_BUSY_WAIT_DELAY 10
48 #define IXLV_BUSY_WAIT_COUNT 50
50 static void ixl_vc_process_resp(struct ixl_vc_mgr *, uint32_t,
51 enum i40e_status_code);
52 static void ixl_vc_process_next(struct ixl_vc_mgr *mgr);
53 static void ixl_vc_schedule_retry(struct ixl_vc_mgr *mgr);
54 static void ixl_vc_send_current(struct ixl_vc_mgr *mgr);
58 ** Validate VF messages
/*
** Pre-validate a VF->PF virtchnl message before it is sent: each opcode
** sets valid_len to its fixed payload size; list-style messages then add
** num_elements * element-size, and an empty list is flagged as malformed.
** The final check rejects any message whose length does not exactly match.
** NOTE(review): this extraction has dropped lines in this function (the
** switch header, break statements, return paths) — confirm structure
** against upstream ixlv_vc.c before editing.
*/
60 static int ixl_vc_validate_vf_msg(struct ixlv_sc *sc, u32 v_opcode,
63 bool err_msg_format = false;
66 /* Validate message length. */
68 case I40E_VIRTCHNL_OP_VERSION:
69 valid_len = sizeof(struct i40e_virtchnl_version_info);
/* RESET_VF / GET_VF_RESOURCES carry no payload (valid_len presumably 0). */
71 case I40E_VIRTCHNL_OP_RESET_VF:
72 case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
75 case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
76 valid_len = sizeof(struct i40e_virtchnl_txq_info);
78 case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
79 valid_len = sizeof(struct i40e_virtchnl_rxq_info);
/* Variable-length: header + one queue_pair_info per queue pair. */
81 case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
82 valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
83 if (msglen >= valid_len) {
84 struct i40e_virtchnl_vsi_queue_config_info *vqc =
85 (struct i40e_virtchnl_vsi_queue_config_info *)msg;
86 valid_len += (vqc->num_queue_pairs *
88 i40e_virtchnl_queue_pair_info));
89 if (vqc->num_queue_pairs == 0)
90 err_msg_format = true;
/* Variable-length: header + one vector_map per vector. */
93 case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
94 valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
95 if (msglen >= valid_len) {
96 struct i40e_virtchnl_irq_map_info *vimi =
97 (struct i40e_virtchnl_irq_map_info *)msg;
98 valid_len += (vimi->num_vectors *
99 sizeof(struct i40e_virtchnl_vector_map));
100 if (vimi->num_vectors == 0)
101 err_msg_format = true;
104 case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
105 case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
106 valid_len = sizeof(struct i40e_virtchnl_queue_select);
/* Variable-length: header + one ether_addr per list element. */
108 case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
109 case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
110 valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
111 if (msglen >= valid_len) {
112 struct i40e_virtchnl_ether_addr_list *veal =
113 (struct i40e_virtchnl_ether_addr_list *)msg;
114 valid_len += veal->num_elements *
115 sizeof(struct i40e_virtchnl_ether_addr);
116 if (veal->num_elements == 0)
117 err_msg_format = true;
/* Variable-length: header + one u16 VLAN id per list element. */
120 case I40E_VIRTCHNL_OP_ADD_VLAN:
121 case I40E_VIRTCHNL_OP_DEL_VLAN:
122 valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
123 if (msglen >= valid_len) {
124 struct i40e_virtchnl_vlan_filter_list *vfl =
125 (struct i40e_virtchnl_vlan_filter_list *)msg;
126 valid_len += vfl->num_elements * sizeof(u16);
127 if (vfl->num_elements == 0)
128 err_msg_format = true;
131 case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
132 valid_len = sizeof(struct i40e_virtchnl_promisc_info);
134 case I40E_VIRTCHNL_OP_GET_STATS:
135 valid_len = sizeof(struct i40e_virtchnl_queue_select);
137 /* These are always errors coming from the VF. */
138 case I40E_VIRTCHNL_OP_EVENT:
139 case I40E_VIRTCHNL_OP_UNKNOWN:
144 /* few more checks */
/* Reject if the length mismatches or a list was empty. */
145 if ((valid_len != msglen) || (err_msg_format))
155 ** Send message to PF and print status if failure.
/*
** Validate and send a virtchnl message to the PF over the admin queue.
** Prints a diagnostic (with the AQ status) on either validation or send
** failure. Return handling is not fully visible in this extraction —
** verify against upstream.
*/
158 ixlv_send_pf_msg(struct ixlv_sc *sc,
159 enum i40e_virtchnl_ops op, u8 *msg, u16 len)
161 struct i40e_hw *hw = &sc->hw;
162 device_t dev = sc->dev;
167 ** Pre-validating messages to the PF
/* Local sanity check first so malformed requests never reach the wire. */
170 val_err = ixl_vc_validate_vf_msg(sc, op, msg, len);
172 device_printf(dev, "Error validating msg to PF for op %d,"
173 " msglen %d: error %d\n", op, len, val_err);
176 err = i40e_aq_send_msg_to_pf(hw, op, I40E_SUCCESS, msg, len, NULL);
178 device_printf(dev, "Unable to send opcode %d to PF, "
179 "error %d, aq status %d\n", op, err, hw->aq.asq_last_status);
187 ** Send API version admin queue message to the PF. The reply is not checked
188 ** in this function. Returns 0 if the message was successfully
189 ** sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
/*
** Send our supported virtchnl API version to the PF (fire-and-forget;
** the reply is checked separately in ixlv_verify_api_ver()).
*/
192 ixlv_send_api_ver(struct ixlv_sc *sc)
194 struct i40e_virtchnl_version_info vvi;
196 vvi.major = I40E_VIRTCHNL_VERSION_MAJOR;
197 vvi.minor = I40E_VIRTCHNL_VERSION_MINOR;
199 return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_VERSION,
200 (u8 *)&vvi, sizeof(vvi));
204 ** ixlv_verify_api_ver
206 ** Compare API versions with the PF. Must be called after admin queue is
207 ** initialized. Returns 0 if API versions match, EIO if
208 ** they do not, or I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty.
/*
** Poll the admin receive queue for the PF's OP_VERSION reply and compare
** its major/minor against ours. Busy-waits (100ms steps) up to
** IXLV_AQ_MAX_ERR retries while the ARQ is empty. The event buffer is
** heap-allocated and freed on exit. Error/exit paths are partially
** missing from this extraction — verify against upstream.
*/
211 ixlv_verify_api_ver(struct ixlv_sc *sc)
213 struct i40e_virtchnl_version_info *pf_vvi;
214 struct i40e_hw *hw = &sc->hw;
215 struct i40e_arq_event_info event;
219 event.buf_len = IXL_AQ_BUFSZ;
220 event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT);
221 if (!event.msg_buf) {
/* Retry loop: keep cleaning the ARQ until something arrives or we time out. */
227 if (++retries > IXLV_AQ_MAX_ERR)
230 /* NOTE: initial delay is necessary */
231 i40e_msec_delay(100);
232 err = i40e_clean_arq_element(hw, &event, NULL);
233 } while (err == I40E_ERR_ADMIN_QUEUE_NO_WORK);
/* PF encodes its status in cookie_low of the descriptor. */
237 err = (i40e_status)le32toh(event.desc.cookie_low);
/* cookie_high carries the opcode this reply answers. */
243 if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
244 I40E_VIRTCHNL_OP_VERSION) {
245 DDPRINTF(sc->dev, "Received unexpected op response: %d\n",
246 le32toh(event.desc.cookie_high));
251 pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
252 if ((pf_vvi->major != I40E_VIRTCHNL_VERSION_MAJOR) ||
253 (pf_vvi->minor != I40E_VIRTCHNL_VERSION_MINOR))
257 free(event.msg_buf, M_DEVBUF);
263 ** ixlv_send_vf_config_msg
265 ** Send VF configuration request admin queue message to the PF. The reply
266 ** is not checked in this function. Returns 0 if the message was
267 ** successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
/*
** Ask the PF for this VF's resource configuration (no payload; the reply
** is handled later by ixlv_get_vf_config()).
*/
270 ixlv_send_vf_config_msg(struct ixlv_sc *sc)
272 return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
277 ** ixlv_get_vf_config
279 ** Get VF configuration from PF and populate hw structure. Must be called after
280 ** admin queue is initialized. Busy waits until response is received from PF,
281 ** with maximum timeout. Response from PF is returned in the buffer for further
282 ** processing by the caller.
/*
** Busy-wait for the PF's GET_VF_RESOURCES reply, then copy it into
** sc->vf_res and let i40e_vf_parse_hw_config() populate the hw struct.
** Sizes the expected reply for exactly one VSI (see note below). The
** event buffer is heap-allocated and freed on exit. Loop structure and
** some declarations are missing from this extraction — verify upstream.
*/
285 ixlv_get_vf_config(struct ixlv_sc *sc)
287 struct i40e_hw *hw = &sc->hw;
288 device_t dev = sc->dev;
289 struct i40e_arq_event_info event;
294 /* Note this assumes a single VSI */
295 len = sizeof(struct i40e_virtchnl_vf_resource) +
296 sizeof(struct i40e_virtchnl_vsi_resource);
298 event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT);
299 if (!event.msg_buf) {
/* Poll the ARQ; empty queue counts against the retry budget. */
305 err = i40e_clean_arq_element(hw, &event, NULL);
306 if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
307 if (++retries <= IXLV_AQ_MAX_ERR)
/* A reply for a different opcode is logged and presumably skipped. */
309 } else if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
310 I40E_VIRTCHNL_OP_GET_VF_RESOURCES) {
311 DDPRINTF(dev, "Received a response from PF,"
312 " opcode %d, error %d",
313 le32toh(event.desc.cookie_high),
314 le32toh(event.desc.cookie_low));
/* PF status lives in cookie_low; nonzero is a hard error. */
318 err = (i40e_status)le32toh(event.desc.cookie_low);
320 device_printf(dev, "%s: Error returned from PF,"
321 " opcode %d, error %d\n", __func__,
322 le32toh(event.desc.cookie_high),
323 le32toh(event.desc.cookie_low));
327 /* We retrieved the config message, with no errors */
331 if (retries > IXLV_AQ_MAX_ERR) {
332 INIT_DBG_DEV(dev, "Did not receive response after %d tries.",
/* Copy no more than we sized for, even if the PF sent more. */
339 memcpy(sc->vf_res, event.msg_buf, min(event.msg_len, len));
340 i40e_vf_parse_hw_config(hw, sc->vf_res);
343 free(event.msg_buf, M_DEVBUF);
349 ** ixlv_configure_queues
351 ** Request that the PF set up our queues.
/*
** Build a CONFIG_VSI_QUEUES message describing every TX/RX queue pair
** (ring addresses, lengths, buffer sizes) and send it to the PF.
** TX head writeback is enabled, with the writeback area placed directly
** after the descriptor ring. On allocation failure the command is
** rescheduled via the vc manager rather than failed outright.
*/
354 ixlv_configure_queues(struct ixlv_sc *sc)
356 device_t dev = sc->dev;
357 struct ixl_vsi *vsi = &sc->vsi;
358 struct ixl_queue *que = vsi->queues;
363 struct i40e_virtchnl_vsi_queue_config_info *vqci;
364 struct i40e_virtchnl_queue_pair_info *vqpi;
366 pairs = vsi->num_queues;
367 len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
368 (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
369 vqci = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
371 device_printf(dev, "%s: unable to allocate memory\n", __func__);
372 ixl_vc_schedule_retry(&sc->vc_mgr);
375 vqci->vsi_id = sc->vsi_res->vsi_id;
376 vqci->num_queue_pairs = pairs;
378 /* Size check is not needed here - HW max is 16 queue pairs, and we
379 * can fit info for 31 of them into the AQ buffer before it overflows.
/* One iteration per queue pair; vqpi walks the trailing array. */
381 for (int i = 0; i < pairs; i++, que++, vqpi++) {
384 vqpi->txq.vsi_id = vqci->vsi_id;
385 vqpi->txq.queue_id = i;
386 vqpi->txq.ring_len = que->num_desc;
387 vqpi->txq.dma_ring_addr = txr->dma.pa;
388 /* Enable Head writeback */
389 vqpi->txq.headwb_enabled = 1;
/* Writeback address = end of the TX descriptor ring. */
390 vqpi->txq.dma_headwb_addr = txr->dma.pa +
391 (que->num_desc * sizeof(struct i40e_tx_desc));
393 vqpi->rxq.vsi_id = vqci->vsi_id;
394 vqpi->rxq.queue_id = i;
395 vqpi->rxq.ring_len = que->num_desc;
396 vqpi->rxq.dma_ring_addr = rxr->dma.pa;
397 vqpi->rxq.max_pkt_size = vsi->max_frame_size;
398 vqpi->rxq.databuffer_size = rxr->mbuf_sz;
399 vqpi->rxq.splithdr_enabled = 0;
402 ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
404 free(vqci, M_DEVBUF);
408 ** ixlv_enable_queues
410 ** Request that the PF enable all of our queues.
/*
** Ask the PF to enable all of this VF's queues. The TX/RX bitmasks set
** one bit per queue pair; mirrors ixlv_disable_queues() below.
** NOTE(review): (1 << num_queue_pairs) - 1 assumes num_queue_pairs < 32.
*/
413 ixlv_enable_queues(struct ixlv_sc *sc)
415 struct i40e_virtchnl_queue_select vqs;
417 vqs.vsi_id = sc->vsi_res->vsi_id;
418 vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
419 vqs.rx_queues = vqs.tx_queues;
420 ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
421 (u8 *)&vqs, sizeof(vqs));
425 ** ixlv_disable_queues
427 ** Request that the PF disable all of our queues.
/*
** Ask the PF to disable all of this VF's queues; same queue-select
** bitmask construction as ixlv_enable_queues().
*/
430 ixlv_disable_queues(struct ixlv_sc *sc)
432 struct i40e_virtchnl_queue_select vqs;
434 vqs.vsi_id = sc->vsi_res->vsi_id;
435 vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
436 vqs.rx_queues = vqs.tx_queues;
437 ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
438 (u8 *)&vqs, sizeof(vqs));
444 ** Request that the PF map queues to interrupt vectors. Misc causes, including
445 ** admin queue, are always mapped to vector 0.
/*
** Build and send a CONFIG_IRQ_MAP message: each queue pair gets its own
** MSI-X vector (vector 0 is reserved for the admin queue and is mapped
** last with empty queue masks). On allocation failure the command is
** rescheduled via the vc manager.
*/
448 ixlv_map_queues(struct ixlv_sc *sc)
450 struct i40e_virtchnl_irq_map_info *vm;
452 struct ixl_vsi *vsi = &sc->vsi;
453 struct ixl_queue *que = vsi->queues;
455 /* How many queue vectors, adminq uses one */
458 len = sizeof(struct i40e_virtchnl_irq_map_info) +
459 (sc->msix * sizeof(struct i40e_virtchnl_vector_map));
460 vm = malloc(len, M_DEVBUF, M_NOWAIT);
462 printf("%s: unable to allocate memory\n", __func__);
463 ixl_vc_schedule_retry(&sc->vc_mgr);
467 vm->num_vectors = sc->msix;
468 /* Queue vectors first */
469 for (i = 0; i < q; i++, que++) {
470 vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id;
471 vm->vecmap[i].vector_id = i + 1; /* first is adminq */
472 vm->vecmap[i].txq_map = (1 << que->me);
473 vm->vecmap[i].rxq_map = (1 << que->me);
474 vm->vecmap[i].rxitr_idx = 0;
475 vm->vecmap[i].txitr_idx = 0;
478 /* Misc vector last - this is only for AdminQ messages */
479 vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id;
480 vm->vecmap[i].vector_id = 0;
481 vm->vecmap[i].txq_map = 0;
482 vm->vecmap[i].rxq_map = 0;
483 vm->vecmap[i].rxitr_idx = 0;
484 vm->vecmap[i].txitr_idx = 0;
486 ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
492 ** Scan the Filter List looking for vlans that need
493 ** to be added, then create the data to hand to the AQ
/*
** Scan sc->vlan_filters for entries flagged IXL_FILTER_ADD, pack their
** VLAN ids into an ADD_VLAN message and send it to the PF. Filters are
** marked IXL_FILTER_USED once queued. With no pending adds, the vc
** command completes immediately; oversized or failed allocations are
** rescheduled via the vc manager.
*/
497 ixlv_add_vlans(struct ixlv_sc *sc)
499 struct i40e_virtchnl_vlan_filter_list *v;
500 struct ixlv_vlan_filter *f, *ftmp;
501 device_t dev = sc->dev;
502 int len, i = 0, cnt = 0;
504 /* Get count of VLAN filters to add */
505 SLIST_FOREACH(f, sc->vlan_filters, next) {
506 if (f->flags & IXL_FILTER_ADD)
510 if (!cnt) { /* no work... */
511 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
516 len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
/* The whole message must fit in one admin-queue buffer. */
519 if (len > IXL_AQ_BUF_SZ) {
520 device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
522 ixl_vc_schedule_retry(&sc->vc_mgr);
526 v = malloc(len, M_DEVBUF, M_NOWAIT);
528 device_printf(dev, "%s: unable to allocate memory\n",
530 ixl_vc_schedule_retry(&sc->vc_mgr);
534 v->vsi_id = sc->vsi_res->vsi_id;
535 v->num_elements = cnt;
537 /* Scan the filter array */
538 SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) {
539 if (f->flags & IXL_FILTER_ADD) {
540 bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16));
541 f->flags = IXL_FILTER_USED;
547 // ERJ: Should this be taken out?
548 if (i == 0) { /* Should not happen... */
549 device_printf(dev, "%s: i == 0?\n", __func__);
550 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
555 ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)v, len);
561 ** Scan the Filter Table looking for vlans that need
562 ** to be removed, then create the data to hand to the AQ
/*
** Scan sc->vlan_filters for entries flagged IXL_FILTER_DEL, pack their
** VLAN ids into a DEL_VLAN message, unlink the filters from the list,
** and send the message to the PF. Mirrors ixlv_add_vlans(): no work
** completes the vc command immediately; oversize/alloc failure retries.
*/
566 ixlv_del_vlans(struct ixlv_sc *sc)
568 device_t dev = sc->dev;
569 struct i40e_virtchnl_vlan_filter_list *v;
570 struct ixlv_vlan_filter *f, *ftmp;
571 int len, i = 0, cnt = 0;
573 /* Get count of VLAN filters to delete */
574 SLIST_FOREACH(f, sc->vlan_filters, next) {
575 if (f->flags & IXL_FILTER_DEL)
579 if (!cnt) { /* no work... */
580 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
585 len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
588 if (len > IXL_AQ_BUF_SZ) {
589 device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
591 ixl_vc_schedule_retry(&sc->vc_mgr);
595 v = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
597 device_printf(dev, "%s: unable to allocate memory\n",
599 ixl_vc_schedule_retry(&sc->vc_mgr);
603 v->vsi_id = sc->vsi_res->vsi_id;
604 v->num_elements = cnt;
606 /* Scan the filter array */
/* _SAFE variant because entries are removed while iterating. */
607 SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) {
608 if (f->flags & IXL_FILTER_DEL) {
609 bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16));
611 SLIST_REMOVE(sc->vlan_filters, f, ixlv_vlan_filter, next);
617 // ERJ: Take this out?
618 if (i == 0) { /* Should not happen... */
619 device_printf(dev, "%s: i == 0?\n", __func__);
620 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
625 ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)v, len);
632 ** This routine takes additions to the vsi filter
633 ** table and creates an Admin Queue call to create
634 ** the filters in the hardware.
/*
** Collect MAC filters flagged IXL_FILTER_ADD into an ADD_ETHER_ADDRESS
** message and send it to the PF. The ADD flag is cleared on each filter
** as it is packed. No pending adds completes the vc command at once;
** allocation failure reschedules via the vc manager.
*/
637 ixlv_add_ether_filters(struct ixlv_sc *sc)
639 struct i40e_virtchnl_ether_addr_list *a;
640 struct ixlv_mac_filter *f;
641 device_t dev = sc->dev;
642 int len, j = 0, cnt = 0;
644 /* Get count of MAC addresses to add */
645 SLIST_FOREACH(f, sc->mac_filters, next) {
646 if (f->flags & IXL_FILTER_ADD)
649 if (cnt == 0) { /* Should not happen... */
650 DDPRINTF(dev, "cnt == 0, exiting...");
651 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER,
656 len = sizeof(struct i40e_virtchnl_ether_addr_list) +
657 (cnt * sizeof(struct i40e_virtchnl_ether_addr));
659 a = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
661 device_printf(dev, "%s: Failed to get memory for "
662 "virtchnl_ether_addr_list\n", __func__);
663 ixl_vc_schedule_retry(&sc->vc_mgr);
666 a->vsi_id = sc->vsi.id;
667 a->num_elements = cnt;
669 /* Scan the filter array */
670 SLIST_FOREACH(f, sc->mac_filters, next) {
671 if (f->flags & IXL_FILTER_ADD) {
672 bcopy(f->macaddr, a->list[j].addr, ETHER_ADDR_LEN);
/* Clear ADD so the filter is not re-sent on the next pass. */
673 f->flags &= ~IXL_FILTER_ADD;
676 DDPRINTF(dev, "ADD: " MAC_FORMAT,
677 MAC_FORMAT_ARGS(f->macaddr));
682 DDPRINTF(dev, "len %d, j %d, cnt %d",
685 I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, (u8 *)a, len);
692 ** This routine takes filters flagged for deletion in the
693 ** sc MAC filter list and creates an Admin Queue call
694 ** to delete those filters in the hardware.
/*
** Collect MAC filters flagged IXL_FILTER_DEL into a DEL_ETHER_ADDRESS
** message, unlink them from sc->mac_filters, and send the message to
** the PF. Mirrors ixlv_add_ether_filters().
*/
697 ixlv_del_ether_filters(struct ixlv_sc *sc)
699 struct i40e_virtchnl_ether_addr_list *d;
700 device_t dev = sc->dev;
701 struct ixlv_mac_filter *f, *f_temp;
702 int len, j = 0, cnt = 0;
704 /* Get count of MAC addresses to delete */
705 SLIST_FOREACH(f, sc->mac_filters, next) {
706 if (f->flags & IXL_FILTER_DEL)
710 DDPRINTF(dev, "cnt == 0, exiting...");
711 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER,
716 len = sizeof(struct i40e_virtchnl_ether_addr_list) +
717 (cnt * sizeof(struct i40e_virtchnl_ether_addr));
719 d = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
721 device_printf(dev, "%s: Failed to get memory for "
722 "virtchnl_ether_addr_list\n", __func__);
723 ixl_vc_schedule_retry(&sc->vc_mgr);
726 d->vsi_id = sc->vsi.id;
727 d->num_elements = cnt;
729 /* Scan the filter array */
/* _SAFE variant because entries are removed while iterating. */
730 SLIST_FOREACH_SAFE(f, sc->mac_filters, next, f_temp) {
731 if (f->flags & IXL_FILTER_DEL) {
732 bcopy(f->macaddr, d->list[j].addr, ETHER_ADDR_LEN);
733 DDPRINTF(dev, "DEL: " MAC_FORMAT,
734 MAC_FORMAT_ARGS(f->macaddr));
736 SLIST_REMOVE(sc->mac_filters, f, ixlv_mac_filter, next);
743 I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, (u8 *)d, len);
750 ** ixlv_request_reset
751 ** Request that the PF reset this VF. No response is expected.
/*
** Ask the PF to reset this VF. The VFGEN_RSTAT register is pre-set to
** "in progress" so the reset-detection polling cannot mistake the
** current (pre-reset) state for an already-completed reset.
*/
754 ixlv_request_reset(struct ixlv_sc *sc)
757 ** Set the reset status to "in progress" before
758 ** the request, this avoids any possibility of
759 ** a mistaken early detection of completion.
761 wr32(&sc->hw, I40E_VFGEN_RSTAT, I40E_VFR_INPROGRESS);
762 ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0);
766 ** ixlv_request_stats
767 ** Request the statistics for this VF's VSI from PF.
/*
** Request VSI statistics from the PF. Best-effort: a send failure is
** only logged, since stats are re-requested periodically anyway.
*/
770 ixlv_request_stats(struct ixlv_sc *sc)
772 struct i40e_virtchnl_queue_select vqs;
775 vqs.vsi_id = sc->vsi_res->vsi_id;
776 /* Low priority, we don't need to error check */
777 error = ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_STATS,
778 (u8 *)&vqs, sizeof(vqs));
781 device_printf(sc->dev, "Error sending stats request to PF: %d\n", error);
786 ** Updates driver's stats counters with VSI stats returned from PF.
/*
** Fold the eth stats returned by the PF into the ifnet counters.
** tx_discards is augmented with per-queue buf_ring drops tracked
** locally by the driver, then the raw stats are cached in the VSI.
*/
789 ixlv_update_stats_counters(struct ixlv_sc *sc, struct i40e_eth_stats *es)
792 uint64_t tx_discards;
797 tx_discards = es->tx_discards;
/* Add software drops from each queue's TX buf_ring. */
798 for (i = 0; i < sc->vsi.num_queues; i++)
799 tx_discards += sc->vsi.queues[i].txr.br->br_drops;
801 /* Update ifnet stats */
802 IXL_SET_IPACKETS(vsi, es->rx_unicast +
805 IXL_SET_OPACKETS(vsi, es->tx_unicast +
808 IXL_SET_IBYTES(vsi, es->rx_bytes);
809 IXL_SET_OBYTES(vsi, es->tx_bytes);
810 IXL_SET_IMCASTS(vsi, es->rx_multicast);
811 IXL_SET_OMCASTS(vsi, es->tx_multicast);
813 IXL_SET_OERRORS(vsi, es->tx_errors);
814 IXL_SET_IQDROPS(vsi, es->rx_discards);
815 IXL_SET_OQDROPS(vsi, tx_discards);
816 IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
817 IXL_SET_COLLISIONS(vsi, 0);
/* Keep a raw copy for sysctl/debug consumers. */
819 sc->vsi.eth_stats = *es;
823 ** ixlv_vc_completion
825 ** Asynchronous completion function for admin queue messages. Rather than busy
826 ** wait, we fire off our requests and assume that no errors will be returned.
827 ** This function handles the reply messages.
/*
** Dispatch a reply (or unsolicited event) received from the PF.
** OP_EVENT messages (link change, impending reset) are handled first;
** otherwise v_retval is checked as a catch-all error, then the reply's
** opcode completes the matching pending vc-manager command. Several
** break statements / closing braces are missing from this extraction —
** verify flow against upstream ixlv_vc.c.
*/
830 ixlv_vc_completion(struct ixlv_sc *sc,
831 enum i40e_virtchnl_ops v_opcode,
832 i40e_status v_retval, u8 *msg, u16 msglen)
834 device_t dev = sc->dev;
835 struct ixl_vsi *vsi = &sc->vsi;
/* Unsolicited PF events are not replies to any queued command. */
837 if (v_opcode == I40E_VIRTCHNL_OP_EVENT) {
838 struct i40e_virtchnl_pf_event *vpe =
839 (struct i40e_virtchnl_pf_event *)msg;
841 switch (vpe->event) {
842 case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
844 device_printf(dev, "Link change: status %d, speed %d\n",
845 vpe->event_data.link_event.link_status,
846 vpe->event_data.link_event.link_speed);
849 vpe->event_data.link_event.link_status;
851 vpe->event_data.link_event.link_speed;
852 ixlv_update_link_status(sc);
854 case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
855 device_printf(dev, "PF initiated reset!\n");
856 sc->init_state = IXLV_RESET_PENDING;
860 device_printf(dev, "%s: Unknown event %d from AQ\n",
861 __func__, vpe->event);
868 /* Catch-all error response */
871 "%s: AQ returned error %d to our request %d!\n",
872 __func__, v_retval, v_opcode);
/* GET_STATS replies are too frequent to be worth debug-logging. */
876 if (v_opcode != I40E_VIRTCHNL_OP_GET_STATS)
877 DDPRINTF(dev, "opcode %d", v_opcode);
/* Reply dispatch: complete the pending vc command for this opcode. */
881 case I40E_VIRTCHNL_OP_GET_STATS:
882 ixlv_update_stats_counters(sc, (struct i40e_eth_stats *)msg);
884 case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
885 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER,
888 device_printf(dev, "WARNING: Error adding VF mac filter!\n");
889 device_printf(dev, "WARNING: Device may not receive traffic!\n");
892 case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
893 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER,
896 case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
897 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIGURE_PROMISC,
900 case I40E_VIRTCHNL_OP_ADD_VLAN:
901 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
904 case I40E_VIRTCHNL_OP_DEL_VLAN:
905 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
/* Queues enabled: bring the interface fully up. */
908 case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
909 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ENABLE_QUEUES,
912 /* Update link status */
913 ixlv_update_link_status(sc);
914 /* Turn on all interrupts */
915 ixlv_enable_intr(vsi);
916 /* And inform the stack we're ready */
917 vsi->ifp->if_drv_flags |= IFF_DRV_RUNNING;
918 vsi->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/* Queues disabled: quiesce interrupts and mark interface down. */
921 case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
922 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DISABLE_QUEUES,
925 /* Turn off all interrupts */
926 ixlv_disable_intr(vsi);
927 /* Tell the stack that the interface is no longer active */
928 vsi->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
931 case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
932 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIGURE_QUEUES,
935 case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
936 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_MAP_VECTORS,
941 "%s: Received unexpected message %d from PF.\n",
/*
** Translate a vc-manager request flag into the corresponding
** message-building/sending routine. Switch header, break statements,
** and some cases are missing from this extraction — verify upstream.
*/
949 ixl_vc_send_cmd(struct ixlv_sc *sc, uint32_t request)
953 case IXLV_FLAG_AQ_MAP_VECTORS:
957 case IXLV_FLAG_AQ_ADD_MAC_FILTER:
958 ixlv_add_ether_filters(sc);
961 case IXLV_FLAG_AQ_ADD_VLAN_FILTER:
965 case IXLV_FLAG_AQ_DEL_MAC_FILTER:
966 ixlv_del_ether_filters(sc);
969 case IXLV_FLAG_AQ_DEL_VLAN_FILTER:
973 case IXLV_FLAG_AQ_CONFIGURE_QUEUES:
974 ixlv_configure_queues(sc);
977 case IXLV_FLAG_AQ_DISABLE_QUEUES:
978 ixlv_disable_queues(sc);
981 case IXLV_FLAG_AQ_ENABLE_QUEUES:
982 ixlv_enable_queues(sc);
/*
** Initialize the vc command manager: empty pending queue and a timeout
** callout bound to the softc's core mutex.
*/
988 ixl_vc_init_mgr(struct ixlv_sc *sc, struct ixl_vc_mgr *mgr)
992 TAILQ_INIT(&mgr->pending);
993 callout_init_mtx(&mgr->callout, &sc->mtx, 0);
/*
** Complete the in-flight command: clear current/BUSY, invoke the
** command's callback with the result, then start the next pending one.
*/
997 ixl_vc_process_completion(struct ixl_vc_mgr *mgr, enum i40e_status_code err)
999 struct ixl_vc_cmd *cmd;
1002 mgr->current = NULL;
1003 cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY;
1005 cmd->callback(cmd, cmd->arg, err);
1006 ixl_vc_process_next(mgr);
/*
** Handle a PF reply for a given request flag: ignored unless it matches
** the current in-flight command; otherwise the timeout callout is
** stopped and the command completed with the PF's status.
*/
1010 ixl_vc_process_resp(struct ixl_vc_mgr *mgr, uint32_t request,
1011 enum i40e_status_code err)
1013 struct ixl_vc_cmd *cmd;
/* Stale or unexpected responses are silently dropped. */
1016 if (cmd == NULL || cmd->request != request)
1019 callout_stop(&mgr->callout);
1020 ixl_vc_process_completion(mgr, err);
/*
** Callout handler: the current command received no PF response in time;
** complete it with I40E_ERR_TIMEOUT (core lock held by the callout).
*/
1024 ixl_vc_cmd_timeout(void *arg)
1026 struct ixl_vc_mgr *mgr = (struct ixl_vc_mgr *)arg;
1028 IXLV_CORE_LOCK_ASSERT(mgr->sc);
1029 ixl_vc_process_completion(mgr, I40E_ERR_TIMEOUT);
/*
** Callout handler scheduled by ixl_vc_schedule_retry(): re-send the
** current command (core lock held by the callout).
*/
1033 ixl_vc_cmd_retry(void *arg)
1035 struct ixl_vc_mgr *mgr = (struct ixl_vc_mgr *)arg;
1037 IXLV_CORE_LOCK_ASSERT(mgr->sc);
1038 ixl_vc_send_current(mgr);
/*
** Transmit the current command's request to the PF and arm the
** response-timeout callout.
*/
1042 ixl_vc_send_current(struct ixl_vc_mgr *mgr)
1044 struct ixl_vc_cmd *cmd;
1047 ixl_vc_send_cmd(mgr->sc, cmd->request);
1048 callout_reset(&mgr->callout, IXLV_VC_TIMEOUT, ixl_vc_cmd_timeout, mgr);
/*
** If no command is in flight, dequeue the head of the pending list
** (if any) and send it. One command is outstanding at a time.
*/
1052 ixl_vc_process_next(struct ixl_vc_mgr *mgr)
1054 struct ixl_vc_cmd *cmd;
1056 if (mgr->current != NULL)
1059 if (TAILQ_EMPTY(&mgr->pending))
1062 cmd = TAILQ_FIRST(&mgr->pending);
1063 TAILQ_REMOVE(&mgr->pending, cmd, next);
1066 ixl_vc_send_current(mgr);
/*
** Schedule a retry of the current command in ~10ms (hz/100 ticks) via
** ixl_vc_cmd_retry(); used when a transient failure (e.g. allocation)
** prevented sending it now.
*/
1070 ixl_vc_schedule_retry(struct ixl_vc_mgr *mgr)
1073 callout_reset(&mgr->callout, howmany(hz, 100), ixl_vc_cmd_retry, mgr);
/*
** Queue a vc command (request flag + completion callback). If the same
** cmd object is already busy it is first detached — either cleared as
** the current command or removed from the pending list — before being
** re-armed and appended; processing starts immediately if idle.
** Requires the core lock.
*/
1077 ixl_vc_enqueue(struct ixl_vc_mgr *mgr, struct ixl_vc_cmd *cmd,
1078 uint32_t req, ixl_vc_callback_t *callback, void *arg)
1080 IXLV_CORE_LOCK_ASSERT(mgr->sc);
1082 if (cmd->flags & IXLV_VC_CMD_FLAG_BUSY) {
1083 if (mgr->current == cmd)
1084 mgr->current = NULL;
1086 TAILQ_REMOVE(&mgr->pending, cmd, next);
1090 cmd->callback = callback;
1092 cmd->flags |= IXLV_VC_CMD_FLAG_BUSY;
1093 TAILQ_INSERT_TAIL(&mgr->pending, cmd, next);
1095 ixl_vc_process_next(mgr);
/*
** Abort all outstanding vc commands (driver stop/teardown): fail the
** current command and every pending one with I40E_ERR_ADAPTER_STOPPED,
** then cancel the timeout/retry callout. Requires the core lock.
*/
1099 ixl_vc_flush(struct ixl_vc_mgr *mgr)
1101 struct ixl_vc_cmd *cmd;
1103 IXLV_CORE_LOCK_ASSERT(mgr->sc);
/* Invariant: pending entries can only exist while one is in flight. */
1104 KASSERT(TAILQ_EMPTY(&mgr->pending) || mgr->current != NULL,
1105 ("ixlv: pending commands waiting but no command in progress"));
1109 mgr->current = NULL;
1110 cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY;
1111 cmd->callback(cmd, cmd->arg, I40E_ERR_ADAPTER_STOPPED);
/* Drain the pending queue, failing each command back to its caller. */
1114 while ((cmd = TAILQ_FIRST(&mgr->pending)) != NULL) {
1115 TAILQ_REMOVE(&mgr->pending, cmd, next);
1116 cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY;
1117 cmd->callback(cmd, cmd->arg, I40E_ERR_ADAPTER_STOPPED);
1120 callout_stop(&mgr->callout);