1 /******************************************************************************
3 Copyright (c) 2013-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 ** Virtual Channel support
37 ** These are support functions for communication
38 ** between the VF and PF drivers.
43 #include "i40e_prototype.h"
46 /* busy wait delay in msec */
47 #define IXLV_BUSY_WAIT_DELAY 10
48 #define IXLV_BUSY_WAIT_COUNT 50
50 static void ixl_vc_process_resp(struct ixl_vc_mgr *, uint32_t,
51 enum i40e_status_code);
52 static void ixl_vc_process_next(struct ixl_vc_mgr *mgr);
53 static void ixl_vc_schedule_retry(struct ixl_vc_mgr *mgr);
54 static void ixl_vc_send_current(struct ixl_vc_mgr *mgr);
58 ** Validate VF messages
/*
 * ixl_vc_validate_vf_msg
 *
 * Pre-flight sanity check of a virtchnl message the VF is about to send:
 * the message length must match the size expected for the opcode, and
 * variable-length messages (queue config, IRQ maps, MAC/VLAN filter
 * lists) must carry a non-zero element count.  The final check rejects
 * the message on any length mismatch or per-opcode format error.
 */
60 static int ixl_vc_validate_vf_msg(struct ixlv_sc *sc, u32 v_opcode,
63 bool err_msg_format = false;
66 /* Validate message length. */
/* Fixed-size payloads: expected length is simply sizeof the struct. */
68 case I40E_VIRTCHNL_OP_VERSION:
69 valid_len = sizeof(struct i40e_virtchnl_version_info);
71 case I40E_VIRTCHNL_OP_RESET_VF:
74 case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
75 /* Valid length in api v1.0 is 0, v1.1 is 4 */
78 case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
79 valid_len = sizeof(struct i40e_virtchnl_txq_info);
81 case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
82 valid_len = sizeof(struct i40e_virtchnl_rxq_info);
/*
 * Variable-length payloads: base struct plus one entry per element.
 * An element count of zero is always a format error.
 */
84 case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
85 valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
86 if (msglen >= valid_len) {
87 struct i40e_virtchnl_vsi_queue_config_info *vqc =
88 (struct i40e_virtchnl_vsi_queue_config_info *)msg;
89 valid_len += (vqc->num_queue_pairs *
91 i40e_virtchnl_queue_pair_info));
92 if (vqc->num_queue_pairs == 0)
93 err_msg_format = true;
96 case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
97 valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
98 if (msglen >= valid_len) {
99 struct i40e_virtchnl_irq_map_info *vimi =
100 (struct i40e_virtchnl_irq_map_info *)msg;
101 valid_len += (vimi->num_vectors *
102 sizeof(struct i40e_virtchnl_vector_map));
103 if (vimi->num_vectors == 0)
104 err_msg_format = true;
107 case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
108 case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
109 valid_len = sizeof(struct i40e_virtchnl_queue_select);
111 case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
112 case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
113 valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
114 if (msglen >= valid_len) {
115 struct i40e_virtchnl_ether_addr_list *veal =
116 (struct i40e_virtchnl_ether_addr_list *)msg;
117 valid_len += veal->num_elements *
118 sizeof(struct i40e_virtchnl_ether_addr);
119 if (veal->num_elements == 0)
120 err_msg_format = true;
123 case I40E_VIRTCHNL_OP_ADD_VLAN:
124 case I40E_VIRTCHNL_OP_DEL_VLAN:
125 valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
126 if (msglen >= valid_len) {
127 struct i40e_virtchnl_vlan_filter_list *vfl =
128 (struct i40e_virtchnl_vlan_filter_list *)msg;
129 valid_len += vfl->num_elements * sizeof(u16);
130 if (vfl->num_elements == 0)
131 err_msg_format = true;
134 case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
135 valid_len = sizeof(struct i40e_virtchnl_promisc_info);
137 case I40E_VIRTCHNL_OP_GET_STATS:
138 valid_len = sizeof(struct i40e_virtchnl_queue_select);
140 /* These are always errors coming from the VF. */
141 case I40E_VIRTCHNL_OP_EVENT:
142 case I40E_VIRTCHNL_OP_UNKNOWN:
/* Reject on an exact-length mismatch or any per-opcode format error. */
147 /* few more checks */
148 if ((valid_len != msglen) || (err_msg_format))
158 ** Send message to PF and print status if failure.
/*
 * ixlv_send_pf_msg
 *
 * Validate a virtchnl message with ixl_vc_validate_vf_msg() and hand it
 * to the PF over the admin queue.  Both a validation failure and an AQ
 * send failure are logged to the console.
 */
161 ixlv_send_pf_msg(struct ixlv_sc *sc,
162 enum i40e_virtchnl_ops op, u8 *msg, u16 len)
164 struct i40e_hw *hw = &sc->hw;
165 device_t dev = sc->dev;
170 ** Pre-validating messages to the PF
173 val_err = ixl_vc_validate_vf_msg(sc, op, msg, len);
175 device_printf(dev, "Error validating msg to PF for op %d,"
176 " msglen %d: error %d\n", op, len, val_err);
179 err = i40e_aq_send_msg_to_pf(hw, op, I40E_SUCCESS, msg, len, NULL);
/* Log the last ASQ status too, so the PF-side reason is visible. */
181 device_printf(dev, "Unable to send opcode %d to PF, "
182 "error %d, aq status %d\n", op, err, hw->aq.asq_last_status);
190 ** Send API version admin queue message to the PF. The reply is not checked
191 ** in this function. Returns 0 if the message was successfully
192 ** sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
/* Advertise the virtchnl version this VF driver speaks via OP_VERSION. */
195 ixlv_send_api_ver(struct ixlv_sc *sc)
197 struct i40e_virtchnl_version_info vvi;
199 vvi.major = I40E_VIRTCHNL_VERSION_MAJOR;
200 vvi.minor = I40E_VIRTCHNL_VERSION_MINOR;
202 return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_VERSION,
203 (u8 *)&vvi, sizeof(vvi));
207 ** ixlv_verify_api_ver
209 ** Compare API versions with the PF. Must be called after admin queue is
210 ** initialized. Returns 0 if API versions match, EIO if
211 ** they do not, or I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty.
214 ixlv_verify_api_ver(struct ixlv_sc *sc)
216 struct i40e_virtchnl_version_info *pf_vvi;
217 struct i40e_hw *hw = &sc->hw;
218 struct i40e_arq_event_info event;
219 device_t dev = sc->dev;
/* Buffer for the PF's OP_VERSION reply from the admin receive queue. */
223 event.buf_len = IXL_AQ_BUF_SZ;
224 event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT);
225 if (!event.msg_buf) {
/* Poll the ARQ, giving up after IXLV_AQ_MAX_ERR attempts. */
231 if (++retries > IXLV_AQ_MAX_ERR)
234 /* Initial delay here is necessary */
235 i40e_msec_pause(100);
236 err = i40e_clean_arq_element(hw, &event, NULL);
237 if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
/* Skip unrelated responses and keep waiting for OP_VERSION. */
244 if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
245 I40E_VIRTCHNL_OP_VERSION) {
246 DDPRINTF(dev, "Received unexpected op response: %d\n",
247 le32toh(event.desc.cookie_high));
248 /* Don't stop looking for expected response */
/* cookie_low carries the PF's status code for our request. */
252 err = (i40e_status)le32toh(event.desc.cookie_low);
/*
 * A PF speaking a newer major version, or a newer minor at the same
 * major, is a fatal incompatibility for this VF driver.
 */
260 pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
261 if ((pf_vvi->major > I40E_VIRTCHNL_VERSION_MAJOR) ||
262 ((pf_vvi->major == I40E_VIRTCHNL_VERSION_MAJOR) &&
263 (pf_vvi->minor > I40E_VIRTCHNL_VERSION_MINOR))) {
264 device_printf(dev, "Critical PF/VF API version mismatch!\n");
/* Record the PF minor version; consulted when requesting VF resources. */
267 sc->pf_version = pf_vvi->minor;
269 /* Log PF/VF api versions */
270 device_printf(dev, "PF API %d.%d / VF API %d.%d\n",
271 pf_vvi->major, pf_vvi->minor,
272 I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR);
275 free(event.msg_buf, M_DEVBUF);
281 ** ixlv_send_vf_config_msg
283 ** Send VF configuration request admin queue message to the PF. The reply
284 ** is not checked in this function. Returns 0 if the message was
285 ** successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
288 ixlv_send_vf_config_msg(struct ixlv_sc *sc)
/* Offload capabilities this VF requests from the PF. */
292 caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
293 I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF |
294 I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
/*
 * A v1.0 PF (no VF caps support) takes no payload with
 * GET_VF_RESOURCES; newer PFs expect the capability word.
 */
296 if (sc->pf_version == I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
297 return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
300 return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
301 (u8 *)&caps, sizeof(caps));
305 ** ixlv_get_vf_config
307 ** Get VF configuration from PF and populate hw structure. Must be called after
308 ** admin queue is initialized. Busy waits until response is received from PF,
309 ** with maximum timeout. Response from PF is returned in the buffer for further
310 ** processing by the caller.
313 ixlv_get_vf_config(struct ixlv_sc *sc)
315 struct i40e_hw *hw = &sc->hw;
316 device_t dev = sc->dev;
317 struct i40e_arq_event_info event;
322 /* Note this assumes a single VSI */
323 len = sizeof(struct i40e_virtchnl_vf_resource) +
324 sizeof(struct i40e_virtchnl_vsi_resource);
326 event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT);
327 if (!event.msg_buf) {
/* Poll the ARQ until the GET_VF_RESOURCES reply arrives or we give up. */
333 err = i40e_clean_arq_element(hw, &event, NULL);
334 if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
335 if (++retries <= IXLV_AQ_MAX_ERR)
337 } else if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
338 I40E_VIRTCHNL_OP_GET_VF_RESOURCES) {
339 DDPRINTF(dev, "Received a response from PF,"
340 " opcode %d, error %d",
341 le32toh(event.desc.cookie_high),
342 le32toh(event.desc.cookie_low));
/* Matching opcode: cookie_low is the PF's status for the request. */
346 err = (i40e_status)le32toh(event.desc.cookie_low);
348 device_printf(dev, "%s: Error returned from PF,"
349 " opcode %d, error %d\n", __func__,
350 le32toh(event.desc.cookie_high),
351 le32toh(event.desc.cookie_low));
355 /* We retrieved the config message, with no errors */
359 if (retries > IXLV_AQ_MAX_ERR) {
360 INIT_DBG_DEV(dev, "Did not receive response after %d tries.",
/*
 * Copy the resource reply (clamped to our single-VSI buffer size) and
 * let the shared code populate the hw structure from it.
 */
367 memcpy(sc->vf_res, event.msg_buf, min(event.msg_len, len));
368 i40e_vf_parse_hw_config(hw, sc->vf_res);
371 free(event.msg_buf, M_DEVBUF);
377 ** ixlv_configure_queues
379 ** Request that the PF set up our queues.
382 ixlv_configure_queues(struct ixlv_sc *sc)
384 device_t dev = sc->dev;
385 struct ixl_vsi *vsi = &sc->vsi;
386 struct ixl_queue *que = vsi->queues;
391 struct i40e_virtchnl_vsi_queue_config_info *vqci;
392 struct i40e_virtchnl_queue_pair_info *vqpi;
/* One queue-pair descriptor per TX/RX pair in the VSI. */
394 pairs = vsi->num_queues;
395 len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
396 (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
397 vqci = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
399 device_printf(dev, "%s: unable to allocate memory\n", __func__);
/* Transient failure: ask the command manager to retry this request. */
400 ixl_vc_schedule_retry(&sc->vc_mgr);
403 vqci->vsi_id = sc->vsi_res->vsi_id;
404 vqci->num_queue_pairs = pairs;
406 /* Size check is not needed here - HW max is 16 queue pairs, and we
407 * can fit info for 31 of them into the AQ buffer before it overflows.
409 for (int i = 0; i < pairs; i++, que++, vqpi++) {
412 vqpi->txq.vsi_id = vqci->vsi_id;
413 vqpi->txq.queue_id = i;
414 vqpi->txq.ring_len = que->num_desc;
415 vqpi->txq.dma_ring_addr = txr->dma.pa;
416 /* Enable Head writeback */
/* Head-writeback area is placed right after the TX descriptor ring. */
417 vqpi->txq.headwb_enabled = 1;
418 vqpi->txq.dma_headwb_addr = txr->dma.pa +
419 (que->num_desc * sizeof(struct i40e_tx_desc));
421 vqpi->rxq.vsi_id = vqci->vsi_id;
422 vqpi->rxq.queue_id = i;
423 vqpi->rxq.ring_len = que->num_desc;
424 vqpi->rxq.dma_ring_addr = rxr->dma.pa;
425 vqpi->rxq.max_pkt_size = vsi->max_frame_size;
426 vqpi->rxq.databuffer_size = rxr->mbuf_sz;
/* Header split is not used on the RX side. */
427 vqpi->rxq.splithdr_enabled = 0;
430 ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
432 free(vqci, M_DEVBUF);
436 ** ixlv_enable_queues
438 ** Request that the PF enable all of our queues.
441 ixlv_enable_queues(struct ixlv_sc *sc)
443 struct i40e_virtchnl_queue_select vqs;
/* (1 << n) - 1 selects every queue pair (bits 0..n-1) for TX and RX. */
445 vqs.vsi_id = sc->vsi_res->vsi_id;
446 vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
447 vqs.rx_queues = vqs.tx_queues;
448 ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
449 (u8 *)&vqs, sizeof(vqs));
453 ** ixlv_disable_queues
455 ** Request that the PF disable all of our queues.
458 ixlv_disable_queues(struct ixlv_sc *sc)
460 struct i40e_virtchnl_queue_select vqs;
/* Same all-queues mask as ixlv_enable_queues(), opposite operation. */
462 vqs.vsi_id = sc->vsi_res->vsi_id;
463 vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
464 vqs.rx_queues = vqs.tx_queues;
465 ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
466 (u8 *)&vqs, sizeof(vqs));
472 ** Request that the PF map queues to interrupt vectors. Misc causes, including
473 ** admin queue, are always mapped to vector 0.
476 ixlv_map_queues(struct ixlv_sc *sc)
478 struct i40e_virtchnl_irq_map_info *vm;
480 struct ixl_vsi *vsi = &sc->vsi;
481 struct ixl_queue *que = vsi->queues;
483 /* How many queue vectors, adminq uses one */
/* One vector_map entry per MSI-X vector, adminq vector included. */
486 len = sizeof(struct i40e_virtchnl_irq_map_info) +
487 (sc->msix * sizeof(struct i40e_virtchnl_vector_map));
488 vm = malloc(len, M_DEVBUF, M_NOWAIT);
490 printf("%s: unable to allocate memory\n", __func__);
491 ixl_vc_schedule_retry(&sc->vc_mgr);
495 vm->num_vectors = sc->msix;
496 /* Queue vectors first */
/* Vector 0 is reserved for the adminq, so queue vectors start at 1. */
497 for (i = 0; i < q; i++, que++) {
498 vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id;
499 vm->vecmap[i].vector_id = i + 1; /* first is adminq */
500 vm->vecmap[i].txq_map = (1 << que->me);
501 vm->vecmap[i].rxq_map = (1 << que->me);
/* ITR indices: RX uses index 0, TX uses index 1. */
502 vm->vecmap[i].rxitr_idx = 0;
503 vm->vecmap[i].txitr_idx = 1;
506 /* Misc vector last - this is only for AdminQ messages */
507 vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id;
508 vm->vecmap[i].vector_id = 0;
509 vm->vecmap[i].txq_map = 0;
510 vm->vecmap[i].rxq_map = 0;
511 vm->vecmap[i].rxitr_idx = 0;
512 vm->vecmap[i].txitr_idx = 0;
514 ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
520 ** Scan the Filter List looking for vlans that need
521 ** to be added, then create the data to hand to the AQ
525 ixlv_add_vlans(struct ixlv_sc *sc)
527 struct i40e_virtchnl_vlan_filter_list *v;
528 struct ixlv_vlan_filter *f, *ftmp;
529 device_t dev = sc->dev;
530 int len, i = 0, cnt = 0;
532 /* Get count of VLAN filters to add */
533 SLIST_FOREACH(f, sc->vlan_filters, next) {
534 if (f->flags & IXL_FILTER_ADD)
/*
 * Nothing flagged for add: complete the queued command right away so
 * the command manager can move on to the next request.
 */
538 if (!cnt) { /* no work... */
539 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
544 len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
/* The whole message must fit into a single AQ buffer; retry if not. */
547 if (len > IXL_AQ_BUF_SZ) {
548 device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
550 ixl_vc_schedule_retry(&sc->vc_mgr);
554 v = malloc(len, M_DEVBUF, M_NOWAIT);
556 device_printf(dev, "%s: unable to allocate memory\n",
558 ixl_vc_schedule_retry(&sc->vc_mgr);
562 v->vsi_id = sc->vsi_res->vsi_id;
563 v->num_elements = cnt;
565 /* Scan the filter array */
/* Copy each pending VLAN id into the message and mark it in-use. */
566 SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) {
567 if (f->flags & IXL_FILTER_ADD) {
568 bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16));
569 f->flags = IXL_FILTER_USED;
576 ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)v, len);
582 ** Scan the Filter Table looking for vlans that need
583 ** to be removed, then create the data to hand to the AQ
587 ixlv_del_vlans(struct ixlv_sc *sc)
589 device_t dev = sc->dev;
590 struct i40e_virtchnl_vlan_filter_list *v;
591 struct ixlv_vlan_filter *f, *ftmp;
592 int len, i = 0, cnt = 0;
594 /* Get count of VLAN filters to delete */
595 SLIST_FOREACH(f, sc->vlan_filters, next) {
596 if (f->flags & IXL_FILTER_DEL)
/* No deletions pending: complete the queued command immediately. */
600 if (!cnt) { /* no work... */
601 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
606 len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
/* The whole message must fit into a single AQ buffer; retry if not. */
609 if (len > IXL_AQ_BUF_SZ) {
610 device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
612 ixl_vc_schedule_retry(&sc->vc_mgr);
616 v = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
618 device_printf(dev, "%s: unable to allocate memory\n",
620 ixl_vc_schedule_retry(&sc->vc_mgr);
624 v->vsi_id = sc->vsi_res->vsi_id;
625 v->num_elements = cnt;
627 /* Scan the filter array */
/*
 * Copy each VLAN flagged for deletion into the message and unlink it
 * from the software list (SAFE variant allows removal mid-walk).
 */
628 SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) {
629 if (f->flags & IXL_FILTER_DEL) {
630 bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16));
632 SLIST_REMOVE(sc->vlan_filters, f, ixlv_vlan_filter, next);
639 ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)v, len);
646 ** This routine takes additions to the vsi filter
647 ** table and creates an Admin Queue call to create
648 ** the filters in the hardware.
651 ixlv_add_ether_filters(struct ixlv_sc *sc)
653 struct i40e_virtchnl_ether_addr_list *a;
654 struct ixlv_mac_filter *f;
655 device_t dev = sc->dev;
656 int len, j = 0, cnt = 0;
658 /* Get count of MAC addresses to add */
659 SLIST_FOREACH(f, sc->mac_filters, next) {
660 if (f->flags & IXL_FILTER_ADD)
/* No additions pending: complete the queued command immediately. */
663 if (cnt == 0) { /* Should not happen... */
664 DDPRINTF(dev, "cnt == 0, exiting...");
665 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER,
670 len = sizeof(struct i40e_virtchnl_ether_addr_list) +
671 (cnt * sizeof(struct i40e_virtchnl_ether_addr));
673 a = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
675 device_printf(dev, "%s: Failed to get memory for "
676 "virtchnl_ether_addr_list\n", __func__);
677 ixl_vc_schedule_retry(&sc->vc_mgr);
680 a->vsi_id = sc->vsi.id;
681 a->num_elements = cnt;
683 /* Scan the filter array */
/* Copy each pending MAC into the message and clear its ADD flag. */
684 SLIST_FOREACH(f, sc->mac_filters, next) {
685 if (f->flags & IXL_FILTER_ADD) {
686 bcopy(f->macaddr, a->list[j].addr, ETHER_ADDR_LEN);
687 f->flags &= ~IXL_FILTER_ADD;
690 DDPRINTF(dev, "ADD: " MAC_FORMAT,
691 MAC_FORMAT_ARGS(f->macaddr));
696 DDPRINTF(dev, "len %d, j %d, cnt %d",
699 I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, (u8 *)a, len);
706 ** This routine takes filters flagged for deletion in the
707 ** sc MAC filter list and creates an Admin Queue call
708 ** to delete those filters in the hardware.
711 ixlv_del_ether_filters(struct ixlv_sc *sc)
713 struct i40e_virtchnl_ether_addr_list *d;
714 device_t dev = sc->dev;
715 struct ixlv_mac_filter *f, *f_temp;
716 int len, j = 0, cnt = 0;
718 /* Get count of MAC addresses to delete */
719 SLIST_FOREACH(f, sc->mac_filters, next) {
720 if (f->flags & IXL_FILTER_DEL)
/* No deletions pending: complete the queued command immediately. */
724 DDPRINTF(dev, "cnt == 0, exiting...");
725 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER,
730 len = sizeof(struct i40e_virtchnl_ether_addr_list) +
731 (cnt * sizeof(struct i40e_virtchnl_ether_addr));
733 d = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
735 device_printf(dev, "%s: Failed to get memory for "
736 "virtchnl_ether_addr_list\n", __func__);
737 ixl_vc_schedule_retry(&sc->vc_mgr);
740 d->vsi_id = sc->vsi.id;
741 d->num_elements = cnt;
743 /* Scan the filter array */
/*
 * Copy each MAC flagged for deletion into the message and unlink it
 * from the software list (SAFE variant allows removal mid-walk).
 */
744 SLIST_FOREACH_SAFE(f, sc->mac_filters, next, f_temp) {
745 if (f->flags & IXL_FILTER_DEL) {
746 bcopy(f->macaddr, d->list[j].addr, ETHER_ADDR_LEN);
747 DDPRINTF(dev, "DEL: " MAC_FORMAT,
748 MAC_FORMAT_ARGS(f->macaddr));
750 SLIST_REMOVE(sc->mac_filters, f, ixlv_mac_filter, next);
757 I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, (u8 *)d, len);
764 ** ixlv_request_reset
765 ** Request that the PF reset this VF. No response is expected.
768 ixlv_request_reset(struct ixlv_sc *sc)
771 ** Set the reset status to "in progress" before
772 ** the request, this avoids any possibility of
773 ** a mistaken early detection of completion.
/* Pre-set VFGEN_RSTAT so reset-polling code can't see a stale "done". */
775 wr32(&sc->hw, I40E_VFGEN_RSTAT, I40E_VFR_INPROGRESS);
776 ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0);
780 ** ixlv_request_stats
781 ** Request the statistics for this VF's VSI from PF.
784 ixlv_request_stats(struct ixlv_sc *sc)
786 struct i40e_virtchnl_queue_select vqs;
789 vqs.vsi_id = sc->vsi_res->vsi_id;
790 /* Low priority, we don't need to error check */
/* The reply is consumed asynchronously by ixlv_vc_completion(). */
791 error = ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_STATS,
792 (u8 *)&vqs, sizeof(vqs));
795 device_printf(sc->dev, "Error sending stats request to PF: %d\n", error);
800 ** Updates driver's stats counters with VSI stats returned from PF.
803 ixlv_update_stats_counters(struct ixlv_sc *sc, struct i40e_eth_stats *es)
805 struct ixl_vsi *vsi = &sc->vsi;
806 uint64_t tx_discards;
/* Fold software buf_ring drops into the PF-reported TX discard count. */
808 tx_discards = es->tx_discards;
809 for (int i = 0; i < vsi->num_queues; i++)
810 tx_discards += sc->vsi.queues[i].txr.br->br_drops;
812 /* Update ifnet stats */
813 IXL_SET_IPACKETS(vsi, es->rx_unicast +
816 IXL_SET_OPACKETS(vsi, es->tx_unicast +
819 IXL_SET_IBYTES(vsi, es->rx_bytes);
820 IXL_SET_OBYTES(vsi, es->tx_bytes);
821 IXL_SET_IMCASTS(vsi, es->rx_multicast);
822 IXL_SET_OMCASTS(vsi, es->tx_multicast);
824 IXL_SET_OERRORS(vsi, es->tx_errors);
825 IXL_SET_IQDROPS(vsi, es->rx_discards);
826 IXL_SET_OQDROPS(vsi, tx_discards);
827 IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
/* Ethernet has no collision concept on this hardware; always zero. */
828 IXL_SET_COLLISIONS(vsi, 0);
/* Keep a raw copy of the hardware stats on the VSI as well. */
830 vsi->eth_stats = *es;
/*
 * ixlv_config_rss_key
 *
 * Send the RSS hash key to the PF via OP_CONFIG_RSS_KEY.  The key comes
 * from the stack's RSS support (rss_getkey) when available, otherwise
 * the driver default key is used.
 */
834 ixlv_config_rss_key(struct ixlv_sc *sc)
836 struct i40e_virtchnl_rss_key *rss_key_msg;
837 int msg_len, key_length;
838 u8 rss_seed[IXL_RSS_KEY_SIZE];
841 /* Fetch the configured RSS key */
842 rss_getkey((uint8_t *) &rss_seed);
844 ixl_get_default_rss_key((u32 *)rss_seed);
847 /* Send the fetched key */
848 key_length = IXL_RSS_KEY_SIZE;
/* "- 1" presumably accounts for a key[1] placeholder in the struct —
 * confirm against the i40e_virtchnl_rss_key definition. */
849 msg_len = sizeof(struct i40e_virtchnl_rss_key) + (sizeof(u8) * key_length) - 1;
850 rss_key_msg = malloc(msg_len, M_DEVBUF, M_NOWAIT | M_ZERO);
851 if (rss_key_msg == NULL) {
852 device_printf(sc->dev, "Unable to allocate msg memory for RSS key msg.\n");
856 rss_key_msg->vsi_id = sc->vsi_res->vsi_id;
857 rss_key_msg->key_len = key_length;
858 bcopy(rss_seed, &rss_key_msg->key[0], key_length);
860 DDPRINTF(sc->dev, "config_rss: vsi_id %d, key_len %d",
861 rss_key_msg->vsi_id, rss_key_msg->key_len);
863 ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
864 (u8 *)rss_key_msg, msg_len);
866 free(rss_key_msg, M_DEVBUF);
/* Send the driver's default RSS hash-enable (HENA) bits to the PF. */
870 ixlv_set_rss_hena(struct ixlv_sc *sc)
872 struct i40e_virtchnl_rss_hena hena;
874 hena.hena = IXL_DEFAULT_RSS_HENA;
876 ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_SET_RSS_HENA,
877 (u8 *)&hena, sizeof(hena));
/*
 * ixlv_config_rss_lut
 *
 * Build and send the RSS lookup table (LUT) to the PF.  Entries are
 * filled with queue ids, using the stack's RSS indirection table when
 * available and a simple round-robin over the VSI's queues otherwise.
 */
881 ixlv_config_rss_lut(struct ixlv_sc *sc)
883 struct i40e_virtchnl_rss_lut *rss_lut_msg;
889 lut_length = IXL_RSS_VSI_LUT_SIZE;
/* "- 1" presumably accounts for a lut[1] placeholder in the struct —
 * confirm against the i40e_virtchnl_rss_lut definition. */
890 msg_len = sizeof(struct i40e_virtchnl_rss_lut) + (lut_length * sizeof(u8)) - 1;
891 rss_lut_msg = malloc(msg_len, M_DEVBUF, M_NOWAIT | M_ZERO);
892 if (rss_lut_msg == NULL) {
893 device_printf(sc->dev, "Unable to allocate msg memory for RSS lut msg.\n");
897 rss_lut_msg->vsi_id = sc->vsi_res->vsi_id;
898 /* Each LUT entry is a max of 1 byte, so this is easy */
899 rss_lut_msg->lut_entries = lut_length;
901 /* Populate the LUT with max no. of queues in round robin fashion */
902 for (i = 0; i < lut_length; i++) {
905 * Fetch the RSS bucket id for the given indirection entry.
906 * Cap it at the number of configured buckets (which is
/* Modulo keeps the bucket/index within the VSI's queue count. */
909 que_id = rss_get_indirection_to_bucket(i);
910 que_id = que_id % sc->vsi.num_queues;
912 que_id = i % sc->vsi.num_queues;
914 lut = que_id & IXL_RSS_VSI_LUT_ENTRY_MASK;
915 rss_lut_msg->lut[i] = lut;
918 ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
919 (u8 *)rss_lut_msg, msg_len);
921 free(rss_lut_msg, M_DEVBUF);
925 ** ixlv_vc_completion
927 ** Asynchronous completion function for admin queue messages. Rather than busy
928 ** wait, we fire off our requests and assume that no errors will be returned.
929 ** This function handles the reply messages.
932 ixlv_vc_completion(struct ixlv_sc *sc,
933 enum i40e_virtchnl_ops v_opcode,
934 i40e_status v_retval, u8 *msg, u16 msglen)
936 device_t dev = sc->dev;
937 struct ixl_vsi *vsi = &sc->vsi;
/* Unsolicited PF events (link change, impending reset) come first. */
939 if (v_opcode == I40E_VIRTCHNL_OP_EVENT) {
940 struct i40e_virtchnl_pf_event *vpe =
941 (struct i40e_virtchnl_pf_event *)msg;
943 switch (vpe->event) {
944 case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
946 device_printf(dev, "Link change: status %d, speed %d\n",
947 vpe->event_data.link_event.link_status,
948 vpe->event_data.link_event.link_speed);
/* Cache link state/speed, then push the change to the stack. */
951 vpe->event_data.link_event.link_status;
953 vpe->event_data.link_event.link_speed;
954 ixlv_update_link_status(sc);
956 case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
957 device_printf(dev, "PF initiated reset!\n");
958 sc->init_state = IXLV_RESET_PENDING;
/* NOTE(review): drops the core mutex here — presumably so the reset
 * handling path can run; confirm against the init/reset task. */
959 mtx_unlock(&sc->mtx);
964 device_printf(dev, "%s: Unknown event %d from AQ\n",
965 __func__, vpe->event);
972 /* Catch-all error response */
975 "%s: AQ returned error %d to our request %d!\n",
976 __func__, v_retval, v_opcode);
/* Stats replies arrive continuously; don't debug-log each one. */
980 if (v_opcode != I40E_VIRTCHNL_OP_GET_STATS)
981 DDPRINTF(dev, "opcode %d", v_opcode);
/*
 * Dispatch each reply to the command manager so the matching queued
 * request (if any) is completed with the PF's status.
 */
985 case I40E_VIRTCHNL_OP_GET_STATS:
986 ixlv_update_stats_counters(sc, (struct i40e_eth_stats *)msg);
988 case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
989 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER,
992 device_printf(dev, "WARNING: Error adding VF mac filter!\n");
993 device_printf(dev, "WARNING: Device may not receive traffic!\n");
996 case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
997 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER,
1000 case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
1001 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIGURE_PROMISC,
1004 case I40E_VIRTCHNL_OP_ADD_VLAN:
1005 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
1008 case I40E_VIRTCHNL_OP_DEL_VLAN:
1009 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
1012 case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
1013 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ENABLE_QUEUES,
/* Queues are live: refresh link, unmask interrupts, mark RUNNING. */
1015 if (v_retval == 0) {
1016 /* Update link status */
1017 ixlv_update_link_status(sc);
1018 /* Turn on all interrupts */
1019 ixlv_enable_intr(vsi);
1020 /* And inform the stack we're ready */
1021 vsi->ifp->if_drv_flags |= IFF_DRV_RUNNING;
1022 /* TODO: Clear a state flag, so we know we're ready to run init again */
1025 case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
1026 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DISABLE_QUEUES,
1028 if (v_retval == 0) {
1029 /* Turn off all interrupts */
1030 ixlv_disable_intr(vsi);
1031 /* Tell the stack that the interface is no longer active */
1032 vsi->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING);
1035 case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
1036 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIGURE_QUEUES,
1039 case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
1040 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_MAP_VECTORS,
1043 case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
1044 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIG_RSS_KEY,
1047 case I40E_VIRTCHNL_OP_SET_RSS_HENA:
1048 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_SET_RSS_HENA,
1051 case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
1052 ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIG_RSS_LUT,
1058 "%s: Received unexpected message %d from PF.\n",
1059 __func__, v_opcode);
/*
 * ixl_vc_send_cmd
 *
 * Dispatch table from a command-manager request flag to the routine
 * that actually builds and sends the corresponding virtchnl message.
 */
1067 ixl_vc_send_cmd(struct ixlv_sc *sc, uint32_t request)
1071 case IXLV_FLAG_AQ_MAP_VECTORS:
1072 ixlv_map_queues(sc);
1075 case IXLV_FLAG_AQ_ADD_MAC_FILTER:
1076 ixlv_add_ether_filters(sc);
1079 case IXLV_FLAG_AQ_ADD_VLAN_FILTER:
1083 case IXLV_FLAG_AQ_DEL_MAC_FILTER:
1084 ixlv_del_ether_filters(sc);
1087 case IXLV_FLAG_AQ_DEL_VLAN_FILTER:
1091 case IXLV_FLAG_AQ_CONFIGURE_QUEUES:
1092 ixlv_configure_queues(sc);
1095 case IXLV_FLAG_AQ_DISABLE_QUEUES:
1096 ixlv_disable_queues(sc);
1099 case IXLV_FLAG_AQ_ENABLE_QUEUES:
1100 ixlv_enable_queues(sc);
1103 case IXLV_FLAG_AQ_CONFIG_RSS_KEY:
1104 ixlv_config_rss_key(sc);
1107 case IXLV_FLAG_AQ_SET_RSS_HENA:
1108 ixlv_set_rss_hena(sc);
1111 case IXLV_FLAG_AQ_CONFIG_RSS_LUT:
1112 ixlv_config_rss_lut(sc);
/*
 * ixl_vc_init_mgr
 *
 * Initialize the virtual-channel command manager: no command in flight,
 * empty pending queue, and a callout bound to the softc's mutex for
 * timeout/retry handling.
 */
1118 ixl_vc_init_mgr(struct ixlv_sc *sc, struct ixl_vc_mgr *mgr)
1121 mgr->current = NULL;
1122 TAILQ_INIT(&mgr->pending);
1123 callout_init_mtx(&mgr->callout, &sc->mtx, 0);
/*
 * ixl_vc_process_completion
 *
 * Finish the in-flight command: clear it as current, drop its BUSY
 * flag, invoke its callback with the completion status, then start the
 * next pending command.
 */
1127 ixl_vc_process_completion(struct ixl_vc_mgr *mgr, enum i40e_status_code err)
1129 struct ixl_vc_cmd *cmd;
1132 mgr->current = NULL;
1133 cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY;
1135 cmd->callback(cmd, cmd->arg, err);
1136 ixl_vc_process_next(mgr);
/*
 * ixl_vc_process_resp
 *
 * Complete the current command if (and only if) the PF's response
 * matches its request flag; a stale or unmatched response is ignored.
 * Stops the timeout callout before completing.
 */
1140 ixl_vc_process_resp(struct ixl_vc_mgr *mgr, uint32_t request,
1141 enum i40e_status_code err)
1143 struct ixl_vc_cmd *cmd;
1146 if (cmd == NULL || cmd->request != request)
1149 callout_stop(&mgr->callout);
1150 ixl_vc_process_completion(mgr, err);
/* Callout handler: the current command got no response in time —
 * complete it with I40E_ERR_TIMEOUT. */
1154 ixl_vc_cmd_timeout(void *arg)
1156 struct ixl_vc_mgr *mgr = (struct ixl_vc_mgr *)arg;
1158 IXLV_CORE_LOCK_ASSERT(mgr->sc);
1159 ixl_vc_process_completion(mgr, I40E_ERR_TIMEOUT);
/* Callout handler: re-send the current command after a scheduled retry
 * (see ixl_vc_schedule_retry()). */
1163 ixl_vc_cmd_retry(void *arg)
1165 struct ixl_vc_mgr *mgr = (struct ixl_vc_mgr *)arg;
1167 IXLV_CORE_LOCK_ASSERT(mgr->sc);
1168 ixl_vc_send_current(mgr);
/*
 * ixl_vc_send_current
 *
 * Transmit the current command's request to the PF and arm the timeout
 * callout so an unanswered command eventually fails with a timeout.
 */
1172 ixl_vc_send_current(struct ixl_vc_mgr *mgr)
1174 struct ixl_vc_cmd *cmd;
1177 ixl_vc_send_cmd(mgr->sc, cmd->request);
1178 callout_reset(&mgr->callout, IXLV_VC_TIMEOUT, ixl_vc_cmd_timeout, mgr);
/*
 * ixl_vc_process_next
 *
 * If no command is in flight and the pending queue is non-empty,
 * dequeue the head command, make it current, and send it.  Commands are
 * strictly serialized: only one virtchnl request is outstanding at once.
 */
1182 ixl_vc_process_next(struct ixl_vc_mgr *mgr)
1184 struct ixl_vc_cmd *cmd;
1186 if (mgr->current != NULL)
1189 if (TAILQ_EMPTY(&mgr->pending))
1192 cmd = TAILQ_FIRST(&mgr->pending);
1193 TAILQ_REMOVE(&mgr->pending, cmd, next);
1196 ixl_vc_send_current(mgr);
/* Re-arm the callout to retry the current command in ~10 ms
 * (howmany(hz, 100) ticks). */
1200 ixl_vc_schedule_retry(struct ixl_vc_mgr *mgr)
1203 callout_reset(&mgr->callout, howmany(hz, 100), ixl_vc_cmd_retry, mgr);
/*
 * ixl_vc_enqueue
 *
 * Queue a command for the manager.  If the same command object is
 * already busy it is first withdrawn (cleared as current or unlinked
 * from the pending queue), then re-queued at the tail with its callback
 * and argument updated; processing is kicked immediately.
 */
1207 ixl_vc_enqueue(struct ixl_vc_mgr *mgr, struct ixl_vc_cmd *cmd,
1208 uint32_t req, ixl_vc_callback_t *callback, void *arg)
1210 IXLV_CORE_LOCK_ASSERT(mgr->sc);
1212 if (cmd->flags & IXLV_VC_CMD_FLAG_BUSY) {
1213 if (mgr->current == cmd)
1214 mgr->current = NULL;
1216 TAILQ_REMOVE(&mgr->pending, cmd, next);
1220 cmd->callback = callback;
1222 cmd->flags |= IXLV_VC_CMD_FLAG_BUSY;
1223 TAILQ_INSERT_TAIL(&mgr->pending, cmd, next);
1225 ixl_vc_process_next(mgr);
/*
 * ixl_vc_flush
 *
 * Abort all outstanding work: fail the current command and every
 * pending command with I40E_ERR_ADAPTER_STOPPED (invoking each
 * callback), then stop the timeout/retry callout.
 */
1229 ixl_vc_flush(struct ixl_vc_mgr *mgr)
1231 struct ixl_vc_cmd *cmd;
1233 IXLV_CORE_LOCK_ASSERT(mgr->sc);
/* Invariant: pending commands imply something is currently in flight. */
1234 KASSERT(TAILQ_EMPTY(&mgr->pending) || mgr->current != NULL,
1235 ("ixlv: pending commands waiting but no command in progress"));
1239 mgr->current = NULL;
1240 cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY;
1241 cmd->callback(cmd, cmd->arg, I40E_ERR_ADAPTER_STOPPED);
/* Drain the pending queue, failing each command the same way. */
1244 while ((cmd = TAILQ_FIRST(&mgr->pending)) != NULL) {
1245 TAILQ_REMOVE(&mgr->pending, cmd, next);
1246 cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY;
1247 cmd->callback(cmd, cmd->arg, I40E_ERR_ADAPTER_STOPPED);
1250 callout_stop(&mgr->callout);