1 /******************************************************************************
3 Copyright (c) 2001-2017, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 #include "ixgbe_sriov.h"
42 MALLOC_DEFINE(M_IXGBE_SRIOV, "ix_sriov", "ix SR-IOV allocations");
44 /************************************************************************
45 * ixgbe_pci_iov_detach
46 ************************************************************************/
48 ixgbe_pci_iov_detach(device_t dev)
50 return pci_iov_detach(dev);
53 /************************************************************************
54 * ixgbe_define_iov_schemas
55 ************************************************************************/
57 ixgbe_define_iov_schemas(device_t dev, int *error)
59 nvlist_t *pf_schema, *vf_schema;
61 pf_schema = pci_iov_schema_alloc_node();
62 vf_schema = pci_iov_schema_alloc_node();
63 pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
64 pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
65 IOV_SCHEMA_HASDEFAULT, TRUE);
66 pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
67 IOV_SCHEMA_HASDEFAULT, FALSE);
68 pci_iov_schema_add_bool(vf_schema, "allow-promisc",
69 IOV_SCHEMA_HASDEFAULT, FALSE);
70 *error = pci_iov_attach(dev, pf_schema, vf_schema);
73 "Error %d setting up SR-IOV\n", *error);
75 } /* ixgbe_define_iov_schemas */
77 /************************************************************************
78 * ixgbe_align_all_queue_indices
79 ************************************************************************/
81 ixgbe_align_all_queue_indices(struct adapter *adapter)
86 for (i = 0; i < adapter->num_rx_queues; i++) {
87 index = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i);
88 adapter->rx_queues[i].rxr.me = index;
91 for (i = 0; i < adapter->num_tx_queues; i++) {
92 index = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i);
93 adapter->tx_queues[i].txr.me = index;
97 /* Support functions for SR-IOV/VF management */
99 ixgbe_send_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
101 if (vf->flags & IXGBE_VF_CTS)
102 msg |= IXGBE_VT_MSGTYPE_CTS;
104 adapter->hw.mbx.ops.write(&adapter->hw, &msg, 1, vf->pool);
108 ixgbe_send_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
110 msg &= IXGBE_VT_MSG_MASK;
111 ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_ACK);
115 ixgbe_send_vf_nack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
117 msg &= IXGBE_VT_MSG_MASK;
118 ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_NACK);
122 ixgbe_process_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf)
124 if (!(vf->flags & IXGBE_VF_CTS))
125 ixgbe_send_vf_nack(adapter, vf, 0);
128 static inline boolean_t
129 ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
131 return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
135 ixgbe_vf_queues(int mode)
/* Absolute hardware queue index for queue 'num' of VM pool 'vfnum'. */
static int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	return ((vfnum * ixgbe_vf_queues(mode)) + num);
} /* ixgbe_vf_que_index */
155 ixgbe_update_max_frame(struct adapter * adapter, int max_frame)
157 if (adapter->max_frame_size < max_frame)
158 adapter->max_frame_size = max_frame;
162 ixgbe_get_mrqc(int iov_mode)
168 mrqc = IXGBE_MRQC_VMDQRSS64EN;
171 mrqc = IXGBE_MRQC_VMDQRSS32EN;
177 panic("Unexpected SR-IOV mode %d", iov_mode);
185 ixgbe_get_mtqc(int iov_mode)
191 mtqc = IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
194 mtqc = IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
197 mtqc = IXGBE_MTQC_64Q_1PB;
200 panic("Unexpected SR-IOV mode %d", iov_mode);
207 ixgbe_ping_all_vfs(struct adapter *adapter)
211 for (int i = 0; i < adapter->num_vfs; i++) {
212 vf = &adapter->vfs[i];
213 if (vf->flags & IXGBE_VF_ACTIVE)
214 ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
216 } /* ixgbe_ping_all_vfs */
220 ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
224 uint32_t vmolr, vmvir;
230 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));
232 /* Do not receive packets that pass inexact filters. */
233 vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);
235 /* Disable Multicast Promicuous Mode. */
236 vmolr &= ~IXGBE_VMOLR_MPE;
238 /* Accept broadcasts. */
239 vmolr |= IXGBE_VMOLR_BAM;
242 /* Accept non-vlan tagged traffic. */
243 vmolr |= IXGBE_VMOLR_AUPE;
245 /* Allow VM to tag outgoing traffic; no default tag. */
248 /* Require vlan-tagged traffic. */
249 vmolr &= ~IXGBE_VMOLR_AUPE;
251 /* Tag all traffic with provided vlan tag. */
252 vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
254 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
255 IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
256 } /* ixgbe_vf_set_default_vlan */
260 ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
264 * Frame size compatibility between PF and VF is only a problem on
265 * 82599-based cards. X540 and later support any combination of jumbo
266 * frames on PFs and VFs.
268 if (adapter->hw.mac.type != ixgbe_mac_82599EB)
271 switch (vf->api_ver) {
272 case IXGBE_API_VER_1_0:
273 case IXGBE_API_VER_UNKNOWN:
275 * On legacy (1.0 and older) VF versions, we don't support jumbo
276 * frames on either the PF or the VF.
278 if (adapter->max_frame_size > ETHER_MAX_LEN ||
279 vf->maximum_frame_size > ETHER_MAX_LEN)
285 case IXGBE_API_VER_1_1:
288 * 1.1 or later VF versions always work if they aren't using
291 if (vf->maximum_frame_size <= ETHER_MAX_LEN)
295 * Jumbo frames only work with VFs if the PF is also using jumbo
298 if (adapter->max_frame_size <= ETHER_MAX_LEN)
303 } /* ixgbe_vf_frame_size_compatible */
307 ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf)
309 ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan);
311 // XXX clear multicast addresses
313 ixgbe_clear_rar(&adapter->hw, vf->rar_index);
315 vf->api_ver = IXGBE_API_VER_UNKNOWN;
316 } /* ixgbe_process_vf_reset */
320 ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf)
323 uint32_t vf_index, vfte;
327 vf_index = IXGBE_VF_INDEX(vf->pool);
328 vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
329 vfte |= IXGBE_VF_BIT(vf->pool);
330 IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
331 } /* ixgbe_vf_enable_transmit */
335 ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf)
338 uint32_t vf_index, vfre;
342 vf_index = IXGBE_VF_INDEX(vf->pool);
343 vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
344 if (ixgbe_vf_frame_size_compatible(adapter, vf))
345 vfre |= IXGBE_VF_BIT(vf->pool);
347 vfre &= ~IXGBE_VF_BIT(vf->pool);
348 IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
349 } /* ixgbe_vf_enable_receive */
353 ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
357 uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];
361 ixgbe_process_vf_reset(adapter, vf);
363 if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
364 ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr,
366 ack = IXGBE_VT_MSGTYPE_ACK;
368 ack = IXGBE_VT_MSGTYPE_NACK;
370 ixgbe_vf_enable_transmit(adapter, vf);
371 ixgbe_vf_enable_receive(adapter, vf);
373 vf->flags |= IXGBE_VF_CTS;
375 resp[0] = IXGBE_VF_RESET | ack | IXGBE_VT_MSGTYPE_CTS;
376 bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
377 resp[3] = hw->mac.mc_filter_type;
378 hw->mbx.ops.write(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
379 } /* ixgbe_vf_reset_msg */
383 ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
387 mac = (uint8_t*)&msg[1];
389 /* Check that the VF has permission to change the MAC address. */
390 if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
391 ixgbe_send_vf_nack(adapter, vf, msg[0]);
395 if (ixgbe_validate_mac_addr(mac) != 0) {
396 ixgbe_send_vf_nack(adapter, vf, msg[0]);
400 bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
402 ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr, vf->pool,
405 ixgbe_send_vf_ack(adapter, vf, msg[0]);
406 } /* ixgbe_vf_set_mac */
410 * VF multicast addresses are set by using the appropriate bit in
411 * 1 of 128 32 bit addresses (4096 possible).
414 ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg)
416 u16 *list = (u16*)&msg[1];
418 u32 vmolr, vec_bit, vec_reg, mta_reg;
420 entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
421 entries = min(entries, IXGBE_MAX_VF_MC);
423 vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool));
425 vf->num_mc_hashes = entries;
427 /* Set the appropriate MTA bit */
428 for (int i = 0; i < entries; i++) {
429 vf->mc_hash[i] = list[i];
430 vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
431 vec_bit = vf->mc_hash[i] & 0x1F;
432 mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg));
433 mta_reg |= (1 << vec_bit);
434 IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg);
437 vmolr |= IXGBE_VMOLR_ROMPE;
438 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr);
439 ixgbe_send_vf_ack(adapter, vf, msg[0]);
440 } /* ixgbe_vf_set_mc_addr */
444 ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
451 enable = IXGBE_VT_MSGINFO(msg[0]);
452 tag = msg[1] & IXGBE_VLVF_VLANID_MASK;
454 if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
455 ixgbe_send_vf_nack(adapter, vf, msg[0]);
459 /* It is illegal to enable vlan tag 0. */
460 if (tag == 0 && enable != 0) {
461 ixgbe_send_vf_nack(adapter, vf, msg[0]);
465 ixgbe_set_vfta(hw, tag, vf->pool, enable, false);
466 ixgbe_send_vf_ack(adapter, vf, msg[0]);
467 } /* ixgbe_vf_set_vlan */
471 ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
474 uint32_t vf_max_size, pf_max_size, mhadd;
477 vf_max_size = msg[1];
479 if (vf_max_size < ETHER_CRC_LEN) {
480 /* We intentionally ACK invalid LPE requests. */
481 ixgbe_send_vf_ack(adapter, vf, msg[0]);
485 vf_max_size -= ETHER_CRC_LEN;
487 if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
488 /* We intentionally ACK invalid LPE requests. */
489 ixgbe_send_vf_ack(adapter, vf, msg[0]);
493 vf->maximum_frame_size = vf_max_size;
494 ixgbe_update_max_frame(adapter, vf->maximum_frame_size);
497 * We might have to disable reception to this VF if the frame size is
498 * not compatible with the config on the PF.
500 ixgbe_vf_enable_receive(adapter, vf);
502 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
503 pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
505 if (pf_max_size < adapter->max_frame_size) {
506 mhadd &= ~IXGBE_MHADD_MFS_MASK;
507 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
508 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
511 ixgbe_send_vf_ack(adapter, vf, msg[0]);
512 } /* ixgbe_vf_set_lpe */
516 ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
520 ixgbe_send_vf_nack(adapter, vf, msg[0]);
521 } /* ixgbe_vf_set_macvlan */
525 ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf,
530 case IXGBE_API_VER_1_0:
531 case IXGBE_API_VER_1_1:
532 vf->api_ver = msg[1];
533 ixgbe_send_vf_ack(adapter, vf, msg[0]);
536 vf->api_ver = IXGBE_API_VER_UNKNOWN;
537 ixgbe_send_vf_nack(adapter, vf, msg[0]);
540 } /* ixgbe_vf_api_negotiate */
544 ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
547 uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
552 /* GET_QUEUES is not supported on pre-1.1 APIs. */
554 case IXGBE_API_VER_1_0:
555 case IXGBE_API_VER_UNKNOWN:
556 ixgbe_send_vf_nack(adapter, vf, msg[0]);
560 resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK |
561 IXGBE_VT_MSGTYPE_CTS;
563 num_queues = ixgbe_vf_queues(adapter->iov_mode);
564 resp[IXGBE_VF_TX_QUEUES] = num_queues;
565 resp[IXGBE_VF_RX_QUEUES] = num_queues;
566 resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
567 resp[IXGBE_VF_DEF_QUEUE] = 0;
569 hw->mbx.ops.write(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
570 } /* ixgbe_vf_get_queues */
574 ixgbe_process_vf_msg(if_ctx_t ctx, struct ixgbe_vf *vf)
576 struct adapter *adapter = iflib_get_softc(ctx);
578 struct ifnet *ifp = iflib_get_ifp(ctx);
581 uint32_t msg[IXGBE_VFMAILBOX_SIZE];
586 error = hw->mbx.ops.read(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);
591 CTR3(KTR_MALLOC, "%s: received msg %x from %d", ifp->if_xname,
593 if (msg[0] == IXGBE_VF_RESET) {
594 ixgbe_vf_reset_msg(adapter, vf, msg);
598 if (!(vf->flags & IXGBE_VF_CTS)) {
599 ixgbe_send_vf_nack(adapter, vf, msg[0]);
603 switch (msg[0] & IXGBE_VT_MSG_MASK) {
604 case IXGBE_VF_SET_MAC_ADDR:
605 ixgbe_vf_set_mac(adapter, vf, msg);
607 case IXGBE_VF_SET_MULTICAST:
608 ixgbe_vf_set_mc_addr(adapter, vf, msg);
610 case IXGBE_VF_SET_VLAN:
611 ixgbe_vf_set_vlan(adapter, vf, msg);
613 case IXGBE_VF_SET_LPE:
614 ixgbe_vf_set_lpe(adapter, vf, msg);
616 case IXGBE_VF_SET_MACVLAN:
617 ixgbe_vf_set_macvlan(adapter, vf, msg);
619 case IXGBE_VF_API_NEGOTIATE:
620 ixgbe_vf_api_negotiate(adapter, vf, msg);
622 case IXGBE_VF_GET_QUEUES:
623 ixgbe_vf_get_queues(adapter, vf, msg);
626 ixgbe_send_vf_nack(adapter, vf, msg[0]);
628 } /* ixgbe_process_vf_msg */
631 /* Tasklet for handling VF -> PF mailbox messages */
633 ixgbe_handle_mbx(void *context)
635 if_ctx_t ctx = context;
636 struct adapter *adapter = iflib_get_softc(ctx);
643 for (i = 0; i < adapter->num_vfs; i++) {
644 vf = &adapter->vfs[i];
646 if (vf->flags & IXGBE_VF_ACTIVE) {
647 if (hw->mbx.ops.check_for_rst(hw, vf->pool) == 0)
648 ixgbe_process_vf_reset(adapter, vf);
650 if (hw->mbx.ops.check_for_msg(hw, vf->pool) == 0)
651 ixgbe_process_vf_msg(ctx, vf);
653 if (hw->mbx.ops.check_for_ack(hw, vf->pool) == 0)
654 ixgbe_process_vf_ack(adapter, vf);
657 } /* ixgbe_handle_mbx */
660 ixgbe_if_iov_init(if_ctx_t ctx, u16 num_vfs, const nvlist_t *config)
662 struct adapter *adapter;
665 adapter = iflib_get_softc(ctx);
666 adapter->iov_mode = IXGBE_NO_VM;
669 /* Would we ever get num_vfs = 0? */
675 * We've got to reserve a VM's worth of queues for the PF,
676 * thus we go into "64 VF mode" if 32+ VFs are requested.
677 * With 64 VFs, you can only have two queues per VF.
678 * With 32 VFs, you can have up to four queues per VF.
680 if (num_vfs >= IXGBE_32_VM)
681 adapter->iov_mode = IXGBE_64_VM;
683 adapter->iov_mode = IXGBE_32_VM;
685 /* Again, reserving 1 VM's worth of queues for the PF */
686 adapter->pool = adapter->iov_mode - 1;
688 if ((num_vfs > adapter->pool) || (num_vfs >= IXGBE_64_VM)) {
693 adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE_SRIOV,
696 if (adapter->vfs == NULL) {
701 adapter->num_vfs = num_vfs;
702 ixgbe_if_init(adapter->ctx);
703 adapter->feat_en |= IXGBE_FEATURE_SRIOV;
708 adapter->num_vfs = 0;
710 adapter->iov_mode = IXGBE_NO_VM;
713 } /* ixgbe_if_iov_init */
716 ixgbe_if_iov_uninit(if_ctx_t ctx)
719 struct adapter *adapter;
720 uint32_t pf_reg, vf_reg;
722 adapter = iflib_get_softc(ctx);
725 /* Enable rx/tx for the PF and disable it for all VFs. */
726 pf_reg = IXGBE_VF_INDEX(adapter->pool);
727 IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg), IXGBE_VF_BIT(adapter->pool));
728 IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg), IXGBE_VF_BIT(adapter->pool));
734 IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
735 IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);
737 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
739 free(adapter->vfs, M_IXGBE_SRIOV);
741 adapter->num_vfs = 0;
742 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
743 } /* ixgbe_if_iov_uninit */
746 ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
749 uint32_t vf_index, pfmbimr;
753 if (!(vf->flags & IXGBE_VF_ACTIVE))
756 vf_index = IXGBE_VF_INDEX(vf->pool);
757 pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
758 pfmbimr |= IXGBE_VF_BIT(vf->pool);
759 IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);
761 ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);
763 // XXX multicast addresses
765 if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
766 ixgbe_set_rar(&adapter->hw, vf->rar_index,
767 vf->ether_addr, vf->pool, TRUE);
770 ixgbe_vf_enable_transmit(adapter, vf);
771 ixgbe_vf_enable_receive(adapter, vf);
773 ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
774 } /* ixgbe_init_vf */
777 ixgbe_initialize_iov(struct adapter *adapter)
779 struct ixgbe_hw *hw = &adapter->hw;
780 uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
783 if (adapter->iov_mode == IXGBE_NO_VM)
786 /* RMW appropriate registers based on IOV mode */
788 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
789 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
790 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
792 mrqc &= ~IXGBE_MRQC_MRQE_MASK;
793 mtqc = IXGBE_MTQC_VT_ENA; /* No initial MTQC read needed */
794 gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
795 gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
796 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
797 switch (adapter->iov_mode) {
799 mrqc |= IXGBE_MRQC_VMDQRSS64EN;
800 mtqc |= IXGBE_MTQC_64VF;
801 gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
802 gpie |= IXGBE_GPIE_VTMODE_64;
805 mrqc |= IXGBE_MRQC_VMDQRSS32EN;
806 mtqc |= IXGBE_MTQC_32VF;
807 gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
808 gpie |= IXGBE_GPIE_VTMODE_32;
811 panic("Unexpected SR-IOV mode %d", adapter->iov_mode);
814 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
815 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
816 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
817 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
819 /* Enable rx/tx for the PF. */
820 vf_reg = IXGBE_VF_INDEX(adapter->pool);
821 IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), IXGBE_VF_BIT(adapter->pool));
822 IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), IXGBE_VF_BIT(adapter->pool));
824 /* Allow VM-to-VM communication. */
825 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
827 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
828 vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
829 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
831 for (i = 0; i < adapter->num_vfs; i++)
832 ixgbe_init_vf(adapter, &adapter->vfs[i]);
833 } /* ixgbe_initialize_iov */
836 /* Check the max frame setting of all active VF's */
838 ixgbe_recalculate_max_frame(struct adapter *adapter)
842 for (int i = 0; i < adapter->num_vfs; i++) {
843 vf = &adapter->vfs[i];
844 if (vf->flags & IXGBE_VF_ACTIVE)
845 ixgbe_update_max_frame(adapter, vf->maximum_frame_size);
847 } /* ixgbe_recalculate_max_frame */
850 ixgbe_if_iov_vf_add(if_ctx_t ctx, u16 vfnum, const nvlist_t *config)
852 struct adapter *adapter;
856 adapter = iflib_get_softc(ctx);
858 KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
859 vfnum, adapter->num_vfs));
861 vf = &adapter->vfs[vfnum];
864 /* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
865 vf->rar_index = vfnum + 1;
866 vf->default_vlan = 0;
867 vf->maximum_frame_size = ETHER_MAX_LEN;
868 ixgbe_update_max_frame(adapter, vf->maximum_frame_size);
870 if (nvlist_exists_binary(config, "mac-addr")) {
871 mac = nvlist_get_binary(config, "mac-addr", NULL);
872 bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
873 if (nvlist_get_bool(config, "allow-set-mac"))
874 vf->flags |= IXGBE_VF_CAP_MAC;
877 * If the administrator has not specified a MAC address then
878 * we must allow the VF to choose one.
880 vf->flags |= IXGBE_VF_CAP_MAC;
882 vf->flags |= IXGBE_VF_ACTIVE;
884 ixgbe_init_vf(adapter, vf);
887 } /* ixgbe_if_iov_vf_add */
/*
 * Stub used when SR-IOV support is compiled out; the mailbox task has
 * nothing to do.  NOTE(review): presumably sits in the #else branch of
 * a PCI_IOV conditional (the #ifdef lines were lost in the paste).
 */
void
ixgbe_handle_mbx(void *context)
{
	UNREFERENCED_PARAMETER(context);
} /* ixgbe_handle_mbx */