1 /******************************************************************************
3 Copyright (c) 2001-2017, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
36 #include "ixgbe_sriov.h"
40 MALLOC_DEFINE(M_IXGBE_SRIOV, "ix_sriov", "ix SR-IOV allocations");
42 /************************************************************************
43 * ixgbe_pci_iov_detach
44 ************************************************************************/
46 ixgbe_pci_iov_detach(device_t dev)
48 return pci_iov_detach(dev);
51 /************************************************************************
52 * ixgbe_define_iov_schemas
53 ************************************************************************/
55 ixgbe_define_iov_schemas(device_t dev, int *error)
57 nvlist_t *pf_schema, *vf_schema;
59 pf_schema = pci_iov_schema_alloc_node();
60 vf_schema = pci_iov_schema_alloc_node();
61 pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
62 pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
63 IOV_SCHEMA_HASDEFAULT, TRUE);
64 pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
65 IOV_SCHEMA_HASDEFAULT, FALSE);
66 pci_iov_schema_add_bool(vf_schema, "allow-promisc",
67 IOV_SCHEMA_HASDEFAULT, FALSE);
68 *error = pci_iov_attach(dev, pf_schema, vf_schema);
71 "Error %d setting up SR-IOV\n", *error);
73 } /* ixgbe_define_iov_schemas */
75 /************************************************************************
76 * ixgbe_align_all_queue_indices
77 ************************************************************************/
79 ixgbe_align_all_queue_indices(struct adapter *adapter)
84 for (i = 0; i < adapter->num_rx_queues; i++) {
85 index = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i);
86 adapter->rx_queues[i].rxr.me = index;
89 for (i = 0; i < adapter->num_tx_queues; i++) {
90 index = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i);
91 adapter->tx_queues[i].txr.me = index;
95 /* Support functions for SR-IOV/VF management */
97 ixgbe_send_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
99 if (vf->flags & IXGBE_VF_CTS)
100 msg |= IXGBE_VT_MSGTYPE_CTS;
102 adapter->hw.mbx.ops.write(&adapter->hw, &msg, 1, vf->pool);
106 ixgbe_send_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
108 msg &= IXGBE_VT_MSG_MASK;
109 ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_ACK);
113 ixgbe_send_vf_nack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
115 msg &= IXGBE_VT_MSG_MASK;
116 ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_NACK);
120 ixgbe_process_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf)
122 if (!(vf->flags & IXGBE_VF_CTS))
123 ixgbe_send_vf_nack(adapter, vf, 0);
126 static inline boolean_t
127 ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
129 return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
133 ixgbe_vf_queues(int mode)
/*
 * Absolute hardware queue index for queue 'num' of pool 'vfnum':
 * pools are laid out contiguously, ixgbe_vf_queues(mode) queues each.
 */
inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
    return ((vfnum * ixgbe_vf_queues(mode)) + num);
}
153 ixgbe_update_max_frame(struct adapter * adapter, int max_frame)
155 if (adapter->max_frame_size < max_frame)
156 adapter->max_frame_size = max_frame;
160 ixgbe_get_mrqc(int iov_mode)
166 mrqc = IXGBE_MRQC_VMDQRSS64EN;
169 mrqc = IXGBE_MRQC_VMDQRSS32EN;
175 panic("Unexpected SR-IOV mode %d", iov_mode);
183 ixgbe_get_mtqc(int iov_mode)
189 mtqc = IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
192 mtqc = IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
195 mtqc = IXGBE_MTQC_64Q_1PB;
198 panic("Unexpected SR-IOV mode %d", iov_mode);
205 ixgbe_ping_all_vfs(struct adapter *adapter)
209 for (int i = 0; i < adapter->num_vfs; i++) {
210 vf = &adapter->vfs[i];
211 if (vf->flags & IXGBE_VF_ACTIVE)
212 ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
214 } /* ixgbe_ping_all_vfs */
218 ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
222 uint32_t vmolr, vmvir;
228 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));
230 /* Do not receive packets that pass inexact filters. */
231 vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);
233 /* Disable Multicast Promicuous Mode. */
234 vmolr &= ~IXGBE_VMOLR_MPE;
236 /* Accept broadcasts. */
237 vmolr |= IXGBE_VMOLR_BAM;
240 /* Accept non-vlan tagged traffic. */
241 vmolr |= IXGBE_VMOLR_AUPE;
243 /* Allow VM to tag outgoing traffic; no default tag. */
246 /* Require vlan-tagged traffic. */
247 vmolr &= ~IXGBE_VMOLR_AUPE;
249 /* Tag all traffic with provided vlan tag. */
250 vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
252 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
253 IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
254 } /* ixgbe_vf_set_default_vlan */
258 ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
262 * Frame size compatibility between PF and VF is only a problem on
263 * 82599-based cards. X540 and later support any combination of jumbo
264 * frames on PFs and VFs.
266 if (adapter->hw.mac.type != ixgbe_mac_82599EB)
269 switch (vf->api_ver) {
270 case IXGBE_API_VER_1_0:
271 case IXGBE_API_VER_UNKNOWN:
273 * On legacy (1.0 and older) VF versions, we don't support jumbo
274 * frames on either the PF or the VF.
276 if (adapter->max_frame_size > ETHER_MAX_LEN ||
277 vf->maximum_frame_size > ETHER_MAX_LEN)
283 case IXGBE_API_VER_1_1:
286 * 1.1 or later VF versions always work if they aren't using
289 if (vf->maximum_frame_size <= ETHER_MAX_LEN)
293 * Jumbo frames only work with VFs if the PF is also using jumbo
296 if (adapter->max_frame_size <= ETHER_MAX_LEN)
301 } /* ixgbe_vf_frame_size_compatible */
305 ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf)
307 ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan);
309 // XXX clear multicast addresses
311 ixgbe_clear_rar(&adapter->hw, vf->rar_index);
313 vf->api_ver = IXGBE_API_VER_UNKNOWN;
314 } /* ixgbe_process_vf_reset */
318 ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf)
321 uint32_t vf_index, vfte;
325 vf_index = IXGBE_VF_INDEX(vf->pool);
326 vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
327 vfte |= IXGBE_VF_BIT(vf->pool);
328 IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
329 } /* ixgbe_vf_enable_transmit */
333 ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf)
336 uint32_t vf_index, vfre;
340 vf_index = IXGBE_VF_INDEX(vf->pool);
341 vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
342 if (ixgbe_vf_frame_size_compatible(adapter, vf))
343 vfre |= IXGBE_VF_BIT(vf->pool);
345 vfre &= ~IXGBE_VF_BIT(vf->pool);
346 IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
347 } /* ixgbe_vf_enable_receive */
351 ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
355 uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];
359 ixgbe_process_vf_reset(adapter, vf);
361 if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
362 ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr,
364 ack = IXGBE_VT_MSGTYPE_ACK;
366 ack = IXGBE_VT_MSGTYPE_NACK;
368 ixgbe_vf_enable_transmit(adapter, vf);
369 ixgbe_vf_enable_receive(adapter, vf);
371 vf->flags |= IXGBE_VF_CTS;
373 resp[0] = IXGBE_VF_RESET | ack | IXGBE_VT_MSGTYPE_CTS;
374 bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
375 resp[3] = hw->mac.mc_filter_type;
376 hw->mbx.ops.write(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
377 } /* ixgbe_vf_reset_msg */
381 ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
385 mac = (uint8_t*)&msg[1];
387 /* Check that the VF has permission to change the MAC address. */
388 if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
389 ixgbe_send_vf_nack(adapter, vf, msg[0]);
393 if (ixgbe_validate_mac_addr(mac) != 0) {
394 ixgbe_send_vf_nack(adapter, vf, msg[0]);
398 bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
400 ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr, vf->pool,
403 ixgbe_send_vf_ack(adapter, vf, msg[0]);
404 } /* ixgbe_vf_set_mac */
408 * VF multicast addresses are set by using the appropriate bit in
409 * 1 of 128 32 bit addresses (4096 possible).
412 ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg)
414 u16 *list = (u16*)&msg[1];
416 u32 vmolr, vec_bit, vec_reg, mta_reg;
418 entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
419 entries = min(entries, IXGBE_MAX_VF_MC);
421 vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool));
423 vf->num_mc_hashes = entries;
425 /* Set the appropriate MTA bit */
426 for (int i = 0; i < entries; i++) {
427 vf->mc_hash[i] = list[i];
428 vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
429 vec_bit = vf->mc_hash[i] & 0x1F;
430 mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg));
431 mta_reg |= (1 << vec_bit);
432 IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg);
435 vmolr |= IXGBE_VMOLR_ROMPE;
436 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr);
437 ixgbe_send_vf_ack(adapter, vf, msg[0]);
438 } /* ixgbe_vf_set_mc_addr */
442 ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
449 enable = IXGBE_VT_MSGINFO(msg[0]);
450 tag = msg[1] & IXGBE_VLVF_VLANID_MASK;
452 if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
453 ixgbe_send_vf_nack(adapter, vf, msg[0]);
457 /* It is illegal to enable vlan tag 0. */
458 if (tag == 0 && enable != 0) {
459 ixgbe_send_vf_nack(adapter, vf, msg[0]);
463 ixgbe_set_vfta(hw, tag, vf->pool, enable, false);
464 ixgbe_send_vf_ack(adapter, vf, msg[0]);
465 } /* ixgbe_vf_set_vlan */
469 ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
472 uint32_t vf_max_size, pf_max_size, mhadd;
475 vf_max_size = msg[1];
477 if (vf_max_size < ETHER_CRC_LEN) {
478 /* We intentionally ACK invalid LPE requests. */
479 ixgbe_send_vf_ack(adapter, vf, msg[0]);
483 vf_max_size -= ETHER_CRC_LEN;
485 if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
486 /* We intentionally ACK invalid LPE requests. */
487 ixgbe_send_vf_ack(adapter, vf, msg[0]);
491 vf->maximum_frame_size = vf_max_size;
492 ixgbe_update_max_frame(adapter, vf->maximum_frame_size);
495 * We might have to disable reception to this VF if the frame size is
496 * not compatible with the config on the PF.
498 ixgbe_vf_enable_receive(adapter, vf);
500 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
501 pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
503 if (pf_max_size < adapter->max_frame_size) {
504 mhadd &= ~IXGBE_MHADD_MFS_MASK;
505 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
506 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
509 ixgbe_send_vf_ack(adapter, vf, msg[0]);
510 } /* ixgbe_vf_set_lpe */
514 ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
518 ixgbe_send_vf_nack(adapter, vf, msg[0]);
519 } /* ixgbe_vf_set_macvlan */
523 ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf,
528 case IXGBE_API_VER_1_0:
529 case IXGBE_API_VER_1_1:
530 vf->api_ver = msg[1];
531 ixgbe_send_vf_ack(adapter, vf, msg[0]);
534 vf->api_ver = IXGBE_API_VER_UNKNOWN;
535 ixgbe_send_vf_nack(adapter, vf, msg[0]);
538 } /* ixgbe_vf_api_negotiate */
542 ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
545 uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
550 /* GET_QUEUES is not supported on pre-1.1 APIs. */
552 case IXGBE_API_VER_1_0:
553 case IXGBE_API_VER_UNKNOWN:
554 ixgbe_send_vf_nack(adapter, vf, msg[0]);
558 resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK |
559 IXGBE_VT_MSGTYPE_CTS;
561 num_queues = ixgbe_vf_queues(adapter->iov_mode);
562 resp[IXGBE_VF_TX_QUEUES] = num_queues;
563 resp[IXGBE_VF_RX_QUEUES] = num_queues;
564 resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
565 resp[IXGBE_VF_DEF_QUEUE] = 0;
567 hw->mbx.ops.write(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
568 } /* ixgbe_vf_get_queues */
572 ixgbe_process_vf_msg(if_ctx_t ctx, struct ixgbe_vf *vf)
574 struct adapter *adapter = iflib_get_softc(ctx);
576 struct ifnet *ifp = iflib_get_ifp(ctx);
579 uint32_t msg[IXGBE_VFMAILBOX_SIZE];
584 error = hw->mbx.ops.read(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);
589 CTR3(KTR_MALLOC, "%s: received msg %x from %d", ifp->if_xname,
591 if (msg[0] == IXGBE_VF_RESET) {
592 ixgbe_vf_reset_msg(adapter, vf, msg);
596 if (!(vf->flags & IXGBE_VF_CTS)) {
597 ixgbe_send_vf_nack(adapter, vf, msg[0]);
601 switch (msg[0] & IXGBE_VT_MSG_MASK) {
602 case IXGBE_VF_SET_MAC_ADDR:
603 ixgbe_vf_set_mac(adapter, vf, msg);
605 case IXGBE_VF_SET_MULTICAST:
606 ixgbe_vf_set_mc_addr(adapter, vf, msg);
608 case IXGBE_VF_SET_VLAN:
609 ixgbe_vf_set_vlan(adapter, vf, msg);
611 case IXGBE_VF_SET_LPE:
612 ixgbe_vf_set_lpe(adapter, vf, msg);
614 case IXGBE_VF_SET_MACVLAN:
615 ixgbe_vf_set_macvlan(adapter, vf, msg);
617 case IXGBE_VF_API_NEGOTIATE:
618 ixgbe_vf_api_negotiate(adapter, vf, msg);
620 case IXGBE_VF_GET_QUEUES:
621 ixgbe_vf_get_queues(adapter, vf, msg);
624 ixgbe_send_vf_nack(adapter, vf, msg[0]);
626 } /* ixgbe_process_vf_msg */
629 /* Tasklet for handling VF -> PF mailbox messages */
631 ixgbe_handle_mbx(void *context)
633 if_ctx_t ctx = context;
634 struct adapter *adapter = iflib_get_softc(ctx);
641 for (i = 0; i < adapter->num_vfs; i++) {
642 vf = &adapter->vfs[i];
644 if (vf->flags & IXGBE_VF_ACTIVE) {
645 if (hw->mbx.ops.check_for_rst(hw, vf->pool) == 0)
646 ixgbe_process_vf_reset(adapter, vf);
648 if (hw->mbx.ops.check_for_msg(hw, vf->pool) == 0)
649 ixgbe_process_vf_msg(ctx, vf);
651 if (hw->mbx.ops.check_for_ack(hw, vf->pool) == 0)
652 ixgbe_process_vf_ack(adapter, vf);
655 } /* ixgbe_handle_mbx */
658 ixgbe_if_iov_init(if_ctx_t ctx, u16 num_vfs, const nvlist_t *config)
660 struct adapter *adapter;
663 adapter = iflib_get_softc(ctx);
664 adapter->iov_mode = IXGBE_NO_VM;
667 /* Would we ever get num_vfs = 0? */
673 * We've got to reserve a VM's worth of queues for the PF,
674 * thus we go into "64 VF mode" if 32+ VFs are requested.
675 * With 64 VFs, you can only have two queues per VF.
676 * With 32 VFs, you can have up to four queues per VF.
678 if (num_vfs >= IXGBE_32_VM)
679 adapter->iov_mode = IXGBE_64_VM;
681 adapter->iov_mode = IXGBE_32_VM;
683 /* Again, reserving 1 VM's worth of queues for the PF */
684 adapter->pool = adapter->iov_mode - 1;
686 if ((num_vfs > adapter->pool) || (num_vfs >= IXGBE_64_VM)) {
691 adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE_SRIOV,
694 if (adapter->vfs == NULL) {
699 adapter->num_vfs = num_vfs;
700 ixgbe_if_init(adapter->ctx);
701 adapter->feat_en |= IXGBE_FEATURE_SRIOV;
706 adapter->num_vfs = 0;
708 adapter->iov_mode = IXGBE_NO_VM;
711 } /* ixgbe_if_iov_init */
714 ixgbe_if_iov_uninit(if_ctx_t ctx)
717 struct adapter *adapter;
718 uint32_t pf_reg, vf_reg;
720 adapter = iflib_get_softc(ctx);
723 /* Enable rx/tx for the PF and disable it for all VFs. */
724 pf_reg = IXGBE_VF_INDEX(adapter->pool);
725 IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg), IXGBE_VF_BIT(adapter->pool));
726 IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg), IXGBE_VF_BIT(adapter->pool));
732 IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
733 IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);
735 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
737 free(adapter->vfs, M_IXGBE_SRIOV);
739 adapter->num_vfs = 0;
740 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
741 } /* ixgbe_if_iov_uninit */
744 ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
747 uint32_t vf_index, pfmbimr;
751 if (!(vf->flags & IXGBE_VF_ACTIVE))
754 vf_index = IXGBE_VF_INDEX(vf->pool);
755 pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
756 pfmbimr |= IXGBE_VF_BIT(vf->pool);
757 IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);
759 ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);
761 // XXX multicast addresses
763 if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
764 ixgbe_set_rar(&adapter->hw, vf->rar_index,
765 vf->ether_addr, vf->pool, TRUE);
768 ixgbe_vf_enable_transmit(adapter, vf);
769 ixgbe_vf_enable_receive(adapter, vf);
771 ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
772 } /* ixgbe_init_vf */
775 ixgbe_initialize_iov(struct adapter *adapter)
777 struct ixgbe_hw *hw = &adapter->hw;
778 uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
781 if (adapter->iov_mode == IXGBE_NO_VM)
784 /* RMW appropriate registers based on IOV mode */
786 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
787 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
788 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
790 mrqc &= ~IXGBE_MRQC_MRQE_MASK;
791 mtqc = IXGBE_MTQC_VT_ENA; /* No initial MTQC read needed */
792 gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
793 gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
794 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
795 switch (adapter->iov_mode) {
797 mrqc |= IXGBE_MRQC_VMDQRSS64EN;
798 mtqc |= IXGBE_MTQC_64VF;
799 gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
800 gpie |= IXGBE_GPIE_VTMODE_64;
803 mrqc |= IXGBE_MRQC_VMDQRSS32EN;
804 mtqc |= IXGBE_MTQC_32VF;
805 gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
806 gpie |= IXGBE_GPIE_VTMODE_32;
809 panic("Unexpected SR-IOV mode %d", adapter->iov_mode);
812 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
813 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
814 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
815 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
817 /* Enable rx/tx for the PF. */
818 vf_reg = IXGBE_VF_INDEX(adapter->pool);
819 IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), IXGBE_VF_BIT(adapter->pool));
820 IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), IXGBE_VF_BIT(adapter->pool));
822 /* Allow VM-to-VM communication. */
823 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
825 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
826 vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
827 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
829 for (i = 0; i < adapter->num_vfs; i++)
830 ixgbe_init_vf(adapter, &adapter->vfs[i]);
831 } /* ixgbe_initialize_iov */
834 /* Check the max frame setting of all active VF's */
836 ixgbe_recalculate_max_frame(struct adapter *adapter)
840 for (int i = 0; i < adapter->num_vfs; i++) {
841 vf = &adapter->vfs[i];
842 if (vf->flags & IXGBE_VF_ACTIVE)
843 ixgbe_update_max_frame(adapter, vf->maximum_frame_size);
845 } /* ixgbe_recalculate_max_frame */
848 ixgbe_if_iov_vf_add(if_ctx_t ctx, u16 vfnum, const nvlist_t *config)
850 struct adapter *adapter;
854 adapter = iflib_get_softc(ctx);
856 KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
857 vfnum, adapter->num_vfs));
859 vf = &adapter->vfs[vfnum];
862 /* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
863 vf->rar_index = vfnum + 1;
864 vf->default_vlan = 0;
865 vf->maximum_frame_size = ETHER_MAX_LEN;
866 ixgbe_update_max_frame(adapter, vf->maximum_frame_size);
868 if (nvlist_exists_binary(config, "mac-addr")) {
869 mac = nvlist_get_binary(config, "mac-addr", NULL);
870 bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
871 if (nvlist_get_bool(config, "allow-set-mac"))
872 vf->flags |= IXGBE_VF_CAP_MAC;
875 * If the administrator has not specified a MAC address then
876 * we must allow the VF to choose one.
878 vf->flags |= IXGBE_VF_CAP_MAC;
880 vf->flags |= IXGBE_VF_ACTIVE;
882 ixgbe_init_vf(adapter, vf);
885 } /* ixgbe_if_iov_vf_add */
/* Stub used when the kernel is built without PCI_IOV support. */
void
ixgbe_handle_mbx(void *context)
{
    UNREFERENCED_PARAMETER(context);
} /* ixgbe_handle_mbx */