/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
#include "ixgbe.h"

#ifdef PCI_IOV

MALLOC_DEFINE(M_IXGBE_SRIOV, "ix_sriov", "ix SR-IOV allocations");
/************************************************************************
 * ixgbe_pci_iov_detach
 ************************************************************************/
int
ixgbe_pci_iov_detach(device_t dev)
{
	return pci_iov_detach(dev);
}
/************************************************************************
 * ixgbe_define_iov_schemas
 ************************************************************************/
void
ixgbe_define_iov_schemas(device_t dev, int *error)
{
	nvlist_t *pf_schema, *vf_schema;

	pf_schema = pci_iov_schema_alloc_node();
	vf_schema = pci_iov_schema_alloc_node();
	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
	pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
	    IOV_SCHEMA_HASDEFAULT, TRUE);
	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	*error = pci_iov_attach(dev, pf_schema, vf_schema);
	if (*error != 0) {
		device_printf(dev,
		    "Error %d setting up SR-IOV\n", *error);
	}
} /* ixgbe_define_iov_schemas */
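/*
 * Example usage (an illustrative sketch): the schema registered above is
 * what iovctl(8) validates a configuration against, so an iovctl.conf(5)
 * along these lines would create two VFs and let VF-0 override its MAC:
 *
 *	PF { device : "ix0"; num_vfs : 2; }
 *	VF-0 { mac-addr : "02:00:00:00:00:01"; allow-set-mac : true; }
 *
 * See iovctl.conf(5) for the authoritative syntax.
 */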
/************************************************************************
 * ixgbe_align_all_queue_indices
 ************************************************************************/
inline void
ixgbe_align_all_queue_indices(struct adapter *adapter)
{
	int i;
	int index;

	for (i = 0; i < adapter->num_queues; i++) {
		index = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i);
		adapter->rx_rings[i].me = index;
		adapter->tx_rings[i].me = index;
	}
} /* ixgbe_align_all_queue_indices */
/* Support functions for SR-IOV/VF management */
static inline void
ixgbe_send_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	if (vf->flags & IXGBE_VF_CTS)
		msg |= IXGBE_VT_MSGTYPE_CTS;

	adapter->hw.mbx.ops.write(&adapter->hw, &msg, 1, vf->pool);
}

static inline void
ixgbe_send_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_ACK);
}

static inline void
ixgbe_send_vf_nack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_NACK);
}

static inline void
ixgbe_process_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf)
{
	if (!(vf->flags & IXGBE_VF_CTS))
		ixgbe_send_vf_nack(adapter, vf, 0);
}

static inline boolean_t
ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
{
	return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
}
static inline int
ixgbe_vf_queues(int mode)
{
	switch (mode) {
	case IXGBE_64_VM:
		return (2);
	case IXGBE_32_VM:
		return (4);
	case IXGBE_NO_VM:
	default:
		return (0);
	}
}

inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	return ((vfnum * ixgbe_vf_queues(mode)) + num);
}
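/*
 * For example, in 32-VF mode each pool owns 4 queues, so queue 1 of VF 3
 * maps to absolute queue index 3 * 4 + 1 = 13; in 64-VF mode (2 queues
 * per pool) the same queue would be index 7.
 */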
static inline void
ixgbe_update_max_frame(struct adapter * adapter, int max_frame)
{
	if (adapter->max_frame_size < max_frame)
		adapter->max_frame_size = max_frame;
}
inline u32
ixgbe_get_mrqc(int iov_mode)
{
	u32 mrqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mrqc = IXGBE_MRQC_VMDQRSS64EN;
		break;
	case IXGBE_32_VM:
		mrqc = IXGBE_MRQC_VMDQRSS32EN;
		break;
	case IXGBE_NO_VM:
		mrqc = 0;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mrqc;
}
inline u32
ixgbe_get_mtqc(int iov_mode)
{
	uint32_t mtqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mtqc = IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_32_VM:
		mtqc = IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_NO_VM:
		mtqc = IXGBE_MTQC_64Q_1PB;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mtqc;
}
void
ixgbe_ping_all_vfs(struct adapter *adapter)
{
	struct ixgbe_vf *vf;

	for (int i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
	}
} /* ixgbe_ping_all_vfs */
static void
ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
    uint16_t tag)
{
	struct ixgbe_hw *hw;
	uint32_t vmolr, vmvir;

	hw = &adapter->hw;

	vf->vlan_tag = tag;

	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));

	/* Do not receive packets that pass inexact filters. */
	vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);

	/* Disable Multicast Promiscuous Mode. */
	vmolr &= ~IXGBE_VMOLR_MPE;

	/* Accept broadcasts. */
	vmolr |= IXGBE_VMOLR_BAM;

	if (tag == 0) {
		/* Accept non-vlan tagged traffic. */
		//vmolr |= IXGBE_VMOLR_AUPE;

		/* Allow VM to tag outgoing traffic; no default tag. */
		vmvir = 0;
	} else {
		/* Require vlan-tagged traffic. */
		vmolr &= ~IXGBE_VMOLR_AUPE;

		/* Tag all traffic with provided vlan tag. */
		vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
	}
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
} /* ixgbe_vf_set_default_vlan */
static boolean_t
ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
{
	/*
	 * Frame size compatibility between PF and VF is only a problem on
	 * 82599-based cards.  X540 and later support any combination of jumbo
	 * frames on PFs and VFs.
	 */
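	/*
	 * For concreteness: ETHER_MAX_LEN is 1518 bytes, so an 82599 PF
	 * configured for jumbo frames (max_frame_size > 1518) is
	 * incompatible with a legacy (pre-1.1) VF, while a 1.1+ VF remains
	 * compatible as long as it sticks to standard-sized frames.
	 */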
	if (adapter->hw.mac.type != ixgbe_mac_82599EB)
		return (TRUE);

	switch (vf->api_ver) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		/*
		 * On legacy (1.0 and older) VF versions, we don't support jumbo
		 * frames on either the PF or the VF.
		 */
		if (adapter->max_frame_size > ETHER_MAX_LEN ||
		    vf->max_frame_size > ETHER_MAX_LEN)
			return (FALSE);

		return (TRUE);
	case IXGBE_API_VER_1_1:
	default:
		/*
		 * 1.1 or later VF versions always work if they aren't using
		 * jumbo frames.
		 */
		if (vf->max_frame_size <= ETHER_MAX_LEN)
			return (TRUE);

		/*
		 * Jumbo frames only work with VFs if the PF is also using jumbo
		 * frames.
		 */
		if (adapter->max_frame_size <= ETHER_MAX_LEN)
			return (TRUE);

		return (FALSE);
	}
} /* ixgbe_vf_frame_size_compatible */
static void
ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf)
{
	ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan);

	// XXX clear multicast addresses

	ixgbe_clear_rar(&adapter->hw, vf->rar_index);

	vf->api_ver = IXGBE_API_VER_UNKNOWN;
} /* ixgbe_process_vf_reset */
static void
ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfte;

	hw = &adapter->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
	vfte |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
} /* ixgbe_vf_enable_transmit */
static void
ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfre;

	hw = &adapter->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
	if (ixgbe_vf_frame_size_compatible(adapter, vf))
		vfre |= IXGBE_VF_BIT(vf->pool);
	else
		vfre &= ~IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
} /* ixgbe_vf_enable_receive */
static void
ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t ack;
	uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];

	hw = &adapter->hw;

	ixgbe_process_vf_reset(adapter, vf);

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr,
		    vf->pool, TRUE);
		ack = IXGBE_VT_MSGTYPE_ACK;
	} else
		ack = IXGBE_VT_MSGTYPE_NACK;

	ixgbe_vf_enable_transmit(adapter, vf);
	ixgbe_vf_enable_receive(adapter, vf);

	vf->flags |= IXGBE_VF_CTS;

	resp[0] = IXGBE_VF_RESET | ack | IXGBE_VT_MSGTYPE_CTS;
	bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
	resp[3] = hw->mac.mc_filter_type;
	hw->mbx.ops.write(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
} /* ixgbe_vf_reset_msg */
static void
ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	uint8_t *mac;

	mac = (uint8_t*)&msg[1];

	/* Check that the VF has permission to change the MAC address. */
	if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	if (ixgbe_validate_mac_addr(mac) != 0) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);

	ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr, vf->pool,
	    TRUE);

	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_mac */
/*
 * VF multicast addresses are set by using the appropriate bit in
 * one of the 128 32-bit MTA registers (4096 possible hash values).
 */
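/*
 * For example, a hash value of 0x0B35 selects MTA register
 * (0x0B35 >> 5) & 0x7F = 0x59 and bit 0x0B35 & 0x1F = 0x15,
 * i.e. bit 21 of MTA[89], matching the computation in the loop below.
 */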
static void
ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg)
{
	u16 *list = (u16*)&msg[1];
	int entries;
	u32 vmolr, vec_bit, vec_reg, mta_reg;

	entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
	entries = min(entries, IXGBE_MAX_VF_MC);

	vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool));

	vf->num_mc_hashes = entries;

	/* Set the appropriate MTA bit */
	for (int i = 0; i < entries; i++) {
		vf->mc_hash[i] = list[i];
		vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
		vec_bit = vf->mc_hash[i] & 0x1F;
		mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg));
		mta_reg |= (1 << vec_bit);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg);
	}

	vmolr |= IXGBE_VMOLR_ROMPE;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr);
	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_mc_addr */
static void
ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	int enable;
	uint16_t tag;

	hw = &adapter->hw;
	enable = IXGBE_VT_MSGINFO(msg[0]);
	tag = msg[1] & IXGBE_VLVF_VLANID_MASK;

	if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	/* It is illegal to enable vlan tag 0. */
	if (tag == 0 && enable != 0) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	ixgbe_set_vfta(hw, tag, vf->pool, enable, false);
	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_vlan */
static void
ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t vf_max_size, pf_max_size, mhadd;

	hw = &adapter->hw;
	vf_max_size = msg[1];

	if (vf_max_size < ETHER_CRC_LEN) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		return;
	}

	vf_max_size -= ETHER_CRC_LEN;

	if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		return;
	}

	vf->max_frame_size = vf_max_size;
	ixgbe_update_max_frame(adapter, vf->max_frame_size);

	/*
	 * We might have to disable reception to this VF if the frame size is
	 * not compatible with the config on the PF.
	 */
	ixgbe_vf_enable_receive(adapter, vf);

	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
	pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;

	if (pf_max_size < adapter->max_frame_size) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_lpe */
static void
ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
    uint32_t *msg)
{
	//XXX implement this
	ixgbe_send_vf_nack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_macvlan */
static void
ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf,
    uint32_t *msg)
{
	switch (msg[1]) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_1_1:
		vf->api_ver = msg[1];
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		break;
	default:
		vf->api_ver = IXGBE_API_VER_UNKNOWN;
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		break;
	}
} /* ixgbe_vf_api_negotiate */
static void
ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
	int num_queues;

	hw = &adapter->hw;

	/* GET_QUEUES is not supported on pre-1.1 APIs. */
	switch (vf->api_ver) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK |
	    IXGBE_VT_MSGTYPE_CTS;

	num_queues = ixgbe_vf_queues(adapter->iov_mode);
	resp[IXGBE_VF_TX_QUEUES] = num_queues;
	resp[IXGBE_VF_RX_QUEUES] = num_queues;
	resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
	resp[IXGBE_VF_DEF_QUEUE] = 0;

	hw->mbx.ops.write(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
} /* ixgbe_vf_get_queues */
static void
ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t msg[IXGBE_VFMAILBOX_SIZE];
	int error;

	hw = &adapter->hw;

	error = hw->mbx.ops.read(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);

	if (error != 0)
		return;

	CTR3(KTR_MALLOC, "%s: received msg %x from %d",
	    adapter->ifp->if_xname, msg[0], vf->pool);
	if (msg[0] == IXGBE_VF_RESET) {
		ixgbe_vf_reset_msg(adapter, vf, msg);
		return;
	}

	if (!(vf->flags & IXGBE_VF_CTS)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	switch (msg[0] & IXGBE_VT_MSG_MASK) {
	case IXGBE_VF_SET_MAC_ADDR:
		ixgbe_vf_set_mac(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MULTICAST:
		ixgbe_vf_set_mc_addr(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_VLAN:
		ixgbe_vf_set_vlan(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_LPE:
		ixgbe_vf_set_lpe(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MACVLAN:
		ixgbe_vf_set_macvlan(adapter, vf, msg);
		break;
	case IXGBE_VF_API_NEGOTIATE:
		ixgbe_vf_api_negotiate(adapter, vf, msg);
		break;
	case IXGBE_VF_GET_QUEUES:
		ixgbe_vf_get_queues(adapter, vf, msg);
		break;
	default:
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
	}
} /* ixgbe_process_vf_msg */
/* Tasklet for handling VF -> PF mailbox messages */
void
ixgbe_handle_mbx(void *context, int pending)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	struct ixgbe_vf *vf;
	int i;

	adapter = context;
	hw = &adapter->hw;

	IXGBE_CORE_LOCK(adapter);
	for (i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];

		if (vf->flags & IXGBE_VF_ACTIVE) {
			if (hw->mbx.ops.check_for_rst(hw, vf->pool) == 0)
				ixgbe_process_vf_reset(adapter, vf);

			if (hw->mbx.ops.check_for_msg(hw, vf->pool) == 0)
				ixgbe_process_vf_msg(adapter, vf);

			if (hw->mbx.ops.check_for_ack(hw, vf->pool) == 0)
				ixgbe_process_vf_ack(adapter, vf);
		}
	}
	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_handle_mbx */
int
ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
{
	struct adapter *adapter;
	int retval = 0;

	adapter = device_get_softc(dev);
	adapter->iov_mode = IXGBE_NO_VM;

	if (num_vfs == 0) {
		/* Would we ever get num_vfs = 0? */
		retval = EINVAL;
		goto err_init_iov;
	}

	/*
	 * We've got to reserve a VM's worth of queues for the PF,
	 * thus we go into "64 VF mode" if 32+ VFs are requested.
	 * With 64 VFs, you can only have two queues per VF.
	 * With 32 VFs, you can have up to four queues per VF.
	 */
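	/*
	 * For example, a request for 8 VFs selects 32-VF mode (pool 31,
	 * up to 4 queues per VF); a request for 40 VFs selects 64-VF mode
	 * (pool 63, 2 queues per VF).
	 */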
	if (num_vfs >= IXGBE_32_VM)
		adapter->iov_mode = IXGBE_64_VM;
	else
		adapter->iov_mode = IXGBE_32_VM;

	/* Again, reserving 1 VM's worth of queues for the PF */
	adapter->pool = adapter->iov_mode - 1;

	if ((num_vfs > adapter->pool) || (num_vfs >= IXGBE_64_VM)) {
		retval = ENOSPC;
		goto err_init_iov;
	}

	IXGBE_CORE_LOCK(adapter);

	adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE_SRIOV,
	    M_NOWAIT | M_ZERO);

	if (adapter->vfs == NULL) {
		retval = ENOMEM;
		IXGBE_CORE_UNLOCK(adapter);
		goto err_init_iov;
	}

	adapter->num_vfs = num_vfs;
	adapter->init_locked(adapter);
	adapter->feat_en |= IXGBE_FEATURE_SRIOV;

	IXGBE_CORE_UNLOCK(adapter);

	return retval;

err_init_iov:
	adapter->num_vfs = 0;
	adapter->pool = 0;
	adapter->iov_mode = IXGBE_NO_VM;

	return retval;
} /* ixgbe_init_iov */
void
ixgbe_uninit_iov(device_t dev)
{
	struct ixgbe_hw *hw;
	struct adapter *adapter;
	uint32_t pf_reg, vf_reg;

	adapter = device_get_softc(dev);
	hw = &adapter->hw;

	IXGBE_CORE_LOCK(adapter);

	/* Enable rx/tx for the PF and disable it for all VFs. */
	pf_reg = IXGBE_VF_INDEX(adapter->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg), IXGBE_VF_BIT(adapter->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg), IXGBE_VF_BIT(adapter->pool));

	if (pf_reg == 0)
		vf_reg = 1;
	else
		vf_reg = 0;
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);

	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);

	free(adapter->vfs, M_IXGBE_SRIOV);
	adapter->vfs = NULL;
	adapter->num_vfs = 0;
	adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;

	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_uninit_iov */
static void
ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, pfmbimr;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	hw = &adapter->hw;

	if (!(vf->flags & IXGBE_VF_ACTIVE))
		return;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
	pfmbimr |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);

	ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);

	// XXX multicast addresses

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&adapter->hw, vf->rar_index,
		    vf->ether_addr, vf->pool, TRUE);
	}

	ixgbe_vf_enable_transmit(adapter, vf);
	ixgbe_vf_enable_receive(adapter, vf);

	ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
} /* ixgbe_init_vf */
void
ixgbe_initialize_iov(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
	int i;

	if (adapter->iov_mode == IXGBE_NO_VM)
		return;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	/* RMW appropriate registers based on IOV mode */
	/* Read... */
	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
	/* Modify... */
	mrqc &= ~IXGBE_MRQC_MRQE_MASK;
	mtqc = IXGBE_MTQC_VT_ENA;      /* No initial MTQC read needed */
	gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
	switch (adapter->iov_mode) {
	case IXGBE_64_VM:
		mrqc |= IXGBE_MRQC_VMDQRSS64EN;
		mtqc |= IXGBE_MTQC_64VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
		gpie |= IXGBE_GPIE_VTMODE_64;
		break;
	case IXGBE_32_VM:
		mrqc |= IXGBE_MRQC_VMDQRSS32EN;
		mtqc |= IXGBE_MTQC_32VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
		gpie |= IXGBE_GPIE_VTMODE_32;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", adapter->iov_mode);
	}
	/* Write... */
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Enable rx/tx for the PF. */
	vf_reg = IXGBE_VF_INDEX(adapter->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), IXGBE_VF_BIT(adapter->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), IXGBE_VF_BIT(adapter->pool));

	/* Allow VM-to-VM communication. */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

	vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
	vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);

	for (i = 0; i < adapter->num_vfs; i++)
		ixgbe_init_vf(adapter, &adapter->vfs[i]);
} /* ixgbe_initialize_iov */
/* Check the max frame setting of all active VFs */
void
ixgbe_recalculate_max_frame(struct adapter *adapter)
{
	struct ixgbe_vf *vf;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	for (int i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_update_max_frame(adapter, vf->max_frame_size);
	}
} /* ixgbe_recalculate_max_frame */
int
ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
{
	struct adapter *adapter;
	struct ixgbe_vf *vf;
	const void *mac;

	adapter = device_get_softc(dev);

	KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
	    vfnum, adapter->num_vfs));

	IXGBE_CORE_LOCK(adapter);
	vf = &adapter->vfs[vfnum];
	vf->pool = vfnum;

	/* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
	vf->rar_index = vfnum + 1;
	vf->default_vlan = 0;
	vf->max_frame_size = ETHER_MAX_LEN;
	ixgbe_update_max_frame(adapter, vf->max_frame_size);

	if (nvlist_exists_binary(config, "mac-addr")) {
		mac = nvlist_get_binary(config, "mac-addr", NULL);
		bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
		if (nvlist_get_bool(config, "allow-set-mac"))
			vf->flags |= IXGBE_VF_CAP_MAC;
	} else {
		/*
		 * If the administrator has not specified a MAC address then
		 * we must allow the VF to choose one.
		 */
		vf->flags |= IXGBE_VF_CAP_MAC;
	}

	vf->flags |= IXGBE_VF_ACTIVE;

	ixgbe_init_vf(adapter, vf);
	IXGBE_CORE_UNLOCK(adapter);

	return (0);
} /* ixgbe_add_vf */
#else

void
ixgbe_handle_mbx(void *context, int pending)
{
	UNREFERENCED_2PARAMETER(context, pending);
} /* ixgbe_handle_mbx */

inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	UNREFERENCED_2PARAMETER(mode, vfnum);

	return (0);
} /* ixgbe_vf_que_index */

#endif