1 /******************************************************************************
3 Copyright (c) 2001-2017, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
39 MALLOC_DECLARE(M_IXGBE);
41 /************************************************************************
42 * ixgbe_pci_iov_detach
43 ************************************************************************/
45 ixgbe_pci_iov_detach(device_t dev)
47 return pci_iov_detach(dev);
50 /************************************************************************
51 * ixgbe_define_iov_schemas
52 ************************************************************************/
54 ixgbe_define_iov_schemas(device_t dev, int *error)
56 nvlist_t *pf_schema, *vf_schema;
58 pf_schema = pci_iov_schema_alloc_node();
59 vf_schema = pci_iov_schema_alloc_node();
60 pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
61 pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
62 IOV_SCHEMA_HASDEFAULT, TRUE);
63 pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
64 IOV_SCHEMA_HASDEFAULT, FALSE);
65 pci_iov_schema_add_bool(vf_schema, "allow-promisc",
66 IOV_SCHEMA_HASDEFAULT, FALSE);
67 *error = pci_iov_attach(dev, pf_schema, vf_schema);
70 "Error %d setting up SR-IOV\n", *error);
72 } /* ixgbe_define_iov_schemas */
74 /************************************************************************
75 * ixgbe_align_all_queue_indices
76 ************************************************************************/
78 ixgbe_align_all_queue_indices(struct adapter *adapter)
83 for (i = 0; i < adapter->num_queues; i++) {
84 index = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i);
85 adapter->rx_rings[i].me = index;
86 adapter->tx_rings[i].me = index;
90 /* Support functions for SR-IOV/VF management */
92 ixgbe_send_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
94 if (vf->flags & IXGBE_VF_CTS)
95 msg |= IXGBE_VT_MSGTYPE_CTS;
97 ixgbe_write_mbx(&adapter->hw, &msg, 1, vf->pool);
101 ixgbe_send_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
103 msg &= IXGBE_VT_MSG_MASK;
104 ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_ACK);
108 ixgbe_send_vf_nack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
110 msg &= IXGBE_VT_MSG_MASK;
111 ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_NACK);
115 ixgbe_process_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf)
117 if (!(vf->flags & IXGBE_VF_CTS))
118 ixgbe_send_vf_nack(adapter, vf, 0);
121 static inline boolean_t
122 ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
124 return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
128 ixgbe_vf_queues(int mode)
/* Absolute hardware queue index for queue 'num' of pool 'vfnum'. */
int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	return ((vfnum * ixgbe_vf_queues(mode)) + num);
}
148 ixgbe_update_max_frame(struct adapter * adapter, int max_frame)
150 if (adapter->max_frame_size < max_frame)
151 adapter->max_frame_size = max_frame;
155 ixgbe_get_mrqc(int iov_mode)
161 mrqc = IXGBE_MRQC_VMDQRSS64EN;
164 mrqc = IXGBE_MRQC_VMDQRSS32EN;
170 panic("Unexpected SR-IOV mode %d", iov_mode);
178 ixgbe_get_mtqc(int iov_mode)
184 mtqc = IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
187 mtqc = IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
190 mtqc = IXGBE_MTQC_64Q_1PB;
193 panic("Unexpected SR-IOV mode %d", iov_mode);
200 ixgbe_ping_all_vfs(struct adapter *adapter)
204 for (int i = 0; i < adapter->num_vfs; i++) {
205 vf = &adapter->vfs[i];
206 if (vf->flags & IXGBE_VF_ACTIVE)
207 ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
209 } /* ixgbe_ping_all_vfs */
213 ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
217 uint32_t vmolr, vmvir;
223 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));
225 /* Do not receive packets that pass inexact filters. */
226 vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);
228 /* Disable Multicast Promicuous Mode. */
229 vmolr &= ~IXGBE_VMOLR_MPE;
231 /* Accept broadcasts. */
232 vmolr |= IXGBE_VMOLR_BAM;
235 /* Accept non-vlan tagged traffic. */
236 //vmolr |= IXGBE_VMOLR_AUPE;
238 /* Allow VM to tag outgoing traffic; no default tag. */
241 /* Require vlan-tagged traffic. */
242 vmolr &= ~IXGBE_VMOLR_AUPE;
244 /* Tag all traffic with provided vlan tag. */
245 vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
247 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
248 IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
249 } /* ixgbe_vf_set_default_vlan */
253 ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
257 * Frame size compatibility between PF and VF is only a problem on
258 * 82599-based cards. X540 and later support any combination of jumbo
259 * frames on PFs and VFs.
261 if (adapter->hw.mac.type != ixgbe_mac_82599EB)
264 switch (vf->api_ver) {
265 case IXGBE_API_VER_1_0:
266 case IXGBE_API_VER_UNKNOWN:
268 * On legacy (1.0 and older) VF versions, we don't support jumbo
269 * frames on either the PF or the VF.
271 if (adapter->max_frame_size > ETHER_MAX_LEN ||
272 vf->max_frame_size > ETHER_MAX_LEN)
278 case IXGBE_API_VER_1_1:
281 * 1.1 or later VF versions always work if they aren't using
284 if (vf->max_frame_size <= ETHER_MAX_LEN)
288 * Jumbo frames only work with VFs if the PF is also using jumbo
291 if (adapter->max_frame_size <= ETHER_MAX_LEN)
297 } /* ixgbe_vf_frame_size_compatible */
301 ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf)
303 ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan);
305 // XXX clear multicast addresses
307 ixgbe_clear_rar(&adapter->hw, vf->rar_index);
309 vf->api_ver = IXGBE_API_VER_UNKNOWN;
310 } /* ixgbe_process_vf_reset */
314 ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf)
317 uint32_t vf_index, vfte;
321 vf_index = IXGBE_VF_INDEX(vf->pool);
322 vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
323 vfte |= IXGBE_VF_BIT(vf->pool);
324 IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
325 } /* ixgbe_vf_enable_transmit */
329 ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf)
332 uint32_t vf_index, vfre;
336 vf_index = IXGBE_VF_INDEX(vf->pool);
337 vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
338 if (ixgbe_vf_frame_size_compatible(adapter, vf))
339 vfre |= IXGBE_VF_BIT(vf->pool);
341 vfre &= ~IXGBE_VF_BIT(vf->pool);
342 IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
343 } /* ixgbe_vf_enable_receive */
347 ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
351 uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];
355 ixgbe_process_vf_reset(adapter, vf);
357 if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
358 ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr,
360 ack = IXGBE_VT_MSGTYPE_ACK;
362 ack = IXGBE_VT_MSGTYPE_NACK;
364 ixgbe_vf_enable_transmit(adapter, vf);
365 ixgbe_vf_enable_receive(adapter, vf);
367 vf->flags |= IXGBE_VF_CTS;
369 resp[0] = IXGBE_VF_RESET | ack | IXGBE_VT_MSGTYPE_CTS;
370 bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
371 resp[3] = hw->mac.mc_filter_type;
372 ixgbe_write_mbx(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
373 } /* ixgbe_vf_reset_msg */
377 ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
381 mac = (uint8_t*)&msg[1];
383 /* Check that the VF has permission to change the MAC address. */
384 if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
385 ixgbe_send_vf_nack(adapter, vf, msg[0]);
389 if (ixgbe_validate_mac_addr(mac) != 0) {
390 ixgbe_send_vf_nack(adapter, vf, msg[0]);
394 bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
396 ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr, vf->pool,
399 ixgbe_send_vf_ack(adapter, vf, msg[0]);
400 } /* ixgbe_vf_set_mac */
404 * VF multicast addresses are set by using the appropriate bit in
405 * 1 of 128 32 bit addresses (4096 possible).
408 ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg)
410 u16 *list = (u16*)&msg[1];
412 u32 vmolr, vec_bit, vec_reg, mta_reg;
414 entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
415 entries = min(entries, IXGBE_MAX_VF_MC);
417 vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool));
419 vf->num_mc_hashes = entries;
421 /* Set the appropriate MTA bit */
422 for (int i = 0; i < entries; i++) {
423 vf->mc_hash[i] = list[i];
424 vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
425 vec_bit = vf->mc_hash[i] & 0x1F;
426 mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg));
427 mta_reg |= (1 << vec_bit);
428 IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg);
431 vmolr |= IXGBE_VMOLR_ROMPE;
432 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr);
433 ixgbe_send_vf_ack(adapter, vf, msg[0]);
435 } /* ixgbe_vf_set_mc_addr */
439 ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
446 enable = IXGBE_VT_MSGINFO(msg[0]);
447 tag = msg[1] & IXGBE_VLVF_VLANID_MASK;
449 if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
450 ixgbe_send_vf_nack(adapter, vf, msg[0]);
454 /* It is illegal to enable vlan tag 0. */
455 if (tag == 0 && enable != 0){
456 ixgbe_send_vf_nack(adapter, vf, msg[0]);
460 ixgbe_set_vfta(hw, tag, vf->pool, enable, false);
461 ixgbe_send_vf_ack(adapter, vf, msg[0]);
462 } /* ixgbe_vf_set_vlan */
466 ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
469 uint32_t vf_max_size, pf_max_size, mhadd;
472 vf_max_size = msg[1];
474 if (vf_max_size < ETHER_CRC_LEN) {
475 /* We intentionally ACK invalid LPE requests. */
476 ixgbe_send_vf_ack(adapter, vf, msg[0]);
480 vf_max_size -= ETHER_CRC_LEN;
482 if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
483 /* We intentionally ACK invalid LPE requests. */
484 ixgbe_send_vf_ack(adapter, vf, msg[0]);
488 vf->max_frame_size = vf_max_size;
489 ixgbe_update_max_frame(adapter, vf->max_frame_size);
492 * We might have to disable reception to this VF if the frame size is
493 * not compatible with the config on the PF.
495 ixgbe_vf_enable_receive(adapter, vf);
497 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
498 pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
500 if (pf_max_size < adapter->max_frame_size) {
501 mhadd &= ~IXGBE_MHADD_MFS_MASK;
502 mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
503 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
506 ixgbe_send_vf_ack(adapter, vf, msg[0]);
507 } /* ixgbe_vf_set_lpe */
511 ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
515 ixgbe_send_vf_nack(adapter, vf, msg[0]);
516 } /* ixgbe_vf_set_macvlan */
520 ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf,
525 case IXGBE_API_VER_1_0:
526 case IXGBE_API_VER_1_1:
527 vf->api_ver = msg[1];
528 ixgbe_send_vf_ack(adapter, vf, msg[0]);
531 vf->api_ver = IXGBE_API_VER_UNKNOWN;
532 ixgbe_send_vf_nack(adapter, vf, msg[0]);
535 } /* ixgbe_vf_api_negotiate */
539 ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
542 uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
547 /* GET_QUEUES is not supported on pre-1.1 APIs. */
549 case IXGBE_API_VER_1_0:
550 case IXGBE_API_VER_UNKNOWN:
551 ixgbe_send_vf_nack(adapter, vf, msg[0]);
555 resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK |
556 IXGBE_VT_MSGTYPE_CTS;
558 num_queues = ixgbe_vf_queues(adapter->iov_mode);
559 resp[IXGBE_VF_TX_QUEUES] = num_queues;
560 resp[IXGBE_VF_RX_QUEUES] = num_queues;
561 resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
562 resp[IXGBE_VF_DEF_QUEUE] = 0;
564 ixgbe_write_mbx(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
565 } /* ixgbe_vf_get_queues */
569 ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf)
572 uint32_t msg[IXGBE_VFMAILBOX_SIZE];
577 error = ixgbe_read_mbx(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);
582 CTR3(KTR_MALLOC, "%s: received msg %x from %d",
583 adapter->ifp->if_xname, msg[0], vf->pool);
584 if (msg[0] == IXGBE_VF_RESET) {
585 ixgbe_vf_reset_msg(adapter, vf, msg);
589 if (!(vf->flags & IXGBE_VF_CTS)) {
590 ixgbe_send_vf_nack(adapter, vf, msg[0]);
594 switch (msg[0] & IXGBE_VT_MSG_MASK) {
595 case IXGBE_VF_SET_MAC_ADDR:
596 ixgbe_vf_set_mac(adapter, vf, msg);
598 case IXGBE_VF_SET_MULTICAST:
599 ixgbe_vf_set_mc_addr(adapter, vf, msg);
601 case IXGBE_VF_SET_VLAN:
602 ixgbe_vf_set_vlan(adapter, vf, msg);
604 case IXGBE_VF_SET_LPE:
605 ixgbe_vf_set_lpe(adapter, vf, msg);
607 case IXGBE_VF_SET_MACVLAN:
608 ixgbe_vf_set_macvlan(adapter, vf, msg);
610 case IXGBE_VF_API_NEGOTIATE:
611 ixgbe_vf_api_negotiate(adapter, vf, msg);
613 case IXGBE_VF_GET_QUEUES:
614 ixgbe_vf_get_queues(adapter, vf, msg);
617 ixgbe_send_vf_nack(adapter, vf, msg[0]);
619 } /* ixgbe_process_vf_msg */
622 /* Tasklet for handling VF -> PF mailbox messages */
624 ixgbe_handle_mbx(void *context, int pending)
626 struct adapter *adapter;
634 IXGBE_CORE_LOCK(adapter);
635 for (i = 0; i < adapter->num_vfs; i++) {
636 vf = &adapter->vfs[i];
638 if (vf->flags & IXGBE_VF_ACTIVE) {
639 if (ixgbe_check_for_rst(hw, vf->pool) == 0)
640 ixgbe_process_vf_reset(adapter, vf);
642 if (ixgbe_check_for_msg(hw, vf->pool) == 0)
643 ixgbe_process_vf_msg(adapter, vf);
645 if (ixgbe_check_for_ack(hw, vf->pool) == 0)
646 ixgbe_process_vf_ack(adapter, vf);
649 IXGBE_CORE_UNLOCK(adapter);
650 } /* ixgbe_handle_mbx */
653 ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
655 struct adapter *adapter;
658 adapter = device_get_softc(dev);
659 adapter->iov_mode = IXGBE_NO_VM;
660 adapter->num_vfs = num_vfs;
662 if (adapter->num_vfs == 0) {
663 /* Would we ever get num_vfs = 0? */
668 if (adapter->num_queues <= 2)
669 adapter->iov_mode = IXGBE_64_VM;
670 else if (adapter->num_queues <= 4)
671 adapter->iov_mode = IXGBE_32_VM;
677 /* Reserve 1 VM's worth of queues for the PF */
678 adapter->pool = adapter->iov_mode - 1;
680 if (num_vfs > adapter->pool) {
685 IXGBE_CORE_LOCK(adapter);
687 adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE,
690 if (adapter->vfs == NULL) {
692 IXGBE_CORE_UNLOCK(adapter);
696 ixgbe_init_locked(adapter);
697 adapter->feat_en |= IXGBE_FEATURE_SRIOV;
699 IXGBE_CORE_UNLOCK(adapter);
704 adapter->num_vfs = 0;
706 adapter->iov_mode = IXGBE_NO_VM;
709 } /* ixgbe_init_iov */
712 ixgbe_uninit_iov(device_t dev)
715 struct adapter *adapter;
716 uint32_t pf_reg, vf_reg;
718 adapter = device_get_softc(dev);
721 IXGBE_CORE_LOCK(adapter);
723 /* Enable rx/tx for the PF and disable it for all VFs. */
724 pf_reg = IXGBE_VF_INDEX(adapter->pool);
725 IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg), IXGBE_VF_BIT(adapter->pool));
726 IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg), IXGBE_VF_BIT(adapter->pool));
732 IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
733 IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);
735 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
737 free(adapter->vfs, M_IXGBE);
739 adapter->num_vfs = 0;
740 adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
742 IXGBE_CORE_UNLOCK(adapter);
743 } /* ixgbe_uninit_iov */
746 ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
749 uint32_t vf_index, pfmbimr;
751 IXGBE_CORE_LOCK_ASSERT(adapter);
755 if (!(vf->flags & IXGBE_VF_ACTIVE))
758 vf_index = IXGBE_VF_INDEX(vf->pool);
759 pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
760 pfmbimr |= IXGBE_VF_BIT(vf->pool);
761 IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);
763 ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);
765 // XXX multicast addresses
767 if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
768 ixgbe_set_rar(&adapter->hw, vf->rar_index,
769 vf->ether_addr, vf->pool, TRUE);
772 ixgbe_vf_enable_transmit(adapter, vf);
773 ixgbe_vf_enable_receive(adapter, vf);
775 ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
776 } /* ixgbe_init_vf */
779 ixgbe_initialize_iov(struct adapter *adapter)
781 struct ixgbe_hw *hw = &adapter->hw;
782 uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
785 if (adapter->iov_mode == IXGBE_NO_VM)
788 IXGBE_CORE_LOCK_ASSERT(adapter);
790 /* RMW appropriate registers based on IOV mode */
792 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
793 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
794 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
796 mrqc &= ~IXGBE_MRQC_MRQE_MASK;
797 mtqc = IXGBE_MTQC_VT_ENA; /* No initial MTQC read needed */
798 gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
799 gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
800 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
801 switch (adapter->iov_mode) {
803 mrqc |= IXGBE_MRQC_VMDQRSS64EN;
804 mtqc |= IXGBE_MTQC_64VF;
805 gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
806 gpie |= IXGBE_GPIE_VTMODE_64;
809 mrqc |= IXGBE_MRQC_VMDQRSS32EN;
810 mtqc |= IXGBE_MTQC_32VF;
811 gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
812 gpie |= IXGBE_GPIE_VTMODE_32;
815 panic("Unexpected SR-IOV mode %d", adapter->iov_mode);
818 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
819 IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
820 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
821 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
823 /* Enable rx/tx for the PF. */
824 vf_reg = IXGBE_VF_INDEX(adapter->pool);
825 IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), IXGBE_VF_BIT(adapter->pool));
826 IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), IXGBE_VF_BIT(adapter->pool));
828 /* Allow VM-to-VM communication. */
829 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
831 vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
832 vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
833 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
835 for (i = 0; i < adapter->num_vfs; i++)
836 ixgbe_init_vf(adapter, &adapter->vfs[i]);
837 } /* ixgbe_initialize_iov */
840 /* Check the max frame setting of all active VF's */
842 ixgbe_recalculate_max_frame(struct adapter *adapter)
846 IXGBE_CORE_LOCK_ASSERT(adapter);
848 for (int i = 0; i < adapter->num_vfs; i++) {
849 vf = &adapter->vfs[i];
850 if (vf->flags & IXGBE_VF_ACTIVE)
851 ixgbe_update_max_frame(adapter, vf->max_frame_size);
853 } /* ixgbe_recalculate_max_frame */
856 ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
858 struct adapter *adapter;
862 adapter = device_get_softc(dev);
864 KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
865 vfnum, adapter->num_vfs));
867 IXGBE_CORE_LOCK(adapter);
868 vf = &adapter->vfs[vfnum];
871 /* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
872 vf->rar_index = vfnum + 1;
873 vf->default_vlan = 0;
874 vf->max_frame_size = ETHER_MAX_LEN;
875 ixgbe_update_max_frame(adapter, vf->max_frame_size);
877 if (nvlist_exists_binary(config, "mac-addr")) {
878 mac = nvlist_get_binary(config, "mac-addr", NULL);
879 bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
880 if (nvlist_get_bool(config, "allow-set-mac"))
881 vf->flags |= IXGBE_VF_CAP_MAC;
884 * If the administrator has not specified a MAC address then
885 * we must allow the VF to choose one.
887 vf->flags |= IXGBE_VF_CAP_MAC;
889 vf->flags = IXGBE_VF_ACTIVE;
891 ixgbe_init_vf(adapter, vf);
892 IXGBE_CORE_UNLOCK(adapter);
/* Stub used when the kernel is built without PCI_IOV support. */
void
ixgbe_handle_mbx(void *context, int pending)
{
	UNREFERENCED_2PARAMETER(context, pending);
} /* ixgbe_handle_mbx */