/*-
 * Copyright (c) 2013-2015 Sandvine Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/iov.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/pciio.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

#include <machine/bus.h>
#include <machine/stdarg.h>

#include <sys/nv.h>
#include <sys/iov_schema.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_iov.h>
#include <dev/pci/pci_private.h>
#include <dev/pci/pci_iov_private.h>
#include <dev/pci/schema_private.h>
static MALLOC_DEFINE(M_SRIOV, "sr_iov", "PCI SR-IOV allocations");

static d_ioctl_t pci_iov_ioctl;

static struct cdevsw iov_cdevsw = {
	.d_version = D_VERSION,
	.d_name = "iov",
	.d_ioctl = pci_iov_ioctl
};
/*
 * The maximum amount of memory we will allocate for user configuration of an
 * SR-IOV device.  1MB ought to be enough for anyone, but leave this
 * configurable just in case.
 */
static u_long pci_iov_max_config = 1024 * 1024;
SYSCTL_ULONG(_hw_pci, OID_AUTO, iov_max_config, CTLFLAG_RWTUN,
    &pci_iov_max_config, 0, "Maximum allowed size of SR-IOV configuration.");
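
/*
 * Convenience accessors for the PF's SR-IOV capability registers: "r" is a
 * register offset relative to the start of the capability (iov_pos) and "w"
 * is the access width in bytes.
 */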
#define IOV_READ(d, r, w) \
	pci_read_config((d)->cfg.dev, (d)->cfg.iov->iov_pos + r, w)

#define IOV_WRITE(d, r, v, w) \
	pci_write_config((d)->cfg.dev, (d)->cfg.iov->iov_pos + r, v, w)
static nvlist_t	*pci_iov_build_schema(nvlist_t **pf_schema,
		    nvlist_t **vf_schema);
static void	pci_iov_build_pf_schema(nvlist_t *schema,
		    nvlist_t **driver_schema);
static void	pci_iov_build_vf_schema(nvlist_t *schema,
		    nvlist_t **driver_schema);
static int	pci_iov_delete_iov_children(struct pci_devinfo *dinfo);
static nvlist_t	*pci_iov_get_pf_subsystem_schema(void);
static nvlist_t	*pci_iov_get_vf_subsystem_schema(void);
int
pci_iov_attach_name(device_t dev, struct nvlist *pf_schema,
    struct nvlist *vf_schema, const char *fmt, ...)
{
	char buf[NAME_MAX + 1];
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(buf, sizeof(buf), fmt, ap);
	va_end(ap);
	return (PCI_IOV_ATTACH(device_get_parent(dev), dev, pf_schema,
	    vf_schema, buf));
}
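
/*
 * Usage sketch (not part of this file): a PF driver typically builds its
 * schemas and registers with the SR-IOV framework from its attach routine.
 * The "num_queues" parameter and the "foo%d" name below are hypothetical.
 *
 *	pf_schema = pci_iov_schema_alloc_node();
 *	vf_schema = pci_iov_schema_alloc_node();
 *	pci_iov_schema_add_uint16(vf_schema, "num_queues",
 *	    IOV_SCHEMA_HASDEFAULT, 4);
 *	error = pci_iov_attach_name(dev, pf_schema, vf_schema, "foo%d",
 *	    device_get_unit(dev));
 *
 * The bus takes ownership of both schemas; on success the driver later
 * receives PCI_IOV_INIT()/PCI_IOV_ADD_VF() calls when VFs are configured
 * through the character device created below.
 */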
int
pci_iov_attach_method(device_t bus, device_t dev, nvlist_t *pf_schema,
    nvlist_t *vf_schema, const char *name)
{
	device_t pcib;
	struct pci_devinfo *dinfo;
	struct pcicfg_iov *iov;
	nvlist_t *schema;
	uint32_t version;
	int error;
	int iov_pos;

	dinfo = device_get_ivars(dev);
	pcib = device_get_parent(bus);
	schema = NULL;

	error = pci_find_extcap(dev, PCIZ_SRIOV, &iov_pos);
	if (error != 0)
		return (error);

	version = pci_read_config(dev, iov_pos, 4);
	if (PCI_EXTCAP_VER(version) != 1) {
		if (bootverbose)
			device_printf(dev,
			    "Unsupported version of SR-IOV (%d) detected\n",
			    PCI_EXTCAP_VER(version));
		return (ENXIO);
	}

	iov = malloc(sizeof(*dinfo->cfg.iov), M_SRIOV, M_WAITOK | M_ZERO);

	mtx_lock(&Giant);
	if (dinfo->cfg.iov != NULL) {
		error = EBUSY;
		goto cleanup;
	}
	iov->iov_pos = iov_pos;

	schema = pci_iov_build_schema(&pf_schema, &vf_schema);
	if (schema == NULL) {
		error = ENOMEM;
		goto cleanup;
	}

	error = pci_iov_validate_schema(schema);
	if (error != 0)
		goto cleanup;
	iov->iov_schema = schema;

	iov->iov_cdev = make_dev(&iov_cdevsw, device_get_unit(dev),
	    UID_ROOT, GID_WHEEL, 0600, "iov/%s", name);
	if (iov->iov_cdev == NULL) {
		error = ENOMEM;
		goto cleanup;
	}

	dinfo->cfg.iov = iov;
	iov->iov_cdev->si_drv1 = dinfo;
	mtx_unlock(&Giant);
	return (0);

cleanup:
	nvlist_destroy(schema);
	nvlist_destroy(pf_schema);
	nvlist_destroy(vf_schema);
	free(iov, M_SRIOV);
	mtx_unlock(&Giant);
	return (error);
}
int
pci_iov_detach_method(device_t bus, device_t dev)
{
	struct pci_devinfo *dinfo;
	struct pcicfg_iov *iov;
	int error;

	mtx_lock(&Giant);
	dinfo = device_get_ivars(dev);
	iov = dinfo->cfg.iov;
	if (iov == NULL) {
		mtx_unlock(&Giant);
		return (0);
	}
	if ((iov->iov_flags & IOV_BUSY) != 0) {
		mtx_unlock(&Giant);
		return (EBUSY);
	}

	error = pci_iov_delete_iov_children(dinfo);
	if (error != 0) {
		mtx_unlock(&Giant);
		return (error);
	}

	dinfo->cfg.iov = NULL;

	if (iov->iov_cdev) {
		destroy_dev(iov->iov_cdev);
		iov->iov_cdev = NULL;
	}
	nvlist_destroy(iov->iov_schema);
	free(iov, M_SRIOV);
	mtx_unlock(&Giant);
	return (0);
}
static nvlist_t *
pci_iov_build_schema(nvlist_t **pf, nvlist_t **vf)
{
	nvlist_t *schema, *pf_driver, *vf_driver;

	/* We always take ownership of the schemas. */
	pf_driver = *pf;
	*pf = NULL;
	vf_driver = *vf;
	*vf = NULL;

	schema = pci_iov_schema_alloc_node();
	if (schema == NULL)
		goto cleanup;

	pci_iov_build_pf_schema(schema, &pf_driver);
	pci_iov_build_vf_schema(schema, &vf_driver);

	if (nvlist_error(schema) != 0)
		goto cleanup;

	return (schema);

cleanup:
	nvlist_destroy(schema);
	nvlist_destroy(pf_driver);
	nvlist_destroy(vf_driver);
	return (NULL);
}
static void
pci_iov_build_pf_schema(nvlist_t *schema, nvlist_t **driver_schema)
{
	nvlist_t *pf_schema, *iov_schema;

	pf_schema = pci_iov_schema_alloc_node();
	if (pf_schema == NULL) {
		nvlist_set_error(schema, ENOMEM);
		return;
	}

	iov_schema = pci_iov_get_pf_subsystem_schema();

	/*
	 * Note that if either *driver_schema or iov_schema is NULL, then
	 * nvlist_move_nvlist will put the schema in the error state and
	 * SR-IOV will fail to initialize later, so we don't have to explicitly
	 * handle that case here.
	 */
	nvlist_move_nvlist(pf_schema, DRIVER_CONFIG_NAME, *driver_schema);
	nvlist_move_nvlist(pf_schema, IOV_CONFIG_NAME, iov_schema);
	nvlist_move_nvlist(schema, PF_CONFIG_NAME, pf_schema);
	*driver_schema = NULL;
}
static void
pci_iov_build_vf_schema(nvlist_t *schema, nvlist_t **driver_schema)
{
	nvlist_t *vf_schema, *iov_schema;

	vf_schema = pci_iov_schema_alloc_node();
	if (vf_schema == NULL) {
		nvlist_set_error(schema, ENOMEM);
		return;
	}

	iov_schema = pci_iov_get_vf_subsystem_schema();

	/*
	 * Note that if either *driver_schema or iov_schema is NULL, then
	 * nvlist_move_nvlist will put the schema in the error state and
	 * SR-IOV will fail to initialize later, so we don't have to explicitly
	 * handle that case here.
	 */
	nvlist_move_nvlist(vf_schema, DRIVER_CONFIG_NAME, *driver_schema);
	nvlist_move_nvlist(vf_schema, IOV_CONFIG_NAME, iov_schema);
	nvlist_move_nvlist(schema, VF_SCHEMA_NAME, vf_schema);
	*driver_schema = NULL;
}
static nvlist_t *
pci_iov_get_pf_subsystem_schema(void)
{
	nvlist_t *pf;

	pf = pci_iov_schema_alloc_node();
	if (pf == NULL)
		return (NULL);

	pci_iov_schema_add_uint16(pf, "num_vfs", IOV_SCHEMA_REQUIRED, -1);
	pci_iov_schema_add_string(pf, "device", IOV_SCHEMA_REQUIRED, NULL);

	return (pf);
}

static nvlist_t *
pci_iov_get_vf_subsystem_schema(void)
{
	nvlist_t *vf;

	vf = pci_iov_schema_alloc_node();
	if (vf == NULL)
		return (NULL);

	pci_iov_schema_add_bool(vf, "passthrough", IOV_SCHEMA_HASDEFAULT, 0);

	return (vf);
}
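
/*
 * For illustration only: a user configuration that validates against the
 * subsystem schemas above is an nvlist with one PF_CONFIG_NAME node and one
 * node per VF (VF_PREFIX plus the VF index), each containing an
 * IOV_CONFIG_NAME nvlist for the parameters above and a DRIVER_CONFIG_NAME
 * nvlist for device-driver parameters.  iovctl(8) builds such an nvlist from
 * its configuration file and submits it through the IOV_CONFIG ioctl below.
 */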
static int
pci_iov_alloc_bar(struct pci_devinfo *dinfo, int bar, pci_addr_t bar_shift)
{
	struct resource *res;
	struct pcicfg_iov *iov;
	device_t dev, bus;
	rman_res_t start, end;
	pci_addr_t bar_size;
	int rid;

	iov = dinfo->cfg.iov;
	dev = dinfo->cfg.dev;
	bus = device_get_parent(dev);
	rid = iov->iov_pos + PCIR_SRIOV_BAR(bar);
	bar_size = 1 << bar_shift;

	res = pci_alloc_multi_resource(bus, dev, SYS_RES_MEMORY, &rid, 0,
	    ~0, 1, iov->iov_num_vfs, RF_ACTIVE);
	if (res == NULL)
		return (ENXIO);

	iov->iov_bar[bar].res = res;
	iov->iov_bar[bar].bar_size = bar_size;
	iov->iov_bar[bar].bar_shift = bar_shift;

	start = rman_get_start(res);
	end = rman_get_end(res);
	return (rman_manage_region(&iov->rman, start, end));
}
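
/*
 * Per the SR-IOV specification's memory-space layout, VF i's view of VF
 * BAR n starts at (VF BAR n base) + i * (per-VF BAR n size), so each VF's
 * BARs can be derived from the PF's VF BAR allocation and the VF index.
 */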
static void
pci_iov_add_bars(struct pcicfg_iov *iov, struct pci_devinfo *dinfo)
{
	struct pci_iov_bar *bar;
	uint64_t bar_start;
	int i;

	for (i = 0; i <= PCIR_MAX_BAR_0; i++) {
		bar = &iov->iov_bar[i];
		if (bar->res != NULL) {
			bar_start = rman_get_start(bar->res) +
			    dinfo->cfg.vf.index * bar->bar_size;

			pci_add_bar(dinfo->cfg.dev, PCIR_BAR(i), bar_start,
			    bar->bar_shift);
		}
	}
}
static int
pci_iov_parse_config(struct pcicfg_iov *iov, struct pci_iov_arg *arg,
    nvlist_t **ret)
{
	void *packed_config;
	nvlist_t *config;
	int error;

	config = NULL;
	packed_config = NULL;

	if (arg->len > pci_iov_max_config) {
		error = EMSGSIZE;
		goto out;
	}

	packed_config = malloc(arg->len, M_SRIOV, M_WAITOK);

	error = copyin(arg->config, packed_config, arg->len);
	if (error != 0)
		goto out;

	config = nvlist_unpack(packed_config, arg->len, NV_FLAG_IGNORE_CASE);
	if (config == NULL) {
		error = EINVAL;
		goto out;
	}

	error = pci_iov_schema_validate_config(iov->iov_schema, config);
	if (error != 0)
		goto out;

	error = nvlist_error(config);
	if (error != 0)
		goto out;

	*ret = config;
	config = NULL;

out:
	nvlist_destroy(config);
	free(packed_config, M_SRIOV);
	return (error);
}
/*
 * Set the ARI_EN bit in the lowest-numbered PCI function with the SR-IOV
 * capability.  This bit is only writeable on the lowest-numbered PF but
 * affects all PFs on the device.
 */
static int
pci_iov_set_ari(device_t bus)
{
	device_t lowest;
	device_t *devlist;
	int i, error, devcount, lowest_func, lowest_pos, iov_pos, dev_func;
	uint16_t iov_ctl;

	/* If ARI is disabled on the downstream port there is nothing to do. */
	if (!PCIB_ARI_ENABLED(device_get_parent(bus)))
		return (0);

	error = device_get_children(bus, &devlist, &devcount);
	if (error != 0)
		return (error);

	lowest = NULL;
	for (i = 0; i < devcount; i++) {
		if (pci_find_extcap(devlist[i], PCIZ_SRIOV, &iov_pos) == 0) {
			dev_func = pci_get_function(devlist[i]);
			if (lowest == NULL || dev_func < lowest_func) {
				lowest = devlist[i];
				lowest_func = dev_func;
				lowest_pos = iov_pos;
			}
		}
	}
	free(devlist, M_TEMP);

	/*
	 * If we called this function some device must have the SR-IOV
	 * capability, so we should never fail to find one here.
	 */
	KASSERT(lowest != NULL,
	    ("Could not find child of %s with SR-IOV capability",
	    device_get_nameunit(bus)));

	iov_ctl = pci_read_config(lowest, lowest_pos + PCIR_SRIOV_CTL, 2);
	iov_ctl |= PCIM_SRIOV_ARI_EN;
	pci_write_config(lowest, lowest_pos + PCIR_SRIOV_CTL, iov_ctl, 2);
	if ((pci_read_config(lowest, lowest_pos + PCIR_SRIOV_CTL, 2) &
	    PCIM_SRIOV_ARI_EN) == 0) {
		device_printf(lowest, "failed to enable ARI\n");
		return (ENXIO);
	}
	return (0);
}
static int
pci_iov_config_page_size(struct pci_devinfo *dinfo)
{
	uint32_t page_cap, page_size;

	page_cap = IOV_READ(dinfo, PCIR_SRIOV_PAGE_CAP, 4);

	/*
	 * If the system page size is less than the smallest SR-IOV page size
	 * then round up to the smallest SR-IOV page size.
	 */
	if (PAGE_SHIFT < PCI_SRIOV_BASE_PAGE_SHIFT)
		page_size = (1 << 0);
	else
		page_size = (1 << (PAGE_SHIFT - PCI_SRIOV_BASE_PAGE_SHIFT));

	/* Check that the device supports the system page size. */
	if (!(page_size & page_cap))
		return (ENXIO);

	IOV_WRITE(dinfo, PCIR_SRIOV_PAGE_SIZE, page_size, 4);
	return (0);
}
static int
pci_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *config)
{
	const nvlist_t *device, *driver_config;

	device = nvlist_get_nvlist(config, PF_CONFIG_NAME);
	driver_config = nvlist_get_nvlist(device, DRIVER_CONFIG_NAME);
	return (PCI_IOV_INIT(dev, num_vfs, driver_config));
}
static int
pci_iov_init_rman(device_t pf, struct pcicfg_iov *iov)
{
	int error;

	iov->rman.rm_start = 0;
	iov->rman.rm_end = ~0;
	iov->rman.rm_type = RMAN_ARRAY;
	snprintf(iov->rman_name, sizeof(iov->rman_name), "%s VF I/O memory",
	    device_get_nameunit(pf));
	iov->rman.rm_descr = iov->rman_name;

	error = rman_init(&iov->rman);
	if (error != 0)
		return (error);

	iov->iov_flags |= IOV_RMAN_INITED;
	return (0);
}
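
/*
 * If the PF's VF BARs were pre-allocated through PCI Enhanced Allocation
 * (EA), reuse the entries already present on the device's resource list
 * rather than sizing and allocating the BAR ourselves.
 */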
static int
pci_iov_alloc_bar_ea(struct pci_devinfo *dinfo, int bar)
{
	struct pcicfg_iov *iov;
	rman_res_t start, end;
	struct resource *res;
	struct resource_list *rl;
	struct resource_list_entry *rle;

	rl = &dinfo->resources;
	iov = dinfo->cfg.iov;

	rle = resource_list_find(rl, SYS_RES_MEMORY,
	    iov->iov_pos + PCIR_SRIOV_BAR(bar));
	if (rle == NULL)
		rle = resource_list_find(rl, SYS_RES_IOPORT,
		    iov->iov_pos + PCIR_SRIOV_BAR(bar));
	if (rle == NULL)
		return (ENXIO);
	res = rle->res;

	iov->iov_bar[bar].res = res;
	iov->iov_bar[bar].bar_size = rman_get_size(res) / iov->iov_num_vfs;
	iov->iov_bar[bar].bar_shift = pci_mapsize(iov->iov_bar[bar].bar_size);

	start = rman_get_start(res);
	end = rman_get_end(res);
	return (rman_manage_region(&iov->rman, start, end));
}
static int
pci_iov_setup_bars(struct pci_devinfo *dinfo)
{
	device_t dev;
	struct pcicfg_iov *iov;
	pci_addr_t bar_value, testval;
	int i, last_64, error;

	iov = dinfo->cfg.iov;
	dev = dinfo->cfg.dev;
	last_64 = 0;

	pci_add_resources_ea(device_get_parent(dev), dev, 1);

	for (i = 0; i <= PCIR_MAX_BAR_0; i++) {
		/* First, try to use BARs allocated with EA */
		error = pci_iov_alloc_bar_ea(dinfo, i);
		if (error == 0)
			continue;

		/* Allocate legacy-BAR only if EA is not enabled */
		if (pci_ea_is_enabled(dev, iov->iov_pos + PCIR_SRIOV_BAR(i)))
			continue;

		/*
		 * If a PCI BAR is a 64-bit wide BAR, then it spans two
		 * consecutive registers.  Therefore if the last BAR that
		 * we looked at was a 64-bit BAR, we need to skip this
		 * register as it's the second half of the last BAR.
		 */
		if (!last_64) {
			pci_read_bar(dev,
			    iov->iov_pos + PCIR_SRIOV_BAR(i),
			    &bar_value, &testval, &last_64);

			if (testval != 0) {
				error = pci_iov_alloc_bar(dinfo, i,
				    pci_mapsize(testval));
				if (error != 0)
					return (error);
			}
		} else
			last_64 = 0;
	}

	return (0);
}
static void
pci_iov_enumerate_vfs(struct pci_devinfo *dinfo, const nvlist_t *config,
    uint16_t first_rid, uint16_t rid_stride)
{
	char device_name[VF_MAX_NAME];
	const nvlist_t *device, *driver_config, *iov_config;
	device_t bus, dev, vf;
	struct pcicfg_iov *iov;
	struct pci_devinfo *vfinfo;
	int i, error;
	uint16_t vid, did, next_rid;

	iov = dinfo->cfg.iov;
	dev = dinfo->cfg.dev;
	bus = device_get_parent(dev);
	next_rid = first_rid;
	vid = pci_get_vendor(dev);
	did = IOV_READ(dinfo, PCIR_SRIOV_VF_DID, 2);

	for (i = 0; i < iov->iov_num_vfs; i++, next_rid += rid_stride) {
		snprintf(device_name, sizeof(device_name), VF_PREFIX"%d", i);
		device = nvlist_get_nvlist(config, device_name);
		iov_config = nvlist_get_nvlist(device, IOV_CONFIG_NAME);
		driver_config = nvlist_get_nvlist(device, DRIVER_CONFIG_NAME);

		vf = PCI_CREATE_IOV_CHILD(bus, dev, next_rid, vid, did);
		if (vf == NULL)
			break;

		/*
		 * If we are creating passthrough devices then force the ppt
		 * driver to attach to prevent a VF driver from claiming the
		 * VFs.
		 */
		if (nvlist_get_bool(iov_config, "passthrough"))
			device_set_devclass_fixed(vf, "ppt");

		vfinfo = device_get_ivars(vf);

		vfinfo->cfg.iov = iov;
		vfinfo->cfg.vf.index = i;

		pci_iov_add_bars(iov, vfinfo);

		error = PCI_IOV_ADD_VF(dev, i, driver_config);
		if (error != 0) {
			device_printf(dev, "Failed to add VF %d\n", i);
			device_delete_child(bus, vf);
		}
	}

	bus_generic_attach(bus);
}
static int
pci_iov_config(struct cdev *cdev, struct pci_iov_arg *arg)
{
	device_t bus, dev;
	struct pci_devinfo *dinfo;
	struct pcicfg_iov *iov;
	nvlist_t *config;
	int i, error;
	uint16_t rid_off, rid_stride;
	uint16_t first_rid, last_rid;
	uint16_t iov_ctl;
	uint16_t num_vfs, total_vfs;
	int iov_inited;

	mtx_lock(&Giant);
	dinfo = cdev->si_drv1;
	iov = dinfo->cfg.iov;
	dev = dinfo->cfg.dev;
	bus = device_get_parent(dev);
	iov_inited = 0;
	config = NULL;

	if ((iov->iov_flags & IOV_BUSY) || iov->iov_num_vfs != 0) {
		mtx_unlock(&Giant);
		return (EBUSY);
	}
	iov->iov_flags |= IOV_BUSY;

	error = pci_iov_parse_config(iov, arg, &config);
	if (error != 0)
		goto out;

	num_vfs = pci_iov_config_get_num_vfs(config);
	total_vfs = IOV_READ(dinfo, PCIR_SRIOV_TOTAL_VFS, 2);
	if (num_vfs > total_vfs) {
		error = EINVAL;
		goto out;
	}

	error = pci_iov_config_page_size(dinfo);
	if (error != 0)
		goto out;

	error = pci_iov_set_ari(bus);
	if (error != 0)
		goto out;

	error = pci_iov_init(dev, num_vfs, config);
	if (error != 0)
		goto out;
	iov_inited = 1;

	IOV_WRITE(dinfo, PCIR_SRIOV_NUM_VFS, num_vfs, 2);

	rid_off = IOV_READ(dinfo, PCIR_SRIOV_VF_OFF, 2);
	rid_stride = IOV_READ(dinfo, PCIR_SRIOV_VF_STRIDE, 2);

	first_rid = pci_get_rid(dev) + rid_off;
	last_rid = first_rid + (num_vfs - 1) * rid_stride;

	/* We don't yet support allocating extra bus numbers for VFs. */
	if (pci_get_bus(dev) != PCI_RID2BUS(last_rid)) {
		error = ENOSPC;
		goto out;
	}

	iov_ctl = IOV_READ(dinfo, PCIR_SRIOV_CTL, 2);
	iov_ctl &= ~(PCIM_SRIOV_VF_EN | PCIM_SRIOV_VF_MSE);
	IOV_WRITE(dinfo, PCIR_SRIOV_CTL, iov_ctl, 2);

	error = pci_iov_init_rman(dev, iov);
	if (error != 0)
		goto out;

	iov->iov_num_vfs = num_vfs;

	error = pci_iov_setup_bars(dinfo);
	if (error != 0)
		goto out;

	iov_ctl = IOV_READ(dinfo, PCIR_SRIOV_CTL, 2);
	iov_ctl |= PCIM_SRIOV_VF_EN | PCIM_SRIOV_VF_MSE;
	IOV_WRITE(dinfo, PCIR_SRIOV_CTL, iov_ctl, 2);

	/* Per specification, we must wait 100ms before accessing VFs. */
	pause("iov", roundup(hz, 10));
	pci_iov_enumerate_vfs(dinfo, config, first_rid, rid_stride);

	nvlist_destroy(config);
	iov->iov_flags &= ~IOV_BUSY;
	mtx_unlock(&Giant);

	return (0);
out:
	if (iov_inited)
		PCI_IOV_UNINIT(dev);

	for (i = 0; i <= PCIR_MAX_BAR_0; i++) {
		if (iov->iov_bar[i].res != NULL) {
			pci_release_resource(bus, dev, SYS_RES_MEMORY,
			    iov->iov_pos + PCIR_SRIOV_BAR(i),
			    iov->iov_bar[i].res);
			pci_delete_resource(bus, dev, SYS_RES_MEMORY,
			    iov->iov_pos + PCIR_SRIOV_BAR(i));
			iov->iov_bar[i].res = NULL;
		}
	}

	if (iov->iov_flags & IOV_RMAN_INITED) {
		rman_fini(&iov->rman);
		iov->iov_flags &= ~IOV_RMAN_INITED;
	}

	nvlist_destroy(config);
	iov->iov_num_vfs = 0;
	iov->iov_flags &= ~IOV_BUSY;
	mtx_unlock(&Giant);
	return (error);
}
void
pci_iov_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
{
	struct pcicfg_iov *iov;

	iov = dinfo->cfg.iov;

	IOV_WRITE(dinfo, PCIR_SRIOV_PAGE_SIZE, iov->iov_page_size, 4);
	IOV_WRITE(dinfo, PCIR_SRIOV_NUM_VFS, iov->iov_num_vfs, 2);
	IOV_WRITE(dinfo, PCIR_SRIOV_CTL, iov->iov_ctl, 2);
}

void
pci_iov_cfg_save(device_t dev, struct pci_devinfo *dinfo)
{
	struct pcicfg_iov *iov;

	iov = dinfo->cfg.iov;

	iov->iov_page_size = IOV_READ(dinfo, PCIR_SRIOV_PAGE_SIZE, 4);
	iov->iov_ctl = IOV_READ(dinfo, PCIR_SRIOV_CTL, 2);
}
/* Return true if child is a VF of the given PF. */
static int
pci_iov_is_child_vf(struct pcicfg_iov *pf, device_t child)
{
	struct pci_devinfo *vfinfo;

	vfinfo = device_get_ivars(child);

	if (!(vfinfo->cfg.flags & PCICFG_VF))
		return (0);

	return (pf == vfinfo->cfg.iov);
}
static int
pci_iov_delete_iov_children(struct pci_devinfo *dinfo)
{
	device_t bus, dev, vf, *devlist;
	struct pcicfg_iov *iov;
	int i, error, devcount;
	uint32_t iov_ctl;

	mtx_assert(&Giant, MA_OWNED);

	iov = dinfo->cfg.iov;
	dev = dinfo->cfg.dev;
	bus = device_get_parent(dev);
	devlist = NULL;

	iov->iov_flags |= IOV_BUSY;

	error = device_get_children(bus, &devlist, &devcount);
	if (error != 0)
		goto out;

	for (i = 0; i < devcount; i++) {
		vf = devlist[i];

		if (!pci_iov_is_child_vf(iov, vf))
			continue;

		error = device_detach(vf);
		if (error != 0) {
			device_printf(dev,
			   "Could not disable SR-IOV: failed to detach VF %s\n",
			    device_get_nameunit(vf));
			goto out;
		}
	}

	for (i = 0; i < devcount; i++) {
		vf = devlist[i];

		if (pci_iov_is_child_vf(iov, vf))
			device_delete_child(bus, vf);
	}
	PCI_IOV_UNINIT(dev);

	iov_ctl = IOV_READ(dinfo, PCIR_SRIOV_CTL, 2);
	iov_ctl &= ~(PCIM_SRIOV_VF_EN | PCIM_SRIOV_VF_MSE);
	IOV_WRITE(dinfo, PCIR_SRIOV_CTL, iov_ctl, 2);
	IOV_WRITE(dinfo, PCIR_SRIOV_NUM_VFS, 0, 2);

	iov->iov_num_vfs = 0;

	for (i = 0; i <= PCIR_MAX_BAR_0; i++) {
		if (iov->iov_bar[i].res != NULL) {
			pci_release_resource(bus, dev, SYS_RES_MEMORY,
			    iov->iov_pos + PCIR_SRIOV_BAR(i),
			    iov->iov_bar[i].res);
			pci_delete_resource(bus, dev, SYS_RES_MEMORY,
			    iov->iov_pos + PCIR_SRIOV_BAR(i));
			iov->iov_bar[i].res = NULL;
		}
	}

	if (iov->iov_flags & IOV_RMAN_INITED) {
		rman_fini(&iov->rman);
		iov->iov_flags &= ~IOV_RMAN_INITED;
	}

out:
	free(devlist, M_TEMP);
	iov->iov_flags &= ~IOV_BUSY;
	return (error);
}
static int
pci_iov_delete(struct cdev *cdev)
{
	struct pci_devinfo *dinfo;
	struct pcicfg_iov *iov;
	int error;

	mtx_lock(&Giant);
	dinfo = cdev->si_drv1;
	iov = dinfo->cfg.iov;

	if ((iov->iov_flags & IOV_BUSY) != 0) {
		error = EBUSY;
		goto out;
	}
	if (iov->iov_num_vfs == 0) {
		error = ECHILD;
		goto out;
	}

	error = pci_iov_delete_iov_children(dinfo);
out:
	mtx_unlock(&Giant);
	return (error);
}
static int
pci_iov_get_schema_ioctl(struct cdev *cdev, struct pci_iov_schema *output)
{
	struct pci_devinfo *dinfo;
	void *packed;
	size_t output_len, size;
	int error;

	packed = NULL;

	mtx_lock(&Giant);
	dinfo = cdev->si_drv1;
	packed = nvlist_pack(dinfo->cfg.iov->iov_schema, &size);
	mtx_unlock(&Giant);

	if (packed == NULL) {
		error = ENOMEM;
		goto fail;
	}

	output_len = output->len;
	output->len = size;
	if (size <= output_len) {
		error = copyout(packed, output->schema, size);
		if (error != 0)
			goto fail;

		output->error = 0;
	} else {
		/*
		 * If we return an error then the ioctl code won't copyout
		 * output back to userland, so we flag the error in the struct
		 * instead.
		 */
		output->error = EMSGSIZE;
	}

	error = 0;

fail:
	free(packed, M_NVLIST);
	return (error);
}
static int
pci_iov_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	switch (cmd) {
	case IOV_CONFIG:
		return (pci_iov_config(dev, (struct pci_iov_arg *)data));
	case IOV_DELETE:
		return (pci_iov_delete(dev));
	case IOV_GET_SCHEMA:
		return (pci_iov_get_schema_ioctl(dev,
		    (struct pci_iov_schema *)data));
	default:
		return (EINVAL);
	}
}
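
/*
 * Userland reaches the ioctls above through the per-PF character device
 * created in pci_iov_attach_method().  A minimal sketch (error handling
 * omitted; the "foo0" device name is hypothetical):
 *
 *	int fd = open("/dev/iov/foo0", O_RDWR);
 *	struct pci_iov_arg arg = {
 *		.config = packed_nvlist_buffer,
 *		.len = packed_nvlist_len,
 *	};
 *	ioctl(fd, IOV_CONFIG, &arg);	// create and attach the VFs
 *	ioctl(fd, IOV_DELETE);		// later: detach and destroy them
 *
 * iovctl(8) is the stock consumer of this interface.
 */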
struct resource *
pci_vf_alloc_mem_resource(device_t dev, device_t child, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct pci_devinfo *dinfo;
	struct pcicfg_iov *iov;
	struct pci_map *map;
	struct resource *res;
	struct resource_list_entry *rle;
	rman_res_t bar_start, bar_end;
	pci_addr_t bar_length;
	int error;

	dinfo = device_get_ivars(child);
	iov = dinfo->cfg.iov;

	map = pci_find_bar(child, *rid);
	if (map == NULL)
		return (NULL);

	bar_length = 1 << map->pm_size;
	bar_start = map->pm_value;
	bar_end = bar_start + bar_length - 1;

	/* Make sure that the resource fits the constraints. */
	if (bar_start >= end || bar_end <= bar_start || count != 1)
		return (NULL);

	/* Clamp the resource to the constraints if necessary. */
	if (bar_start < start)
		bar_start = start;
	if (bar_end > end)
		bar_end = end;
	bar_length = bar_end - bar_start + 1;

	res = rman_reserve_resource(&iov->rman, bar_start, bar_end,
	    bar_length, flags, child);
	if (res == NULL)
		return (NULL);

	rle = resource_list_add(&dinfo->resources, SYS_RES_MEMORY, *rid,
	    bar_start, bar_end, 1);
	if (rle == NULL) {
		rman_release_resource(res);
		return (NULL);
	}

	rman_set_rid(res, *rid);

	if (flags & RF_ACTIVE) {
		error = bus_activate_resource(child, SYS_RES_MEMORY, *rid, res);
		if (error != 0) {
			resource_list_delete(&dinfo->resources, SYS_RES_MEMORY,
			    *rid);
			rman_release_resource(res);
			return (NULL);
		}
	}
	rle->res = res;

	return (res);
}
int
pci_vf_release_mem_resource(device_t dev, device_t child, int rid,
    struct resource *r)
{
	struct pci_devinfo *dinfo;
	struct resource_list_entry *rle;
	int error;

	dinfo = device_get_ivars(child);

	if (rman_get_flags(r) & RF_ACTIVE) {
		error = bus_deactivate_resource(child, SYS_RES_MEMORY, rid, r);
		if (error != 0)
			return (error);
	}

	rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY, rid);
	if (rle != NULL) {
		rle->res = NULL;
		resource_list_delete(&dinfo->resources, SYS_RES_MEMORY,
		    rid);
	}

	return (rman_release_resource(r));
}