4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
28 * Functions to convert between a list of vdevs and an nvlist representing the
29 * configuration. Each entry in the list can be one of:
32 * disk=(path=..., devid=...)
41 * While the underlying implementation supports it, group vdevs cannot contain
42 * other group vdevs. All userland verification of devices is contained within
43 * this file. If successful, the nvlist returned can be passed directly to the
44 * kernel; we've done as much verification as possible in userland.
46 * Hot spares are a special case, and passed down as an array of disk vdevs, at
47 * the same level as the root of the vdev tree.
49 * The only function exported by this file is 'make_root_vdev'. The
50 * function performs several passes:
52 * 1. Construct the vdev specification. Performs syntax validation and
53 * makes sure each device is valid.
54 * 2. Check for devices in use. Using libdiskmgt, makes sure that no
55 * devices are also in use. Some can be overridden using the 'force'
56 * flag, others cannot.
57 * 3. Check for replication errors if the 'force' flag is not specified.
58 * validates that the replication level is consistent across the
60 * 4. Call libzfs to label any whole disks with an EFI label.
68 #include <libnvpair.h>
75 #include <sys/mntent.h>
78 #include "zpool_util.h"
81 * For any given vdev specification, we can have multiple errors. The
82 * vdev_error() function keeps track of whether we have seen an error yet, and
83 * prints out a header if it's the first error we've seen.
/*
 * Report a single vdev-specification error to stderr.
 * Before the first error, prints the "invalid vdev specification" banner
 * plus a header telling the user whether '-f' can override the errors or
 * whether they must be repaired manually; then formats the caller's
 * message via vfprintf().
 * NOTE(review): the state flag choosing between the two headers, and the
 * va_start/va_end bracketing the vfprintf(), are on lines not visible in
 * this view -- confirm against the full file.
 */
90 vdev_error(const char *fmt, ...)
95 (void) fprintf(stderr, gettext("invalid vdev specification\n"));
/* Overridable errors: tell the user about '-f'. */
97 (void) fprintf(stderr, gettext("use '-f' to override "
98 "the following errors:\n"));
/* Non-overridable errors: must be fixed by hand. */
100 (void) fprintf(stderr, gettext("the following errors "
101 "must be manually repaired:\n"));
106 (void) vfprintf(stderr, fmt, ap);
111 * Check that a file is valid. All we can do in this case is check that it's
112 * not in use by another pool, and not in use by swap.
/*
 * Validate that 'file' is usable as a vdev: not in use by swap (via
 * libdiskmgt) and not already part of another pool (via zpool_in_use()).
 * 'force' relaxes some in-use states; 'isspare' allows a device already
 * labeled as a hot spare to be shared between pools.
 * NOTE(review): return statements and the switch/brace scaffolding are on
 * lines not visible here.
 */
115 check_file(const char *file, boolean_t force, boolean_t isspare)
/* Reject devices currently used as swap; see swap(1M). */
125 if (dm_inuse_swap(file, &err)) {
127 libdiskmgt_error(err);
129 vdev_error(gettext("%s is currently used by swap. "
130 "Please see swap(1M).\n"), file);
135 if ((fd = open(file, O_RDONLY)) < 0)
/* Ask libzfs whether an existing pool label claims this device. */
138 if (zpool_in_use(g_zfs, fd, &state, &name, &inuse) == 0 && inuse) {
142 case POOL_STATE_ACTIVE:
143 desc = gettext("active");
146 case POOL_STATE_EXPORTED:
147 desc = gettext("exported");
150 case POOL_STATE_POTENTIALLY_ACTIVE:
151 desc = gettext("potentially active");
155 desc = gettext("unknown");
160 * Allow hot spares to be shared between pools.
162 if (state == POOL_STATE_SPARE && isspare)
/* Active pools and foreign spares can never be overridden by -f. */
165 if (state == POOL_STATE_ACTIVE ||
166 state == POOL_STATE_SPARE || !force) {
168 case POOL_STATE_SPARE:
169 vdev_error(gettext("%s is reserved as a hot "
170 "spare for pool %s\n"), file, name);
173 vdev_error(gettext("%s is part of %s pool "
174 "'%s'\n"), file, desc, name);
/*
 * Validate a GEOM provider given by name.  If 'name' lacks the /dev/
 * prefix, prepend it; then delegate the in-use checks to check_file().
 */
188 check_provider(const char *name, boolean_t force, boolean_t isspare)
190 char path[MAXPATHLEN];
/* Shorthand name: build the full /dev/... path. */
192 if (strncmp(name, _PATH_DEV, sizeof(_PATH_DEV) - 1) != 0)
193 snprintf(path, sizeof(path), "%s%s", _PATH_DEV, name);
/* Already a full path: use it verbatim. */
195 strlcpy(path, name, sizeof(path));
197 return (check_file(path, force, isspare));
201 * By "whole disk" we mean an entire physical disk (something we can
202 * label, toggle the write cache on, etc.) as opposed to the full
203 * capacity of a pseudo-device such as lofi or did. We act as if we
204 * are labeling the disk, which should be a pretty good test of whether
205 * it's a viable device or not. Returns B_TRUE if it is and B_FALSE if
/*
 * Decide whether 'name' refers to an entire physical disk (see the block
 * comment above): we act as if labeling it via a GEOM open.
 * NOTE(review): only the g_open() call is visible here; the result test
 * and B_TRUE/B_FALSE returns are on lines not shown.
 */
209 is_whole_disk(const char *name)
213 fd = g_open(name, 0);
222 * Create a leaf vdev. Determine if this is a GEOM provider.
223 * Valid forms for a leaf vdev are:
225 * /dev/xxx Complete path to a GEOM provider
226 * xxx Shorthand for /dev/xxx
/*
 * Build the nvlist for a single leaf vdev (disk provider or regular
 * file).  Resolves 'arg' to a full path (prepending /dev/ for shorthand
 * names), classifies it as VDEV_TYPE_DISK or VDEV_TYPE_FILE from
 * st_mode, and populates path/type/is_log (and whole_disk, devid for
 * disks) in the returned nvlist.  Returns NULL (with a message printed)
 * on any failure -- the NULL-return lines are not visible in this view.
 */
229 make_leaf_vdev(const char *arg, uint64_t is_log)
231 char path[MAXPATHLEN];
232 struct stat64 statbuf;
233 nvlist_t *vdev = NULL;
235 boolean_t wholedisk = B_FALSE;
238 * Determine what type of vdev this is, and put the full path into
239 * 'path'. We detect whether this is a device or file afterwards by
240 * checking the st_mode of the file.
244 * Complete device or file path. Exact type is determined by
245 * examining the file descriptor afterwards.
247 wholedisk = is_whole_disk(arg);
248 if (!wholedisk && (stat64(arg, &statbuf) != 0)) {
249 (void) fprintf(stderr,
250 gettext("cannot open '%s': %s\n"),
251 arg, strerror(errno));
255 (void) strlcpy(path, arg, sizeof (path));
258 * This may be a short path for a device, or it could be total
259 * gibberish. Check to see if it's a known device in
260 * /dev/dsk/. As part of this check, see if we've been given
261 * an entire disk (minus the slice number).
263 if (strncmp(arg, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
264 strlcpy(path, arg, sizeof (path));
266 snprintf(path, sizeof (path), "%s%s", _PATH_DEV, arg);
267 wholedisk = is_whole_disk(path);
268 if (!wholedisk && (stat64(path, &statbuf) != 0)) {
270 * If we got ENOENT, then the user gave us
271 * gibberish, so try to direct them with a
272 * reasonable error message. Otherwise,
273 * regurgitate strerror() since it's the best we
276 if (errno == ENOENT) {
277 (void) fprintf(stderr,
278 gettext("cannot open '%s': no such "
279 "GEOM provider\n"), arg);
280 (void) fprintf(stderr,
281 gettext("must be a full path or "
282 "shorthand device name\n"));
285 (void) fprintf(stderr,
286 gettext("cannot open '%s': %s\n"),
287 path, strerror(errno));
294 * Determine whether this is a device or a file.
297 type = VDEV_TYPE_DISK;
298 } else if (S_ISREG(statbuf.st_mode)) {
299 type = VDEV_TYPE_FILE;
/* Neither a provider nor a regular file: reject. */
301 (void) fprintf(stderr, gettext("cannot use '%s': must be a "
302 "GEOM provider or regular file\n"), path);
307 * Finally, we have the complete device or file, and we know that it is
308 * acceptable to use. Construct the nvlist to describe this vdev. All
309 * vdevs have a 'path' element, and devices also have a 'devid' element.
311 verify(nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) == 0);
312 verify(nvlist_add_string(vdev, ZPOOL_CONFIG_PATH, path) == 0);
313 verify(nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE, type) == 0);
314 verify(nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_LOG, is_log) == 0);
/* Disks are always recorded as not-whole-disk on this platform. */
315 if (strcmp(type, VDEV_TYPE_DISK) == 0)
316 verify(nvlist_add_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK,
317 (uint64_t)B_FALSE) == 0);
320 * For a whole disk, defer getting its devid until after labeling it.
/*
 * NOTE(review): the "1 ||" below short-circuits the S_ISBLK/!wholedisk
 * test and takes the devid path unconditionally.  This looks like either
 * a deliberate port-time override or leftover debug code -- confirm the
 * intent against upstream before touching it.
 */
322 if (1 || (S_ISBLK(statbuf.st_mode) && !wholedisk)) {
324 * Get the devid for the device.
328 char *minor = NULL, *devid_str = NULL;
330 if ((fd = open(path, O_RDONLY)) < 0) {
331 (void) fprintf(stderr, gettext("cannot open '%s': "
332 "%s\n"), path, strerror(errno));
/* Best effort: a device without a devid is still usable. */
337 if (devid_get(fd, &devid) == 0) {
338 if (devid_get_minor_name(fd, &minor) == 0 &&
339 (devid_str = devid_str_encode(devid, minor)) !=
341 verify(nvlist_add_string(vdev,
342 ZPOOL_CONFIG_DEVID, devid_str) == 0);
344 if (devid_str != NULL)
345 devid_str_free(devid_str);
347 devid_str_free(minor);
358 * Go through and verify the replication level of the pool is consistent.
359 * Performs the following checks:
361 * For the new spec, verifies that devices in mirrors and raidz are the
364 * If the current configuration already has inconsistent replication
365 * levels, ignore any other potential problems in the new spec.
367 * Otherwise, make sure that the current spec (if there is one) and the new
368 * spec have consistent replication levels.
/*
 * Summary of one replication configuration: vdev type, fan-out, and
 * RAID-Z parity.  Compared across toplevel vdevs (and against the
 * existing pool) to detect mismatched replication levels.
 * NOTE(review): the 'zprl_type' (string) member used throughout the code
 * below is declared on a line not visible in this view.
 */
370 typedef struct replication_level {
372 uint64_t zprl_children;
373 uint64_t zprl_parity;
374 } replication_level_t;
/* Devices whose sizes differ by more than this (~16MB) are mismatched. */
376 #define	ZPOOL_FUZZ	(16 * 1024 * 1024)
379 * Given a list of toplevel vdevs, return the current replication level. If
380 * the config is inconsistent, then NULL is returned. If 'fatal' is set, then
381 * an error message will be displayed for each self-inconsistent vdev.
/*
 * Compute the replication level of the given root vdev tree.  Returns a
 * malloc'd replication_level_t describing the (consistent) level, or
 * NULL if the toplevel vdevs disagree.  When 'fatal' is set, each
 * self-inconsistency is reported via vdev_error(); otherwise the scan
 * fails quietly.  Caller frees the returned struct.
 * NOTE(review): several declarations (top, toplevels, nv, child, type,
 * vdev_size, fd, err) and the NULL-return/cleanup lines are not visible
 * in this view.
 */
383 static replication_level_t *
384 get_replication(nvlist_t *nvroot, boolean_t fatal)
392 replication_level_t lastrep, rep, *ret;
393 boolean_t dontreport;
395 ret = safe_malloc(sizeof (replication_level_t));
397 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
398 &top, &toplevels) == 0);
400 lastrep.zprl_type = NULL;
401 for (t = 0; t < toplevels; t++) {
402 uint64_t is_log = B_FALSE;
407 * For separate logs we ignore the top level vdev replication
410 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &is_log);
414 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE,
/* No children => a leaf toplevel: plain file or disk. */
416 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
417 &child, &children) != 0) {
419 * This is a 'file' or 'disk' vdev.
421 rep.zprl_type = type;
422 rep.zprl_children = 1;
428 * This is a mirror or RAID-Z vdev. Go through and make
429 * sure the contents are all the same (files vs. disks),
430 * keeping track of the number of elements in the
433 * We also check that the size of each vdev (if it can
434 * be determined) is the same.
436 rep.zprl_type = type;
437 rep.zprl_children = 0;
439 if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
440 verify(nvlist_lookup_uint64(nv,
441 ZPOOL_CONFIG_NPARITY,
442 &rep.zprl_parity) == 0);
443 assert(rep.zprl_parity != 0);
449 * The 'dontreport' variable indicates that we've
450 * already reported an error for this spec, so don't
451 * bother doing it again.
456 for (c = 0; c < children; c++) {
457 nvlist_t *cnv = child[c];
459 struct stat64 statbuf;
/* -1ULL sentinel: size unknown / undeterminable. */
460 uint64_t size = -1ULL;
466 verify(nvlist_lookup_string(cnv,
467 ZPOOL_CONFIG_TYPE, &childtype) == 0);
470 * If this is a replacing or spare vdev, then
471 * get the real first child of the vdev.
473 if (strcmp(childtype,
474 VDEV_TYPE_REPLACING) == 0 ||
475 strcmp(childtype, VDEV_TYPE_SPARE) == 0) {
479 verify(nvlist_lookup_nvlist_array(cnv,
480 ZPOOL_CONFIG_CHILDREN, &rchild,
482 assert(rchildren == 2);
485 verify(nvlist_lookup_string(cnv,
490 verify(nvlist_lookup_string(cnv,
491 ZPOOL_CONFIG_PATH, &path) == 0);
494 * If we have a raidz/mirror that combines disks
495 * with files, report it as an error.
497 if (!dontreport && type != NULL &&
498 strcmp(type, childtype) != 0) {
504 "mismatched replication "
505 "level: %s contains both "
506 "files and devices\n"),
514 * According to stat(2), the value of 'st_size'
515 * is undefined for block devices and character
516 * devices. But there is no effective way to
517 * determine the real size in userland.
519 * Instead, we'll take advantage of an
520 * implementation detail of spec_size(). If the
521 * device is currently open, then we (should)
522 * return a valid size.
524 * If we still don't get a valid size (indicated
525 * by a size of 0 or MAXOFFSET_T), then ignore
526 * this device altogether.
528 if ((fd = open(path, O_RDONLY)) >= 0) {
529 err = fstat64(fd, &statbuf);
/* Block/char device: query the media size via ioctl instead. */
531 S_ISCHR(statbuf.st_mode)) {
532 err = ioctl(fd, DIOCGMEDIASIZE,
537 err = stat64(path, &statbuf);
539 if (err != 0 || statbuf.st_size == 0)
542 size = statbuf.st_size;
545 * Also make sure that devices and
546 * slices have a consistent size. If
547 * they differ by a significant amount
548 * (~16MB) then report an error.
551 (vdev_size != -1ULL &&
552 (labs(size - vdev_size) >
559 "%s contains devices of "
560 "different sizes\n"),
573 * At this point, we have the replication of the last toplevel
574 * vdev in 'rep'. Compare it to 'lastrep' to see if it's
/* Compare this toplevel against the previous one: type, parity, width. */
577 if (lastrep.zprl_type != NULL) {
578 if (strcmp(lastrep.zprl_type, rep.zprl_type) != 0) {
584 "mismatched replication level: "
585 "both %s and %s vdevs are "
587 lastrep.zprl_type, rep.zprl_type);
590 } else if (lastrep.zprl_parity != rep.zprl_parity) {
596 "mismatched replication level: "
597 "both %llu and %llu device parity "
598 "%s vdevs are present\n"),
604 } else if (lastrep.zprl_children != rep.zprl_children) {
610 "mismatched replication level: "
611 "both %llu-way and %llu-way %s "
612 "vdevs are present\n"),
613 lastrep.zprl_children,
630 * Check the replication level of the vdev spec against the current pool. Calls
631 * get_replication() to make sure the new spec is self-consistent. If the pool
632 * has a consistent replication level, then we ignore any errors. Otherwise,
633 * report any difference between the two.
/*
 * Compare the replication level of the new vdev spec 'newroot' against
 * the existing pool 'config' (NULL for a new pool).  If the existing
 * pool is already self-inconsistent, any differences are ignored;
 * otherwise mismatches in type, parity, or fan-out are reported via
 * vdev_error().  Return statements and cleanup (freeing 'current'/'new')
 * are on lines not visible in this view.
 */
636 check_replication(nvlist_t *config, nvlist_t *newroot)
640 replication_level_t *current = NULL, *new;
644 * If we have a current pool configuration, check to see if it's
645 * self-consistent. If not, simply return success.
647 if (config != NULL) {
650 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
652 if ((current = get_replication(nvroot, B_FALSE)) == NULL)
656 * for spares there may be no children, and therefore no
657 * replication level to check
659 if ((nvlist_lookup_nvlist_array(newroot, ZPOOL_CONFIG_CHILDREN,
660 &child, &children) != 0) || (children == 0)) {
666 * If all we have is logs then there's no replication level to check.
668 if (num_logs(newroot) == children) {
674 * Get the replication level of the new vdev spec, reporting any
675 * inconsistencies found.
677 if ((new = get_replication(newroot, B_TRUE)) == NULL) {
683 * Check to see if the new vdev spec matches the replication level of
687 if (current != NULL) {
688 if (strcmp(current->zprl_type, new->zprl_type) != 0) {
690 "mismatched replication level: pool uses %s "
691 "and new vdev is %s\n"),
692 current->zprl_type, new->zprl_type);
694 } else if (current->zprl_parity != new->zprl_parity) {
696 "mismatched replication level: pool uses %llu "
697 "device parity and new vdev uses %llu\n"),
698 current->zprl_parity, new->zprl_parity);
700 } else if (current->zprl_children != new->zprl_children) {
702 "mismatched replication level: pool uses %llu-way "
703 "%s and new vdev uses %llu-way %s\n"),
704 current->zprl_children, current->zprl_type,
705 new->zprl_children, new->zprl_type);
718 * Determine if the given path is a hot spare within the given configuration.
/*
 * Return whether 'path' is a hot spare belonging to the pool described
 * by 'config': the device must carry a POOL_STATE_SPARE label whose GUID
 * appears in the config's spares array.  Early-return and cleanup lines
 * (close(fd), nvlist_free, B_TRUE/B_FALSE returns) are not visible in
 * this view.
 */
721 is_spare(nvlist_t *config, const char *path)
727 uint64_t guid, spareguid;
733 if ((fd = open(path, O_RDONLY)) < 0)
/* Must be labeled, in-use, and specifically in the SPARE state. */
736 if (zpool_in_use(g_zfs, fd, &state, &name, &inuse) != 0 ||
738 state != POOL_STATE_SPARE ||
739 zpool_read_label(fd, &label) != 0) {
747 verify(nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) == 0);
750 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
/* Match the on-disk label GUID against the pool's registered spares. */
752 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
753 &spares, &nspares) == 0) {
754 for (i = 0; i < nspares; i++) {
755 verify(nvlist_lookup_uint64(spares[i],
756 ZPOOL_CONFIG_GUID, &spareguid) == 0);
757 if (spareguid == guid)
766 * Go through and find any devices that are in use. We rely on libdiskmgt for
767 * the majority of this task.
/*
 * Recursively walk the vdev tree 'nv' and verify that no leaf device is
 * already in use (swap, another pool, ...).  Leaves are dispatched to
 * check_provider()/check_file(); interior vdevs recurse over children,
 * spares (with isspare=B_TRUE), and l2cache arrays.  Returns non-zero on
 * the first failing device.  NOTE(review): the trailing parameter list
 * (isspare) and several return lines are not visible in this view.
 */
770 check_in_use(nvlist_t *config, nvlist_t *nv, int force, int isreplacing,
777 char buf[MAXPATHLEN];
780 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
/* No children => leaf vdev: check the device itself. */
782 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
783 &child, &children) != 0) {
785 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0);
788 * As a generic check, we look to see if this is a replace of a
789 * hot spare within the same pool. If so, we allow it
790 * regardless of what libdiskmgt or zpool_in_use() says.
793 (void) strlcpy(buf, path, sizeof (buf));
794 if (is_spare(config, buf))
798 if (strcmp(type, VDEV_TYPE_DISK) == 0)
799 ret = check_provider(path, force, isspare);
801 if (strcmp(type, VDEV_TYPE_FILE) == 0)
802 ret = check_file(path, force, isspare);
/* Interior vdev: recurse into regular children... */
807 for (c = 0; c < children; c++)
808 if ((ret = check_in_use(config, child[c], force,
809 isreplacing, B_FALSE)) != 0)
/* ...then the spares array (marked as spares)... */
812 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
813 &child, &children) == 0)
814 for (c = 0; c < children; c++)
815 if ((ret = check_in_use(config, child[c], force,
816 isreplacing, B_TRUE)) != 0)
/* ...and finally the l2cache devices. */
819 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
820 &child, &children) == 0)
821 for (c = 0; c < children; c++)
822 if ((ret = check_in_use(config, child[c], force,
823 isreplacing, B_FALSE)) != 0)
/*
 * Map a command-line keyword ("raidz", "raidz2", "mirror", "spare",
 * "log", "cache") to its canonical VDEV_TYPE_* string, or NULL if the
 * word is not a grouping keyword.  NOTE(review): the '*mindev'
 * assignments (minimum device count per grouping) and the final NULL
 * return are on lines not visible in this view.
 */
830 is_grouping(const char *type, int *mindev)
832 if (strcmp(type, "raidz") == 0 || strcmp(type, "raidz1") == 0) {
835 return (VDEV_TYPE_RAIDZ);
838 if (strcmp(type, "raidz2") == 0) {
841 return (VDEV_TYPE_RAIDZ);
844 if (strcmp(type, "mirror") == 0) {
847 return (VDEV_TYPE_MIRROR);
850 if (strcmp(type, "spare") == 0) {
853 return (VDEV_TYPE_SPARE);
856 if (strcmp(type, "log") == 0) {
859 return (VDEV_TYPE_LOG);
862 if (strcmp(type, "cache") == 0) {
865 return (VDEV_TYPE_L2CACHE);
872 * Construct a syntactically valid vdev specification,
873 * and ensure that all devices and files exist and can be opened.
874 * Note: we don't bother freeing anything in the error paths
875 * because the program is just going to exit anyway.
/*
 * Parse the command-line vdev specification (argv) into an nvlist tree
 * rooted at a VDEV_TYPE_ROOT node, with toplevel children plus optional
 * 'spares' and 'l2cache' arrays.  Grouping keywords (mirror/raidz/...)
 * consume subsequent leaf arguments until the next grouping keyword.
 * Error paths deliberately leak (the program exits); several loop/exit
 * lines and the final return are not visible in this view.
 */
878 construct_spec(int argc, char **argv)
880 nvlist_t *nvroot, *nv, **top, **spares, **l2cache;
881 int t, toplevels, mindev, nspares, nlogs, nl2cache;
900 * If it's a mirror or raidz, the subsequent arguments are
901 * its leaves -- until we encounter the next mirror or raidz.
903 if ((type = is_grouping(argv[0], &mindev)) != NULL) {
904 nvlist_t **child = NULL;
/* 'spare' may appear at most once in a spec. */
907 if (strcmp(type, VDEV_TYPE_SPARE) == 0) {
908 if (spares != NULL) {
909 (void) fprintf(stderr,
910 gettext("invalid vdev "
911 "specification: 'spare' can be "
912 "specified only once\n"));
/* Likewise 'log' may appear at most once. */
918 if (strcmp(type, VDEV_TYPE_LOG) == 0) {
920 (void) fprintf(stderr,
921 gettext("invalid vdev "
922 "specification: 'log' can be "
923 "specified only once\n"));
931 * A log is not a real grouping device.
932 * We just set is_log and continue.
/* And 'cache' may appear at most once. */
937 if (strcmp(type, VDEV_TYPE_L2CACHE) == 0) {
938 if (l2cache != NULL) {
939 (void) fprintf(stderr,
940 gettext("invalid vdev "
941 "specification: 'cache' can be "
942 "specified only once\n"));
/* Inside a log section only mirror groupings are allowed. */
949 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
950 (void) fprintf(stderr,
951 gettext("invalid vdev "
952 "specification: unsupported 'log' "
953 "device: %s\n"), type);
/* Collect this grouping's leaves until the next grouping keyword. */
959 for (c = 1; c < argc; c++) {
960 if (is_grouping(argv[c], NULL) != NULL)
963 child = realloc(child,
964 children * sizeof (nvlist_t *));
967 if ((nv = make_leaf_vdev(argv[c], B_FALSE))
970 child[children - 1] = nv;
/* Enforce the grouping's minimum device count from is_grouping(). */
973 if (children < mindev) {
974 (void) fprintf(stderr, gettext("invalid vdev "
975 "specification: %s requires at least %d "
976 "devices\n"), argv[0], mindev);
/* Spares/l2cache become flat arrays; other groupings get a parent nvlist. */
983 if (strcmp(type, VDEV_TYPE_SPARE) == 0) {
987 } else if (strcmp(type, VDEV_TYPE_L2CACHE) == 0) {
992 verify(nvlist_alloc(&nv, NV_UNIQUE_NAME,
994 verify(nvlist_add_string(nv, ZPOOL_CONFIG_TYPE,
996 verify(nvlist_add_uint64(nv,
997 ZPOOL_CONFIG_IS_LOG, is_log) == 0);
998 if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
999 verify(nvlist_add_uint64(nv,
1000 ZPOOL_CONFIG_NPARITY,
1003 verify(nvlist_add_nvlist_array(nv,
1004 ZPOOL_CONFIG_CHILDREN, child,
1007 for (c = 0; c < children; c++)
1008 nvlist_free(child[c]);
1013 * We have a device. Pass off to make_leaf_vdev() to
1014 * construct the appropriate nvlist describing the vdev.
1016 if ((nv = make_leaf_vdev(argv[0], is_log)) == NULL)
1025 top = realloc(top, toplevels * sizeof (nvlist_t *));
1028 top[toplevels - 1] = nv;
/* A spec consisting of nothing at all is an error. */
1031 if (toplevels == 0 && nspares == 0 && nl2cache == 0) {
1032 (void) fprintf(stderr, gettext("invalid vdev "
1033 "specification: at least one toplevel vdev must be "
/* 'log' keyword with no following devices is an error. */
1038 if (seen_logs && nlogs == 0) {
1039 (void) fprintf(stderr, gettext("invalid vdev specification: "
1040 "log requires at least 1 device\n"));
1045 * Finally, create nvroot and add all top-level vdevs to it.
1047 verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
1048 verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
1049 VDEV_TYPE_ROOT) == 0);
1050 verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
1051 top, toplevels) == 0);
1053 verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1054 spares, nspares) == 0);
1056 verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1057 l2cache, nl2cache) == 0);
/* nvlist_add_* copied everything; free the working arrays' elements. */
1059 for (t = 0; t < toplevels; t++)
1060 nvlist_free(top[t]);
1061 for (t = 0; t < nspares; t++)
1062 nvlist_free(spares[t]);
1063 for (t = 0; t < nl2cache; t++)
1064 nvlist_free(l2cache[t]);
1076 * Get and validate the contents of the given vdev specification. This ensures
1077 * that the nvlist returned is well-formed, that all the devices exist, and that
1078 * they are not currently in use by any other known consumer. The 'poolconfig'
1079 * parameter is the current configuration of the pool when adding devices
1080 * existing pool, and is used to perform additional checks, such as changing the
1081 * replication level of the pool. It can be 'NULL' to indicate that this is a
1082 * new pool. The 'force' flag controls whether devices should be forcefully
1083 * added, even if they appear in use.
1086 make_root_vdev(zpool_handle_t *zhp, int force, int check_rep,
1087 boolean_t isreplacing, boolean_t dryrun, int argc, char **argv)
1090 nvlist_t *poolconfig = NULL;
1094 * Construct the vdev specification. If this is successful, we know
1095 * that we have a valid specification, and that all devices can be
1098 if ((newroot = construct_spec(argc, argv)) == NULL)
1101 if (zhp && ((poolconfig = zpool_get_config(zhp, NULL)) == NULL))
1105 * Validate each device to make sure that its not shared with another
1106 * subsystem. We do this even if 'force' is set, because there are some
1107 * uses (such as a dedicated dump device) that even '-f' cannot
1110 if (check_in_use(poolconfig, newroot, force, isreplacing,
1112 nvlist_free(newroot);
1117 * Check the replication level of the given vdevs and report any errors
1118 * found. We include the existing pool spec, if any, as we need to
1119 * catch changes against the existing replication level.
1121 if (check_rep && check_replication(poolconfig, newroot) != 0) {
1122 nvlist_free(newroot);