/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Functions to convert between a list of vdevs and an nvlist representing the
 * configuration.  Each entry in the list can be one of:
 *
 *	disk=(path=..., devid=...)
 *
 * While the underlying implementation supports it, group vdevs cannot contain
 * other group vdevs.  All userland verification of devices is contained within
 * this file.  If successful, the nvlist returned can be passed directly to the
 * kernel; we've done as much verification as possible in userland.
 *
 * Hot spares are a special case, and passed down as an array of disk vdevs, at
 * the same level as the root of the vdev tree.
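 *
 * For example (hypothetical invocation), a command line such as
 *
 *	zpool create tank mirror da0 da1 spare da2
 *
 * would be described by a tree along the lines of:
 *
 *	root=(mirror=(disk=/dev/da0, disk=/dev/da1)), spares=(disk=/dev/da2)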
 *
 * The only function exported by this file is 'make_root_vdev'.  The function
 * performs several passes:
 *
 *	1. Construct the vdev specification.  Performs syntax validation and
 *	   makes sure each device is valid.
 *	2. Check for devices in use.  Using libdiskmgt, make sure that no
 *	   devices are also in use.  Some can be overridden using the 'force'
 *	   flag, others cannot.
 *	3. Check for replication errors if the 'force' flag is not specified.
 *	   Validates that the replication level is consistent across the
 *	   entire pool.
 */

#include <assert.h>
#include <devid.h>
#include <errno.h>
#include <fcntl.h>
#include <libintl.h>
#include <libnvpair.h>
#include <paths.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/mntent.h>
#include <libgeom.h>

#include <libzfs.h>

#include "zpool_util.h"

/*
 * For any given vdev specification, we can have multiple errors.  The
 * vdev_error() function keeps track of whether we have seen an error yet, and
 * prints out a header if it's the first error we've seen.
 */
boolean_t error_seen;
boolean_t is_force;

/*PRINTFLIKE1*/
static void
vdev_error(const char *fmt, ...)
{
	va_list ap;

	if (!error_seen) {
		(void) fprintf(stderr, gettext("invalid vdev specification\n"));
		if (!is_force)
			(void) fprintf(stderr, gettext("use '-f' to override "
			    "the following errors:\n"));
		else
			(void) fprintf(stderr, gettext("the following errors "
			    "must be manually repaired:\n"));
		error_seen = B_TRUE;
	}

	va_start(ap, fmt);
	(void) vfprintf(stderr, fmt, ap);
	va_end(ap);
}
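
/*
 * Illustrative example: attempting to use a busy disk without '-f' produces
 * a report along these lines:
 *
 *	invalid vdev specification
 *	use '-f' to override the following errors:
 *	da0 is in use (r1w1e1)
 */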

/*
 * Validate a GEOM provider.
 */
static int
check_provider(const char *name, boolean_t force, boolean_t isspare)
{
	struct gmesh mesh;
	struct gclass *mp;
	struct ggeom *gp;
	struct gprovider *pp;
	int rv;

	/* XXX: What to do with isspare? */

	if (strncmp(name, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
		name += sizeof(_PATH_DEV) - 1;

	rv = geom_gettree(&mesh);
	assert(rv == 0);

	/*
	 * Walk the GEOM mesh (class -> geom -> provider) looking for a
	 * provider with the given name.
	 */
	pp = NULL;
	LIST_FOREACH(mp, &mesh.lg_class, lg_class) {
		LIST_FOREACH(gp, &mp->lg_geom, lg_geom) {
			LIST_FOREACH(pp, &gp->lg_provider, lg_provider) {
				if (strcmp(pp->lg_name, name) == 0)
					goto out;
			}
		}
	}
out:
	rv = -1;
	if (pp == NULL)
		vdev_error("no such provider %s\n", name);
	else {
		int acr, acw, ace;

		/*
		 * lg_mode encodes the provider's access counts as readers,
		 * writers and exclusive holds; the provider is free only if
		 * nothing has it open for writing or holds it exclusively.
		 */
		VERIFY(sscanf(pp->lg_mode, "r%dw%de%d", &acr, &acw, &ace) == 3);
		if (acw == 0 && ace == 0)
			rv = 0;
		else
			vdev_error("%s is in use (%s)\n", name, pp->lg_mode);
	}
	geom_deletetree(&mesh);
	return (rv);
}
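
/*
 * Illustrative example: a provider reporting lg_mode "r2w0e0" is open
 * read-only (two readers) and passes the check above, while "r1w1e1" has an
 * active writer and an exclusive hold, so check_provider() rejects it with
 * "... is in use (r1w1e1)".
 */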

/*
 * Return B_TRUE if the given name is a valid GEOM provider, i.e. it can be
 * opened through libgeom.
 */
static boolean_t
is_provider(const char *name)
{
	int fd;

	fd = g_open(name, 0);
	if (fd >= 0) {
		(void) g_close(fd);
		return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Create a leaf vdev.  Determine if this is a GEOM provider.
 * Valid forms for a leaf vdev are:
 *
 *	/dev/xxx	Complete path to a GEOM provider
 *	xxx		Shorthand for /dev/xxx
 */
static nvlist_t *
make_leaf_vdev(const char *arg)
{
	char ident[DISK_IDENT_SIZE], path[MAXPATHLEN];
	struct stat64 statbuf;
	nvlist_t *vdev = NULL;
	char *type = NULL;
	boolean_t wholedisk = B_FALSE;

	/*
	 * Resolve the shorthand form to a full /dev path.
	 */
	if (strncmp(arg, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
		(void) strlcpy(path, arg, sizeof (path));
	else
		(void) snprintf(path, sizeof (path), "%s%s", _PATH_DEV, arg);

	if (is_provider(path))
		type = VDEV_TYPE_DISK;
	else {
		(void) fprintf(stderr, gettext("cannot use '%s': must be a "
		    "GEOM provider\n"), path);
		return (NULL);
	}

	/*
	 * Finally, we have the complete device or file, and we know that it is
	 * acceptable to use.  Construct the nvlist to describe this vdev.  All
	 * vdevs have a 'path' element, and devices also have a 'devid' element.
	 */
	verify(nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) == 0);
	verify(nvlist_add_string(vdev, ZPOOL_CONFIG_PATH, path) == 0);
	verify(nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE, type) == 0);
	if (strcmp(type, VDEV_TYPE_DISK) == 0)
		verify(nvlist_add_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK,
		    (uint64_t)B_FALSE) == 0);

	/*
	 * For a whole disk, defer getting its devid until after labeling it.
	 * Note: the '1 ||' below forces this branch to always be taken, so
	 * the devid is fetched for every device.
	 */
	if (1 || (S_ISBLK(statbuf.st_mode) && !wholedisk)) {
		/*
		 * Get the devid for the device.
		 */
		int fd;
		ddi_devid_t devid;
		char *minor = NULL, *devid_str = NULL;

		if ((fd = open(path, O_RDONLY)) < 0) {
			(void) fprintf(stderr, gettext("cannot open '%s': "
			    "%s\n"), path, strerror(errno));
			nvlist_free(vdev);
			return (NULL);
		}

		if (devid_get(fd, &devid) == 0) {
			if (devid_get_minor_name(fd, &minor) == 0 &&
			    (devid_str = devid_str_encode(devid, minor)) !=
			    NULL) {
				verify(nvlist_add_string(vdev,
				    ZPOOL_CONFIG_DEVID, devid_str) == 0);
			}
			if (devid_str != NULL)
				devid_str_free(devid_str);
			if (minor != NULL)
				devid_str_free(minor);
			devid_free(devid);
		}

		(void) close(fd);
	}

	return (vdev);
}
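
/*
 * Illustrative example: make_leaf_vdev("da0") returns an nvlist roughly of
 * the form
 *
 *	type='disk' path='/dev/da0' whole_disk=0 [devid='...']
 *
 * where the 'devid' element is only present if devid_get() succeeds for the
 * opened device.
 */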

/*
 * Go through and verify the replication level of the pool is consistent.
 * Performs the following checks:
 *
 *	For the new spec, verifies that devices in mirrors and raidz are the
 *	same size.
 *
 *	If the current configuration already has inconsistent replication
 *	levels, ignore any other potential problems in the new spec.
 *
 *	Otherwise, make sure that the current spec (if there is one) and the
 *	new spec have consistent replication levels.
 */
typedef struct replication_level {
	char *zprl_type;
	uint64_t zprl_children;
	uint64_t zprl_parity;
} replication_level_t;
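
/*
 * Illustrative values: a pool built from three 2-way mirrors is described by
 * { zprl_type = "mirror", zprl_children = 2, zprl_parity = 0 }, while a
 * raidz2 of five disks yields { zprl_type = "raidz", zprl_children = 5,
 * zprl_parity = 2 }.
 */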

/*
 * Given a list of toplevel vdevs, return the current replication level.  If
 * the config is inconsistent, then NULL is returned.  If 'fatal' is set, then
 * an error message will be displayed for each self-inconsistent vdev.
 */
replication_level_t *
get_replication(nvlist_t *nvroot, boolean_t fatal)
{
	nvlist_t **top;
	uint_t t, toplevels;
	nvlist_t **child;
	uint_t c, children;
	nvlist_t *nv;
	char *type;
	replication_level_t lastrep, rep, *ret;
	boolean_t dontreport;

	ret = safe_malloc(sizeof (replication_level_t));

	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &top, &toplevels) == 0);

	lastrep.zprl_type = NULL;
	for (t = 0; t < toplevels; t++) {
		nv = top[t];

		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
		if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
		    &child, &children) != 0) {
			/*
			 * This is a 'file' or 'disk' vdev.
			 */
			rep.zprl_type = type;
			rep.zprl_children = 1;
			rep.zprl_parity = 0;
		} else {
			uint64_t vdev_size;

			/*
			 * This is a mirror or RAID-Z vdev.  Go through and make
			 * sure the contents are all the same (files vs. disks),
			 * keeping track of the number of elements in the
			 * process.
			 *
			 * We also check that the size of each vdev (if it can
			 * be determined) is the same.
			 */
			rep.zprl_type = type;
			rep.zprl_children = 0;

			if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
				verify(nvlist_lookup_uint64(nv,
				    ZPOOL_CONFIG_NPARITY,
				    &rep.zprl_parity) == 0);
				assert(rep.zprl_parity != 0);
			} else {
				rep.zprl_parity = 0;
			}

			/*
			 * The 'dontreport' variable indicates that we've
			 * already reported an error for this spec, so don't
			 * bother doing it again.
			 */
			type = NULL;
			dontreport = B_FALSE;
			vdev_size = -1ULL;
			for (c = 0; c < children; c++) {
				nvlist_t *cnv = child[c];
				char *path;
				struct stat64 statbuf;
				uint64_t size = -1ULL;
				char *childtype;
				int fd, err;

				rep.zprl_children++;

				verify(nvlist_lookup_string(cnv,
				    ZPOOL_CONFIG_TYPE, &childtype) == 0);

				/*
				 * If this is a replacing or spare vdev, then
				 * get the real first child of the vdev.
				 */
				if (strcmp(childtype,
				    VDEV_TYPE_REPLACING) == 0 ||
				    strcmp(childtype, VDEV_TYPE_SPARE) == 0) {
					nvlist_t **rchild;
					uint_t rchildren;

					verify(nvlist_lookup_nvlist_array(cnv,
					    ZPOOL_CONFIG_CHILDREN, &rchild,
					    &rchildren) == 0);
					assert(rchildren == 2);
					cnv = rchild[0];

					verify(nvlist_lookup_string(cnv,
					    ZPOOL_CONFIG_TYPE,
					    &childtype) == 0);
				}

				verify(nvlist_lookup_string(cnv,
				    ZPOOL_CONFIG_PATH, &path) == 0);
				/*
				 * If we have a raidz/mirror that combines disks
				 * with files, report it as an error.
				 */
				if (!dontreport && type != NULL &&
				    strcmp(type, childtype) != 0) {
					if (ret != NULL)
						free(ret);
					ret = NULL;
					if (fatal)
						vdev_error(gettext(
						    "mismatched replication "
						    "level: %s contains both "
						    "files and devices\n"),
						    rep.zprl_type);
					else
						return (NULL);
					dontreport = B_TRUE;
				}
				/*
				 * According to stat(2), the value of 'st_size'
				 * is undefined for block devices and character
				 * devices.  But there is no effective way to
				 * determine the real size in userland.
				 *
				 * Instead, we'll take advantage of an
				 * implementation detail of spec_size().  If the
				 * device is currently open, then we (should)
				 * return a valid size.
				 *
				 * If we still don't get a valid size (indicated
				 * by a size of 0 or MAXOFFSET_T), then ignore
				 * this device altogether.
				 */
				if ((fd = open(path, O_RDONLY)) >= 0) {
					err = fstat64(fd, &statbuf);
					(void) close(fd);
				} else {
					err = stat64(path, &statbuf);
				}

				if (err != 0 || statbuf.st_size == 0)
					continue;

				size = statbuf.st_size;
				/*
				 * Also check the size of each device.  If they
				 * differ, then report an error.
				 */
				if (!dontreport && vdev_size != -1ULL &&
				    size != vdev_size) {
					if (ret != NULL)
						free(ret);
					ret = NULL;
					if (fatal)
						vdev_error(gettext(
						    "%s contains devices of "
						    "different sizes\n"),
						    rep.zprl_type);
					else
						return (NULL);
					dontreport = B_TRUE;
				}

				type = childtype;
				vdev_size = size;
			}
		}

		/*
		 * At this point, we have the replication of the last toplevel
		 * vdev in 'rep'.  Compare it to 'lastrep' to see if it is
		 * consistent.
		 */
		if (lastrep.zprl_type != NULL) {
			if (strcmp(lastrep.zprl_type, rep.zprl_type) != 0) {
				if (ret != NULL)
					free(ret);
				ret = NULL;
				if (fatal)
					vdev_error(gettext(
					    "mismatched replication level: "
					    "both %s and %s vdevs are "
					    "present\n"),
					    lastrep.zprl_type, rep.zprl_type);
				else
					return (NULL);
			} else if (lastrep.zprl_parity != rep.zprl_parity) {
				if (ret != NULL)
					free(ret);
				ret = NULL;
				if (fatal)
					vdev_error(gettext(
					    "mismatched replication level: "
					    "both %llu and %llu device parity "
					    "%s vdevs are present\n"),
					    lastrep.zprl_parity,
					    rep.zprl_parity,
					    rep.zprl_type);
				else
					return (NULL);
			} else if (lastrep.zprl_children != rep.zprl_children) {
				if (ret != NULL)
					free(ret);
				ret = NULL;
				if (fatal)
					vdev_error(gettext(
					    "mismatched replication level: "
					    "both %llu-way and %llu-way %s "
					    "vdevs are present\n"),
					    lastrep.zprl_children,
					    rep.zprl_children,
					    rep.zprl_type);
				else
					return (NULL);
			}
		}

		lastrep = rep;
	}

	if (ret != NULL)
		*ret = rep;

	return (ret);
}
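
/*
 * Illustrative example: a root with children (mirror da0 da1) and
 * (mirror da2 da3 da4) is self-inconsistent; get_replication() either
 * reports "both 2-way and 3-way mirror vdevs are present" (when 'fatal' is
 * set) or silently returns NULL.
 */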

/*
 * Check the replication level of the vdev spec against the current pool.  Calls
 * get_replication() to make sure the new spec is self-consistent.  If the pool
 * has a consistent replication level, then we ignore any errors.  Otherwise,
 * report any difference between the two.
 */
static int
check_replication(nvlist_t *config, nvlist_t *newroot)
{
	replication_level_t *current = NULL, *new;
	int ret = 0;

	/*
	 * If we have a current pool configuration, check to see if it's
	 * self-consistent.  If not, simply return success.
	 */
	if (config != NULL) {
		nvlist_t *nvroot;

		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
		if ((current = get_replication(nvroot, B_FALSE)) == NULL)
			return (0);
	}

	/*
	 * Get the replication level of the new vdev spec, reporting any
	 * inconsistencies found.
	 */
	if ((new = get_replication(newroot, B_TRUE)) == NULL) {
		free(current);
		return (-1);
	}

	/*
	 * Check to see if the new vdev spec matches the replication level of
	 * the current pool.
	 */
	if (current != NULL) {
		if (strcmp(current->zprl_type, new->zprl_type) != 0) {
			vdev_error(gettext(
			    "mismatched replication level: pool uses %s "
			    "and new vdev is %s\n"),
			    current->zprl_type, new->zprl_type);
			ret = -1;
		} else if (current->zprl_parity != new->zprl_parity) {
			vdev_error(gettext(
			    "mismatched replication level: pool uses %llu "
			    "device parity and new vdev uses %llu\n"),
			    current->zprl_parity, new->zprl_parity);
			ret = -1;
		} else if (current->zprl_children != new->zprl_children) {
			vdev_error(gettext(
			    "mismatched replication level: pool uses %llu-way "
			    "%s and new vdev uses %llu-way %s\n"),
			    current->zprl_children, current->zprl_type,
			    new->zprl_children, new->zprl_type);
			ret = -1;
		}
	}

	free(current);
	free(new);

	return (ret);
}

/*
 * Determine if the given path is a hot spare within the given configuration.
 */
static boolean_t
is_spare(nvlist_t *config, const char *path)
{
	int fd;
	pool_state_t state;
	char *name = NULL;
	nvlist_t *label;
	uint64_t guid, spareguid;
	nvlist_t *nvroot;
	nvlist_t **spares;
	uint_t i, nspares;
	boolean_t inuse;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (B_FALSE);

	if (zpool_in_use(g_zfs, fd, &state, &name, &inuse) != 0 ||
	    !inuse ||
	    state != POOL_STATE_SPARE ||
	    zpool_read_label(fd, &label) != 0) {
		free(name);
		(void) close(fd);
		return (B_FALSE);
	}
	free(name);
	(void) close(fd);

	verify(nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) == 0);
	nvlist_free(label);

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		for (i = 0; i < nspares; i++) {
			verify(nvlist_lookup_uint64(spares[i],
			    ZPOOL_CONFIG_GUID, &spareguid) == 0);
			if (spareguid == guid)
				return (B_TRUE);
		}
	}

	return (B_FALSE);
}

/*
 * Go through and find any devices that are in use.  We rely on libdiskmgt for
 * the majority of this task.
 */
static int
check_in_use(nvlist_t *config, nvlist_t *nv, int force, int isreplacing,
    boolean_t isspare)
{
	nvlist_t **child;
	uint_t c, children;
	char *type, *path;
	int ret = 0;
	char buf[MAXPATHLEN];

	verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0) {

		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0);

		/*
		 * As a generic check, we look to see if this is a replace of a
		 * hot spare within the same pool.  If so, we allow it
		 * regardless of what libdiskmgt or zpool_in_use() says.
		 */
		if (isreplacing) {
			(void) strlcpy(buf, path, sizeof (buf));
			if (is_spare(config, buf))
				return (0);
		}

		if (strcmp(type, VDEV_TYPE_DISK) == 0)
			ret = check_provider(path, force, isspare);

		return (ret);
	}

	for (c = 0; c < children; c++)
		if ((ret = check_in_use(config, child[c], force,
		    isreplacing, B_FALSE)) != 0)
			return (ret);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0)
		for (c = 0; c < children; c++)
			if ((ret = check_in_use(config, child[c], force,
			    isreplacing, B_TRUE)) != 0)
				return (ret);

	return (0);
}

static const char *
is_grouping(const char *type, int *mindev)
{
	if (strcmp(type, "raidz") == 0 || strcmp(type, "raidz1") == 0) {
		if (mindev != NULL)
			*mindev = 2;
		return (VDEV_TYPE_RAIDZ);
	}

	if (strcmp(type, "raidz2") == 0) {
		if (mindev != NULL)
			*mindev = 3;
		return (VDEV_TYPE_RAIDZ);
	}

	if (strcmp(type, "mirror") == 0) {
		if (mindev != NULL)
			*mindev = 2;
		return (VDEV_TYPE_MIRROR);
	}

	if (strcmp(type, "spare") == 0) {
		if (mindev != NULL)
			*mindev = 1;
		return (VDEV_TYPE_SPARE);
	}

	return (NULL);
}
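
/*
 * Illustrative mappings: "raidz" and "raidz1" yield VDEV_TYPE_RAIDZ with
 * *mindev = 2, "raidz2" yields VDEV_TYPE_RAIDZ with *mindev = 3, "mirror"
 * yields VDEV_TYPE_MIRROR with *mindev = 2, and any other string yields
 * NULL, marking the argument as a leaf device name.
 */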

/*
 * Construct a syntactically valid vdev specification,
 * and ensure that all devices and files exist and can be opened.
 * Note: we don't bother freeing anything in the error paths
 * because the program is just going to exit anyway.
 */
nvlist_t *
construct_spec(int argc, char **argv)
{
	nvlist_t *nvroot, *nv, **top, **spares;
	int t, toplevels, mindev, nspares;
	const char *type;

	top = NULL;
	toplevels = 0;
	spares = NULL;
	nspares = 0;

	while (argc > 0) {
		nv = NULL;

		/*
		 * If it's a mirror or raidz, the subsequent arguments are
		 * its leaves -- until we encounter the next mirror or raidz.
		 */
		if ((type = is_grouping(argv[0], &mindev)) != NULL) {
			nvlist_t **child = NULL;
			int c, children = 0;

			if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
			    spares != NULL) {
				(void) fprintf(stderr, gettext("invalid vdev "
				    "specification: 'spare' can be "
				    "specified only once\n"));
				return (NULL);
			}

			for (c = 1; c < argc; c++) {
				if (is_grouping(argv[c], NULL) != NULL)
					break;
				children++;
				child = realloc(child,
				    children * sizeof (nvlist_t *));
				if (child == NULL)
					zpool_no_memory();
				if ((nv = make_leaf_vdev(argv[c])) == NULL)
					return (NULL);
				child[children - 1] = nv;
			}

			if (children < mindev) {
				(void) fprintf(stderr, gettext("invalid vdev "
				    "specification: %s requires at least %d "
				    "devices\n"), argv[0], mindev);
				return (NULL);
			}

			argc -= c;
			argv += c;

			if (strcmp(type, VDEV_TYPE_SPARE) == 0) {
				spares = child;
				nspares = children;
				continue;
			} else {
				verify(nvlist_alloc(&nv, NV_UNIQUE_NAME,
				    0) == 0);
				verify(nvlist_add_string(nv, ZPOOL_CONFIG_TYPE,
				    type) == 0);
				if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
					verify(nvlist_add_uint64(nv,
					    ZPOOL_CONFIG_NPARITY,
					    mindev - 1) == 0);
				}
				verify(nvlist_add_nvlist_array(nv,
				    ZPOOL_CONFIG_CHILDREN, child,
				    children) == 0);

				for (c = 0; c < children; c++)
					nvlist_free(child[c]);
				free(child);
			}
		} else {
			/*
			 * We have a device.  Pass off to make_leaf_vdev() to
			 * construct the appropriate nvlist describing the vdev.
			 */
			if ((nv = make_leaf_vdev(argv[0])) == NULL)
				return (NULL);
			argc--;
			argv++;
		}

		toplevels++;
		top = realloc(top, toplevels * sizeof (nvlist_t *));
		if (top == NULL)
			zpool_no_memory();
		top[toplevels - 1] = nv;
	}

	if (toplevels == 0 && nspares == 0) {
		(void) fprintf(stderr, gettext("invalid vdev "
		    "specification: at least one toplevel vdev must be "
		    "specified\n"));
		return (NULL);
	}

	/*
	 * Finally, create nvroot and add all top-level vdevs to it.
	 */
	verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
	verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT) == 0);
	verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    top, toplevels) == 0);
	if (nspares != 0)
		verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    spares, nspares) == 0);

	for (t = 0; t < toplevels; t++)
		nvlist_free(top[t]);
	for (t = 0; t < nspares; t++)
		nvlist_free(spares[t]);
	free(top);
	free(spares);

	return (nvroot);
}
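
/*
 * Illustrative example: the argument list
 *
 *	raidz da0 da1 da2 mirror da3 da4
 *
 * produces two toplevel vdevs: a single-parity raidz of three disks
 * (nparity = mindev - 1 = 1) and a 2-way mirror.
 */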

/*
 * Get and validate the contents of the given vdev specification.  This ensures
 * that the nvlist returned is well-formed, that all the devices exist, and that
 * they are not currently in use by any other known consumer.  The 'poolconfig'
 * parameter is the current configuration of the pool when adding devices to an
 * existing pool, and is used to perform additional checks, such as changing the
 * replication level of the pool.  It can be 'NULL' to indicate that this is a
 * new pool.  The 'force' flag controls whether devices should be forcefully
 * added, even if they appear in use.
 */
nvlist_t *
make_root_vdev(nvlist_t *poolconfig, int force, int check_rep,
    boolean_t isreplacing, int argc, char **argv)
{
	nvlist_t *newroot;

	/*
	 * Construct the vdev specification.  If this is successful, we know
	 * that we have a valid specification, and that all devices can be
	 * opened.
	 */
	if ((newroot = construct_spec(argc, argv)) == NULL)
		return (NULL);

	/*
	 * Validate each device to make sure that it's not shared with another
	 * subsystem.  We do this even if 'force' is set, because there are
	 * some uses (such as a dedicated dump device) that even '-f' cannot
	 * override.
	 */
	if (check_in_use(poolconfig, newroot, force, isreplacing,
	    B_FALSE) != 0) {
		nvlist_free(newroot);
		return (NULL);
	}

	/*
	 * Check the replication level of the given vdevs and report any errors
	 * found.  We include the existing pool spec, if any, as we need to
	 * catch changes against the existing replication level.
	 */
	if (check_rep && check_replication(poolconfig, newroot) != 0) {
		nvlist_free(newroot);
		return (NULL);
	}

	return (newroot);
}
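
/*
 * Usage sketch (not part of this file): 'zpool create' would build its pool
 * nvlist with something like
 *
 *	newroot = make_root_vdev(NULL, force, B_TRUE, B_FALSE, argc, argv);
 *
 * while 'zpool add' passes the existing pool configuration instead of NULL
 * so that changes in replication level are caught.
 */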