/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident   "%Z%%M% %I%     %E% SMI"

/*
 * Functions to convert between a list of vdevs and an nvlist representing the
 * configuration.  Each entry in the list can be one of:
 *
 *      Device vdevs
 *              disk=(path=..., devid=...)
 *              file=(path=...)
 *
 *      Group vdevs
 *              raidz[1|2]=(...)
 *              mirror=(...)
 *
 *      Hot spares
 *
 * While the underlying implementation supports it, group vdevs cannot contain
 * other group vdevs.  All userland verification of devices is contained within
 * this file.  If successful, the nvlist returned can be passed directly to the
 * kernel; we've done as much verification as possible in userland.
 *
 * Hot spares are a special case, and are passed down as an array of disk
 * vdevs, at the same level as the root of the vdev tree.
 *
 * The primary function exported by this file is 'make_root_vdev'.  The
 * function performs several passes:
 *
 *      1. Construct the vdev specification.  Performs syntax validation and
 *         makes sure each device is valid.
 *      2. Check for devices in use.  Using GEOM, makes sure that no devices
 *         are already in use.  Some errors can be overridden using the
 *         'force' flag, others cannot.
 *      3. Check for replication errors if the 'force' flag is not specified.
 *         Validates that the replication level is consistent across the
 *         entire pool.
 */

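/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * for a command line such as
 *
 *      zpool create tank mirror da0 da1 spare da2
 *
 * the nvlist built by this file has roughly the following shape (device
 * names are hypothetical):
 *
 *      root
 *       |- children[0] = mirror
 *       |       |- children[0] = disk (path=/dev/da0)
 *       |       `- children[1] = disk (path=/dev/da1)
 *       `- spares[0] = disk (path=/dev/da2)
 */
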
#include <assert.h>
#include <devid.h>
#include <errno.h>
#include <fcntl.h>
#include <libintl.h>
#include <libnvpair.h>
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <paths.h>
#include <sys/stat.h>
#include <sys/disk.h>
#include <sys/mntent.h>
#include <libgeom.h>

#include <libzfs.h>

#include "zpool_util.h"

/*
 * For any given vdev specification, we can have multiple errors.  The
 * vdev_error() function keeps track of whether we have seen an error yet, and
 * prints out a header if it's the first error we've seen.
 */
boolean_t error_seen;
boolean_t is_force;

/*PRINTFLIKE1*/
static void
vdev_error(const char *fmt, ...)
{
        va_list ap;

        if (!error_seen) {
                (void) fprintf(stderr, gettext("invalid vdev specification\n"));
                if (!is_force)
                        (void) fprintf(stderr, gettext("use '-f' to override "
                            "the following errors:\n"));
                else
                        (void) fprintf(stderr, gettext("the following errors "
                            "must be manually repaired:\n"));
                error_seen = B_TRUE;
        }

        va_start(ap, fmt);
        (void) vfprintf(stderr, fmt, ap);
        va_end(ap);
}

/*
 * Validate a GEOM provider.
 */
static int
check_provider(const char *name, boolean_t force, boolean_t isspare)
{
        struct gmesh mesh;
        struct gclass *mp;
        struct ggeom *gp;
        struct gprovider *pp;
        int rv;

        /* XXX: What to do with isspare? */

        if (strncmp(name, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
                name += sizeof(_PATH_DEV) - 1;

        rv = geom_gettree(&mesh);
        assert(rv == 0);

        pp = NULL;
        LIST_FOREACH(mp, &mesh.lg_class, lg_class) {
                LIST_FOREACH(gp, &mp->lg_geom, lg_geom) {
                        LIST_FOREACH(pp, &gp->lg_provider, lg_provider) {
                                if (strcmp(pp->lg_name, name) == 0)
                                        goto out;
                        }
                }
        }
out:
        rv = -1;
        if (pp == NULL)
                vdev_error("no such provider %s\n", name);
        else {
                int acr, acw, ace;

                VERIFY(sscanf(pp->lg_mode, "r%dw%de%d", &acr, &acw, &ace) == 3);
                if (acw == 0 && ace == 0)
                        rv = 0;
                else
                        vdev_error("%s is in use (%s)\n", name, pp->lg_mode);
        }
        geom_deletetree(&mesh);
        return (rv);
}
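
/*
 * Editorial note (not part of the original source): a GEOM provider's access
 * mode is a string of the form "r<readers>w<writers>e<exclusive>", e.g.
 * "r0w0e0" for a completely unopened provider.  check_provider() above treats
 * a provider as available only when both the writer and exclusive counts are
 * zero, so a provider that is merely open read-only elsewhere is still
 * accepted.
 */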

static boolean_t
is_provider(const char *name)
{
        int fd;

        fd = g_open(name, 0);
        if (fd >= 0) {
                g_close(fd);
                return (B_TRUE);
        }
        return (B_FALSE);
}

/*
 * Create a leaf vdev.  Determine if this is a GEOM provider.
 * Valid forms for a leaf vdev are:
 *
 *      /dev/xxx        Complete path to a GEOM provider
 *      xxx             Shorthand for /dev/xxx
 */
nvlist_t *
make_leaf_vdev(const char *arg)
{
        char ident[DISK_IDENT_SIZE], path[MAXPATHLEN];
        struct stat64 statbuf;
        nvlist_t *vdev = NULL;
        char *type = NULL;
        boolean_t wholedisk = B_FALSE;

        if (strncmp(arg, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
                strlcpy(path, arg, sizeof (path));
        else
                snprintf(path, sizeof (path), "%s%s", _PATH_DEV, arg);

        if (is_provider(path))
                type = VDEV_TYPE_DISK;
        else {
                (void) fprintf(stderr, gettext("cannot use '%s': must be a "
                    "GEOM provider\n"), path);
                return (NULL);
        }

        /*
         * Finally, we have the complete device or file, and we know that it is
         * acceptable to use.  Construct the nvlist to describe this vdev.  All
         * vdevs have a 'path' element, and devices also have a 'devid' element.
         */
        verify(nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) == 0);
        verify(nvlist_add_string(vdev, ZPOOL_CONFIG_PATH, path) == 0);
        verify(nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE, type) == 0);
        if (strcmp(type, VDEV_TYPE_DISK) == 0)
                verify(nvlist_add_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK,
                    (uint64_t)B_FALSE) == 0);

        /*
         * For a whole disk, defer getting its devid until after labeling it.
         * Note: on FreeBSD the condition below is hardwired true, so the
         * devid is always fetched here; 'statbuf' and 'wholedisk' are never
         * examined.
         */
        if (1 || (S_ISBLK(statbuf.st_mode) && !wholedisk)) {
                /*
                 * Get the devid for the device.
                 */
                int fd;
                ddi_devid_t devid;
                char *minor = NULL, *devid_str = NULL;

                if ((fd = open(path, O_RDONLY)) < 0) {
                        (void) fprintf(stderr, gettext("cannot open '%s': "
                            "%s\n"), path, strerror(errno));
                        nvlist_free(vdev);
                        return (NULL);
                }

                if (devid_get(fd, &devid) == 0) {
                        if (devid_get_minor_name(fd, &minor) == 0 &&
                            (devid_str = devid_str_encode(devid, minor)) !=
                            NULL) {
                                verify(nvlist_add_string(vdev,
                                    ZPOOL_CONFIG_DEVID, devid_str) == 0);
                        }
                        if (devid_str != NULL)
                                devid_str_free(devid_str);
                        if (minor != NULL)
                                devid_str_free(minor);
                        devid_free(devid);
                }

                (void) close(fd);
        }

        return (vdev);
}

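/*
 * Illustrative sketch (editorial addition): how make_leaf_vdev() above is
 * typically consumed.  The provider name "da0" is hypothetical.
 */
#if 0
static void
example_make_leaf_vdev(void)
{
        nvlist_t *vd;

        /* A bare name such as "da0" is expanded to "/dev/da0" first. */
        if ((vd = make_leaf_vdev("da0")) != NULL) {
                /* vd now carries ZPOOL_CONFIG_PATH, _TYPE and, if known, _DEVID. */
                nvlist_free(vd);
        }
}
#endif
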
/*
 * Go through and verify that the replication level of the pool is consistent.
 * Performs the following checks:
 *
 *      For the new spec, verifies that devices in mirrors and raidz are the
 *      same size.
 *
 *      If the current configuration already has inconsistent replication
 *      levels, ignore any other potential problems in the new spec.
 *
 *      Otherwise, make sure that the current spec (if there is one) and the new
 *      spec have consistent replication levels.
 */
typedef struct replication_level {
        char *zprl_type;
        uint64_t zprl_children;
        uint64_t zprl_parity;
} replication_level_t;
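
/*
 * Illustrative example (editorial addition): a toplevel vdev created from
 * "raidz2 da0 da1 da2 da3 da4" is summarized as zprl_type = "raidz",
 * zprl_children = 5, zprl_parity = 2; a two-way mirror is zprl_type =
 * "mirror", zprl_children = 2, zprl_parity = 0.  Device names are
 * hypothetical.
 */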

/*
 * Given a list of toplevel vdevs, return the current replication level.  If
 * the config is inconsistent, then NULL is returned.  If 'fatal' is set, then
 * an error message will be displayed for each self-inconsistent vdev.
 */
replication_level_t *
get_replication(nvlist_t *nvroot, boolean_t fatal)
{
        nvlist_t **top;
        uint_t t, toplevels;
        nvlist_t **child;
        uint_t c, children;
        nvlist_t *nv;
        char *type;
        replication_level_t lastrep, rep, *ret;
        boolean_t dontreport;

        ret = safe_malloc(sizeof (replication_level_t));

        verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
            &top, &toplevels) == 0);

        lastrep.zprl_type = NULL;
        for (t = 0; t < toplevels; t++) {
                nv = top[t];

                verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);

                if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
                    &child, &children) != 0) {
                        /*
                         * This is a 'file' or 'disk' vdev.
                         */
                        rep.zprl_type = type;
                        rep.zprl_children = 1;
                        rep.zprl_parity = 0;
                } else {
                        uint64_t vdev_size;

                        /*
                         * This is a mirror or RAID-Z vdev.  Go through and make
                         * sure the contents are all the same (files vs. disks),
                         * keeping track of the number of elements in the
                         * process.
                         *
                         * We also check that the size of each vdev (if it can
                         * be determined) is the same.
                         */
                        rep.zprl_type = type;
                        rep.zprl_children = 0;

                        if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
                                verify(nvlist_lookup_uint64(nv,
                                    ZPOOL_CONFIG_NPARITY,
                                    &rep.zprl_parity) == 0);
                                assert(rep.zprl_parity != 0);
                        } else {
                                rep.zprl_parity = 0;
                        }

                        /*
                         * The 'dontreport' variable indicates that we've
                         * already reported an error for this spec, so don't
                         * bother doing it again.
                         */
                        type = NULL;
                        dontreport = 0;
                        vdev_size = -1ULL;
                        for (c = 0; c < children; c++) {
                                nvlist_t *cnv = child[c];
                                char *path;
                                struct stat64 statbuf;
                                uint64_t size = -1ULL;
                                char *childtype;
                                int fd, err;

                                rep.zprl_children++;

                                verify(nvlist_lookup_string(cnv,
                                    ZPOOL_CONFIG_TYPE, &childtype) == 0);

                                /*
                                 * If this is a replacing or spare vdev, then
                                 * get the real first child of the vdev.
                                 */
                                if (strcmp(childtype,
                                    VDEV_TYPE_REPLACING) == 0 ||
                                    strcmp(childtype, VDEV_TYPE_SPARE) == 0) {
                                        nvlist_t **rchild;
                                        uint_t rchildren;

                                        verify(nvlist_lookup_nvlist_array(cnv,
                                            ZPOOL_CONFIG_CHILDREN, &rchild,
                                            &rchildren) == 0);
                                        assert(rchildren == 2);
                                        cnv = rchild[0];

                                        verify(nvlist_lookup_string(cnv,
                                            ZPOOL_CONFIG_TYPE,
                                            &childtype) == 0);
                                }

                                verify(nvlist_lookup_string(cnv,
                                    ZPOOL_CONFIG_PATH, &path) == 0);

                                /*
                                 * If we have a raidz/mirror that combines disks
                                 * with files, report it as an error.
                                 */
                                if (!dontreport && type != NULL &&
                                    strcmp(type, childtype) != 0) {
                                        if (ret != NULL)
                                                free(ret);
                                        ret = NULL;
                                        if (fatal)
                                                vdev_error(gettext(
                                                    "mismatched replication "
                                                    "level: %s contains both "
                                                    "files and devices\n"),
                                                    rep.zprl_type);
                                        else
                                                return (NULL);
                                        dontreport = B_TRUE;
                                }

                                /*
                                 * According to stat(2), the value of 'st_size'
                                 * is undefined for block devices and character
                                 * devices.  But there is no effective way to
                                 * determine the real size in userland.
                                 *
                                 * Instead, we'll take advantage of an
                                 * implementation detail of spec_size().  If the
                                 * device is currently open, then we (should)
                                 * return a valid size.
                                 *
                                 * If we still don't get a valid size (indicated
                                 * by a size of 0 or MAXOFFSET_T), then ignore
                                 * this device altogether.
                                 */
                                if ((fd = open(path, O_RDONLY)) >= 0) {
                                        err = fstat64(fd, &statbuf);
                                        (void) close(fd);
                                } else {
                                        err = stat64(path, &statbuf);
                                }

                                if (err != 0 || statbuf.st_size == 0)
                                        continue;

                                size = statbuf.st_size;

                                /*
                                 * Also check the size of each device.  If they
                                 * differ, then report an error.
                                 */
                                if (!dontreport && vdev_size != -1ULL &&
                                    size != vdev_size) {
                                        if (ret != NULL)
                                                free(ret);
                                        ret = NULL;
                                        if (fatal)
                                                vdev_error(gettext(
                                                    "%s contains devices of "
                                                    "different sizes\n"),
                                                    rep.zprl_type);
                                        else
                                                return (NULL);
                                        dontreport = B_TRUE;
                                }

                                type = childtype;
                                vdev_size = size;
                        }
                }

                /*
                 * At this point, we have the replication of the last toplevel
                 * vdev in 'rep'.  Compare it to 'lastrep' to see if it's
                 * different.
                 */
                if (lastrep.zprl_type != NULL) {
                        if (strcmp(lastrep.zprl_type, rep.zprl_type) != 0) {
                                if (ret != NULL)
                                        free(ret);
                                ret = NULL;
                                if (fatal)
                                        vdev_error(gettext(
                                            "mismatched replication level: "
                                            "both %s and %s vdevs are "
                                            "present\n"),
                                            lastrep.zprl_type, rep.zprl_type);
                                else
                                        return (NULL);
                        } else if (lastrep.zprl_parity != rep.zprl_parity) {
                                if (ret)
                                        free(ret);
                                ret = NULL;
                                if (fatal)
                                        vdev_error(gettext(
                                            "mismatched replication level: "
                                            "both %llu and %llu device parity "
                                            "%s vdevs are present\n"),
                                            lastrep.zprl_parity,
                                            rep.zprl_parity,
                                            rep.zprl_type);
                                else
                                        return (NULL);
                        } else if (lastrep.zprl_children != rep.zprl_children) {
                                if (ret)
                                        free(ret);
                                ret = NULL;
                                if (fatal)
                                        vdev_error(gettext(
                                            "mismatched replication level: "
                                            "both %llu-way and %llu-way %s "
                                            "vdevs are present\n"),
                                            lastrep.zprl_children,
                                            rep.zprl_children,
                                            rep.zprl_type);
                                else
                                        return (NULL);
                        }
                }
                lastrep = rep;
        }

        if (ret != NULL)
                *ret = rep;

        return (ret);
}

/*
 * Check the replication level of the vdev spec against the current pool.  Calls
 * get_replication() to make sure the new spec is self-consistent.  If the pool
 * already has an inconsistent replication level, we ignore any errors in the
 * new spec.  Otherwise, report any difference between the two.
 */
int
check_replication(nvlist_t *config, nvlist_t *newroot)
{
        replication_level_t *current = NULL, *new;
        int ret;

        /*
         * If we have a current pool configuration, check to see if it's
         * self-consistent.  If not, simply return success.
         */
        if (config != NULL) {
                nvlist_t *nvroot;

                verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
                    &nvroot) == 0);
                if ((current = get_replication(nvroot, B_FALSE)) == NULL)
                        return (0);
        }

        /*
         * Get the replication level of the new vdev spec, reporting any
         * inconsistencies found.
         */
        if ((new = get_replication(newroot, B_TRUE)) == NULL) {
                free(current);
                return (-1);
        }

        /*
         * Check to see if the new vdev spec matches the replication level of
         * the current pool.
         */
        ret = 0;
        if (current != NULL) {
                if (strcmp(current->zprl_type, new->zprl_type) != 0) {
                        vdev_error(gettext(
                            "mismatched replication level: pool uses %s "
                            "and new vdev is %s\n"),
                            current->zprl_type, new->zprl_type);
                        ret = -1;
                } else if (current->zprl_parity != new->zprl_parity) {
                        vdev_error(gettext(
                            "mismatched replication level: pool uses %llu "
                            "device parity and new vdev uses %llu\n"),
                            current->zprl_parity, new->zprl_parity);
                        ret = -1;
                } else if (current->zprl_children != new->zprl_children) {
                        vdev_error(gettext(
                            "mismatched replication level: pool uses %llu-way "
                            "%s and new vdev uses %llu-way %s\n"),
                            current->zprl_children, current->zprl_type,
                            new->zprl_children, new->zprl_type);
                        ret = -1;
                }
        }

        free(new);
        if (current != NULL)
                free(current);

        return (ret);
}

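/*
 * Illustrative example (editorial addition): attempting to extend a mirrored
 * pool with a single bare disk, e.g. "zpool add tank da3", trips the first
 * check above ("pool uses mirror and new vdev is disk") unless -f is given.
 * The pool and device names are hypothetical.
 */
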
/*
 * Determine if the given path is a hot spare within the given configuration.
 */
static boolean_t
is_spare(nvlist_t *config, const char *path)
{
        int fd;
        pool_state_t state;
        char *name = NULL;
        nvlist_t *label;
        uint64_t guid, spareguid;
        nvlist_t *nvroot;
        nvlist_t **spares;
        uint_t i, nspares;
        boolean_t inuse;

        if ((fd = open(path, O_RDONLY)) < 0)
                return (B_FALSE);

        if (zpool_in_use(g_zfs, fd, &state, &name, &inuse) != 0 ||
            !inuse ||
            state != POOL_STATE_SPARE ||
            zpool_read_label(fd, &label) != 0) {
                free(name);
                (void) close(fd);
                return (B_FALSE);
        }
        free(name);

        (void) close(fd);
        verify(nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) == 0);
        nvlist_free(label);

        verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
            &nvroot) == 0);
        if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
            &spares, &nspares) == 0) {
                for (i = 0; i < nspares; i++) {
                        verify(nvlist_lookup_uint64(spares[i],
                            ZPOOL_CONFIG_GUID, &spareguid) == 0);
                        if (spareguid == guid)
                                return (B_TRUE);
                }
        }

        return (B_FALSE);
}

/*
 * Go through and find any devices that are in use.  We rely on GEOM for the
 * majority of this task.
 */
int
check_in_use(nvlist_t *config, nvlist_t *nv, int force, int isreplacing,
    int isspare)
{
        nvlist_t **child;
        uint_t c, children;
        char *type, *path;
        int ret = 0;
        char buf[MAXPATHLEN];
        uint64_t wholedisk;

        verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);

        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
            &child, &children) != 0) {

                verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0);

                /*
                 * As a generic check, we look to see if this is a replace of a
                 * hot spare within the same pool.  If so, we allow it
                 * regardless of what the GEOM checks or zpool_in_use() say.
                 */
                if (isreplacing) {
                        (void) strlcpy(buf, path, sizeof (buf));
                        if (is_spare(config, buf))
                                return (0);
                }

                /*
                 * 'ret' is initialized to 0 above so that non-disk leaves do
                 * not return an indeterminate value here.
                 */
                if (strcmp(type, VDEV_TYPE_DISK) == 0)
                        ret = check_provider(path, force, isspare);

                return (ret);
        }

        for (c = 0; c < children; c++)
                if ((ret = check_in_use(config, child[c], force,
                    isreplacing, B_FALSE)) != 0)
                        return (ret);

        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
            &child, &children) == 0)
                for (c = 0; c < children; c++)
                        if ((ret = check_in_use(config, child[c], force,
                            isreplacing, B_TRUE)) != 0)
                                return (ret);

        return (0);
}

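/*
 * Map a grouping keyword from the command line ("mirror", "raidz"/"raidz1",
 * "raidz2", "spare") to the corresponding vdev type, reporting the minimum
 * number of devices that grouping requires via 'mindev'.  Returns NULL if
 * 'type' is not a grouping keyword.
 */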
const char *
is_grouping(const char *type, int *mindev)
{
        if (strcmp(type, "raidz") == 0 || strcmp(type, "raidz1") == 0) {
                if (mindev != NULL)
                        *mindev = 2;
                return (VDEV_TYPE_RAIDZ);
        }

        if (strcmp(type, "raidz2") == 0) {
                if (mindev != NULL)
                        *mindev = 3;
                return (VDEV_TYPE_RAIDZ);
        }

        if (strcmp(type, "mirror") == 0) {
                if (mindev != NULL)
                        *mindev = 2;
                return (VDEV_TYPE_MIRROR);
        }

        if (strcmp(type, "spare") == 0) {
                if (mindev != NULL)
                        *mindev = 1;
                return (VDEV_TYPE_SPARE);
        }

        return (NULL);
}

/*
 * Construct a syntactically valid vdev specification,
 * and ensure that all devices and files exist and can be opened.
 * Note: we don't bother freeing anything in the error paths
 * because the program is just going to exit anyway.
 */
nvlist_t *
construct_spec(int argc, char **argv)
{
        nvlist_t *nvroot, *nv, **top, **spares;
        int t, toplevels, mindev, nspares;
        const char *type;

        top = NULL;
        toplevels = 0;
        spares = NULL;
        nspares = 0;

        while (argc > 0) {
                nv = NULL;

                /*
                 * If it's a mirror or raidz, the subsequent arguments are
                 * its leaves -- until we encounter the next mirror or raidz.
                 */
                if ((type = is_grouping(argv[0], &mindev)) != NULL) {
                        nvlist_t **child = NULL;
                        int c, children = 0;

                        if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
                            spares != NULL) {
                                (void) fprintf(stderr, gettext("invalid vdev "
                                    "specification: 'spare' can be "
                                    "specified only once\n"));
                                return (NULL);
                        }

                        for (c = 1; c < argc; c++) {
                                if (is_grouping(argv[c], NULL) != NULL)
                                        break;
                                children++;
                                child = realloc(child,
                                    children * sizeof (nvlist_t *));
                                if (child == NULL)
                                        zpool_no_memory();
                                if ((nv = make_leaf_vdev(argv[c])) == NULL)
                                        return (NULL);
                                child[children - 1] = nv;
                        }

                        if (children < mindev) {
                                (void) fprintf(stderr, gettext("invalid vdev "
                                    "specification: %s requires at least %d "
                                    "devices\n"), argv[0], mindev);
                                return (NULL);
                        }

                        argc -= c;
                        argv += c;

                        if (strcmp(type, VDEV_TYPE_SPARE) == 0) {
                                spares = child;
                                nspares = children;
                                continue;
                        } else {
                                verify(nvlist_alloc(&nv, NV_UNIQUE_NAME,
                                    0) == 0);
                                verify(nvlist_add_string(nv, ZPOOL_CONFIG_TYPE,
                                    type) == 0);
                                if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
                                        verify(nvlist_add_uint64(nv,
                                            ZPOOL_CONFIG_NPARITY,
                                            mindev - 1) == 0);
                                }
                                verify(nvlist_add_nvlist_array(nv,
                                    ZPOOL_CONFIG_CHILDREN, child,
                                    children) == 0);

                                for (c = 0; c < children; c++)
                                        nvlist_free(child[c]);
                                free(child);
                        }
                } else {
                        /*
                         * We have a device.  Pass off to make_leaf_vdev() to
                         * construct the appropriate nvlist describing the vdev.
                         */
                        if ((nv = make_leaf_vdev(argv[0])) == NULL)
                                return (NULL);
                        argc--;
                        argv++;
                }

                toplevels++;
                top = realloc(top, toplevels * sizeof (nvlist_t *));
                if (top == NULL)
                        zpool_no_memory();
                top[toplevels - 1] = nv;
        }

        if (toplevels == 0 && nspares == 0) {
                (void) fprintf(stderr, gettext("invalid vdev "
                    "specification: at least one toplevel vdev must be "
                    "specified\n"));
                return (NULL);
        }

        /*
         * Finally, create nvroot and add all top-level vdevs to it.
         */
        verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
        verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
            VDEV_TYPE_ROOT) == 0);
        verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
            top, toplevels) == 0);
        if (nspares != 0)
                verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
                    spares, nspares) == 0);

        for (t = 0; t < toplevels; t++)
                nvlist_free(top[t]);
        for (t = 0; t < nspares; t++)
                nvlist_free(spares[t]);
        if (spares)
                free(spares);
        free(top);

        return (nvroot);
}

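/*
 * Illustrative example (editorial addition): the argument vector
 * "mirror da0 da1 mirror da2 da3" is split at each grouping keyword, so
 * construct_spec() above produces two toplevel mirror vdevs of two disks
 * each.  Device names are hypothetical.
 */
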
/*
 * Get and validate the contents of the given vdev specification.  This ensures
 * that the nvlist returned is well-formed, that all the devices exist, and that
 * they are not currently in use by any other known consumer.  The 'poolconfig'
 * parameter is the current configuration of the pool when adding devices to an
 * existing pool, and is used to perform additional checks, such as detecting a
 * change in the replication level of the pool.  It can be 'NULL' to indicate
 * that this is a new pool.  The 'force' flag controls whether devices should
 * be forcefully added, even if they appear in use.
 */
nvlist_t *
make_root_vdev(nvlist_t *poolconfig, int force, int check_rep,
    boolean_t isreplacing, int argc, char **argv)
{
        nvlist_t *newroot;

        is_force = force;

        /*
         * Construct the vdev specification.  If this is successful, we know
         * that we have a valid specification, and that all devices can be
         * opened.
         */
        if ((newroot = construct_spec(argc, argv)) == NULL)
                return (NULL);

        /*
         * Validate each device to make sure that it's not shared with another
         * subsystem.  We do this even if 'force' is set, because there are some
         * uses (such as a dedicated dump device) that even '-f' cannot
         * override.
         */
        if (check_in_use(poolconfig, newroot, force, isreplacing,
            B_FALSE) != 0) {
                nvlist_free(newroot);
                return (NULL);
        }

        /*
         * Check the replication level of the given vdevs and report any errors
         * found.  We include the existing pool spec, if any, as we need to
         * catch changes against the existing replication level.
         */
        if (check_rep && check_replication(poolconfig, newroot) != 0) {
                nvlist_free(newroot);
                return (NULL);
        }

        return (newroot);
}
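
/*
 * Illustrative sketch (editorial addition): how zpool(8) subcommands are
 * expected to drive make_root_vdev() above.  The argument values are
 * hypothetical.
 */
#if 0
static nvlist_t *
example_make_root_vdev(int argc, char **argv)
{
        /*
         * argv would be something like { "mirror", "da0", "da1" }.  Passing
         * NULL for the pool config means a new pool is being created; the
         * force and check_rep arguments mirror the -f flag and the
         * replication-consistency check.
         */
        return (make_root_vdev(NULL, B_FALSE, B_TRUE, B_FALSE, argc, argv));
}
#endif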