1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21
22 /*
23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26
27 #pragma ident   "%Z%%M% %I%     %E% SMI"
28
29 #include <sys/types.h>
30 #include <sys/stat.h>
31 #include <assert.h>
32 #include <ctype.h>
33 #include <errno.h>
34 #include <devid.h>
35 #include <dirent.h>
36 #include <fcntl.h>
37 #include <libintl.h>
38 #include <stdio.h>
39 #include <stdlib.h>
40 #include <strings.h>
41 #include <unistd.h>
42 #include <sys/zfs_ioctl.h>
43 #include <sys/zio.h>
44 #include <strings.h>
45 #include <umem.h>
46
47 #include "zfs_namecheck.h"
48 #include "zfs_prop.h"
49 #include "libzfs_impl.h"
50
51 /*
52  * Validate the given pool name, optionally recording an extended error
53  * message via 'hdl'.
54  */
55 static boolean_t
56 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
57 {
58         namecheck_err_t why;
59         char what;
60         int ret;
61
62         ret = pool_namecheck(pool, &why, &what);
63
64         /*
65          * The rules for reserved pool names were extended at a later point.
66          * But we need to support users with existing pools that may now be
67          * invalid.  So we only check for this expanded set of names during a
68          * create (or import), and only in userland.
69          */
70         if (ret == 0 && !isopen &&
71             (strncmp(pool, "mirror", 6) == 0 ||
72             strncmp(pool, "raidz", 5) == 0 ||
73             strncmp(pool, "spare", 5) == 0)) {
74                 zfs_error_aux(hdl,
75                     dgettext(TEXT_DOMAIN, "name is reserved"));
76                 return (B_FALSE);
77         }
78
79
80         if (ret != 0) {
81                 if (hdl != NULL) {
82                         switch (why) {
83                         case NAME_ERR_TOOLONG:
84                                 zfs_error_aux(hdl,
85                                     dgettext(TEXT_DOMAIN, "name is too long"));
86                                 break;
87
88                         case NAME_ERR_INVALCHAR:
89                                 zfs_error_aux(hdl,
90                                     dgettext(TEXT_DOMAIN, "invalid character "
91                                     "'%c' in pool name"), what);
92                                 break;
93
94                         case NAME_ERR_NOLETTER:
95                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
96                                     "name must begin with a letter"));
97                                 break;
98
99                         case NAME_ERR_RESERVED:
100                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
101                                     "name is reserved"));
102                                 break;
103
104                         case NAME_ERR_DISKLIKE:
105                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
106                                     "pool name is reserved"));
107                                 break;
108
109                         case NAME_ERR_LEADING_SLASH:
110                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
111                                     "leading slash in name"));
112                                 break;
113
114                         case NAME_ERR_EMPTY_COMPONENT:
115                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
116                                     "empty component in name"));
117                                 break;
118
119                         case NAME_ERR_TRAILING_SLASH:
120                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
121                                     "trailing slash in name"));
122                                 break;
123
124                         case NAME_ERR_MULTIPLE_AT:
125                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
126                                     "multiple '@' delimiters in name"));
127                                 break;
128
129                         }
130                 }
131                 return (B_FALSE);
132         }
133
134         return (B_TRUE);
135 }
136
137 static int
138 zpool_get_all_props(zpool_handle_t *zhp)
139 {
140         zfs_cmd_t zc = { 0 };
141         libzfs_handle_t *hdl = zhp->zpool_hdl;
142
143         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
144
145         if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
146                 return (-1);
147
148         while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
149                 if (errno == ENOMEM) {
150                         if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
151                                 zcmd_free_nvlists(&zc);
152                                 return (-1);
153                         }
154                 } else {
155                         zcmd_free_nvlists(&zc);
156                         return (-1);
157                 }
158         }
159
160         if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
161                 zcmd_free_nvlists(&zc);
162                 return (-1);
163         }
164
165         zcmd_free_nvlists(&zc);
166
167         return (0);
168 }
169
170 /*
171  * Open a handle to the given pool, even if the pool is currently in the FAULTED
172  * state.
173  */
174 zpool_handle_t *
175 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
176 {
177         zpool_handle_t *zhp;
178         boolean_t missing;
179
180         /*
181          * Make sure the pool name is valid.
182          */
183         if (!zpool_name_valid(hdl, B_TRUE, pool)) {
184                 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
185                     dgettext(TEXT_DOMAIN, "cannot open '%s'"),
186                     pool);
187                 return (NULL);
188         }
189
190         if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
191                 return (NULL);
192
193         zhp->zpool_hdl = hdl;
194         (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
195
196         if (zpool_refresh_stats(zhp, &missing) != 0) {
197                 zpool_close(zhp);
198                 return (NULL);
199         }
200
201         if (missing) {
202                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
203                     "no such pool"));
204                 (void) zfs_error_fmt(hdl, EZFS_NOENT,
205                     dgettext(TEXT_DOMAIN, "cannot open '%s'"),
206                     pool);
207                 zpool_close(zhp);
208                 return (NULL);
209         }
210
211         return (zhp);
212 }
213
214 /*
215  * Like the above, but silent on error.  Used when iterating over pools (because
216  * the configuration cache may be out of date).
217  */
218 int
219 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
220 {
221         zpool_handle_t *zhp;
222         boolean_t missing;
223
224         if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
225                 return (-1);
226
227         zhp->zpool_hdl = hdl;
228         (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
229
230         if (zpool_refresh_stats(zhp, &missing) != 0) {
231                 zpool_close(zhp);
232                 return (-1);
233         }
234
235         if (missing) {
236                 zpool_close(zhp);
237                 *ret = NULL;
238                 return (0);
239         }
240
241         *ret = zhp;
242         return (0);
243 }
244
245 /*
246  * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
247  * state.
248  */
249 zpool_handle_t *
250 zpool_open(libzfs_handle_t *hdl, const char *pool)
251 {
252         zpool_handle_t *zhp;
253
254         if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
255                 return (NULL);
256
257         if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
258                 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
259                     dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
260                 zpool_close(zhp);
261                 return (NULL);
262         }
263
264         return (zhp);
265 }
266
267 /*
268  * Close the handle.  Simply frees the memory associated with the handle.
269  */
270 void
271 zpool_close(zpool_handle_t *zhp)
272 {
273         if (zhp->zpool_config)
274                 nvlist_free(zhp->zpool_config);
275         if (zhp->zpool_old_config)
276                 nvlist_free(zhp->zpool_old_config);
277         if (zhp->zpool_props)
278                 nvlist_free(zhp->zpool_props);
279         free(zhp);
280 }
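
/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * source): open a pool by name and release the handle afterwards.  The
 * library handle comes from libzfs_init(); the pool name "tank" is
 * hypothetical.
 *
 *	libzfs_handle_t *hdl;
 *	zpool_handle_t *zhp;
 *
 *	if ((hdl = libzfs_init()) == NULL)
 *		return (1);
 *	if ((zhp = zpool_open(hdl, "tank")) != NULL) {
 *		(void) printf("opened %s\n", zpool_get_name(zhp));
 *		zpool_close(zhp);
 *	}
 *	libzfs_fini(hdl);
 */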
281
282 /*
283  * Return the name of the pool.
284  */
285 const char *
286 zpool_get_name(zpool_handle_t *zhp)
287 {
288         return (zhp->zpool_name);
289 }
290
291 /*
292  * Return the GUID of the pool.
293  */
294 uint64_t
295 zpool_get_guid(zpool_handle_t *zhp)
296 {
297         uint64_t guid;
298
299         verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
300             &guid) == 0);
301         return (guid);
302 }
303
304 /*
305  * Return the version of the pool.
306  */
307 uint64_t
308 zpool_get_version(zpool_handle_t *zhp)
309 {
310         uint64_t version;
311
312         verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_VERSION,
313             &version) == 0);
314
315         return (version);
316 }
317
318 /*
319  * Return the amount of space currently consumed by the pool.
320  */
321 uint64_t
322 zpool_get_space_used(zpool_handle_t *zhp)
323 {
324         nvlist_t *nvroot;
325         vdev_stat_t *vs;
326         uint_t vsc;
327
328         verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
329             &nvroot) == 0);
330         verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
331             (uint64_t **)&vs, &vsc) == 0);
332
333         return (vs->vs_alloc);
334 }
335
336 /*
337  * Return the total space in the pool.
338  */
339 uint64_t
340 zpool_get_space_total(zpool_handle_t *zhp)
341 {
342         nvlist_t *nvroot;
343         vdev_stat_t *vs;
344         uint_t vsc;
345
346         verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
347             &nvroot) == 0);
348         verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
349             (uint64_t **)&vs, &vsc) == 0);
350
351         return (vs->vs_space);
352 }
353
354 /*
355  * Return the alternate root for this pool, if any.
356  */
357 int
358 zpool_get_root(zpool_handle_t *zhp, char *buf, size_t buflen)
359 {
360         zfs_cmd_t zc = { 0 };
361
362         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
363         if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0 ||
364             zc.zc_value[0] == '\0')
365                 return (-1);
366
367         (void) strlcpy(buf, zc.zc_value, buflen);
368
369         return (0);
370 }
371
372 /*
373  * Return the state of the pool (ACTIVE or UNAVAILABLE)
374  */
375 int
376 zpool_get_state(zpool_handle_t *zhp)
377 {
378         return (zhp->zpool_state);
379 }
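
/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * combining the accessors above to report capacity for an open pool handle
 * 'zhp'.
 *
 *	char usedbuf[32], totalbuf[32];
 *
 *	zfs_nicenum(zpool_get_space_used(zhp), usedbuf, sizeof (usedbuf));
 *	zfs_nicenum(zpool_get_space_total(zhp), totalbuf, sizeof (totalbuf));
 *	(void) printf("%s: %s used of %s (version %llu, state %d)\n",
 *	    zpool_get_name(zhp), usedbuf, totalbuf,
 *	    (u_longlong_t)zpool_get_version(zhp), zpool_get_state(zhp));
 */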
380
381 /*
382  * Create the named pool, using the provided vdev list.  It is assumed
383  * that the consumer has already validated the contents of the nvlist, so we
384  * don't have to worry about error semantics.
385  */
386 int
387 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
388     const char *altroot)
389 {
390         zfs_cmd_t zc = { 0 };
391         char msg[1024];
392
393         (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
394             "cannot create '%s'"), pool);
395
396         if (!zpool_name_valid(hdl, B_FALSE, pool))
397                 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
398
399         if (altroot != NULL && altroot[0] != '/')
400                 return (zfs_error_fmt(hdl, EZFS_BADPATH,
401                     dgettext(TEXT_DOMAIN, "bad alternate root '%s'"), altroot));
402
403         if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
404                 return (-1);
405
406         (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
407
408         if (altroot != NULL)
409                 (void) strlcpy(zc.zc_value, altroot, sizeof (zc.zc_value));
410
411         if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_CREATE, &zc) != 0) {
412                 zcmd_free_nvlists(&zc);
413
414                 switch (errno) {
415                 case EBUSY:
416                         /*
417                          * This can happen if the user has specified the same
418                          * device multiple times.  We can't reliably detect this
419                          * until we try to add it and see we already have a
420                          * label.
421                          */
422                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
423                             "one or more vdevs refer to the same device"));
424                         return (zfs_error(hdl, EZFS_BADDEV, msg));
425
426                 case EOVERFLOW:
427                         /*
428                          * This occurs when one of the devices is below
429                          * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
430                          * device was the problem device since there's no
431                          * reliable way to determine device size from userland.
432                          */
433                         {
434                                 char buf[64];
435
436                                 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
437
438                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
439                                     "one or more devices is less than the "
440                                     "minimum size (%s)"), buf);
441                         }
442                         return (zfs_error(hdl, EZFS_BADDEV, msg));
443
444                 case ENOSPC:
445                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
446                             "one or more devices is out of space"));
447                         return (zfs_error(hdl, EZFS_BADDEV, msg));
448
449                 default:
450                         return (zpool_standard_error(hdl, errno, msg));
451                 }
452         }
453
454         zcmd_free_nvlists(&zc);
455
456         /*
457          * If this is an alternate root pool, then we automatically set the
458          * mountpoint of the root dataset to be '/'.
459          */
460         if (altroot != NULL) {
461                 zfs_handle_t *zhp;
462
463                 verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_ANY)) != NULL);
464                 verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
465                     "/") == 0);
466
467                 zfs_close(zhp);
468         }
469
470         return (0);
471 }
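
/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * a minimal single-disk vdev specification for zpool_create().  In practice
 * the zpool(1M) command builds and validates this nvlist; the device path
 * "/dev/da0" and pool name "tank" are hypothetical.
 *
 *	nvlist_t *disk, *root;
 *
 *	verify(nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_PATH, "/dev/da0") == 0);
 *	verify(nvlist_add_uint64(disk, ZPOOL_CONFIG_WHOLE_DISK, 0ULL) == 0);
 *
 *	verify(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) == 0);
 *	verify(nvlist_add_nvlist_array(root, ZPOOL_CONFIG_CHILDREN,
 *	    &disk, 1) == 0);
 *
 *	if (zpool_create(hdl, "tank", root, NULL) != 0)
 *		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));
 *
 *	nvlist_free(disk);
 *	nvlist_free(root);
 */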
472
473 /*
474  * Destroy the given pool.  It is up to the caller to ensure that there are no
475  * datasets left in the pool.
476  */
477 int
478 zpool_destroy(zpool_handle_t *zhp)
479 {
480         zfs_cmd_t zc = { 0 };
481         zfs_handle_t *zfp = NULL;
482         libzfs_handle_t *hdl = zhp->zpool_hdl;
483         char msg[1024];
484
485         if (zhp->zpool_state == POOL_STATE_ACTIVE &&
486             (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
487             ZFS_TYPE_FILESYSTEM)) == NULL)
488                 return (-1);
489
490         if (zpool_remove_zvol_links(zhp) != 0)
491                 return (-1);
492
493         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
494
495         if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
496                 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
497                     "cannot destroy '%s'"), zhp->zpool_name);
498
499                 if (errno == EROFS) {
500                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
501                             "one or more devices is read only"));
502                         (void) zfs_error(hdl, EZFS_BADDEV, msg);
503                 } else {
504                         (void) zpool_standard_error(hdl, errno, msg);
505                 }
506
507                 if (zfp)
508                         zfs_close(zfp);
509                 return (-1);
510         }
511
512         if (zfp) {
513                 remove_mountpoint(zfp);
514                 zfs_close(zfp);
515         }
516
517         return (0);
518 }
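
/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * destroying a pool by name once its datasets have been unmounted.  The pool
 * name is hypothetical.
 *
 *	zpool_handle_t *zhp;
 *
 *	if ((zhp = zpool_open_canfail(hdl, "tank")) != NULL) {
 *		if (zpool_destroy(zhp) != 0)
 *			(void) fprintf(stderr, "%s\n",
 *			    libzfs_error_description(hdl));
 *		zpool_close(zhp);
 *	}
 */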
519
520 /*
521  * Add the given vdevs to the pool.  The caller must have already performed the
522  * necessary verification to ensure that the vdev specification is well-formed.
523  */
524 int
525 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
526 {
527         zfs_cmd_t zc = { 0 };
528         int ret;
529         libzfs_handle_t *hdl = zhp->zpool_hdl;
530         char msg[1024];
531         nvlist_t **spares;
532         uint_t nspares;
533
534         (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
535             "cannot add to '%s'"), zhp->zpool_name);
536
537         if (zpool_get_version(zhp) < ZFS_VERSION_SPARES &&
538             nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
539             &spares, &nspares) == 0) {
540                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
541                     "upgraded to add hot spares"));
542                 return (zfs_error(hdl, EZFS_BADVERSION, msg));
543         }
544
545         if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
546                 return (-1);
547         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
548
549         if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ADD, &zc) != 0) {
550                 switch (errno) {
551                 case EBUSY:
552                         /*
553                          * This can happen if the user has specified the same
554                          * device multiple times.  We can't reliably detect this
555                          * until we try to add it and see we already have a
556                          * label.
557                          */
558                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
559                             "one or more vdevs refer to the same device"));
560                         (void) zfs_error(hdl, EZFS_BADDEV, msg);
561                         break;
562
563                 case EOVERFLOW:
564                         /*
565                          * This occurs when one of the devices is below
566                          * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
567                          * device was the problem device since there's no
568                          * reliable way to determine device size from userland.
569                          */
570                         {
571                                 char buf[64];
572
573                                 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
574
575                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
576                                     "device is less than the minimum "
577                                     "size (%s)"), buf);
578                         }
579                         (void) zfs_error(hdl, EZFS_BADDEV, msg);
580                         break;
581
582                 case ENOTSUP:
583                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
584                             "pool must be upgraded to add raidz2 vdevs"));
585                         (void) zfs_error(hdl, EZFS_BADVERSION, msg);
586                         break;
587
588                 case EDOM:
589                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
590                             "root pool can not have concatenated devices"));
591                         (void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
592                         break;
593
594                 default:
595                         (void) zpool_standard_error(hdl, errno, msg);
596                 }
597
598                 ret = -1;
599         } else {
600                 ret = 0;
601         }
602
603         zcmd_free_nvlists(&zc);
604
605         return (ret);
606 }
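
/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * 'nvroot' uses the same layout as for zpool_create() above, so growing an
 * open pool 'zhp' by one hypothetical disk reduces to:
 *
 *	if (zpool_add(zhp, nvroot) != 0)
 *		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));
 */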
607
608 /*
609  * Exports the pool from the system.  The caller must ensure that there are no
610  * mounted datasets in the pool.
611  */
612 int
613 zpool_export(zpool_handle_t *zhp)
614 {
615         zfs_cmd_t zc = { 0 };
616
617         if (zpool_remove_zvol_links(zhp) != 0)
618                 return (-1);
619
620         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
621
622         if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_EXPORT, &zc) != 0)
623                 return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
624                     dgettext(TEXT_DOMAIN, "cannot export '%s'"),
625                     zhp->zpool_name));
626         return (0);
627 }
628
629 /*
630  * Import the given pool using the known configuration.  The configuration
631  * should have come from zpool_find_import().  The 'newname' and 'altroot'
632  * parameters control whether the pool is imported with a different name or with
633  * an alternate root, respectively.
634  */
635 int
636 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
637     const char *altroot)
638 {
639         zfs_cmd_t zc = { 0 };
640         char *thename;
641         char *origname;
642         int ret;
643
644         verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
645             &origname) == 0);
646
647         if (newname != NULL) {
648                 if (!zpool_name_valid(hdl, B_FALSE, newname))
649                         return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
650                             dgettext(TEXT_DOMAIN, "cannot import '%s'"),
651                             newname));
652                 thename = (char *)newname;
653         } else {
654                 thename = origname;
655         }
656
657         if (altroot != NULL && altroot[0] != '/')
658                 return (zfs_error_fmt(hdl, EZFS_BADPATH,
659                     dgettext(TEXT_DOMAIN, "bad alternate root '%s'"),
660                     altroot));
661
662         (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
663
664         if (altroot != NULL)
665                 (void) strlcpy(zc.zc_value, altroot, sizeof (zc.zc_value));
666         else
667                 zc.zc_value[0] = '\0';
668
669         verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
670             &zc.zc_guid) == 0);
671
672         if (zcmd_write_src_nvlist(hdl, &zc, config, NULL) != 0)
673                 return (-1);
674
675         ret = 0;
676         if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
677                 char desc[1024];
678                 if (newname == NULL)
679                         (void) snprintf(desc, sizeof (desc),
680                             dgettext(TEXT_DOMAIN, "cannot import '%s'"),
681                             thename);
682                 else
683                         (void) snprintf(desc, sizeof (desc),
684                             dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
685                             origname, thename);
686
687                 switch (errno) {
688                 case ENOTSUP:
689                         /*
690                          * Unsupported version.
691                          */
692                         (void) zfs_error(hdl, EZFS_BADVERSION, desc);
693                         break;
694
695                 case EINVAL:
696                         (void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
697                         break;
698
699                 default:
700                         (void) zpool_standard_error(hdl, errno, desc);
701                 }
702
703                 ret = -1;
704         } else {
705                 zpool_handle_t *zhp;
706                 /*
707                  * This should never fail, but play it safe anyway.
708                  */
709                 if (zpool_open_silent(hdl, thename, &zhp) != 0) {
710                         ret = -1;
711                 } else if (zhp != NULL) {
712                         ret = zpool_create_zvol_links(zhp);
713                         zpool_close(zhp);
714                 }
715         }
716
717         zcmd_free_nvlists(&zc);
718         return (ret);
719 }
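
/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * importing an exported pool under a new name.  'config' is assumed to be one
 * pool's nvlist as returned by zpool_find_import(); the new name is
 * hypothetical.
 *
 *	if (zpool_import(hdl, config, "newtank", NULL) != 0)
 *		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));
 */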
720
721 /*
722  * Scrub the pool.
723  */
724 int
725 zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
726 {
727         zfs_cmd_t zc = { 0 };
728         char msg[1024];
729         libzfs_handle_t *hdl = zhp->zpool_hdl;
730
731         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
732         zc.zc_cookie = type;
733
734         if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_SCRUB, &zc) == 0)
735                 return (0);
736
737         (void) snprintf(msg, sizeof (msg),
738             dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
739
740         if (errno == EBUSY)
741                 return (zfs_error(hdl, EZFS_RESILVERING, msg));
742         else
743                 return (zpool_standard_error(hdl, errno, msg));
744 }
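
/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * requesting a full scrub of an open pool; POOL_SCRUB_NONE would stop a scrub
 * that is already in progress.
 *
 *	if (zpool_scrub(zhp, POOL_SCRUB_EVERYTHING) != 0)
 *		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));
 */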
745
746 /*
747  * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
748  * spare, but FALSE if it is an INUSE spare.
749  */
750 static nvlist_t *
751 vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
752     boolean_t *avail_spare)
753 {
754         uint_t c, children;
755         nvlist_t **child;
756         uint64_t theguid, present;
757         char *path;
758         uint64_t wholedisk = 0;
759         nvlist_t *ret;
760
761         verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &theguid) == 0);
762
763         if (search == NULL &&
764             nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &present) == 0) {
765                 /*
766                  * If the device has never been present since import, the only
767                  * reliable way to match the vdev is by GUID.
768                  */
769                 if (theguid == guid)
770                         return (nv);
771         } else if (search != NULL &&
772             nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
773                 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
774                     &wholedisk);
775                 if (wholedisk) {
776                         /*
777                          * For whole disks, the internal path has 's0', but the
778                          * path passed in by the user doesn't.
779                          */
780                         if (strlen(search) == strlen(path) - 2 &&
781                             strncmp(search, path, strlen(search)) == 0)
782                                 return (nv);
783                 } else if (strcmp(search, path) == 0) {
784                         return (nv);
785                 }
786         }
787
788         if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
789             &child, &children) != 0)
790                 return (NULL);
791
792         for (c = 0; c < children; c++)
793                 if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
794                     avail_spare)) != NULL)
795                         return (ret);
796
797         if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
798             &child, &children) == 0) {
799                 for (c = 0; c < children; c++) {
800                         if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
801                             avail_spare)) != NULL) {
802                                 *avail_spare = B_TRUE;
803                                 return (ret);
804                         }
805                 }
806         }
807
808         return (NULL);
809 }
810
811 nvlist_t *
812 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare)
813 {
814         char buf[MAXPATHLEN];
815         const char *search;
816         char *end;
817         nvlist_t *nvroot;
818         uint64_t guid;
819
820         guid = strtoull(path, &end, 10);
821         if (guid != 0 && *end == '\0') {
822                 search = NULL;
823         } else if (path[0] != '/') {
824                 (void) snprintf(buf, sizeof (buf), "%s%s", _PATH_DEV, path);
825                 search = buf;
826         } else {
827                 search = path;
828         }
829
830         verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
831             &nvroot) == 0);
832
833         *avail_spare = B_FALSE;
834         return (vdev_to_nvlist_iter(nvroot, search, guid, avail_spare));
835 }
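
/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * looking up a vdev by device name or GUID string and reading its GUID from
 * the returned nvlist.  The device name "da0" is hypothetical.
 *
 *	boolean_t spare;
 *	nvlist_t *tgt;
 *	uint64_t guid;
 *
 *	if ((tgt = zpool_find_vdev(zhp, "da0", &spare)) != NULL) {
 *		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
 *		    &guid) == 0);
 *		(void) printf("guid %llu%s\n", (u_longlong_t)guid,
 *		    spare ? " (available spare)" : "");
 *	}
 */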
836
837 /*
838  * Returns TRUE if the given guid corresponds to a spare (INUSE or not).
839  */
840 static boolean_t
841 is_spare(zpool_handle_t *zhp, uint64_t guid)
842 {
843         uint64_t spare_guid;
844         nvlist_t *nvroot;
845         nvlist_t **spares;
846         uint_t nspares;
847         int i;
848
849         verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
850             &nvroot) == 0);
851         if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
852             &spares, &nspares) == 0) {
853                 for (i = 0; i < nspares; i++) {
854                         verify(nvlist_lookup_uint64(spares[i],
855                             ZPOOL_CONFIG_GUID, &spare_guid) == 0);
856                         if (guid == spare_guid)
857                                 return (B_TRUE);
858                 }
859         }
860
861         return (B_FALSE);
862 }
863
864 /*
865  * Bring the specified vdev online
866  */
867 int
868 zpool_vdev_online(zpool_handle_t *zhp, const char *path)
869 {
870         zfs_cmd_t zc = { 0 };
871         char msg[1024];
872         nvlist_t *tgt;
873         boolean_t avail_spare;
874         libzfs_handle_t *hdl = zhp->zpool_hdl;
875
876         (void) snprintf(msg, sizeof (msg),
877             dgettext(TEXT_DOMAIN, "cannot online %s"), path);
878
879         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
880         if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
881                 return (zfs_error(hdl, EZFS_NODEVICE, msg));
882
883         verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
884
885         if (avail_spare || is_spare(zhp, zc.zc_guid) == B_TRUE)
886                 return (zfs_error(hdl, EZFS_ISSPARE, msg));
887
888         if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ONLINE, &zc) == 0)
889                 return (0);
890
891         return (zpool_standard_error(hdl, errno, msg));
892 }
893
894 /*
895  * Take the specified vdev offline
896  */
897 int
898 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, int istmp)
899 {
900         zfs_cmd_t zc = { 0 };
901         char msg[1024];
902         nvlist_t *tgt;
903         boolean_t avail_spare;
904         libzfs_handle_t *hdl = zhp->zpool_hdl;
905
906         (void) snprintf(msg, sizeof (msg),
907             dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
908
909         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
910         if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
911                 return (zfs_error(hdl, EZFS_NODEVICE, msg));
912
913         verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
914
915         if (avail_spare || is_spare(zhp, zc.zc_guid) == B_TRUE)
916                 return (zfs_error(hdl, EZFS_ISSPARE, msg));
917
918         zc.zc_cookie = istmp;
919
920         if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_OFFLINE, &zc) == 0)
921                 return (0);
922
923         switch (errno) {
924         case EBUSY:
925
926                 /*
927                  * There are no other replicas of this device.
928                  */
929                 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
930
931         default:
932                 return (zpool_standard_error(hdl, errno, msg));
933         }
934 }
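
/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * temporarily offlining a device for maintenance and bringing it back online.
 * The device name is hypothetical.
 *
 *	if (zpool_vdev_offline(zhp, "da1", B_TRUE) != 0 ||
 *	    zpool_vdev_online(zhp, "da1") != 0)
 *		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));
 */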
935
936 /*
937  * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
938  * a hot spare.
939  */
940 static boolean_t
941 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
942 {
943         nvlist_t **child;
944         uint_t c, children;
945         char *type;
946
947         if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
948             &children) == 0) {
949                 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
950                     &type) == 0);
951
952                 if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
953                     children == 2 && child[which] == tgt)
954                         return (B_TRUE);
955
956                 for (c = 0; c < children; c++)
957                         if (is_replacing_spare(child[c], tgt, which))
958                                 return (B_TRUE);
959         }
960
961         return (B_FALSE);
962 }
963
964 /*
965  * Attach new_disk (fully described by nvroot) to old_disk.
966  * If 'replacing' is specified, the new disk will replace the old one.
967  */
968 int
969 zpool_vdev_attach(zpool_handle_t *zhp,
970     const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
971 {
972         zfs_cmd_t zc = { 0 };
973         char msg[1024];
974         int ret;
975         nvlist_t *tgt;
976         boolean_t avail_spare;
977         uint64_t val;
978         char *path;
979         nvlist_t **child;
980         uint_t children;
981         nvlist_t *config_root;
982         libzfs_handle_t *hdl = zhp->zpool_hdl;
983
984         if (replacing)
985                 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
986                     "cannot replace %s with %s"), old_disk, new_disk);
987         else
988                 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
989                     "cannot attach %s to %s"), new_disk, old_disk);
990
991         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
992         if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare)) == 0)
993                 return (zfs_error(hdl, EZFS_NODEVICE, msg));
994
995         if (avail_spare)
996                 return (zfs_error(hdl, EZFS_ISSPARE, msg));
997
998         verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
999         zc.zc_cookie = replacing;
1000
1001         if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
1002             &child, &children) != 0 || children != 1) {
1003                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1004                     "new device must be a single disk"));
1005                 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
1006         }
1007
1008         verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
1009             ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
1010
1011         /*
1012          * If the target is a hot spare that has been swapped in, we can only
1013          * replace it with another hot spare.
1014          */
1015         if (replacing &&
1016             nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
1017             nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
1018             (zpool_find_vdev(zhp, path, &avail_spare) == NULL ||
1019             !avail_spare) && is_replacing_spare(config_root, tgt, 1)) {
1020                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1021                     "can only be replaced by another hot spare"));
1022                 return (zfs_error(hdl, EZFS_BADTARGET, msg));
1023         }
1024
1025         /*
1026  * If we are attempting to replace a spare, it cannot be applied to an
1027          * already spared device.
1028          */
1029         if (replacing &&
1030             nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
1031             zpool_find_vdev(zhp, path, &avail_spare) != NULL && avail_spare &&
1032             is_replacing_spare(config_root, tgt, 0)) {
1033                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1034                     "device has already been replaced with a spare"));
1035                 return (zfs_error(hdl, EZFS_BADTARGET, msg));
1036         }
1037
1038         if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
1039                 return (-1);
1040
1041         ret = ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ATTACH, &zc);
1042
1043         zcmd_free_nvlists(&zc);
1044
1045         if (ret == 0)
1046                 return (0);
1047
1048         switch (errno) {
1049         case ENOTSUP:
1050                 /*
1051                  * Can't attach to or replace this type of vdev.
1052                  */
1053                 if (replacing)
1054                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1055                             "cannot replace a replacing device"));
1056                 else
1057                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1058                             "can only attach to mirrors and top-level "
1059                             "disks"));
1060                 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
1061                 break;
1062
1063         case EINVAL:
1064                 /*
1065                  * The new device must be a single disk.
1066                  */
1067                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1068                     "new device must be a single disk"));
1069                 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
1070                 break;
1071
1072         case EBUSY:
1073                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
1074                     new_disk);
1075                 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1076                 break;
1077
1078         case EOVERFLOW:
1079                 /*
1080                  * The new device is too small.
1081                  */
1082                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1083                     "device is too small"));
1084                 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1085                 break;
1086
1087         case EDOM:
1088                 /*
1089                  * The new device has a different alignment requirement.
1090                  */
1091                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1092                     "devices have different sector alignment"));
1093                 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1094                 break;
1095
1096         case ENAMETOOLONG:
1097                 /*
1098                  * The resulting top-level vdev spec won't fit in the label.
1099                  */
1100                 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
1101                 break;
1102
1103         default:
1104                 (void) zpool_standard_error(hdl, errno, msg);
1105         }
1106
1107         return (-1);
1108 }
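
/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * replacing one device with another.  'nvroot' describes the single
 * replacement disk in the same format used by zpool_create() and zpool_add();
 * both device names are hypothetical.
 *
 *	if (zpool_vdev_attach(zhp, "da1", "da2", nvroot, 1) != 0)
 *		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));
 */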
1109
1110 /*
1111  * Detach the specified device.
1112  */
1113 int
1114 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
1115 {
1116         zfs_cmd_t zc = { 0 };
1117         char msg[1024];
1118         nvlist_t *tgt;
1119         boolean_t avail_spare;
1120         libzfs_handle_t *hdl = zhp->zpool_hdl;
1121
1122         (void) snprintf(msg, sizeof (msg),
1123             dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
1124
1125         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1126         if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == 0)
1127                 return (zfs_error(hdl, EZFS_NODEVICE, msg));
1128
1129         if (avail_spare)
1130                 return (zfs_error(hdl, EZFS_ISSPARE, msg));
1131
1132         verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1133
1134         if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_DETACH, &zc) == 0)
1135                 return (0);
1136
1137         switch (errno) {
1138
1139         case ENOTSUP:
1140                 /*
1141                  * Can't detach from this type of vdev.
1142                  */
1143                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
1144                     "applicable to mirror and replacing vdevs"));
1145                 (void) zfs_error(zhp->zpool_hdl, EZFS_BADTARGET, msg);
1146                 break;
1147
1148         case EBUSY:
1149                 /*
1150                  * There are no other replicas of this device.
1151                  */
1152                 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
1153                 break;
1154
1155         default:
1156                 (void) zpool_standard_error(hdl, errno, msg);
1157         }
1158
1159         return (-1);
1160 }
1161
1162 /*
1163  * Remove the given device.  Currently, this is supported only for hot spares.
1164  */
1165 int
1166 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
1167 {
1168         zfs_cmd_t zc = { 0 };
1169         char msg[1024];
1170         nvlist_t *tgt;
1171         boolean_t avail_spare;
1172         libzfs_handle_t *hdl = zhp->zpool_hdl;
1173
1174         (void) snprintf(msg, sizeof (msg),
1175             dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
1176
1177         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1178         if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == 0)
1179                 return (zfs_error(hdl, EZFS_NODEVICE, msg));
1180
1181         if (!avail_spare) {
1182                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1183                     "only inactive hot spares can be removed"));
1184                 return (zfs_error(hdl, EZFS_NODEVICE, msg));
1185         }
1186
1187         verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1188
1189         if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
1190                 return (0);
1191
1192         return (zpool_standard_error(hdl, errno, msg));
1193 }
1194
1195 /*
1196  * Clear the errors for the pool, or the particular device if specified.
1197  */
1198 int
1199 zpool_clear(zpool_handle_t *zhp, const char *path)
1200 {
1201         zfs_cmd_t zc = { 0 };
1202         char msg[1024];
1203         nvlist_t *tgt;
1204         boolean_t avail_spare;
1205         libzfs_handle_t *hdl = zhp->zpool_hdl;
1206
1207         if (path)
1208                 (void) snprintf(msg, sizeof (msg),
1209                     dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1210                     path);
1211         else
1212                 (void) snprintf(msg, sizeof (msg),
1213                     dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1214                     zhp->zpool_name);
1215
1216         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1217         if (path) {
1218                 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == 0)
1219                         return (zfs_error(hdl, EZFS_NODEVICE, msg));
1220
1221                 if (avail_spare)
1222                         return (zfs_error(hdl, EZFS_ISSPARE, msg));
1223
1224                 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
1225                     &zc.zc_guid) == 0);
1226         }
1227
1228         if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
1229                 return (0);
1230
1231         return (zpool_standard_error(hdl, errno, msg));
1232 }
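
/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * clearing error counts for the whole pool, or for a single hypothetical
 * device.
 *
 *	(void) zpool_clear(zhp, NULL);
 *	(void) zpool_clear(zhp, "da0");
 */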
1233
1234 /*
1235  * Iterate over all zvols in a given pool by walking the /dev/zvol/dsk/<pool>
1236  * hierarchy.
1237  */
1238 int
1239 zpool_iter_zvol(zpool_handle_t *zhp, int (*cb)(const char *, void *),
1240     void *data)
1241 {
1242         libzfs_handle_t *hdl = zhp->zpool_hdl;
1243         char (*paths)[MAXPATHLEN];
1244         char path[MAXPATHLEN];
1245         size_t size = 4;
1246         int curr, fd, base, ret = 0;
1247         DIR *dirp;
1248         struct dirent *dp;
1249         struct stat st;
1250
1251         if ((base = open(ZVOL_FULL_DEV_DIR, O_RDONLY)) < 0)
1252                 return (errno == ENOENT ? 0 : -1);
1253
1254         snprintf(path, sizeof(path), "%s/%s", ZVOL_FULL_DEV_DIR,
1255             zhp->zpool_name);
1256         if (stat(path, &st) != 0) {
1257                 int err = errno;
1258                 (void) close(base);
1259                 return (err == ENOENT ? 0 : -1);
1260         }
1261
1262         /*
1263          * Oddly this wasn't a directory -- ignore that failure since we
1264  * know there are no links lower in the (non-existent) hierarchy.
1265          */
1266         if (!S_ISDIR(st.st_mode)) {
1267                 (void) close(base);
1268                 return (0);
1269         }
1270
1271         if ((paths = zfs_alloc(hdl, size * sizeof (paths[0]))) == NULL) {
1272                 (void) close(base);
1273                 return (-1);
1274         }
1275
1276         (void) strlcpy(paths[0], zhp->zpool_name, sizeof (paths[0]));
1277         curr = 0;
1278
1279         while (curr >= 0) {
1280                 snprintf(path, sizeof(path), "%s/%s", ZVOL_FULL_DEV_DIR,
1281                     paths[curr]);
1282                 if (lstat(path, &st) != 0)
1283                         goto err;
1284
1285                 if (S_ISDIR(st.st_mode)) {
1286                         if ((dirp = opendir(path)) == NULL) {
1287                                 goto err;
1288                         }
1289
1290                         while ((dp = readdir(dirp)) != NULL) {
1291                                 if (dp->d_name[0] == '.')
1292                                         continue;
1293
1294                                 if (curr + 1 == size) {
1295                                         paths = zfs_realloc(hdl, paths,
1296                                             size * sizeof (paths[0]),
1297                                             size * 2 * sizeof (paths[0]));
1298                                         if (paths == NULL) {
1299                                                 (void) closedir(dirp);
1300                                                 goto err;
1301                                         }
1302
1303                                         size *= 2;
1304                                 }
1305
1306                                 (void) strlcpy(paths[curr + 1], paths[curr],
1307                                     sizeof (paths[curr + 1]));
1308                                 (void) strlcat(paths[curr], "/",
1309                                     sizeof (paths[curr]));
1310                                 (void) strlcat(paths[curr], dp->d_name,
1311                                     sizeof (paths[curr]));
1312                                 curr++;
1313                         }
1314
1315                         (void) closedir(dirp);
1316
1317                 } else {
1318                         if ((ret = cb(paths[curr], data)) != 0)
1319                                 break;
1320                 }
1321
1322                 curr--;
1323         }
1324
1325         free(paths);
1326         (void) close(base);
1327
1328         return (ret);
1329
1330 err:
1331         free(paths);
1332         (void) close(base);
1333         return (-1);
1334 }
1335
1336 typedef struct zvol_cb {
1337         zpool_handle_t *zcb_pool;
1338         boolean_t zcb_create;
1339 } zvol_cb_t;
1340
1341 /*ARGSUSED*/
1342 static int
1343 do_zvol_create(zfs_handle_t *zhp, void *data)
1344 {
1345         int ret;
1346
1347         if (ZFS_IS_VOLUME(zhp))
1348                 (void) zvol_create_link(zhp->zfs_hdl, zhp->zfs_name);
1349
1350         ret = zfs_iter_children(zhp, do_zvol_create, NULL);
1351
1352         zfs_close(zhp);
1353
1354         return (ret);
1355 }
1356
1357 /*
1358  * Iterate over all zvols in the pool and make any necessary minor nodes.
1359  */
1360 int
1361 zpool_create_zvol_links(zpool_handle_t *zhp)
1362 {
1363         zfs_handle_t *zfp;
1364         int ret;
1365
1366         /*
1367          * If the pool is unavailable, just return success.
1368          */
1369         if ((zfp = make_dataset_handle(zhp->zpool_hdl,
1370             zhp->zpool_name)) == NULL)
1371                 return (0);
1372
1373         ret = zfs_iter_children(zfp, do_zvol_create, NULL);
1374
1375         zfs_close(zfp);
1376         return (ret);
1377 }
1378
1379 static int
1380 do_zvol_remove(const char *dataset, void *data)
1381 {
1382         zpool_handle_t *zhp = data;
1383
1384         return (zvol_remove_link(zhp->zpool_hdl, dataset));
1385 }
1386
1387 /*
1388  * Iterate over all zvols in the pool and remove any minor nodes.  We iterate
1389  * by examining the /dev links so that a corrupted pool doesn't impede this
1390  * operation.
1391  */
1392 int
1393 zpool_remove_zvol_links(zpool_handle_t *zhp)
1394 {
1395         return (zpool_iter_zvol(zhp, do_zvol_remove, zhp));
1396 }
1397
1398 /*
1399  * Convert from a devid string to a path.
1400  */
1401 static char *
1402 devid_to_path(char *devid_str)
1403 {
1404         ddi_devid_t devid;
1405         char *minor;
1406         char *path;
1407         devid_nmlist_t *list = NULL;
1408         int ret;
1409
1410         if (devid_str_decode(devid_str, &devid, &minor) != 0)
1411                 return (NULL);
1412
1413         ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
1414
1415         devid_str_free(minor);
1416         devid_free(devid);
1417
1418         if (ret != 0)
1419                 return (NULL);
1420
1421         if ((path = strdup(list[0].devname)) == NULL)
1422                 return (NULL);
1423
1424         devid_free_nmlist(list);
1425
1426         return (path);
1427 }
1428
1429 /*
1430  * Convert from a path to a devid string.
1431  */
1432 static char *
1433 path_to_devid(const char *path)
1434 {
1435         int fd;
1436         ddi_devid_t devid;
1437         char *minor, *ret;
1438
1439         if ((fd = open(path, O_RDONLY)) < 0)
1440                 return (NULL);
1441
1442         minor = NULL;
1443         ret = NULL;
1444         if (devid_get(fd, &devid) == 0) {
1445                 if (devid_get_minor_name(fd, &minor) == 0)
1446                         ret = devid_str_encode(devid, minor);
1447                 if (minor != NULL)
1448                         devid_str_free(minor);
1449                 devid_free(devid);
1450         }
1451         (void) close(fd);
1452
1453         return (ret);
1454 }
1455
1456 /*
1457  * Issue the necessary ioctl() to update the stored path value for the vdev.  We
1458  * ignore any failure here, since a common case is for an unprivileged user to
1459  * type 'zpool status', and we'll display the correct information anyway.
1460  */
1461 static void
1462 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
1463 {
1464         zfs_cmd_t zc = { 0 };
1465
1466         (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1467         (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
1468         verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1469             &zc.zc_guid) == 0);
1470
1471         (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
1472 }
1473
1474 /*
1475  * Given a vdev, return the name to display in iostat.  If the vdev has a path,
1476  * we use that, stripping off any leading "/dev/" prefix; if not, we use the type.
1477  * We also check if this is a whole disk, in which case we strip off the
1478  * trailing 's0' slice name.
1479  *
1480  * This routine is also responsible for identifying when disks have been
1481  * reconfigured in a new location.  The kernel will have opened the device by
1482  * devid, but the path will still refer to the old location.  To catch this, we
1483  * first do a path -> devid translation (which is fast for the common case).  If
1484  * the devid matches, we're done.  If not, we do a reverse devid -> path
1485  * translation and issue the appropriate ioctl() to update the path of the vdev.
1486  * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
1487  * of these checks.
1488  */
1489 char *
1490 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
1491 {
1492         char *path, *devid;
1493         uint64_t value;
1494         char buf[64];
1495
1496         if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
1497             &value) == 0) {
1498                 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1499                     &value) == 0);
1500                 (void) snprintf(buf, sizeof (buf), "%llu",
1501                     (u_longlong_t)value);
1502                 path = buf;
1503         } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
1504
1505                 if (zhp != NULL &&
1506                     nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
1507                         /*
1508                          * Determine if the current path is correct.
1509                          */
1510                         char *newdevid = path_to_devid(path);
1511
1512                         if (newdevid == NULL ||
1513                             strcmp(devid, newdevid) != 0) {
1514                                 char *newpath;
1515
1516                                 if ((newpath = devid_to_path(devid)) != NULL) {
1517                                         /*
1518                                          * Update the path appropriately.
1519                                          */
1520                                         set_path(zhp, nv, newpath);
1521                                         if (nvlist_add_string(nv,
1522                                             ZPOOL_CONFIG_PATH, newpath) == 0)
1523                                                 verify(nvlist_lookup_string(nv,
1524                                                     ZPOOL_CONFIG_PATH,
1525                                                     &path) == 0);
1526                                         free(newpath);
1527                                 }
1528                         }
1529
1530                         if (newdevid)
1531                                 devid_str_free(newdevid);
1532                 }
1533
1534                 if (strncmp(path, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
1535                         path += sizeof(_PATH_DEV) - 1;
1536
1537                 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
1538                     &value) == 0 && value) {
1539                         char *tmp = zfs_strdup(hdl, path);
1540                         if (tmp == NULL)
1541                                 return (NULL);
1542                         tmp[strlen(path) - 2] = '\0';
1543                         return (tmp);
1544                 }
1545         } else {
1546                 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
1547
1548                 /*
1549                  * If it's a raidz device, we need to stick in the parity level.
1550                  */
1551                 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
1552                         verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
1553                             &value) == 0);
1554                         (void) snprintf(buf, sizeof (buf), "%s%llu", path,
1555                             (u_longlong_t)value);
1556                         path = buf;
1557                 }
1558         }
1559
1560         return (zfs_strdup(hdl, path));
1561 }
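
/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * printing the display name of each top-level vdev in an open pool's current
 * configuration.
 *
 *	nvlist_t *config, *nvroot, **child;
 *	uint_t c, children;
 *	char *name;
 *
 *	config = zpool_get_config(zhp, NULL);
 *	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
 *	    &nvroot) == 0);
 *	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
 *	    &child, &children) == 0);
 *	for (c = 0; c < children; c++) {
 *		name = zpool_vdev_name(hdl, zhp, child[c]);
 *		(void) printf("  %s\n", name);
 *		free(name);
 *	}
 */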
1562
1563 static int
1564 zbookmark_compare(const void *a, const void *b)
1565 {
1566         return (memcmp(a, b, sizeof (zbookmark_t)));
1567 }
1568
1569 /*
1570  * Retrieve the persistent error log, uniquify the members, and return to the
1571  * caller.
1572  */
1573 int
1574 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
1575 {
1576         zfs_cmd_t zc = { 0 };
1577         uint64_t count;
1578         zbookmark_t *zb = NULL;
1579         int i;
1580
1581         /*
1582          * Retrieve the raw error list from the kernel.  If the number of errors
1583          * has increased, allocate more space and continue until we get the
1584          * entire list.
1585          */
1586         verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
1587             &count) == 0);
1588         if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
1589             count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
1590                 return (-1);
1591         zc.zc_nvlist_dst_size = count;
1592         (void) strcpy(zc.zc_name, zhp->zpool_name);
1593         for (;;) {
1594                 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
1595                     &zc) != 0) {
1596                         free((void *)(uintptr_t)zc.zc_nvlist_dst);
1597                         if (errno == ENOMEM) {
1598                                 count = zc.zc_nvlist_dst_size;
1599                                 if ((zc.zc_nvlist_dst = (uintptr_t)
1600                                     zfs_alloc(zhp->zpool_hdl, count *
1601                                     sizeof (zbookmark_t))) == (uintptr_t)NULL)
1602                                         return (-1);
1603                         } else {
1604                                 return (-1);
1605                         }
1606                 } else {
1607                         break;
1608                 }
1609         }
1610
1611         /*
1612          * Sort the resulting bookmarks.  This is a little confusing due to the
1613          * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
1614          * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
1615          * _not_ copied as part of the process.  So we point the start of our
1616          * array appropriately and decrement the total number of elements.
1617          */
1618         zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
1619             zc.zc_nvlist_dst_size;
1620         count -= zc.zc_nvlist_dst_size;
1621
1622         qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
1623
1624         verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
1625
1626         /*
1627          * Fill in the nverrlistp with nvlists of dataset and object numbers.
1628          */
1629         for (i = 0; i < count; i++) {
1630                 nvlist_t *nv;
1631
1632                 /* ignoring zb_blkid and zb_level for now */
1633                 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
1634                     zb[i-1].zb_object == zb[i].zb_object)
1635                         continue;
1636
1637                 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
1638                         goto nomem;
1639                 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
1640                     zb[i].zb_objset) != 0) {
1641                         nvlist_free(nv);
1642                         goto nomem;
1643                 }
1644                 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
1645                     zb[i].zb_object) != 0) {
1646                         nvlist_free(nv);
1647                         goto nomem;
1648                 }
1649                 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
1650                         nvlist_free(nv);
1651                         goto nomem;
1652                 }
1653                 nvlist_free(nv);
1654         }
1655
1656         free((void *)(uintptr_t)zc.zc_nvlist_dst);
1657         return (0);
1658
1659 nomem:
1660         free((void *)(uintptr_t)zc.zc_nvlist_dst);
1661         return (no_memory(zhp->zpool_hdl));
1662 }
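
/*
 * Usage sketch (not part of the original source): the uniquified error list
 * returned above can be walked and each (dataset, object) pair resolved to a
 * pathname with zpool_obj_to_path() below, roughly the way "zpool status -v"
 * consumes it.  'zhp' is assumed to be an open zpool_handle_t.
 *
 *	nvlist_t *nverrlist = NULL;
 *	nvpair_t *elem = NULL;
 *	char pathname[MAXPATHLEN];
 *	uint64_t dsobj, obj;
 *
 *	if (zpool_get_errlog(zhp, &nverrlist) == 0) {
 *		while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
 *			nvlist_t *nv;
 *
 *			verify(nvpair_value_nvlist(elem, &nv) == 0);
 *			verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
 *			    &dsobj) == 0);
 *			verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
 *			    &obj) == 0);
 *			zpool_obj_to_path(zhp, dsobj, obj, pathname,
 *			    sizeof (pathname));
 *			(void) printf("%s\n", pathname);
 *		}
 *		nvlist_free(nverrlist);
 *	}
 */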
1663
1664 /*
1665  * Upgrade a ZFS pool to the latest on-disk version.
1666  */
1667 int
1668 zpool_upgrade(zpool_handle_t *zhp)
1669 {
1670         zfs_cmd_t zc = { 0 };
1671         libzfs_handle_t *hdl = zhp->zpool_hdl;
1672
1673         (void) strcpy(zc.zc_name, zhp->zpool_name);
1674         if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
1675                 return (zpool_standard_error_fmt(hdl, errno,
1676                     dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
1677                     zhp->zpool_name));
1678
1679         return (0);
1680 }
1681
1682 /*
1683  * Log command history.
1684  *
1685  * 'pool' is B_TRUE if we are logging a command for 'zpool'; B_FALSE
1686  * otherwise ('zfs').  'pool_create' is B_TRUE if we are logging the creation
1687  * of the pool; B_FALSE otherwise.  'path' is the pathname containing the
1688  * poolname.  'argc' and 'argv' are used to construct the command string.
1689  */
1690 void
1691 zpool_log_history(libzfs_handle_t *hdl, int argc, char **argv, const char *path,
1692         boolean_t pool, boolean_t pool_create)
1693 {
1694         char cmd_buf[HIS_MAX_RECORD_LEN];
1695         char *dspath;
1696         zfs_cmd_t zc = { 0 };
1697         int i;
1698
1699         /* construct the command string */
1700         (void) strcpy(cmd_buf, pool ? "zpool" : "zfs");
1701         for (i = 0; i < argc; i++) {
1702                 if (strlen(cmd_buf) + 1 + strlen(argv[i]) > HIS_MAX_RECORD_LEN)
1703                         break;
1704                 (void) strcat(cmd_buf, " ");
1705                 (void) strcat(cmd_buf, argv[i]);
1706         }
1707
1708         /* figure out the poolname */
1709         dspath = strpbrk(path, "/@");
1710         if (dspath == NULL) {
1711                 (void) strcpy(zc.zc_name, path);
1712         } else {
1713                 (void) strncpy(zc.zc_name, path, dspath - path);
1714                 zc.zc_name[dspath-path] = '\0';
1715         }
1716
1717         zc.zc_history = (uint64_t)(uintptr_t)cmd_buf;
1718         zc.zc_history_len = strlen(cmd_buf);
1719
1720         /* overloading zc_history_offset */
1721         zc.zc_history_offset = pool_create;
1722
1723         (void) ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_LOG_HISTORY, &zc);
1724 }
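
/*
 * Calling sketch (not part of the original source; 'g_zfs' and the command
 * lines are hypothetical): the command-line consumers are expected to invoke
 * this after a successful operation, passing their own argc/argv plus a
 * pathname that begins with the pool name so the code above can extract it
 * with strpbrk().
 *
 *	zpool_log_history(g_zfs, argc, argv, "tank", B_TRUE, B_TRUE);
 *		(after a successful "zpool create tank ...")
 *
 *	zpool_log_history(g_zfs, argc, argv, "tank/home@now", B_FALSE, B_FALSE);
 *		(after a successful "zfs snapshot tank/home@now")
 */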
1725
1726 /*
1727  * Perform an ioctl to retrieve a portion of the command history of a pool.
1728  *
1729  * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
1730  * logical offset of the history buffer to start reading from.
1731  *
1732  * Upon return, 'off' is the next logical offset to read from and
1733  * 'len' is the actual amount of bytes read into 'buf'.
1734  */
1735 static int
1736 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
1737 {
1738         zfs_cmd_t zc = { 0 };
1739         libzfs_handle_t *hdl = zhp->zpool_hdl;
1740
1741         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1742
1743         zc.zc_history = (uint64_t)(uintptr_t)buf;
1744         zc.zc_history_len = *len;
1745         zc.zc_history_offset = *off;
1746
1747         if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
1748                 switch (errno) {
1749                 case EPERM:
1750                         return (zfs_error_fmt(hdl, EZFS_PERM,
1751                             dgettext(TEXT_DOMAIN,
1752                             "cannot show history for pool '%s'"),
1753                             zhp->zpool_name));
1754                 case ENOENT:
1755                         return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
1756                             dgettext(TEXT_DOMAIN, "cannot get history for pool "
1757                             "'%s'"), zhp->zpool_name));
1758                 case ENOTSUP:
1759                         return (zfs_error_fmt(hdl, EZFS_BADVERSION,
1760                             dgettext(TEXT_DOMAIN, "cannot get history for pool "
1761                             "'%s', pool must be upgraded"), zhp->zpool_name));
1762                 default:
1763                         return (zpool_standard_error_fmt(hdl, errno,
1764                             dgettext(TEXT_DOMAIN,
1765                             "cannot get history for '%s'"), zhp->zpool_name));
1766                 }
1767         }
1768
1769         *len = zc.zc_history_len;
1770         *off = zc.zc_history_offset;
1771
1772         return (0);
1773 }
1774
1775 /*
1776  * Process the buffer of nvlists, unpacking and storing each nvlist record
1777  * into 'records'.  'leftover' is set to the number of bytes that weren't
1778  * processed as there wasn't a complete record.
1779  */
1780 static int
1781 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
1782     nvlist_t ***records, uint_t *numrecords)
1783 {
1784         uint64_t reclen;
1785         nvlist_t *nv;
1786         int i;
1787
1788         while (bytes_read > sizeof (reclen)) {
1789
1790                 /* get length of packed record (stored as little endian) */
1791                 for (i = 0, reclen = 0; i < sizeof (reclen); i++)
1792                         reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
1793
1794                 if (bytes_read < sizeof (reclen) + reclen)
1795                         break;
1796
1797                 /* unpack record */
1798                 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
1799                         return (ENOMEM);
1800                 bytes_read -= sizeof (reclen) + reclen;
1801                 buf += sizeof (reclen) + reclen;
1802
1803                 /* add record to nvlist array */
1804                 (*numrecords)++;
1805                 if (ISP2(*numrecords + 1)) {
1806                         *records = realloc(*records,
1807                             *numrecords * 2 * sizeof (nvlist_t *));
1808                 }
1809                 (*records)[*numrecords - 1] = nv;
1810         }
1811
1812         *leftover = bytes_read;
1813         return (0);
1814 }
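
/*
 * For reference (a sketch inferred from the loop above, not part of the
 * original source): each record in the buffer handed back by the kernel is
 * framed as an 8-byte little-endian length followed by that many bytes of
 * packed nvlist data, with records laid back to back:
 *
 *	+------------------+--------------------+------------------+---...
 *	| reclen (8 bytes, | packed nvlist      | reclen (8 bytes, |
 *	| little endian)   | (reclen bytes)     | little endian)   |
 *	+------------------+--------------------+------------------+---...
 *
 * A record is consumed only once all of its bytes are present; a trailing
 * partial record is left in place and reported through 'leftover' so the
 * caller can rewind its offset and re-read it on the next pass.
 */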
1815
1816 #define HIS_BUF_LEN     (128*1024)
1817
1818 /*
1819  * Retrieve the command history of a pool.
1820  */
1821 int
1822 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
1823 {
1824         char buf[HIS_BUF_LEN];
1825         uint64_t off = 0;
1826         nvlist_t **records = NULL;
1827         uint_t numrecords = 0;
1828         int err, i;
1829
1830         do {
1831                 uint64_t bytes_read = sizeof (buf);
1832                 uint64_t leftover;
1833
1834                 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
1835                         break;
1836
1837                 /* if nothing else was read in, we're at EOF, just return */
1838                 if (!bytes_read)
1839                         break;
1840
1841                 if ((err = zpool_history_unpack(buf, bytes_read,
1842                     &leftover, &records, &numrecords)) != 0)
1843                         break;
1844                 off -= leftover;
1845
1846                 /* CONSTCOND */
1847         } while (1);
1848
1849         if (!err) {
1850                 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
1851                 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
1852                     records, numrecords) == 0);
1853         }
1854         for (i = 0; i < numrecords; i++)
1855                 nvlist_free(records[i]);
1856         free(records);
1857
1858         return (err);
1859 }
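
/*
 * Usage sketch (not part of the original source): a caller can dump the
 * history much as zpool(1M) does.  The ZPOOL_HIST_TIME and ZPOOL_HIST_CMD
 * member names are assumed to match the constants the kernel used when it
 * packed each record.
 *
 *	nvlist_t *nvhis;
 *	nvlist_t **records;
 *	uint_t numrecords, i;
 *	uint64_t tsec;
 *	char *cmd;
 *
 *	if (zpool_get_history(zhp, &nvhis) == 0) {
 *		verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
 *		    &records, &numrecords) == 0);
 *		for (i = 0; i < numrecords; i++) {
 *			if (nvlist_lookup_uint64(records[i], ZPOOL_HIST_TIME,
 *			    &tsec) == 0 &&
 *			    nvlist_lookup_string(records[i], ZPOOL_HIST_CMD,
 *			    &cmd) == 0)
 *				(void) printf("%llu %s\n",
 *				    (u_longlong_t)tsec, cmd);
 *		}
 *		nvlist_free(nvhis);
 *	}
 */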
1860
1861 void
1862 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
1863     char *pathname, size_t len)
1864 {
1865         zfs_cmd_t zc = { 0 };
1866         boolean_t mounted = B_FALSE;
1867         char *mntpnt = NULL;
1868         char dsname[MAXNAMELEN];
1869
1870         if (dsobj == 0) {
1871                 /* special case for the MOS */
1872                 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
1873                 return;
1874         }
1875
1876         /* get the dataset's name */
1877         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1878         zc.zc_obj = dsobj;
1879         if (ioctl(zhp->zpool_hdl->libzfs_fd,
1880             ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
1881                 /* just write out a path of two object numbers */
1882                 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
1883                     dsobj, obj);
1884                 return;
1885         }
1886         (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
1887
1888         /* find out if the dataset is mounted */
1889         mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
1890
1891         /* get the corrupted object's path */
1892         (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
1893         zc.zc_obj = obj;
1894         if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
1895             &zc) == 0) {
1896                 if (mounted) {
1897                         (void) snprintf(pathname, len, "%s%s", mntpnt,
1898                             zc.zc_value);
1899                 } else {
1900                         (void) snprintf(pathname, len, "%s:%s",
1901                             dsname, zc.zc_value);
1902                 }
1903         } else {
1904                 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj);
1905         }
1906         free(mntpnt);
1907 }
1908
1909 int
1910 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
1911 {
1912         zfs_cmd_t zc = { 0 };
1913         int ret = -1;
1914         char errbuf[1024];
1915         nvlist_t *nvl = NULL;
1916         nvlist_t *realprops;
1917
1918         (void) snprintf(errbuf, sizeof (errbuf),
1919             dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
1920             zhp->zpool_name);
1921
1922         if (zpool_get_version(zhp) < ZFS_VERSION_BOOTFS) {
1923                 zfs_error_aux(zhp->zpool_hdl,
1924                     dgettext(TEXT_DOMAIN, "pool must be "
1925                     "upgraded to support pool properties"));
1926                 return (zfs_error(zhp->zpool_hdl, EZFS_BADVERSION, errbuf));
1927         }
1928
1929         if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
1930                 return (zfs_error(zhp->zpool_hdl, EZFS_POOLPROPS, errbuf));
1931
1932         if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0 ||
1933             nvlist_add_string(nvl, propname, propval) != 0) {
1934                 return (no_memory(zhp->zpool_hdl));
1935         }
1936
1937         if ((realprops = zfs_validate_properties(zhp->zpool_hdl, ZFS_TYPE_POOL,
1938             zhp->zpool_name, nvl, 0, NULL, errbuf)) == NULL) {
1939                 nvlist_free(nvl);
1940                 return (-1);
1941         }
1942
1943         nvlist_free(nvl);
1944         nvl = realprops;
1945
1946         /*
1947          * Execute the corresponding ioctl() to set this property.
1948          */
1949         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1950
1951         if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl, NULL) != 0)
1952                 return (-1);
1953
1954         ret = ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_SET_PROPS, &zc);
1955         zcmd_free_nvlists(&zc);
1956
1957         if (ret)
1958                 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
1959
1960         return (ret);
1961 }
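
/*
 * Usage sketch (not part of the original source; the pool and dataset names
 * are hypothetical): set the pool's "bootfs" property.  On failure an
 * extended error message has already been recorded on the library handle.
 *
 *	if (zpool_set_prop(zhp, "bootfs", "tank/boot") != 0)
 *		return (1);
 */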
1962
1963 int
1964 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *propbuf,
1965     size_t proplen, zfs_source_t *srctype)
1966 {
1967         uint64_t value;
1968         char msg[1024], *strvalue;
1969         nvlist_t *nvp;
1970         zfs_source_t src = ZFS_SRC_NONE;
1971
1972         (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1973             "cannot get property '%s'"), zpool_prop_to_name(prop));
1974
1975         if (zpool_get_version(zhp) < ZFS_VERSION_BOOTFS) {
1976                 zfs_error_aux(zhp->zpool_hdl,
1977                     dgettext(TEXT_DOMAIN, "pool must be "
1978                     "upgraded to support pool properties"));
1979                 return (zfs_error(zhp->zpool_hdl, EZFS_BADVERSION, msg));
1980         }
1981
1982         if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
1983                 return (zfs_error(zhp->zpool_hdl, EZFS_POOLPROPS, msg));
1984
1985         /*
1986          * the "name" property is special cased
1987          */
1988         if (!zfs_prop_valid_for_type(prop, ZFS_TYPE_POOL) &&
1989             prop != ZFS_PROP_NAME)
1990                 return (-1);
1991
1992         switch (prop) {
1993         case ZFS_PROP_NAME:
1994                 (void) strlcpy(propbuf, zhp->zpool_name, proplen);
1995                 break;
1996
1997         case ZFS_PROP_BOOTFS:
1998                 if (nvlist_lookup_nvlist(zhp->zpool_props,
1999                     zpool_prop_to_name(prop), &nvp) != 0) {
2000                         strvalue = (char *)zfs_prop_default_string(prop);
2001                         if (strvalue == NULL)
2002                                 strvalue = "-";
2003                         src = ZFS_SRC_DEFAULT;
2004                 } else {
2005                         VERIFY(nvlist_lookup_uint64(nvp,
2006                             ZFS_PROP_SOURCE, &value) == 0);
2007                         src = value;
2008                         VERIFY(nvlist_lookup_string(nvp, ZFS_PROP_VALUE,
2009                             &strvalue) == 0);
2010                         if (strlen(strvalue) >= proplen)
2011                                 return (-1);
2012                 }
2013                 (void) strcpy(propbuf, strvalue);
2014                 break;
2015
2016         default:
2017                 return (-1);
2018         }
2019         if (srctype)
2020                 *srctype = src;
2021         return (0);
2022 }
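
/*
 * Usage sketch (not part of the original source): read the bootfs property
 * back as a string, along with where the value came from.
 *
 *	char bootfs[ZFS_MAXPROPLEN];
 *	zfs_source_t src;
 *
 *	if (zpool_get_prop(zhp, ZFS_PROP_BOOTFS, bootfs, sizeof (bootfs),
 *	    &src) == 0)
 *		(void) printf("bootfs=%s (%s)\n", bootfs,
 *		    src == ZFS_SRC_DEFAULT ? "default" : "local");
 */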
2023
2024 int
2025 zpool_get_proplist(libzfs_handle_t *hdl, char *fields, zpool_proplist_t **listp)
2026 {
2027         return (zfs_get_proplist_common(hdl, fields, listp, ZFS_TYPE_POOL));
2028 }
2029
2030
2031 int
2032 zpool_expand_proplist(zpool_handle_t *zhp, zpool_proplist_t **plp)
2033 {
2034         libzfs_handle_t *hdl = zhp->zpool_hdl;
2035         zpool_proplist_t *entry;
2036         char buf[ZFS_MAXPROPLEN];
2037
2038         if (zfs_expand_proplist_common(hdl, plp, ZFS_TYPE_POOL) != 0)
2039                 return (-1);
2040
2041         for (entry = *plp; entry != NULL; entry = entry->pl_next) {
2042
2043                 if (entry->pl_fixed)
2044                         continue;
2045
2046                 if (entry->pl_prop != ZFS_PROP_INVAL &&
2047                     zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
2048                     NULL) == 0) {
2049                         if (strlen(buf) > entry->pl_width)
2050                                 entry->pl_width = strlen(buf);
2051                 }
2052         }
2053
2054         return (0);
2055 }