/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/zfs_ioctl.h>
#include <assert.h>
#include <ctype.h>
#include <devid.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <libintl.h>
#include <paths.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>

#include "zfs_namecheck.h"
#include "libzfs_impl.h"
/*
 * Validate the given pool name, optionally putting an extended error message
 * in 'buf'.
 */
static boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid.  So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0)) {
		zfs_error_aux(hdl,
		    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}

	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;
			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;
			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;
			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;
			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;
			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;
			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;
			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;
			case NAME_ERR_MULTIPLE_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' delimiters in name"));
				break;
			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}
static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}
/*
 * Open a handle to the given pool, even if the pool is currently in the
 * FAULTED state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}
/*
 * Like the above, but silent on error.  Used when iterating over pools
 * (because the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}
/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}
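/*
 * Example usage (illustrative sketch, not part of the original source):
 * a typical consumer obtains a libzfs handle, opens a pool by name, and
 * closes it when done.  The pool name "tank" is hypothetical.
 *
 *	libzfs_handle_t *hdl = libzfs_init();
 *	zpool_handle_t *zhp;
 *
 *	if (hdl != NULL && (zhp = zpool_open(hdl, "tank")) != NULL) {
 *		(void) printf("opened pool %s\n", zpool_get_name(zhp));
 *		zpool_close(zhp);
 *	}
 */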
/*
 * Close the handle.  Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	if (zhp->zpool_config)
		nvlist_free(zhp->zpool_config);
	if (zhp->zpool_old_config)
		nvlist_free(zhp->zpool_old_config);
	if (zhp->zpool_props)
		nvlist_free(zhp->zpool_props);
	free(zhp);
}
/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}
/*
 * Return the GUID of the pool.
 */
uint64_t
zpool_get_guid(zpool_handle_t *zhp)
{
	uint64_t guid;

	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
	    &guid) == 0);
	return (guid);
}
/*
 * Return the version of the pool.
 */
uint64_t
zpool_get_version(zpool_handle_t *zhp)
{
	uint64_t version;

	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_VERSION,
	    &version) == 0);
	return (version);
}
/*
 * Return the amount of space currently consumed by the pool.
 */
uint64_t
zpool_get_space_used(zpool_handle_t *zhp)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
	    (uint64_t **)&vs, &vsc) == 0);

	return (vs->vs_alloc);
}
/*
 * Return the total space in the pool.
 */
uint64_t
zpool_get_space_total(zpool_handle_t *zhp)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
	    (uint64_t **)&vs, &vsc) == 0);

	return (vs->vs_space);
}
/*
 * Return the alternate root for this pool, if any.
 */
int
zpool_get_root(zpool_handle_t *zhp, char *buf, size_t buflen)
{
	zfs_cmd_t zc = { 0 };

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0 ||
	    zc.zc_value[0] == '\0')
		return (-1);

	(void) strlcpy(buf, zc.zc_value, buflen);

	return (0);
}
/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE).
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}
/*
 * Create the named pool, using the provided vdev list.  It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    const char *altroot)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (altroot != NULL && altroot[0] != '/')
		return (zfs_error_fmt(hdl, EZFS_BADPATH,
		    dgettext(TEXT_DOMAIN, "bad alternate root '%s'"), altroot));

	if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
		return (-1);

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if (altroot != NULL)
		(void) strlcpy(zc.zc_value, altroot, sizeof (zc.zc_value));

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_CREATE, &zc) != 0) {
		zcmd_free_nvlists(&zc);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

	zcmd_free_nvlists(&zc);

	/*
	 * If this is an alternate root pool, then we automatically set the
	 * mountpoint of the root dataset to be '/'.
	 */
	if (altroot != NULL) {
		zfs_handle_t *zhp;

		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_ANY)) != NULL);
		verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
		    "/") == 0);

		zfs_close(zhp);
	}

	return (0);
}
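/*
 * Example usage (illustrative sketch, not part of the original source):
 * hand-roll a minimal single-disk vdev tree and create a pool from it.
 * Real consumers normally build 'nvroot' with the zpool(8) command's
 * helpers; the device path and pool name below are hypothetical.
 *
 *	nvlist_t *disk, *nvroot;
 *
 *	verify(nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_DISK) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_PATH, "/dev/da0") == 0);
 *	verify(nvlist_add_uint64(disk, ZPOOL_CONFIG_WHOLE_DISK, 0ULL) == 0);
 *
 *	verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_ROOT) == 0);
 *	verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
 *	    &disk, 1) == 0);
 *
 *	if (zpool_create(hdl, "tank", nvroot, NULL) != 0)
 *		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));
 *
 *	nvlist_free(disk);
 *	nvlist_free(nvroot);
 */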
/*
 * Destroy the given pool.  It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
	    ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	if (zpool_remove_zvol_links(zhp) != 0)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}
/*
 * Add the given vdevs to the pool.  The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = { 0 };
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares;
	uint_t nspares;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_version(zhp) < ZFS_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add raidz2 vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		case EDOM:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "root pool can not have concatenated devices"));
			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}
/*
 * Exports the pool from the system.  The caller must ensure that there are no
 * mounted datasets in the pool.
 */
int
zpool_export(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };

	if (zpool_remove_zvol_links(zhp) != 0)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_EXPORT, &zc) != 0)
		return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot export '%s'"),
		    zhp->zpool_name));

	return (0);
}
/*
 * Import the given pool using the known configuration.  The configuration
 * should have come from zpool_find_import().  The 'newname' and 'altroot'
 * parameters control whether the pool is imported with a different name or
 * with an alternate root, respectively.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    const char *altroot)
{
	zfs_cmd_t zc = { 0 };
	char *thename;
	char *origname;
	int ret;
	char desc[1024];

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (altroot != NULL && altroot[0] != '/')
		return (zfs_error_fmt(hdl, EZFS_BADPATH,
		    dgettext(TEXT_DOMAIN, "bad alternate root '%s'"),
		    altroot));

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	if (altroot != NULL)
		(void) strlcpy(zc.zc_value, altroot, sizeof (zc.zc_value));
	else
		zc.zc_value[0] = '\0';

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_src_nvlist(hdl, &zc, config, NULL) != 0)
		return (-1);

	ret = 0;
	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (errno) {
		case ENOTSUP:
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, desc);
		}

		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0) {
			ret = -1;
		} else if (zhp != NULL) {
			ret = zpool_create_zvol_links(zhp);
			zpool_close(zhp);
		}
	}

	zcmd_free_nvlists(&zc);
	return (ret);
}
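/*
 * Example usage (illustrative sketch, not part of the original source):
 * import an exported pool under a new name.  The config would normally
 * come from zpool_find_import(); "newtank" is hypothetical.
 *
 *	nvlist_t *config = ...;		(from zpool_find_import())
 *
 *	if (zpool_import(hdl, config, "newtank", NULL) != 0)
 *		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));
 */

/*
 * Scrub the given pool.
 */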
int
zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = type;

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_SCRUB, &zc) == 0)
		return (0);

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);

	if (errno == EBUSY)
		return (zfs_error(hdl, EZFS_RESILVERING, msg));
	else
		return (zpool_standard_error(hdl, errno, msg));
}
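/*
 * Example usage (illustrative sketch, not part of the original source):
 * kick off a full scrub, or stop an in-progress one with POOL_SCRUB_NONE.
 *
 *	if (zpool_scrub(zhp, POOL_SCRUB_EVERYTHING) != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(zhp->zpool_hdl));
 */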
/*
 * 'avail_spare' is set to B_TRUE if the provided GUID refers to an AVAIL
 * spare, and B_FALSE if it's an INUSE spare.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
    boolean_t *avail_spare)
{
	uint_t c, children;
	nvlist_t **child;
	uint64_t theguid, present;
	char *path;
	uint64_t wholedisk = 0;
	nvlist_t *ret;

	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &theguid) == 0);

	if (search == NULL &&
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &present) == 0) {
		/*
		 * If the device has never been present since import, the only
		 * reliable way to match the vdev is by GUID.
		 */
		if (theguid == guid)
			return (nv);
	} else if (search != NULL &&
	    nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);
		if (wholedisk) {
			/*
			 * For whole disks, the internal path has 's0', but the
			 * path passed in by the user doesn't.
			 */
			if (strlen(search) == strlen(path) - 2 &&
			    strncmp(search, path, strlen(search)) == 0)
				return (nv);
		} else if (strcmp(search, path) == 0) {
			return (nv);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++)
		if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
		    avail_spare)) != NULL)
			return (ret);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
			    avail_spare)) != NULL) {
				*avail_spare = B_TRUE;
				return (ret);
			}
		}
	}

	return (NULL);
}
nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare)
{
	char buf[MAXPATHLEN];
	const char *search;
	char *end;
	nvlist_t *nvroot;
	uint64_t guid;

	guid = strtoull(path, &end, 10);
	if (guid != 0 && *end == '\0') {
		search = NULL;
	} else if (path[0] != '/') {
		(void) snprintf(buf, sizeof (buf), "%s%s", _PATH_DEV, path);
		search = buf;
	} else {
		search = path;
	}

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	return (vdev_to_nvlist_iter(nvroot, search, guid, avail_spare));
}
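/*
 * Example usage (illustrative sketch, not part of the original source):
 * callers may pass a full path, a bare device name (which gets the _PATH_DEV
 * prefix above), or a decimal GUID string.  For a disk vdev at the
 * hypothetical path /dev/da0, these lookups are equivalent:
 *
 *	boolean_t spare;
 *	nvlist_t *tgt;
 *
 *	tgt = zpool_find_vdev(zhp, "/dev/da0", &spare);
 *	tgt = zpool_find_vdev(zhp, "da0", &spare);
 */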
/*
 * Returns TRUE if the given guid corresponds to a spare (INUSE or not).
 */
static boolean_t
is_spare(zpool_handle_t *zhp, uint64_t guid)
{
	uint64_t spare_guid;
	nvlist_t *nvroot;
	nvlist_t **spares;
	uint_t nspares;
	int i;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		for (i = 0; i < nspares; i++) {
			verify(nvlist_lookup_uint64(spares[i],
			    ZPOOL_CONFIG_GUID, &spare_guid) == 0);
			if (guid == spare_guid)
				return (B_TRUE);
		}
	}

	return (B_FALSE);
}
/*
 * Bring the specified vdev online.
 */
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot online %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (avail_spare || is_spare(zhp, zc.zc_guid) == B_TRUE)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ONLINE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
/*
 * Take the specified vdev offline.
 */
int
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, int istmp)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (avail_spare || is_spare(zhp, zc.zc_guid) == B_TRUE)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	zc.zc_cookie = istmp;

	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_OFFLINE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}
}
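/*
 * Example usage (illustrative sketch, not part of the original source):
 * temporarily offline a device for maintenance, then bring it back.  A
 * nonzero 'istmp' makes the offline state non-persistent across reboot;
 * the device path is hypothetical.
 *
 *	if (zpool_vdev_offline(zhp, "/dev/da0", 1) == 0) {
 *		... (service the disk) ...
 *		(void) zpool_vdev_online(zhp, "/dev/da0");
 *	}
 */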
/*
 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
 * a hot spare.
 */
static boolean_t
is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
{
	nvlist_t **child;
	uint_t c, children;
	char *type;

	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
		    &type) == 0);

		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
		    children == 2 && child[which] == tgt)
			return (B_TRUE);

		for (c = 0; c < children; c++)
			if (is_replacing_spare(child[c], tgt, which))
				return (B_TRUE);
	}

	return (B_FALSE);
}
/*
 * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
 */
int
zpool_vdev_attach(zpool_handle_t *zhp,
    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	int ret;
	nvlist_t *tgt;
	boolean_t avail_spare;
	uint64_t val;
	char *path;
	nvlist_t **child;
	uint_t children;
	nvlist_t *config_root;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	if (replacing)
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot replace %s with %s"), old_disk, new_disk);
	else
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot attach %s to %s"), new_disk, old_disk);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
	zc.zc_cookie = replacing;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0 || children != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
	}

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);

	/*
	 * If the target is a hot spare that has been swapped in, we can only
	 * replace it with another hot spare.
	 */
	if (replacing &&
	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
	    (zpool_find_vdev(zhp, path, &avail_spare) == NULL ||
	    !avail_spare) && is_replacing_spare(config_root, tgt, 1)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "can only be replaced by another hot spare"));
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	/*
	 * If we are attempting to replace a spare, it cannot be applied to an
	 * already spared device.
	 */
	if (replacing &&
	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
	    zpool_find_vdev(zhp, path, &avail_spare) != NULL && avail_spare &&
	    is_replacing_spare(config_root, tgt, 0)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device has already been replaced with a spare"));
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
		return (-1);

	ret = ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ATTACH, &zc);

	zcmd_free_nvlists(&zc);

	if (ret == 0)
		return (0);

	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't attach to or replace this type of vdev.
		 */
		if (replacing)
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cannot replace a replacing device"));
		else
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "can only attach to mirrors and top-level "
			    "disks"));
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EINVAL:
		/*
		 * The new device must be a single disk.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
		break;

	case EBUSY:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
		    new_disk);
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EOVERFLOW:
		/*
		 * The new device is too small.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device is too small"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EDOM:
		/*
		 * The new device has a different alignment requirement.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "devices have different sector alignment"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case ENAMETOOLONG:
		/*
		 * The resulting top-level vdev spec won't fit in the label.
		 */
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}
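/*
 * Example usage (illustrative sketch, not part of the original source):
 * replace a failing disk.  'nvroot' describes the single replacement
 * device and is built the same way as for zpool_create(); the device
 * paths are hypothetical.
 *
 *	if (zpool_vdev_attach(zhp, "/dev/da0", "/dev/da1", nvroot, 1) != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(zhp->zpool_hdl));
 */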
/*
 * Detach the specified device.
 */
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_DETACH, &zc) == 0)
		return (0);

	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't detach from this type of vdev.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
		    "applicable to mirror and replacing vdevs"));
		(void) zfs_error(zhp->zpool_hdl, EZFS_BADTARGET, msg);
		break;

	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}
/*
 * Remove the given device.  Currently, this is supported only for hot spares.
 */
int
zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (!avail_spare) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "only inactive hot spares can be removed"));
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	}

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
/*
 * Clear the errors for the pool, or the particular device if specified.
 */
int
zpool_clear(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	if (path)
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    path);
	else
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (path) {
		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == 0)
			return (zfs_error(hdl, EZFS_NODEVICE, msg));

		if (avail_spare)
			return (zfs_error(hdl, EZFS_ISSPARE, msg));

		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
		    &zc.zc_guid) == 0);
	}

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
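/*
 * Example usage (illustrative sketch, not part of the original source):
 * clear pool-wide error counts, or those of one device.
 *
 *	(void) zpool_clear(zhp, NULL);		(whole pool)
 *	(void) zpool_clear(zhp, "/dev/da0");	(single vdev, path hypothetical)
 */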
/*
 * Iterate over all zvols in a given pool by walking the /dev/zvol/dsk/<pool>
 * hierarchy.
 */
int
zpool_iter_zvol(zpool_handle_t *zhp, int (*cb)(const char *, void *),
    void *data)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char (*paths)[MAXPATHLEN];
	char path[MAXPATHLEN];
	size_t size = 4;
	int curr, base, ret = 0;
	DIR *dirp;
	struct dirent *dp;
	struct stat st;

	if ((base = open(ZVOL_FULL_DEV_DIR, O_RDONLY)) < 0)
		return (errno == ENOENT ? 0 : -1);

	snprintf(path, sizeof(path), "%s/%s", ZVOL_FULL_DEV_DIR,
	    zhp->zpool_name);
	if (stat(path, &st) != 0) {
		int err = errno;

		(void) close(base);
		return (err == ENOENT ? 0 : -1);
	}

	/*
	 * Oddly this wasn't a directory -- ignore that failure since we
	 * know there are no links lower in the (non-existent) hierarchy.
	 */
	if (!S_ISDIR(st.st_mode)) {
		(void) close(base);
		return (0);
	}

	if ((paths = zfs_alloc(hdl, size * sizeof (paths[0]))) == NULL) {
		(void) close(base);
		return (-1);
	}

	(void) strlcpy(paths[0], zhp->zpool_name, sizeof (paths[0]));
	curr = 0;

	while (curr >= 0) {
		snprintf(path, sizeof(path), "%s/%s", ZVOL_FULL_DEV_DIR,
		    paths[curr]);
		if (lstat(path, &st) != 0)
			goto err;

		if (S_ISDIR(st.st_mode)) {
			if ((dirp = opendir(path)) == NULL)
				goto err;

			while ((dp = readdir(dirp)) != NULL) {
				if (dp->d_name[0] == '.')
					continue;

				if (curr + 1 == size) {
					paths = zfs_realloc(hdl, paths,
					    size * sizeof (paths[0]),
					    size * 2 * sizeof (paths[0]));
					if (paths == NULL) {
						(void) closedir(dirp);
						goto err;
					}
					size *= 2;
				}

				(void) strlcpy(paths[curr + 1], paths[curr],
				    sizeof (paths[curr + 1]));
				(void) strlcat(paths[curr], "/",
				    sizeof (paths[curr]));
				(void) strlcat(paths[curr], dp->d_name,
				    sizeof (paths[curr]));
				curr++;
			}

			(void) closedir(dirp);
		} else {
			if ((ret = cb(paths[curr], data)) != 0)
				break;
		}

		curr--;
	}

	free(paths);
	(void) close(base);

	return (ret);

err:
	free(paths);
	(void) close(base);
	return (-1);
}
typedef struct zvol_cb {
	zpool_handle_t *zcb_pool;
	boolean_t zcb_create;
} zvol_cb_t;

/*ARGSUSED*/
static int
do_zvol_create(zfs_handle_t *zhp, void *data)
{
	int ret;

	if (ZFS_IS_VOLUME(zhp))
		(void) zvol_create_link(zhp->zfs_hdl, zhp->zfs_name);

	ret = zfs_iter_children(zhp, do_zvol_create, NULL);

	zfs_close(zhp);

	return (ret);
}
/*
 * Iterate over all zvols in the pool and make any necessary minor nodes.
 */
int
zpool_create_zvol_links(zpool_handle_t *zhp)
{
	zfs_handle_t *zfp;
	int ret;

	/*
	 * If the pool is unavailable, just return success.
	 */
	if ((zfp = make_dataset_handle(zhp->zpool_hdl,
	    zhp->zpool_name)) == NULL)
		return (0);

	ret = zfs_iter_children(zfp, do_zvol_create, NULL);

	zfs_close(zfp);
	return (ret);
}
static int
do_zvol_remove(const char *dataset, void *data)
{
	zpool_handle_t *zhp = data;

	return (zvol_remove_link(zhp->zpool_hdl, dataset));
}

/*
 * Iterate over all zvols in the pool and remove any minor nodes.  We iterate
 * by examining the /dev links so that a corrupted pool doesn't impede this
 * operation.
 */
int
zpool_remove_zvol_links(zpool_handle_t *zhp)
{
	return (zpool_iter_zvol(zhp, do_zvol_remove, zhp));
}
/*
 * Convert from a devid string to a path.
 */
static char *
devid_to_path(char *devid_str)
{
	ddi_devid_t devid;
	char *minor;
	char *path;
	devid_nmlist_t *list = NULL;
	int ret;

	if (devid_str_decode(devid_str, &devid, &minor) != 0)
		return (NULL);

	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);

	devid_str_free(minor);
	devid_free(devid);

	if (ret != 0)
		return (NULL);

	if ((path = strdup(list[0].devname)) == NULL)
		return (NULL);

	devid_free_nmlist(list);

	return (path);
}
/*
 * Convert from a path to a devid string.
 */
static char *
path_to_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}
/*
 * Issue the necessary ioctl() to update the stored path value for the vdev.  We
 * ignore any failure here, since a common case is for an unprivileged user to
 * type 'zpool status', and we'll display the correct information anyway.
 */
static void
set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
{
	zfs_cmd_t zc = { 0 };

	(void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
	    &zc.zc_guid) == 0);

	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
}
/*
 * Given a vdev, return the name to display in iostat.  If the vdev has a path,
 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
 * We also check if this is a whole disk, in which case we strip off the
 * trailing 's0' slice name.
 *
 * This routine is also responsible for identifying when disks have been
 * reconfigured in a new location.  The kernel will have opened the device by
 * devid, but the path will still refer to the old location.  To catch this, we
 * first do a path -> devid translation (which is fast for the common case).  If
 * the devid matches, we're done.  If not, we do a reverse devid -> path
 * translation and issue the appropriate ioctl() to update the path of the vdev.
 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
 * of these checks.
 */
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
{
	char *path, *devid;
	uint64_t value;
	char buf[64];

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &value) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
		    &value) == 0);
		(void) snprintf(buf, sizeof (buf), "%llu",
		    (u_longlong_t)value);
		path = buf;
	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {

		if (zhp != NULL &&
		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
			/*
			 * Determine if the current path is correct.
			 */
			char *newdevid = path_to_devid(path);

			if (newdevid == NULL ||
			    strcmp(devid, newdevid) != 0) {
				char *newpath;

				if ((newpath = devid_to_path(devid)) != NULL) {
					/*
					 * Update the path appropriately.
					 */
					set_path(zhp, nv, newpath);
					if (nvlist_add_string(nv,
					    ZPOOL_CONFIG_PATH, newpath) == 0)
						verify(nvlist_lookup_string(nv,
						    ZPOOL_CONFIG_PATH,
						    &path) == 0);
					free(newpath);
				}
			}

			if (newdevid)
				devid_str_free(newdevid);
		}

		if (strncmp(path, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
			path += sizeof(_PATH_DEV) - 1;

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &value) == 0 && value) {
			char *tmp = zfs_strdup(hdl, path);

			if (tmp == NULL)
				return (NULL);
			tmp[strlen(path) - 2] = '\0';
			return (tmp);
		}
	} else {
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);

		/*
		 * If it's a raidz device, we need to stick in the parity level.
		 */
		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
			    &value) == 0);
			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
			    (u_longlong_t)value);
			path = buf;
		}
	}

	return (zfs_strdup(hdl, path));
}
static int
zbookmark_compare(const void *a, const void *b)
{
	return (memcmp(a, b, sizeof (zbookmark_t)));
}
/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
	zfs_cmd_t zc = { 0 };
	uint64_t count;
	zbookmark_t *zb = NULL;
	int i;

	/*
	 * Retrieve the raw error list from the kernel.  If the number of errors
	 * has increased, allocate more space and continue until we get the
	 * entire list.
	 */
	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
	    &count) == 0);
	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
		return (-1);
	zc.zc_nvlist_dst_size = count;
	(void) strcpy(zc.zc_name, zhp->zpool_name);
	for (;;) {
		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
		    &zc) != 0) {
			free((void *)(uintptr_t)zc.zc_nvlist_dst);
			if (errno == ENOMEM) {
				count = zc.zc_nvlist_dst_size;
				if ((zc.zc_nvlist_dst = (uintptr_t)
				    zfs_alloc(zhp->zpool_hdl, count *
				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
					return (-1);
			} else {
				return (-1);
			}
		} else {
			break;
		}
	}

	/*
	 * Sort the resulting bookmarks.  This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process.  So we point the start of our
	 * array appropriately and decrement the total number of elements.
	 */
	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
	    zc.zc_nvlist_dst_size;
	count -= zc.zc_nvlist_dst_size;

	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);

	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);

	/*
	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
	 */
	for (i = 0; i < count; i++) {
		nvlist_t *nv;

		/* ignoring zb_blkid and zb_level for now */
		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
		    zb[i-1].zb_object == zb[i].zb_object)
			continue;

		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
			goto nomem;
		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
		    zb[i].zb_objset) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
		    zb[i].zb_object) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		nvlist_free(nv);
	}

	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (0);

nomem:
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (no_memory(zhp->zpool_hdl));
}
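/*
 * Example usage (illustrative sketch, not part of the original source):
 * walk the uniquified error list and print a path for each
 * <dataset, object> pair.
 *
 *	nvlist_t *nverrlist = NULL;
 *	nvpair_t *elem = NULL;
 *
 *	if (zpool_get_errlog(zhp, &nverrlist) == 0) {
 *		while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
 *			nvlist_t *nv;
 *			uint64_t dsobj, obj;
 *			char pathbuf[MAXPATHLEN * 2];
 *
 *			verify(nvpair_value_nvlist(elem, &nv) == 0);
 *			verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
 *			    &dsobj) == 0);
 *			verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
 *			    &obj) == 0);
 *			zpool_obj_to_path(zhp, dsobj, obj, pathbuf,
 *			    sizeof (pathbuf));
 *			(void) printf("%s\n", pathbuf);
 *		}
 *		nvlist_free(nverrlist);
 *	}
 */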
/*
 * Upgrade a ZFS pool to the latest on-disk version.
 */
int
zpool_upgrade(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strcpy(zc.zc_name, zhp->zpool_name);
	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
		return (zpool_standard_error_fmt(hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
		    zhp->zpool_name));

	return (0);
}
/*
 * Log command history.
 *
 * 'pool' is B_TRUE if we are logging a command for 'zpool'; B_FALSE
 * otherwise ('zfs').  'pool_create' is B_TRUE if we are logging the creation
 * of the pool; B_FALSE otherwise.  'path' is the pathname containing the
 * poolname.  'argc' and 'argv' are used to construct the command string.
 */
void
zpool_log_history(libzfs_handle_t *hdl, int argc, char **argv, const char *path,
    boolean_t pool, boolean_t pool_create)
{
	char cmd_buf[HIS_MAX_RECORD_LEN];
	char *dspath;
	zfs_cmd_t zc = { 0 };
	int i;

	/* construct the command string */
	(void) strcpy(cmd_buf, pool ? "zpool" : "zfs");
	for (i = 0; i < argc; i++) {
		if (strlen(cmd_buf) + 1 + strlen(argv[i]) > HIS_MAX_RECORD_LEN)
			break;
		(void) strcat(cmd_buf, " ");
		(void) strcat(cmd_buf, argv[i]);
	}

	/* figure out the poolname */
	dspath = strpbrk(path, "/@");
	if (dspath == NULL) {
		(void) strcpy(zc.zc_name, path);
	} else {
		(void) strncpy(zc.zc_name, path, dspath - path);
		zc.zc_name[dspath-path] = '\0';
	}

	zc.zc_history = (uint64_t)(uintptr_t)cmd_buf;
	zc.zc_history_len = strlen(cmd_buf);

	/* overloading zc_history_offset */
	zc.zc_history_offset = pool_create;

	(void) ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_LOG_HISTORY, &zc);
}
/*
 * Perform ioctl to get some command history of a pool.
 *
 * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
 * logical offset of the history buffer to start reading from.
 *
 * Upon return, 'off' is the next logical offset to read from and
 * 'len' is the actual amount of bytes read into 'buf'.
 */
static int
get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	zc.zc_history = (uint64_t)(uintptr_t)buf;
	zc.zc_history_len = *len;
	zc.zc_history_offset = *off;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
		switch (errno) {
		case EPERM:
			return (zfs_error_fmt(hdl, EZFS_PERM,
			    dgettext(TEXT_DOMAIN,
			    "cannot show history for pool '%s'"),
			    zhp->zpool_name));
		case ENOENT:
			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s'"), zhp->zpool_name));
		case ENOTSUP:
			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s', pool must be upgraded"), zhp->zpool_name));
		default:
			return (zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN,
			    "cannot get history for '%s'"), zhp->zpool_name));
		}
	}

	*len = zc.zc_history_len;
	*off = zc.zc_history_offset;

	return (0);
}
/*
 * Process the buffer of nvlists, unpacking and storing each nvlist record
 * into 'records'.  'leftover' is set to the number of bytes that weren't
 * processed as there wasn't a complete record.
 */
static int
zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
    nvlist_t ***records, uint_t *numrecords)
{
	uint64_t reclen;
	nvlist_t *nv;
	int i;

	while (bytes_read > sizeof (reclen)) {

		/* get length of packed record (stored as little endian) */
		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);

		if (bytes_read < sizeof (reclen) + reclen)
			break;

		/* unpack record */
		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
			return (ENOMEM);
		bytes_read -= sizeof (reclen) + reclen;
		buf += sizeof (reclen) + reclen;

		/* add record to nvlist array */
		(*numrecords)++;
		if (ISP2(*numrecords + 1)) {
			*records = realloc(*records,
			    *numrecords * 2 * sizeof (nvlist_t *));
		}
		(*records)[*numrecords - 1] = nv;
	}

	*leftover = bytes_read;
	return (0);
}
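/*
 * Illustrative sketch of the record framing consumed above (not part of the
 * original source): each history record is an 8-byte little-endian length
 * followed by a packed nvlist of exactly that many bytes, repeated until
 * the buffer runs out.  A trailing partial record is reported via
 * 'leftover' so the caller can rewind its read offset.
 *
 *	+--------+------------------+--------+------------------+ ...
 *	| reclen |  packed nvlist   | reclen |  packed nvlist   |
 *	| 8 (LE) | 'reclen' bytes   | 8 (LE) |       ...        |
 *	+--------+------------------+--------+------------------+
 */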
#define	HIS_BUF_LEN	(128*1024)

/*
 * Retrieve the command history of a pool.
 */
int
zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
{
	char buf[HIS_BUF_LEN];
	uint64_t off = 0;
	nvlist_t **records = NULL;
	uint_t numrecords = 0;
	int err, i;

	do {
		uint64_t bytes_read = sizeof (buf);
		uint64_t leftover;

		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
			break;

		/* if nothing else was read in, we're at EOF, just return */
		if (!bytes_read)
			break;

		if ((err = zpool_history_unpack(buf, bytes_read,
		    &leftover, &records, &numrecords)) != 0)
			break;
		off -= leftover;

		/* CONSTCOND */
	} while (1);

	if (!err) {
		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
		    records, numrecords) == 0);
	}
	for (i = 0; i < numrecords; i++)
		nvlist_free(records[i]);
	free(records);

	return (err);
}
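/*
 * Example usage (illustrative sketch, not part of the original source):
 * fetch the history and print each record's command string.
 *
 *	nvlist_t *nvhis;
 *	nvlist_t **records;
 *	uint_t nrec, i;
 *	char *cmd;
 *
 *	if (zpool_get_history(zhp, &nvhis) == 0) {
 *		verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
 *		    &records, &nrec) == 0);
 *		for (i = 0; i < nrec; i++)
 *			if (nvlist_lookup_string(records[i], ZPOOL_HIST_CMD,
 *			    &cmd) == 0)
 *				(void) printf("%s\n", cmd);
 *		nvlist_free(nvhis);
 *	}
 */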
void
zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	zfs_cmd_t zc = { 0 };
	boolean_t mounted = B_FALSE;
	char *mntpnt = NULL;
	char dsname[MAXNAMELEN];

	if (dsobj == 0) {
		/* special case for the MOS */
		(void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
		return;
	}

	/* get the dataset's name */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_obj = dsobj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd,
	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
		/* just write out a path of two object numbers */
		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
		    dsobj, obj);
		return;
	}
	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));

	/* find out if the dataset is mounted */
	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);

	/* get the corrupted object's path */
	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
	zc.zc_obj = obj;
	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
	    &zc) == 0) {
		if (mounted) {
			(void) snprintf(pathname, len, "%s%s", mntpnt,
			    zc.zc_value);
		} else {
			(void) snprintf(pathname, len, "%s:%s",
			    dsname, zc.zc_value);
		}
	} else {
		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj);
	}
	free(mntpnt);
}
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = { 0 };
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (zpool_get_version(zhp) < ZFS_VERSION_BOOTFS) {
		zfs_error_aux(zhp->zpool_hdl,
		    dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to support pool properties"));
		return (zfs_error(zhp->zpool_hdl, EZFS_BADVERSION, errbuf));
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
		return (zfs_error(zhp->zpool_hdl, EZFS_POOLPROPS, errbuf));

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0 ||
	    nvlist_add_string(nvl, propname, propval) != 0) {
		return (no_memory(zhp->zpool_hdl));
	}

	if ((realprops = zfs_validate_properties(zhp->zpool_hdl, ZFS_TYPE_POOL,
	    zhp->zpool_name, nvl, 0, NULL, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl, NULL) != 0)
		return (-1);

	ret = ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_SET_PROPS, &zc);
	zcmd_free_nvlists(&zc);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);

	return (ret);
}
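/*
 * Example usage (illustrative sketch, not part of the original source):
 * set and read back the 'bootfs' pool property; the dataset name is
 * hypothetical.
 *
 *	char val[ZFS_MAXPROPLEN];
 *
 *	if (zpool_set_prop(zhp, "bootfs", "tank/boot") == 0 &&
 *	    zpool_get_prop(zhp, ZFS_PROP_BOOTFS, val, sizeof (val),
 *	    NULL) == 0)
 *		(void) printf("bootfs = %s\n", val);
 */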
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *propbuf,
    size_t proplen, zfs_source_t *srctype)
{
	uint64_t value;
	char msg[1024], *strvalue;
	nvlist_t *nvp;
	zfs_source_t src = ZFS_SRC_NONE;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot get property '%s'"), zpool_prop_to_name(prop));

	if (zpool_get_version(zhp) < ZFS_VERSION_BOOTFS) {
		zfs_error_aux(zhp->zpool_hdl,
		    dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to support pool properties"));
		return (zfs_error(zhp->zpool_hdl, EZFS_BADVERSION, msg));
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
		return (zfs_error(zhp->zpool_hdl, EZFS_POOLPROPS, msg));

	/*
	 * The "name" property is special cased.
	 */
	if (!zfs_prop_valid_for_type(prop, ZFS_TYPE_POOL) &&
	    prop != ZFS_PROP_NAME)
		return (-1);

	switch (prop) {
	case ZFS_PROP_NAME:
		(void) strlcpy(propbuf, zhp->zpool_name, proplen);
		break;

	case ZFS_PROP_BOOTFS:
		if (nvlist_lookup_nvlist(zhp->zpool_props,
		    zpool_prop_to_name(prop), &nvp) != 0) {
			strvalue = (char *)zfs_prop_default_string(prop);
			if (strvalue == NULL)
				strvalue = "-";
			src = ZFS_SRC_DEFAULT;
		} else {
			VERIFY(nvlist_lookup_uint64(nvp,
			    ZFS_PROP_SOURCE, &value) == 0);
			src = value;
			VERIFY(nvlist_lookup_string(nvp, ZFS_PROP_VALUE,
			    &strvalue) == 0);
		}
		if (strlen(strvalue) >= proplen)
			return (-1);
		(void) strcpy(propbuf, strvalue);
		break;

	default:
		return (-1);
	}

	if (srctype)
		*srctype = src;

	return (0);
}
int
zpool_get_proplist(libzfs_handle_t *hdl, char *fields, zpool_proplist_t **listp)
{
	return (zfs_get_proplist_common(hdl, fields, listp, ZFS_TYPE_POOL));
}

int
zpool_expand_proplist(zpool_handle_t *zhp, zpool_proplist_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zpool_proplist_t *entry;
	char buf[ZFS_MAXPROPLEN];

	if (zfs_expand_proplist_common(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZFS_PROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}