* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
* Copyright (c) 2013, Joyent, Inc. All rights reserved.
#include <sys/types.h>
#include <sys/zfs_ioctl.h>
#include "zfs_namecheck.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"
static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
#define DISK_ROOT "/dev/dsk"
#define RDISK_ROOT "/dev/rdsk"
#define BACKUP_SLICE "s2"
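/*
 * Illustrative sketch (not compiled): these prefixes combine with a
 * c#t#d# disk name to form device paths; "c0t0d0" is a made-up example.
 */
#if 0
char path[MAXPATHLEN];
/* whole-disk backup slice: "/dev/rdsk/c0t0d0s2" */
(void) snprintf(path, sizeof (path), "%s/%s%s",
    RDISK_ROOT, "c0t0d0", BACKUP_SLICE);
#endif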
typedef struct prop_flags {
int create:1; /* Validate property on creation */
int import:1; /* Validate property on import */
* ====================================================================
* zpool property functions
* ====================================================================
zpool_get_all_props(zpool_handle_t *zhp)
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
if (errno == ENOMEM) {
if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
zcmd_free_nvlists(&zc);
zcmd_free_nvlists(&zc);
if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
zcmd_free_nvlists(&zc);
zcmd_free_nvlists(&zc);
zpool_props_refresh(zpool_handle_t *zhp)
old_props = zhp->zpool_props;
if (zpool_get_all_props(zhp) != 0)
nvlist_free(old_props);
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
zprop_source_t source;
nvl = zhp->zpool_props;
if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
source = ZPROP_SRC_DEFAULT;
if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
zprop_source_t source;
if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
* zpool_get_all_props() has most likely failed because
* the pool is faulted, but if all we need is the top level
* vdev's guid then get it from the zhp config nvlist.
if ((prop == ZPOOL_PROP_GUID) &&
(nvlist_lookup_nvlist(zhp->zpool_config,
ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
return (zpool_prop_default_numeric(prop));
nvl = zhp->zpool_props;
if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
source = ZPROP_SRC_DEFAULT;
value = zpool_prop_default_numeric(prop);
* Map VDEV STATE to printed strings.
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
case VDEV_STATE_CLOSED:
case VDEV_STATE_OFFLINE:
return (gettext("OFFLINE"));
case VDEV_STATE_REMOVED:
return (gettext("REMOVED"));
case VDEV_STATE_CANT_OPEN:
if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
return (gettext("FAULTED"));
else if (aux == VDEV_AUX_SPLIT_POOL)
return (gettext("SPLIT"));
return (gettext("UNAVAIL"));
case VDEV_STATE_FAULTED:
return (gettext("FAULTED"));
case VDEV_STATE_DEGRADED:
return (gettext("DEGRADED"));
case VDEV_STATE_HEALTHY:
return (gettext("ONLINE"));
return (gettext("UNKNOWN"));
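/*
 * Example (sketch only): the returned string is the status column shown
 * by zpool(1M); callers normally pass the state and aux fields from a
 * vdev_stat_t, as the ZPOOL_PROP_HEALTH case below does.
 */
#if 0
const char *state = zpool_state_to_name(VDEV_STATE_DEGRADED,
    VDEV_AUX_NONE);	/* "DEGRADED" */
#endif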
* Map POOL STATE to printed strings.
zpool_pool_state_to_name(pool_state_t state)
case POOL_STATE_ACTIVE:
return (gettext("ACTIVE"));
case POOL_STATE_EXPORTED:
return (gettext("EXPORTED"));
case POOL_STATE_DESTROYED:
return (gettext("DESTROYED"));
case POOL_STATE_SPARE:
return (gettext("SPARE"));
case POOL_STATE_L2CACHE:
return (gettext("L2CACHE"));
case POOL_STATE_UNINITIALIZED:
return (gettext("UNINITIALIZED"));
case POOL_STATE_UNAVAIL:
return (gettext("UNAVAIL"));
case POOL_STATE_POTENTIALLY_ACTIVE:
return (gettext("POTENTIALLY_ACTIVE"));
return (gettext("UNKNOWN"));
* Get a zpool property value for 'prop' and return the value in
* a pre-allocated buffer.
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
zprop_source_t *srctype)
zprop_source_t src = ZPROP_SRC_NONE;
if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
case ZPOOL_PROP_NAME:
(void) strlcpy(buf, zpool_get_name(zhp), len);
case ZPOOL_PROP_HEALTH:
(void) strlcpy(buf, "FAULTED", len);
case ZPOOL_PROP_GUID:
intval = zpool_get_prop_int(zhp, prop, &src);
(void) snprintf(buf, len, "%llu", intval);
case ZPOOL_PROP_ALTROOT:
case ZPOOL_PROP_CACHEFILE:
case ZPOOL_PROP_COMMENT:
if (zhp->zpool_props != NULL ||
zpool_get_all_props(zhp) == 0) {
zpool_get_prop_string(zhp, prop, &src),
(void) strlcpy(buf, "-", len);
if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
prop != ZPOOL_PROP_NAME)
switch (zpool_prop_get_type(prop)) {
case PROP_TYPE_STRING:
(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
case PROP_TYPE_NUMBER:
intval = zpool_get_prop_int(zhp, prop, &src);
case ZPOOL_PROP_SIZE:
case ZPOOL_PROP_ALLOCATED:
case ZPOOL_PROP_FREE:
case ZPOOL_PROP_FREEING:
case ZPOOL_PROP_EXPANDSZ:
(void) zfs_nicenum(intval, buf, len);
case ZPOOL_PROP_CAPACITY:
(void) snprintf(buf, len, "%llu%%",
(u_longlong_t)intval);
case ZPOOL_PROP_DEDUPRATIO:
(void) snprintf(buf, len, "%llu.%02llux",
(u_longlong_t)(intval / 100),
(u_longlong_t)(intval % 100));
case ZPOOL_PROP_HEALTH:
verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
verify(nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
(void) strlcpy(buf, zpool_state_to_name(intval,
case ZPOOL_PROP_VERSION:
if (intval >= SPA_VERSION_FEATURES) {
(void) snprintf(buf, len, "-");
(void) snprintf(buf, len, "%llu", intval);
case PROP_TYPE_INDEX:
intval = zpool_get_prop_int(zhp, prop, &src);
if (zpool_prop_index_to_string(prop, intval, &strval)
(void) strlcpy(buf, strval, len);
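/*
 * Usage sketch (not part of the library): read the "health" property of
 * a pool into a caller-supplied buffer; "tank" is a hypothetical pool.
 */
#if 0
static void
example_print_health(libzfs_handle_t *hdl)
{
	zpool_handle_t *zhp;
	char health[ZFS_MAXPROPLEN];

	if ((zhp = zpool_open(hdl, "tank")) == NULL)
		return;
	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, health,
	    sizeof (health), NULL) == 0)
		(void) printf("tank: %s\n", health);
	zpool_close(zhp);
}
#endif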
* Check that the bootfs name has the same pool name as the pool it is
* set on. Assumes bootfs is a valid dataset name.
bootfs_name_valid(const char *pool, char *bootfs)
int len = strlen(pool);
if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
if (strncmp(pool, bootfs, len) == 0 &&
(bootfs[len] == '/' || bootfs[len] == '\0'))
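/*
 * Examples (for a pool named "tank"): "tank" and "tank/ROOT/be" are
 * accepted; "tankers/root" and "other/fs" are rejected because the
 * component before the first '/' must be exactly the pool name.
 */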
* Inspect the configuration to determine if any of the devices contain
pool_uses_efi(nvlist_t *config)
if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
return (read_efi_label(config, NULL) >= 0);
for (c = 0; c < children; c++) {
if (pool_uses_efi(child[c]))
zpool_is_bootable(zpool_handle_t *zhp)
char bootfs[ZPOOL_MAXNAMELEN];
return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-",
sizeof (bootfs)) != 0);
* Given an nvlist of zpool properties to be set, validate that they are
* correct, and parse any numeric properties (index, boolean, etc) if they are
* specified as strings.
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
struct stat64 statbuf;
if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
(void) no_memory(hdl);
while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
const char *propname = nvpair_name(elem);
prop = zpool_name_to_prop(propname);
if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
char *fname = strchr(propname, '@') + 1;
err = zfeature_lookup_name(fname, NULL);
ASSERT3U(err, ==, ENOENT);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid feature '%s'"), fname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
if (nvpair_type(elem) != DATA_TYPE_STRING) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be a string"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
(void) nvpair_value_string(elem, &strval);
if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' can only be set to "
"'enabled'"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
if (nvlist_add_uint64(retprops, propname, 0) != 0) {
(void) no_memory(hdl);
* Make sure this property is valid and applies to this type.
if (prop == ZPROP_INVAL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid property '%s'"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
if (zpool_prop_readonly(prop)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
"is readonly"), propname);
(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
&strval, &intval, errbuf) != 0)
* Perform additional checking for specific properties.
case ZPOOL_PROP_VERSION:
if (intval < version ||
!SPA_VERSION_IS_SUPPORTED(intval)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' number %d is invalid."),
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
case ZPOOL_PROP_BOOTFS:
if (flags.create || flags.import) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' cannot be set at creation "
"or import time"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
if (version < SPA_VERSION_BOOTFS) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded to support "
"'%s' property"), propname);
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
* The bootfs property value has to be a dataset name, and the
* dataset has to be in the same pool the property is set on.
if (strval[0] != '\0' && !bootfs_name_valid(poolname,
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
"is an invalid name"), strval);
(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"could not open pool '%s'"), poolname);
(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
* bootfs property cannot be set on a disk which has
if (pool_uses_efi(nvroot)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' not supported on "
"EFI labeled devices"), propname);
(void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
case ZPOOL_PROP_ALTROOT:
if (!flags.create && !flags.import) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' can only be set during pool "
"creation or import"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
if (strval[0] != '/') {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"bad alternate root '%s'"), strval);
(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
case ZPOOL_PROP_CACHEFILE:
if (strval[0] == '\0')
if (strcmp(strval, "none") == 0)
if (strval[0] != '/') {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' must be empty, an "
"absolute path, or 'none'"), propname);
(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
slash = strrchr(strval, '/');
if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
strcmp(slash, "/..") == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' is not a valid file"), strval);
(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
if (strval[0] != '\0' &&
(stat64(strval, &statbuf) != 0 ||
!S_ISDIR(statbuf.st_mode))) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' is not a valid directory"),
(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
case ZPOOL_PROP_COMMENT:
for (check = strval; *check != '\0'; check++) {
if (!isprint(*check)) {
dgettext(TEXT_DOMAIN,
"comment may only have printable "
(void) zfs_error(hdl, EZFS_BADPROP,
if (strlen(strval) > ZPROP_MAX_COMMENT) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"comment must not exceed %d characters"),
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
case ZPOOL_PROP_READONLY:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' can only be set at "
"import time"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
nvlist_free(retprops);
* Set zpool property : propname=propval.
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
zfs_cmd_t zc = { 0 };
nvlist_t *nvl = NULL;
prop_flags_t flags = { 0 };
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
return (no_memory(zhp->zpool_hdl));
if (nvlist_add_string(nvl, propname, propval) != 0) {
return (no_memory(zhp->zpool_hdl));
version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
* Execute the corresponding ioctl() to set this property.
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
zcmd_free_nvlists(&zc);
(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
(void) zpool_props_refresh(zhp);
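/*
 * Usage sketch: set a pool property from its text representation; the
 * value is validated by zpool_valid_proplist() above before the ioctl
 * is issued. The handle and comment text are illustrative.
 */
#if 0
if (zpool_set_prop(zhp, "comment", "scratch pool") != 0)
	(void) fprintf(stderr, "failed to set comment\n");
#endif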
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
libzfs_handle_t *hdl = zhp->zpool_hdl;
char buf[ZFS_MAXPROPLEN];
nvlist_t *features = NULL;
boolean_t firstexpand = (NULL == *plp);
if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
while (*last != NULL)
last = &(*last)->pl_next;
features = zpool_get_features(zhp);
if ((*plp)->pl_all && firstexpand) {
for (int i = 0; i < SPA_FEATURES; i++) {
zprop_list_t *entry = zfs_alloc(hdl,
sizeof (zprop_list_t));
entry->pl_prop = ZPROP_INVAL;
entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
spa_feature_table[i].fi_uname);
entry->pl_width = strlen(entry->pl_user_prop);
entry->pl_all = B_TRUE;
last = &entry->pl_next;
/* add any unsupported features */
for (nvpair_t *nvp = nvlist_next_nvpair(features, NULL);
nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
if (zfeature_is_supported(nvpair_name(nvp)))
propname = zfs_asprintf(hdl, "unsupported@%s",
* Before adding the property to the list make sure that no
* other pool already added the same property.
while (entry != NULL) {
if (entry->pl_user_prop != NULL &&
strcmp(propname, entry->pl_user_prop) == 0) {
entry = entry->pl_next;
entry = zfs_alloc(hdl, sizeof (zprop_list_t));
entry->pl_prop = ZPROP_INVAL;
entry->pl_user_prop = propname;
entry->pl_width = strlen(entry->pl_user_prop);
entry->pl_all = B_TRUE;
last = &entry->pl_next;
for (entry = *plp; entry != NULL; entry = entry->pl_next) {
if (entry->pl_prop != ZPROP_INVAL &&
zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
if (strlen(buf) > entry->pl_width)
entry->pl_width = strlen(buf);
* Get the state for the given feature on the given ZFS pool.
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
boolean_t found = B_FALSE;
nvlist_t *features = zpool_get_features(zhp);
const char *feature = strchr(propname, '@') + 1;
supported = zpool_prop_feature(propname);
ASSERT(supported || zpool_prop_unsupported(propname));
* Convert from feature name to feature guid. This conversion is
* unnecessary for unsupported@... properties because they already
ret = zfeature_lookup_name(feature, &fid);
(void) strlcpy(buf, "-", len);
feature = spa_feature_table[fid].fi_guid;
if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
(void) strlcpy(buf, "-", len);
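/*
 * Usage sketch: query a feature's state; for supported features the
 * buffer receives "disabled", "enabled" or "active". The feature name
 * here is only an example.
 */
#if 0
char state[ZFS_MAXPROPLEN];

if (zpool_prop_get_feature(zhp, "feature@async_destroy", state,
    sizeof (state)) == 0)
	(void) printf("async_destroy: %s\n", state);
#endif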
* Don't start the slice at the default block of 34; many storage
* devices will use a stripe width of 128k, so start there instead.
#define NEW_START_BLOCK 256
* Validate the given pool name, optionally putting an extended error message in
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
ret = pool_namecheck(pool, &why, &what);
* The rules for reserved pool names were extended at a later point.
* But we need to support users with existing pools that may now be
* invalid. So we only check for this expanded set of names during a
* create (or import), and only in userland.
if (ret == 0 && !isopen &&
(strncmp(pool, "mirror", 6) == 0 ||
strncmp(pool, "raidz", 5) == 0 ||
strncmp(pool, "spare", 5) == 0 ||
strcmp(pool, "log") == 0)) {
dgettext(TEXT_DOMAIN, "name is reserved"));
case NAME_ERR_TOOLONG:
dgettext(TEXT_DOMAIN, "name is too long"));
case NAME_ERR_INVALCHAR:
dgettext(TEXT_DOMAIN, "invalid character "
"'%c' in pool name"), what);
case NAME_ERR_NOLETTER:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"name must begin with a letter"));
case NAME_ERR_RESERVED:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"name is reserved"));
case NAME_ERR_DISKLIKE:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool name is reserved"));
case NAME_ERR_LEADING_SLASH:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"leading slash in name"));
case NAME_ERR_EMPTY_COMPONENT:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"empty component in name"));
case NAME_ERR_TRAILING_SLASH:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"trailing slash in name"));
case NAME_ERR_MULTIPLE_AT:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"multiple '@' delimiters in name"));
* Open a handle to the given pool, even if the pool is currently in the FAULTED
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
* Make sure the pool name is valid.
if (!zpool_name_valid(hdl, B_TRUE, pool)) {
(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
dgettext(TEXT_DOMAIN, "cannot open '%s'"),
if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
zhp->zpool_hdl = hdl;
(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
if (zpool_refresh_stats(zhp, &missing) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
(void) zfs_error_fmt(hdl, EZFS_NOENT,
dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
* Like the above, but silent on error. Used when iterating over pools (because
* the configuration cache may be out of date).
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
zpool_handle_t *zhp;
if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
zhp->zpool_hdl = hdl;
(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
if (zpool_refresh_stats(zhp, &missing) != 0) {
* Similar to zpool_open_canfail(), but refuses to open pools in the faulted
zpool_open(libzfs_handle_t *hdl, const char *pool)
zpool_handle_t *zhp;
if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
* Close the handle. Simply frees the memory associated with the handle.
zpool_close(zpool_handle_t *zhp)
if (zhp->zpool_config)
nvlist_free(zhp->zpool_config);
if (zhp->zpool_old_config)
nvlist_free(zhp->zpool_old_config);
if (zhp->zpool_props)
nvlist_free(zhp->zpool_props);
* Return the name of the pool.
zpool_get_name(zpool_handle_t *zhp)
return (zhp->zpool_name);
* Return the state of the pool (ACTIVE or UNAVAILABLE)
zpool_get_state(zpool_handle_t *zhp)
return (zhp->zpool_state);
* Create the named pool, using the provided vdev list. It is assumed
* that the consumer has already validated the contents of the nvlist, so we
* don't have to worry about error semantics.
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
nvlist_t *props, nvlist_t *fsprops)
zfs_cmd_t zc = { 0 };
nvlist_t *zc_fsprops = NULL;
nvlist_t *zc_props = NULL;
(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
"cannot create '%s'"), pool);
if (!zpool_name_valid(hdl, B_FALSE, pool))
return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };
if ((zc_props = zpool_valid_proplist(hdl, pool, props,
SPA_VERSION_1, flags, msg)) == NULL) {
zoned = ((nvlist_lookup_string(fsprops,
zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
strcmp(zonestr, "on") == 0);
if ((zc_fsprops = zfs_valid_proplist(hdl,
ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
(nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
if (nvlist_add_nvlist(zc_props,
ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
zcmd_free_nvlists(&zc);
nvlist_free(zc_props);
nvlist_free(zc_fsprops);
* This can happen if the user has specified the same
* device multiple times. We can't reliably detect this
* until we try to add it and see we already have a
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more vdevs refer to the same device"));
return (zfs_error(hdl, EZFS_BADDEV, msg));
* This occurs when one of the devices is below
* SPA_MINDEVSIZE. Unfortunately, we can't detect which
* device was the problem device since there's no
* reliable way to determine device size from userland.
zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more devices is less than the "
"minimum size (%s)"), buf);
return (zfs_error(hdl, EZFS_BADDEV, msg));
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more devices is out of space"));
return (zfs_error(hdl, EZFS_BADDEV, msg));
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cache device must be a disk or disk slice"));
return (zfs_error(hdl, EZFS_BADDEV, msg));
return (zpool_standard_error(hdl, errno, msg));
zcmd_free_nvlists(&zc);
nvlist_free(zc_props);
nvlist_free(zc_fsprops);
* Destroy the given pool. It is up to the caller to ensure that there are no
* datasets left in the pool.
zpool_destroy(zpool_handle_t *zhp, const char *log_str)
zfs_cmd_t zc = { 0 };
zfs_handle_t *zfp = NULL;
libzfs_handle_t *hdl = zhp->zpool_hdl;
if (zhp->zpool_state == POOL_STATE_ACTIVE &&
(zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_history = (uint64_t)(uintptr_t)log_str;
if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
"cannot destroy '%s'"), zhp->zpool_name);
if (errno == EROFS) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more devices is read only"));
(void) zfs_error(hdl, EZFS_BADDEV, msg);
(void) zpool_standard_error(hdl, errno, msg);
remove_mountpoint(zfp);
* Add the given vdevs to the pool. The caller must have already performed the
* necessary verification to ensure that the vdev specification is well-formed.
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
zfs_cmd_t zc = { 0 };
libzfs_handle_t *hdl = zhp->zpool_hdl;
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
"cannot add to '%s'"), zhp->zpool_name);
if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
SPA_VERSION_SPARES &&
nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
"upgraded to add hot spares"));
return (zfs_error(hdl, EZFS_BADVERSION, msg));
if (zpool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
for (s = 0; s < nspares; s++) {
if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
&path) == 0 && pool_uses_efi(spares[s])) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"device '%s' contains an EFI label and "
"cannot be used on root pools."),
zpool_vdev_name(hdl, NULL, spares[s],
return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
SPA_VERSION_L2CACHE &&
nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
"upgraded to add cache devices"));
return (zfs_error(hdl, EZFS_BADVERSION, msg));
if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
* This can happen if the user has specified the same
* device multiple times. We can't reliably detect this
* until we try to add it and see we already have a
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more vdevs refer to the same device"));
(void) zfs_error(hdl, EZFS_BADDEV, msg);
* This occurs when one of the devices is below
* SPA_MINDEVSIZE. Unfortunately, we can't detect which
* device was the problem device since there's no
* reliable way to determine device size from userland.
zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"device is less than the minimum "
(void) zfs_error(hdl, EZFS_BADDEV, msg);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded to add these vdevs"));
(void) zfs_error(hdl, EZFS_BADVERSION, msg);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"root pool can not have multiple vdevs"
" or separate logs"));
(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cache device must be a disk or disk slice"));
(void) zfs_error(hdl, EZFS_BADDEV, msg);
(void) zpool_standard_error(hdl, errno, msg);
zcmd_free_nvlists(&zc);
* Exports the pool from the system. The caller must ensure that there are no
* mounted datasets in the pool.
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
const char *log_str)
zfs_cmd_t zc = { 0 };
(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
"cannot export '%s'"), zhp->zpool_name);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_cookie = force;
zc.zc_guid = hardforce;
zc.zc_history = (uint64_t)(uintptr_t)log_str;
if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
"use '-f' to override the following errors:\n"
"'%s' has an active shared spare which could be"
" used by other pools once '%s' is exported."),
zhp->zpool_name, zhp->zpool_name);
return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
return (zpool_export_common(zhp, force, B_FALSE, log_str));
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
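/*
 * Usage sketch: a plain export versus a hard-forced one; the log
 * strings (recorded in pool history) are illustrative.
 */
#if 0
(void) zpool_export(zhp, B_FALSE, "zpool export tank");
(void) zpool_export_force(zhp, "zpool export -F tank");
#endif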
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
nvlist_t *nv = NULL;
if (!hdl->libzfs_printerr || config == NULL)
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
if (localtime_r((time_t *)&rewindto, &t) != NULL &&
strftime(timestr, 128, 0, &t) != 0) {
(void) printf(dgettext(TEXT_DOMAIN,
"Would be able to return %s "
"to its state as of %s.\n"),
(void) printf(dgettext(TEXT_DOMAIN,
"Pool %s returned to its state as of %s.\n"),
(void) printf(dgettext(TEXT_DOMAIN,
"%s approximately %lld "),
dryrun ? "Would discard" : "Discarded",
(void) printf(dgettext(TEXT_DOMAIN,
"minutes of transactions.\n"));
} else if (loss > 0) {
(void) printf(dgettext(TEXT_DOMAIN,
"%s approximately %lld "),
dryrun ? "Would discard" : "Discarded", loss);
(void) printf(dgettext(TEXT_DOMAIN,
"seconds of transactions.\n"));
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
nvlist_t *nv = NULL;
uint64_t edata = UINT64_MAX;
if (!hdl->libzfs_printerr)
(void) printf(dgettext(TEXT_DOMAIN, "action: "));
(void) printf(dgettext(TEXT_DOMAIN, "\t"));
/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
(void) printf(dgettext(TEXT_DOMAIN,
"Recovery is possible, but will result in some data loss.\n"));
if (localtime_r((time_t *)&rewindto, &t) != NULL &&
strftime(timestr, 128, 0, &t) != 0) {
(void) printf(dgettext(TEXT_DOMAIN,
"\tReturning the pool to its state as of %s\n"
"\tshould correct the problem. "),
(void) printf(dgettext(TEXT_DOMAIN,
"\tReverting the pool to an earlier state "
"should correct the problem.\n\t"));
(void) printf(dgettext(TEXT_DOMAIN,
"Approximately %lld minutes of data\n"
"\tmust be discarded, irreversibly. "), (loss + 30) / 60);
} else if (loss > 0) {
(void) printf(dgettext(TEXT_DOMAIN,
"Approximately %lld seconds of data\n"
"\tmust be discarded, irreversibly. "), loss);
if (edata != 0 && edata != UINT64_MAX) {
(void) printf(dgettext(TEXT_DOMAIN,
"After rewind, at least\n"
"\tone persistent user-data error will remain. "));
(void) printf(dgettext(TEXT_DOMAIN,
"After rewind, several\n"
"\tpersistent user-data errors will remain. "));
(void) printf(dgettext(TEXT_DOMAIN,
"Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
reason >= 0 ? "clear" : "import", name);
(void) printf(dgettext(TEXT_DOMAIN,
"A scrub of the pool\n"
"\tis strongly recommended after recovery.\n"));
(void) printf(dgettext(TEXT_DOMAIN,
"Destroy and re-create the pool from\n\ta backup source.\n"));
* zpool_import() is a contracted interface. Should be kept the same
* Applications should use zpool_import_props() to import a pool with
* new property values to be set.
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
nvlist_t *props = NULL;
if (altroot != NULL) {
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
return (zfs_error_fmt(hdl, EZFS_NOMEM,
dgettext(TEXT_DOMAIN, "cannot import '%s'"),
if (nvlist_add_string(props,
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
nvlist_add_string(props,
zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
return (zfs_error_fmt(hdl, EZFS_NOMEM,
dgettext(TEXT_DOMAIN, "cannot import '%s'"),
ret = zpool_import_props(hdl, config, newname, props,
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
uint64_t is_log = 0;
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
(void) printf("\t%*s%s%s\n", indent, "", name,
is_log ? " [log]" : "");
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE);
print_vdev_tree(hdl, vname, child[c], indent + 2);
zpool_print_unsup_feat(nvlist_t *config)
nvlist_t *nvinfo, *unsup_feat;
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
verify(nvpair_type(nvp) == DATA_TYPE_STRING);
verify(nvpair_value_string(nvp, &desc) == 0);
if (strlen(desc) > 0)
(void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
(void) printf("\t%s\n", nvpair_name(nvp));
* Import the given pool using the known configuration and a list of
* properties to be set. The configuration should have come from
* zpool_find_import(). The 'newname' parameter controls whether the pool
* is imported with a different name.
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
nvlist_t *props, int flags)
zfs_cmd_t zc = { 0 };
zpool_rewind_policy_t policy;
nvlist_t *nv = NULL;
nvlist_t *nvinfo = NULL;
nvlist_t *missing = NULL;
verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot import pool '%s'"), origname);
if (newname != NULL) {
if (!zpool_name_valid(hdl, B_FALSE, newname))
return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
dgettext(TEXT_DOMAIN, "cannot import '%s'"),
thename = (char *)newname;
prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
if ((props = zpool_valid_proplist(hdl, origname,
props, version, flags, errbuf)) == NULL) {
} else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
zc.zc_cookie = flags;
while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
zcmd_free_nvlists(&zc);
(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
zpool_get_rewind_policy(config, &policy);
* Dry-run failed, but we print out what success
* looks like if we found a best txg
if (policy.zrp_request & ZPOOL_TRY_REWIND) {
zpool_rewind_exclaim(hdl, newname ? origname : thename,
if (newname == NULL)
(void) snprintf(desc, sizeof (desc),
dgettext(TEXT_DOMAIN, "cannot import '%s'"),
(void) snprintf(desc, sizeof (desc),
dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
if (nv != NULL && nvlist_lookup_nvlist(nv,
ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
(void) printf(dgettext(TEXT_DOMAIN, "This "
"pool uses the following feature(s) not "
"supported by this system:\n"));
zpool_print_unsup_feat(nv);
if (nvlist_exists(nvinfo,
ZPOOL_CONFIG_CAN_RDONLY)) {
(void) printf(dgettext(TEXT_DOMAIN,
"All unsupported features are only "
"required for writing to the pool."
"\nThe pool can be imported using "
"'-o readonly=on'.\n"));
* Unsupported version.
(void) zfs_error(hdl, EZFS_BADVERSION, desc);
(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more devices is read only"));
(void) zfs_error(hdl, EZFS_BADDEV, desc);
if (nv && nvlist_lookup_nvlist(nv,
ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
nvlist_lookup_nvlist(nvinfo,
ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
(void) printf(dgettext(TEXT_DOMAIN,
"The devices below are missing, use "
"'-m' to import the pool anyway:\n"));
print_vdev_tree(hdl, NULL, missing, 2);
(void) printf("\n");
(void) zpool_standard_error(hdl, error, desc);
(void) zpool_standard_error(hdl, error, desc);
(void) zpool_standard_error(hdl, error, desc);
zpool_explain_recover(hdl,
newname ? origname : thename, -error, nv);
zpool_handle_t *zhp;
* This should never fail, but play it safe anyway.
if (zpool_open_silent(hdl, thename, &zhp) != 0)
else if (zhp != NULL)
if (policy.zrp_request &
(ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
zpool_rewind_exclaim(hdl, newname ? origname : thename,
((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
zcmd_free_nvlists(&zc);
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
zfs_cmd_t zc = { 0 };
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_cookie = func;
if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
(errno == ENOENT && func != POOL_SCAN_NONE))
if (func == POOL_SCAN_SCRUB) {
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
} else if (func == POOL_SCAN_NONE) {
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
assert(!"unexpected result");
if (errno == EBUSY) {
pool_scan_stat_t *ps = NULL;
verify(nvlist_lookup_nvlist(zhp->zpool_config,
ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
if (ps && ps->pss_func == POOL_SCAN_SCRUB)
return (zfs_error(hdl, EZFS_SCRUBBING, msg));
return (zfs_error(hdl, EZFS_RESILVERING, msg));
} else if (errno == ENOENT) {
return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
return (zpool_standard_error(hdl, errno, msg));
* This provides a very minimal check whether a given string is likely a
* c#t#d# style string. Users of this are expected to do their own
* verification of the s# part.
#define CTD_CHECK(str) (str && str[0] == 'c' && isdigit(str[1]))
* More elaborate version for ones which may start with "/dev/dsk/"
ctd_check_path(char *str)
{
* If it starts with a slash, check the last component.
if (str && str[0] == '/') {
char *tmp = strrchr(str, '/');
* If it ends in "/old", check the second-to-last
* component of the string instead.
if (tmp != str && strcmp(tmp, "/old") == 0) {
for (tmp--; *tmp != '/'; tmp--)
return (CTD_CHECK(str));
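/*
 * Examples (sketch): CTD_CHECK() accepts bare names such as "c0t0d0s0";
 * ctd_check_path() also accepts "/dev/dsk/c0t0d0s0" and the post-replace
 * "/dev/dsk/c0t0d0s0/old" form, checking the component before "/old".
 */
#if 0
assert(CTD_CHECK("c0t0d0s0"));
assert(ctd_check_path("/dev/dsk/c0t0d0s0/old"));
#endif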
* Find a vdev that matches the search criteria specified. We use the
* nvpair name to determine how we should look for the device.
* 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
* spare; but FALSE if it's an INUSE spare.
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
boolean_t *l2cache, boolean_t *log)
nvpair_t *pair = nvlist_next_nvpair(search, NULL);
/* Nothing to look for */
if (search == NULL || pair == NULL)
/* Obtain the key we will use to search */
srchkey = nvpair_name(pair);
switch (nvpair_type(pair)) {
case DATA_TYPE_UINT64:
if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
uint64_t srchval, theguid;
verify(nvpair_value_uint64(pair, &srchval) == 0);
verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
if (theguid == srchval)
case DATA_TYPE_STRING: {
char *srchval, *val;
verify(nvpair_value_string(pair, &srchval) == 0);
if (nvlist_lookup_string(nv, srchkey, &val) != 0)
* Search for the requested value. Special cases:
* - ZPOOL_CONFIG_PATH for whole disk entries. These end in
* "s0" or "s0/old". The "s0" part is hidden from the user,
* but included in the string, so this matches around it.
* - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
* Otherwise, all other searches are simple string compares.
if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 &&
ctd_check_path(val)) {
uint64_t wholedisk = 0;
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
int slen = strlen(srchval);
int vlen = strlen(val);
if (slen != vlen - 2)
* make_leaf_vdev() should only set
* wholedisk for ZPOOL_CONFIG_PATHs which
* will include "/dev/dsk/", giving plenty of
* room for the indices used next.
* strings identical except trailing "s0"
if (strcmp(&val[vlen - 2], "s0") == 0 &&
strncmp(srchval, val, slen) == 0)
* strings identical except trailing "s0/old"
if (strcmp(&val[vlen - 6], "s0/old") == 0 &&
strcmp(&srchval[slen - 4], "/old") == 0 &&
strncmp(srchval, val, slen - 4) == 0)
} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
char *type, *idx, *end, *p;
uint64_t id, vdev_id;
* Determine our vdev type, keeping in mind
* that the srchval is composed of a type and
* vdev id pair (i.e. mirror-4).
if ((type = strdup(srchval)) == NULL)
if ((p = strrchr(type, '-')) == NULL) {
* If the types don't match then keep looking.
if (strncmp(val, type, strlen(val)) != 0) {
verify(strncmp(type, VDEV_TYPE_RAIDZ,
strlen(VDEV_TYPE_RAIDZ)) == 0 ||
strncmp(type, VDEV_TYPE_MIRROR,
strlen(VDEV_TYPE_MIRROR)) == 0);
verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
vdev_id = strtoull(idx, &end, 10);
* Now verify that we have the correct vdev id.
if (strcmp(srchval, val) == 0)
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
for (c = 0; c < children; c++) {
if ((ret = vdev_to_nvlist_iter(child[c], search,
avail_spare, l2cache, NULL)) != NULL) {
* The 'is_log' value is only set for the toplevel
* vdev, not the leaf vdevs. So we always lookup the
* log device from the root of the vdev tree (where
* 'log' is non-NULL).
nvlist_lookup_uint64(child[c],
ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
&child, &children) == 0) {
for (c = 0; c < children; c++) {
if ((ret = vdev_to_nvlist_iter(child[c], search,
avail_spare, l2cache, NULL)) != NULL) {
*avail_spare = B_TRUE;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0) {
for (c = 0; c < children; c++) {
if ((ret = vdev_to_nvlist_iter(child[c], search,
avail_spare, l2cache, NULL)) != NULL) {
* Given a physical path (minus the "/devices" prefix), find the
zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
nvlist_t *search, *nvroot, *ret;
verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
*avail_spare = B_FALSE;
ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
nvlist_free(search);
* Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
zpool_vdev_is_interior(const char *name)
if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
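/*
 * Example (sketch): top-level vdev names such as "mirror-0" or "raidz-2"
 * are "interior" and may be passed to zpool_find_vdev() as search keys.
 */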
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
boolean_t *l2cache, boolean_t *log)
char buf[MAXPATHLEN];
nvlist_t *nvroot, *search, *ret;
verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
guid = strtoull(path, &end, 10);
if (guid != 0 && *end == '\0') {
verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
} else if (zpool_vdev_is_interior(path)) {
verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
} else if (path[0] != '/') {
(void) snprintf(buf, sizeof (buf), "%s%s", _PATH_DEV, path);
verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0);
verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
*avail_spare = B_FALSE;
ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
nvlist_free(search);
vdev_online(nvlist_t *nv)
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
* Helper function for zpool_get_physpaths().
vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
size_t *bytes_written)
size_t bytes_left, pos, rsz;
if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
return (EZFS_NODEVICE);
pos = *bytes_written;
bytes_left = physpath_size - pos;
format = (pos == 0) ? "%s" : " %s";
rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
*bytes_written += rsz;
if (rsz >= bytes_left) {
/* if physpath was not copied properly, clear it */
if (bytes_left != 0) {
return (EZFS_NOSPC);
vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
size_t *rsz, boolean_t is_spare)
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
return (EZFS_INVALCONFIG);
if (strcmp(type, VDEV_TYPE_DISK) == 0) {
* An active spare device has ZPOOL_CONFIG_IS_SPARE set.
* For a spare vdev, we only want to boot from the active
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
return (EZFS_INVALCONFIG);
if (vdev_online(nv)) {
if ((ret = vdev_get_one_physpath(nv, physpath,
phypath_size, rsz)) != 0)
} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
(is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
if (nvlist_lookup_nvlist_array(nv,
ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
return (EZFS_INVALCONFIG);
for (i = 0; i < count; i++) {
ret = vdev_get_physpaths(child[i], physpath,
phypath_size, rsz, is_spare);
if (ret == EZFS_NOSPC)
return (EZFS_POOL_INVALARG);
* Get phys_path for a root pool config.
* Return 0 on success; non-zero on failure.
zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
nvlist_t *vdev_root;
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
return (EZFS_INVALCONFIG);
if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
&child, &count) != 0)
return (EZFS_INVALCONFIG);
* root pool cannot have EFI labeled disks and can only have
* a single top-level vdev.
if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
pool_uses_efi(vdev_root))
return (EZFS_POOL_INVALARG);
(void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
/* No online devices */
return (EZFS_NODEVICE);
* Get phys_path for a root pool.
* Return 0 on success; non-zero on failure.
zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
return (zpool_get_config_physpath(zhp->zpool_config, physpath,
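/*
 * Usage sketch: fetch the space-separated list of physical paths backing
 * a root pool (single top-level vdev); the buffer size is illustrative.
 */
#if 0
char physpath[MAXPATHLEN];

if (zpool_get_physpath(zhp, physpath, sizeof (physpath)) == 0)
	(void) printf("boot paths: %s\n", physpath);
#endif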
* If the device has been dynamically expanded then we need to relabel
* the disk to use the new unallocated space.
zpool_relabel_disk(libzfs_handle_t *hdl, const char *name)
char path[MAXPATHLEN];
int (*_efi_use_whole_disk)(int);
if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT,
"efi_use_whole_disk")) == NULL)
(void) snprintf(path, sizeof (path), "%s/%s", RDISK_ROOT, name);
if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
"relabel '%s': unable to open device"), name);
return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
* It's possible that we might encounter an error if the device
* does not have any unallocated space left. If so, we simply
* ignore that error and continue on.
error = _efi_use_whole_disk(fd);
if (error && error != VT_ENOSPC) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
"relabel '%s': unable to read disk capacity"), name);
return (zfs_error(hdl, EZFS_NOCAP, errbuf));
* Bring the specified vdev online. The 'flags' parameter is a set of the
* ZFS_ONLINE_* flags.
zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
vdev_state_t *newstate)
zfs_cmd_t zc = { 0 };
boolean_t avail_spare, l2cache, islog;
libzfs_handle_t *hdl = zhp->zpool_hdl;
if (flags & ZFS_ONLINE_EXPAND) {
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot online %s"), path);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
return (zfs_error(hdl, EZFS_NODEVICE, msg));
verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
return (zfs_error(hdl, EZFS_ISSPARE, msg));
if (flags & ZFS_ONLINE_EXPAND ||
zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
char *pathname = NULL;
uint64_t wholedisk = 0;
(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,
* XXX - L2ARC 1.0 devices can't support expansion.
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot expand cache devices"));
return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
pathname += strlen(DISK_ROOT) + 1;
(void) zpool_relabel_disk(hdl, pathname);
zc.zc_cookie = VDEV_STATE_ONLINE;
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
if (errno == EINVAL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
"from this pool into a new one. Use '%s' "
"instead"), "zpool detach");
return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
return (zpool_standard_error(hdl, errno, msg));
*newstate = zc.zc_cookie;
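/*
 * Usage sketch: online a device, asking for any new capacity to be used;
 * the device name is illustrative and error handling is abbreviated.
 */
#if 0
vdev_state_t newstate;

if (zpool_vdev_online(zhp, "c0t0d0", ZFS_ONLINE_EXPAND,
    &newstate) == 0 && newstate != VDEV_STATE_HEALTHY)
	(void) printf("device onlined, but not healthy\n");
#endif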
* Take the specified vdev offline.
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
zfs_cmd_t zc = { 0 };
boolean_t avail_spare, l2cache;
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
return (zfs_error(hdl, EZFS_NODEVICE, msg));
verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
return (zfs_error(hdl, EZFS_ISSPARE, msg));
zc.zc_cookie = VDEV_STATE_OFFLINE;
zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
* There are no other replicas of this device.
return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
* The log device has unplayed logs
return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
return (zpool_standard_error(hdl, errno, msg));
* Mark the given vdev faulted.
zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
zfs_cmd_t zc = { 0 };
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_cookie = VDEV_STATE_FAULTED;
if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
* There are no other replicas of this device.
return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
return (zpool_standard_error(hdl, errno, msg));
* Mark the given vdev degraded.
zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
zfs_cmd_t zc = { 0 };
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_cookie = VDEV_STATE_DEGRADED;
if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
return (zpool_standard_error(hdl, errno, msg));
2575 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
2579 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
2585 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
2587 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
2590 if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
2591 children == 2 && child[which] == tgt)
2594 for (c = 0; c < children; c++)
2595 if (is_replacing_spare(child[c], tgt, which))
2603 * Attach new_disk (fully described by nvroot) to old_disk.
2604 * If 'replacing' is specified, the new disk will replace the old one.
2607 zpool_vdev_attach(zpool_handle_t *zhp,
2608 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
2610 zfs_cmd_t zc = { 0 };
2614 boolean_t avail_spare, l2cache, islog;
2619 nvlist_t *config_root;
2620 libzfs_handle_t *hdl = zhp->zpool_hdl;
2621 boolean_t rootpool = zpool_is_bootable(zhp);
2624 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2625 "cannot replace %s with %s"), old_disk, new_disk);
2627 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2628 "cannot attach %s to %s"), new_disk, old_disk);
2631 * If this is a root pool, make sure that we're not attaching an
2632 * EFI labeled device.
2634 if (rootpool && pool_uses_efi(nvroot)) {
2635 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2636 "EFI labeled devices are not supported on root pools."));
2637 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
2640 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2641 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
2643 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2646 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2649 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2651 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2652 zc.zc_cookie = replacing;
2654 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2655 &child, &children) != 0 || children != 1) {
2656 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2657 "new device must be a single disk"));
2658 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
2661 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2662 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
2664 if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
2668 * If the target is a hot spare that has been swapped in, we can only
2669 * replace it with another hot spare.
2672 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
2673 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
2674 NULL) == NULL || !avail_spare) &&
2675 is_replacing_spare(config_root, tgt, 1)) {
2676 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2677 "can only be replaced by another hot spare"));
2679 return (zfs_error(hdl, EZFS_BADTARGET, msg));
2684 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
2687 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
2689 zcmd_free_nvlists(&zc);
2694 * XXX need a better way to prevent user from
2695 * booting up a half-baked vdev.
2697 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
2698 "sure to wait until resilver is done "
2699 "before rebooting.\n"));
2700 (void) fprintf(stderr, "\n");
2701 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "If "
2702 "you boot from pool '%s', you may need to update\n"
2703 "boot code on newly attached disk '%s'.\n\n"
2704 "Assuming you use GPT partitioning and 'da0' is "
2705 "your new boot disk\n"
2706 "you may use the following command:\n\n"
2707 "\tgpart bootcode -b /boot/pmbr -p "
2708 "/boot/gptzfsboot -i 1 da0\n\n"),
2709 zhp->zpool_name, new_disk);
2717 * Can't attach to or replace this type of vdev.
2720 uint64_t version = zpool_get_prop_int(zhp,
2721 ZPOOL_PROP_VERSION, NULL);
2724 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2725 "cannot replace a log with a spare"));
2726 else if (version >= SPA_VERSION_MULTI_REPLACE)
2727 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2728 "already in replacing/spare config; wait "
2729 "for completion or use 'zpool detach'"));
2731 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2732 "cannot replace a replacing device"));
2734 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2735 "can only attach to mirrors and top-level "
2738 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2743 * The new device must be a single disk.
2745 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2746 "new device must be a single disk"));
2747 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
2751 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
2753 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2758 * The new device is too small.
2760 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2761 "device is too small"));
2762 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2767 * The new device has a different alignment requirement.
2769 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2770 "devices have different sector alignment"));
2771 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2776 * The resulting top-level vdev spec won't fit in the label.
2778 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
2782 (void) zpool_standard_error(hdl, errno, msg);
2789 * Detach the specified device.
2792 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2794 zfs_cmd_t zc = { 0 };
2797 boolean_t avail_spare, l2cache;
2798 libzfs_handle_t *hdl = zhp->zpool_hdl;
2800 (void) snprintf(msg, sizeof (msg),
2801 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2803 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2804 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2806 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2809 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2812 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2814 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2816 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
2823 * Can't detach from this type of vdev.
2825 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
2826 "applicable to mirror and replacing vdevs"));
2827 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2832 * There are no other replicas of this device.
2834 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
2838 (void) zpool_standard_error(hdl, errno, msg);
2845 * Find a mirror vdev in the source nvlist.
2847 * The mchild array contains a list of disks in one of the top-level mirrors
2848 * of the source pool. The schild array contains a list of disks that the
2849 * user specified on the command line. We loop over the mchild array to
2850 * see if any entry in the schild array matches.
2852 * If a disk in the mchild array is found in the schild array, we return
2853 * the index of that entry. Otherwise we return -1.
2856 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
2857 nvlist_t **schild, uint_t schildren)
2861 for (mc = 0; mc < mchildren; mc++) {
2863 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2864 mchild[mc], B_FALSE);
2866 for (sc = 0; sc < schildren; sc++) {
2867 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2868 schild[sc], B_FALSE);
2869 boolean_t result = (strcmp(mpath, spath) == 0);
2885 * Split a mirror pool. If newroot points to null, then a new nvlist
2886 * is generated and it is the responsibility of the caller to free it.
2889 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
2890 nvlist_t *props, splitflags_t flags)
2892 zfs_cmd_t zc = { 0 };
2894 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
2895 nvlist_t **varray = NULL, *zc_props = NULL;
2896 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
2897 libzfs_handle_t *hdl = zhp->zpool_hdl;
2899 boolean_t freelist = B_FALSE, memory_err = B_TRUE;
2902 (void) snprintf(msg, sizeof (msg),
2903 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
2905 if (!zpool_name_valid(hdl, B_FALSE, newname))
2906 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
2908 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
2909 (void) fprintf(stderr, gettext("Internal error: unable to "
2910 "retrieve pool configuration\n"));
2914 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
2916 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
2919 prop_flags_t pflags = { .create = B_FALSE, .import = B_TRUE };
2920 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
2921 props, vers, pflags, msg)) == NULL)
2925 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2927 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2928 "Source pool is missing vdev tree"));
2930 nvlist_free(zc_props);
2934 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
2937 if (*newroot == NULL ||
2938 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
2939 &newchild, &newchildren) != 0)
2942 for (c = 0; c < children; c++) {
2943 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
2945 nvlist_t **mchild, *vdev;
2950 * Unlike cache & spares, slogs are stored in the
2951 * ZPOOL_CONFIG_CHILDREN array. We filter them out here.
2953 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2955 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
2957 if (is_log || is_hole) {
2959 * Create a hole vdev and put it in the config.
2961 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
2963 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
2964 VDEV_TYPE_HOLE) != 0)
2966 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
2971 varray[vcount++] = vdev;
2975 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
2977 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
2978 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2979 "Source pool must be composed only of mirrors\n"));
2980 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2984 verify(nvlist_lookup_nvlist_array(child[c],
2985 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
2987 /* find or add an entry for this top-level vdev */
2988 if (newchildren > 0 &&
2989 (entry = find_vdev_entry(zhp, mchild, mchildren,
2990 newchild, newchildren)) >= 0) {
2991 /* We found a disk that the user specified. */
2992 vdev = mchild[entry];
2995 /* User didn't specify a disk for this vdev. */
2996 vdev = mchild[mchildren - 1];
2999 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
3003 /* did we find every disk the user specified? */
3004 if (found != newchildren) {
3005 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
3006 "include at most one disk from each mirror"));
3007 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
3011 /* Prepare the nvlist for populating. */
3012 if (*newroot == NULL) {
3013 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
3016 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
3017 VDEV_TYPE_ROOT) != 0)
3020 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
3023 /* Add all the children we found */
3024 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
3025 lastlog == 0 ? vcount : lastlog) != 0)
3029 * If we're just doing a dry run, exit now with success.
3032 memory_err = B_FALSE;
3037 /* now build up the config list & call the ioctl */
3038 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
3041 if (nvlist_add_nvlist(newconfig,
3042 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
3043 nvlist_add_string(newconfig,
3044 ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
3045 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
3049 * The new pool is automatically part of the namespace unless we
3050 * explicitly export it.
3053 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
3054 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3055 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
3056 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
3058 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
3061 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
3062 retval = zpool_standard_error(hdl, errno, msg);
3067 memory_err = B_FALSE;
3070 if (varray != NULL) {
3073 for (v = 0; v < vcount; v++)
3074 nvlist_free(varray[v]);
3077 zcmd_free_nvlists(&zc);
3079 nvlist_free(zc_props);
3081 nvlist_free(newconfig);
3083 nvlist_free(*newroot);
3091 return (no_memory(hdl));
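/*
 * Illustrative sketch (the pool name is hypothetical, and the 'dryrun' bit
 * of splitflags_t is assumed to be what gates the early-exit path above, as
 * used by 'zpool split -n'): validate the split with a dry run before
 * committing to it. The caller owns and frees the generated newroot nvlist.
 *
 *	nvlist_t *newroot = NULL;
 *	splitflags_t flags = { 0 };
 *
 *	flags.dryrun = 1;
 *	if (zpool_vdev_split(zhp, "newtank", &newroot, NULL, flags) == 0) {
 *		nvlist_free(newroot);
 *		newroot = NULL;
 *		flags.dryrun = 0;
 *		(void) zpool_vdev_split(zhp, "newtank", &newroot,
 *		    NULL, flags);
 *		nvlist_free(newroot);
 *	}
 */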
3097  * Remove the given device. Currently, this is supported only for hot spares,
3098  * level 2 cache devices, and log devices.
3101 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
3103 zfs_cmd_t zc = { 0 };
3106 boolean_t avail_spare, l2cache, islog;
3107 libzfs_handle_t *hdl = zhp->zpool_hdl;
3110 (void) snprintf(msg, sizeof (msg),
3111 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3113 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3114 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3116 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3118 * XXX - this should just go away.
3120 if (!avail_spare && !l2cache && !islog) {
3121 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3122 "only inactive hot spares, cache, top-level, "
3123 "or log devices can be removed"));
3124 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3127 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
3128 if (islog && version < SPA_VERSION_HOLES) {
3129 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3130 "pool must be upgrade to support log removal"));
3131 return (zfs_error(hdl, EZFS_BADVERSION, msg));
3134 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3136 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3139 return (zpool_standard_error(hdl, errno, msg));
3143 * Clear the errors for the pool, or the particular device if specified.
3146 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
3148 zfs_cmd_t zc = { 0 };
3151 zpool_rewind_policy_t policy;
3152 boolean_t avail_spare, l2cache;
3153 libzfs_handle_t *hdl = zhp->zpool_hdl;
3154 nvlist_t *nvi = NULL;
3158 (void) snprintf(msg, sizeof (msg),
3159 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3162 (void) snprintf(msg, sizeof (msg),
3163 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3166 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3168 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
3169 &l2cache, NULL)) == 0)
3170 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3173 * Don't allow error clearing for hot spares. Do allow
3174 * error clearing for l2cache devices.
3177 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3179 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
3183 zpool_get_rewind_policy(rewindnvl, &policy);
3184 zc.zc_cookie = policy.zrp_request;
3186 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
3189 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
3192 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
3194 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3195 zcmd_free_nvlists(&zc);
3200 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
3201 errno != EPERM && errno != EACCES)) {
3202 if (policy.zrp_request &
3203 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
3204 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
3205 zpool_rewind_exclaim(hdl, zc.zc_name,
3206 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
3210 zcmd_free_nvlists(&zc);
3214 zcmd_free_nvlists(&zc);
3215 return (zpool_standard_error(hdl, errno, msg));
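/*
 * Illustrative sketch (assuming an empty policy nvlist falls through to the
 * default ZPOOL_NO_REWIND behavior in zpool_get_rewind_policy()): clear the
 * error counts for a whole pool, or for one device. The device name is
 * hypothetical.
 *
 *	nvlist_t *policy = fnvlist_alloc();
 *
 *	(void) zpool_clear(zhp, NULL, policy);		whole pool
 *	(void) zpool_clear(zhp, "c0t0d0", policy);	single vdev
 *	fnvlist_free(policy);
 */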
3219 * Similar to zpool_clear(), but takes a GUID (used by fmd).
3222 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
3224 zfs_cmd_t zc = { 0 };
3226 libzfs_handle_t *hdl = zhp->zpool_hdl;
3228 (void) snprintf(msg, sizeof (msg),
3229 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
3232 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3234 zc.zc_cookie = ZPOOL_NO_REWIND;
3236 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
3239 return (zpool_standard_error(hdl, errno, msg));
3243 * Change the GUID for a pool.
3246 zpool_reguid(zpool_handle_t *zhp)
3249 libzfs_handle_t *hdl = zhp->zpool_hdl;
3250 zfs_cmd_t zc = { 0 };
3252 (void) snprintf(msg, sizeof (msg),
3253 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
3255 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3256 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
3259 return (zpool_standard_error(hdl, errno, msg));
3266 zpool_reopen(zpool_handle_t *zhp)
3268 zfs_cmd_t zc = { 0 };
3270 libzfs_handle_t *hdl = zhp->zpool_hdl;
3272 (void) snprintf(msg, sizeof (msg),
3273 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
3276 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3277 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
3279 return (zpool_standard_error(hdl, errno, msg));
3283 * Convert from a devid string to a path.
3286 devid_to_path(char *devid_str)
3291 devid_nmlist_t *list = NULL;
3294 if (devid_str_decode(devid_str, &devid, &minor) != 0)
3297 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
3299 devid_str_free(minor);
3305 if ((path = strdup(list[0].devname)) == NULL)
3308 devid_free_nmlist(list);
3314 * Convert from a path to a devid string.
3317 path_to_devid(const char *path)
3323 if ((fd = open(path, O_RDONLY)) < 0)
3328 if (devid_get(fd, &devid) == 0) {
3329 if (devid_get_minor_name(fd, &minor) == 0)
3330 ret = devid_str_encode(devid, minor);
3332 devid_str_free(minor);
3341 * Issue the necessary ioctl() to update the stored path value for the vdev. We
3342 * ignore any failure here, since a common case is for an unprivileged user to
3343 * type 'zpool status', and we'll display the correct information anyway.
3346 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
3348 zfs_cmd_t zc = { 0 };
3350 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3351 (void) strlcpy(zc.zc_value, path, sizeof (zc.zc_value));
3352 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3355 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
3359 * Given a vdev, return the name to display in iostat. If the vdev has a path,
3360 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
3361 * We also check if this is a whole disk, in which case we strip off the
3362 * trailing 's0' slice name.
3364 * This routine is also responsible for identifying when disks have been
3365 * reconfigured in a new location. The kernel will have opened the device by
3366 * devid, but the path will still refer to the old location. To catch this, we
3367 * first do a path -> devid translation (which is fast for the common case). If
3368 * the devid matches, we're done. If not, we do a reverse devid -> path
3369 * translation and issue the appropriate ioctl() to update the path of the vdev.
3370  * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
3371  * of these checks.
3374 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
3385 have_stats = nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
3386 (uint64_t **)&vs, &vsc) == 0;
3387 have_path = nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0;
3390 * If the device is not currently present, assume it will not
3391 * come back at the same device path. Display the device by GUID.
3393 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
3394 (have_path && have_stats && vs->vs_state <= VDEV_STATE_CANT_OPEN)) {
3395 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3397 (void) snprintf(buf, sizeof (buf), "%llu",
3398 (u_longlong_t)value);
3400 } else if (have_path) {
3403 * If the device is dead (faulted, offline, etc) then don't
3404 * bother opening it. Otherwise we may be forcing the user to
3405 * open a misbehaving device, which can have undesirable
3408 if ((have_stats == 0 ||
3409 vs->vs_state >= VDEV_STATE_DEGRADED) &&
3411 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
3413 * Determine if the current path is correct.
3415 char *newdevid = path_to_devid(path);
3417 if (newdevid == NULL ||
3418 strcmp(devid, newdevid) != 0) {
3421 if ((newpath = devid_to_path(devid)) != NULL) {
3423 * Update the path appropriately.
3425 set_path(zhp, nv, newpath);
3426 if (nvlist_add_string(nv,
3427 ZPOOL_CONFIG_PATH, newpath) == 0)
3428 verify(nvlist_lookup_string(nv,
3436 devid_str_free(newdevid);
3440 if (strncmp(path, "/dev/dsk/", 9) == 0)
3443 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
3444 &value) == 0 && value) {
3445 int pathlen = strlen(path);
3446 char *tmp = zfs_strdup(hdl, path);
3449 * If it starts with c#, and ends with "s0", chop
3450 * the "s0" off, or if it ends with "s0/old", remove
3451 * the "s0" from the middle.
3453 if (CTD_CHECK(tmp)) {
3454 if (strcmp(&tmp[pathlen - 2], "s0") == 0) {
3455 tmp[pathlen - 2] = '\0';
3456 } else if (pathlen > 6 &&
3457 strcmp(&tmp[pathlen - 6], "s0/old") == 0) {
3458 (void) strcpy(&tmp[pathlen - 6],
3465 if (strncmp(path, _PATH_DEV, sizeof (_PATH_DEV) - 1) == 0)
3466 path += sizeof (_PATH_DEV) - 1;
3469 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
3472 * If it's a raidz device, we need to stick in the parity level.
3474 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
3475 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3477 (void) snprintf(buf, sizeof (buf), "%s%llu", path,
3478 (u_longlong_t)value);
3483 * We identify each top-level vdev by using a <type-id>
3484 * naming convention.
3489 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
3491 (void) snprintf(buf, sizeof (buf), "%s-%llu", path,
3497 return (zfs_strdup(hdl, path));
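/*
 * For reference, sketches of the names produced above (device names and
 * guids illustrative): a disk vdev at /dev/dsk/c0t0d0s0 with the
 * whole_disk bit set displays as "c0t0d0"; a device that is not present
 * displays as its guid, e.g. "7895462014986201227"; a raidz vdev with
 * nparity 2 displays as "raidz2"; and when the <type-id> convention is
 * applied to a top-level vdev the id is appended, e.g. "mirror-1".
 */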
3501 zbookmark_compare(const void *a, const void *b)
3503 return (memcmp(a, b, sizeof (zbookmark_t)));
3507  * Retrieve the persistent error log, uniquify the members, and return to the
3508  * caller.
3511 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
3513 zfs_cmd_t zc = { 0 };
3515 zbookmark_t *zb = NULL;
3519 * Retrieve the raw error list from the kernel. If the number of errors
3520 * has increased, allocate more space and continue until we get the
3523 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
3527 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
3528 count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
3530 zc.zc_nvlist_dst_size = count;
3531 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3533 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
3535 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3536 if (errno == ENOMEM) {
3537 count = zc.zc_nvlist_dst_size;
3538 if ((zc.zc_nvlist_dst = (uintptr_t)
3539 zfs_alloc(zhp->zpool_hdl, count *
3540 sizeof (zbookmark_t))) == (uintptr_t)NULL)
3551 * Sort the resulting bookmarks. This is a little confusing due to the
3552 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
3553  * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
3554  * _not_ copied as part of the process. So we point the start of our
3555  * array appropriately and decrement the total number of elements.
3557 zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
3558 zc.zc_nvlist_dst_size;
3559 count -= zc.zc_nvlist_dst_size;
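/*
 * Worked example with illustrative numbers: if we allocated room for
 * count = 100 bookmarks and the kernel reports zc_nvlist_dst_size = 40
 * entries _not_ copied, the valid bookmarks occupy slots 40..99, so zb
 * points 40 entries into the buffer and count drops to 60.
 */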
3561 qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
3563 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
3566 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
3568 for (i = 0; i < count; i++) {
3571 /* ignoring zb_blkid and zb_level for now */
3572 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
3573 zb[i-1].zb_object == zb[i].zb_object)
3576 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
3578 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
3579 zb[i].zb_objset) != 0) {
3583 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
3584 zb[i].zb_object) != 0) {
3588 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
3595 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3599 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3600 return (no_memory(zhp->zpool_hdl));
3604 * Upgrade a ZFS pool to the latest on-disk version.
3607 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
3609 zfs_cmd_t zc = { 0 };
3610 libzfs_handle_t *hdl = zhp->zpool_hdl;
3612 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3613 zc.zc_cookie = new_version;
3615 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
3616 return (zpool_standard_error_fmt(hdl, errno,
3617 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
3623 zfs_save_arguments(int argc, char **argv, char *string, int len)
3625 (void) strlcpy(string, basename(argv[0]), len);
3626 for (int i = 1; i < argc; i++) {
3627 (void) strlcat(string, " ", len);
3628 (void) strlcat(string, argv[i], len);
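/*
 * Illustrative sketch (the buffer size constant and the g_zfs handle are
 * assumptions borrowed from the command-line utilities): flatten argv into
 * one string and record it in the pool history.
 *
 *	char history_str[HIS_MAX_RECORD_LEN];
 *
 *	zfs_save_arguments(argc, argv, history_str, sizeof (history_str));
 *	(void) zpool_log_history(g_zfs, history_str);
 */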
3633 zpool_log_history(libzfs_handle_t *hdl, const char *message)
3635 zfs_cmd_t zc = { 0 };
3639 args = fnvlist_alloc();
3640 fnvlist_add_string(args, "message", message);
3641 err = zcmd_write_src_nvlist(hdl, &zc, args);
3643 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
3645 zcmd_free_nvlists(&zc);
3650 * Perform ioctl to get some command history of a pool.
3652 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
3653 * logical offset of the history buffer to start reading from.
3655 * Upon return, 'off' is the next logical offset to read from and
3656 * 'len' is the actual amount of bytes read into 'buf'.
3659 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
3661 zfs_cmd_t zc = { 0 };
3662 libzfs_handle_t *hdl = zhp->zpool_hdl;
3664 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3666 zc.zc_history = (uint64_t)(uintptr_t)buf;
3667 zc.zc_history_len = *len;
3668 zc.zc_history_offset = *off;
3670 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
3673 return (zfs_error_fmt(hdl, EZFS_PERM,
3674 dgettext(TEXT_DOMAIN,
3675 "cannot show history for pool '%s'"),
3678 return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
3679 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3680 "'%s'"), zhp->zpool_name));
3682 return (zfs_error_fmt(hdl, EZFS_BADVERSION,
3683 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3684 "'%s', pool must be upgraded"), zhp->zpool_name));
3686 return (zpool_standard_error_fmt(hdl, errno,
3687 dgettext(TEXT_DOMAIN,
3688 "cannot get history for '%s'"), zhp->zpool_name));
3692 *len = zc.zc_history_len;
3693 *off = zc.zc_history_offset;
3699 * Process the buffer of nvlists, unpacking and storing each nvlist record
3700 * into 'records'. 'leftover' is set to the number of bytes that weren't
3701 * processed as there wasn't a complete record.
3704 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
3705 nvlist_t ***records, uint_t *numrecords)
3711 while (bytes_read > sizeof (reclen)) {
3713 /* get length of packed record (stored as little endian) */
3714 for (i = 0, reclen = 0; i < sizeof (reclen); i++)
3715 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
3717 if (bytes_read < sizeof (reclen) + reclen)
3721 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
3723 bytes_read -= sizeof (reclen) + reclen;
3724 buf += sizeof (reclen) + reclen;
3726 /* add record to nvlist array */
3728 if (ISP2(*numrecords + 1)) {
3729 *records = realloc(*records,
3730 *numrecords * 2 * sizeof (nvlist_t *));
3732 (*records)[*numrecords - 1] = nv;
3735 *leftover = bytes_read;
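/*
 * For reference, the buffer layout consumed above: each record is a 64-bit
 * little-endian length followed by that many bytes of packed nvlist, back
 * to back, with any trailing partial record reported via 'leftover'.
 *
 *	+--------+----------------+--------+----------------+----
 *	| reclen | packed nvlist  | reclen | packed nvlist  | ...
 *	| 8B LE  | (reclen bytes) | 8B LE  | (reclen bytes) |
 *	+--------+----------------+--------+----------------+----
 *
 * The records array grows by doubling: the ISP2() test fires whenever
 * (*numrecords + 1) reaches a power of two. Note that the realloc()
 * return value is not checked here.
 */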
3739 #define HIS_BUF_LEN (128*1024)
3742 * Retrieve the command history of a pool.
3745 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
3747 char buf[HIS_BUF_LEN];
3749 nvlist_t **records = NULL;
3750 uint_t numrecords = 0;
3754 uint64_t bytes_read = sizeof (buf);
3757 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
3760 /* if nothing else was read in, we're at EOF, just return */
3764 if ((err = zpool_history_unpack(buf, bytes_read,
3765 &leftover, &records, &numrecords)) != 0)
3773 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
3774 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
3775 records, numrecords) == 0);
3777 for (i = 0; i < numrecords; i++)
3778 nvlist_free(records[i]);
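/*
 * Illustrative sketch of a consumer (ZPOOL_HIST_CMD is the key carrying
 * command strings; records without it, e.g. internal events, are skipped):
 *
 *	nvlist_t *nvhis, **recs;
 *	uint_t nrecs, i;
 *	char *cmd;
 *
 *	if (zpool_get_history(zhp, &nvhis) == 0) {
 *		verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
 *		    &recs, &nrecs) == 0);
 *		for (i = 0; i < nrecs; i++)
 *			if (nvlist_lookup_string(recs[i], ZPOOL_HIST_CMD,
 *			    &cmd) == 0)
 *				(void) printf("%s\n", cmd);
 *		nvlist_free(nvhis);
 *	}
 */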
3785 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
3786 char *pathname, size_t len)
3788 zfs_cmd_t zc = { 0 };
3789 boolean_t mounted = B_FALSE;
3790 char *mntpnt = NULL;
3791 char dsname[MAXNAMELEN];
3794 /* special case for the MOS */
3795 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
3799 /* get the dataset's name */
3800 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3802 if (ioctl(zhp->zpool_hdl->libzfs_fd,
3803 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
3804 /* just write out a path of two object numbers */
3805 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
3809 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
3811 /* find out if the dataset is mounted */
3812 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
3814 /* get the corrupted object's path */
3815 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
3817 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
3820 (void) snprintf(pathname, len, "%s%s", mntpnt,
3823 (void) snprintf(pathname, len, "%s:%s",
3824 dsname, zc.zc_value);
3827 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, (u_longlong_t)obj);
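/*
 * For reference, sample outputs of the above (values illustrative):
 * "<metadata>:<0x1d>" for an MOS object, "/export/home/foo" when the
 * dataset is mounted (mountpoint plus the object's path), "tank/fs:/foo"
 * when it is not mounted, and "tank/fs:<0x12>" when the object's path
 * cannot be resolved.
 */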
3834  * Read the EFI label from the config; if a label does not exist, then
3835  * pass back the error to the caller. If the caller has passed a non-NULL
3836  * diskaddr argument then we set it to the starting address of the EFI
3837  * partition.
3840 read_efi_label(nvlist_t *config, diskaddr_t *sb)
3844 char diskname[MAXPATHLEN];
3847 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
3850 (void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
3851 strrchr(path, '/'));
3852 if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
3853 struct dk_gpt *vtoc;
3855 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
3857 *sb = vtoc->efi_parts[0].p_start;
3866  * determine where a partition starts on a disk in the current
3867  * configuration.
3870 find_start_block(nvlist_t *config)
3874 diskaddr_t sb = MAXOFFSET_T;
3877 if (nvlist_lookup_nvlist_array(config,
3878 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
3879 if (nvlist_lookup_uint64(config,
3880 ZPOOL_CONFIG_WHOLE_DISK,
3881 &wholedisk) != 0 || !wholedisk) {
3882 return (MAXOFFSET_T);
3884 if (read_efi_label(config, &sb) < 0)
3889 for (c = 0; c < children; c++) {
3890 sb = find_start_block(child[c]);
3891 if (sb != MAXOFFSET_T) {
3895 return (MAXOFFSET_T);
3900 * Label an individual disk. The name provided is the short name,
3901 * stripped of any leading /dev path.
3904 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, const char *name)
3907 char path[MAXPATHLEN];
3908 struct dk_gpt *vtoc;
3910 size_t resv = EFI_MIN_RESV_SIZE;
3911 uint64_t slice_size;
3912 diskaddr_t start_block;
3915 /* prepare an error message just in case */
3916 (void) snprintf(errbuf, sizeof (errbuf),
3917 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
3922 if (zpool_is_bootable(zhp)) {
3923 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3924 "EFI labeled devices are not supported on root "
3926 return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
3929 verify(nvlist_lookup_nvlist(zhp->zpool_config,
3930 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3932 if (zhp->zpool_start_block == 0)
3933 start_block = find_start_block(nvroot);
3935 start_block = zhp->zpool_start_block;
3936 zhp->zpool_start_block = start_block;
3939 start_block = NEW_START_BLOCK;
3942 (void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
3945 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
3947 * This shouldn't happen. We've long since verified that this
3948 * is a valid device.
3951 dgettext(TEXT_DOMAIN, "unable to open device"));
3952 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
3955 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
3957 * The only way this can fail is if we run out of memory, or we
3958 * were unable to read the disk's capacity
3960 if (errno == ENOMEM)
3961 (void) no_memory(hdl);
3964 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3965 "unable to read disk capacity"), name);
3967 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
3970 slice_size = vtoc->efi_last_u_lba + 1;
3971 slice_size -= EFI_MIN_RESV_SIZE;
3972 if (start_block == MAXOFFSET_T)
3973 start_block = NEW_START_BLOCK;
3974 slice_size -= start_block;
3976 vtoc->efi_parts[0].p_start = start_block;
3977 vtoc->efi_parts[0].p_size = slice_size;
3980 * Why we use V_USR: V_BACKUP confuses users, and is considered
3981 * disposable by some EFI utilities (since EFI doesn't have a backup
3982 * slice). V_UNASSIGNED is supposed to be used only for zero size
3983 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
3984 * etc. were all pretty specific. V_USR is as close to reality as we
3985 * can get, in the absence of V_OTHER.
3987 vtoc->efi_parts[0].p_tag = V_USR;
3988 (void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
3990 vtoc->efi_parts[8].p_start = slice_size + start_block;
3991 vtoc->efi_parts[8].p_size = resv;
3992 vtoc->efi_parts[8].p_tag = V_RESERVED;
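/*
 * For reference, the label laid out above (sector counts illustrative):
 *
 *	slice 0 (V_USR, "zfs"):   [start_block, start_block + slice_size)
 *	slice 8 (V_RESERVED):     [start_block + slice_size,
 *	                           start_block + slice_size + resv)
 *
 * i.e. the data slice covers everything from the start block up to the
 * EFI_MIN_RESV_SIZE reserved area required at the end of the disk.
 */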
3994 if (efi_write(fd, vtoc) != 0) {
3996 * Some block drivers (like pcata) may not support EFI
3997  * GPT labels. Print out a helpful error message directing
3998  * the user to manually label the disk and give
4004 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4005 "try using fdisk(1M) and then provide a specific slice"));
4006 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4016 supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
4022 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
4023 if (strcmp(type, VDEV_TYPE_FILE) == 0 ||
4024 strcmp(type, VDEV_TYPE_HOLE) == 0 ||
4025 strcmp(type, VDEV_TYPE_MISSING) == 0) {
4026 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4027 "vdev type '%s' is not supported"), type);
4028 (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
4031 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
4032 &child, &children) == 0) {
4033 for (c = 0; c < children; c++) {
4034 if (!supported_dump_vdev_type(hdl, child[c], errbuf))
4042 * Check if this zvol is allowable for use as a dump device; zero if
4043 * it is, > 0 if it isn't, < 0 if it isn't a zvol.
4045 * Allowable storage configurations include mirrors, all raidz variants, and
4046 * pools with log, cache, and spare devices. Pools which are backed by files or
4047 * have missing/hole vdevs are not suitable.
4050 zvol_check_dump_config(char *arg)
4052 zpool_handle_t *zhp = NULL;
4053 nvlist_t *config, *nvroot;
4057 libzfs_handle_t *hdl;
4059 char poolname[ZPOOL_MAXNAMELEN];
4060 int pathlen = strlen(ZVOL_FULL_DEV_DIR);
4063 if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
4067 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
4068 "dump is not supported on device '%s'"), arg);
4070 if ((hdl = libzfs_init()) == NULL)
4072 libzfs_print_on_error(hdl, B_TRUE);
4074 volname = arg + pathlen;
4076 /* check the configuration of the pool */
4077 if ((p = strchr(volname, '/')) == NULL) {
4078 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4079 "malformed dataset name"));
4080 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
4082 } else if (p - volname >= ZFS_MAXNAMELEN) {
4083 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4084 "dataset name is too long"));
4085 (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
4088 (void) strncpy(poolname, volname, p - volname);
4089 poolname[p - volname] = '\0';
4092 if ((zhp = zpool_open(hdl, poolname)) == NULL) {
4093 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4094 "could not open pool '%s'"), poolname);
4095 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
4098 config = zpool_get_config(zhp, NULL);
4099 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
4101 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4102 "could not obtain vdev configuration for '%s'"), poolname);
4103 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
4107 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
4108 &top, &toplevels) == 0);
4110 if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {