 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]

 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
#include <sys/types.h>
#include <sys/zfs_ioctl.h>

#include "zfs_namecheck.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"

static int read_efi_label(nvlist_t *config, diskaddr_t *sb);

#define	DISK_ROOT	"/dev/dsk"
#define	RDISK_ROOT	"/dev/rdsk"
#define	BACKUP_SLICE	"s2"
typedef struct prop_flags {
	int create:1;	/* Validate property on creation */
	int import:1;	/* Validate property on import */
} prop_flags_t;
 * ====================================================================
 * zpool property functions
 * ====================================================================

zpool_get_all_props(zpool_handle_t *zhp)
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)

	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
			zcmd_free_nvlists(&zc);

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);

	zcmd_free_nvlists(&zc);
zpool_props_refresh(zpool_handle_t *zhp)
	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)

	nvlist_free(old_props);

zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)

zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		return (zpool_prop_default_numeric(prop));

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
 * Map VDEV STATE to printed strings.

zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return (gettext("SPLIT"));
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));

	return (gettext("UNKNOWN"));

 * Map POOL STATE to printed strings.

zpool_pool_state_to_name(pool_state_t state)
	case POOL_STATE_ACTIVE:
		return (gettext("ACTIVE"));
	case POOL_STATE_EXPORTED:
		return (gettext("EXPORTED"));
	case POOL_STATE_DESTROYED:
		return (gettext("DESTROYED"));
	case POOL_STATE_SPARE:
		return (gettext("SPARE"));
	case POOL_STATE_L2CACHE:
		return (gettext("L2CACHE"));
	case POOL_STATE_UNINITIALIZED:
		return (gettext("UNINITIALIZED"));
	case POOL_STATE_UNAVAIL:
		return (gettext("UNAVAIL"));
	case POOL_STATE_POTENTIALLY_ACTIVE:
		return (gettext("POTENTIALLY_ACTIVE"));

	return (gettext("UNKNOWN"));

 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype, boolean_t literal)
	zprop_source_t src = ZPROP_SRC_NONE;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, "FAULTED", len);

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", intval);

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				    zpool_get_prop_string(zhp, prop, &src),
			(void) strlcpy(buf, "-", len);

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_FREEING:
		case ZPOOL_PROP_LEAKED:
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
				(void) zfs_nicenum(intval, buf, len);

		case ZPOOL_PROP_EXPANDSZ:
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
				(void) zfs_nicenum(intval, buf, len);

		case ZPOOL_PROP_CAPACITY:
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);

		case ZPOOL_PROP_FRAGMENTATION:
			if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "-", len);
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);

		case ZPOOL_PROP_DEDUPRATIO:
			(void) snprintf(buf, len, "%llu.%02llux",
			    (u_longlong_t)(intval / 100),
			    (u_longlong_t)(intval % 100));

		case ZPOOL_PROP_HEALTH:
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)

			(void) strlcpy(buf, zpool_state_to_name(intval,

		case ZPOOL_PROP_VERSION:
			if (intval >= SPA_VERSION_FEATURES) {
				(void) snprintf(buf, len, "-");

			(void) snprintf(buf, len, "%llu", intval);

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)

		(void) strlcpy(buf, strval, len);
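
/*
 * Illustrative sketch, not part of the original library: how a caller
 * might read the "health" and "capacity" properties into preallocated
 * buffers with zpool_get_prop(). The handle 'zhp' is assumed to come
 * from zpool_open().
 */
static void
example_print_health_and_capacity(zpool_handle_t *zhp)
{
	char health[ZFS_MAXPROPLEN];
	char capacity[ZFS_MAXPROPLEN];
	zprop_source_t src;

	/* literal == B_FALSE yields the human-readable forms, e.g. "82%" */
	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, health,
	    sizeof (health), &src, B_FALSE) == 0 &&
	    zpool_get_prop(zhp, ZPOOL_PROP_CAPACITY, capacity,
	    sizeof (capacity), &src, B_FALSE) == 0)
		(void) printf("%s: %s, %s full\n", zpool_get_name(zhp),
		    health, capacity);
}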
 * Check that the bootfs name carries the same pool name as the pool it is
 * being set on. Assumes bootfs is a valid dataset name.
bootfs_name_valid(const char *pool, char *bootfs)
	int len = strlen(pool);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))

 * Inspect the configuration to determine if any of the devices contain

pool_uses_efi(nvlist_t *config)
	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (read_efi_label(config, NULL) >= 0);

	for (c = 0; c < children; c++) {
		if (pool_uses_efi(child[c]))

zpool_is_bootable(zpool_handle_t *zhp)
	char bootfs[ZPOOL_MAXNAMELEN];

	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
	    sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-",
	    sizeof (bootfs)) != 0);

 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
	struct stat64 statbuf;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);

	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		prop = zpool_name_to_prop(propname);
		if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
			char *fname = strchr(propname, '@') + 1;

			err = zfeature_lookup_name(fname, NULL);
				ASSERT3U(err, ==, ENOENT);
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid feature '%s'"), fname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);

			if (nvpair_type(elem) != DATA_TYPE_STRING) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be a string"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);

			(void) nvpair_value_string(elem, &strval);
			if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'enabled'"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);

			if (nvlist_add_uint64(retprops, propname, 0) != 0) {
				(void) no_memory(hdl);

		 * Make sure this property is valid and applies to this type.
		if (prop == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)

		 * Perform additional checking for specific properties.
		case ZPOOL_PROP_VERSION:
			if (intval < version ||
			    !SPA_VERSION_IS_SUPPORTED(intval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %d is invalid."),
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);

		case ZPOOL_PROP_BOOTFS:
			if (flags.create || flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);

			 * The bootfs property value has to be a dataset name,
			 * and the dataset has to reside in the pool it is
			 * being set on.
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);

			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

			 * bootfs property cannot be set on a disk which has
			if (pool_uses_efi(nvroot)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' not supported on "
				    "EFI labeled devices"), propname);
				(void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);

		case ZPOOL_PROP_ALTROOT:
			if (!flags.create && !flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')

			if (strcmp(strval, "none") == 0)

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);

		case ZPOOL_PROP_COMMENT:
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					    dgettext(TEXT_DOMAIN,
					    "comment may only have printable "
					(void) zfs_error(hdl, EZFS_BADPROP,

			if (strlen(strval) > ZPROP_MAX_COMMENT) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "comment must not exceed %d characters"),
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);

		case ZPOOL_PROP_READONLY:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);

	nvlist_free(retprops);
 * Set zpool property : propname=propval.

zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
	zfs_cmd_t zc = { 0 };
	nvlist_t *nvl = NULL;
	prop_flags_t flags = { 0 };

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		return (no_memory(zhp->zpool_hdl));

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {

	 * Execute the corresponding ioctl() to set this property.
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
		(void) zpool_props_refresh(zhp);
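
/*
 * Illustrative sketch, not part of the original library: setting the
 * "comment" property on an open pool. zpool_set_prop() validates the
 * value through zpool_valid_proplist() before issuing the ioctl, so a
 * bad value fails in userland rather than in the kernel.
 */
static int
example_set_comment(zpool_handle_t *zhp)
{
	/* comments must be printable and within ZPROP_MAX_COMMENT bytes */
	return (zpool_set_prop(zhp, "comment", "scratch pool, ok to destroy"));
}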
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char buf[ZFS_MAXPROPLEN];
	nvlist_t *features = NULL;
	boolean_t firstexpand = (NULL == *plp);

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)

	while (*last != NULL)
		last = &(*last)->pl_next;

	features = zpool_get_features(zhp);

	if ((*plp)->pl_all && firstexpand) {
		for (int i = 0; i < SPA_FEATURES; i++) {
			zprop_list_t *entry = zfs_alloc(hdl,
			    sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_INVAL;
			entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
			    spa_feature_table[i].fi_uname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			last = &entry->pl_next;

	/* add any unsupported features */
	for (nvpair_t *nvp = nvlist_next_nvpair(features, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {

		if (zfeature_is_supported(nvpair_name(nvp)))

		propname = zfs_asprintf(hdl, "unsupported@%s",

		 * Before adding the property to the list make sure that no
		 * other pool already added the same property.
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
			entry = entry->pl_next;

		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_INVAL;
		entry->pl_user_prop = propname;
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		last = &entry->pl_next;

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL, B_FALSE) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);

 * Get the state for the given feature on the given ZFS pool.
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
	boolean_t found = B_FALSE;
	nvlist_t *features = zpool_get_features(zhp);
	const char *feature = strchr(propname, '@') + 1;

	supported = zpool_prop_feature(propname);
	ASSERT(supported || zpool_prop_unsupported(propname));

	 * Convert from feature name to feature guid. This conversion is
	 * unnecessary for unsupported@... properties because they already
		ret = zfeature_lookup_name(feature, &fid);
			(void) strlcpy(buf, "-", len);
		feature = spa_feature_table[fid].fi_guid;

	if (nvlist_lookup_uint64(features, feature, &refcount) == 0)

			(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
			(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
			(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);

			(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
			(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);

	(void) strlcpy(buf, "-", len);
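
/*
 * Illustrative sketch, not part of the original library: querying a
 * feature property. "feature@async_destroy" is just an example name; the
 * result lands in 'buf' as "disabled", "enabled", "active", or "-".
 */
static void
example_query_feature(zpool_handle_t *zhp)
{
	char buf[64];

	if (zpool_prop_get_feature(zhp, "feature@async_destroy", buf,
	    sizeof (buf)) == 0)
		(void) printf("async_destroy is %s\n", buf);
}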
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, so start there instead.
#define	NEW_START_BLOCK	256

 * Validate the given pool name, optionally putting an extended error message in
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
	ret = pool_namecheck(pool, &why, &what);

	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid. So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		    dgettext(TEXT_DOMAIN, "name is reserved"));

		case NAME_ERR_TOOLONG:
			    dgettext(TEXT_DOMAIN, "name is too long"));

		case NAME_ERR_INVALCHAR:
			    dgettext(TEXT_DOMAIN, "invalid character "
			    "'%c' in pool name"), what);

		case NAME_ERR_NOLETTER:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "name must begin with a letter"));

		case NAME_ERR_RESERVED:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "name is reserved"));

		case NAME_ERR_DISKLIKE:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool name is reserved"));

		case NAME_ERR_LEADING_SLASH:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "leading slash in name"));

		case NAME_ERR_EMPTY_COMPONENT:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "empty component in name"));

		case NAME_ERR_TRAILING_SLASH:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "trailing slash in name"));

		case NAME_ERR_MULTIPLE_AT:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "multiple '@' delimiters in name"));
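
/*
 * Illustrative sketch, not part of the original library: pre-checking a
 * candidate name before attempting a create. With isopen == B_FALSE the
 * expanded reserved-name rules above ("mirror", "raidz", "spare", "log")
 * are enforced as well.
 */
static boolean_t
example_name_ok(libzfs_handle_t *hdl, const char *candidate)
{
	return (zpool_name_valid(hdl, B_FALSE, candidate));
}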
 * Open a handle to the given pool, even if the pool is currently in the FAULTED

zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
	zpool_handle_t *zhp;

	 * Make sure the pool name is valid.
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);

 * Like the above, but silent on error. Used when iterating over pools (because
 * the configuration cache may be out of date).

zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
	zpool_handle_t *zhp;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {

 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted

zpool_open(libzfs_handle_t *hdl, const char *pool)
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);

 * Close the handle. Simply frees the memory associated with the handle.

zpool_close(zpool_handle_t *zhp)
	if (zhp->zpool_config)
		nvlist_free(zhp->zpool_config);
	if (zhp->zpool_old_config)
		nvlist_free(zhp->zpool_old_config);
	if (zhp->zpool_props)
		nvlist_free(zhp->zpool_props);

 * Return the name of the pool.

zpool_get_name(zpool_handle_t *zhp)
	return (zhp->zpool_name);

 * Return the state of the pool (ACTIVE or UNAVAILABLE).

zpool_get_state(zpool_handle_t *zhp)
	return (zhp->zpool_state);
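
/*
 * Illustrative sketch, not part of the original library: the typical
 * open/inspect/close life cycle for a pool handle. zpool_open() refuses
 * faulted pools; use zpool_open_canfail() to examine those too.
 */
static void
example_inspect_pool(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open(hdl, pool)) == NULL)
		return;
	(void) printf("%s: state %s\n", zpool_get_name(zhp),
	    zpool_pool_state_to_name((pool_state_t)zpool_get_state(zhp)));
	zpool_close(zhp);
}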
 * Create the named pool, using the provided vdev list. It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.

zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
	zfs_cmd_t zc = { 0 };
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)

		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, flags, msg)) == NULL) {

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl,
		    ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);

		 * This can happen if the user has specified the same
		 * device multiple times. We can't reliably detect this
		 * until we try to add it and see we already have a
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "one or more vdevs refer to the same device"));
		return (zfs_error(hdl, EZFS_BADDEV, msg));

		 * This occurs when one of the devices is below
		 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
		 * device was the problem device since there's no
		 * reliable way to determine device size from userland.
		zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "one or more devices is less than the "
		    "minimum size (%s)"), buf);
		return (zfs_error(hdl, EZFS_BADDEV, msg));

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "one or more devices is out of space"));
		return (zfs_error(hdl, EZFS_BADDEV, msg));

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "cache device must be a disk or disk slice"));
		return (zfs_error(hdl, EZFS_BADDEV, msg));

		return (zpool_standard_error(hdl, errno, msg));

	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
 * Destroy the given pool. It is up to the caller to ensure that there are no
 * datasets left in the pool.

zpool_destroy(zpool_handle_t *zhp, const char *log_str)
	zfs_cmd_t zc = { 0 };
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			(void) zpool_standard_error(hdl, errno, msg);

		remove_mountpoint(zfp);
 * Add the given vdevs to the pool. The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.

zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));

	if (zpool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
		for (s = 0; s < nspares; s++) {
			if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
			    &path) == 0 && pool_uses_efi(spares[s])) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device '%s' contains an EFI label and "
				    "cannot be used on root pools."),
				    zpool_vdev_name(hdl, NULL, spares[s],
				return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		 * This can happen if the user has specified the same
		 * device multiple times. We can't reliably detect this
		 * until we try to add it and see we already have a
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "one or more vdevs refer to the same device"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);

		 * This occurs when one of the devices is below
		 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
		 * device was the problem device since there's no
		 * reliable way to determine device size from userland.
		zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device is less than the minimum "
		(void) zfs_error(hdl, EZFS_BADDEV, msg);

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "pool must be upgraded to add these vdevs"));
		(void) zfs_error(hdl, EZFS_BADVERSION, msg);

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "root pool can not have multiple vdevs"
		    " or separate logs"));
		(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "cache device must be a disk or disk slice"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);

		(void) zpool_standard_error(hdl, errno, msg);

	zcmd_free_nvlists(&zc);
 * Exports the pool from the system. The caller must ensure that there are no
 * mounted datasets in the pool.

zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
    const char *log_str)
	zfs_cmd_t zc = { 0 };

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,

		return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,

zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
	return (zpool_export_common(zhp, force, B_FALSE, log_str));

zpool_export_force(zpool_handle_t *zhp, const char *log_str)
	return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
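
/*
 * Illustrative sketch, not part of the original library: a plain,
 * non-forced export. The 'log_str' argument is recorded in the pool
 * history by the kernel.
 */
static int
example_export(zpool_handle_t *zhp)
{
	/* B_FALSE: do not force; fails if datasets are still mounted */
	return (zpool_export(zhp, B_FALSE, "example export"));
}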
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
	nvlist_t *nv = NULL;

	if (!hdl->libzfs_printerr || config == NULL)

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Would be able to return %s "
			    "to its state as of %s.\n"),
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Pool %s returned to its state as of %s.\n"),

			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			(void) printf(dgettext(TEXT_DOMAIN,
			    "minutes of transactions.\n"));
		} else if (loss > 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded", loss);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "seconds of transactions.\n"));
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
	nvlist_t *nv = NULL;
	uint64_t edata = UINT64_MAX;

	if (!hdl->libzfs_printerr)

		(void) printf(dgettext(TEXT_DOMAIN, "action: "));
		(void) printf(dgettext(TEXT_DOMAIN, "\t"));

	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)

	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery is possible, but will result in some data loss.\n"));

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReturning the pool to its state as of %s\n"
		    "\tshould correct the problem. "),
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReverting the pool to an earlier state "
		    "should correct the problem.\n\t"));

		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld minutes of data\n"
		    "\tmust be discarded, irreversibly. "), (loss + 30) / 60);
	} else if (loss > 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld seconds of data\n"
		    "\tmust be discarded, irreversibly. "), loss);

	if (edata != 0 && edata != UINT64_MAX) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, at least\n"
			    "\tone persistent user-data error will remain. "));
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, several\n"
			    "\tpersistent user-data errors will remain. "));

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
	    reason >= 0 ? "clear" : "import", name);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "A scrub of the pool\n"
	    "\tis strongly recommended after recovery.\n"));

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Destroy and re-create the pool from\n\ta backup source.\n"));
 * zpool_import() is a contracted interface. Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
	nvlist_t *props = NULL;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),

	ret = zpool_import_props(hdl, config, newname, props,

print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
	uint64_t is_log = 0;

	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,

	(void) printf("\t%*s%s%s\n", indent, "", name,
	    is_log ? " [log]" : "");

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)

	for (c = 0; c < children; c++) {
		vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE);
		print_vdev_tree(hdl, vname, child[c], indent + 2);

zpool_print_unsup_feat(nvlist_t *config)
	nvlist_t *nvinfo, *unsup_feat;

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
	verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,

	for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
	    nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
		verify(nvpair_type(nvp) == DATA_TYPE_STRING);
		verify(nvpair_value_string(nvp, &desc) == 0);

		if (strlen(desc) > 0)
			(void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
			(void) printf("\t%s\n", nvpair_name(nvp));
 * Import the given pool using the known configuration and a list of
 * properties to be set. The configuration should have come from
 * zpool_find_import(). The 'newname' parameter controls whether the pool
 * is imported with a different name.
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, int flags)
	zfs_cmd_t zc = { 0 };
	zpool_rewind_policy_t policy;
	nvlist_t *nv = NULL;
	nvlist_t *nvinfo = NULL;
	nvlist_t *missing = NULL;

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
		thename = (char *)newname;

		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, flags, errbuf)) == NULL) {
		} else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {

	if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {

	zc.zc_cookie = flags;
	while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);

	(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
	zpool_get_rewind_policy(config, &policy);

		 * Dry-run failed, but we print out what success
		 * looks like if we found a best txg.
		if (policy.zrp_request & ZPOOL_TRY_REWIND) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,

		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),

			if (nv != NULL && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
				(void) printf(dgettext(TEXT_DOMAIN, "This "
				    "pool uses the following feature(s) not "
				    "supported by this system:\n"));
				zpool_print_unsup_feat(nv);
				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_CAN_RDONLY)) {
					(void) printf(dgettext(TEXT_DOMAIN,
					    "All unsupported features are only "
					    "required for writing to the pool."
					    "\nThe pool can be imported using "
					    "'-o readonly=on'.\n"));

				 * Unsupported version.
				(void) zfs_error(hdl, EZFS_BADVERSION, desc);

			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);

			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);

			if (nv && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_lookup_nvlist(nvinfo,
			    ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
				(void) printf(dgettext(TEXT_DOMAIN,
				    "The devices below are missing, use "
				    "'-m' to import the pool anyway:\n"));
				print_vdev_tree(hdl, NULL, missing, 2);
				(void) printf("\n");
			(void) zpool_standard_error(hdl, error, desc);

			(void) zpool_standard_error(hdl, error, desc);

			(void) zpool_standard_error(hdl, error, desc);
			zpool_explain_recover(hdl,
			    newname ? origname : thename, -error, nv);

		zpool_handle_t *zhp;

		 * This should never fail, but play it safe anyway.
		if (zpool_open_silent(hdl, thename, &zhp) != 0)
		else if (zhp != NULL)
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);

	zcmd_free_nvlists(&zc);
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = func;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
	    (errno == ENOENT && func != POOL_SCAN_NONE))

	if (func == POOL_SCAN_SCRUB) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
	} else if (func == POOL_SCAN_NONE) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
		assert(!"unexpected result");

	if (errno == EBUSY) {
		pool_scan_stat_t *ps = NULL;

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
		if (ps && ps->pss_func == POOL_SCAN_SCRUB)
			return (zfs_error(hdl, EZFS_SCRUBBING, msg));
			return (zfs_error(hdl, EZFS_RESILVERING, msg));
	} else if (errno == ENOENT) {
		return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
		return (zpool_standard_error(hdl, errno, msg));
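
/*
 * Illustrative sketch, not part of the original library: starting or
 * cancelling a scrub. zpool_scan() reports EZFS_SCRUBBING if a scrub is
 * already running and EZFS_RESILVERING if a resilver is in progress.
 */
static int
example_scrub(zpool_handle_t *zhp, boolean_t cancel)
{
	return (zpool_scan(zhp, cancel ? POOL_SCAN_NONE : POOL_SCAN_SCRUB));
}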
 * This provides a very minimal check whether a given string is likely a
 * c#t#d# style string. Users of this are expected to do their own
 * verification of the s# part.
#define	CTD_CHECK(str)	(str && str[0] == 'c' && isdigit(str[1]))

 * More elaborate version for ones which may start with "/dev/dsk/"

ctd_check_path(char *str)
{
	 * If it starts with a slash, check the last component.
	if (str && str[0] == '/') {
		char *tmp = strrchr(str, '/');

		 * If it ends in "/old", check the second-to-last
		 * component of the string instead.
		if (tmp != str && strcmp(tmp, "/old") == 0) {
			for (tmp--; *tmp != '/'; tmp--)

	return (CTD_CHECK(str));
 * Find a vdev that matches the search criteria specified. We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare; but FALSE if it's an INUSE spare.
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
	nvpair_t *pair = nvlist_next_nvpair(search, NULL);

	/* Nothing to look for */
	if (search == NULL || pair == NULL)

	/* Obtain the key we will use to search */
	srchkey = nvpair_name(pair);

	switch (nvpair_type(pair)) {
	case DATA_TYPE_UINT64:
		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
			uint64_t srchval, theguid;

			verify(nvpair_value_uint64(pair, &srchval) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
			if (theguid == srchval)

	case DATA_TYPE_STRING: {
		char *srchval, *val;

		verify(nvpair_value_string(pair, &srchval) == 0);
		if (nvlist_lookup_string(nv, srchkey, &val) != 0)

		 * Search for the requested value. Special cases:
		 *
		 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
		 *   "s0" or "s0/old". The "s0" part is hidden from the user,
		 *   but included in the string, so this matches around it.
		 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
		 *
		 * Otherwise, all other searches are simple string compares.
		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 &&
		    ctd_check_path(val)) {
			uint64_t wholedisk = 0;

			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
				int slen = strlen(srchval);
				int vlen = strlen(val);

				if (slen != vlen - 2)

				 * make_leaf_vdev() should only set
				 * wholedisk for ZPOOL_CONFIG_PATHs which
				 * will include "/dev/dsk/", giving plenty of
				 * room for the indices used next.

				 * strings identical except trailing "s0"
				if (strcmp(&val[vlen - 2], "s0") == 0 &&
				    strncmp(srchval, val, slen) == 0)

				 * strings identical except trailing "s0/old"
				if (strcmp(&val[vlen - 6], "s0/old") == 0 &&
				    strcmp(&srchval[slen - 4], "/old") == 0 &&
				    strncmp(srchval, val, slen - 4) == 0)

		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
			char *type, *idx, *end, *p;
			uint64_t id, vdev_id;

			 * Determine our vdev type, keeping in mind
			 * that the srchval is composed of a type and
			 * vdev id pair (i.e. mirror-4).
			if ((type = strdup(srchval)) == NULL)

			if ((p = strrchr(type, '-')) == NULL) {

			 * If the types don't match then keep looking.
			if (strncmp(val, type, strlen(val)) != 0) {

			verify(strncmp(type, VDEV_TYPE_RAIDZ,
			    strlen(VDEV_TYPE_RAIDZ)) == 0 ||
			    strncmp(type, VDEV_TYPE_MIRROR,
			    strlen(VDEV_TYPE_MIRROR)) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,

			vdev_id = strtoull(idx, &end, 10);

			 * Now verify that we have the correct vdev id.

		if (strcmp(srchval, val) == 0)

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)

	for (c = 0; c < children; c++) {
		if ((ret = vdev_to_nvlist_iter(child[c], search,
		    avail_spare, l2cache, NULL)) != NULL) {
			 * The 'is_log' value is only set for the toplevel
			 * vdev, not the leaf vdevs. So we always lookup the
			 * log device from the root of the vdev tree (where
			 * 'log' is non-NULL).
			    nvlist_lookup_uint64(child[c],
			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*avail_spare = B_TRUE;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
 * Given a physical path (minus the "/devices" prefix), find the

zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
	nvlist_t *search, *nvroot, *ret;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,

	*avail_spare = B_FALSE;

	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);
 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).

zpool_vdev_is_interior(const char *name)
	if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
	    strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)

zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
	char buf[MAXPATHLEN];
	nvlist_t *nvroot, *search, *ret;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	guid = strtoull(path, &end, 10);
	if (guid != 0 && *end == '\0') {
		verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
	} else if (zpool_vdev_is_interior(path)) {
		verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
	} else if (path[0] != '/') {
		(void) snprintf(buf, sizeof (buf), "%s%s", _PATH_DEV, path);
		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0);
		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,

	*avail_spare = B_FALSE;

	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);
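
/*
 * Illustrative sketch, not part of the original library: resolving a
 * device name to its config nvlist and classifying it. Per the lookup
 * logic above, 'path' may be a guid, an interior vdev name such as
 * "mirror-0", a bare device name, or an absolute path.
 */
static void
example_classify_vdev(zpool_handle_t *zhp, const char *path)
{
	nvlist_t *tgt;
	boolean_t spare = B_FALSE, l2cache = B_FALSE, log = B_FALSE;

	if ((tgt = zpool_find_vdev(zhp, path, &spare, &l2cache,
	    &log)) == NULL) {
		(void) printf("%s: no such device\n", path);
		return;
	}
	(void) printf("%s:%s%s%s\n", path, spare ? " spare" : "",
	    l2cache ? " l2cache" : "", log ? " log" : "");
}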
vdev_online(nvlist_t *nv)
	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)

 * Helper function for zpool_get_physpaths().

vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
    size_t *bytes_written)
	size_t bytes_left, pos, rsz;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
		return (EZFS_NODEVICE);

	pos = *bytes_written;
	bytes_left = physpath_size - pos;
	format = (pos == 0) ? "%s" : " %s";

	rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
	*bytes_written += rsz;

	if (rsz >= bytes_left) {
		/* if physpath was not copied properly, clear it */
		if (bytes_left != 0) {
		return (EZFS_NOSPC);
vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
    size_t *rsz, boolean_t is_spare)
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (EZFS_INVALCONFIG);

	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
		 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
		 * For a spare vdev, we only want to boot from the active
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			return (EZFS_INVALCONFIG);

		if (vdev_online(nv)) {
			if ((ret = vdev_get_one_physpath(nv, physpath,
			    phypath_size, rsz)) != 0)
	} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
	    strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
	    (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {

		if (nvlist_lookup_nvlist_array(nv,
		    ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
			return (EZFS_INVALCONFIG);

		for (i = 0; i < count; i++) {
			ret = vdev_get_physpaths(child[i], physpath,
			    phypath_size, rsz, is_spare);
			if (ret == EZFS_NOSPC)

	return (EZFS_POOL_INVALARG);
 * Get phys_path for a root pool config.
 * Return 0 on success; non-zero on failure.

zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
	nvlist_t *vdev_root;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		return (EZFS_INVALCONFIG);

	if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
	    nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
	    &child, &count) != 0)
		return (EZFS_INVALCONFIG);

	 * root pool can not have EFI labeled disks and can only have
	 * a single top-level vdev.
	if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
	    pool_uses_efi(vdev_root))
		return (EZFS_POOL_INVALARG);

	(void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,

	/* No online devices */
		return (EZFS_NODEVICE);
 * Get phys_path for a root pool.
 * Return 0 on success; non-zero on failure.

zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
	return (zpool_get_config_physpath(zhp->zpool_config, physpath,
 * If the device has been dynamically expanded then we need to relabel
 * the disk to use the new unallocated space.
zpool_relabel_disk(libzfs_handle_t *hdl, const char *name)
	char path[MAXPATHLEN];
	int (*_efi_use_whole_disk)(int);

	if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT,
	    "efi_use_whole_disk")) == NULL)

	(void) snprintf(path, sizeof (path), "%s/%s", RDISK_ROOT, name);

	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "relabel '%s': unable to open device"), name);
		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));

	 * It's possible that we might encounter an error if the device
	 * does not have any unallocated space left. If so, we simply
	 * ignore that error and continue on.
	error = _efi_use_whole_disk(fd);

	if (error && error != VT_ENOSPC) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "relabel '%s': unable to read disk capacity"), name);
		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
 * Bring the specified vdev online. The 'flags' parameter is a set of the
 * ZFS_ONLINE_* flags.

zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
    vdev_state_t *newstate)
	zfs_cmd_t zc = { 0 };
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	if (flags & ZFS_ONLINE_EXPAND) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot online %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (flags & ZFS_ONLINE_EXPAND ||
	    zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
		char *pathname = NULL;
		uint64_t wholedisk = 0;

		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
		verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,

		 * XXX - L2ARC 1.0 devices can't support expansion.
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cannot expand cache devices"));
			return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));

			pathname += strlen(DISK_ROOT) + 1;
			(void) zpool_relabel_disk(hdl, pathname);

	zc.zc_cookie = VDEV_STATE_ONLINE;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
		if (errno == EINVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
			    "from this pool into a new one. Use '%s' "
			    "instead"), "zpool detach");
			return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
		return (zpool_standard_error(hdl, errno, msg));

	*newstate = zc.zc_cookie;
 * Take the specified vdev offline.

zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
	zfs_cmd_t zc = { 0 };
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	zc.zc_cookie = VDEV_STATE_OFFLINE;
	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)

		 * There are no other replicas of this device.
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

		 * The log device has unplayed logs.
		return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));

		return (zpool_standard_error(hdl, errno, msg));
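
/*
 * Illustrative sketch, not part of the original library: temporarily
 * offlining a device and bringing it back online. A temporary offline
 * (istmp == B_TRUE) does not persist across pool import.
 */
static int
example_bounce_device(zpool_handle_t *zhp, const char *path)
{
	vdev_state_t newstate;

	if (zpool_vdev_offline(zhp, path, B_TRUE) != 0)
		return (-1);
	return (zpool_vdev_online(zhp, path, 0, &newstate));
}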
 * Mark the given vdev faulted.

zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = VDEV_STATE_FAULTED;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)

		 * There are no other replicas of this device.
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	return (zpool_standard_error(hdl, errno, msg));

 * Mark the given vdev degraded.

zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = VDEV_STATE_DEGRADED;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)

	return (zpool_standard_error(hdl, errno, msg));
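
/*
 * Illustrative sketch, not part of the original library: administratively
 * faulting or degrading a vdev by guid. VDEV_AUX_NONE carries no extra
 * diagnosis; fault management code normally supplies a specific aux code.
 */
static int
example_mark_vdev(zpool_handle_t *zhp, uint64_t guid, boolean_t fault)
{
	if (fault)
		return (zpool_vdev_fault(zhp, guid, VDEV_AUX_NONE));
	return (zpool_vdev_degrade(zhp, guid, VDEV_AUX_NONE));
}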
 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as

is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,

		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
		    children == 2 && child[which] == tgt)

		for (c = 0; c < children; c++)
			if (is_replacing_spare(child[c], tgt, which))

 * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
2630 zpool_vdev_attach(zpool_handle_t *zhp,
2631 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
2633 zfs_cmd_t zc = { 0 };
2637 boolean_t avail_spare, l2cache, islog;
2642 nvlist_t *config_root;
2643 libzfs_handle_t *hdl = zhp->zpool_hdl;
2644 boolean_t rootpool = zpool_is_bootable(zhp);
2647 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2648 "cannot replace %s with %s"), old_disk, new_disk);
2650 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2651 "cannot attach %s to %s"), new_disk, old_disk);
2654 * If this is a root pool, make sure that we're not attaching an
2655 * EFI labeled device.
2657 if (rootpool && pool_uses_efi(nvroot)) {
2658 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2659 "EFI labeled devices are not supported on root pools."));
2660 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
2663 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2664 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
2666 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2669 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2672 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2674 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2675 zc.zc_cookie = replacing;
2677 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2678 &child, &children) != 0 || children != 1) {
2679 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2680 "new device must be a single disk"));
2681 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
2684 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2685 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
2687 if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
2691 * If the target is a hot spare that has been swapped in, we can only
2692 * replace it with another hot spare.
2695 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
2696 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
2697 NULL) == NULL || !avail_spare) &&
2698 is_replacing_spare(config_root, tgt, 1)) {
2699 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2700 "can only be replaced by another hot spare"));
2702 return (zfs_error(hdl, EZFS_BADTARGET, msg));
2707 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
2710 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
2712 zcmd_free_nvlists(&zc);
2717 * XXX need a better way to prevent user from
2718 * booting up a half-baked vdev.
2720 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
2721 "sure to wait until resilver is done "
2722 "before rebooting.\n"));
2723 (void) fprintf(stderr, "\n");
2724 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "If "
2725 "you boot from pool '%s', you may need to update\n"
2726 "boot code on newly attached disk '%s'.\n\n"
2727 "Assuming you use GPT partitioning and 'da0' is "
2728 "your new boot disk\n"
2729 "you may use the following command:\n\n"
2730 "\tgpart bootcode -b /boot/pmbr -p "
2731 "/boot/gptzfsboot -i 1 da0\n\n"),
2732 zhp->zpool_name, new_disk);
2740 * Can't attach to or replace this type of vdev.
2743 uint64_t version = zpool_get_prop_int(zhp,
2744 ZPOOL_PROP_VERSION, NULL);
2747 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2748 "cannot replace a log with a spare"));
2749 else if (version >= SPA_VERSION_MULTI_REPLACE)
2750 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2751 "already in replacing/spare config; wait "
2752 "for completion or use 'zpool detach'"));
2754 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2755 "cannot replace a replacing device"));
2757 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2758 "can only attach to mirrors and top-level "
2761 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2766 * The new device must be a single disk.
2768 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2769 "new device must be a single disk"));
2770 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
2774 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
2776 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2781 * The new device is too small.
2783 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2784 "device is too small"));
2785 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2790 * The new device has a different alignment requirement.
2792 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2793 "devices have different sector alignment"));
2794 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2799 * The resulting top-level vdev spec won't fit in the label.
2801 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
2805 (void) zpool_standard_error(hdl, errno, msg);
2812 * Detach the specified device.
2815 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2817 zfs_cmd_t zc = { 0 };
2820 boolean_t avail_spare, l2cache;
2821 libzfs_handle_t *hdl = zhp->zpool_hdl;
2823 (void) snprintf(msg, sizeof (msg),
2824 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2826 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2827 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2829 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2832 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2835 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2837 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2839 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
2846 * Can't detach from this type of vdev.
2848 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
2849 "applicable to mirror and replacing vdevs"));
2850 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2855 * There are no other replicas of this device.
2857 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
2861 (void) zpool_standard_error(hdl, errno, msg);
2868 * Find a mirror vdev in the source nvlist.
2870 * The mchild array contains a list of disks in one of the top-level mirrors
2871 * of the source pool. The schild array contains a list of disks that the
2872 * user specified on the command line. We loop over the mchild array to
2873 * see if any entry in the schild array matches.
2875 * If a disk in the mchild array is found in the schild array, we return
2876 * the index of that entry. Otherwise we return -1.
2879 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
2880 nvlist_t **schild, uint_t schildren)
2884 for (mc = 0; mc < mchildren; mc++) {
2886 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2887 mchild[mc], B_FALSE);
2889 for (sc = 0; sc < schildren; sc++) {
2890 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2891 schild[sc], B_FALSE);
2892 boolean_t result = (strcmp(mpath, spath) == 0);
2908 * Split a mirror pool. If '*newroot' is NULL, then a new nvlist
2909 * is generated and it is the responsibility of the caller to free it.
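/*
 * Illustrative sketch (hypothetical caller): a dry-run split that only
 * computes and returns the would-be config of the new pool.
 *
 *	nvlist_t *newroot = NULL;
 *	splitflags_t flags = { 0 };
 *
 *	flags.dryrun = 1;
 *	if (zpool_vdev_split(zhp, "newpool", &newroot, NULL, flags) == 0)
 *		nvlist_free(newroot);
 */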
2912 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
2913 nvlist_t *props, splitflags_t flags)
2915 zfs_cmd_t zc = { 0 };
2917 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
2918 nvlist_t **varray = NULL, *zc_props = NULL;
2919 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
2920 libzfs_handle_t *hdl = zhp->zpool_hdl;
2922 boolean_t freelist = B_FALSE, memory_err = B_TRUE;
2925 (void) snprintf(msg, sizeof (msg),
2926 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
2928 if (!zpool_name_valid(hdl, B_FALSE, newname))
2929 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
2931 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
2932 (void) fprintf(stderr, gettext("Internal error: unable to "
2933 "retrieve pool configuration\n"));
2937 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
2939 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
2942 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
2943 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
2944 props, vers, flags, msg)) == NULL)
2948 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2950 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2951 "Source pool is missing vdev tree"));
2953 nvlist_free(zc_props);
2957 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
2960 if (*newroot == NULL ||
2961 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
2962 &newchild, &newchildren) != 0)
2965 for (c = 0; c < children; c++) {
2966 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
2968 nvlist_t **mchild, *vdev;
2973 * Unlike cache & spares, slogs are stored in the
2974 * ZPOOL_CONFIG_CHILDREN array. We filter them out here.
2976 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2978 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
2980 if (is_log || is_hole) {
2982 * Create a hole vdev and put it in the config.
2984 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
2986 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
2987 VDEV_TYPE_HOLE) != 0)
2989 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
2994 varray[vcount++] = vdev;
2998 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
3000 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
3001 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3002 "Source pool must be composed only of mirrors\n"));
3003 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
3007 verify(nvlist_lookup_nvlist_array(child[c],
3008 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
3010 /* find or add an entry for this top-level vdev */
3011 if (newchildren > 0 &&
3012 (entry = find_vdev_entry(zhp, mchild, mchildren,
3013 newchild, newchildren)) >= 0) {
3014 /* We found a disk that the user specified. */
3015 vdev = mchild[entry];
3018 /* User didn't specify a disk for this vdev. */
3019 vdev = mchild[mchildren - 1];
3022 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
3026 /* did we find every disk the user specified? */
3027 if (found != newchildren) {
3028 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
3029 "include at most one disk from each mirror"));
3030 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
3034 /* Prepare the nvlist for populating. */
3035 if (*newroot == NULL) {
3036 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
3039 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
3040 VDEV_TYPE_ROOT) != 0)
3043 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
3046 /* Add all the children we found */
3047 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
3048 lastlog == 0 ? vcount : lastlog) != 0)
3052 * If we're just doing a dry run, exit now with success.
3055 memory_err = B_FALSE;
3060 /* now build up the config list & call the ioctl */
3061 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
3064 if (nvlist_add_nvlist(newconfig,
3065 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
3066 nvlist_add_string(newconfig,
3067 ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
3068 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
3072 * The new pool is automatically part of the namespace unless we
3073 * explicitly export it.
3076 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
3077 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3078 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
3079 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
3081 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
3084 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
3085 retval = zpool_standard_error(hdl, errno, msg);
3090 memory_err = B_FALSE;
3093 if (varray != NULL) {
3096 for (v = 0; v < vcount; v++)
3097 nvlist_free(varray[v]);
3100 zcmd_free_nvlists(&zc);
3102 nvlist_free(zc_props);
3104 nvlist_free(newconfig);
3106 nvlist_free(*newroot);
3114 return (no_memory(hdl));
3120 * Remove the given device. Currently, this is supported only for hot
3121 * spares, level 2 cache devices, and top-level log devices.
3124 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
3126 zfs_cmd_t zc = { 0 };
3129 boolean_t avail_spare, l2cache, islog;
3130 libzfs_handle_t *hdl = zhp->zpool_hdl;
3133 (void) snprintf(msg, sizeof (msg),
3134 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3136 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3137 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3139 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3141 * XXX - this should just go away.
3143 if (!avail_spare && !l2cache && !islog) {
3144 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3145 "only inactive hot spares, cache, top-level, "
3146 "or log devices can be removed"));
3147 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3150 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
3151 if (islog && version < SPA_VERSION_HOLES) {
3152 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3153 "pool must be upgrade to support log removal"));
3154 return (zfs_error(hdl, EZFS_BADVERSION, msg));
3157 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3159 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3162 return (zpool_standard_error(hdl, errno, msg));
3166 * Clear the errors for the pool, or the particular device if specified.
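/*
 * Illustrative sketch (hypothetical caller), assuming the rewind-policy
 * nvlist keys consumed by zpool_get_rewind_policy(): clear all errors
 * on the pool without attempting any rewind.
 *
 *	nvlist_t *policy = fnvlist_alloc();
 *
 *	fnvlist_add_uint32(policy, ZPOOL_REWIND_REQUEST, ZPOOL_NO_REWIND);
 *	(void) zpool_clear(zhp, NULL, policy);
 *	fnvlist_free(policy);
 */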
3169 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
3171 zfs_cmd_t zc = { 0 };
3174 zpool_rewind_policy_t policy;
3175 boolean_t avail_spare, l2cache;
3176 libzfs_handle_t *hdl = zhp->zpool_hdl;
3177 nvlist_t *nvi = NULL;
3181 (void) snprintf(msg, sizeof (msg),
3182 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3185 (void) snprintf(msg, sizeof (msg),
3186 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3189 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3191 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
3192 &l2cache, NULL)) == NULL)
3193 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3196 * Don't allow error clearing for hot spares. Do allow
3197 * error clearing for l2cache devices.
3200 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3202 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
3206 zpool_get_rewind_policy(rewindnvl, &policy);
3207 zc.zc_cookie = policy.zrp_request;
3209 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
3212 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
3215 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
3217 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3218 zcmd_free_nvlists(&zc);
3223 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
3224 errno != EPERM && errno != EACCES)) {
3225 if (policy.zrp_request &
3226 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
3227 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
3228 zpool_rewind_exclaim(hdl, zc.zc_name,
3229 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
3233 zcmd_free_nvlists(&zc);
3237 zcmd_free_nvlists(&zc);
3238 return (zpool_standard_error(hdl, errno, msg));
3242 * Similar to zpool_clear(), but takes a GUID (used by fmd).
3245 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
3247 zfs_cmd_t zc = { 0 };
3249 libzfs_handle_t *hdl = zhp->zpool_hdl;
3251 (void) snprintf(msg, sizeof (msg),
3252 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
3255 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3257 zc.zc_cookie = ZPOOL_NO_REWIND;
3259 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
3262 return (zpool_standard_error(hdl, errno, msg));
3266 * Change the GUID for a pool.
3269 zpool_reguid(zpool_handle_t *zhp)
3272 libzfs_handle_t *hdl = zhp->zpool_hdl;
3273 zfs_cmd_t zc = { 0 };
3275 (void) snprintf(msg, sizeof (msg),
3276 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
3278 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3279 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
3282 return (zpool_standard_error(hdl, errno, msg));
3289 zpool_reopen(zpool_handle_t *zhp)
3291 zfs_cmd_t zc = { 0 };
3293 libzfs_handle_t *hdl = zhp->zpool_hdl;
3295 (void) snprintf(msg, sizeof (msg),
3296 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
3299 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3300 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
3302 return (zpool_standard_error(hdl, errno, msg));
3306 * Convert from a devid string to a path.
3309 devid_to_path(char *devid_str)
3314 devid_nmlist_t *list = NULL;
3317 if (devid_str_decode(devid_str, &devid, &minor) != 0)
3320 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
3322 devid_str_free(minor);
3328 if ((path = strdup(list[0].devname)) == NULL)
3331 devid_free_nmlist(list);
3337 * Convert from a path to a devid string.
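/*
 * Illustrative sketch (hypothetical): devid_to_path() above and
 * path_to_devid() below are near-inverses, which is what the stale-path
 * detection in zpool_vdev_name() relies on:
 *
 *	char *devid, *newpath;
 *
 *	if ((devid = path_to_devid("/dev/dsk/c0t0d0s0")) != NULL) {
 *		if ((newpath = devid_to_path(devid)) != NULL)
 *			free(newpath);
 *		devid_str_free(devid);
 *	}
 */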
3340 path_to_devid(const char *path)
3347 if ((fd = open(path, O_RDONLY)) < 0)
3352 if (devid_get(fd, &devid) == 0) {
3353 if (devid_get_minor_name(fd, &minor) == 0)
3354 ret = devid_str_encode(devid, minor);
3356 devid_str_free(minor);
3368 * Issue the necessary ioctl() to update the stored path value for the vdev. We
3369 * ignore any failure here, since a common case is for an unprivileged user to
3370 * type 'zpool status', and we'll display the correct information anyway.
3373 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
3375 zfs_cmd_t zc = { 0 };
3377 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3378 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
3379 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3382 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
3386 * Given a vdev, return the name to display in iostat. If the vdev has a path,
3387 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
3388 * We also check if this is a whole disk, in which case we strip off the
3389 * trailing 's0' slice name.
3391 * This routine is also responsible for identifying when disks have been
3392 * reconfigured in a new location. The kernel will have opened the device by
3393 * devid, but the path will still refer to the old location. To catch this, we
3394 * first do a path -> devid translation (which is fast for the common case). If
3395 * the devid matches, we're done. If not, we do a reverse devid -> path
3396 * translation and issue the appropriate ioctl() to update the path of the vdev.
3397 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
3398 * of these checks.
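/*
 * Illustrative caller sketch (hypothetical; 'nvtop' is some vdev nvlist
 * taken from the pool's config). The returned name is zfs_strdup()'d
 * and must be freed:
 *
 *	char *name = zpool_vdev_name(hdl, zhp, nvtop, B_FALSE);
 *
 *	(void) printf("%s\n", name);
 *	free(name);
 */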
3401 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
3412 have_stats = nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
3413 (uint64_t **)&vs, &vsc) == 0;
3414 have_path = nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0;
3417 * If the device is not currently present, assume it will not
3418 * come back at the same device path. Display the device by GUID.
3420 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
3421 (have_path && have_stats && vs->vs_state <= VDEV_STATE_CANT_OPEN)) {
3422 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3424 (void) snprintf(buf, sizeof (buf), "%llu",
3425 (u_longlong_t)value);
3427 } else if (have_path) {
3430 * If the device is dead (faulted, offline, etc) then don't
3431 * bother opening it. Otherwise we may be forcing the user to
3432 * open a misbehaving device, which can have undesirable
3433 * effects.
3435 if ((have_stats == 0 ||
3436 vs->vs_state >= VDEV_STATE_DEGRADED) &&
3438 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
3440 * Determine if the current path is correct.
3442 char *newdevid = path_to_devid(path);
3444 if (newdevid == NULL ||
3445 strcmp(devid, newdevid) != 0) {
3448 if ((newpath = devid_to_path(devid)) != NULL) {
3450 * Update the path appropriately.
3452 set_path(zhp, nv, newpath);
3453 if (nvlist_add_string(nv,
3454 ZPOOL_CONFIG_PATH, newpath) == 0)
3455 verify(nvlist_lookup_string(nv,
3463 devid_str_free(newdevid);
3467 if (strncmp(path, "/dev/dsk/", 9) == 0)
3470 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
3471 &value) == 0 && value) {
3472 int pathlen = strlen(path);
3473 char *tmp = zfs_strdup(hdl, path);
3476 * If it starts with c#, and ends with "s0", chop
3477 * the "s0" off, or if it ends with "s0/old", remove
3478 * the "s0" from the middle.
3480 if (CTD_CHECK(tmp)) {
3481 if (strcmp(&tmp[pathlen - 2], "s0") == 0) {
3482 tmp[pathlen - 2] = '\0';
3483 } else if (pathlen > 6 &&
3484 strcmp(&tmp[pathlen - 6], "s0/old") == 0) {
3485 (void) strcpy(&tmp[pathlen - 6],
3492 if (strncmp(path, _PATH_DEV, sizeof (_PATH_DEV) - 1) == 0)
3493 path += sizeof (_PATH_DEV) - 1;
3496 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
3499 * If it's a raidz device, we need to stick in the parity level.
3501 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
3502 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3504 (void) snprintf(buf, sizeof (buf), "%s%llu", path,
3505 (u_longlong_t)value);
3510 * We identify each top-level vdev by using a <type-id>
3511 * naming convention.
3516 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
3518 (void) snprintf(buf, sizeof (buf), "%s-%llu", path,
3524 return (zfs_strdup(hdl, path));
3528 zbookmark_compare(const void *a, const void *b)
3530 return (memcmp(a, b, sizeof (zbookmark_phys_t)));
3534 * Retrieve the persistent error log, uniquify the members, and return to the
3535 * caller.
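/*
 * Illustrative sketch (hypothetical caller) of walking the result:
 *
 *	nvlist_t *nverrlist = NULL;
 *	nvpair_t *elem = NULL;
 *
 *	if (zpool_get_errlog(zhp, &nverrlist) == 0) {
 *		while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
 *			nvlist_t *nv;
 *			uint64_t ds, obj;
 *
 *			verify(nvpair_value_nvlist(elem, &nv) == 0);
 *			ds = fnvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET);
 *			obj = fnvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT);
 *		}
 *		nvlist_free(nverrlist);
 *	}
 */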
3538 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
3540 zfs_cmd_t zc = { 0 };
3542 zbookmark_phys_t *zb = NULL;
3546 * Retrieve the raw error list from the kernel. If the number of errors
3547 * has increased, allocate more space and continue until we get the
3548 * entire list.
3550 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
3554 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
3555 count * sizeof (zbookmark_phys_t))) == (uintptr_t)NULL)
3557 zc.zc_nvlist_dst_size = count;
3558 (void) strcpy(zc.zc_name, zhp->zpool_name);
3560 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
3562 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3563 if (errno == ENOMEM) {
3566 count = zc.zc_nvlist_dst_size;
3567 dst = zfs_alloc(zhp->zpool_hdl, count *
3568 sizeof (zbookmark_phys_t));
3571 zc.zc_nvlist_dst = (uintptr_t)dst;
3581 * Sort the resulting bookmarks. This is a little confusing due to the
3582 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
3583 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
3584 * _not_ copied as part of the process. So we point the start of our
3585 * array appropriately and decrement the total number of elements.
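/*
 * Worked example (illustrative): if we allocated room for 100 bookmarks
 * and the kernel copied 60 of them, 'zc_nvlist_dst_size' is 40 on
 * return; the copied entries occupy elements 40..99, so 'zb' points at
 * element 40 and 'count' drops to 60.
 */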
3587 zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
3588 zc.zc_nvlist_dst_size;
3589 count -= zc.zc_nvlist_dst_size;
3591 qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_compare);
3593 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
3596 * Fill in the nverrlistp with nvlists of dataset and object numbers.
3598 for (i = 0; i < count; i++) {
3601 /* ignoring zb_blkid and zb_level for now */
3602 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
3603 zb[i-1].zb_object == zb[i].zb_object)
3606 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
3608 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
3609 zb[i].zb_objset) != 0) {
3613 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
3614 zb[i].zb_object) != 0) {
3618 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
3625 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3629 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3630 return (no_memory(zhp->zpool_hdl));
3634 * Upgrade a ZFS pool to the latest on-disk version.
3637 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
3639 zfs_cmd_t zc = { 0 };
3640 libzfs_handle_t *hdl = zhp->zpool_hdl;
3642 (void) strcpy(zc.zc_name, zhp->zpool_name);
3643 zc.zc_cookie = new_version;
3645 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
3646 return (zpool_standard_error_fmt(hdl, errno,
3647 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
3653 zfs_save_arguments(int argc, char **argv, char *string, int len)
3655 (void) strlcpy(string, basename(argv[0]), len);
3656 for (int i = 1; i < argc; i++) {
3657 (void) strlcat(string, " ", len);
3658 (void) strlcat(string, argv[i], len);
3663 zpool_log_history(libzfs_handle_t *hdl, const char *message)
3665 zfs_cmd_t zc = { 0 };
3669 args = fnvlist_alloc();
3670 fnvlist_add_string(args, "message", message);
3671 err = zcmd_write_src_nvlist(hdl, &zc, args);
3673 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
3675 zcmd_free_nvlists(&zc);
3680 * Perform ioctl to get some command history of a pool.
3682 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
3683 * logical offset of the history buffer to start reading from.
3685 * Upon return, 'off' is the next logical offset to read from and
3686 * 'len' is the actual number of bytes read into 'buf'.
3689 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
3691 zfs_cmd_t zc = { 0 };
3692 libzfs_handle_t *hdl = zhp->zpool_hdl;
3694 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3696 zc.zc_history = (uint64_t)(uintptr_t)buf;
3697 zc.zc_history_len = *len;
3698 zc.zc_history_offset = *off;
3700 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
3703 return (zfs_error_fmt(hdl, EZFS_PERM,
3704 dgettext(TEXT_DOMAIN,
3705 "cannot show history for pool '%s'"),
3708 return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
3709 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3710 "'%s'"), zhp->zpool_name));
3712 return (zfs_error_fmt(hdl, EZFS_BADVERSION,
3713 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3714 "'%s', pool must be upgraded"), zhp->zpool_name));
3716 return (zpool_standard_error_fmt(hdl, errno,
3717 dgettext(TEXT_DOMAIN,
3718 "cannot get history for '%s'"), zhp->zpool_name));
3722 *len = zc.zc_history_len;
3723 *off = zc.zc_history_offset;
3729 * Process the buffer of nvlists, unpacking and storing each nvlist record
3730 * into 'records'. 'leftover' is set to the number of bytes that weren't
3731 * processed as there wasn't a complete record.
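/*
 * Record framing (illustrative): each record is a 64-bit little-endian
 * length followed by that many bytes of packed nvlist. A 300-byte
 * record therefore looks like:
 *
 *	bytes 0-7:	2c 01 00 00 00 00 00 00   (reclen = 0x12c = 300)
 *	bytes 8-307:	packed nvlist data
 */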
3734 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
3735 nvlist_t ***records, uint_t *numrecords)
3741 while (bytes_read > sizeof (reclen)) {
3743 /* get length of packed record (stored as little endian) */
3744 for (i = 0, reclen = 0; i < sizeof (reclen); i++)
3745 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
3747 if (bytes_read < sizeof (reclen) + reclen)
3751 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
3753 bytes_read -= sizeof (reclen) + reclen;
3754 buf += sizeof (reclen) + reclen;
3756 /* add record to nvlist array */
3758 if (ISP2(*numrecords + 1)) {
3759 *records = realloc(*records,
3760 *numrecords * 2 * sizeof (nvlist_t *));
3762 (*records)[*numrecords - 1] = nv;
3765 *leftover = bytes_read;
3769 /* from spa_history.c: spa_history_create_obj() */
3770 #define HIS_BUF_LEN_DEF (128 << 10)
3771 #define HIS_BUF_LEN_MAX (1 << 30)
3774 * Retrieve the command history of a pool.
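/*
 * Illustrative sketch (hypothetical caller):
 *
 *	nvlist_t *nvhis = NULL;
 *	nvlist_t **records;
 *	uint_t nrecords;
 *
 *	if (zpool_get_history(zhp, &nvhis) == 0) {
 *		verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
 *		    &records, &nrecords) == 0);
 *		nvlist_free(nvhis);
 *	}
 */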
3777 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
3780 uint64_t bufsize = HIS_BUF_LEN_DEF;
3782 nvlist_t **records = NULL;
3783 uint_t numrecords = 0;
3786 if ((buf = malloc(bufsize)) == NULL)
3789 uint64_t bytes_read = bufsize;
3792 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
3795 /* if nothing else was read in, we're at EOF, just return */
3796 if (bytes_read == 0)
3799 if ((err = zpool_history_unpack(buf, bytes_read,
3800 &leftover, &records, &numrecords)) != 0)
3805 * If the history block is too big, double the buffer
3806 * size and try again.
3808 if (leftover == bytes_read) {
3813 if ((bufsize >= HIS_BUF_LEN_MAX) ||
3814 ((buf = malloc(bufsize)) == NULL)) {
3825 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
3826 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
3827 records, numrecords) == 0);
3829 for (i = 0; i < numrecords; i++)
3830 nvlist_free(records[i]);
3837 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
3838 char *pathname, size_t len)
3840 zfs_cmd_t zc = { 0 };
3841 boolean_t mounted = B_FALSE;
3842 char *mntpnt = NULL;
3843 char dsname[MAXNAMELEN];
3846 /* special case for the MOS */
3847 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
3851 /* get the dataset's name */
3852 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3854 if (ioctl(zhp->zpool_hdl->libzfs_fd,
3855 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
3856 /* just write out a path of two object numbers */
3857 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
3861 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
3863 /* find out if the dataset is mounted */
3864 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
3866 /* get the corrupted object's path */
3867 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
3869 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
3872 (void) snprintf(pathname, len, "%s%s", mntpnt,
3875 (void) snprintf(pathname, len, "%s:%s",
3876 dsname, zc.zc_value);
3879 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj);
3886 * Read the EFI label from the config; if a label does not exist then
3887 * pass back the error to the caller. If the caller has passed a non-NULL
3888 * diskaddr argument then we set it to the starting address of the EFI
3889 * partition.
3892 read_efi_label(nvlist_t *config, diskaddr_t *sb)
3896 char diskname[MAXPATHLEN];
3899 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
3902 (void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
3903 strrchr(path, '/'));
3904 if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
3905 struct dk_gpt *vtoc;
3907 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
3909 *sb = vtoc->efi_parts[0].p_start;
3918 * determine where a partition starts on a disk in the current
3919 * configuration.
3922 find_start_block(nvlist_t *config)
3926 diskaddr_t sb = MAXOFFSET_T;
3929 if (nvlist_lookup_nvlist_array(config,
3930 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
3931 if (nvlist_lookup_uint64(config,
3932 ZPOOL_CONFIG_WHOLE_DISK,
3933 &wholedisk) != 0 || !wholedisk) {
3934 return (MAXOFFSET_T);
3936 if (read_efi_label(config, &sb) < 0)
3941 for (c = 0; c < children; c++) {
3942 sb = find_start_block(child[c]);
3943 if (sb != MAXOFFSET_T) {
3947 return (MAXOFFSET_T);
3952 * Label an individual disk. The name provided is the short name,
3953 * stripped of any leading /dev path.
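/*
 * Illustrative call sketch (hypothetical; 'c1t2d0' is an example short
 * disk name):
 *
 *	if (zpool_label_disk(hdl, zhp, "c1t2d0") != 0)
 *		return (-1);
 */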
3956 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, const char *name)
3959 char path[MAXPATHLEN];
3960 struct dk_gpt *vtoc;
3962 size_t resv = EFI_MIN_RESV_SIZE;
3963 uint64_t slice_size;
3964 diskaddr_t start_block;
3967 /* prepare an error message just in case */
3968 (void) snprintf(errbuf, sizeof (errbuf),
3969 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
3974 if (zpool_is_bootable(zhp)) {
3975 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3976 "EFI labeled devices are not supported on root "
3978 return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
3981 verify(nvlist_lookup_nvlist(zhp->zpool_config,
3982 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3984 if (zhp->zpool_start_block == 0)
3985 start_block = find_start_block(nvroot);
3987 start_block = zhp->zpool_start_block;
3988 zhp->zpool_start_block = start_block;
3991 start_block = NEW_START_BLOCK;
3994 (void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
3995 BACKUP_SLICE);
3997 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
3999 * This shouldn't happen. We've long since verified that this
4000 * is a valid device.
4003 dgettext(TEXT_DOMAIN, "unable to open device"));
4004 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
4007 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
4009 * The only way this can fail is if we run out of memory, or we
4010 * were unable to read the disk's capacity
4012 if (errno == ENOMEM)
4013 (void) no_memory(hdl);
4016 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4017 "unable to read disk capacity"), name);
4019 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
4022 slice_size = vtoc->efi_last_u_lba + 1;
4023 slice_size -= EFI_MIN_RESV_SIZE;
4024 if (start_block == MAXOFFSET_T)
4025 start_block = NEW_START_BLOCK;
4026 slice_size -= start_block;
4028 vtoc->efi_parts[0].p_start = start_block;
4029 vtoc->efi_parts[0].p_size = slice_size;
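/*
 * Resulting layout (illustrative): partition 0 spans
 * [start_block, start_block + slice_size) and holds the ZFS data;
 * partition 8, set up below, holds the EFI_MIN_RESV_SIZE reserved
 * area at the end of the disk.
 */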
4032 * Why we use V_USR: V_BACKUP confuses users, and is considered
4033 * disposable by some EFI utilities (since EFI doesn't have a backup
4034 * slice). V_UNASSIGNED is supposed to be used only for zero size
4035 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
4036 * etc. were all pretty specific. V_USR is as close to reality as we
4037 * can get, in the absence of V_OTHER.
4039 vtoc->efi_parts[0].p_tag = V_USR;
4040 (void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
4042 vtoc->efi_parts[8].p_start = slice_size + start_block;
4043 vtoc->efi_parts[8].p_size = resv;
4044 vtoc->efi_parts[8].p_tag = V_RESERVED;
4046 if (efi_write(fd, vtoc) != 0) {
4048 * Some block drivers (like pcata) may not support EFI
4049 * GPT labels. Print out a helpful error message directing
4050 * the user to manually label the disk and give a specific slice.
4056 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4057 "try using fdisk(1M) and then provide a specific slice"));
4058 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4068 supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
4074 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
4075 if (strcmp(type, VDEV_TYPE_FILE) == 0 ||
4076 strcmp(type, VDEV_TYPE_HOLE) == 0 ||
4077 strcmp(type, VDEV_TYPE_MISSING) == 0) {
4078 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4079 "vdev type '%s' is not supported"), type);
4080 (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
4083 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
4084 &child, &children) == 0) {
4085 for (c = 0; c < children; c++) {
4086 if (!supported_dump_vdev_type(hdl, child[c], errbuf))
4094 * Check if this zvol is allowable for use as a dump device; zero if
4095 * it is, > 0 if it isn't, < 0 if it isn't a zvol.
4097 * Allowable storage configurations include mirrors, all raidz variants, and
4098 * pools with log, cache, and spare devices. Pools which are backed by files or
4099 * have missing/hole vdevs are not suitable.
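/*
 * Illustrative call sketch (hypothetical; the path assumes the
 * /dev/zvol/dsk prefix that ZVOL_FULL_DEV_DIR names on illumos):
 *
 *	if (zvol_check_dump_config("/dev/zvol/dsk/rpool/dump") == 0)
 *		(void) printf("zvol is usable as a dump device\n");
 */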
4102 zvol_check_dump_config(char *arg)
4104 zpool_handle_t *zhp = NULL;
4105 nvlist_t *config, *nvroot;
4109 libzfs_handle_t *hdl;
4111 char poolname[ZPOOL_MAXNAMELEN];
4112 int pathlen = strlen(ZVOL_FULL_DEV_DIR);
4115 if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
4119 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
4120 "dump is not supported on device '%s'"), arg);
4122 if ((hdl = libzfs_init()) == NULL)
4124 libzfs_print_on_error(hdl, B_TRUE);
4126 volname = arg + pathlen;
4128 /* check the configuration of the pool */
4129 if ((p = strchr(volname, '/')) == NULL) {
4130 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4131 "malformed dataset name"));
4132 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
4134 } else if (p - volname >= ZFS_MAXNAMELEN) {
4135 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4136 "dataset name is too long"));
4137 (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
4140 (void) strncpy(poolname, volname, p - volname);
4141 poolname[p - volname] = '\0';
4144 if ((zhp = zpool_open(hdl, poolname)) == NULL) {
4145 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4146 "could not open pool '%s'"), poolname);
4147 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
4150 config = zpool_get_config(zhp, NULL);
4151 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
4153 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4154 "could not obtain vdev configuration for '%s'"), poolname);
4155 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
4159 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
4160 &top, &toplevels) == 0);
4162 if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {