 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
 * Copyright (c) 2018 Datto Inc.
 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>
#include <sys/efi_partition.h>
#include <sys/systeminfo.h>
#include <sys/zfs_ioctl.h>
#include <sys/vdev_disk.h>
#include "zfs_namecheck.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"
static boolean_t zpool_vdev_is_interior(const char *name);

typedef struct prop_flags {
	int create:1;	/* Validate property on creation */
	int import:1;	/* Validate property on import */

 * ====================================================================
 * zpool property functions
 * ====================================================================
zpool_get_all_props(zpool_handle_t *zhp)
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)

	while (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
			zcmd_free_nvlists(&zc);

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);

	zcmd_free_nvlists(&zc);
zpool_props_refresh(zpool_handle_t *zhp)
	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)

	nvlist_free(old_props);
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		return (zpool_prop_default_numeric(prop));

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
 * Map VDEV STATE to printed strings.
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return (gettext("SPLIT"));
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));

	return (gettext("UNKNOWN"));

 * Map POOL STATE to printed strings.
zpool_pool_state_to_name(pool_state_t state)
	case POOL_STATE_ACTIVE:
		return (gettext("ACTIVE"));
	case POOL_STATE_EXPORTED:
		return (gettext("EXPORTED"));
	case POOL_STATE_DESTROYED:
		return (gettext("DESTROYED"));
	case POOL_STATE_SPARE:
		return (gettext("SPARE"));
	case POOL_STATE_L2CACHE:
		return (gettext("L2CACHE"));
	case POOL_STATE_UNINITIALIZED:
		return (gettext("UNINITIALIZED"));
	case POOL_STATE_UNAVAIL:
		return (gettext("UNAVAIL"));
	case POOL_STATE_POTENTIALLY_ACTIVE:
		return (gettext("POTENTIALLY_ACTIVE"));

	return (gettext("UNKNOWN"));
 * Given a pool handle, return the pool health string ("ONLINE", "DEGRADED",
 * "SUSPENDED", etc).
zpool_get_state_str(zpool_handle_t *zhp)
	zpool_errata_t errata;
	zpool_status_t status;

	status = zpool_get_status(zhp, NULL, &errata);

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		str = gettext("FAULTED");
	} else if (status == ZPOOL_STATUS_IO_FAILURE_WAIT ||
	    status == ZPOOL_STATUS_IO_FAILURE_MMP) {
		str = gettext("SUSPENDED");
		verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
		verify(nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
		str = zpool_state_to_name(vs->vs_state, vs->vs_aux);

 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
    size_t len, zprop_source_t *srctype, boolean_t literal)
	zprop_source_t src = ZPROP_SRC_NONE;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, zpool_get_state_str(zhp), len);

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				    zpool_get_prop_string(zhp, prop, &src),
			(void) strlcpy(buf, "-", len);

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_FREEING:
		case ZPOOL_PROP_LEAKED:
		case ZPOOL_PROP_ASHIFT:
			(void) snprintf(buf, len, "%llu",
			    (u_longlong_t)intval);
			(void) zfs_nicenum(intval, buf, len);

		case ZPOOL_PROP_EXPANDSZ:
		case ZPOOL_PROP_CHECKPOINT:
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
				(void) zfs_nicebytes(intval, buf, len);

		case ZPOOL_PROP_CAPACITY:
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);

		case ZPOOL_PROP_FRAGMENTATION:
			if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);

		case ZPOOL_PROP_DEDUPRATIO:
				(void) snprintf(buf, len, "%llu.%02llu",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
				(void) snprintf(buf, len, "%llu.%02llux",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, zpool_get_state_str(zhp), len);

		case ZPOOL_PROP_VERSION:
			if (intval >= SPA_VERSION_FEATURES) {
				(void) snprintf(buf, len, "-");

			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		(void) strlcpy(buf, strval, len);
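
#if 0
/*
 * Illustrative usage sketch (not part of the original file): read the
 * "health" and "size" properties from an already-open pool handle.  The
 * handle 'zhp' is assumed to come from zpool_open() below.
 */
static void
example_print_props(zpool_handle_t *zhp)
{
	char buf[ZFS_MAXPROPLEN];

	/* Human-readable value: literal == B_FALSE applies nice formatting. */
	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf, sizeof (buf),
	    NULL, B_FALSE) == 0)
		(void) printf("health: %s\n", buf);

	/* Literal value: a raw byte count rather than e.g. "1.2T". */
	if (zpool_get_prop(zhp, ZPOOL_PROP_SIZE, buf, sizeof (buf),
	    NULL, B_TRUE) == 0)
		(void) printf("size: %s\n", buf);
}
#endif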
 * Check that the bootfs name carries the name of the pool it is being set
 * on; bootfs is assumed to be a valid dataset name.
bootfs_name_valid(const char *pool, const char *bootfs)
	int len = strlen(pool);
	if (bootfs[0] == '\0')

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))

zpool_is_bootable(zpool_handle_t *zhp)
	char bootfs[ZFS_MAX_DATASET_NAME_LEN];

	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
	    sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-",
	    sizeof (bootfs)) != 0);
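
#if 0
/*
 * Illustrative sketch (not part of the original file): refuse an operation
 * on pools that carry a bootfs, mirroring how callers use
 * zpool_is_bootable() above.
 */
static int
example_check_not_boot_pool(zpool_handle_t *zhp)
{
	if (zpool_is_bootable(zhp)) {
		(void) fprintf(stderr, "'%s' is a boot pool\n",
		    zpool_get_name(zhp));
		return (-1);
	}
	return (0);
}
#endif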
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
	struct stat64 statbuf;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);

	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		prop = zpool_name_to_prop(propname);
		if (prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname)) {
			char *fname = strchr(propname, '@') + 1;

			err = zfeature_lookup_name(fname, NULL);
				ASSERT3U(err, ==, ENOENT);
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid feature '%s'"), fname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);

			if (nvpair_type(elem) != DATA_TYPE_STRING) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be a string"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);

			(void) nvpair_value_string(elem, &strval);
			if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0 &&
			    strcmp(strval, ZFS_FEATURE_DISABLED) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'enabled' or 'disabled'"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);

			    strcmp(strval, ZFS_FEATURE_DISABLED) == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'disabled' at creation time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);

			if (nvlist_add_uint64(retprops, propname, 0) != 0) {
				(void) no_memory(hdl);

		 * Make sure this property is valid and applies to this type.
		if (prop == ZPOOL_PROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);

		if (!flags.create && zpool_prop_setonce(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "property '%s' can only be set at "
			    "creation time"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)

		 * Perform additional checking for specific properties.
		case ZPOOL_PROP_VERSION:
			if (intval < version ||
			    !SPA_VERSION_IS_SUPPORTED(intval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %d is invalid."),
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);

		case ZPOOL_PROP_ASHIFT:
			    (intval < ASHIFT_MIN || intval > ASHIFT_MAX)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %d is invalid, only "
				    "values between %" PRId32 " and "
				    "%" PRId32 " are allowed."),
				    propname, intval, ASHIFT_MIN, ASHIFT_MAX);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);

		case ZPOOL_PROP_BOOTFS:
			if (flags.create || flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
			 * The bootfs property value has to be a dataset name
			 * and the dataset has to be in the pool it is being
			 * set on.
			if (!bootfs_name_valid(poolname, strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);

		case ZPOOL_PROP_ALTROOT:
			if (!flags.create && !flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')

			if (strcmp(strval, "none") == 0)

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);

		case ZPOOL_PROP_COMMENT:
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					dgettext(TEXT_DOMAIN,
					    "comment may only have printable "
					(void) zfs_error(hdl, EZFS_BADPROP,

			if (strlen(strval) > ZPROP_MAX_COMMENT) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "comment must not exceed %d characters"),
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);

		case ZPOOL_PROP_READONLY:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);

		case ZPOOL_PROP_MULTIHOST:
			if (get_system_hostid() == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "requires a non-zero system hostid"));
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);

		case ZPOOL_PROP_DEDUPDITTO:
			printf("Note: property '%s' no longer has "
			    "any effect\n", propname);

	nvlist_free(retprops);
 * Set zpool property: propname=propval.
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
	zfs_cmd_t zc = {"\0"};
	nvlist_t *nvl = NULL;
	prop_flags_t flags = { 0 };

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		return (no_memory(zhp->zpool_hdl));

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {

	 * Execute the corresponding ioctl() to set this property.
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);

	(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);

	(void) zpool_props_refresh(zhp);
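
#if 0
/*
 * Illustrative usage sketch (not part of the original file): set the
 * free-form "comment" property on an open pool handle.  The new value is
 * visible right away because zpool_set_prop() refreshes the cached
 * property nvlist on success.
 */
static void
example_set_comment(zpool_handle_t *zhp)
{
	if (zpool_set_prop(zhp, "comment", "scratch pool") != 0)
		(void) fprintf(stderr, "failed to set comment\n");
}
#endif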
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char buf[ZFS_MAXPROPLEN];
	nvlist_t *features = NULL;
	boolean_t firstexpand = (NULL == *plp);

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)

	while (*last != NULL)
		last = &(*last)->pl_next;

	features = zpool_get_features(zhp);

	if ((*plp)->pl_all && firstexpand) {
		for (i = 0; i < SPA_FEATURES; i++) {
			zprop_list_t *entry = zfs_alloc(hdl,
			    sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_INVAL;
			entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
			    spa_feature_table[i].fi_uname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			last = &entry->pl_next;

	/* add any unsupported features */
	for (nvp = nvlist_next_nvpair(features, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {

		if (zfeature_is_supported(nvpair_name(nvp)))

		propname = zfs_asprintf(hdl, "unsupported@%s",

		 * Before adding the property to the list make sure that no
		 * other pool already added the same property.
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
			entry = entry->pl_next;

		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_INVAL;
		entry->pl_user_prop = propname;
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		last = &entry->pl_next;

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL, B_FALSE) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);

 * Get the state for the given feature on the given ZFS pool.
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
	boolean_t found = B_FALSE;
	nvlist_t *features = zpool_get_features(zhp);

	const char *feature = strchr(propname, '@') + 1;

	supported = zpool_prop_feature(propname);
	ASSERT(supported || zpool_prop_unsupported(propname));
	 * Convert from feature name to feature guid. This conversion is
	 * unnecessary for unsupported@... properties because they already
	 * contain the guid.
		ret = zfeature_lookup_name(feature, &fid);
			(void) strlcpy(buf, "-", len);

		feature = spa_feature_table[fid].fi_guid;

	if (nvlist_lookup_uint64(features, feature, &refcount) == 0)

			(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
			(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
			(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);

			(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
			(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);

		(void) strlcpy(buf, "-", len);
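
#if 0
/*
 * Illustrative usage sketch (not part of the original file): query the
 * state ("enabled", "active", "disabled", or "-") of a feature property.
 * The feature name used here is an arbitrary example.
 */
static void
example_feature_state(zpool_handle_t *zhp)
{
	char buf[64];

	if (zpool_prop_get_feature(zhp, "feature@async_destroy", buf,
	    sizeof (buf)) == 0)
		(void) printf("async_destroy: %s\n", buf);
}
#endif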
 * Validate the given pool name, optionally putting an extended error message
 * in 'buf'.
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
	ret = pool_namecheck(pool, &why, &what);

	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid. So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		    dgettext(TEXT_DOMAIN, "name is reserved"));

		case NAME_ERR_TOOLONG:
			    dgettext(TEXT_DOMAIN, "name is too long"));

		case NAME_ERR_INVALCHAR:
			    dgettext(TEXT_DOMAIN, "invalid character "
			    "'%c' in pool name"), what);

		case NAME_ERR_NOLETTER:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "name must begin with a letter"));

		case NAME_ERR_RESERVED:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "name is reserved"));

		case NAME_ERR_DISKLIKE:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool name is reserved"));

		case NAME_ERR_LEADING_SLASH:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "leading slash in name"));

		case NAME_ERR_EMPTY_COMPONENT:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "empty component in name"));

		case NAME_ERR_TRAILING_SLASH:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "trailing slash in name"));

		case NAME_ERR_MULTIPLE_DELIMITERS:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "multiple '@' and/or '#' delimiters in "

		case NAME_ERR_NO_AT:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "permission set is missing '@'"));

			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "(%d) not defined"), why);
 * Open a handle to the given pool, even if the pool is currently in the
 * FAULTED state.
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
	zpool_handle_t *zhp;

	 * Make sure the pool name is valid.
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);

 * Like the above, but silent on error. Used when iterating over pools (because
 * the configuration cache may be out of date).
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
	zpool_handle_t *zhp;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
zpool_open(libzfs_handle_t *hdl, const char *pool)
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);

 * Close the handle. Simply frees the memory associated with the handle.
zpool_close(zpool_handle_t *zhp)
	nvlist_free(zhp->zpool_config);
	nvlist_free(zhp->zpool_old_config);
	nvlist_free(zhp->zpool_props);

 * Return the name of the pool.
zpool_get_name(zpool_handle_t *zhp)
	return (zhp->zpool_name);

 * Return the state of the pool (ACTIVE or UNAVAILABLE)
zpool_get_state(zpool_handle_t *zhp)
	return (zhp->zpool_state);
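
#if 0
/*
 * Illustrative lifecycle sketch (not part of the original file): open a
 * handle, inspect it, and close it.  The pool name "tank" is an example.
 */
static void
example_open_close(libzfs_handle_t *hdl)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open(hdl, "tank")) == NULL)
		return;		/* error already reported by libzfs */

	(void) printf("%s: %s\n", zpool_get_name(zhp),
	    zpool_pool_state_to_name(zpool_get_state(zhp)));
	zpool_close(zhp);
}
#endif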
 * Check if vdev list contains a special vdev
zpool_has_special_vdev(nvlist_t *nvroot)
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &child,
		for (uint_t c = 0; c < children; c++) {
			if (nvlist_lookup_string(child[c],
			    ZPOOL_CONFIG_ALLOCATION_BIAS, &bias) == 0 &&
			    strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0) {

 * Create the named pool, using the provided vdev list. It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
	zfs_cmd_t zc = {"\0"};
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	nvlist_t *hidden_args = NULL;
	uint8_t *wkeydata = NULL;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)

		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, flags, msg)) == NULL) {

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
		    fsprops, zoned, NULL, NULL, B_TRUE, msg)) == NULL) {

		if (nvlist_exists(zc_fsprops,
		    zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS)) &&
		    !zpool_has_special_vdev(nvroot)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "%s property requires a special vdev"),
			    zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS));
			(void) zfs_error(hdl, EZFS_BADPROP, msg);

		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {

		if (zfs_crypto_create(hdl, NULL, zc_fsprops, props, B_TRUE,
		    &wkeydata, &wkeylen) != 0) {
			zfs_error(hdl, EZFS_CRYPTOFAILED, msg);

		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {

		if (wkeydata != NULL) {
			if (nvlist_alloc(&hidden_args, NV_UNIQUE_NAME, 0) != 0)

			if (nvlist_add_uint8_array(hidden_args, "wkeydata",
			    wkeydata, wkeylen) != 0)

			if (nvlist_add_nvlist(zc_props, ZPOOL_HIDDEN_ARGS,

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);
		nvlist_free(hidden_args);
		if (wkeydata != NULL)
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.  This can also happen if the device is
			 * part of an active md or lvm device.
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device, or "
			    "one of\nthe devices is part of an active md or "
			return (zfs_error(hdl, EZFS_BADDEV, msg));
			 * This happens if the record size is smaller or larger
			 * than the allowed size range, or not a power of 2.
			 *
			 * NOTE: although zfs_valid_proplist is called earlier,
			 * this case may have slipped through since the
			 * pool does not exist yet and it is therefore
			 * impossible to read properties, e.g. max blocksize.
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "record size invalid"));
			return (zfs_error(hdl, EZFS_BADPROP, msg));

			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
				zfs_nicebytes(SPA_MINDEVSIZE, buf,

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);

			return (zfs_error(hdl, EZFS_BADDEV, msg));

			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

			return (zpool_standard_error(hdl, errno, msg));

	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	nvlist_free(hidden_args);
	if (wkeydata != NULL)
 * Destroy the given pool. It is up to the caller to ensure that there are no
 * datasets left in the pool.
zpool_destroy(zpool_handle_t *zhp, const char *log_str)
	zfs_cmd_t zc = {"\0"};
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			(void) zpool_standard_error(hdl, errno, msg);

		remove_mountpoint(zfp);
 * Create a checkpoint in the given pool.
zpool_checkpoint(zpool_handle_t *zhp)
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	error = lzc_pool_checkpoint(zhp->zpool_name);
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot checkpoint '%s'"), zhp->zpool_name);
		(void) zpool_standard_error(hdl, error, msg);

 * Discard the checkpoint from the given pool.
zpool_discard_checkpoint(zpool_handle_t *zhp)
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	error = lzc_pool_checkpoint_discard(zhp->zpool_name);
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot discard checkpoint in '%s'"), zhp->zpool_name);
		(void) zpool_standard_error(hdl, error, msg);
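
#if 0
/*
 * Illustrative sketch (not part of the original file): take a checkpoint
 * before a risky operation and discard it afterwards.
 */
static void
example_checkpoint_window(zpool_handle_t *zhp)
{
	if (zpool_checkpoint(zhp) != 0)
		return;
	/* ... perform the risky reconfiguration here ... */
	(void) zpool_discard_checkpoint(zhp);
}
#endif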
 * Add the given vdevs to the pool. The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);

			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid config; a pool with removing/removed "
			    "vdevs does not support adding raidz vdevs"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);

			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
				zfs_nicebytes(SPA_MINDEVSIZE, buf,

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "

			(void) zfs_error(hdl, EZFS_BADDEV, msg);

			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);

			(void) zpool_standard_error(hdl, errno, msg);

	zcmd_free_nvlists(&zc);
 * Exports the pool from the system. The caller must ensure that there are no
 * mounted datasets in the pool.
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
    const char *log_str)
	zfs_cmd_t zc = {"\0"};

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {

			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,

		return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,

zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
	return (zpool_export_common(zhp, force, B_FALSE, log_str));

zpool_export_force(zpool_handle_t *zhp, const char *log_str)
	return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
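
#if 0
/*
 * Illustrative sketch (not part of the original file): a plain export,
 * escalating to a hard-forced export only when the caller insists.
 */
static int
example_export(zpool_handle_t *zhp, boolean_t hard)
{
	if (hard)
		return (zpool_export_force(zhp, "example hard export"));
	return (zpool_export(zhp, B_FALSE, "example export"));
}
#endif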
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
	nvlist_t *nv = NULL;

	if (!hdl->libzfs_printerr || config == NULL)

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)

	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Would be able to return %s "
			    "to its state as of %s.\n"),
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Pool %s returned to its state as of %s.\n"),

			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    ((longlong_t)loss + 30) / 60);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "minutes of transactions.\n"));
		} else if (loss > 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			(void) printf(dgettext(TEXT_DOMAIN,
			    "seconds of transactions.\n"));
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
	nvlist_t *nv = NULL;
	uint64_t edata = UINT64_MAX;

	if (!hdl->libzfs_printerr)

	(void) printf(dgettext(TEXT_DOMAIN, "action: "));
	(void) printf(dgettext(TEXT_DOMAIN, "\t"));

	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)

	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery is possible, but will result in some data loss.\n"));

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReturning the pool to its state as of %s\n"
		    "\tshould correct the problem. "),
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReverting the pool to an earlier state "
		    "should correct the problem.\n\t"));

		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld minutes of data\n"
		    "\tmust be discarded, irreversibly. "),
		    ((longlong_t)loss + 30) / 60);
	} else if (loss > 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld seconds of data\n"
		    "\tmust be discarded, irreversibly. "),

	if (edata != 0 && edata != UINT64_MAX) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, at least\n"
			    "\tone persistent user-data error will remain. "));
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, several\n"
			    "\tpersistent user-data errors will remain. "));

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
	    reason >= 0 ? "clear" : "import", name);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "A scrub of the pool\n"
	    "\tis strongly recommended after recovery.\n"));

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Destroy and re-create the pool from\n\ta backup source.\n"));
 * zpool_import() is a contracted interface. Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
	nvlist_t *props = NULL;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),

	ret = zpool_import_props(hdl, config, newname, props,

print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
	uint64_t is_log = 0;

	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,

	(void) printf("\t%*s%s%s\n", indent, "", name,
	    is_log ? " [log]" : "");

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)

	for (c = 0; c < children; c++) {
		vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);
		print_vdev_tree(hdl, vname, child[c], indent + 2);

zpool_print_unsup_feat(nvlist_t *config)
	nvlist_t *nvinfo, *unsup_feat;

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
	verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,

	for (nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
	    nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
		verify(nvpair_type(nvp) == DATA_TYPE_STRING);
		verify(nvpair_value_string(nvp, &desc) == 0);

		if (strlen(desc) > 0)
			(void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
			(void) printf("\t%s\n", nvpair_name(nvp));
 * Import the given pool using the known configuration and a list of
 * properties to be set. The configuration should have come from
 * zpool_find_import(). The 'newname' parameter controls whether the pool
 * is imported with a different name.
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, int flags)
	zfs_cmd_t zc = {"\0"};
	zpool_load_policy_t policy;
	nvlist_t *nv = NULL;
	nvlist_t *nvinfo = NULL;
	nvlist_t *missing = NULL;

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
		thename = (char *)newname;

	if (props != NULL) {
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, flags, errbuf)) == NULL)
		if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
		zcmd_free_nvlists(&zc);

	if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
		zcmd_free_nvlists(&zc);

	zc.zc_cookie = flags;
	while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);

	(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);

	zcmd_free_nvlists(&zc);

	zpool_get_load_policy(config, &policy);

		 * Dry-run failed, but we print out what success
		 * looks like if we found a best txg
		if (policy.zlp_rewind & ZPOOL_TRY_REWIND) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,

		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),

		if (nv != NULL && nvlist_lookup_nvlist(nv,
		    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
		    nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
			(void) printf(dgettext(TEXT_DOMAIN, "This "
			    "pool uses the following feature(s) not "
			    "supported by this system:\n"));
			zpool_print_unsup_feat(nv);
			if (nvlist_exists(nvinfo,
			    ZPOOL_CONFIG_CAN_RDONLY)) {
				(void) printf(dgettext(TEXT_DOMAIN,
				    "All unsupported features are only "
				    "required for writing to the pool."
				    "\nThe pool can be imported using "
				    "'-o readonly=on'.\n"));

			 * Unsupported version.
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);

			if (nv != NULL && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0) {
				char *hostname = "<unknown>";
				uint64_t hostid = 0;
				mmp_state_t mmp_state;

				mmp_state = fnvlist_lookup_uint64(nvinfo,
				    ZPOOL_CONFIG_MMP_STATE);

				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_MMP_HOSTNAME))
					hostname = fnvlist_lookup_string(nvinfo,
					    ZPOOL_CONFIG_MMP_HOSTNAME);

				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_MMP_HOSTID))
					hostid = fnvlist_lookup_uint64(nvinfo,
					    ZPOOL_CONFIG_MMP_HOSTID);

				if (mmp_state == MMP_STATE_ACTIVE) {
					(void) snprintf(aux, sizeof (aux),
					    dgettext(TEXT_DOMAIN, "pool is imp"
					    "orted on host '%s' (hostid=%lx).\n"
					    "Export the pool on the other "
					    "system, then run 'zpool import'."),
					    hostname, (unsigned long) hostid);
				} else if (mmp_state == MMP_STATE_NO_HOSTID) {
					(void) snprintf(aux, sizeof (aux),
					    dgettext(TEXT_DOMAIN, "pool has "
					    "the multihost property on and "
					    "the\nsystem's hostid is not set. "
					    "Set a unique system hostid with "
					    "the zgenhostid(8) command.\n"));

				(void) zfs_error_aux(hdl, aux);

			(void) zfs_error(hdl, EZFS_ACTIVE_POOL, desc);

			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);

			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);

			if (nv && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_lookup_nvlist(nvinfo,
			    ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
				(void) printf(dgettext(TEXT_DOMAIN,
				    "The devices below are missing or "
				    "corrupted, use '-m' to import the pool "
				    "anyway:\n"));
				print_vdev_tree(hdl, NULL, missing, 2);
				(void) printf("\n");

			(void) zpool_standard_error(hdl, error, desc);

			(void) zpool_standard_error(hdl, error, desc);

			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices are already in use\n"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);

			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "new name of at least one dataset is longer than "
			    "the maximum allowable length"));
			(void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);

			(void) zpool_standard_error(hdl, error, desc);
			zpool_explain_recover(hdl,
			    newname ? origname : thename, -error, nv);

		zpool_handle_t *zhp;

		 * This should never fail, but play it safe anyway.
		if (zpool_open_silent(hdl, thename, &zhp) != 0)
		else if (zhp != NULL)

		if (policy.zlp_rewind &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0), nv);
 * Translate vdev names to guids. If a vdev_path is determined to be
 * unsuitable then a vd_errlist is allocated and the vdev path and errno
 * are added to it.
2079 nvlist_t *vdev_guids, nvlist_t *guids_to_paths, nvlist_t **vd_errlist)
2081 nvlist_t *errlist = NULL;
2084 for (nvpair_t *elem = nvlist_next_nvpair(vds, NULL); elem != NULL;
2085 elem = nvlist_next_nvpair(vds, elem)) {
2086 boolean_t spare, cache;
2088 char *vd_path = nvpair_name(elem);
2089 nvlist_t *tgt = zpool_find_vdev(zhp, vd_path, &spare, &cache,
2092 if ((tgt == NULL) || cache || spare) {
2093 if (errlist == NULL) {
2094 errlist = fnvlist_alloc();
2098 uint64_t err = (tgt == NULL) ? EZFS_NODEVICE :
2099 (spare ? EZFS_ISSPARE : EZFS_ISL2CACHE);
2100 fnvlist_add_int64(errlist, vd_path, err);
2104 uint64_t guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
2105 fnvlist_add_uint64(vdev_guids, vd_path, guid);
2107 char msg[MAXNAMELEN];
2108 (void) snprintf(msg, sizeof (msg), "%llu", (u_longlong_t)guid);
2109 fnvlist_add_string(guids_to_paths, msg, vd_path);
2113 verify(errlist != NULL);
2114 if (vd_errlist != NULL)
2115 *vd_errlist = errlist;
2117 fnvlist_free(errlist);
2124 xlate_init_err(int err)
2128 return (EZFS_NODEVICE);
2131 return (EZFS_BADDEV);
2133 return (EZFS_INITIALIZING);
2135 return (EZFS_NO_INITIALIZE);
 * Begin, suspend, or cancel the initialization (initializing of all free
 * blocks) for the given vdevs in the given pool.
zpool_initialize_impl(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
    nvlist_t *vds, boolean_t wait)
	nvlist_t *vdev_guids = fnvlist_alloc();
	nvlist_t *guids_to_paths = fnvlist_alloc();
	nvlist_t *vd_errlist = NULL;

	err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,
	    guids_to_paths, &vd_errlist);

		verify(vd_errlist != NULL);

	err = lzc_initialize(zhp->zpool_name, cmd_type,
	    vdev_guids, &errlist);

		if (errlist != NULL) {
			vd_errlist = fnvlist_lookup_nvlist(errlist,
			    ZPOOL_INITIALIZE_VDEVS);

			(void) zpool_standard_error(zhp->zpool_hdl, err,
			    dgettext(TEXT_DOMAIN, "operation failed"));

	for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;
	    elem = nvlist_next_nvpair(vdev_guids, elem)) {
		uint64_t guid = fnvpair_value_uint64(elem);

		err = lzc_wait_tag(zhp->zpool_name,
		    ZPOOL_WAIT_INITIALIZE, guid, NULL);
			(void) zpool_standard_error_fmt(zhp->zpool_hdl,
			    err, dgettext(TEXT_DOMAIN, "error "
			    "waiting for '%s' to initialize"),

	for (elem = nvlist_next_nvpair(vd_errlist, NULL); elem != NULL;
	    elem = nvlist_next_nvpair(vd_errlist, elem)) {
		int64_t vd_error = xlate_init_err(fnvpair_value_int64(elem));

		if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),
			path = nvpair_name(elem);

		(void) zfs_error_fmt(zhp->zpool_hdl, vd_error,
		    "cannot initialize '%s'", path);

	fnvlist_free(vdev_guids);
	fnvlist_free(guids_to_paths);

	if (vd_errlist != NULL)
		fnvlist_free(vd_errlist);

	return (err == 0 ? 0 : -1);

zpool_initialize(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
	return (zpool_initialize_impl(zhp, cmd_type, vds, B_FALSE));

zpool_initialize_wait(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
	return (zpool_initialize_impl(zhp, cmd_type, vds, B_TRUE));
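
#if 0
/*
 * Illustrative sketch (not part of the original file): start initializing
 * one leaf vdev and wait for it to finish.  The vdev name "sda" is
 * hypothetical, and POOL_INITIALIZE_START is assumed from the matching
 * sys/fs/zfs.h definitions.
 */
static int
example_initialize_vdev(zpool_handle_t *zhp)
{
	nvlist_t *vds = fnvlist_alloc();
	int err;

	fnvlist_add_boolean(vds, "sda");	/* hypothetical device */
	err = zpool_initialize_wait(zhp, POOL_INITIALIZE_START, vds);
	fnvlist_free(vds);
	return (err);
}
#endif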
xlate_trim_err(int err)
		return (EZFS_NODEVICE);
		return (EZFS_BADDEV);
		return (EZFS_TRIMMING);
		return (EZFS_NO_TRIM);
		return (EZFS_TRIM_NOTSUP);

zpool_trim_wait(zpool_handle_t *zhp, nvlist_t *vdev_guids)
	for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;
	    elem = nvlist_next_nvpair(vdev_guids, elem)) {
		uint64_t guid = fnvpair_value_uint64(elem);

		err = lzc_wait_tag(zhp->zpool_name,
		    ZPOOL_WAIT_TRIM, guid, NULL);
			(void) zpool_standard_error_fmt(zhp->zpool_hdl,
			    err, dgettext(TEXT_DOMAIN, "error "
			    "waiting to trim '%s'"), nvpair_name(elem));
 * Check errlist and report any errors, omitting ones which should be
 * suppressed. Returns B_TRUE if any errors were reported.
check_trim_errs(zpool_handle_t *zhp, trimflags_t *trim_flags,
    nvlist_t *guids_to_paths, nvlist_t *vds, nvlist_t *errlist)
	boolean_t reported_errs = B_FALSE;
	int num_suppressed_errs = 0;

	for (elem = nvlist_next_nvpair(vds, NULL);
	    elem != NULL; elem = nvlist_next_nvpair(vds, elem)) {

	for (elem = nvlist_next_nvpair(errlist, NULL);
	    elem != NULL; elem = nvlist_next_nvpair(errlist, elem)) {
		int64_t vd_error = xlate_trim_err(fnvpair_value_int64(elem));

		 * If only the pool was specified, and it was not a secure
		 * trim then suppress warnings for individual vdevs which
		 * do not support trimming.
		if (vd_error == EZFS_TRIM_NOTSUP &&
		    trim_flags->fullpool &&
		    !trim_flags->secure) {
			num_suppressed_errs++;

		reported_errs = B_TRUE;
		if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),
			path = nvpair_name(elem);

		(void) zfs_error_fmt(zhp->zpool_hdl, vd_error,
		    "cannot trim '%s'", path);

	if (num_suppressed_errs == num_vds) {
		(void) zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
		    "no devices in pool support trim operations"));
		(void) (zfs_error(zhp->zpool_hdl, EZFS_TRIM_NOTSUP,
		    dgettext(TEXT_DOMAIN, "cannot trim")));
		reported_errs = B_TRUE;

	return (reported_errs);
 * Begin, suspend, or cancel the TRIM (discarding of all free blocks) for
 * the given vdevs in the given pool.
zpool_trim(zpool_handle_t *zhp, pool_trim_func_t cmd_type, nvlist_t *vds,
    trimflags_t *trim_flags)
	nvlist_t *vdev_guids = fnvlist_alloc();
	nvlist_t *guids_to_paths = fnvlist_alloc();
	nvlist_t *errlist = NULL;

	err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,
	    guids_to_paths, &errlist);
		check_trim_errs(zhp, trim_flags, guids_to_paths, vds, errlist);

	err = lzc_trim(zhp->zpool_name, cmd_type, trim_flags->rate,
	    trim_flags->secure, vdev_guids, &errlist);
		nvlist_t *vd_errlist;
		if (errlist != NULL && nvlist_lookup_nvlist(errlist,
		    ZPOOL_TRIM_VDEVS, &vd_errlist) == 0) {
			if (check_trim_errs(zhp, trim_flags, guids_to_paths,

			(void) snprintf(msg, sizeof (msg),
			    dgettext(TEXT_DOMAIN, "operation failed"));
			zpool_standard_error(zhp->zpool_hdl, err, msg);

	if (trim_flags->wait)
		retval = zpool_trim_wait(zhp, vdev_guids);

	if (errlist != NULL)
		fnvlist_free(errlist);
	fnvlist_free(vdev_guids);
	fnvlist_free(guids_to_paths);
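
#if 0
/*
 * Illustrative sketch (not part of the original file): TRIM a single,
 * explicitly named device.  The trimflags_t field names match their uses
 * in zpool_trim() above; POOL_TRIM_START and the device name "sda" are
 * assumptions for the example.
 */
static int
example_trim_vdev(zpool_handle_t *zhp)
{
	trimflags_t flags = { 0 };
	nvlist_t *vds = fnvlist_alloc();
	int err;

	flags.fullpool = B_FALSE;	/* one named vdev, not the whole pool */
	flags.wait = B_TRUE;		/* block until the TRIM completes */

	fnvlist_add_boolean(vds, "sda");	/* hypothetical device */
	err = zpool_trim(zhp, POOL_TRIM_START, vds, &flags);
	fnvlist_free(vds);
	return (err);
}
#endif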
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd)
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = func;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0)

	/* ECANCELED on a scrub means we resumed a paused scrub */
	if (err == ECANCELED && func == POOL_SCAN_SCRUB &&
	    cmd == POOL_SCRUB_NORMAL)

	if (err == ENOENT && func != POOL_SCAN_NONE && cmd == POOL_SCRUB_NORMAL)

	if (func == POOL_SCAN_SCRUB) {
		if (cmd == POOL_SCRUB_PAUSE) {
			(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
			    "cannot pause scrubbing %s"), zc.zc_name);
			assert(cmd == POOL_SCRUB_NORMAL);
			(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
			    "cannot scrub %s"), zc.zc_name);
	} else if (func == POOL_SCAN_RESILVER) {
		assert(cmd == POOL_SCRUB_NORMAL);
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot restart resilver on %s"), zc.zc_name);
	} else if (func == POOL_SCAN_NONE) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
		assert(!"unexpected result");

		pool_scan_stat_t *ps = NULL;

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
		if (ps && ps->pss_func == POOL_SCAN_SCRUB) {
			if (cmd == POOL_SCRUB_PAUSE)
				return (zfs_error(hdl, EZFS_SCRUB_PAUSED, msg));
				return (zfs_error(hdl, EZFS_SCRUBBING, msg));
			return (zfs_error(hdl, EZFS_RESILVERING, msg));
	} else if (err == ENOENT) {
		return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
	} else if (err == ENOTSUP && func == POOL_SCAN_RESILVER) {
		return (zfs_error(hdl, EZFS_NO_RESILVER_DEFER, msg));
		return (zpool_standard_error(hdl, err, msg));
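
#if 0
/*
 * Illustrative sketch (not part of the original file): start, pause, and
 * cancel a scrub through zpool_scan() above.
 */
static void
example_scrub_controls(zpool_handle_t *zhp)
{
	(void) zpool_scan(zhp, POOL_SCAN_SCRUB, POOL_SCRUB_NORMAL); /* start */
	(void) zpool_scan(zhp, POOL_SCAN_SCRUB, POOL_SCRUB_PAUSE); /* pause */
	(void) zpool_scan(zhp, POOL_SCAN_NONE, POOL_SCRUB_NORMAL); /* cancel */
}
#endif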
 * Find a vdev that matches the search criteria specified. We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare; but FALSE if it's an INUSE spare.
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
	nvpair_t *pair = nvlist_next_nvpair(search, NULL);

	/* Nothing to look for */
	if (search == NULL || pair == NULL)

	/* Obtain the key we will use to search */
	srchkey = nvpair_name(pair);

	switch (nvpair_type(pair)) {
	case DATA_TYPE_UINT64:
		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
			uint64_t srchval, theguid;

			verify(nvpair_value_uint64(pair, &srchval) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
			if (theguid == srchval)

	case DATA_TYPE_STRING: {
		char *srchval, *val;

		verify(nvpair_value_string(pair, &srchval) == 0);
		if (nvlist_lookup_string(nv, srchkey, &val) != 0)

		 * Search for the requested value. Special cases:
		 *
		 * - ZPOOL_CONFIG_PATH for whole disk entries.  These end in
		 *   "-part1", or "p1".  The suffix is hidden from the user,
		 *   but included in the string, so this matches around it.
		 * - ZPOOL_CONFIG_PATH for short names zfs_strcmp_shortname()
		 *   is used to check all possible expanded paths.
		 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
		 *
		 * Otherwise, all other searches are simple string compares.
		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
			uint64_t wholedisk = 0;

			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
			if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)

		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
			char *type, *idx, *end, *p;
			uint64_t id, vdev_id;

			 * Determine our vdev type, keeping in mind
			 * that the srchval is composed of a type and
			 * vdev id pair (i.e. mirror-4).
			if ((type = strdup(srchval)) == NULL)

			if ((p = strrchr(type, '-')) == NULL) {

			 * If the types don't match then keep looking.
			if (strncmp(val, type, strlen(val)) != 0) {

			verify(zpool_vdev_is_interior(type));
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,

			vdev_id = strtoull(idx, &end, 10);

			 * Now verify that we have the correct vdev id.

		if (strcmp(srchval, val) == 0)

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)

	for (c = 0; c < children; c++) {
		if ((ret = vdev_to_nvlist_iter(child[c], search,
		    avail_spare, l2cache, NULL)) != NULL) {
			 * The 'is_log' value is only set for the toplevel
			 * vdev, not the leaf vdevs. So we always lookup the
			 * log device from the root of the vdev tree (where
			 * 'log' is non-NULL).
				nvlist_lookup_uint64(child[c],
				    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*avail_spare = B_TRUE;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {

 * Given a physical path or guid, find the associated vdev.
zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
	nvlist_t *search, *nvroot, *ret;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	guid = strtoull(ppath, &end, 0);
	if (guid != 0 && *end == '\0') {
		verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
		verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH,

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,

	*avail_spare = B_FALSE;

	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);
 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
2673 zpool_vdev_is_interior(const char *name)
2675 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2676 strncmp(name, VDEV_TYPE_SPARE, strlen(VDEV_TYPE_SPARE)) == 0 ||
2678 VDEV_TYPE_REPLACING, strlen(VDEV_TYPE_REPLACING)) == 0 ||
2679 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	char *end;
	nvlist_t *nvroot, *search, *ret;
	uint64_t guid;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	guid = strtoull(path, &end, 0);
	if (guid != 0 && *end == '\0') {
		verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
	} else if (zpool_vdev_is_interior(path)) {
		verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
	} else {
		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
	}

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);

	return (ret);
}
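
/*
 * Example (illustrative sketch, not part of libzfs): look up a vdev by
 * device path and report where in the pool it lives.  The pool and device
 * names below are hypothetical and error handling is elided for brevity.
 */
static void
example_find_vdev(libzfs_handle_t *hdl)
{
	zpool_handle_t *zhp;
	nvlist_t *tgt;
	boolean_t spare = B_FALSE, l2cache = B_FALSE, islog = B_FALSE;

	if ((zhp = zpool_open(hdl, "tank")) == NULL)
		return;

	/* the lookup string may also be a guid or a name like "mirror-0" */
	if ((tgt = zpool_find_vdev(zhp, "/dev/sda", &spare, &l2cache,
	    &islog)) != NULL) {
		(void) printf("spare=%d l2cache=%d log=%d\n",
		    (int)spare, (int)l2cache, (int)islog);
	}
	zpool_close(zhp);
}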
static boolean_t
vdev_is_online(nvlist_t *nv)
{
	uint64_t ival;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
		return (B_FALSE);

	return (B_TRUE);
}
/*
 * Helper function for zpool_get_physpaths().
 */
static int
vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
    size_t *bytes_written)
{
	size_t bytes_left, pos, rsz;
	char *tmppath;
	const char *format;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
	    &tmppath) != 0)
		return (EZFS_NODEVICE);

	pos = *bytes_written;
	bytes_left = physpath_size - pos;
	format = (pos == 0) ? "%s" : " %s";

	rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
	*bytes_written += rsz;

	if (rsz >= bytes_left) {
		/* if physpath was not copied properly, clear it */
		if (bytes_left != 0) {
			physpath[pos] = 0;
		}
		return (EZFS_NOSPC);
	}
	return (0);
}
static int
vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
    size_t *rsz, boolean_t is_spare)
{
	char *type;
	int ret;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (EZFS_INVALCONFIG);

	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
		/*
		 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
		 * For a spare vdev, we only want to boot from the active
		 * spare device.
		 */
		if (is_spare) {
			uint64_t spare = 0;
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare);
			if (!spare)
				return (EZFS_INVALCONFIG);
		}

		if (vdev_is_online(nv)) {
			if ((ret = vdev_get_one_physpath(nv, physpath,
			    phypath_size, rsz)) != 0)
				return (ret);
		}
	} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
	    strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
	    strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
	    (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
		nvlist_t **child;
		uint_t count;
		int i;

		if (nvlist_lookup_nvlist_array(nv,
		    ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
			return (EZFS_INVALCONFIG);

		for (i = 0; i < count; i++) {
			ret = vdev_get_physpaths(child[i], physpath,
			    phypath_size, rsz, is_spare);
			if (ret == EZFS_NOSPC)
				return (ret);
		}
	}

	return (EZFS_POOL_INVALARG);
}
/*
 * Get phys_path for a root pool config.
 * Return 0 on success; non-zero on failure.
 */
static int
zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
{
	size_t rsz;
	nvlist_t *vdev_root;
	nvlist_t **child;
	uint_t count;
	char *type;

	rsz = 0;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &vdev_root) != 0)
		return (EZFS_INVALCONFIG);

	if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
	    nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
	    &child, &count) != 0)
		return (EZFS_INVALCONFIG);

	/*
	 * root pool can only have a single top-level vdev.
	 */
	if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1)
		return (EZFS_POOL_INVALARG);

	(void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
	    B_FALSE);

	/* No online devices */
	if (rsz == 0)
		return (EZFS_NODEVICE);

	return (0);
}
/*
 * Get phys_path for a root pool.
 * Return 0 on success; non-zero on failure.
 */
int
zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
{
	return (zpool_get_config_physpath(zhp->zpool_config, physpath,
	    phypath_size));
}
/*
 * Convert a vdev path to a GUID.  Returns GUID or 0 on error.
 *
 * If is_spare, is_l2cache, or is_log is non-NULL, then store within it
 * if the VDEV is a spare, l2cache, or log device.  If they're NULL then
 * it doesn't matter what type of vdev it is.
 */
uint64_t
zpool_vdev_path_to_guid_impl(zpool_handle_t *zhp, const char *path,
    boolean_t *is_spare, boolean_t *is_l2cache, boolean_t *is_log)
{
	uint64_t guid;
	boolean_t spare = B_FALSE, l2cache = B_FALSE, log = B_FALSE;
	nvlist_t *tgt;

	if ((tgt = zpool_find_vdev(zhp, path, &spare, &l2cache,
	    &log)) == NULL)
		return (0);

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &guid) == 0);
	if (is_spare != NULL)
		*is_spare = spare;
	if (is_l2cache != NULL)
		*is_l2cache = l2cache;
	if (is_log != NULL)
		*is_log = log;

	return (guid);
}
/* Convert a vdev path to a GUID.  Returns GUID or 0 on error. */
uint64_t
zpool_vdev_path_to_guid(zpool_handle_t *zhp, const char *path)
{
	return (zpool_vdev_path_to_guid_impl(zhp, path, NULL, NULL, NULL));
}
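
/*
 * Example (illustrative sketch, not part of libzfs): translate a device
 * path into its vdev GUID, e.g. before calling the GUID-based
 * zpool_vdev_fault() or zpool_vdev_clear().  The path is hypothetical.
 */
static void
example_path_to_guid(zpool_handle_t *zhp)
{
	uint64_t guid = zpool_vdev_path_to_guid(zhp, "/dev/sdb");

	if (guid != 0)
		(void) printf("vdev guid: %llu\n", (u_longlong_t)guid);
}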
/*
 * Bring the specified vdev online.  The 'flags' parameter is a set of the
 * ZFS_ONLINE_* flags.
 */
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
    vdev_state_t *newstate)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	char *pathname;
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	int error;

	if (flags & ZFS_ONLINE_EXPAND) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
	} else {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
	}

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if ((flags & ZFS_ONLINE_EXPAND ||
	    zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) &&
	    nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &pathname) == 0) {
		uint64_t wholedisk = 0;

		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);

		/*
		 * XXX - L2ARC 1.0 devices can't support expansion.
		 */
		if (l2cache) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cannot expand cache devices"));
			return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
		}

		if (wholedisk) {
			const char *fullpath = path;
			char buf[MAXPATHLEN];

			if (path[0] != '/') {
				error = zfs_resolve_shortname(path, buf,
				    sizeof (buf));
				if (error != 0)
					return (zfs_error(hdl, EZFS_NODEVICE,
					    msg));

				fullpath = buf;
			}

			error = zpool_relabel_disk(hdl, fullpath, msg);
			if (error != 0)
				return (error);
		}
	}

	zc.zc_cookie = VDEV_STATE_ONLINE;
	zc.zc_obj = flags;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
		if (errno == EINVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
			    "from this pool into a new one.  Use '%s' "
			    "instead"), "zpool detach");
			return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
		}
		return (zpool_standard_error(hdl, errno, msg));
	}

	*newstate = zc.zc_cookie;
	return (0);
}
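
/*
 * Example (illustrative sketch, not part of libzfs): bring a device back
 * online and ask for expansion to use any grown LUN capacity.  The device
 * name is hypothetical; errors are reported through the libzfs handle.
 */
static int
example_online_expand(zpool_handle_t *zhp)
{
	vdev_state_t newstate;
	int err;

	err = zpool_vdev_online(zhp, "/dev/sdc", ZFS_ONLINE_EXPAND,
	    &newstate);
	if (err == 0 && newstate != VDEV_STATE_HEALTHY)
		(void) printf("device online but not healthy\n");
	return (err);
}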
/*
 * Take the specified vdev offline
 */
int
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	zc.zc_cookie = VDEV_STATE_OFFLINE;
	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	case EEXIST:
		/*
		 * The log device has unplayed logs
		 */
		return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}
}
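
/*
 * Example (illustrative sketch, not part of libzfs): take a device offline
 * only until the next import by passing istmp = B_TRUE, mirroring what
 * 'zpool offline -t' does.  The device name is hypothetical.
 */
static int
example_offline_temporary(zpool_handle_t *zhp)
{
	return (zpool_vdev_offline(zhp, "/dev/sdc", B_TRUE));
}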
/*
 * Mark the given vdev faulted.
 */
int
zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = VDEV_STATE_FAULTED;
	zc.zc_obj = aux;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	switch (errno) {
	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}
}
/*
 * Mark the given vdev degraded.
 */
int
zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = VDEV_STATE_DEGRADED;
	zc.zc_obj = aux;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
/*
 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
 * a hot spare.
 */
static boolean_t
is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
{
	nvlist_t **child;
	uint_t c, children;
	char *type;

	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
		    &type) == 0);

		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
		    children == 2 && child[which] == tgt)
			return (B_TRUE);

		for (c = 0; c < children; c++)
			if (is_replacing_spare(child[c], tgt, which))
				return (B_TRUE);
	}

	return (B_FALSE);
}
/*
 * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
 */
int
zpool_vdev_attach(zpool_handle_t *zhp,
    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	int ret;
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	uint64_t val;
	char *newname;
	nvlist_t **child;
	uint_t children;
	nvlist_t *config_root;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	boolean_t rootpool = zpool_is_bootable(zhp);

	if (replacing)
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot replace %s with %s"), old_disk, new_disk);
	else
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot attach %s to %s"), new_disk, old_disk);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
	zc.zc_cookie = replacing;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0 || children != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
	}

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);

	if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL)
		return (-1);

	/*
	 * If the target is a hot spare that has been swapped in, we can only
	 * replace it with another hot spare.
	 */
	if (replacing &&
	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
	    NULL) == NULL || !avail_spare) &&
	    is_replacing_spare(config_root, tgt, 1)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "can only be replaced by another hot spare"));
		free(newname);
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	free(newname);

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);

	zcmd_free_nvlists(&zc);

	if (ret == 0) {
		if (rootpool) {
			/*
			 * XXX need a better way to prevent user from
			 * booting up a half-baked vdev.
			 */
			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
			    "sure to wait until resilver is done "
			    "before rebooting.\n"));
		}
		return (0);
	}

	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't attach to or replace this type of vdev.
		 */
		if (replacing) {
			uint64_t version = zpool_get_prop_int(zhp,
			    ZPOOL_PROP_VERSION, NULL);

			if (islog)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a log with a spare"));
			else if (version >= SPA_VERSION_MULTI_REPLACE)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "already in replacing/spare config; wait "
				    "for completion or use 'zpool detach'"));
			else
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a replacing device"));
		} else {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "can only attach to mirrors and top-level "
			    "disks"));
		}
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EINVAL:
		/*
		 * The new device must be a single disk.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
		break;

	case EBUSY:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy, "
		    "or device removal is in progress"),
		    new_disk);
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EOVERFLOW:
		/*
		 * The new device is too small.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device is too small"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EDOM:
		/*
		 * The new device has a different optimal sector size.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device has a different optimal sector size; use the "
		    "option '-o ashift=N' to override the optimal size"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case ENAMETOOLONG:
		/*
		 * The resulting top-level vdev spec won't fit in the label.
		 */
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}
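
/*
 * Example (illustrative sketch, not part of libzfs): build the minimal
 * nvroot that zpool_vdev_attach() expects -- a root vdev with exactly one
 * disk child -- and use it to replace a failed disk.  In the zpool command
 * this nvlist is normally produced by its vdev-construction helpers; the
 * hand-rolled version below only shows the required shape.  Both device
 * paths are hypothetical.
 */
static int
example_replace_disk(zpool_handle_t *zhp)
{
	nvlist_t *nvroot = fnvlist_alloc();
	nvlist_t *disk = fnvlist_alloc();
	nvlist_t *children[1];
	int err;

	fnvlist_add_string(disk, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK);
	fnvlist_add_string(disk, ZPOOL_CONFIG_PATH, "/dev/sdd1");
	fnvlist_add_uint64(disk, ZPOOL_CONFIG_WHOLE_DISK, 0ULL);

	fnvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
	children[0] = disk;
	fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, children, 1);

	/* replacing = 1: the new disk takes over from the old one */
	err = zpool_vdev_attach(zhp, "/dev/sdc1", "/dev/sdd1", nvroot, 1);

	fnvlist_free(disk);
	fnvlist_free(nvroot);
	return (err);
}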
/*
 * Detach the specified device.
 */
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
		return (0);

	switch (errno) {

	case ENOTSUP:
		/*
		 * Can't detach from this type of vdev.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
		    "applicable to mirror and replacing vdevs"));
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}
/*
 * Find a mirror vdev in the source nvlist.
 *
 * The mchild array contains a list of disks in one of the top-level mirrors
 * of the source pool.  The schild array contains a list of disks that the
 * user specified on the command line.  We loop over the mchild array to
 * see if any entry in the schild array matches.
 *
 * If a disk in the mchild array is found in the schild array, we return
 * the index of that entry.  Otherwise we return -1.
 */
static int
find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
    nvlist_t **schild, uint_t schildren)
{
	uint_t mc;

	for (mc = 0; mc < mchildren; mc++) {
		uint_t sc;
		char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
		    mchild[mc], 0);

		for (sc = 0; sc < schildren; sc++) {
			char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
			    schild[sc], 0);
			boolean_t result = (strcmp(mpath, spath) == 0);

			free(spath);
			if (result) {
				free(mpath);
				return (mc);
			}
		}

		free(mpath);
	}

	return (-1);
}
/*
 * Split a mirror pool.  If newroot points to null, then a new nvlist
 * is generated and it is the responsibility of the caller to free it.
 */
int
zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
    nvlist_t *props, splitflags_t flags)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
	nvlist_t **varray = NULL, *zc_props = NULL;
	uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t vers, readonly = B_FALSE;
	boolean_t freelist = B_FALSE, memory_err = B_TRUE;
	int retval = 0;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);

	if (!zpool_name_valid(hdl, B_FALSE, newname))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		(void) fprintf(stderr, gettext("Internal error: unable to "
		    "retrieve pool configuration\n"));
		return (-1);
	}

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
	    == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);

	if (props) {
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
		if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
		    props, vers, flags, msg)) == NULL)
			return (-1);
		(void) nvlist_lookup_uint64(zc_props,
		    zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
		if (readonly) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "property %s can only be set at import time"),
			    zpool_prop_to_name(ZPOOL_PROP_READONLY));
			return (-1);
		}
	}

	if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "Source pool is missing vdev tree"));
		nvlist_free(zc_props);
		return (-1);
	}

	varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
	vcount = 0;

	if (*newroot == NULL ||
	    nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
	    &newchild, &newchildren) != 0)
		newchildren = 0;

	for (c = 0; c < children; c++) {
		uint64_t is_log = B_FALSE, is_hole = B_FALSE;
		char *type;
		nvlist_t **mchild, *vdev;
		uint_t mchildren;
		int entry;

		/*
		 * Unlike cache & spares, slogs are stored in the
		 * ZPOOL_CONFIG_CHILDREN array.  We filter them out here.
		 */
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log);
		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
		    &is_hole);
		if (is_log || is_hole) {
			/*
			 * Create a hole vdev and put it in the config.
			 */
			if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
				goto out;
			if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
			    VDEV_TYPE_HOLE) != 0)
				goto out;
			if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
			    1) != 0)
				goto out;
			if (lastlog == 0)
				lastlog = vcount;
			varray[vcount++] = vdev;
			continue;
		}
		lastlog = 0;
		verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
		    == 0);

		if (strcmp(type, VDEV_TYPE_INDIRECT) == 0) {
			vdev = child[c];
			if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
				goto out;
			continue;
		} else if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Source pool must be composed only of mirrors\n"));
			retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
			goto out;
		}

		verify(nvlist_lookup_nvlist_array(child[c],
		    ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);

		/* find or add an entry for this top-level vdev */
		if (newchildren > 0 &&
		    (entry = find_vdev_entry(zhp, mchild, mchildren,
		    newchild, newchildren)) >= 0) {
			/* We found a disk that the user specified. */
			vdev = mchild[entry];
			++found;
		} else {
			/* User didn't specify a disk for this vdev. */
			vdev = mchild[mchildren - 1];
		}

		if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
			goto out;
	}

	/* did we find every disk the user specified? */
	if (found != newchildren) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
		    "include at most one disk from each mirror"));
		retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
		goto out;
	}

	/* Prepare the nvlist for populating. */
	if (*newroot == NULL) {
		if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
			goto out;
		freelist = B_TRUE;
		if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0)
			goto out;
	} else {
		verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
	}

	/* Add all the children we found */
	if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
	    lastlog == 0 ? vcount : lastlog) != 0)
		goto out;

	/*
	 * If we're just doing a dry run, exit now with success.
	 */
	if (flags.dryrun) {
		memory_err = B_FALSE;
		freelist = B_FALSE;
		goto out;
	}

	/* now build up the config list & call the ioctl */
	if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
		goto out;

	if (nvlist_add_nvlist(newconfig,
	    ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
	    nvlist_add_string(newconfig,
	    ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
	    nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
		goto out;

	/*
	 * The new pool is automatically part of the namespace unless we
	 * explicitly export it.
	 */
	if (!flags.import)
		zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
	if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
		goto out;
	if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto out;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
		retval = zpool_standard_error(hdl, errno, msg);
		goto out;
	}

	freelist = B_FALSE;
	memory_err = B_FALSE;

out:
	if (varray != NULL) {
		int v;

		for (v = 0; v < vcount; v++)
			nvlist_free(varray[v]);
		free(varray);
	}
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(newconfig);
	if (freelist) {
		nvlist_free(*newroot);
		*newroot = NULL;
	}

	if (retval != 0)
		return (retval);

	if (memory_err)
		return (no_memory(hdl));

	return (0);
}
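
/*
 * Example (illustrative sketch, not part of libzfs): dry-run a mirror
 * split.  With flags.dryrun set the kernel is never called; *newroot
 * receives the would-be config of the new pool, which the caller must
 * free.  The new pool name is hypothetical.
 */
static int
example_split_dryrun(zpool_handle_t *zhp)
{
	char newname[] = "tanksplit";
	nvlist_t *newroot = NULL;
	splitflags_t flags = { 0 };
	int err;

	flags.dryrun = 1;
	err = zpool_vdev_split(zhp, newname, &newroot, NULL, flags);
	if (err == 0 && newroot != NULL)
		nvlist_free(newroot);
	return (err);
}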
/*
 * Remove the given device.
 */
int
zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t version;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if (islog && version < SPA_VERSION_HOLES) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "pool must be upgraded to support log removal"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (!islog && !avail_spare && !l2cache && zpool_is_bootable(zhp)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "root pool can not have removed devices, "
		    "because GRUB does not understand them"));
		return (zfs_error(hdl, EINVAL, msg));
	}

	zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
		return (0);

	switch (errno) {

	case EINVAL:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "invalid config; all top-level vdevs must "
		    "have the same sector size and not be raidz."));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
		break;

	case EBUSY:
		if (islog) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Mount encrypted datasets to replay logs."));
		} else {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Pool busy; removal may already be in progress"));
		}
		(void) zfs_error(hdl, EZFS_BUSY, msg);
		break;

	case EACCES:
		if (islog) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Mount encrypted datasets to replay logs."));
			(void) zfs_error(hdl, EZFS_BUSY, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}
int
zpool_vdev_remove_cancel(zpool_handle_t *zhp)
{
	zfs_cmd_t zc;
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot cancel removal"));

	bzero(&zc, sizeof (zc));
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = 1;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
int
zpool_vdev_indirect_size(zpool_handle_t *zhp, const char *path,
    uint64_t *sizep)
{
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot determine indirect size of %s"),
	    path);

	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	if (avail_spare || l2cache || islog) {
		*sizep = 0;
		return (0);
	}

	if (nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_INDIRECT_SIZE, sizep) != 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "indirect size not available"));
		return (zfs_error(hdl, EINVAL, msg));
	}
	return (0);
}
/*
 * Clear the errors for the pool, or the particular device if specified.
 */
int
zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	nvlist_t *tgt;
	zpool_load_policy_t policy;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	nvlist_t *nvi = NULL;
	int error;

	if (path)
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    path);
	else
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (path) {
		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
		    &l2cache, NULL)) == NULL)
			return (zfs_error(hdl, EZFS_NODEVICE, msg));

		/*
		 * Don't allow error clearing for hot spares.  Do allow
		 * error clearing for l2cache devices.
		 */
		if (avail_spare)
			return (zfs_error(hdl, EZFS_ISSPARE, msg));

		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
		    &zc.zc_guid) == 0);
	}

	zpool_get_load_policy(rewindnvl, &policy);
	zc.zc_cookie = policy.zlp_rewind;

	if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
		return (-1);

	if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
		return (-1);

	while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (!error || ((policy.zlp_rewind & ZPOOL_TRY_REWIND) &&
	    errno != EPERM && errno != EACCES)) {
		if (policy.zlp_rewind &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
			zpool_rewind_exclaim(hdl, zc.zc_name,
			    ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0),
			    nvi);
			nvlist_free(nvi);
		}
		zcmd_free_nvlists(&zc);
		return (0);
	}

	zcmd_free_nvlists(&zc);
	return (zpool_standard_error(hdl, errno, msg));
}
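
/*
 * Example (illustrative sketch, not part of libzfs): clear error counts
 * for a single device without requesting any rewind, similar to
 * 'zpool clear tank sdc'.  An empty policy nvlist is passed since no
 * rewind options are needed; the device path is hypothetical.
 */
static int
example_clear_device(zpool_handle_t *zhp)
{
	nvlist_t *policy = fnvlist_alloc();
	int err;

	err = zpool_clear(zhp, "/dev/sdc", policy);
	fnvlist_free(policy);
	return (err);
}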
/*
 * Similar to zpool_clear(), but takes a GUID (used by fmd).
 */
int
zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
	    (u_longlong_t)guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = ZPOOL_NO_REWIND;

	if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
/*
 * Change the GUID for a pool.
 */
int
zpool_reguid(zpool_handle_t *zhp)
{
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zfs_cmd_t zc = {"\0"};

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}
/*
 * Reopen the pool.
 */
int
zpool_reopen_one(zpool_handle_t *zhp, void *data)
{
	libzfs_handle_t *hdl = zpool_get_handle(zhp);
	const char *pool_name = zpool_get_name(zhp);
	boolean_t *scrub_restart = data;
	int error;

	error = lzc_reopen(pool_name, *scrub_restart);
	if (error) {
		return (zpool_standard_error_fmt(hdl, error,
		    dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), pool_name));
	}

	return (0);
}
/* call into libzfs_core to execute the sync IOCTL per pool */
int
zpool_sync_one(zpool_handle_t *zhp, void *data)
{
	int ret;
	libzfs_handle_t *hdl = zpool_get_handle(zhp);
	const char *pool_name = zpool_get_name(zhp);
	boolean_t *force = data;
	nvlist_t *innvl = fnvlist_alloc();

	fnvlist_add_boolean_value(innvl, "force", *force);
	if ((ret = lzc_sync(pool_name, innvl, NULL)) != 0) {
		nvlist_free(innvl);
		return (zpool_standard_error_fmt(hdl, ret,
		    dgettext(TEXT_DOMAIN, "sync '%s' failed"), pool_name));
	}
	nvlist_free(innvl);

	return (0);
}
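
/*
 * Example (illustrative sketch, not part of libzfs): force a txg sync on
 * every imported pool by driving zpool_sync_one() through zpool_iter(),
 * the way the 'zpool sync' command does.
 */
static int
example_sync_all(libzfs_handle_t *hdl)
{
	boolean_t force = B_FALSE;

	return (zpool_iter(hdl, zpool_sync_one, &force));
}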
#define	PATH_BUF_LEN	64

/*
 * Given a vdev, return the name to display in iostat.  If the vdev has a path,
 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
 * We also check if this is a whole disk, in which case we strip off the
 * trailing 's0' slice name.
 *
 * This routine is also responsible for identifying when disks have been
 * reconfigured in a new location.  The kernel will have opened the device by
 * devid, but the path will still refer to the old location.  To catch this, we
 * first do a path -> devid translation (which is fast for the common case).
 * If the devid matches, we're done.  If not, we do a reverse devid -> path
 * translation and issue the appropriate ioctl() to update the path of the
 * vdev.  If 'zhp' is NULL, then this is an exported pool, and we don't need to
 * do any of these checks.
 */
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
    int name_flags)
{
	char *path, *type, *env;
	uint64_t value;
	char buf[PATH_BUF_LEN];
	char tmpbuf[PATH_BUF_LEN];

	/*
	 * vdev_name will be "root"/"root-0" for the root vdev, but it is the
	 * zpool name that will be displayed to the user.
	 */
	verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
	if (zhp != NULL && strcmp(type, "root") == 0)
		return (zfs_strdup(hdl, zpool_get_name(zhp)));

	env = getenv("ZPOOL_VDEV_NAME_PATH");
	if (env && (strtoul(env, NULL, 0) > 0 ||
	    !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
		name_flags |= VDEV_NAME_PATH;

	env = getenv("ZPOOL_VDEV_NAME_GUID");
	if (env && (strtoul(env, NULL, 0) > 0 ||
	    !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
		name_flags |= VDEV_NAME_GUID;

	env = getenv("ZPOOL_VDEV_NAME_FOLLOW_LINKS");
	if (env && (strtoul(env, NULL, 0) > 0 ||
	    !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
		name_flags |= VDEV_NAME_FOLLOW_LINKS;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
	    name_flags & VDEV_NAME_GUID) {
		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value);
		(void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value);
		path = buf;
	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
		if (name_flags & VDEV_NAME_FOLLOW_LINKS) {
			char *rp = realpath(path, NULL);
			if (rp) {
				strlcpy(buf, rp, sizeof (buf));
				path = buf;
				free(rp);
			}
		}

		/*
		 * For a block device only use the name.
		 */
		if ((strcmp(type, VDEV_TYPE_DISK) == 0) &&
		    !(name_flags & VDEV_NAME_PATH)) {
			path = zfs_strip_path(path);
		}

		/*
		 * Remove the partition from the path if this is a whole disk.
		 */
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value)
		    == 0 && value && !(name_flags & VDEV_NAME_PATH)) {
			return (zfs_strip_partition(path));
		}
	} else {
		path = type;

		/*
		 * If it's a raidz device, we need to stick in the parity level.
		 */
		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
			    &value) == 0);
			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
			    (u_longlong_t)value);
			path = buf;
		}

		/*
		 * We identify each top-level vdev by using a <type-id>
		 * naming convention.
		 */
		if (name_flags & VDEV_NAME_TYPE_ID) {
			uint64_t id;
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);
			(void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
			    path, (u_longlong_t)id);
			path = tmpbuf;
		}
	}

	return (zfs_strdup(hdl, path));
}
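
/*
 * Example (illustrative sketch, not part of libzfs): print the display
 * name of every top-level child of a pool's root vdev, honoring the
 * ZPOOL_VDEV_NAME_* environment overrides handled above.  The returned
 * string must be freed by the caller.
 */
static void
example_print_children(libzfs_handle_t *hdl, zpool_handle_t *zhp)
{
	nvlist_t *nvroot, **child;
	uint_t c, children;

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		char *name = zpool_vdev_name(hdl, zhp, child[c],
		    VDEV_NAME_TYPE_ID);
		(void) printf("%s\n", name);
		free(name);
	}
}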
static int
zbookmark_mem_compare(const void *a, const void *b)
{
	return (memcmp(a, b, sizeof (zbookmark_phys_t)));
}
/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	uint64_t count;
	zbookmark_phys_t *zb = NULL;
	int i;

	/*
	 * Retrieve the raw error list from the kernel.  If the number of errors
	 * has increased, allocate more space and continue until we get the
	 * entire list.
	 */
	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
	    &count) == 0);
	if (count == 0)
		return (0);
	zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
	    count * sizeof (zbookmark_phys_t));
	zc.zc_nvlist_dst_size = count;
	(void) strcpy(zc.zc_name, zhp->zpool_name);
	for (;;) {
		if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_ERROR_LOG,
		    &zc) != 0) {
			free((void *)(uintptr_t)zc.zc_nvlist_dst);
			if (errno == ENOMEM) {
				void *dst;

				count = zc.zc_nvlist_dst_size;
				dst = zfs_alloc(zhp->zpool_hdl, count *
				    sizeof (zbookmark_phys_t));
				zc.zc_nvlist_dst = (uintptr_t)dst;
			} else {
				return (zpool_standard_error_fmt(hdl, errno,
				    dgettext(TEXT_DOMAIN, "errors: List of "
				    "errors unavailable")));
			}
		} else {
			break;
		}
	}

	/*
	 * Sort the resulting bookmarks.  This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process.  So we point the start of our
	 * array appropriately and decrement the total number of elements.
	 */
	zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
	    zc.zc_nvlist_dst_size;
	count -= zc.zc_nvlist_dst_size;

	qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);

	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);

	/*
	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
	 */
	for (i = 0; i < count; i++) {
		nvlist_t *nv;

		/* ignoring zb_blkid and zb_level for now */
		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
		    zb[i-1].zb_object == zb[i].zb_object)
			continue;

		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
			goto nomem;
		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
		    zb[i].zb_objset) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
		    zb[i].zb_object) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		nvlist_free(nv);
	}

	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (0);

nomem:
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (no_memory(zhp->zpool_hdl));
}
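
/*
 * Example (illustrative sketch, not part of libzfs): walk the
 * de-duplicated error log and print each <dataset, object> pair.  The
 * nvlist names themselves (the "ejk" keys above) carry no meaning for
 * callers; only the payload matters.
 */
static void
example_dump_errlog(zpool_handle_t *zhp)
{
	nvlist_t *nverrlist = NULL;
	nvpair_t *elem = NULL;

	if (zpool_get_errlog(zhp, &nverrlist) != 0 || nverrlist == NULL)
		return;

	while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
		nvlist_t *nv;

		verify(nvpair_value_nvlist(elem, &nv) == 0);
		(void) printf("dataset %llu object %llu\n",
		    (u_longlong_t)fnvlist_lookup_uint64(nv,
		    ZPOOL_ERR_DATASET),
		    (u_longlong_t)fnvlist_lookup_uint64(nv,
		    ZPOOL_ERR_OBJECT));
	}
	nvlist_free(nverrlist);
}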
/*
 * Upgrade a ZFS pool to the latest on-disk version.
 */
int
zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strcpy(zc.zc_name, zhp->zpool_name);
	zc.zc_cookie = new_version;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
		return (zpool_standard_error_fmt(hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
		    zhp->zpool_name));

	return (0);
}
void
zfs_save_arguments(int argc, char **argv, char *string, int len)
{
	int i;

	(void) strlcpy(string, basename(argv[0]), len);
	for (i = 1; i < argc; i++) {
		(void) strlcat(string, " ", len);
		(void) strlcat(string, argv[i], len);
	}
}
int
zpool_log_history(libzfs_handle_t *hdl, const char *message)
{
	zfs_cmd_t zc = {"\0"};
	nvlist_t *args;
	int err;

	args = fnvlist_alloc();
	fnvlist_add_string(args, "message", message);
	err = zcmd_write_src_nvlist(hdl, &zc, args);
	if (err == 0)
		err = zfs_ioctl(hdl, ZFS_IOC_LOG_HISTORY, &zc);
	nvlist_free(args);
	zcmd_free_nvlists(&zc);
	return (err);
}
/*
 * Perform ioctl to get some command history of a pool.
 *
 * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
 * logical offset of the history buffer to start reading from.
 *
 * Upon return, 'off' is the next logical offset to read from and
 * 'len' is the actual amount of bytes read into 'buf'.
 */
static int
get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	zc.zc_history = (uint64_t)(uintptr_t)buf;
	zc.zc_history_len = *len;
	zc.zc_history_offset = *off;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
		switch (errno) {
		case EPERM:
			return (zfs_error_fmt(hdl, EZFS_PERM,
			    dgettext(TEXT_DOMAIN,
			    "cannot show history for pool '%s'"),
			    zhp->zpool_name));
		case ENOENT:
			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s'"), zhp->zpool_name));
		case ENOTSUP:
			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
			    "'%s', pool must be upgraded"), zhp->zpool_name));
		default:
			return (zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN,
			    "cannot get history for '%s'"), zhp->zpool_name));
		}
	}

	*len = zc.zc_history_len;
	*off = zc.zc_history_offset;

	return (0);
}
/*
 * Retrieve the command history of a pool.
 */
int
zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp, uint64_t *off,
    boolean_t *eof)
{
	char *buf;
	int buflen = 128 * 1024;
	nvlist_t **records = NULL;
	uint_t numrecords = 0;
	int err = 0, i;
	uint64_t start = *off;

	buf = malloc(buflen);
	if (buf == NULL)
		return (ENOMEM);
	/* process about 1MB a time */
	while (*off - start < 1024 * 1024) {
		uint64_t bytes_read = buflen;
		uint64_t leftover;

		if ((err = get_history(zhp, buf, off, &bytes_read)) != 0)
			break;

		/* if nothing else was read in, we're at EOF, just return */
		if (!bytes_read) {
			*eof = B_TRUE;
			break;
		}

		if ((err = zpool_history_unpack(buf, bytes_read,
		    &leftover, &records, &numrecords)) != 0)
			break;
		*off -= leftover;
		if (leftover == bytes_read) {
			/*
			 * no progress made, because buffer is not big enough
			 * to hold this record; resize and retry.
			 */
			buflen *= 2;
			free(buf);
			buf = malloc(buflen);
			if (buf == NULL)
				return (ENOMEM);
		}
	}

	free(buf);

	if (!err) {
		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
		    records, numrecords) == 0);
	}
	for (i = 0; i < numrecords; i++)
		nvlist_free(records[i]);
	free(records);

	return (err);
}
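
/*
 * Example (illustrative sketch, not part of libzfs): page through the
 * entire command history of a pool, printing each ZPOOL_HIST_CMD entry
 * when present.  Records without a command string are skipped.
 */
static void
example_dump_history(zpool_handle_t *zhp)
{
	uint64_t off = 0;
	boolean_t eof = B_FALSE;

	while (!eof) {
		nvlist_t *nvhis = NULL, **records;
		uint_t c, numrecords;

		if (zpool_get_history(zhp, &nvhis, &off, &eof) != 0)
			break;
		if (nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
		    &records, &numrecords) == 0) {
			for (c = 0; c < numrecords; c++) {
				char *cmd;
				if (nvlist_lookup_string(records[c],
				    ZPOOL_HIST_CMD, &cmd) == 0)
					(void) printf("%s\n", cmd);
			}
		}
		nvlist_free(nvhis);
	}
}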
/*
 * Retrieve the next event given the passed 'zevent_fd' file descriptor.
 * If there is a new event available 'nvp' will contain a newly allocated
 * nvlist and 'dropped' will be set to the number of missed events since
 * the last call to this function.  When 'nvp' is set to NULL it indicates
 * no new events are available.  In either case the function returns 0 and
 * it is up to the caller to free 'nvp'.  In the case of a fatal error the
 * function will return a non-zero value.  When the function is called in
 * blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed),
 * it will not return until a new event is available.
 */
int
zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
    int *dropped, unsigned flags, int zevent_fd)
{
	zfs_cmd_t zc = {"\0"};
	int error = 0;

	*nvp = NULL;
	*dropped = 0;
	zc.zc_cleanup_fd = zevent_fd;

	if (flags & ZEVENT_NONBLOCK)
		zc.zc_guid = ZEVENT_NONBLOCK;

	if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
		return (-1);

retry:
	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
		switch (errno) {
		case ESHUTDOWN:
			error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
			    dgettext(TEXT_DOMAIN, "zfs shutdown"));
			goto out;
		case ENOENT:
			/* Blocking error case should not occur */
			if (!(flags & ZEVENT_NONBLOCK))
				error = zpool_standard_error_fmt(hdl, errno,
				    dgettext(TEXT_DOMAIN, "cannot get event"));

			goto out;
		case ENOMEM:
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				error = zfs_error_fmt(hdl, EZFS_NOMEM,
				    dgettext(TEXT_DOMAIN, "cannot get event"));
				goto out;
			} else {
				goto retry;
			}
		default:
			error = zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			goto out;
		}
	}

	error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
	if (error != 0)
		goto out;

	*dropped = (int)zc.zc_cookie;
out:
	zcmd_free_nvlists(&zc);

	return (error);
}
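
/*
 * Example (illustrative sketch, not part of libzfs): non-blocking drain
 * of all pending zevents.  A dedicated descriptor on ZFS_DEV ("/dev/zfs")
 * is required so the kernel can track this consumer's position in the
 * event ring; this sketch assumes <fcntl.h> and <unistd.h> are available.
 */
static void
example_drain_events(libzfs_handle_t *hdl)
{
	int zevent_fd = open(ZFS_DEV, O_RDWR);

	if (zevent_fd < 0)
		return;

	for (;;) {
		nvlist_t *event = NULL;
		int dropped = 0;

		if (zpool_events_next(hdl, &event, &dropped,
		    ZEVENT_NONBLOCK, zevent_fd) != 0 || event == NULL)
			break;
		if (dropped > 0)
			(void) printf("missed %d events\n", dropped);
		nvlist_free(event);
	}
	(void) close(zevent_fd);
}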
/*
 * Clear all events.
 */
int
zpool_events_clear(libzfs_handle_t *hdl, int *count)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot clear events"));

	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
		return (zpool_standard_error_fmt(hdl, errno, msg));

	if (count != NULL)
		*count = (int)zc.zc_cookie; /* # of events cleared */

	return (0);
}
/*
 * Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for
 * the passed zevent_fd file handle.  On success zero is returned,
 * otherwise -1 is returned and hdl->libzfs_error is set to the errno.
 */
int
zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)
{
	zfs_cmd_t zc = {"\0"};
	int error = 0;

	zc.zc_guid = eid;
	zc.zc_cleanup_fd = zevent_fd;

	if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {
		switch (errno) {
		case ENOENT:
			error = zfs_error_fmt(hdl, EZFS_NOENT,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			break;

		case ENOMEM:
			error = zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			break;

		default:
			error = zpool_standard_error_fmt(hdl, errno,
			    dgettext(TEXT_DOMAIN, "cannot get event"));
			break;
		}
	}

	return (error);
}
static void
zpool_obj_to_path_impl(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len, boolean_t always_unmounted)
{
	zfs_cmd_t zc = {"\0"};
	boolean_t mounted = B_FALSE;
	char *mntpnt = NULL;
	char dsname[ZFS_MAX_DATASET_NAME_LEN];

	if (dsobj == 0) {
		/* special case for the MOS */
		(void) snprintf(pathname, len, "<metadata>:<0x%llx>",
		    (longlong_t)obj);
		return;
	}

	/* get the dataset's name */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_obj = dsobj;
	if (zfs_ioctl(zhp->zpool_hdl,
	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
		/* just write out a path of two object numbers */
		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
		    (longlong_t)dsobj, (longlong_t)obj);
		return;
	}
	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));

	/* find out if the dataset is mounted */
	mounted = !always_unmounted && is_mounted(zhp->zpool_hdl, dsname,
	    &mntpnt);

	/* get the corrupted object's path */
	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
	zc.zc_obj = obj;
	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_OBJ_TO_PATH,
	    &zc) == 0) {
		if (mounted) {
			(void) snprintf(pathname, len, "%s%s", mntpnt,
			    zc.zc_value);
		} else {
			(void) snprintf(pathname, len, "%s:%s",
			    dsname, zc.zc_value);
		}
	} else {
		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
		    (longlong_t)obj);
	}
	free(mntpnt);
}
void
zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_FALSE);
}

void
zpool_obj_to_path_ds(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
    char *pathname, size_t len)
{
	zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_TRUE);
}
/*
 * Wait while the specified activity is in progress in the pool.
 */
int
zpool_wait(zpool_handle_t *zhp, zpool_wait_activity_t activity)
{
	boolean_t missing;

	int error = zpool_wait_status(zhp, activity, &missing, NULL);

	if (missing) {
		(void) zpool_standard_error_fmt(zhp->zpool_hdl, ENOENT,
		    dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),
		    zhp->zpool_name);
		return (ENOENT);
	} else {
		return (error);
	}
}
/*
 * Wait for the given activity and return the status of the wait (whether or
 * not any waiting was done) in the 'waited' parameter.  Non-existent pools
 * are reported via the 'missing' parameter, rather than by printing an error
 * message.  This is convenient when this function is called in a loop over a
 * long period of time (as it is, for example, by zpool's wait cmd).  In that
 * scenario, a pool being exported or destroyed should be considered a normal
 * event, so we don't want to print an error when we find that the pool
 * doesn't exist.
 */
int
zpool_wait_status(zpool_handle_t *zhp, zpool_wait_activity_t activity,
    boolean_t *missing, boolean_t *waited)
{
	int error = lzc_wait(zhp->zpool_name, activity, waited);
	*missing = (error == ENOENT);
	if (*missing)
		return (0);

	if (error != 0) {
		(void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
		    dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),
		    zhp->zpool_name);
	}

	return (error);
}
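
/*
 * Example (illustrative sketch, not part of libzfs): block until an
 * ongoing resilver finishes, treating pool disappearance as a normal
 * termination, the way the 'zpool wait' command does.
 */
static void
example_wait_resilver(zpool_handle_t *zhp)
{
	boolean_t missing = B_FALSE, waited = B_FALSE;

	if (zpool_wait_status(zhp, ZPOOL_WAIT_RESILVER, &missing,
	    &waited) == 0 && !missing && waited)
		(void) printf("resilver finished\n");
}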
int
zpool_set_bootenv(zpool_handle_t *zhp, const char *envmap)
{
	int error = lzc_set_bootenv(zhp->zpool_name, envmap);
	if (error != 0) {
		(void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
		    dgettext(TEXT_DOMAIN,
		    "error setting bootenv in pool '%s'"), zhp->zpool_name);
	}

	return (error);
}
int
zpool_get_bootenv(zpool_handle_t *zhp, char *outbuf, size_t size, off_t offset)
{
	nvlist_t *nvl = NULL;
	int error = lzc_get_bootenv(zhp->zpool_name, &nvl);
	if (error != 0) {
		(void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
		    dgettext(TEXT_DOMAIN,
		    "error getting bootenv in pool '%s'"), zhp->zpool_name);
		return (-1);
	}

	char *envmap = fnvlist_lookup_string(nvl, "envmap");
	if (offset >= strlen(envmap)) {
		fnvlist_free(nvl);
		return (0);
	}

	strncpy(outbuf, envmap + offset, size);
	int bytes = MIN(strlen(envmap + offset), size);
	fnvlist_free(nvl);
	return (bytes);
}