/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
 * Copyright (c) 2018 Datto Inc.
 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
 */
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/efi_partition.h>
#include <sys/systeminfo.h>
#include <sys/zfs_ioctl.h>
#include <sys/vdev_disk.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"
static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
static boolean_t zpool_vdev_is_interior(const char *name);
typedef struct prop_flags {
	int create:1;	/* Validate property on creation */
	int import:1;	/* Validate property on import */
} prop_flags_t;
/*
 * ====================================================================
 * zpool property functions
 * ====================================================================
 */
static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = {"\0"};
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}
int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}
static const char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t ival;
	char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}
uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}
/*
 * Map VDEV STATE to printed strings.
 */
const char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return (gettext("SPLIT"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));

	default:
		break;
	}

	return (gettext("UNKNOWN"));
}
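
/*
 * Illustrative sketch (not part of the original file): a caller that wants
 * the display string for a vdev feeds the state and aux fields of its
 * vdev_stat_t through this function, e.g.:
 *
 *	vdev_stat_t *vs;	(obtained via ZPOOL_CONFIG_VDEV_STATS)
 *	const char *state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
 *	(void) printf("state: %s\n", state);
 *
 * This is exactly how zpool_get_state_str() below derives a pool's health
 * string from its top-level vdev stats.
 */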
/*
 * Map POOL STATE to printed strings.
 */
const char *
zpool_pool_state_to_name(pool_state_t state)
{
	switch (state) {
	default:
		break;
	case POOL_STATE_ACTIVE:
		return (gettext("ACTIVE"));
	case POOL_STATE_EXPORTED:
		return (gettext("EXPORTED"));
	case POOL_STATE_DESTROYED:
		return (gettext("DESTROYED"));
	case POOL_STATE_SPARE:
		return (gettext("SPARE"));
	case POOL_STATE_L2CACHE:
		return (gettext("L2CACHE"));
	case POOL_STATE_UNINITIALIZED:
		return (gettext("UNINITIALIZED"));
	case POOL_STATE_UNAVAIL:
		return (gettext("UNAVAIL"));
	case POOL_STATE_POTENTIALLY_ACTIVE:
		return (gettext("POTENTIALLY_ACTIVE"));
	}

	return (gettext("UNKNOWN"));
}
/*
 * Given a pool handle, return the pool health string ("ONLINE", "DEGRADED",
 * "SUSPENDED", etc).
 */
const char *
zpool_get_state_str(zpool_handle_t *zhp)
{
	zpool_errata_t errata;
	zpool_status_t status;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;
	const char *str;

	status = zpool_get_status(zhp, NULL, &errata);

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		str = gettext("FAULTED");
	} else if (status == ZPOOL_STATUS_IO_FAILURE_WAIT ||
	    status == ZPOOL_STATUS_IO_FAILURE_MMP) {
		str = gettext("SUSPENDED");
	} else {
		verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
		verify(nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
		    == 0);
		str = zpool_state_to_name(vs->vs_state, vs->vs_aux);
	}
	return (str);
}
/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
    size_t len, zprop_source_t *srctype, boolean_t literal)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, zpool_get_state_str(zhp), len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_FREEING:
		case ZPOOL_PROP_LEAKED:
		case ZPOOL_PROP_ASHIFT:
			if (literal)
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			else
				(void) zfs_nicenum(intval, buf, len);
			break;

		case ZPOOL_PROP_EXPANDSZ:
		case ZPOOL_PROP_CHECKPOINT:
			if (intval == 0) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicebytes(intval, buf, len);
			}
			break;

		case ZPOOL_PROP_CAPACITY:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_FRAGMENTATION:
			if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;

		case ZPOOL_PROP_DEDUPRATIO:
			if (literal)
				(void) snprintf(buf, len, "%llu.%02llu",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
			else
				(void) snprintf(buf, len, "%llu.%02llux",
				    (u_longlong_t)(intval / 100),
				    (u_longlong_t)(intval % 100));
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf, zpool_get_state_str(zhp), len);
			break;
		case ZPOOL_PROP_VERSION:
			if (intval >= SPA_VERSION_FEATURES) {
				(void) snprintf(buf, len, "-");
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}
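
/*
 * Illustrative usage sketch (not part of the original file): reading the
 * "health" and "capacity" properties into a caller-supplied buffer. With
 * 'literal' set, capacity comes back as a bare number ("42") rather than
 * the human-readable form ("42%").
 *
 *	char buf[ZFS_MAXPROPLEN];
 *	zprop_source_t src;
 *
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf, sizeof (buf),
 *	    &src, B_FALSE) == 0)
 *		(void) printf("health: %s\n", buf);
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_CAPACITY, buf, sizeof (buf),
 *	    NULL, B_TRUE) == 0)
 *		(void) printf("capacity: %s\n", buf);
 */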
/*
 * Check that the bootfs name has the same pool name as the pool it is being
 * set on. Assumes bootfs is a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
	int len = strlen(pool);
	if (bootfs[0] == '\0')
		return (B_FALSE);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}
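
/*
 * Example (sketch, not in the original file): for pool "tank", the values
 * "tank" and "tank/ROOT/default" are accepted by bootfs_name_valid(), while
 * "tankX", "other/root" and "" are rejected, since the bootfs must name the
 * pool itself or a dataset under it.
 */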
boolean_t
zpool_is_bootable(zpool_handle_t *zhp)
{
	char bootfs[ZFS_MAX_DATASET_NAME_LEN];

	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
	    sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-",
	    sizeof (bootfs)) != 0);
}
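
/*
 * Sketch (not in the original file): callers such as zpool_vdev_attach()
 * below use this predicate to apply extra restrictions to root pools, e.g.:
 *
 *	if (zpool_is_bootable(zhp))
 *		... reject layouts the boot loader cannot read ...
 */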
/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash, *check;
	struct stat64 statbuf;
	zpool_handle_t *zhp;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		prop = zpool_name_to_prop(propname);
		if (prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname)) {
			int err;
			char *fname = strchr(propname, '@') + 1;

			err = zfeature_lookup_name(fname, NULL);
			if (err != 0) {
				ASSERT3U(err, ==, ENOENT);
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid feature '%s'"), fname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvpair_type(elem) != DATA_TYPE_STRING) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be a string"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			(void) nvpair_value_string(elem, &strval);
			if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0 &&
			    strcmp(strval, ZFS_FEATURE_DISABLED) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'enabled' or 'disabled'"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (!flags.create &&
			    strcmp(strval, ZFS_FEATURE_DISABLED) == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'disabled' at creation time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvlist_add_uint64(retprops, propname, 0) != 0) {
				(void) no_memory(hdl);
				goto error;
			}
			continue;
		}

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if (prop == ZPOOL_PROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case ZPOOL_PROP_VERSION:
			if (intval < version ||
			    !SPA_VERSION_IS_SUPPORTED(intval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %llu is invalid."),
				    propname, (u_longlong_t)intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_ASHIFT:
			if (intval != 0 &&
			    (intval < ASHIFT_MIN || intval > ASHIFT_MAX)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid '%s=%llu' property: only values "
				    "between %" PRId32 " and %" PRId32 " "
				    "are allowed.\n"),
				    propname, (u_longlong_t)intval,
				    ASHIFT_MIN, ASHIFT_MAX);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (flags.create || flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * The bootfs property value must be a dataset name,
			 * and the dataset must reside in the pool it is being
			 * set on.
			 */
			if (!bootfs_name_valid(poolname, strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!flags.create && !flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;

		case ZPOOL_PROP_COMMENT:
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "comment may only have printable "
					    "characters"));
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "comment must not exceed %d characters"),
				    ZPROP_MAX_COMMENT);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_READONLY:
			if (!flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_TNAME:
			if (!flags.create) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "creation time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_MULTIHOST:
			if (get_system_hostid() == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "requires a non-zero system hostid"));
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;

		default:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "property '%s'(%d) not defined"), propname, prop);
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}
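
/*
 * Illustrative sketch (not part of the original file): this is how the
 * file's own callers drive the validator. A props nvlist keyed by property
 * name is validated against the pool version and the create/import context;
 * the pool name "tank" and the ashift value are hypothetical:
 *
 *	nvlist_t *props, *real;
 *	prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };
 *
 *	verify(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(props, "ashift", "12") == 0);
 *	real = zpool_valid_proplist(hdl, "tank", props, SPA_VERSION_1,
 *	    flags, errbuf);
 *
 * On success 'real' holds the parsed (numeric) values; on failure it is
 * NULL and an extended error has been recorded on 'hdl'.
 */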
/*
 * Set zpool property : propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = {"\0"};
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;
	prop_flags_t flags = { 0 };

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}
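
/*
 * Usage sketch (not part of the original file); the property value is
 * hypothetical:
 *
 *	if (zpool_set_prop(zhp, "comment", "rack 12, shelf 3") != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(hdl));
 *
 * On success the handle's cached property nvlist has already been refreshed
 * via zpool_props_refresh(), so a following zpool_get_prop() sees the new
 * value.
 */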
int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	nvlist_t *features = NULL;
	nvpair_t *nvp;
	zprop_list_t **last;
	boolean_t firstexpand = (NULL == *plp);
	int i;

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	last = plp;
	while (*last != NULL)
		last = &(*last)->pl_next;

	if ((*plp)->pl_all)
		features = zpool_get_features(zhp);

	if ((*plp)->pl_all && firstexpand) {
		for (i = 0; i < SPA_FEATURES; i++) {
			zprop_list_t *entry = zfs_alloc(hdl,
			    sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_INVAL;
			entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
			    spa_feature_table[i].fi_uname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}
	}

	/* add any unsupported features */
	for (nvp = nvlist_next_nvpair(features, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
		char *propname;
		boolean_t found;
		zprop_list_t *entry;

		if (zfeature_is_supported(nvpair_name(nvp)))
			continue;

		propname = zfs_asprintf(hdl, "unsupported@%s",
		    nvpair_name(nvp));

		/*
		 * Before adding the property to the list make sure that no
		 * other pool already added the same property.
		 */
		found = B_FALSE;
		entry = *plp;
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
				found = B_TRUE;
				break;
			}
			entry = entry->pl_next;
		}
		if (found) {
			free(propname);
			continue;
		}

		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_INVAL;
		entry->pl_user_prop = propname;
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		*last = entry;
		last = &entry->pl_next;
	}

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL, B_FALSE) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}
/*
 * Get the state for the given feature on the given ZFS pool.
 */
int
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
    size_t len)
{
	uint64_t refcount;
	boolean_t found = B_FALSE;
	nvlist_t *features = zpool_get_features(zhp);
	boolean_t supported;
	const char *feature = strchr(propname, '@') + 1;

	supported = zpool_prop_feature(propname);
	ASSERT(supported || zpool_prop_unsupported(propname));

	/*
	 * Convert from feature name to feature guid. This conversion is
	 * unnecessary for unsupported@... properties because they already
	 * use guids.
	 */
	if (supported) {
		int ret;
		spa_feature_t fid;

		ret = zfeature_lookup_name(feature, &fid);
		if (ret != 0) {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
		feature = spa_feature_table[fid].fi_guid;
	}

	if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
		found = B_TRUE;

	if (supported) {
		if (!found) {
			(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
		} else {
			if (refcount == 0)
				(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
			else
				(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
		}
	} else {
		if (found) {
			if (refcount == 0) {
				(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
			} else {
				(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
			}
		} else {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
	}

	return (0);
}
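
/*
 * Usage sketch (not part of the original file):
 *
 *	char state[64];
 *
 *	if (zpool_prop_get_feature(zhp, "feature@async_destroy", state,
 *	    sizeof (state)) == 0)
 *		(void) printf("async_destroy: %s\n", state);
 *
 * The buffer receives "disabled", "enabled" or "active" for supported
 * features, and the inactive/readonly strings for unsupported@... props.
 */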
/*
 * Validate the given pool name, optionally recording an extended error
 * message on 'hdl'.
 */
static boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid. So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}

	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_DELIMITERS:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' and/or '#' delimiters in "
				    "name"));
				break;

			case NAME_ERR_NO_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "permission set is missing '@'"));
				break;

			default:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "(%d) not defined"), why);
				break;
			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}
/*
 * Open a handle to the given pool, even if the pool is currently in the
 * FAULTED state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}
/*
 * Like the above, but silent on error. Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}
/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}
/*
 * Close the handle. Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	nvlist_free(zhp->zpool_config);
	nvlist_free(zhp->zpool_old_config);
	nvlist_free(zhp->zpool_props);
	free(zhp);
}
/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}
/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE).
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}
/*
 * Create the named pool, using the provided vdev list. It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = {"\0"};
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	nvlist_t *hidden_args = NULL;
	uint8_t *wkeydata = NULL;
	uint_t wkeylen = 0;
	char msg[1024];
	int ret = -1;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	if (props) {
		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, flags, msg)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
		    fsprops, zoned, NULL, NULL, B_TRUE, msg)) == NULL) {
			goto create_failed;
		}
		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (zfs_crypto_create(hdl, NULL, zc_fsprops, props,
		    &wkeydata, &wkeylen) != 0) {
			zfs_error(hdl, EZFS_CRYPTOFAILED, msg);
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
		if (wkeydata != NULL) {
			if (nvlist_alloc(&hidden_args, NV_UNIQUE_NAME, 0) != 0)
				goto create_failed;

			if (nvlist_add_uint8_array(hidden_args, "wkeydata",
			    wkeydata, wkeylen) != 0)
				goto create_failed;

			if (nvlist_add_nvlist(zc_props, ZPOOL_HIDDEN_ARGS,
			    hidden_args) != 0)
				goto create_failed;
		}
	}

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);
		nvlist_free(hidden_args);
		if (wkeydata != NULL)
			free(wkeydata);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label. This can also happen if the device is
			 * part of an active md or lvm device.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device, or "
			    "one of\nthe devices is part of an active md or "
			    "lvm device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ERANGE:
			/*
			 * This happens if the record size is outside the
			 * allowed size range, or not a power of 2.
			 *
			 * NOTE: although zfs_valid_proplist is called earlier,
			 * this case may have slipped through since the
			 * pool does not exist yet and it is therefore
			 * impossible to read properties, e.g. max blocksize.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "record size invalid"));
			return (zfs_error(hdl, EZFS_BADPROP, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicebytes(SPA_MINDEVSIZE, buf,
				    sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	nvlist_free(hidden_args);
	if (wkeydata != NULL)
		free(wkeydata);
	return (ret);
}
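
/*
 * Usage sketch (not part of the original file): the nvroot describing the
 * vdev layout is normally built by the zpool command's shared vdev helper;
 * a minimal hand-rolled single-disk layout would look roughly like the
 * following (the device path is hypothetical):
 *
 *	nvlist_t *disk, *root;
 *
 *	verify(nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_DISK) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_PATH, "/dev/sdb") == 0);
 *	verify(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(root, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_ROOT) == 0);
 *	verify(nvlist_add_nvlist_array(root, ZPOOL_CONFIG_CHILDREN,
 *	    &disk, 1) == 0);
 *
 *	ret = zpool_create(hdl, "tank", root, NULL, NULL);
 *
 * Real callers also set ZPOOL_CONFIG_WHOLE_DISK, ashift, etc., which is why
 * the vdev tree is usually assembled by the shared helper instead.
 */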
/*
 * Destroy the given pool. It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp, const char *log_str)
{
	zfs_cmd_t zc = {"\0"};
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}
/*
 * Create a checkpoint in the given pool.
 */
int
zpool_checkpoint(zpool_handle_t *zhp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	int error;

	error = lzc_pool_checkpoint(zhp->zpool_name);
	if (error != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot checkpoint '%s'"), zhp->zpool_name);
		(void) zpool_standard_error(hdl, error, msg);
		return (-1);
	}

	return (0);
}
/*
 * Discard the checkpoint from the given pool.
 */
int
zpool_discard_checkpoint(zpool_handle_t *zhp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	int error;

	error = lzc_pool_checkpoint_discard(zhp->zpool_name);
	if (error != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot discard checkpoint in '%s'"), zhp->zpool_name);
		(void) zpool_standard_error(hdl, error, msg);
		return (-1);
	}

	return (0);
}
/*
 * Add the given vdevs to the pool. The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = {"\0"};
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EINVAL:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid config; a pool with removing/removed "
			    "vdevs does not support adding raidz vdevs"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicebytes(SPA_MINDEVSIZE, buf,
				    sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}
/*
 * Exports the pool from the system. The caller must ensure that there are no
 * mounted datasets in the pool.
 */
static int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
    const char *log_str)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}

int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
	return (zpool_export_common(zhp, force, B_FALSE, log_str));
}

int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}
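
/*
 * Usage sketch (not part of the original file): a plain 'zpool export tank'
 * maps to
 *
 *	zpool_export(zhp, B_FALSE, history_str);
 *
 * while hard-forced exports use zpool_export_force(), which sets both the
 * force cookie and the hardforce guid flag before issuing
 * ZFS_IOC_POOL_EXPORT.
 */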
static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	int64_t loss = -1;
	uint64_t rewindto;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr || config == NULL)
		return;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
		return;
	}

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		return;
	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		if (dryrun) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Would be able to return %s "
			    "to its state as of %s.\n"),
			    name, timestr);
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Pool %s returned to its state as of %s.\n"),
			    name, timestr);
		}
		if (loss > 120) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    ((longlong_t)loss + 30) / 60);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "minutes of transactions.\n"));
		} else if (loss > 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    (longlong_t)loss);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "seconds of transactions.\n"));
		}
	}
}
void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	int64_t loss = -1;
	uint64_t edata = UINT64_MAX;
	uint64_t rewindto;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr)
		return;

	if (reason >= 0)
		(void) printf(dgettext(TEXT_DOMAIN, "action: "));
	else
		(void) printf(dgettext(TEXT_DOMAIN, "\t"));

	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		goto no_info;

	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
	    &edata);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery is possible, but will result in some data loss.\n"));

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, "%c", &t) != 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReturning the pool to its state as of %s\n"
		    "\tshould correct the problem. "),
		    timestr);
	} else {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReverting the pool to an earlier state "
		    "should correct the problem.\n\t"));
	}

	if (loss > 120) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld minutes of data\n"
		    "\tmust be discarded, irreversibly. "),
		    ((longlong_t)loss + 30) / 60);
	} else if (loss > 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld seconds of data\n"
		    "\tmust be discarded, irreversibly. "),
		    (longlong_t)loss);
	}
	if (edata != 0 && edata != UINT64_MAX) {
		if (edata == 1) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, at least\n"
			    "\tone persistent user-data error will remain. "));
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, several\n"
			    "\tpersistent user-data errors will remain. "));
		}
	}
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
	    reason >= 0 ? "clear" : "import", name);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "A scrub of the pool\n"
	    "\tis strongly recommended after recovery.\n"));
	return;

no_info:
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Destroy and re-create the pool from\n\ta backup source.\n"));
}
/*
 * zpool_import() is a contracted interface. Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props,
	    ZFS_IMPORT_NORMAL);
	nvlist_free(props);
	return (ret);
}
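
/*
 * Usage sketch (not part of the original file): given a config returned by
 * the import discovery code, a caller keeps the pool's original name and
 * supplies an alternate root, e.g.:
 *
 *	ret = zpool_import(hdl, config, NULL, "/mnt");
 *
 * which is equivalent to calling zpool_import_props() with altroot set,
 * cachefile=none, and ZFS_IMPORT_NORMAL flags.
 */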
static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
    int indent)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	uint64_t is_log = 0;

	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
	    &is_log);

	if (name != NULL)
		(void) printf("\t%*s%s%s\n", indent, "", name,
		    is_log ? " [log]" : "");

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);
		print_vdev_tree(hdl, vname, child[c], indent + 2);
		free(vname);
	}
}
void
zpool_print_unsup_feat(nvlist_t *config)
{
	nvlist_t *nvinfo, *unsup_feat;
	nvpair_t *nvp;

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
	    0);
	verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
	    &unsup_feat) == 0);

	for (nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
	    nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
		char *desc;

		verify(nvpair_type(nvp) == DATA_TYPE_STRING);
		verify(nvpair_value_string(nvp, &desc) == 0);

		if (strlen(desc) > 0)
			(void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
		else
			(void) printf("\t%s\n", nvpair_name(nvp));
	}
}
/*
 * Import the given pool using the known configuration and a list of
 * properties to be set. The configuration should have come from
 * zpool_find_import(). The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, int flags)
{
	zfs_cmd_t zc = {"\0"};
	zpool_load_policy_t policy;
	nvlist_t *nv = NULL;
	nvlist_t *nvinfo = NULL;
	nvlist_t *missing = NULL;
	char *thename;
	char *origname;
	int ret;
	int error = 0;
	char errbuf[1024];

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (props != NULL) {
		uint64_t version;
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
		    &version) == 0);

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, flags, errbuf)) == NULL)
			return (-1);
		if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
			nvlist_free(props);
			return (-1);
		}
		nvlist_free(props);
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}
	if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zc.zc_cookie = flags;
	while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}
	if (ret != 0)
		error = errno;

	(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);

	zcmd_free_nvlists(&zc);

	zpool_get_load_policy(config, &policy);

	if (error) {
		char desc[1024];
		char aux[256];

		/*
		 * Dry-run failed, but we print out what success
		 * looks like if we found a best txg
		 */
		if (policy.zlp_rewind & ZPOOL_TRY_REWIND) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    B_TRUE, nv);
			nvlist_free(nv);
			return (-1);
		}

		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (error) {
		case ENOTSUP:
			if (nv != NULL && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
				(void) printf(dgettext(TEXT_DOMAIN, "This "
				    "pool uses the following feature(s) not "
				    "supported by this system:\n"));
				zpool_print_unsup_feat(nv);
				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_CAN_RDONLY)) {
					(void) printf(dgettext(TEXT_DOMAIN,
					    "All unsupported features are only "
					    "required for writing to the pool."
					    "\nThe pool can be imported using "
					    "'-o readonly=on'.\n"));
				}
			}
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EREMOTEIO:
			if (nv != NULL && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0) {
				char *hostname = "<unknown>";
				uint64_t hostid = 0;
				mmp_state_t mmp_state;

				mmp_state = fnvlist_lookup_uint64(nvinfo,
				    ZPOOL_CONFIG_MMP_STATE);

				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_MMP_HOSTNAME))
					hostname = fnvlist_lookup_string(nvinfo,
					    ZPOOL_CONFIG_MMP_HOSTNAME);

				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_MMP_HOSTID))
					hostid = fnvlist_lookup_uint64(nvinfo,
					    ZPOOL_CONFIG_MMP_HOSTID);

				if (mmp_state == MMP_STATE_ACTIVE) {
					(void) snprintf(aux, sizeof (aux),
					    dgettext(TEXT_DOMAIN, "pool is imp"
					    "orted on host '%s' (hostid=%lx).\n"
					    "Export the pool on the other "
					    "system, then run 'zpool import'."),
					    hostname, (unsigned long) hostid);
				} else if (mmp_state == MMP_STATE_NO_HOSTID) {
					(void) snprintf(aux, sizeof (aux),
					    dgettext(TEXT_DOMAIN, "pool has "
					    "the multihost property on and "
					    "the\nsystem's hostid is not set. "
					    "Set a unique system hostid with "
					    "the zgenhostid(8) command.\n"));
				}

				(void) zfs_error_aux(hdl, aux);
			}
			(void) zfs_error(hdl, EZFS_ACTIVE_POOL, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		case EROFS:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		case ENXIO:
			if (nv && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_lookup_nvlist(nvinfo,
			    ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
				(void) printf(dgettext(TEXT_DOMAIN,
				    "The devices below are missing or "
				    "corrupted, use '-m' to import the pool "
				    "anyway:\n"));
				print_vdev_tree(hdl, NULL, missing, 2);
				(void) printf("\n");
			}
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EEXIST:
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EBUSY:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices are already in use\n"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		case ENAMETOOLONG:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "new name of at least one dataset is longer than "
			    "the maximum allowable length"));
			(void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
			break;

		default:
			(void) zpool_standard_error(hdl, error, desc);
			zpool_explain_recover(hdl,
			    newname ? origname : thename, -error, nv);
			break;
		}

		nvlist_free(nv);
		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0)
			ret = -1;
		else if (zhp != NULL)
			zpool_close(zhp);
		if (policy.zlp_rewind &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0), nv);
		}
		nvlist_free(nv);
		return (0);
	}

	return (ret);
}
/*
 * Scan the pool.
 */
int
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd)
{
	zfs_cmd_t zc = {"\0"};
	char msg[1024];
	int err;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = func;
	zc.zc_flags = cmd;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0)
		return (0);

	err = errno;

	/* ECANCELED on a scrub means we resumed a paused scrub */
	if (err == ECANCELED && func == POOL_SCAN_SCRUB &&
	    cmd == POOL_SCRUB_NORMAL)
		return (0);

	if (err == ENOENT && func != POOL_SCAN_NONE && cmd == POOL_SCRUB_NORMAL)
		return (0);

	if (func == POOL_SCAN_SCRUB) {
		if (cmd == POOL_SCRUB_PAUSE) {
			(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
			    "cannot pause scrubbing %s"), zc.zc_name);
		} else {
			assert(cmd == POOL_SCRUB_NORMAL);
			(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
			    "cannot scrub %s"), zc.zc_name);
		}
	} else if (func == POOL_SCAN_NONE) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
		    zc.zc_name);
	} else {
		assert(!"unexpected result");
	}

	if (err == EBUSY) {
		nvlist_t *nvroot;
		pool_scan_stat_t *ps = NULL;
		uint_t psc;

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
		if (ps && ps->pss_func == POOL_SCAN_SCRUB) {
			if (cmd == POOL_SCRUB_PAUSE)
				return (zfs_error(hdl, EZFS_SCRUB_PAUSED, msg));
			else
				return (zfs_error(hdl, EZFS_SCRUBBING, msg));
		} else {
			return (zfs_error(hdl, EZFS_RESILVERING, msg));
		}
	} else if (err == ENOENT) {
		return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
	} else {
		return (zpool_standard_error(hdl, err, msg));
	}
}
/*
 * Find a vdev that matches the search criteria specified. We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare; but FALSE if it is an INUSE spare.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	uint_t c, children;
	nvlist_t **child;
	nvlist_t *ret;
	uint64_t is_log;
	char *srchkey;
	nvpair_t *pair = nvlist_next_nvpair(search, NULL);

	/* Nothing to look for */
	if (search == NULL || pair == NULL)
		return (NULL);

	/* Obtain the key we will use to search */
	srchkey = nvpair_name(pair);

	switch (nvpair_type(pair)) {
	case DATA_TYPE_UINT64:
		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
			uint64_t srchval, theguid;

			verify(nvpair_value_uint64(pair, &srchval) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
			    &theguid) == 0);
			if (theguid == srchval)
				return (nv);
		}
		break;

	case DATA_TYPE_STRING: {
		char *srchval, *val;

		verify(nvpair_value_string(pair, &srchval) == 0);
		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
			break;

		/*
		 * Search for the requested value. Special cases:
		 *
		 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
		 *   "-part1", or "p1". The suffix is hidden from the user,
		 *   but included in the string, so this matches around it.
		 * - ZPOOL_CONFIG_PATH for short names zfs_strcmp_shortname()
		 *   is used to check all possible expanded paths.
		 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
		 *
		 * Otherwise, all other searches are simple string compares.
		 */
		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
			uint64_t wholedisk = 0;

			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
			    &wholedisk);
			if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
				return (nv);

		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
			char *type, *idx, *end, *p;
			uint64_t id, vdev_id;

			/*
			 * Determine our vdev type, keeping in mind
			 * that the srchval is composed of a type and
			 * vdev id pair (i.e. mirror-4).
			 */
			if ((type = strdup(srchval)) == NULL)
				return (NULL);

			if ((p = strrchr(type, '-')) == NULL) {
				free(type);
				break;
			}
			idx = p + 1;
			*p = '\0';

			/*
			 * If the types don't match then keep looking.
			 */
			if (strncmp(val, type, strlen(val)) != 0) {
				free(type);
				break;
			}

			verify(zpool_vdev_is_interior(type));
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);

			errno = 0;
			vdev_id = strtoull(idx, &end, 10);

			free(type);
			if (errno != 0)
				return (NULL);

			/*
			 * Now verify that we have the correct vdev id.
			 */
			if (vdev_id == id)
				return (nv);
		}

		/*
		 * Common case
		 */
		if (strcmp(srchval, val) == 0)
			return (nv);
		break;
	}

	default:
		break;
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = vdev_to_nvlist_iter(child[c], search,
		    avail_spare, l2cache, NULL)) != NULL) {
			/*
			 * The 'is_log' value is only set for the toplevel
			 * vdev, not the leaf vdevs. So we always lookup the
			 * log device from the root of the vdev tree (where
			 * 'log' is non-NULL).
			 */
			if (log != NULL &&
			    nvlist_lookup_uint64(child[c],
			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
			    is_log)
				*log = B_TRUE;
			return (ret);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*avail_spare = B_TRUE;
				return (ret);
			}
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*l2cache = B_TRUE;
				return (ret);
			}
		}
	}

	return (NULL);
}
/*
 * Given a physical path (minus the "/devices" prefix), find the
 * associated vdev.
 */
nvlist_t *
zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
{
	nvlist_t *search, *nvroot, *ret;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);

	return (ret);
}
/*
 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
 */
static boolean_t
zpool_vdev_is_interior(const char *name)
{
	if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
	    strncmp(name, VDEV_TYPE_SPARE, strlen(VDEV_TYPE_SPARE)) == 0 ||
	    strncmp(name,
	    VDEV_TYPE_REPLACING, strlen(VDEV_TYPE_REPLACING)) == 0 ||
	    strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
		return (B_TRUE);
	return (B_FALSE);
}
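
/*
 * Examples (sketch, not in the original file): "mirror-2", "raidz2-0",
 * "spare-1" and "replacing-0" are interior vdev names; "sda1", full device
 * paths and plain guid strings are not, so zpool_find_vdev() below falls
 * through to path- or guid-based search for them.
 */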
nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	char *end;
	nvlist_t *nvroot, *search, *ret;
	uint64_t guid;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	guid = strtoull(path, &end, 0);
	if (guid != 0 && *end == '\0') {
		verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
	} else if (zpool_vdev_is_interior(path)) {
		verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
	} else {
		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
	}

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);

	return (ret);
}
static boolean_t
vdev_is_online(nvlist_t *nv)
{
	uint64_t ival;

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
		return (B_FALSE);

	return (B_TRUE);
}
/*
 * Helper function for zpool_get_physpaths().
 */
static int
vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
    size_t *bytes_written)
{
	size_t bytes_left, pos, rsz;
	char *tmppath;
	const char *format;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
	    &tmppath) != 0)
		return (EZFS_NODEVICE);

	pos = *bytes_written;
	bytes_left = physpath_size - pos;
	format = (pos == 0) ? "%s" : " %s";

	rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
	*bytes_written += rsz;

	if (rsz >= bytes_left) {
		/* if physpath was not copied properly, clear it */
		if (bytes_left != 0) {
			physpath[pos] = 0;
		}
		return (EZFS_NOSPC);
	}
	return (0);
}
static int
vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
    size_t *rsz, boolean_t is_spare)
{
	char *type;
	int ret;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (EZFS_INVALCONFIG);

	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
		/*
		 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
		 * For a spare vdev, we only want to boot from the active
		 * spare device.
		 */
		if (is_spare) {
			uint64_t spare = 0;
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare);
			if (!spare)
				return (EZFS_INVALCONFIG);
		}

		if (vdev_is_online(nv)) {
			if ((ret = vdev_get_one_physpath(nv, physpath,
			    phypath_size, rsz)) != 0)
				return (ret);
		}
	} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
	    strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
	    strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
	    (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
		nvlist_t **child;
		uint_t count;
		int i, ret;

		if (nvlist_lookup_nvlist_array(nv,
		    ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
			return (EZFS_INVALCONFIG);

		for (i = 0; i < count; i++) {
			ret = vdev_get_physpaths(child[i], physpath,
			    phypath_size, rsz, is_spare);
			if (ret == EZFS_NOSPC)
				return (ret);
		}
	} else {
		return (EZFS_POOL_INVALARG);
	}

	return (0);
}
/*
 * Get phys_path for a root pool config.
 * Return 0 on success; non-zero on failure.
 */
static int
zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
{
	size_t rsz;
	nvlist_t *vdev_root;
	nvlist_t **child;
	uint_t count;
	char *type;

	rsz = 0;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &vdev_root) != 0)
		return (EZFS_INVALCONFIG);

	if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
	    nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
	    &child, &count) != 0)
		return (EZFS_INVALCONFIG);

	/*
	 * root pool can only have a single top-level vdev.
	 */
	if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1)
		return (EZFS_POOL_INVALARG);

	(void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
	    B_FALSE);

	/* No online devices */
	if (rsz == 0)
		return (EZFS_NODEVICE);

	return (0);
}
/*
 * Get phys_path for a root pool.
 * Return 0 on success; non-zero on failure.
 */
int
zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
{
	return (zpool_get_config_physpath(zhp->zpool_config, physpath,
	    phypath_size));
}
/*
 * If the device has been dynamically expanded then we need to relabel
 * the disk to use the new unallocated space.
 */
static int
zpool_relabel_disk(libzfs_handle_t *hdl, const char *path, const char *msg)
{
	int fd, error;

	if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "relabel '%s': unable to open device: %d"), path, errno);
		return (zfs_error(hdl, EZFS_OPENFAILED, msg));
	}

	/*
	 * It's possible that we might encounter an error if the device
	 * does not have any unallocated space left. If so, we simply
	 * ignore that error and continue on.
	 *
	 * Also, we don't call efi_rescan() - that would just return EBUSY.
	 * The module will do it for us in vdev_disk_open().
	 */
	error = efi_use_whole_disk(fd);

	/* Flush the buffers to disk and invalidate the page cache. */
	(void) fsync(fd);
	(void) ioctl(fd, BLKFLSBUF);

	(void) close(fd);
	if (error && error != VT_ENOSPC) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
		    "relabel '%s': unable to read disk capacity"), path);
		return (zfs_error(hdl, EZFS_NOCAP, msg));
	}

	return (0);
}
/*
 * Convert a vdev path to a GUID. Returns GUID or 0 on error.
 *
 * If is_spare, is_l2cache, or is_log is non-NULL, then store within it
 * whether the vdev is a spare, l2cache, or log device. If they're NULL
 * then ignore them.
 */
static uint64_t
zpool_vdev_path_to_guid_impl(zpool_handle_t *zhp, const char *path,
    boolean_t *is_spare, boolean_t *is_l2cache, boolean_t *is_log)
{
	uint64_t guid;
	boolean_t spare = B_FALSE, l2cache = B_FALSE, log = B_FALSE;
	nvlist_t *tgt;

	if ((tgt = zpool_find_vdev(zhp, path, &spare, &l2cache,
	    &log)) == NULL)
		return (0);

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &guid) == 0);
	if (is_spare != NULL)
		*is_spare = spare;
	if (is_l2cache != NULL)
		*is_l2cache = l2cache;
	if (is_log != NULL)
		*is_log = log;

	return (guid);
}

/* Convert a vdev path to a GUID. Returns GUID or 0 on error. */
uint64_t
zpool_vdev_path_to_guid(zpool_handle_t *zhp, const char *path)
{
	return (zpool_vdev_path_to_guid_impl(zhp, path, NULL, NULL, NULL));
}
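
/*
 * Usage sketch (not part of the original file); the device path is
 * hypothetical:
 *
 *	uint64_t guid = zpool_vdev_path_to_guid(zhp, "/dev/sda1");
 *
 *	if (guid != 0)
 *		(void) zpool_vdev_fault(zhp, guid, VDEV_AUX_ERR_EXCEEDED);
 *
 * A zero return means the path did not resolve to a vdev in this pool.
 */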
2584 * Bring the specified vdev online. The 'flags' parameter is a set of the
2585 * ZFS_ONLINE_* flags.
2588 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
2589 vdev_state_t *newstate)
2591 zfs_cmd_t zc = {"\0"};
2595 boolean_t avail_spare, l2cache, islog;
2596 libzfs_handle_t *hdl = zhp->zpool_hdl;
2599 if (flags & ZFS_ONLINE_EXPAND) {
2600 (void) snprintf(msg, sizeof (msg),
2601 dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
2603 (void) snprintf(msg, sizeof (msg),
2604 dgettext(TEXT_DOMAIN, "cannot online %s"), path);
2607 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2608 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2610 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2612 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2615 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2617 if ((flags & ZFS_ONLINE_EXPAND ||
2618 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) &&
2619 nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &pathname) == 0) {
2620 uint64_t wholedisk = 0;
2622 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
2626 * XXX - L2ARC 1.0 devices can't support expansion.
2629 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2630 "cannot expand cache devices"));
2631 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
2635 const char *fullpath = path;
2636 char buf[MAXPATHLEN];
2638 if (path[0] != '/') {
2639 error = zfs_resolve_shortname(path, buf,
2642 return (zfs_error(hdl, EZFS_NODEVICE,
2648 error = zpool_relabel_disk(hdl, fullpath, msg);
2654 zc.zc_cookie = VDEV_STATE_ONLINE;
2657 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
2658 if (errno == EINVAL) {
2659 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
2660 "from this pool into a new one. Use '%s' "
2661 "instead"), "zpool detach");
2662 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
2664 return (zpool_standard_error(hdl, errno, msg));
2667 *newstate = zc.zc_cookie;
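/*
 * Illustrative sketch (not in the original source): onlining a device
 * and requesting expansion in one call. The path is hypothetical; on
 * success 'newstate' holds the resulting vdev state.
 */
#if 0
static int
example_online_expand(zpool_handle_t *zhp)
{
	vdev_state_t newstate;
	int err;

	err = zpool_vdev_online(zhp, "/dev/sda",
	    ZFS_ONLINE_EXPAND, &newstate);
	if (err == 0 && newstate != VDEV_STATE_HEALTHY)
		(void) fprintf(stderr, "onlined, but not healthy\n");
	return (err);
}
#endif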
2672 * Take the specified vdev offline
2675 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
2677 zfs_cmd_t zc = {"\0"};
2680 boolean_t avail_spare, l2cache;
2681 libzfs_handle_t *hdl = zhp->zpool_hdl;
2683 (void) snprintf(msg, sizeof (msg),
2684 dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
2686 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2687 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2689 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2691 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2694 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2696 zc.zc_cookie = VDEV_STATE_OFFLINE;
2697 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
2699 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2706 * There are no other replicas of this device.
2708 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2712 * The log device has unplayed logs
2714 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
2717 return (zpool_standard_error(hdl, errno, msg));
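/*
 * Illustrative sketch (not in the original source): passing
 * istmp = B_TRUE requests ZFS_OFFLINE_TEMPORARY, so the offline state
 * is not persisted across an export/import cycle. The path is
 * hypothetical.
 */
#if 0
static int
example_offline_temporarily(zpool_handle_t *zhp)
{
	return (zpool_vdev_offline(zhp, "/dev/sda", B_TRUE));
}
#endif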
2722 * Mark the given vdev faulted.
2725 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2727 zfs_cmd_t zc = {"\0"};
2729 libzfs_handle_t *hdl = zhp->zpool_hdl;
2731 (void) snprintf(msg, sizeof (msg),
2732 dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
2734 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2736 zc.zc_cookie = VDEV_STATE_FAULTED;
2739 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2746 * There are no other replicas of this device.
2748 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2751 return (zpool_standard_error(hdl, errno, msg));
2757 * Mark the given vdev degraded.
2760 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2762 zfs_cmd_t zc = {"\0"};
2764 libzfs_handle_t *hdl = zhp->zpool_hdl;
2766 (void) snprintf(msg, sizeof (msg),
2767 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);
2769 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2771 zc.zc_cookie = VDEV_STATE_DEGRADED;
2774 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2777 return (zpool_standard_error(hdl, errno, msg));
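/*
 * Illustrative sketch (not in the original source): fmd-style callers
 * address vdevs by GUID rather than by path. VDEV_AUX_ERR_EXCEEDED is
 * assumed here as the aux reason; the GUID would typically come from
 * zpool_vdev_path_to_guid() or an event payload.
 */
#if 0
static void
example_fault_by_guid(zpool_handle_t *zhp, uint64_t guid)
{
	(void) zpool_vdev_degrade(zhp, guid, VDEV_AUX_ERR_EXCEEDED);
	(void) zpool_vdev_fault(zhp, guid, VDEV_AUX_ERR_EXCEEDED);
}
#endif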
2781 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
2782 * a hot spare.
2785 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
2791 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
2793 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
2796 if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
2797 children == 2 && child[which] == tgt)
2800 for (c = 0; c < children; c++)
2801 if (is_replacing_spare(child[c], tgt, which))
2809 * Attach new_disk (fully described by nvroot) to old_disk.
2810 * If 'replacing' is specified, the new disk will replace the old one.
2813 zpool_vdev_attach(zpool_handle_t *zhp,
2814 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
2816 zfs_cmd_t zc = {"\0"};
2820 boolean_t avail_spare, l2cache, islog;
2825 nvlist_t *config_root;
2826 libzfs_handle_t *hdl = zhp->zpool_hdl;
2827 boolean_t rootpool = zpool_is_bootable(zhp);
2830 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2831 "cannot replace %s with %s"), old_disk, new_disk);
2833 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2834 "cannot attach %s to %s"), new_disk, old_disk);
2836 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2837 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
2839 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2842 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2845 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2847 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2848 zc.zc_cookie = replacing;
2850 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2851 &child, &children) != 0 || children != 1) {
2852 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2853 "new device must be a single disk"));
2854 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
2857 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2858 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
2860 if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL)
2864 * If the target is a hot spare that has been swapped in, we can only
2865 * replace it with another hot spare.
2868 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
2869 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
2870 NULL) == NULL || !avail_spare) &&
2871 is_replacing_spare(config_root, tgt, 1)) {
2872 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2873 "can only be replaced by another hot spare"));
2875 return (zfs_error(hdl, EZFS_BADTARGET, msg));
2880 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
2883 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
2885 zcmd_free_nvlists(&zc);
2890 * XXX need a better way to prevent user from
2891 * booting up a half-baked vdev.
2893 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
2894 "sure to wait until resilver is done "
2895 "before rebooting.\n"));
2903 * Can't attach to or replace this type of vdev.
2906 uint64_t version = zpool_get_prop_int(zhp,
2907 ZPOOL_PROP_VERSION, NULL);
2910 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2911 "cannot replace a log with a spare"));
2912 else if (version >= SPA_VERSION_MULTI_REPLACE)
2913 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2914 "already in replacing/spare config; wait "
2915 "for completion or use 'zpool detach'"));
2917 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2918 "cannot replace a replacing device"));
2920 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2921 "can only attach to mirrors and top-level "
2924 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2929 * The new device must be a single disk.
2931 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2932 "new device must be a single disk"));
2933 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
2937 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy, "
2938 "or device removal is in progress"),
2940 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2945 * The new device is too small.
2947 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2948 "device is too small"));
2949 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2954 * The new device has a different optimal sector size.
2956 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2957 "new device has a different optimal sector size; use the "
2958 "option '-o ashift=N' to override the optimal size"));
2959 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2964 * The resulting top-level vdev spec won't fit in the label.
2966 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
2970 (void) zpool_standard_error(hdl, errno, msg);
2977 * Detach the specified device.
2980 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2982 zfs_cmd_t zc = {"\0"};
2985 boolean_t avail_spare, l2cache;
2986 libzfs_handle_t *hdl = zhp->zpool_hdl;
2988 (void) snprintf(msg, sizeof (msg),
2989 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2991 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2992 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2994 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2997 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3000 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
3002 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3004 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
3011 * Can't detach from this type of vdev.
3013 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
3014 "applicable to mirror and replacing vdevs"));
3015 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
3020 * There are no other replicas of this device.
3022 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
3026 (void) zpool_standard_error(hdl, errno, msg);
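/*
 * Illustrative sketch (not in the original source): detaching one side
 * of a mirror, or a completed 'replacing' vdev. The path is
 * hypothetical.
 */
#if 0
static int
example_detach(zpool_handle_t *zhp)
{
	return (zpool_vdev_detach(zhp, "/dev/sdb"));
}
#endif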
3033 * Find a mirror vdev in the source nvlist.
3035 * The mchild array contains a list of disks in one of the top-level mirrors
3036 * of the source pool. The schild array contains a list of disks that the
3037 * user specified on the command line. We loop over the mchild array to
3038 * see if any entry in the schild array matches.
3040 * If a disk in the mchild array is found in the schild array, we return
3041 * the index of that entry. Otherwise we return -1.
3044 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
3045 nvlist_t **schild, uint_t schildren)
3049 for (mc = 0; mc < mchildren; mc++) {
3051 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
3054 for (sc = 0; sc < schildren; sc++) {
3055 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
3057 boolean_t result = (strcmp(mpath, spath) == 0);
3073 * Split a mirror pool. If newroot points to NULL, then a new nvlist
3074 * is generated and it is the responsibility of the caller to free it.
3077 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
3078 nvlist_t *props, splitflags_t flags)
3080 zfs_cmd_t zc = {"\0"};
3082 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
3083 nvlist_t **varray = NULL, *zc_props = NULL;
3084 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
3085 libzfs_handle_t *hdl = zhp->zpool_hdl;
3086 uint64_t vers, readonly = B_FALSE;
3087 boolean_t freelist = B_FALSE, memory_err = B_TRUE;
3090 (void) snprintf(msg, sizeof (msg),
3091 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
3093 if (!zpool_name_valid(hdl, B_FALSE, newname))
3094 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
3096 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
3097 (void) fprintf(stderr, gettext("Internal error: unable to "
3098 "retrieve pool configuration\n"));
3102 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
3104 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
3107 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
3108 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
3109 props, vers, flags, msg)) == NULL)
3111 (void) nvlist_lookup_uint64(zc_props,
3112 zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
3114 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3115 "property %s can only be set at import time"),
3116 zpool_prop_to_name(ZPOOL_PROP_READONLY));
3121 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
3123 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3124 "Source pool is missing vdev tree"));
3125 nvlist_free(zc_props);
3129 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
3132 if (*newroot == NULL ||
3133 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
3134 &newchild, &newchildren) != 0)
3137 for (c = 0; c < children; c++) {
3138 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
3140 nvlist_t **mchild, *vdev;
3145 * Unlike cache & spares, slogs are stored in the
3146 * ZPOOL_CONFIG_CHILDREN array. We filter them out here.
3148 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
3150 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
3152 if (is_log || is_hole) {
3154 * Create a hole vdev and put it in the config.
3156 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
3158 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
3159 VDEV_TYPE_HOLE) != 0)
3161 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
3166 varray[vcount++] = vdev;
3170 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
3172 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
3173 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3174 "Source pool must be composed only of mirrors\n"));
3175 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
3179 verify(nvlist_lookup_nvlist_array(child[c],
3180 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
3182 /* find or add an entry for this top-level vdev */
3183 if (newchildren > 0 &&
3184 (entry = find_vdev_entry(zhp, mchild, mchildren,
3185 newchild, newchildren)) >= 0) {
3186 /* We found a disk that the user specified. */
3187 vdev = mchild[entry];
3190 /* User didn't specify a disk for this vdev. */
3191 vdev = mchild[mchildren - 1];
3194 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
3198 /* did we find every disk the user specified? */
3199 if (found != newchildren) {
3200 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
3201 "include at most one disk from each mirror"));
3202 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
3206 /* Prepare the nvlist for populating. */
3207 if (*newroot == NULL) {
3208 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
3211 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
3212 VDEV_TYPE_ROOT) != 0)
3215 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
3218 /* Add all the children we found */
3219 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
3220 lastlog == 0 ? vcount : lastlog) != 0)
3224 * If we're just doing a dry run, exit now with success.
3227 memory_err = B_FALSE;
3232 /* now build up the config list & call the ioctl */
3233 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
3236 if (nvlist_add_nvlist(newconfig,
3237 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
3238 nvlist_add_string(newconfig,
3239 ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
3240 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
3244 * The new pool is automatically part of the namespace unless we
3245 * explicitly export it.
3248 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
3249 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3250 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
3251 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
3253 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
3256 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
3257 retval = zpool_standard_error(hdl, errno, msg);
3262 memory_err = B_FALSE;
3265 if (varray != NULL) {
3268 for (v = 0; v < vcount; v++)
3269 nvlist_free(varray[v]);
3272 zcmd_free_nvlists(&zc);
3273 nvlist_free(zc_props);
3274 nvlist_free(newconfig);
3276 nvlist_free(*newroot);
3284 return (no_memory(hdl));
3290 * Remove the given device.
3293 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
3295 zfs_cmd_t zc = {"\0"};
3298 boolean_t avail_spare, l2cache, islog;
3299 libzfs_handle_t *hdl = zhp->zpool_hdl;
3302 (void) snprintf(msg, sizeof (msg),
3303 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3305 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3306 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3308 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3310 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
3311 if (islog && version < SPA_VERSION_HOLES) {
3312 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3313 "pool must be upgraded to support log removal"));
3314 return (zfs_error(hdl, EZFS_BADVERSION, msg));
3317 if (!islog && !avail_spare && !l2cache && zpool_is_bootable(zhp)) {
3318 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3319 "root pool can not have removed devices, "
3320 "because GRUB does not understand them"));
3321 return (zfs_error(hdl, EINVAL, msg));
3324 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
3326 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3332 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3333 "invalid config; all top-level vdevs must "
3334 "have the same sector size and not be raidz."));
3335 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
3340 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3341 "Mount encrypted datasets to replay logs."));
3343 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3344 "Pool busy; removal may already be in progress"));
3346 (void) zfs_error(hdl, EZFS_BUSY, msg);
3351 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3352 "Mount encrypted datasets to replay logs."));
3353 (void) zfs_error(hdl, EZFS_BUSY, msg);
3355 (void) zpool_standard_error(hdl, errno, msg);
3360 (void) zpool_standard_error(hdl, errno, msg);
3366 zpool_vdev_remove_cancel(zpool_handle_t *zhp)
3370 libzfs_handle_t *hdl = zhp->zpool_hdl;
3372 (void) snprintf(msg, sizeof (msg),
3373 dgettext(TEXT_DOMAIN, "cannot cancel removal"));
3375 bzero(&zc, sizeof (zc));
3376 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3379 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3382 return (zpool_standard_error(hdl, errno, msg));
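/*
 * Illustrative sketch (not in the original source): starting a
 * top-level vdev removal and then cancelling it; cancellation reuses
 * ZFS_IOC_VDEV_REMOVE, as above. The path is hypothetical.
 */
#if 0
static void
example_remove_then_cancel(zpool_handle_t *zhp)
{
	if (zpool_vdev_remove(zhp, "/dev/sdc") == 0)
		(void) zpool_vdev_remove_cancel(zhp);
}
#endif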
3386 zpool_vdev_indirect_size(zpool_handle_t *zhp, const char *path,
3391 boolean_t avail_spare, l2cache, islog;
3392 libzfs_handle_t *hdl = zhp->zpool_hdl;
3394 (void) snprintf(msg, sizeof (msg),
3395 dgettext(TEXT_DOMAIN, "cannot determine indirect size of %s"),
3398 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3400 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3402 if (avail_spare || l2cache || islog) {
3407 if (nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_INDIRECT_SIZE, sizep) != 0) {
3408 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3409 "indirect size not available"));
3410 return (zfs_error(hdl, EINVAL, msg));
3416 * Clear the errors for the pool, or the particular device if specified.
3419 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
3421 zfs_cmd_t zc = {"\0"};
3424 zpool_load_policy_t policy;
3425 boolean_t avail_spare, l2cache;
3426 libzfs_handle_t *hdl = zhp->zpool_hdl;
3427 nvlist_t *nvi = NULL;
3431 (void) snprintf(msg, sizeof (msg),
3432 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3435 (void) snprintf(msg, sizeof (msg),
3436 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3439 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3441 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
3442 &l2cache, NULL)) == NULL)
3443 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3446 * Don't allow error clearing for hot spares. Do allow
3447 * error clearing for l2cache devices.
3450 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3452 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
3456 zpool_get_load_policy(rewindnvl, &policy);
3457 zc.zc_cookie = policy.zlp_rewind;
3459 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
3462 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
3465 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
3467 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3468 zcmd_free_nvlists(&zc);
3473 if (!error || ((policy.zlp_rewind & ZPOOL_TRY_REWIND) &&
3474 errno != EPERM && errno != EACCES)) {
3475 if (policy.zlp_rewind &
3476 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
3477 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
3478 zpool_rewind_exclaim(hdl, zc.zc_name,
3479 ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0),
3483 zcmd_free_nvlists(&zc);
3487 zcmd_free_nvlists(&zc);
3488 return (zpool_standard_error(hdl, errno, msg));
3492 * Similar to zpool_clear(), but takes a GUID (used by fmd).
3495 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
3497 zfs_cmd_t zc = {"\0"};
3499 libzfs_handle_t *hdl = zhp->zpool_hdl;
3501 (void) snprintf(msg, sizeof (msg),
3502 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
3503 (u_longlong_t)guid);
3505 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3507 zc.zc_cookie = ZPOOL_NO_REWIND;
3509 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
3512 return (zpool_standard_error(hdl, errno, msg));
3516 * Change the GUID for a pool.
3519 zpool_reguid(zpool_handle_t *zhp)
3522 libzfs_handle_t *hdl = zhp->zpool_hdl;
3523 zfs_cmd_t zc = {"\0"};
3525 (void) snprintf(msg, sizeof (msg),
3526 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
3528 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3529 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
3532 return (zpool_standard_error(hdl, errno, msg));
3539 zpool_reopen_one(zpool_handle_t *zhp, void *data)
3541 libzfs_handle_t *hdl = zpool_get_handle(zhp);
3542 const char *pool_name = zpool_get_name(zhp);
3543 boolean_t *scrub_restart = data;
3546 error = lzc_reopen(pool_name, *scrub_restart);
3548 return (zpool_standard_error_fmt(hdl, error,
3549 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), pool_name));
3555 /* call into libzfs_core to execute the sync IOCTL per pool */
3557 zpool_sync_one(zpool_handle_t *zhp, void *data)
3560 libzfs_handle_t *hdl = zpool_get_handle(zhp);
3561 const char *pool_name = zpool_get_name(zhp);
3562 boolean_t *force = data;
3563 nvlist_t *innvl = fnvlist_alloc();
3565 fnvlist_add_boolean_value(innvl, "force", *force);
3566 if ((ret = lzc_sync(pool_name, innvl, NULL)) != 0) {
3568 return (zpool_standard_error_fmt(hdl, ret,
3569 dgettext(TEXT_DOMAIN, "sync '%s' failed"), pool_name));
3576 #if defined(__sun__) || defined(__sun)
3578 * Convert from a devid string to a path.
3581 devid_to_path(char *devid_str)
3586 devid_nmlist_t *list = NULL;
3589 if (devid_str_decode(devid_str, &devid, &minor) != 0)
3592 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
3594 devid_str_free(minor);
3601 * In case the strdup() fails, we will just return NULL below.
3603 path = strdup(list[0].devname);
3605 devid_free_nmlist(list);
3611 * Convert from a path to a devid string.
3614 path_to_devid(const char *path)
3620 if ((fd = open(path, O_RDONLY)) < 0)
3625 if (devid_get(fd, &devid) == 0) {
3626 if (devid_get_minor_name(fd, &minor) == 0)
3627 ret = devid_str_encode(devid, minor);
3629 devid_str_free(minor);
3638 * Issue the necessary ioctl() to update the stored path value for the vdev. We
3639 * ignore any failure here, since a common case is for an unprivileged user to
3640 * type 'zpool status', and we'll display the correct information anyway.
3643 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
3645 zfs_cmd_t zc = {"\0"};
3647 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3648 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
3649 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3652 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
3657 * Remove partition suffix from a vdev path. Partition suffixes may take three
3658 * forms: "-partX", "pX", or "X", where X is a string of digits. The second
3659 * case only occurs when the suffix is preceded by a digit, i.e. "md0p0". The
3660 * third case only occurs when preceded by a string matching the regular
3661 * expression "^([hsv]|xv)d[a-z]+", i.e. a scsi, ide, virtio or xen disk.
3663 * The caller must free the returned string.
3666 zfs_strip_partition(char *path)
3668 char *tmp = strdup(path);
3669 char *part = NULL, *d = NULL;
3673 if ((part = strstr(tmp, "-part")) && part != tmp) {
3675 } else if ((part = strrchr(tmp, 'p')) &&
3676 part > tmp + 1 && isdigit(*(part-1))) {
3678 } else if ((tmp[0] == 'h' || tmp[0] == 's' || tmp[0] == 'v') &&
3680 for (d = &tmp[2]; isalpha(*d); part = ++d) { }
3681 } else if (strncmp("xvd", tmp, 3) == 0) {
3682 for (d = &tmp[3]; isalpha(*d); part = ++d) { }
3684 if (part && d && *d != '\0') {
3685 for (; isdigit(*d); d++) { }
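/*
 * Illustrative examples (not in the original source) of the three
 * suffix forms handled above; device names are hypothetical, and each
 * call returns a newly allocated string:
 *
 *	zfs_strip_partition("ata-FOO-part3") => "ata-FOO"	("-partX")
 *	zfs_strip_partition("md0p1")         => "md0"		("pX")
 *	zfs_strip_partition("sda1")          => "sda"		("X")
 */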
3694 * Same as zfs_strip_partition, but allows "/dev/" to be in the pathname
3699 * Returned string must be freed.
3702 zfs_strip_partition_path(char *path)
3704 char *newpath = strdup(path);
3711 /* Point to "sda1" part of "/dev/sda1" */
3712 sd_offset = strrchr(newpath, '/') + 1;
3714 /* Get our new name "sda" */
3715 new_sd = zfs_strip_partition(sd_offset);
3721 /* Paste the "sda" where "sda1" was */
3722 strlcpy(sd_offset, new_sd, strlen(sd_offset) + 1);
3724 /* Free temporary "sda" */
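/*
 * Illustrative example (not in the original source): same stripping,
 * with a leading "/dev/" preserved:
 *
 *	zfs_strip_partition_path("/dev/sda1") => "/dev/sda"
 */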
3730 #define PATH_BUF_LEN 64
3733 * Given a vdev, return the name to display in iostat. If the vdev has a path,
3734 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
3735 * We also check if this is a whole disk, in which case we strip off the
3736 * trailing 's0' slice name.
3738 * This routine is also responsible for identifying when disks have been
3739 * reconfigured in a new location. The kernel will have opened the device by
3740 * devid, but the path will still refer to the old location. To catch this, we
3741 * first do a path -> devid translation (which is fast for the common case). If
3742 * the devid matches, we're done. If not, we do a reverse devid -> path
3743 * translation and issue the appropriate ioctl() to update the path of the vdev.
3744 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
3745 * of these checks.
3748 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
3751 char *path, *type, *env;
3753 char buf[PATH_BUF_LEN];
3754 char tmpbuf[PATH_BUF_LEN];
3757 * vdev_name will be "root"/"root-0" for the root vdev, but it is the
3758 * zpool name that will be displayed to the user.
3760 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
3761 if (zhp != NULL && strcmp(type, "root") == 0)
3762 return (zfs_strdup(hdl, zpool_get_name(zhp)));
3764 env = getenv("ZPOOL_VDEV_NAME_PATH");
3765 if (env && (strtoul(env, NULL, 0) > 0 ||
3766 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3767 name_flags |= VDEV_NAME_PATH;
3769 env = getenv("ZPOOL_VDEV_NAME_GUID");
3770 if (env && (strtoul(env, NULL, 0) > 0 ||
3771 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3772 name_flags |= VDEV_NAME_GUID;
3774 env = getenv("ZPOOL_VDEV_NAME_FOLLOW_LINKS");
3775 if (env && (strtoul(env, NULL, 0) > 0 ||
3776 !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3777 name_flags |= VDEV_NAME_FOLLOW_LINKS;
3779 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
3780 name_flags & VDEV_NAME_GUID) {
3781 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value);
3782 (void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value);
3784 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
3785 #if defined(__sun__) || defined(__sun)
3787 * Live VDEV path updates to a kernel VDEV during a
3788 * zpool_vdev_name lookup are not supported on Linux.
3795 * If the device is dead (faulted, offline, etc) then don't
3796 * bother opening it. Otherwise we may be forcing the user to
3797 * open a misbehaving device, which can have undesirable
3798 * side effects.
3800 if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
3801 (uint64_t **)&vs, &vsc) != 0 ||
3802 vs->vs_state >= VDEV_STATE_DEGRADED) &&
3804 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
3806 * Determine if the current path is correct.
3808 char *newdevid = path_to_devid(path);
3810 if (newdevid == NULL ||
3811 strcmp(devid, newdevid) != 0) {
3814 if ((newpath = devid_to_path(devid)) != NULL) {
3816 * Update the path appropriately.
3818 set_path(zhp, nv, newpath);
3819 if (nvlist_add_string(nv,
3820 ZPOOL_CONFIG_PATH, newpath) == 0)
3821 verify(nvlist_lookup_string(nv,
3829 devid_str_free(newdevid);
3833 if (name_flags & VDEV_NAME_FOLLOW_LINKS) {
3834 char *rp = realpath(path, NULL);
3836 strlcpy(buf, rp, sizeof (buf));
3843 * For a block device only use the name.
3845 if ((strcmp(type, VDEV_TYPE_DISK) == 0) &&
3846 !(name_flags & VDEV_NAME_PATH)) {
3847 path = strrchr(path, '/');
3852 * Remove the partition from the path if this is a whole disk.
3854 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value)
3855 == 0 && value && !(name_flags & VDEV_NAME_PATH)) {
3856 return (zfs_strip_partition(path));
3862 * If it's a raidz device, we need to stick in the parity level.
3864 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
3865 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3867 (void) snprintf(buf, sizeof (buf), "%s%llu", path,
3868 (u_longlong_t)value);
3873 * We identify each top-level vdev by using a <type-id>
3874 * naming convention.
3876 if (name_flags & VDEV_NAME_TYPE_ID) {
3878 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
3880 (void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
3881 path, (u_longlong_t)id);
3886 return (zfs_strdup(hdl, path));
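/*
 * Illustrative sketch (not in the original source): typical use when
 * walking a config tree. The returned name is allocated and must be
 * freed by the caller.
 */
#if 0
static void
example_print_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp,
    nvlist_t *nv)
{
	char *name = zpool_vdev_name(hdl, zhp, nv, VDEV_NAME_TYPE_ID);

	if (name != NULL) {
		(void) printf("%s\n", name);
		free(name);
	}
}
#endif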
3890 zbookmark_mem_compare(const void *a, const void *b)
3892 return (memcmp(a, b, sizeof (zbookmark_phys_t)));
3896 * Retrieve the persistent error log, uniquify the members, and return to the
3897 * caller.
3900 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
3902 zfs_cmd_t zc = {"\0"};
3903 libzfs_handle_t *hdl = zhp->zpool_hdl;
3905 zbookmark_phys_t *zb = NULL;
3909 * Retrieve the raw error list from the kernel. If the number of errors
3910 * has increased, allocate more space and continue until we get the
3911 * entire list.
3913 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
3917 zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
3918 count * sizeof (zbookmark_phys_t));
3919 zc.zc_nvlist_dst_size = count;
3920 (void) strcpy(zc.zc_name, zhp->zpool_name);
3922 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
3924 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3925 if (errno == ENOMEM) {
3928 count = zc.zc_nvlist_dst_size;
3929 dst = zfs_alloc(zhp->zpool_hdl, count *
3930 sizeof (zbookmark_phys_t));
3931 zc.zc_nvlist_dst = (uintptr_t)dst;
3933 return (zpool_standard_error_fmt(hdl, errno,
3934 dgettext(TEXT_DOMAIN, "errors: List of "
3935 "errors unavailable")));
3943 * Sort the resulting bookmarks. This is a little confusing due to the
3944 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
3945 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
3946 * _not_ copied as part of the process. So we point the start of our
3947 * array appropriately and decrement the total number of elements.
3949 zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
3950 zc.zc_nvlist_dst_size;
3951 count -= zc.zc_nvlist_dst_size;
3953 qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);
3955 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
3958 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
3960 for (i = 0; i < count; i++) {
3963 /* ignoring zb_blkid and zb_level for now */
3964 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
3965 zb[i-1].zb_object == zb[i].zb_object)
3968 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
3970 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
3971 zb[i].zb_objset) != 0) {
3975 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
3976 zb[i].zb_object) != 0) {
3980 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
3987 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3991 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3992 return (no_memory(zhp->zpool_hdl));
3996 * Upgrade a ZFS pool to the latest on-disk version.
3999 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
4001 zfs_cmd_t zc = {"\0"};
4002 libzfs_handle_t *hdl = zhp->zpool_hdl;
4004 (void) strcpy(zc.zc_name, zhp->zpool_name);
4005 zc.zc_cookie = new_version;
4007 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
4008 return (zpool_standard_error_fmt(hdl, errno,
4009 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
4015 zfs_save_arguments(int argc, char **argv, char *string, int len)
4019 (void) strlcpy(string, basename(argv[0]), len);
4020 for (i = 1; i < argc; i++) {
4021 (void) strlcat(string, " ", len);
4022 (void) strlcat(string, argv[i], len);
4027 zpool_log_history(libzfs_handle_t *hdl, const char *message)
4029 zfs_cmd_t zc = {"\0"};
4033 args = fnvlist_alloc();
4034 fnvlist_add_string(args, "message", message);
4035 err = zcmd_write_src_nvlist(hdl, &zc, args);
4037 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
4039 zcmd_free_nvlists(&zc);
4044 * Perform ioctl to get some command history of a pool.
4046 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
4047 * logical offset of the history buffer to start reading from.
4049 * Upon return, 'off' is the next logical offset to read from and
4050 * 'len' is the actual amount of bytes read into 'buf'.
4053 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
4055 zfs_cmd_t zc = {"\0"};
4056 libzfs_handle_t *hdl = zhp->zpool_hdl;
4058 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4060 zc.zc_history = (uint64_t)(uintptr_t)buf;
4061 zc.zc_history_len = *len;
4062 zc.zc_history_offset = *off;
4064 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
4067 return (zfs_error_fmt(hdl, EZFS_PERM,
4068 dgettext(TEXT_DOMAIN,
4069 "cannot show history for pool '%s'"),
4072 return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
4073 dgettext(TEXT_DOMAIN, "cannot get history for pool "
4074 "'%s'"), zhp->zpool_name));
4076 return (zfs_error_fmt(hdl, EZFS_BADVERSION,
4077 dgettext(TEXT_DOMAIN, "cannot get history for pool "
4078 "'%s', pool must be upgraded"), zhp->zpool_name));
4080 return (zpool_standard_error_fmt(hdl, errno,
4081 dgettext(TEXT_DOMAIN,
4082 "cannot get history for '%s'"), zhp->zpool_name));
4086 *len = zc.zc_history_len;
4087 *off = zc.zc_history_offset;
4093 * Process the buffer of nvlists, unpacking and storing each nvlist record
4094 * into 'records'. 'leftover' is set to the number of bytes that weren't
4095 * processed as there wasn't a complete record.
4098 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
4099 nvlist_t ***records, uint_t *numrecords)
4106 while (bytes_read > sizeof (reclen)) {
4108 /* get length of packed record (stored as little endian) */
4109 for (i = 0, reclen = 0; i < sizeof (reclen); i++)
4110 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
4112 if (bytes_read < sizeof (reclen) + reclen)
4116 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
4118 bytes_read -= sizeof (reclen) + reclen;
4119 buf += sizeof (reclen) + reclen;
4121 /* add record to nvlist array */
4123 if (ISP2(*numrecords + 1)) {
4124 tmp = realloc(*records,
4125 *numrecords * 2 * sizeof (nvlist_t *));
4133 (*records)[*numrecords - 1] = nv;
4136 *leftover = bytes_read;
4141 * Retrieve the command history of a pool.
4144 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
4147 int buflen = 128 * 1024;
4149 nvlist_t **records = NULL;
4150 uint_t numrecords = 0;
4153 buf = malloc(buflen);
4157 uint64_t bytes_read = buflen;
4160 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
4163 /* if nothing else was read in, we're at EOF, just return */
4167 if ((err = zpool_history_unpack(buf, bytes_read,
4168 &leftover, &records, &numrecords)) != 0)
4171 if (leftover == bytes_read) {
4173 * no progress made, because buffer is not big enough
4174 * to hold this record; resize and retry.
4178 buf = malloc(buflen);
4189 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
4190 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
4191 records, numrecords) == 0);
4193 for (i = 0; i < numrecords; i++)
4194 nvlist_free(records[i]);
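/*
 * Illustrative sketch (not in the original source): consuming the
 * returned history. Each record is an nvlist; ZPOOL_HIST_CMD carries
 * the logged command string when present.
 */
#if 0
static void
example_print_history(zpool_handle_t *zhp)
{
	nvlist_t *nvhis, **records;
	uint_t nrecords, i;
	char *cmd;

	if (zpool_get_history(zhp, &nvhis) != 0)
		return;
	if (nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
	    &records, &nrecords) == 0) {
		for (i = 0; i < nrecords; i++) {
			if (nvlist_lookup_string(records[i],
			    ZPOOL_HIST_CMD, &cmd) == 0)
				(void) printf("%s\n", cmd);
		}
	}
	nvlist_free(nvhis);
}
#endif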
4201 * Retrieve the next event given the passed 'zevent_fd' file descriptor.
4202 * If there is a new event available 'nvp' will contain a newly allocated
4203 * nvlist and 'dropped' will be set to the number of missed events since
4204 * the last call to this function. When 'nvp' is set to NULL it indicates
4205 * no new events are available. In either case the function returns 0 and
4206 * it is up to the caller to free 'nvp'. In the case of a fatal error the
4207 * function will return a non-zero value. When the function is called in
4208 * blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed),
4209 * it will not return until a new event is available.
4212 zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
4213 int *dropped, unsigned flags, int zevent_fd)
4215 zfs_cmd_t zc = {"\0"};
4220 zc.zc_cleanup_fd = zevent_fd;
4222 if (flags & ZEVENT_NONBLOCK)
4223 zc.zc_guid = ZEVENT_NONBLOCK;
4225 if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
4229 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
4232 error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
4233 dgettext(TEXT_DOMAIN, "zfs shutdown"));
4236 /* Blocking error case should not occur */
4237 if (!(flags & ZEVENT_NONBLOCK))
4238 error = zpool_standard_error_fmt(hdl, errno,
4239 dgettext(TEXT_DOMAIN, "cannot get event"));
4243 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
4244 error = zfs_error_fmt(hdl, EZFS_NOMEM,
4245 dgettext(TEXT_DOMAIN, "cannot get event"));
4251 error = zpool_standard_error_fmt(hdl, errno,
4252 dgettext(TEXT_DOMAIN, "cannot get event"));
4257 error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
4261 *dropped = (int)zc.zc_cookie;
4263 zcmd_free_nvlists(&zc);
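/*
 * Illustrative sketch (not in the original source): draining all
 * currently queued events without blocking. 'zevent_fd' is assumed to
 * be an open descriptor on the ZFS control device (e.g. "/dev/zfs").
 */
#if 0
static void
example_drain_events(libzfs_handle_t *hdl, int zevent_fd)
{
	nvlist_t *nvl;
	int dropped;

	while (zpool_events_next(hdl, &nvl, &dropped,
	    ZEVENT_NONBLOCK, zevent_fd) == 0 && nvl != NULL) {
		if (dropped > 0)
			(void) fprintf(stderr, "missed %d events\n", dropped);
		nvlist_free(nvl);
	}
}
#endif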
4272 zpool_events_clear(libzfs_handle_t *hdl, int *count)
4274 zfs_cmd_t zc = {"\0"};
4277 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
4278 "cannot clear events"));
4280 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
4281 return (zpool_standard_error_fmt(hdl, errno, msg));
4284 *count = (int)zc.zc_cookie; /* # of events cleared */
4290 * Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for
4291 * the passed zevent_fd file handle. On success zero is returned,
4292 * otherwise -1 is returned and hdl->libzfs_error is set to the errno.
4295 zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)
4297 zfs_cmd_t zc = {"\0"};
4301 zc.zc_cleanup_fd = zevent_fd;
4303 if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {
4306 error = zfs_error_fmt(hdl, EZFS_NOENT,
4307 dgettext(TEXT_DOMAIN, "cannot get event"));
4311 error = zfs_error_fmt(hdl, EZFS_NOMEM,
4312 dgettext(TEXT_DOMAIN, "cannot get event"));
4316 error = zpool_standard_error_fmt(hdl, errno,
4317 dgettext(TEXT_DOMAIN, "cannot get event"));
4326 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
4327 char *pathname, size_t len)
4329 zfs_cmd_t zc = {"\0"};
4330 boolean_t mounted = B_FALSE;
4331 char *mntpnt = NULL;
4332 char dsname[ZFS_MAX_DATASET_NAME_LEN];
4335 /* special case for the MOS */
4336 (void) snprintf(pathname, len, "<metadata>:<0x%llx>",
4341 /* get the dataset's name */
4342 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4344 if (ioctl(zhp->zpool_hdl->libzfs_fd,
4345 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
4346 /* just write out a path of two object numbers */
4347 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
4348 (longlong_t)dsobj, (longlong_t)obj);
4351 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
4353 /* find out if the dataset is mounted */
4354 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
4356 /* get the corrupted object's path */
4357 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
4359 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
4362 (void) snprintf(pathname, len, "%s%s", mntpnt,
4365 (void) snprintf(pathname, len, "%s:%s",
4366 dsname, zc.zc_value);
4369 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
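/*
 * Illustrative sketch (not in the original source): pairing
 * zpool_get_errlog() with zpool_obj_to_path() to print a pathname for
 * each persistent error, much as 'zpool status -v' does.
 */
#if 0
static void
example_print_errlog(zpool_handle_t *zhp)
{
	nvlist_t *nverrlist = NULL, *nv;
	nvpair_t *elem = NULL;
	char pathname[MAXPATHLEN];

	if (zpool_get_errlog(zhp, &nverrlist) != 0)
		return;
	while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
		verify(nvpair_value_nvlist(elem, &nv) == 0);
		zpool_obj_to_path(zhp,
		    fnvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET),
		    fnvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT),
		    pathname, sizeof (pathname));
		(void) printf("%s\n", pathname);
	}
	nvlist_free(nverrlist);
}
#endif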
4376 * Read the EFI label from the config; if a label does not exist then
4377 * pass back the error to the caller. If the caller has passed a non-NULL
4378 * diskaddr argument then we set it to the starting address of the EFI
4379 * partition.
4382 read_efi_label(nvlist_t *config, diskaddr_t *sb)
4386 char diskname[MAXPATHLEN];
4389 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
4392 (void) snprintf(diskname, sizeof (diskname), "%s%s", DISK_ROOT,
4393 strrchr(path, '/'));
4394 if ((fd = open(diskname, O_RDONLY|O_DIRECT)) >= 0) {
4395 struct dk_gpt *vtoc;
4397 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
4399 *sb = vtoc->efi_parts[0].p_start;
4408 * determine where a partition starts on a disk in the current
4409 * configuration.
4412 find_start_block(nvlist_t *config)
4416 diskaddr_t sb = MAXOFFSET_T;
4419 if (nvlist_lookup_nvlist_array(config,
4420 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
4421 if (nvlist_lookup_uint64(config,
4422 ZPOOL_CONFIG_WHOLE_DISK,
4423 &wholedisk) != 0 || !wholedisk) {
4424 return (MAXOFFSET_T);
4426 if (read_efi_label(config, &sb) < 0)
4431 for (c = 0; c < children; c++) {
4432 sb = find_start_block(child[c]);
4433 if (sb != MAXOFFSET_T) {
4437 return (MAXOFFSET_T);
4441 zpool_label_disk_check(char *path)
4443 struct dk_gpt *vtoc;
4446 if ((fd = open(path, O_RDONLY|O_DIRECT)) < 0)
4449 if ((err = efi_alloc_and_read(fd, &vtoc)) != 0) {
4454 if (vtoc->efi_flags & EFI_GPT_PRIMARY_CORRUPT) {
4466 * Generate a unique partition name for the ZFS member. Partitions must
4467 * have unique names to ensure udev will be able to create symlinks under
4468 * /dev/disk/by-partlabel/ for all pool members. The partition names are
4469 * of the form <pool>-<unique-id>.
4472 zpool_label_name(char *label_name, int label_size)
4477 fd = open("/dev/urandom", O_RDONLY);
4479 if (read(fd, &id, sizeof (id)) != sizeof (id))
4486 id = (((uint64_t)rand()) << 32) | (uint64_t)rand();
4488 snprintf(label_name, label_size, "zfs-%016llx", (u_longlong_t)id);
4492 * Label an individual disk. The name provided is the short name,
4493 * stripped of any leading /dev path.
4496 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
4498 char path[MAXPATHLEN];
4499 struct dk_gpt *vtoc;
4501 size_t resv = EFI_MIN_RESV_SIZE;
4502 uint64_t slice_size;
4503 diskaddr_t start_block;
4506 /* prepare an error message just in case */
4507 (void) snprintf(errbuf, sizeof (errbuf),
4508 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
4513 verify(nvlist_lookup_nvlist(zhp->zpool_config,
4514 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
4516 if (zhp->zpool_start_block == 0)
4517 start_block = find_start_block(nvroot);
4519 start_block = zhp->zpool_start_block;
4520 zhp->zpool_start_block = start_block;
4523 start_block = NEW_START_BLOCK;
4526 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
4528 if ((fd = open(path, O_RDWR|O_DIRECT|O_EXCL)) < 0) {
4530 * This shouldn't happen. We've long since verified that this
4531 * is a valid device.
4533 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
4534 "label '%s': unable to open device: %d"), path, errno);
4535 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
4538 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
4540 * The only way this can fail is if we run out of memory, or we
4541 * were unable to read the disk's capacity.
4543 if (errno == ENOMEM)
4544 (void) no_memory(hdl);
4547 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
4548 "label '%s': unable to read disk capacity"), path);
4550 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
4553 slice_size = vtoc->efi_last_u_lba + 1;
4554 slice_size -= EFI_MIN_RESV_SIZE;
4555 if (start_block == MAXOFFSET_T)
4556 start_block = NEW_START_BLOCK;
4557 slice_size -= start_block;
4558 slice_size = P2ALIGN(slice_size, PARTITION_END_ALIGNMENT);
4560 vtoc->efi_parts[0].p_start = start_block;
4561 vtoc->efi_parts[0].p_size = slice_size;
4564 * Why we use V_USR: V_BACKUP confuses users, and is considered
4565 * disposable by some EFI utilities (since EFI doesn't have a backup
4566 * slice). V_UNASSIGNED is supposed to be used only for zero size
4567 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
4568 * etc. were all pretty specific. V_USR is as close to reality as we
4569 * can get, in the absence of V_OTHER.
4571 vtoc->efi_parts[0].p_tag = V_USR;
4572 zpool_label_name(vtoc->efi_parts[0].p_name, EFI_PART_NAME_LEN);
4574 vtoc->efi_parts[8].p_start = slice_size + start_block;
4575 vtoc->efi_parts[8].p_size = resv;
4576 vtoc->efi_parts[8].p_tag = V_RESERVED;
4578 rval = efi_write(fd, vtoc);
4580 /* Flush the buffers to disk and invalidate the page cache. */
4582 (void) ioctl(fd, BLKFLSBUF);
4585 rval = efi_rescan(fd);
4588 * Some block drivers (like pcata) may not support EFI GPT labels.
4589 * Print out a helpful error message directing the user to manually
4590 * label the disk and give a specific slice.
4596 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "try using "
4597 "parted(8) and then provide a specific slice: %d"), rval);
4598 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4604 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
4605 (void) zfs_append_partition(path, MAXPATHLEN);
4607 /* Wait for udev to signal that the device has settled. */
4608 rval = zpool_label_disk_wait(path, DISK_LABEL_WAIT);
4610 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "failed to "
4611 "detect device partitions on '%s': %d"), path, rval);
4612 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4615 /* We can't be too paranoid. Read the label back and verify it. */
4616 (void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
4617 rval = zpool_label_disk_check(path);
4619 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "freshly written "
4620 "EFI label on '%s' is damaged. Ensure\nthis device "
4621 "is not in in use, and is functioning properly: %d"),
4623 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4630 * Allocate and return the underlying device name for a device mapper device.
4631 * If a device mapper device maps to multiple devices, return the first device.
4633 * For example, dm_name = "/dev/dm-0" could return "/dev/sda". Symlinks to a
4634 * DM device (like /dev/disk/by-vdev/A0) are also allowed.
4636 * Returns device name, or NULL on error or no match. If dm_name is not a DM
4637 * device then return NULL.
4639 * NOTE: The returned name string must be *freed*.
4642 dm_get_underlying_path(char *dm_name)
4652 if (dm_name == NULL)
4655 /* dm name may be a symlink (like /dev/disk/by-vdev/A0) */
4656 realp = realpath(dm_name, NULL);
4661 * If they preface 'dev' with a path (like "/dev") then strip it off.
4662 * We just want the 'dm-N' part.
4664 tmp = strrchr(realp, '/');
4666 dev_str = tmp + 1; /* +1 since we want the chr after '/' */
4670 size = asprintf(&tmp, "/sys/block/%s/slaves/", dev_str);
4671 if (size == -1 || !tmp)
4678 /* Return first sd* entry in /sys/block/dm-N/slaves/ */
4679 while ((ep = readdir(dp))) {
4680 if (ep->d_type != DT_DIR) { /* skip "." and ".." dirs */
4681 size = asprintf(&path, "/dev/%s", ep->d_name);
4695 * Return 1 if device is a device mapper or multipath device.
4699 zfs_dev_is_dm(char *dev_name)
4703 tmp = dm_get_underlying_path(dev_name);
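/*
 * Illustrative examples (not in the original source); device names are
 * hypothetical:
 *
 *	zfs_dev_is_dm("/dev/dm-0")          => 1
 *	zfs_dev_is_dm("/dev/sda")           => 0
 *	dm_get_underlying_path("/dev/dm-0") => "/dev/sda" (must be freed)
 */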
4712 * By "whole disk" we mean an entire physical disk (something we can
4713 * label, toggle the write cache on, etc.) as opposed to the full
4714 * capacity of a pseudo-device such as lofi or did. We act as if we
4715 * are labeling the disk, which should be a pretty good test of whether
4716 * it's a viable device or not. Returns B_TRUE if it is and B_FALSE if
4717 * it isn't.
4720 zfs_dev_is_whole_disk(char *dev_name)
4722 struct dk_gpt *label;
4725 if ((fd = open(dev_name, O_RDONLY | O_DIRECT)) < 0)
4728 if (efi_alloc_and_init(fd, EFI_NUMPAR, &label) != 0) {
4740 * Lookup the underlying device for a device name
4742 * Often you'll have a symlink to a device, a partition device,
4743 * or a multipath device, and want to look up the underlying device.
4744 * This function returns the underlying device name. If the device
4745 * name is already the underlying device, then just return the same
4746 * name. If the device is a DM device with multiple underlying devices
4747 * then return the first one.
4751 * 1. /dev/disk/by-id/ata-QEMU_HARDDISK_QM00001 -> ../../sda
4752 * dev_name: /dev/disk/by-id/ata-QEMU_HARDDISK_QM00001
4755 * 2. /dev/mapper/mpatha (made up of /dev/sda and /dev/sdb)
4756 * dev_name: /dev/mapper/mpatha
4757 * returns: /dev/sda (first device)
4759 * 3. /dev/sda (already the underlying device)
4760 * dev_name: /dev/sda
4763 * 4. /dev/dm-3 (mapped to /dev/sda)
4764 * dev_name: /dev/dm-3
4767 * 5. /dev/disk/by-id/scsi-0QEMU_drive-scsi0-0-0-0-part9 -> ../../sdb9
4768 * dev_name: /dev/disk/by-id/scsi-0QEMU_drive-scsi0-0-0-0-part9
4771 * 6. /dev/disk/by-uuid/5df030cf-3cd9-46e4-8e99-3ccb462a4e9a -> ../dev/sda2
4772 * dev_name: /dev/disk/by-uuid/5df030cf-3cd9-46e4-8e99-3ccb462a4e9a
4775 * Returns underlying device name, or NULL on error or no match.
4777 * NOTE: The returned name string must be *freed*.
4780 zfs_get_underlying_path(char *dev_name)
4785 if (dev_name == NULL)
4788 tmp = dm_get_underlying_path(dev_name);
4790 /* dev_name not a DM device, so just un-symlinkize it */
4792 tmp = realpath(dev_name, NULL);
4795 name = zfs_strip_partition_path(tmp);
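/*
 * Illustrative sketch (not in the original source): resolving a by-id
 * symlink down to its base device, per example 1 above.
 */
#if 0
static void
example_underlying(void)
{
	char *dev = zfs_get_underlying_path(
	    "/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001");

	if (dev != NULL) {
		(void) printf("%s\n", dev);	/* "/dev/sda" */
		free(dev);
	}
}
#endif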
4803 * Given a dev name like "sda", return the full enclosure sysfs path to
4804 * the disk. You can also pass in the name with "/dev" prepended
4805 * to it (like /dev/sda).
4807 * For example, disk "sda" in enclosure slot 1:
4809 * returns: "/sys/class/enclosure/1:0:3:0/Slot 1"
4811 * 'dev' must be a non-devicemapper device.
4813 * Returned string must be freed.
4816 zfs_get_enclosure_sysfs_path(char *dev_name)
4820 char buf[MAXPATHLEN];
4828 if (dev_name == NULL)
4831 /* If they preface 'dev' with a path (like "/dev") then strip it off */
4832 tmp1 = strrchr(dev_name, '/');
4834 dev_name = tmp1 + 1; /* +1 since we want the chr after '/' */
4836 tmpsize = asprintf(&tmp1, "/sys/block/%s/device", dev_name);
4837 if (tmpsize == -1 || tmp1 == NULL) {
4844 tmp1 = NULL; /* To make free() at the end a NOP */
4849 * Look though all sysfs entries in /sys/block/<dev>/device for
4850 * the enclosure symlink.
4852 while ((ep = readdir(dp))) {
4853 /* Ignore everything that's not our enclosure_device link */
4854 if (strstr(ep->d_name, "enclosure_device") == NULL)
4857 if (asprintf(&tmp2, "%s/%s", tmp1, ep->d_name) == -1 ||
4861 size = readlink(tmp2, buf, sizeof (buf));
4863 /* Did readlink fail or crop the link name? */
4864 if (size == -1 || size >= sizeof (buf)) {
4866 tmp2 = NULL; /* To make free() at the end a NOP */
4871 * We got a valid link. readlink() doesn't NUL-terminate strings,
4872 * so we have to do it.
4877 * Our link will look like:
4879 * "../../../../port-11:1:2/..STUFF../enclosure/1:0:3:0/SLOT 1"
4881 * We want to grab the "enclosure/1:0:3:0/SLOT 1" part
4883 tmp3 = strstr(buf, "enclosure");
4887 if (asprintf(&path, "/sys/class/%s", tmp3) == -1) {
4888 /* If asprintf() fails, 'path' is undefined */