4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
25 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
26 * Copyright 2016 Nexenta Systems, Inc.
27 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
30 #include <sys/types.h>
42 #include <sys/zfs_ioctl.h>
45 #include "zfs_namecheck.h"
47 #include "libzfs_impl.h"
48 #include "zfs_comutil.h"
49 #include "zfeature_common.h"
51 static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
53 #define BACKUP_SLICE "s2"
55 typedef struct prop_flags {
56 int create:1; /* Validate property on creation */
57 int import:1; /* Validate property on import */
61 * ====================================================================
62 * zpool property functions
63 * ====================================================================
67 zpool_get_all_props(zpool_handle_t *zhp)
70 libzfs_handle_t *hdl = zhp->zpool_hdl;
72 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
74 if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
77 while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
78 if (errno == ENOMEM) {
79 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
80 zcmd_free_nvlists(&zc);
84 zcmd_free_nvlists(&zc);
89 if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
90 zcmd_free_nvlists(&zc);
94 zcmd_free_nvlists(&zc);
100 zpool_props_refresh(zpool_handle_t *zhp)
104 old_props = zhp->zpool_props;
106 if (zpool_get_all_props(zhp) != 0)
109 nvlist_free(old_props);
114 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
120 zprop_source_t source;
122 nvl = zhp->zpool_props;
123 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
124 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
126 verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
128 source = ZPROP_SRC_DEFAULT;
129 if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
140 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
144 zprop_source_t source;
146 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
148 * zpool_get_all_props() has most likely failed because
149 * the pool is faulted, but if all we need is the top level
150 * vdev's guid then get it from the zhp config nvlist.
152 if ((prop == ZPOOL_PROP_GUID) &&
153 (nvlist_lookup_nvlist(zhp->zpool_config,
154 ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
155 (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
159 return (zpool_prop_default_numeric(prop));
162 nvl = zhp->zpool_props;
163 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
164 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
166 verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
168 source = ZPROP_SRC_DEFAULT;
169 value = zpool_prop_default_numeric(prop);
179 * Map VDEV STATE to printed strings.
182 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
185 case VDEV_STATE_CLOSED:
186 case VDEV_STATE_OFFLINE:
187 return (gettext("OFFLINE"));
188 case VDEV_STATE_REMOVED:
189 return (gettext("REMOVED"));
190 case VDEV_STATE_CANT_OPEN:
191 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
192 return (gettext("FAULTED"));
193 else if (aux == VDEV_AUX_SPLIT_POOL)
194 return (gettext("SPLIT"));
196 return (gettext("UNAVAIL"));
197 case VDEV_STATE_FAULTED:
198 return (gettext("FAULTED"));
199 case VDEV_STATE_DEGRADED:
200 return (gettext("DEGRADED"));
201 case VDEV_STATE_HEALTHY:
202 return (gettext("ONLINE"));
208 return (gettext("UNKNOWN"));
212 * Map POOL STATE to printed strings.
215 zpool_pool_state_to_name(pool_state_t state)
218 case POOL_STATE_ACTIVE:
219 return (gettext("ACTIVE"));
220 case POOL_STATE_EXPORTED:
221 return (gettext("EXPORTED"));
222 case POOL_STATE_DESTROYED:
223 return (gettext("DESTROYED"));
224 case POOL_STATE_SPARE:
225 return (gettext("SPARE"));
226 case POOL_STATE_L2CACHE:
227 return (gettext("L2CACHE"));
228 case POOL_STATE_UNINITIALIZED:
229 return (gettext("UNINITIALIZED"));
230 case POOL_STATE_UNAVAIL:
231 return (gettext("UNAVAIL"));
232 case POOL_STATE_POTENTIALLY_ACTIVE:
233 return (gettext("POTENTIALLY_ACTIVE"));
236 return (gettext("UNKNOWN"));
240 * Get a zpool property value for 'prop' and return the value in
241 * a pre-allocated buffer.
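 *
 * A hypothetical usage sketch (not part of this file; the property and
 * buffer size are illustrative):
 *
 *      char buf[ZFS_MAXPROPLEN];
 *      if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf, sizeof (buf),
 *          NULL, B_FALSE) == 0)
 *              (void) printf("health: %s\n", buf);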
244 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
245 zprop_source_t *srctype, boolean_t literal)
249 zprop_source_t src = ZPROP_SRC_NONE;
254 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
256 case ZPOOL_PROP_NAME:
257 (void) strlcpy(buf, zpool_get_name(zhp), len);
260 case ZPOOL_PROP_HEALTH:
262 zpool_pool_state_to_name(POOL_STATE_UNAVAIL), len);
265 case ZPOOL_PROP_GUID:
266 intval = zpool_get_prop_int(zhp, prop, &src);
267 (void) snprintf(buf, len, "%llu", intval);
270 case ZPOOL_PROP_ALTROOT:
271 case ZPOOL_PROP_CACHEFILE:
272 case ZPOOL_PROP_COMMENT:
273 if (zhp->zpool_props != NULL ||
274 zpool_get_all_props(zhp) == 0) {
276 zpool_get_prop_string(zhp, prop, &src),
282 (void) strlcpy(buf, "-", len);
291 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
292 prop != ZPOOL_PROP_NAME)
295 switch (zpool_prop_get_type(prop)) {
296 case PROP_TYPE_STRING:
297 (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
301 case PROP_TYPE_NUMBER:
302 intval = zpool_get_prop_int(zhp, prop, &src);
305 case ZPOOL_PROP_SIZE:
306 case ZPOOL_PROP_ALLOCATED:
307 case ZPOOL_PROP_FREE:
308 case ZPOOL_PROP_FREEING:
309 case ZPOOL_PROP_LEAKED:
311 (void) snprintf(buf, len, "%llu",
312 (u_longlong_t)intval);
314 (void) zfs_nicenum(intval, buf, len);
317 case ZPOOL_PROP_EXPANDSZ:
319 (void) strlcpy(buf, "-", len);
320 } else if (literal) {
321 (void) snprintf(buf, len, "%llu",
322 (u_longlong_t)intval);
324 (void) zfs_nicenum(intval, buf, len);
327 case ZPOOL_PROP_CAPACITY:
329 (void) snprintf(buf, len, "%llu",
330 (u_longlong_t)intval);
332 (void) snprintf(buf, len, "%llu%%",
333 (u_longlong_t)intval);
336 case ZPOOL_PROP_FRAGMENTATION:
337 if (intval == UINT64_MAX) {
338 (void) strlcpy(buf, "-", len);
340 (void) snprintf(buf, len, "%llu%%",
341 (u_longlong_t)intval);
344 case ZPOOL_PROP_DEDUPRATIO:
345 (void) snprintf(buf, len, "%llu.%02llux",
346 (u_longlong_t)(intval / 100),
347 (u_longlong_t)(intval % 100));
349 case ZPOOL_PROP_HEALTH:
350 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
351 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
352 verify(nvlist_lookup_uint64_array(nvroot,
353 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
356 (void) strlcpy(buf, zpool_state_to_name(intval,
359 case ZPOOL_PROP_VERSION:
360 if (intval >= SPA_VERSION_FEATURES) {
361 (void) snprintf(buf, len, "-");
366 (void) snprintf(buf, len, "%llu", intval);
370 case PROP_TYPE_INDEX:
371 intval = zpool_get_prop_int(zhp, prop, &src);
372 if (zpool_prop_index_to_string(prop, intval, &strval)
375 (void) strlcpy(buf, strval, len);
389 * Check that the bootfs name has the same pool name as the pool it is
390 * being set on. Assumes bootfs is a valid dataset name.
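 *
 * For example, with pool "tank", the values "tank" and "tank/ROOT/be" would
 * pass this check, while "other/fs" would not (names are illustrative).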
393 bootfs_name_valid(const char *pool, char *bootfs)
395 int len = strlen(pool);
397 if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
400 if (strncmp(pool, bootfs, len) == 0 &&
401 (bootfs[len] == '/' || bootfs[len] == '\0'))
408 zpool_is_bootable(zpool_handle_t *zhp)
410 char bootfs[ZPOOL_MAXNAMELEN];
412 return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
413 sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-",
414 sizeof (bootfs)) != 0);
419 * Given an nvlist of zpool properties to be set, validate that they are
420 * correct, and parse any numeric properties (index, boolean, etc) if they are
421 * specified as strings.
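 *
 * A hypothetical sketch of a caller (property name and pool name are
 * illustrative; version, flags, and errbuf are prepared by the caller):
 *
 *      nvlist_t *props, *checked;
 *      if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0 &&
 *          nvlist_add_string(props, "autoreplace", "on") == 0)
 *              checked = zpool_valid_proplist(hdl, "tank", props,
 *                  version, flags, errbuf);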
424 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
425 nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
433 struct stat64 statbuf;
436 if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
437 (void) no_memory(hdl);
442 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
443 const char *propname = nvpair_name(elem);
445 prop = zpool_name_to_prop(propname);
446 if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
448 char *fname = strchr(propname, '@') + 1;
450 err = zfeature_lookup_name(fname, NULL);
452 ASSERT3U(err, ==, ENOENT);
453 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
454 "invalid feature '%s'"), fname);
455 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
459 if (nvpair_type(elem) != DATA_TYPE_STRING) {
460 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
461 "'%s' must be a string"), propname);
462 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
466 (void) nvpair_value_string(elem, &strval);
467 if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) {
468 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
469 "property '%s' can only be set to "
470 "'enabled'"), propname);
471 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
475 if (nvlist_add_uint64(retprops, propname, 0) != 0) {
476 (void) no_memory(hdl);
483 * Make sure this property is valid and applies to this type.
485 if (prop == ZPROP_INVAL) {
486 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
487 "invalid property '%s'"), propname);
488 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
492 if (zpool_prop_readonly(prop)) {
493 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
494 "is readonly"), propname);
495 (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
499 if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
500 &strval, &intval, errbuf) != 0)
504 * Perform additional checking for specific properties.
507 case ZPOOL_PROP_VERSION:
508 if (intval < version ||
509 !SPA_VERSION_IS_SUPPORTED(intval)) {
510 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
511 "property '%s' number %d is invalid."),
513 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
518 case ZPOOL_PROP_BOOTFS:
519 if (flags.create || flags.import) {
520 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
521 "property '%s' cannot be set at creation "
522 "or import time"), propname);
523 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
527 if (version < SPA_VERSION_BOOTFS) {
528 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
529 "pool must be upgraded to support "
530 "'%s' property"), propname);
531 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
536 * The bootfs property value has to be a dataset name, and
537 * the dataset has to be in the same pool it is being set on.
539 if (strval[0] != '\0' && !bootfs_name_valid(poolname,
541 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
542 "is an invalid name"), strval);
543 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
547 if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
548 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
549 "could not open pool '%s'"), poolname);
550 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
556 case ZPOOL_PROP_ALTROOT:
557 if (!flags.create && !flags.import) {
558 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
559 "property '%s' can only be set during pool "
560 "creation or import"), propname);
561 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
565 if (strval[0] != '/') {
566 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
567 "bad alternate root '%s'"), strval);
568 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
573 case ZPOOL_PROP_CACHEFILE:
574 if (strval[0] == '\0')
577 if (strcmp(strval, "none") == 0)
580 if (strval[0] != '/') {
581 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
582 "property '%s' must be empty, an "
583 "absolute path, or 'none'"), propname);
584 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
588 slash = strrchr(strval, '/');
590 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
591 strcmp(slash, "/..") == 0) {
592 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
593 "'%s' is not a valid file"), strval);
594 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
600 if (strval[0] != '\0' &&
601 (stat64(strval, &statbuf) != 0 ||
602 !S_ISDIR(statbuf.st_mode))) {
603 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
604 "'%s' is not a valid directory"),
606 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
613 case ZPOOL_PROP_COMMENT:
614 for (check = strval; *check != '\0'; check++) {
615 if (!isprint(*check)) {
617 dgettext(TEXT_DOMAIN,
618 "comment may only have printable "
620 (void) zfs_error(hdl, EZFS_BADPROP,
625 if (strlen(strval) > ZPROP_MAX_COMMENT) {
626 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
627 "comment must not exceed %d characters"),
629 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
633 case ZPOOL_PROP_READONLY:
635 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
636 "property '%s' can only be set at "
637 "import time"), propname);
638 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
644 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
645 "property '%s'(%d) not defined"), propname, prop);
652 nvlist_free(retprops);
657 * Set zpool property: propname=propval.
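 *
 * A hypothetical call (property name and value are illustrative):
 *
 *      (void) zpool_set_prop(zhp, "autoexpand", "on");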
660 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
662 zfs_cmd_t zc = { 0 };
665 nvlist_t *nvl = NULL;
668 prop_flags_t flags = { 0 };
670 (void) snprintf(errbuf, sizeof (errbuf),
671 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
674 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
675 return (no_memory(zhp->zpool_hdl));
677 if (nvlist_add_string(nvl, propname, propval) != 0) {
679 return (no_memory(zhp->zpool_hdl));
682 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
683 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
684 zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
693 * Execute the corresponding ioctl() to set this property.
695 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
697 if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
702 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
704 zcmd_free_nvlists(&zc);
708 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
710 (void) zpool_props_refresh(zhp);
716 zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
718 libzfs_handle_t *hdl = zhp->zpool_hdl;
720 char buf[ZFS_MAXPROPLEN];
721 nvlist_t *features = NULL;
723 boolean_t firstexpand = (NULL == *plp);
725 if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
729 while (*last != NULL)
730 last = &(*last)->pl_next;
733 features = zpool_get_features(zhp);
735 if ((*plp)->pl_all && firstexpand) {
736 for (int i = 0; i < SPA_FEATURES; i++) {
737 zprop_list_t *entry = zfs_alloc(hdl,
738 sizeof (zprop_list_t));
739 entry->pl_prop = ZPROP_INVAL;
740 entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
741 spa_feature_table[i].fi_uname);
742 entry->pl_width = strlen(entry->pl_user_prop);
743 entry->pl_all = B_TRUE;
746 last = &entry->pl_next;
750 /* add any unsupported features */
751 for (nvpair_t *nvp = nvlist_next_nvpair(features, NULL);
752 nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
757 if (zfeature_is_supported(nvpair_name(nvp)))
760 propname = zfs_asprintf(hdl, "unsupported@%s",
764 * Before adding the property to the list make sure that no
765 * other pool already added the same property.
769 while (entry != NULL) {
770 if (entry->pl_user_prop != NULL &&
771 strcmp(propname, entry->pl_user_prop) == 0) {
775 entry = entry->pl_next;
782 entry = zfs_alloc(hdl, sizeof (zprop_list_t));
783 entry->pl_prop = ZPROP_INVAL;
784 entry->pl_user_prop = propname;
785 entry->pl_width = strlen(entry->pl_user_prop);
786 entry->pl_all = B_TRUE;
789 last = &entry->pl_next;
792 for (entry = *plp; entry != NULL; entry = entry->pl_next) {
797 if (entry->pl_prop != ZPROP_INVAL &&
798 zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
799 NULL, B_FALSE) == 0) {
800 if (strlen(buf) > entry->pl_width)
801 entry->pl_width = strlen(buf);
809 * Get the state for the given feature on the given ZFS pool.
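 *
 * A hypothetical sketch; "feature@async_destroy" is only an example, and the
 * buffer receives one of the ZFS_FEATURE_* / ZFS_UNSUPPORTED_* strings used
 * below:
 *
 *      char state[64];
 *      if (zpool_prop_get_feature(zhp, "feature@async_destroy", state,
 *          sizeof (state)) == 0)
 *              (void) printf("%s\n", state);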
812 zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
816 boolean_t found = B_FALSE;
817 nvlist_t *features = zpool_get_features(zhp);
819 const char *feature = strchr(propname, '@') + 1;
821 supported = zpool_prop_feature(propname);
822 ASSERT(supported || zpool_prop_unsupported(propname));
825 * Convert from feature name to feature guid. This conversion is
826 * unnecessary for unsupported@... properties because they already
833 ret = zfeature_lookup_name(feature, &fid);
835 (void) strlcpy(buf, "-", len);
838 feature = spa_feature_table[fid].fi_guid;
841 if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
846 (void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
849 (void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
851 (void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
856 (void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
858 (void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
861 (void) strlcpy(buf, "-", len);
870 * Don't start the slice at the default block of 34; many storage
871 * devices will use a stripe width of 128k, so start there instead.
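 * (256 blocks of 512 bytes each is 128K, so the slice begins on a stripe
 * boundary.)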
873 #define NEW_START_BLOCK 256
876 * Validate the given pool name, optionally putting an extended error message in
880 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
886 ret = pool_namecheck(pool, &why, &what);
889 * The rules for reserved pool names were extended at a later point.
890 * But we need to support users with existing pools that may now be
891 * invalid. So we only check for this expanded set of names during a
892 * create (or import), and only in userland.
894 if (ret == 0 && !isopen &&
895 (strncmp(pool, "mirror", 6) == 0 ||
896 strncmp(pool, "raidz", 5) == 0 ||
897 strncmp(pool, "spare", 5) == 0 ||
898 strcmp(pool, "log") == 0)) {
901 dgettext(TEXT_DOMAIN, "name is reserved"));
909 case NAME_ERR_TOOLONG:
911 dgettext(TEXT_DOMAIN, "name is too long"));
914 case NAME_ERR_INVALCHAR:
916 dgettext(TEXT_DOMAIN, "invalid character "
917 "'%c' in pool name"), what);
920 case NAME_ERR_NOLETTER:
921 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
922 "name must begin with a letter"));
925 case NAME_ERR_RESERVED:
926 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
927 "name is reserved"));
930 case NAME_ERR_DISKLIKE:
931 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
932 "pool name is reserved"));
935 case NAME_ERR_LEADING_SLASH:
936 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
937 "leading slash in name"));
940 case NAME_ERR_EMPTY_COMPONENT:
941 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
942 "empty component in name"));
945 case NAME_ERR_TRAILING_SLASH:
946 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
947 "trailing slash in name"));
950 case NAME_ERR_MULTIPLE_AT:
951 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
952 "multiple '@' delimiters in name"));
956 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
957 "(%d) not defined"), why);
968 * Open a handle to the given pool, even if the pool is currently in the FAULTED
972 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
978 * Make sure the pool name is valid.
980 if (!zpool_name_valid(hdl, B_TRUE, pool)) {
981 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
982 dgettext(TEXT_DOMAIN, "cannot open '%s'"),
987 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
990 zhp->zpool_hdl = hdl;
991 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
993 if (zpool_refresh_stats(zhp, &missing) != 0) {
999 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
1000 (void) zfs_error_fmt(hdl, EZFS_NOENT,
1001 dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
1010 * Like the above, but silent on error. Used when iterating over pools (because
1011 * the configuration cache may be out of date).
1014 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
1016 zpool_handle_t *zhp;
1019 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
1022 zhp->zpool_hdl = hdl;
1023 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
1025 if (zpool_refresh_stats(zhp, &missing) != 0) {
1041 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
1045 zpool_open(libzfs_handle_t *hdl, const char *pool)
1047 zpool_handle_t *zhp;
1049 if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
1052 if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
1053 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
1054 dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
1063 * Close the handle. Simply frees the memory associated with the handle.
1066 zpool_close(zpool_handle_t *zhp)
1068 nvlist_free(zhp->zpool_config);
1069 nvlist_free(zhp->zpool_old_config);
1070 nvlist_free(zhp->zpool_props);
1075 * Return the name of the pool.
1078 zpool_get_name(zpool_handle_t *zhp)
1080 return (zhp->zpool_name);
1085 * Return the state of the pool (ACTIVE or UNAVAILABLE)
1088 zpool_get_state(zpool_handle_t *zhp)
1090 return (zhp->zpool_state);
1094 * Create the named pool, using the provided vdev list. It is assumed
1095 * that the consumer has already validated the contents of the nvlist, so we
1096 * don't have to worry about error semantics.
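 *
 * A hypothetical sketch (pool name is illustrative; nvroot must already
 * describe the vdev layout, and props/fsprops may be NULL):
 *
 *      ret = zpool_create(hdl, "tank", nvroot, NULL, NULL);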
1099 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
1100 nvlist_t *props, nvlist_t *fsprops)
1102 zfs_cmd_t zc = { 0 };
1103 nvlist_t *zc_fsprops = NULL;
1104 nvlist_t *zc_props = NULL;
1108 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1109 "cannot create '%s'"), pool);
1111 if (!zpool_name_valid(hdl, B_FALSE, pool))
1112 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
1114 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1118 prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };
1120 if ((zc_props = zpool_valid_proplist(hdl, pool, props,
1121 SPA_VERSION_1, flags, msg)) == NULL) {
1130 zoned = ((nvlist_lookup_string(fsprops,
1131 zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
1132 strcmp(zonestr, "on") == 0);
1134 if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
1135 fsprops, zoned, NULL, NULL, msg)) == NULL) {
1139 (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
1142 if (nvlist_add_nvlist(zc_props,
1143 ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
1148 if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
1151 (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
1153 if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
1155 zcmd_free_nvlists(&zc);
1156 nvlist_free(zc_props);
1157 nvlist_free(zc_fsprops);
1162 * This can happen if the user has specified the same
1163 * device multiple times. We can't reliably detect this
1164 * until we try to add it and see we already have a
1167 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1168 "one or more vdevs refer to the same device"));
1169 return (zfs_error(hdl, EZFS_BADDEV, msg));
1173 * This happens if the record size is smaller or larger
1174 * than the allowed size range, or not a power of 2.
1176 * NOTE: although zfs_valid_proplist is called earlier,
1177 * this case may have slipped through since the
1178 * pool does not exist yet and it is therefore
1179 * impossible to read properties e.g. max blocksize
1182 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1183 "record size invalid"));
1184 return (zfs_error(hdl, EZFS_BADPROP, msg));
1188 * This occurs when one of the devices is below
1189 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1190 * device was the problem device since there's no
1191 * reliable way to determine device size from userland.
1196 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
1198 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1199 "one or more devices is less than the "
1200 "minimum size (%s)"), buf);
1202 return (zfs_error(hdl, EZFS_BADDEV, msg));
1205 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1206 "one or more devices is out of space"));
1207 return (zfs_error(hdl, EZFS_BADDEV, msg));
1210 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1211 "cache device must be a disk or disk slice"));
1212 return (zfs_error(hdl, EZFS_BADDEV, msg));
1215 return (zpool_standard_error(hdl, errno, msg));
1220 zcmd_free_nvlists(&zc);
1221 nvlist_free(zc_props);
1222 nvlist_free(zc_fsprops);
1227 * Destroy the given pool. It is up to the caller to ensure that there are no
1228 * datasets left in the pool.
1231 zpool_destroy(zpool_handle_t *zhp, const char *log_str)
1233 zfs_cmd_t zc = { 0 };
1234 zfs_handle_t *zfp = NULL;
1235 libzfs_handle_t *hdl = zhp->zpool_hdl;
1238 if (zhp->zpool_state == POOL_STATE_ACTIVE &&
1239 (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
1242 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1243 zc.zc_history = (uint64_t)(uintptr_t)log_str;
1245 if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
1246 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1247 "cannot destroy '%s'"), zhp->zpool_name);
1249 if (errno == EROFS) {
1250 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1251 "one or more devices is read only"));
1252 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1254 (void) zpool_standard_error(hdl, errno, msg);
1263 remove_mountpoint(zfp);
1271 * Add the given vdevs to the pool. The caller must have already performed the
1272 * necessary verification to ensure that the vdev specification is well-formed.
1275 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
1277 zfs_cmd_t zc = { 0 };
1279 libzfs_handle_t *hdl = zhp->zpool_hdl;
1281 nvlist_t **spares, **l2cache;
1282 uint_t nspares, nl2cache;
1284 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1285 "cannot add to '%s'"), zhp->zpool_name);
1287 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1288 SPA_VERSION_SPARES &&
1289 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1290 &spares, &nspares) == 0) {
1291 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1292 "upgraded to add hot spares"));
1293 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1296 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1297 SPA_VERSION_L2CACHE &&
1298 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1299 &l2cache, &nl2cache) == 0) {
1300 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1301 "upgraded to add cache devices"));
1302 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1305 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1307 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1309 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
1313 * This can happen if the user has specified the same
1314 * device multiple times. We can't reliably detect this
1315 * until we try to add it and see we already have a
1318 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1319 "one or more vdevs refer to the same device"));
1320 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1325 * This occurs when one of the devices is below
1326 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1327 * device was the problem device since there's no
1328 * reliable way to determine device size from userland.
1333 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
1335 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1336 "device is less than the minimum "
1339 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1343 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1344 "pool must be upgraded to add these vdevs"));
1345 (void) zfs_error(hdl, EZFS_BADVERSION, msg);
1349 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1350 "root pool can not have multiple vdevs"
1351 " or separate logs"));
1352 (void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
1356 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1357 "cache device must be a disk or disk slice"));
1358 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1362 (void) zpool_standard_error(hdl, errno, msg);
1370 zcmd_free_nvlists(&zc);
1376 * Exports the pool from the system. The caller must ensure that there are no
1377 * mounted datasets in the pool.
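 *
 * Callers normally use the zpool_export()/zpool_export_force() wrappers
 * defined below, e.g. (hypothetical history string):
 *
 *      ret = zpool_export(zhp, B_FALSE, "export example");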
1380 zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
1381 const char *log_str)
1383 zfs_cmd_t zc = { 0 };
1386 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1387 "cannot export '%s'"), zhp->zpool_name);
1389 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1390 zc.zc_cookie = force;
1391 zc.zc_guid = hardforce;
1392 zc.zc_history = (uint64_t)(uintptr_t)log_str;
1394 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
1397 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
1398 "use '-f' to override the following errors:\n"
1399 "'%s' has an active shared spare which could be"
1400 " used by other pools once '%s' is exported."),
1401 zhp->zpool_name, zhp->zpool_name);
1402 return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
1405 return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
1414 zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
1416 return (zpool_export_common(zhp, force, B_FALSE, log_str));
1420 zpool_export_force(zpool_handle_t *zhp, const char *log_str)
1422 return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
1426 zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
1429 nvlist_t *nv = NULL;
1435 if (!hdl->libzfs_printerr || config == NULL)
1438 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1439 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
1443 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1445 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1447 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1448 strftime(timestr, 128, 0, &t) != 0) {
1450 (void) printf(dgettext(TEXT_DOMAIN,
1451 "Would be able to return %s "
1452 "to its state as of %s.\n"),
1455 (void) printf(dgettext(TEXT_DOMAIN,
1456 "Pool %s returned to its state as of %s.\n"),
1460 (void) printf(dgettext(TEXT_DOMAIN,
1461 "%s approximately %lld "),
1462 dryrun ? "Would discard" : "Discarded",
1464 (void) printf(dgettext(TEXT_DOMAIN,
1465 "minutes of transactions.\n"));
1466 } else if (loss > 0) {
1467 (void) printf(dgettext(TEXT_DOMAIN,
1468 "%s approximately %lld "),
1469 dryrun ? "Would discard" : "Discarded", loss);
1470 (void) printf(dgettext(TEXT_DOMAIN,
1471 "seconds of transactions.\n"));
1477 zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
1480 nvlist_t *nv = NULL;
1482 uint64_t edata = UINT64_MAX;
1487 if (!hdl->libzfs_printerr)
1491 (void) printf(dgettext(TEXT_DOMAIN, "action: "));
1493 (void) printf(dgettext(TEXT_DOMAIN, "\t"));
1495 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
1496 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1497 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
1498 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1501 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1502 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
1505 (void) printf(dgettext(TEXT_DOMAIN,
1506 "Recovery is possible, but will result in some data loss.\n"));
1508 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1509 strftime(timestr, 128, 0, &t) != 0) {
1510 (void) printf(dgettext(TEXT_DOMAIN,
1511 "\tReturning the pool to its state as of %s\n"
1512 "\tshould correct the problem. "),
1515 (void) printf(dgettext(TEXT_DOMAIN,
1516 "\tReverting the pool to an earlier state "
1517 "should correct the problem.\n\t"));
1521 (void) printf(dgettext(TEXT_DOMAIN,
1522 "Approximately %lld minutes of data\n"
1523 "\tmust be discarded, irreversibly. "), (loss + 30) / 60);
1524 } else if (loss > 0) {
1525 (void) printf(dgettext(TEXT_DOMAIN,
1526 "Approximately %lld seconds of data\n"
1527 "\tmust be discarded, irreversibly. "), loss);
1529 if (edata != 0 && edata != UINT64_MAX) {
1531 (void) printf(dgettext(TEXT_DOMAIN,
1532 "After rewind, at least\n"
1533 "\tone persistent user-data error will remain. "));
1535 (void) printf(dgettext(TEXT_DOMAIN,
1536 "After rewind, several\n"
1537 "\tpersistent user-data errors will remain. "));
1540 (void) printf(dgettext(TEXT_DOMAIN,
1541 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
1542 reason >= 0 ? "clear" : "import", name);
1544 (void) printf(dgettext(TEXT_DOMAIN,
1545 "A scrub of the pool\n"
1546 "\tis strongly recommended after recovery.\n"));
1550 (void) printf(dgettext(TEXT_DOMAIN,
1551 "Destroy and re-create the pool from\n\ta backup source.\n"));
1555 * zpool_import() is a contracted interface. Should be kept the same
1558 * Applications should use zpool_import_props() to import a pool with
1559 * new property values to be set.
1562 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1565 nvlist_t *props = NULL;
1568 if (altroot != NULL) {
1569 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
1570 return (zfs_error_fmt(hdl, EZFS_NOMEM,
1571 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1575 if (nvlist_add_string(props,
1576 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
1577 nvlist_add_string(props,
1578 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
1580 return (zfs_error_fmt(hdl, EZFS_NOMEM,
1581 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1586 ret = zpool_import_props(hdl, config, newname, props,
1593 print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
1599 uint64_t is_log = 0;
1601 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
1605 (void) printf("\t%*s%s%s\n", indent, "", name,
1606 is_log ? " [log]" : "");
1608 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1609 &child, &children) != 0)
1612 for (c = 0; c < children; c++) {
1613 vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE);
1614 print_vdev_tree(hdl, vname, child[c], indent + 2);
1620 zpool_print_unsup_feat(nvlist_t *config)
1622 nvlist_t *nvinfo, *unsup_feat;
1624 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
1626 verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
1629 for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
1630 nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
1633 verify(nvpair_type(nvp) == DATA_TYPE_STRING);
1634 verify(nvpair_value_string(nvp, &desc) == 0);
1636 if (strlen(desc) > 0)
1637 (void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
1639 (void) printf("\t%s\n", nvpair_name(nvp));
1644 * Import the given pool using the known configuration and a list of
1645 * properties to be set. The configuration should have come from
1646 * zpool_find_import(). The 'newname' parameter controls whether the pool
1647 * is imported with a different name.
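 *
 * A hypothetical sketch, with 'config' obtained from zpool_find_import()
 * and no property overrides:
 *
 *      ret = zpool_import_props(hdl, config, NULL, NULL,
 *          ZFS_IMPORT_NORMAL);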
1650 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1651 nvlist_t *props, int flags)
1653 zfs_cmd_t zc = { 0 };
1654 zpool_rewind_policy_t policy;
1655 nvlist_t *nv = NULL;
1656 nvlist_t *nvinfo = NULL;
1657 nvlist_t *missing = NULL;
1664 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1667 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1668 "cannot import pool '%s'"), origname);
1670 if (newname != NULL) {
1671 if (!zpool_name_valid(hdl, B_FALSE, newname))
1672 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1673 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1675 thename = (char *)newname;
1680 if (props != NULL) {
1682 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
1684 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
1687 if ((props = zpool_valid_proplist(hdl, origname,
1688 props, version, flags, errbuf)) == NULL)
1690 if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
1697 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
1699 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1702 if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
1703 zcmd_free_nvlists(&zc);
1706 if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
1707 zcmd_free_nvlists(&zc);
1711 zc.zc_cookie = flags;
1712 while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
1714 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
1715 zcmd_free_nvlists(&zc);
1722 (void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
1724 zcmd_free_nvlists(&zc);
1726 zpool_get_rewind_policy(config, &policy);
1732 * Dry-run failed, but we print out what success
1733 * looks like if we found a best txg
1735 if (policy.zrp_request & ZPOOL_TRY_REWIND) {
1736 zpool_rewind_exclaim(hdl, newname ? origname : thename,
1742 if (newname == NULL)
1743 (void) snprintf(desc, sizeof (desc),
1744 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1747 (void) snprintf(desc, sizeof (desc),
1748 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
1753 if (nv != NULL && nvlist_lookup_nvlist(nv,
1754 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
1755 nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
1756 (void) printf(dgettext(TEXT_DOMAIN, "This "
1757 "pool uses the following feature(s) not "
1758 "supported by this system:\n"));
1759 zpool_print_unsup_feat(nv);
1760 if (nvlist_exists(nvinfo,
1761 ZPOOL_CONFIG_CAN_RDONLY)) {
1762 (void) printf(dgettext(TEXT_DOMAIN,
1763 "All unsupported features are only "
1764 "required for writing to the pool."
1765 "\nThe pool can be imported using "
1766 "'-o readonly=on'.\n"));
1770 * Unsupported version.
1772 (void) zfs_error(hdl, EZFS_BADVERSION, desc);
1776 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
1780 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1781 "one or more devices is read only"));
1782 (void) zfs_error(hdl, EZFS_BADDEV, desc);
1786 if (nv && nvlist_lookup_nvlist(nv,
1787 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
1788 nvlist_lookup_nvlist(nvinfo,
1789 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
1790 (void) printf(dgettext(TEXT_DOMAIN,
1791 "The devices below are missing, use "
1792 "'-m' to import the pool anyway:\n"));
1793 print_vdev_tree(hdl, NULL, missing, 2);
1794 (void) printf("\n");
1796 (void) zpool_standard_error(hdl, error, desc);
1800 (void) zpool_standard_error(hdl, error, desc);
1804 (void) zpool_standard_error(hdl, error, desc);
1805 zpool_explain_recover(hdl,
1806 newname ? origname : thename, -error, nv);
1813 zpool_handle_t *zhp;
1816 * This should never fail, but play it safe anyway.
1818 if (zpool_open_silent(hdl, thename, &zhp) != 0)
1820 else if (zhp != NULL)
1822 if (policy.zrp_request &
1823 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
1824 zpool_rewind_exclaim(hdl, newname ? origname : thename,
1825 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
1838 zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
1840 zfs_cmd_t zc = { 0 };
1842 libzfs_handle_t *hdl = zhp->zpool_hdl;
1844 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1845 zc.zc_cookie = func;
1847 if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
1848 (errno == ENOENT && func != POOL_SCAN_NONE))
1851 if (func == POOL_SCAN_SCRUB) {
1852 (void) snprintf(msg, sizeof (msg),
1853 dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
1854 } else if (func == POOL_SCAN_NONE) {
1855 (void) snprintf(msg, sizeof (msg),
1856 dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
1859 assert(!"unexpected result");
1862 if (errno == EBUSY) {
1864 pool_scan_stat_t *ps = NULL;
1867 verify(nvlist_lookup_nvlist(zhp->zpool_config,
1868 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1869 (void) nvlist_lookup_uint64_array(nvroot,
1870 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
1871 if (ps && ps->pss_func == POOL_SCAN_SCRUB)
1872 return (zfs_error(hdl, EZFS_SCRUBBING, msg));
1874 return (zfs_error(hdl, EZFS_RESILVERING, msg));
1875 } else if (errno == ENOENT) {
1876 return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
1878 return (zpool_standard_error(hdl, errno, msg));
1884 * This provides a very minimal check whether a given string is likely a
1885 * c#t#d# style string. Users of this are expected to do their own
1886 * verification of the s# part.
1888 #define CTD_CHECK(str) (str && str[0] == 'c' && isdigit(str[1]))
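/*
 * For example, CTD_CHECK("c0t0d0s2") and CTD_CHECK("c1d0") hold, while
 * CTD_CHECK("da0") does not.
 */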
1891 * More elaborate version for ones which may start with "/dev/dsk/"
1895 ctd_check_path(char *str)
1898 * If it starts with a slash, check the last component.
1900 if (str && str[0] == '/') {
1901 char *tmp = strrchr(str, '/');
1904 * If it ends in "/old", check the second-to-last
1905 * component of the string instead.
1907 if (tmp != str && strcmp(tmp, "/old") == 0) {
1908 for (tmp--; *tmp != '/'; tmp--)
1913 return (CTD_CHECK(str));
1918 * Find a vdev that matches the search criteria specified. We use the
1919 * nvpair name to determine how we should look for the device.
1920 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
1921 * spare; but FALSE if it's an INUSE spare.
1924 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
1925 boolean_t *l2cache, boolean_t *log)
1932 nvpair_t *pair = nvlist_next_nvpair(search, NULL);
1934 /* Nothing to look for */
1935 if (search == NULL || pair == NULL)
1938 /* Obtain the key we will use to search */
1939 srchkey = nvpair_name(pair);
1941 switch (nvpair_type(pair)) {
1942 case DATA_TYPE_UINT64:
1943 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
1944 uint64_t srchval, theguid;
1946 verify(nvpair_value_uint64(pair, &srchval) == 0);
1947 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1949 if (theguid == srchval)
1954 case DATA_TYPE_STRING: {
1955 char *srchval, *val;
1957 verify(nvpair_value_string(pair, &srchval) == 0);
1958 if (nvlist_lookup_string(nv, srchkey, &val) != 0)
1962 * Search for the requested value. Special cases:
1964 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
1965 * "s0" or "s0/old". The "s0" part is hidden from the user,
1966 * but included in the string, so this matches around it.
1967 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
1969 * Otherwise, all other searches are simple string compares.
1972 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 &&
1973 ctd_check_path(val)) {
1974 uint64_t wholedisk = 0;
1976 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
1979 int slen = strlen(srchval);
1980 int vlen = strlen(val);
1982 if (slen != vlen - 2)
1986 * make_leaf_vdev() should only set
1987 * wholedisk for ZPOOL_CONFIG_PATHs which
1988 * will include "/dev/dsk/", giving plenty of
1989 * room for the indices used next.
1994 * strings identical except trailing "s0"
1996 if (strcmp(&val[vlen - 2], "s0") == 0 &&
1997 strncmp(srchval, val, slen) == 0)
2001 * strings identical except trailing "s0/old"
2003 if (strcmp(&val[vlen - 6], "s0/old") == 0 &&
2004 strcmp(&srchval[slen - 4], "/old") == 0 &&
2005 strncmp(srchval, val, slen - 4) == 0)
2010 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
2012 if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
2014 char *type, *idx, *end, *p;
2015 uint64_t id, vdev_id;
2018 * Determine our vdev type, keeping in mind
2019 * that the srchval is composed of a type and
2020 * vdev id pair (i.e. mirror-4).
2022 if ((type = strdup(srchval)) == NULL)
2025 if ((p = strrchr(type, '-')) == NULL) {
2033 * If the types don't match then keep looking.
2035 if (strncmp(val, type, strlen(val)) != 0) {
2040 verify(strncmp(type, VDEV_TYPE_RAIDZ,
2041 strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2042 strncmp(type, VDEV_TYPE_MIRROR,
2043 strlen(VDEV_TYPE_MIRROR)) == 0);
2044 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
2048 vdev_id = strtoull(idx, &end, 10);
2055 * Now verify that we have the correct vdev id.
2064 if (strcmp(srchval, val) == 0)
2073 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2074 &child, &children) != 0)
2077 for (c = 0; c < children; c++) {
2078 if ((ret = vdev_to_nvlist_iter(child[c], search,
2079 avail_spare, l2cache, NULL)) != NULL) {
2081 * The 'is_log' value is only set for the top-level
2082 * vdev, not the leaf vdevs. So we always look up the
2083 * log device from the root of the vdev tree (where
2084 * 'log' is non-NULL).
2087 nvlist_lookup_uint64(child[c],
2088 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
2096 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
2097 &child, &children) == 0) {
2098 for (c = 0; c < children; c++) {
2099 if ((ret = vdev_to_nvlist_iter(child[c], search,
2100 avail_spare, l2cache, NULL)) != NULL) {
2101 *avail_spare = B_TRUE;
2107 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
2108 &child, &children) == 0) {
2109 for (c = 0; c < children; c++) {
2110 if ((ret = vdev_to_nvlist_iter(child[c], search,
2111 avail_spare, l2cache, NULL)) != NULL) {
2122 * Given a physical path (minus the "/devices" prefix), find the
2126 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
2127 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
2129 nvlist_t *search, *nvroot, *ret;
2131 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2132 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
2134 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2137 *avail_spare = B_FALSE;
2141 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2142 nvlist_free(search);
2148 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
2151 zpool_vdev_is_interior(const char *name)
2153 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2154 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
2160 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
2161 boolean_t *l2cache, boolean_t *log)
2163 char buf[MAXPATHLEN];
2165 nvlist_t *nvroot, *search, *ret;
2168 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2170 guid = strtoull(path, &end, 10);
2171 if (guid != 0 && *end == '\0') {
2172 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
2173 } else if (zpool_vdev_is_interior(path)) {
2174 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
2175 } else if (path[0] != '/') {
2176 (void) snprintf(buf, sizeof (buf), "%s%s", _PATH_DEV, path);
2177 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0);
2179 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
2182 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2185 *avail_spare = B_FALSE;
2189 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2190 nvlist_free(search);
2196 vdev_online(nvlist_t *nv)
2200 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2201 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2202 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2209 * Helper function for zpool_get_physpaths().
2212 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
2213 size_t *bytes_written)
2215 size_t bytes_left, pos, rsz;
2219 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
2221 return (EZFS_NODEVICE);
2223 pos = *bytes_written;
2224 bytes_left = physpath_size - pos;
2225 format = (pos == 0) ? "%s" : " %s";
2227 rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
2228 *bytes_written += rsz;
2230 if (rsz >= bytes_left) {
2231 /* if physpath was not copied properly, clear it */
2232 if (bytes_left != 0) {
2235 return (EZFS_NOSPC);
2241 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
2242 size_t *rsz, boolean_t is_spare)
2247 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
2248 return (EZFS_INVALCONFIG);
2250 if (strcmp(type, VDEV_TYPE_DISK) == 0) {
2252 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
2253 * For a spare vdev, we only want to boot from the active
2258 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
2261 return (EZFS_INVALCONFIG);
2264 if (vdev_online(nv)) {
2265 if ((ret = vdev_get_one_physpath(nv, physpath,
2266 phypath_size, rsz)) != 0)
2269 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
2270 strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
2271 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
2276 if (nvlist_lookup_nvlist_array(nv,
2277 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
2278 return (EZFS_INVALCONFIG);
2280 for (i = 0; i < count; i++) {
2281 ret = vdev_get_physpaths(child[i], physpath,
2282 phypath_size, rsz, is_spare);
2283 if (ret == EZFS_NOSPC)
2288 return (EZFS_POOL_INVALARG);
2292 * Get phys_path for a root pool config.
2293 * Return 0 on success; non-zero on failure.
2296 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
2299 nvlist_t *vdev_root;
2306 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2308 return (EZFS_INVALCONFIG);
2310 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
2311 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
2312 &child, &count) != 0)
2313 return (EZFS_INVALCONFIG);
2316 * root pool can only have a single top-level vdev.
2318 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1)
2319 return (EZFS_POOL_INVALARG);
2321 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
2324 /* No online devices */
2326 return (EZFS_NODEVICE);
2332 * Get phys_path for a root pool.
2333 * Return 0 on success; non-zero on failure.
2336 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
2338 return (zpool_get_config_physpath(zhp->zpool_config, physpath,
2343 * If the device has been dynamically expanded then we need to relabel
2344 * the disk to use the new unallocated space.
2347 zpool_relabel_disk(libzfs_handle_t *hdl, const char *name)
2350 char path[MAXPATHLEN];
2353 int (*_efi_use_whole_disk)(int);
2355 if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT,
2356 "efi_use_whole_disk")) == NULL)
2359 (void) snprintf(path, sizeof (path), "%s/%s", ZFS_RDISK_ROOT, name);
2361 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
2362 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2363 "relabel '%s': unable to open device"), name);
2364 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
2368 * It's possible that we might encounter an error if the device
2369 * does not have any unallocated space left. If so, we simply
2370 * ignore that error and continue on.
2372 error = _efi_use_whole_disk(fd);
2374 if (error && error != VT_ENOSPC) {
2375 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2376 "relabel '%s': unable to read disk capacity"), name);
2377 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
2379 #endif /* illumos */
2384 * Bring the specified vdev online. The 'flags' parameter is a set of the
2385 * ZFS_ONLINE_* flags.
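 *
 * A hypothetical sketch ("c0t1d0" is an illustrative device name):
 *
 *      vdev_state_t newstate;
 *      if (zpool_vdev_online(zhp, "c0t1d0", ZFS_ONLINE_EXPAND,
 *          &newstate) == 0 && newstate == VDEV_STATE_HEALTHY)
 *              (void) printf("device expanded\n");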
2388 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
2389 vdev_state_t *newstate)
2391 zfs_cmd_t zc = { 0 };
2394 boolean_t avail_spare, l2cache, islog;
2395 libzfs_handle_t *hdl = zhp->zpool_hdl;
2397 if (flags & ZFS_ONLINE_EXPAND) {
2398 (void) snprintf(msg, sizeof (msg),
2399 dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
2401 (void) snprintf(msg, sizeof (msg),
2402 dgettext(TEXT_DOMAIN, "cannot online %s"), path);
2405 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2406 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2408 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2410 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2413 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2415 if (flags & ZFS_ONLINE_EXPAND ||
2416 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
2417 char *pathname = NULL;
2418 uint64_t wholedisk = 0;
2420 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
2422 verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,
2426 * XXX - L2ARC 1.0 devices can't support expansion.
2429 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2430 "cannot expand cache devices"));
2431 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
2435 pathname += strlen(ZFS_DISK_ROOT) + 1;
2436 (void) zpool_relabel_disk(hdl, pathname);
2440 zc.zc_cookie = VDEV_STATE_ONLINE;
2443 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
2444 if (errno == EINVAL) {
2445 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
2446 "from this pool into a new one. Use '%s' "
2447 "instead"), "zpool detach");
2448 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
2450 return (zpool_standard_error(hdl, errno, msg));
2453 *newstate = zc.zc_cookie;
2458 * Take the specified vdev offline
2461 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
2463 zfs_cmd_t zc = { 0 };
2466 boolean_t avail_spare, l2cache;
2467 libzfs_handle_t *hdl = zhp->zpool_hdl;
2469 (void) snprintf(msg, sizeof (msg),
2470 dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
2472 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2473 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2475 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2477 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2480 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2482 zc.zc_cookie = VDEV_STATE_OFFLINE;
2483 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
2485 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2492 * There are no other replicas of this device.
2494 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2498 * The log device has unplayed logs
2500 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
2503 return (zpool_standard_error(hdl, errno, msg));
2508 * Mark the given vdev faulted.
2511 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2513 zfs_cmd_t zc = { 0 };
2515 libzfs_handle_t *hdl = zhp->zpool_hdl;
2517 (void) snprintf(msg, sizeof (msg),
2518 dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);
2520 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2522 zc.zc_cookie = VDEV_STATE_FAULTED;
2525 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2532 * There are no other replicas of this device.
2534 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2537 return (zpool_standard_error(hdl, errno, msg));
2543 * Mark the given vdev degraded.
2546 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2548 zfs_cmd_t zc = { 0 };
2550 libzfs_handle_t *hdl = zhp->zpool_hdl;
2552 (void) snprintf(msg, sizeof (msg),
2553 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);
2555 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2557 zc.zc_cookie = VDEV_STATE_DEGRADED;
2560 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2563 return (zpool_standard_error(hdl, errno, msg));
2567 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
2571 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
2577 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
2579 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
2582 if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
2583 children == 2 && child[which] == tgt)
2586 for (c = 0; c < children; c++)
2587 if (is_replacing_spare(child[c], tgt, which))
2595 * Attach new_disk (fully described by nvroot) to old_disk.
2596 * If 'replacing' is specified, the new disk will replace the old one.
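 *
 * A hypothetical replace (device names are illustrative; nvroot describes
 * the single new disk):
 *
 *      ret = zpool_vdev_attach(zhp, "c0t2d0", "c0t3d0", nvroot, 1);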
2599 zpool_vdev_attach(zpool_handle_t *zhp,
2600 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
2602 zfs_cmd_t zc = { 0 };
2606 boolean_t avail_spare, l2cache, islog;
2611 nvlist_t *config_root;
2612 libzfs_handle_t *hdl = zhp->zpool_hdl;
2613 boolean_t rootpool = zpool_is_bootable(zhp);
2616 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2617 "cannot replace %s with %s"), old_disk, new_disk);
2619 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2620 "cannot attach %s to %s"), new_disk, old_disk);
2622 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2623 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
2625 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2628 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2631 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2633 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2634 zc.zc_cookie = replacing;
2636 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2637 &child, &children) != 0 || children != 1) {
2638 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2639 "new device must be a single disk"));
2640 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
2643 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2644 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
2646 if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
2650 * If the target is a hot spare that has been swapped in, we can only
2651 * replace it with another hot spare.
2654 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
2655 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
2656 NULL) == NULL || !avail_spare) &&
2657 is_replacing_spare(config_root, tgt, 1)) {
2658 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2659 "can only be replaced by another hot spare"));
2661 return (zfs_error(hdl, EZFS_BADTARGET, msg));
2666 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
2669 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
2671 zcmd_free_nvlists(&zc);
2676 * XXX need a better way to prevent user from
2677 * booting up a half-baked vdev.
2679 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
2680 "sure to wait until resilver is done "
2681 "before rebooting.\n"));
2682 (void) fprintf(stderr, "\n");
2683 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "If "
2684 "you boot from pool '%s', you may need to update\n"
2685 "boot code on newly attached disk '%s'.\n\n"
2686 "Assuming you use GPT partitioning and 'da0' is "
2687 "your new boot disk\n"
2688 "you may use the following command:\n\n"
2689 "\tgpart bootcode -b /boot/pmbr -p "
2690 "/boot/gptzfsboot -i 1 da0\n\n"),
2691 zhp->zpool_name, new_disk);
2699 * Can't attach to or replace this type of vdev.
2702 uint64_t version = zpool_get_prop_int(zhp,
2703 ZPOOL_PROP_VERSION, NULL);
2706 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2707 "cannot replace a log with a spare"));
2708 else if (version >= SPA_VERSION_MULTI_REPLACE)
2709 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2710 "already in replacing/spare config; wait "
2711 "for completion or use 'zpool detach'"));
2713 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2714 "cannot replace a replacing device"));
2716 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2717 "can only attach to mirrors and top-level "
2720 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2725 * The new device must be a single disk.
2727 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2728 "new device must be a single disk"));
2729 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
2733 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
2735 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2740 * The new device is too small.
2742 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2743 "device is too small"));
2744 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2749 * The new device has a different alignment requirement.
2751 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2752 "devices have different sector alignment"));
2753 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2758 * The resulting top-level vdev spec won't fit in the label.
2760 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
2764 (void) zpool_standard_error(hdl, errno, msg);
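/*
 * A minimal caller-side sketch of zpool_vdev_attach(): the replacement
 * vdev is described by a root nvlist with a single disk child. The device
 * paths are assumptions; zpool(1M) builds this nvlist from its command
 * line instead.
 *
 *	nvlist_t *newvd, *newroot;
 *
 *	verify(nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_DISK) == 0);
 *	verify(nvlist_add_string(newvd, ZPOOL_CONFIG_PATH,
 *	    "/dev/dsk/c2t0d0s0") == 0);
 *	verify(nvlist_alloc(&newroot, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(newroot, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_ROOT) == 0);
 *	verify(nvlist_add_nvlist_array(newroot, ZPOOL_CONFIG_CHILDREN,
 *	    &newvd, 1) == 0);
 *
 *	if (zpool_vdev_attach(zhp, "c1t0d0", "c2t0d0", newroot, 1) != 0)
 *		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));
 *	nvlist_free(newvd);
 *	nvlist_free(newroot);
 */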
2771 * Detach the specified device.
2774 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2776 zfs_cmd_t zc = { 0 };
2779 boolean_t avail_spare, l2cache;
2780 libzfs_handle_t *hdl = zhp->zpool_hdl;
2782 (void) snprintf(msg, sizeof (msg),
2783 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2785 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2786 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2788 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2791 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2794 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2796 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2798 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
2805 * Can't detach from this type of vdev.
2807 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
2808 "applicable to mirror and replacing vdevs"));
2809 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2814 * There are no other replicas of this device.
2816 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
2820 (void) zpool_standard_error(hdl, errno, msg);
2827 * Find a mirror vdev in the source nvlist.
2829 * The mchild array contains a list of disks in one of the top-level mirrors
2830 * of the source pool. The schild array contains a list of disks that the
2831 * user specified on the command line. We loop over the mchild array to
2832 * see if any entry in the schild array matches.
2834 * If a disk in the mchild array is found in the schild array, we return
2835 * the index of that entry. Otherwise we return -1.
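 * For example, if mchild describes a two-way mirror of c0t0d0 and c0t1d0
 * and the user ran 'zpool split tank newtank c0t1d0', then schild contains
 * only c0t1d0 and this function returns 1 (the names are illustrative).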
2838 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
2839 nvlist_t **schild, uint_t schildren)
2843 for (mc = 0; mc < mchildren; mc++) {
2845 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2846 mchild[mc], B_FALSE);
2848 for (sc = 0; sc < schildren; sc++) {
2849 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2850 schild[sc], B_FALSE);
2851 boolean_t result = (strcmp(mpath, spath) == 0);
2867 * Split a mirror pool. If '*newroot' is NULL, then a new nvlist
2868 * is generated and it is the responsibility of the caller to free it.
2871 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
2872 nvlist_t *props, splitflags_t flags)
2874 zfs_cmd_t zc = { 0 };
2876 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
2877 nvlist_t **varray = NULL, *zc_props = NULL;
2878 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
2879 libzfs_handle_t *hdl = zhp->zpool_hdl;
2881 boolean_t freelist = B_FALSE, memory_err = B_TRUE;
2884 (void) snprintf(msg, sizeof (msg),
2885 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
2887 if (!zpool_name_valid(hdl, B_FALSE, newname))
2888 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
2890 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
2891 (void) fprintf(stderr, gettext("Internal error: unable to "
2892 "retrieve pool configuration\n"));
2896 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
2898 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
2901 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
2902 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
2903 props, vers, flags, msg)) == NULL)
2907 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2909 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2910 "Source pool is missing vdev tree"));
2911 nvlist_free(zc_props);
2915 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
2918 if (*newroot == NULL ||
2919 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
2920 &newchild, &newchildren) != 0)
2923 for (c = 0; c < children; c++) {
2924 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
2926 nvlist_t **mchild, *vdev;
2931 * Unlike cache & spares, slogs are stored in the
2932 * ZPOOL_CONFIG_CHILDREN array. We filter them out here.
2934 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2936 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
2938 if (is_log || is_hole) {
2940 * Create a hole vdev and put it in the config.
2942 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
2944 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
2945 VDEV_TYPE_HOLE) != 0)
2947 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
2952 varray[vcount++] = vdev;
2956 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
2958 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
2959 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2960 "Source pool must be composed only of mirrors\n"));
2961 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2965 verify(nvlist_lookup_nvlist_array(child[c],
2966 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
2968 /* find or add an entry for this top-level vdev */
2969 if (newchildren > 0 &&
2970 (entry = find_vdev_entry(zhp, mchild, mchildren,
2971 newchild, newchildren)) >= 0) {
2972 /* We found a disk that the user specified. */
2973 vdev = mchild[entry];
2976 /* User didn't specify a disk for this vdev. */
2977 vdev = mchild[mchildren - 1];
2980 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
2984 /* did we find every disk the user specified? */
2985 if (found != newchildren) {
2986 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
2987 "include at most one disk from each mirror"));
2988 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2992 /* Prepare the nvlist for populating. */
2993 if (*newroot == NULL) {
2994 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
2997 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
2998 VDEV_TYPE_ROOT) != 0)
3001 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
3004 /* Add all the children we found */
3005 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
3006 lastlog == 0 ? vcount : lastlog) != 0)
3010 * If we're just doing a dry run, exit now with success.
3013 memory_err = B_FALSE;
3018 /* now build up the config list & call the ioctl */
3019 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
3022 if (nvlist_add_nvlist(newconfig,
3023 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
3024 nvlist_add_string(newconfig,
3025 ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
3026 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
3030 * The new pool is automatically part of the namespace unless we
3031 * explicitly export it.
3034 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
3035 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3036 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
3037 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
3039 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
3042 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
3043 retval = zpool_standard_error(hdl, errno, msg);
3048 memory_err = B_FALSE;
3051 if (varray != NULL) {
3054 for (v = 0; v < vcount; v++)
3055 nvlist_free(varray[v]);
3058 zcmd_free_nvlists(&zc);
3059 nvlist_free(zc_props);
3060 nvlist_free(newconfig);
3062 nvlist_free(*newroot);
3070 return (no_memory(hdl));
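/*
 * A minimal caller-side sketch of a dry-run split, as done for
 * 'zpool split -n'; the pool name is an assumption. On success the caller
 * owns the generated config and must free it.
 *
 *	splitflags_t flags = { 0 };
 *	nvlist_t *newroot = NULL;
 *
 *	flags.dryrun = B_TRUE;
 *	if (zpool_vdev_split(zhp, "newtank", &newroot, NULL, flags) == 0)
 *		nvlist_free(newroot);
 */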
3076 * Remove the given device. Currently, this is supported only for hot spares,
3077 * level 2 cache devices, and log devices (the latter require SPA_VERSION_HOLES).
3080 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
3082 zfs_cmd_t zc = { 0 };
3085 boolean_t avail_spare, l2cache, islog;
3086 libzfs_handle_t *hdl = zhp->zpool_hdl;
3089 (void) snprintf(msg, sizeof (msg),
3090 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3092 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3093 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3095 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3097 * XXX - this should just go away.
3099 if (!avail_spare && !l2cache && !islog) {
3100 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3101 "only inactive hot spares, cache, top-level, "
3102 "or log devices can be removed"));
3103 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3106 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
3107 if (islog && version < SPA_VERSION_HOLES) {
3108 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3109 "pool must be upgraded to support log removal"));
3110 return (zfs_error(hdl, EZFS_BADVERSION, msg));
3113 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3115 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3118 return (zpool_standard_error(hdl, errno, msg));
3122 * Clear the errors for the pool, or the particular device if specified.
3125 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
3127 zfs_cmd_t zc = { 0 };
3130 zpool_rewind_policy_t policy;
3131 boolean_t avail_spare, l2cache;
3132 libzfs_handle_t *hdl = zhp->zpool_hdl;
3133 nvlist_t *nvi = NULL;
3137 (void) snprintf(msg, sizeof (msg),
3138 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3141 (void) snprintf(msg, sizeof (msg),
3142 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3145 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3147 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
3148 &l2cache, NULL)) == 0)
3149 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3152 * Don't allow error clearing for hot spares. Do allow
3153 * error clearing for l2cache devices.
3156 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3158 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
3162 zpool_get_rewind_policy(rewindnvl, &policy);
3163 zc.zc_cookie = policy.zrp_request;
3165 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
3168 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
3171 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
3173 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3174 zcmd_free_nvlists(&zc);
3179 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
3180 errno != EPERM && errno != EACCES)) {
3181 if (policy.zrp_request &
3182 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
3183 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
3184 zpool_rewind_exclaim(hdl, zc.zc_name,
3185 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
3189 zcmd_free_nvlists(&zc);
3193 zcmd_free_nvlists(&zc);
3194 return (zpool_standard_error(hdl, errno, msg));
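/*
 * A minimal caller-side sketch of zpool_clear() with the default
 * (no-rewind) policy, roughly what 'zpool clear' sends down; the device
 * name is an assumption and the policy keys are those from sys/fs/zfs.h.
 * Passing NULL instead of a device name clears the whole pool.
 *
 *	nvlist_t *policy = fnvlist_alloc();
 *
 *	fnvlist_add_uint32(policy, ZPOOL_REWIND_REQUEST, ZPOOL_NO_REWIND);
 *	(void) zpool_clear(zhp, "c1t3d0", policy);
 *	fnvlist_free(policy);
 */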
3198 * Similar to zpool_clear(), but takes a GUID (used by fmd).
3201 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
3203 zfs_cmd_t zc = { 0 };
3205 libzfs_handle_t *hdl = zhp->zpool_hdl;
3207 (void) snprintf(msg, sizeof (msg),
3208 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
3211 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3213 zc.zc_cookie = ZPOOL_NO_REWIND;
3215 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
3218 return (zpool_standard_error(hdl, errno, msg));
3222 * Change the GUID for a pool.
3225 zpool_reguid(zpool_handle_t *zhp)
3228 libzfs_handle_t *hdl = zhp->zpool_hdl;
3229 zfs_cmd_t zc = { 0 };
3231 (void) snprintf(msg, sizeof (msg),
3232 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
3234 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3235 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
3238 return (zpool_standard_error(hdl, errno, msg));
3245 zpool_reopen(zpool_handle_t *zhp)
3247 zfs_cmd_t zc = { 0 };
3249 libzfs_handle_t *hdl = zhp->zpool_hdl;
3251 (void) snprintf(msg, sizeof (msg),
3252 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
3255 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3256 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
3258 return (zpool_standard_error(hdl, errno, msg));
3262 * Convert from a devid string to a path.
3265 devid_to_path(char *devid_str)
3270 devid_nmlist_t *list = NULL;
3273 if (devid_str_decode(devid_str, &devid, &minor) != 0)
3276 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
3278 devid_str_free(minor);
3285 * If the strdup() fails, we will just return NULL below.
3287 path = strdup(list[0].devname);
3289 devid_free_nmlist(list);
3295 * Convert from a path to a devid string.
3298 path_to_devid(const char *path)
3305 if ((fd = open(path, O_RDONLY)) < 0)
3310 if (devid_get(fd, &devid) == 0) {
3311 if (devid_get_minor_name(fd, &minor) == 0)
3312 ret = devid_str_encode(devid, minor);
3314 devid_str_free(minor);
3326 * Issue the necessary ioctl() to update the stored path value for the vdev. We
3327 * ignore any failure here, since a common case is for an unprivileged user to
3328 * type 'zpool status', and we'll display the correct information anyway.
3331 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
3333 zfs_cmd_t zc = { 0 };
3335 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3336 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
3337 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3340 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
3344 * Given a vdev, return the name to display in iostat. If the vdev has a path,
3345 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
3346 * We also check if this is a whole disk, in which case we strip off the
3347 * trailing 's0' slice name.
3349 * This routine is also responsible for identifying when disks have been
3350 * reconfigured in a new location. The kernel will have opened the device by
3351 * devid, but the path will still refer to the old location. To catch this, we
3352 * first do a path -> devid translation (which is fast for the common case). If
3353 * the devid matches, we're done. If not, we do a reverse devid -> path
3354 * translation and issue the appropriate ioctl() to update the path of the vdev.
3355 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
3359 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
3370 have_stats = nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
3371 (uint64_t **)&vs, &vsc) == 0;
3372 have_path = nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0;
3375 * If the device is not currently present, assume it will not
3376 * come back at the same device path. Display the device by GUID.
3378 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
3379 have_path && have_stats && vs->vs_state <= VDEV_STATE_CANT_OPEN) {
3380 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3382 (void) snprintf(buf, sizeof (buf), "%llu",
3383 (u_longlong_t)value);
3385 } else if (have_path) {
3388 * If the device is dead (faulted, offline, etc) then don't
3389 * bother opening it. Otherwise we may be forcing the user to
3390 * open a misbehaving device, which can have undesirable
3393 if ((have_stats == 0 ||
3394 vs->vs_state >= VDEV_STATE_DEGRADED) &&
3396 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
3398 * Determine if the current path is correct.
3400 char *newdevid = path_to_devid(path);
3402 if (newdevid == NULL ||
3403 strcmp(devid, newdevid) != 0) {
3406 if ((newpath = devid_to_path(devid)) != NULL) {
3408 * Update the path appropriately.
3410 set_path(zhp, nv, newpath);
3411 if (nvlist_add_string(nv,
3412 ZPOOL_CONFIG_PATH, newpath) == 0)
3413 verify(nvlist_lookup_string(nv,
3421 devid_str_free(newdevid);
3425 if (strncmp(path, ZFS_DISK_ROOTD, strlen(ZFS_DISK_ROOTD)) == 0)
3426 path += strlen(ZFS_DISK_ROOTD);
3428 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
3429 &value) == 0 && value) {
3430 int pathlen = strlen(path);
3431 char *tmp = zfs_strdup(hdl, path);
3434 * If it starts with c#, and ends with "s0", chop
3435 * the "s0" off, or if it ends with "s0/old", remove
3436 * the "s0" from the middle.
3438 if (CTD_CHECK(tmp)) {
3439 if (strcmp(&tmp[pathlen - 2], "s0") == 0) {
3440 tmp[pathlen - 2] = '\0';
3441 } else if (pathlen > 6 &&
3442 strcmp(&tmp[pathlen - 6], "s0/old") == 0) {
3443 (void) strcpy(&tmp[pathlen - 6],
3449 #else /* !illumos */
3450 if (strncmp(path, _PATH_DEV, sizeof (_PATH_DEV) - 1) == 0)
3451 path += sizeof (_PATH_DEV) - 1;
3452 #endif /* illumos */
3454 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
3457 * If it's a raidz device, we need to stick in the parity level.
3459 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
3460 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3462 (void) snprintf(buf, sizeof (buf), "%s%llu", path,
3463 (u_longlong_t)value);
3468 * We identify each top-level vdev by using a <type-id>
3469 * naming convention.
3474 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
3476 (void) snprintf(buf, sizeof (buf), "%s-%llu", path,
3482 return (zfs_strdup(hdl, path));
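/*
 * A minimal usage sketch: the caller owns the returned string. A disk with
 * a path typically comes back as "c1t0d0", a raidz vdev as "raidz2", and
 * passing B_TRUE for the last argument appends the "-<id>" suffix used for
 * top-level vdevs (e.g. "raidz2-0").
 *
 *	char *name = zpool_vdev_name(hdl, zhp, nv, B_FALSE);
 *
 *	(void) printf("%s\n", name);
 *	free(name);
 */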
3486 zbookmark_mem_compare(const void *a, const void *b)
3488 return (memcmp(a, b, sizeof (zbookmark_phys_t)));
3492 * Retrieve the persistent error log, uniquify the members, and return to the
3496 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
3498 zfs_cmd_t zc = { 0 };
3500 zbookmark_phys_t *zb = NULL;
3504 * Retrieve the raw error list from the kernel. If the number of errors
3505 * has increased, allocate more space and continue until we get the
3508 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
3512 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
3513 count * sizeof (zbookmark_phys_t))) == (uintptr_t)NULL)
3515 zc.zc_nvlist_dst_size = count;
3516 (void) strcpy(zc.zc_name, zhp->zpool_name);
3518 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
3520 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3521 if (errno == ENOMEM) {
3524 count = zc.zc_nvlist_dst_size;
3525 dst = zfs_alloc(zhp->zpool_hdl, count *
3526 sizeof (zbookmark_phys_t));
3529 zc.zc_nvlist_dst = (uintptr_t)dst;
3539 * Sort the resulting bookmarks. This is a little confusing due to the
3540 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
3541 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
3542 * _not_ copied as part of the process. So we point the start of our
3543 * array appropriately and decrement the total number of elements.
3545 zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
3546 zc.zc_nvlist_dst_size;
3547 count -= zc.zc_nvlist_dst_size;
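/*
 * For example, if the buffer was sized for 100 bookmarks and the ioctl
 * left zc_nvlist_dst_size at 40, then the first 40 slots are unused,
 * zb points at slot 40, and count drops to the 60 valid entries.
 */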
3549 qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);
3551 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
3554 * Fill in the nverrlistp with nvlists of dataset and object numbers.
3556 for (i = 0; i < count; i++) {
3559 /* ignoring zb_blkid and zb_level for now */
3560 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
3561 zb[i-1].zb_object == zb[i].zb_object)
3564 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
3566 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
3567 zb[i].zb_objset) != 0) {
3571 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
3572 zb[i].zb_object) != 0) {
3576 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
3583 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3587 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3588 return (no_memory(zhp->zpool_hdl));
3592 * Upgrade a ZFS pool to the latest on-disk version.
3595 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
3597 zfs_cmd_t zc = { 0 };
3598 libzfs_handle_t *hdl = zhp->zpool_hdl;
3600 (void) strcpy(zc.zc_name, zhp->zpool_name);
3601 zc.zc_cookie = new_version;
3603 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
3604 return (zpool_standard_error_fmt(hdl, errno,
3605 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
3611 zfs_save_arguments(int argc, char **argv, char *string, int len)
3613 (void) strlcpy(string, basename(argv[0]), len);
3614 for (int i = 1; i < argc; i++) {
3615 (void) strlcat(string, " ", len);
3616 (void) strlcat(string, argv[i], len);
3621 zpool_log_history(libzfs_handle_t *hdl, const char *message)
3623 zfs_cmd_t zc = { 0 };
3627 args = fnvlist_alloc();
3628 fnvlist_add_string(args, "message", message);
3629 err = zcmd_write_src_nvlist(hdl, &zc, args);
3631 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
3633 zcmd_free_nvlists(&zc);
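/*
 * A minimal caller-side sketch, roughly how zpool(1M) and zfs(1M) record
 * their invocation once per run (HIS_MAX_RECORD_LEN is from sys/spa.h):
 *
 *	char history_str[HIS_MAX_RECORD_LEN];
 *
 *	zfs_save_arguments(argc, argv, history_str, sizeof (history_str));
 *	(void) zpool_log_history(hdl, history_str);
 */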
3638 * Perform ioctl to get some command history of a pool.
3640 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
3641 * logical offset of the history buffer to start reading from.
3643 * Upon return, 'off' is the next logical offset to read from and
3644 * 'len' is the actual number of bytes read into 'buf'.
3647 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
3649 zfs_cmd_t zc = { 0 };
3650 libzfs_handle_t *hdl = zhp->zpool_hdl;
3652 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3654 zc.zc_history = (uint64_t)(uintptr_t)buf;
3655 zc.zc_history_len = *len;
3656 zc.zc_history_offset = *off;
3658 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
3661 return (zfs_error_fmt(hdl, EZFS_PERM,
3662 dgettext(TEXT_DOMAIN,
3663 "cannot show history for pool '%s'"),
3666 return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
3667 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3668 "'%s'"), zhp->zpool_name));
3670 return (zfs_error_fmt(hdl, EZFS_BADVERSION,
3671 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3672 "'%s', pool must be upgraded"), zhp->zpool_name));
3674 return (zpool_standard_error_fmt(hdl, errno,
3675 dgettext(TEXT_DOMAIN,
3676 "cannot get history for '%s'"), zhp->zpool_name));
3680 *len = zc.zc_history_len;
3681 *off = zc.zc_history_offset;
3687 * Process the buffer of nvlists, unpacking and storing each nvlist record
3688 * into 'records'. 'leftover' is set to the number of bytes that weren't
3689 * processed as there wasn't a complete record.
3692 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
3693 nvlist_t ***records, uint_t *numrecords)
3699 while (bytes_read > sizeof (reclen)) {
3701 /* get length of packed record (stored as little endian) */
3702 for (i = 0, reclen = 0; i < sizeof (reclen); i++)
3703 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
3705 if (bytes_read < sizeof (reclen) + reclen)
3709 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
3711 bytes_read -= sizeof (reclen) + reclen;
3712 buf += sizeof (reclen) + reclen;
3714 /* add record to nvlist array */
3716 if (ISP2(*numrecords + 1)) {
3717 *records = realloc(*records,
3718 *numrecords * 2 * sizeof (nvlist_t *));
3720 (*records)[*numrecords - 1] = nv;
3723 *leftover = bytes_read;
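/*
 * Each record in 'buf' is framed as an 8-byte little-endian length
 * followed by that many bytes of packed nvlist; a 300-byte record, for
 * example, consumes 308 bytes, and a trailing partial record is simply
 * handed back through 'leftover' for the caller to re-read.
 */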
3727 /* from spa_history.c: spa_history_create_obj() */
3728 #define HIS_BUF_LEN_DEF (128 << 10)
3729 #define HIS_BUF_LEN_MAX (1 << 30)
3732 * Retrieve the command history of a pool.
3735 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
3738 uint64_t buflen = HIS_BUF_LEN_DEF;
3740 nvlist_t **records = NULL;
3741 uint_t numrecords = 0;
3744 buf = malloc(buflen);
3748 uint64_t bytes_read = buflen;
3751 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
3754 /* if nothing else was read in, we're at EOF, just return */
3755 if (bytes_read == 0)
3758 if ((err = zpool_history_unpack(buf, bytes_read,
3759 &leftover, &records, &numrecords)) != 0)
3762 if (leftover == bytes_read) {
3764 * no progress made, because buffer is not big enough
3765 * to hold this record; resize and retry.
3770 if ((buflen >= HIS_BUF_LEN_MAX) ||
3771 ((buf = malloc(buflen)) == NULL)) {
3783 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
3784 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
3785 records, numrecords) == 0);
3787 for (i = 0; i < numrecords; i++)
3788 nvlist_free(records[i]);
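/*
 * A minimal caller-side sketch of consuming the returned history;
 * error handling is omitted:
 *
 *	nvlist_t *nvhis = NULL;
 *	nvlist_t **recs;
 *	uint_t nrecs;
 *
 *	if (zpool_get_history(zhp, &nvhis) == 0 &&
 *	    nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
 *	    &recs, &nrecs) == 0)
 *		(void) printf("%u history records\n", nrecs);
 *	nvlist_free(nvhis);
 */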
3795 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
3796 char *pathname, size_t len)
3798 zfs_cmd_t zc = { 0 };
3799 boolean_t mounted = B_FALSE;
3800 char *mntpnt = NULL;
3801 char dsname[MAXNAMELEN];
3804 /* special case for the MOS */
3805 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
3809 /* get the dataset's name */
3810 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3812 if (ioctl(zhp->zpool_hdl->libzfs_fd,
3813 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
3814 /* just write out a path of two object numbers */
3815 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
3819 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
3821 /* find out if the dataset is mounted */
3822 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
3824 /* get the corrupted object's path */
3825 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
3827 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
3830 (void) snprintf(pathname, len, "%s%s", mntpnt,
3833 (void) snprintf(pathname, len, "%s:%s",
3834 dsname, zc.zc_value);
3837 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj);
3844 * Read the EFI label from the config; if a label does not exist, then
3845 * pass back the error to the caller. If the caller has passed a non-NULL
3846 * diskaddr argument then we set it to the starting address of the EFI
3850 read_efi_label(nvlist_t *config, diskaddr_t *sb)
3854 char diskname[MAXPATHLEN];
3857 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
3860 (void) snprintf(diskname, sizeof (diskname), "%s%s", ZFS_RDISK_ROOT,
3861 strrchr(path, '/'));
3862 if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
3863 struct dk_gpt *vtoc;
3865 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
3867 *sb = vtoc->efi_parts[0].p_start;
3876 * determine where a partition starts on a disk in the current
3880 find_start_block(nvlist_t *config)
3884 diskaddr_t sb = MAXOFFSET_T;
3887 if (nvlist_lookup_nvlist_array(config,
3888 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
3889 if (nvlist_lookup_uint64(config,
3890 ZPOOL_CONFIG_WHOLE_DISK,
3891 &wholedisk) != 0 || !wholedisk) {
3892 return (MAXOFFSET_T);
3894 if (read_efi_label(config, &sb) < 0)
3899 for (c = 0; c < children; c++) {
3900 sb = find_start_block(child[c]);
3901 if (sb != MAXOFFSET_T) {
3905 return (MAXOFFSET_T);
3907 #endif /* illumos */
3910 * Label an individual disk. The name provided is the short name,
3911 * stripped of any leading /dev path.
3914 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, const char *name)
3917 char path[MAXPATHLEN];
3918 struct dk_gpt *vtoc;
3920 size_t resv = EFI_MIN_RESV_SIZE;
3921 uint64_t slice_size;
3922 diskaddr_t start_block;
3925 /* prepare an error message just in case */
3926 (void) snprintf(errbuf, sizeof (errbuf),
3927 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
3932 verify(nvlist_lookup_nvlist(zhp->zpool_config,
3933 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3935 if (zhp->zpool_start_block == 0)
3936 start_block = find_start_block(nvroot);
3938 start_block = zhp->zpool_start_block;
3939 zhp->zpool_start_block = start_block;
3942 start_block = NEW_START_BLOCK;
3945 (void) snprintf(path, sizeof (path), "%s/%s%s", ZFS_RDISK_ROOT, name,
3948 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
3950 * This shouldn't happen. We've long since verified that this
3951 * is a valid device.
3954 dgettext(TEXT_DOMAIN, "unable to open device"));
3955 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
3958 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
3960 * The only way this can fail is if we run out of memory, or we
3961 * were unable to read the disk's capacity
3963 if (errno == ENOMEM)
3964 (void) no_memory(hdl);
3967 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3968 "unable to read capacity of disk '%s'"), name);
3970 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
3973 slice_size = vtoc->efi_last_u_lba + 1;
3974 slice_size -= EFI_MIN_RESV_SIZE;
3975 if (start_block == MAXOFFSET_T)
3976 start_block = NEW_START_BLOCK;
3977 slice_size -= start_block;
3979 vtoc->efi_parts[0].p_start = start_block;
3980 vtoc->efi_parts[0].p_size = slice_size;
3983 * Why we use V_USR: V_BACKUP confuses users, and is considered
3984 * disposable by some EFI utilities (since EFI doesn't have a backup
3985 * slice). V_UNASSIGNED is supposed to be used only for zero size
3986 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
3987 * etc. were all pretty specific. V_USR is as close to reality as we
3988 * can get, in the absence of V_OTHER.
3990 vtoc->efi_parts[0].p_tag = V_USR;
3991 (void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
3993 vtoc->efi_parts[8].p_start = slice_size + start_block;
3994 vtoc->efi_parts[8].p_size = resv;
3995 vtoc->efi_parts[8].p_tag = V_RESERVED;
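/*
 * For example, with start_block at NEW_START_BLOCK (256) and resv at
 * EFI_MIN_RESV_SIZE (16384 sectors), a disk whose last usable LBA is
 * 1048575 gets slice 0 covering sectors 256-1032191 and reserved slice 8
 * covering sectors 1032192-1048575; the figures are illustrative only.
 */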
3997 if (efi_write(fd, vtoc) != 0) {
3999 * Some block drivers (like pcata) may not support EFI
4000 * GPT labels. Print out a helpful error message directing
4001 * the user to manually label the disk and give
4007 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4008 "try using fdisk(1M) and then provide a specific slice"));
4009 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4014 #endif /* illumos */
4019 supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
4025 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
4026 if (strcmp(type, VDEV_TYPE_FILE) == 0 ||
4027 strcmp(type, VDEV_TYPE_HOLE) == 0 ||
4028 strcmp(type, VDEV_TYPE_MISSING) == 0) {
4029 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4030 "vdev type '%s' is not supported"), type);
4031 (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
4034 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
4035 &child, &children) == 0) {
4036 for (c = 0; c < children; c++) {
4037 if (!supported_dump_vdev_type(hdl, child[c], errbuf))
4045 * Check if this zvol is allowable for use as a dump device; zero if
4046 * it is, > 0 if it isn't, < 0 if it isn't a zvol.
4048 * Allowable storage configurations include mirrors, all raidz variants, and
4049 * pools with log, cache, and spare devices. Pools which are backed by files or
4050 * have missing/hole vdevs are not suitable.
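 * For example (the zvol path is an assumption built under
 * ZVOL_FULL_DEV_DIR):
 *
 *	if (zvol_check_dump_config("/dev/zvol/dsk/rpool/dump") == 0)
 *		(void) printf("zvol may be used as a dump device\n");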
4053 zvol_check_dump_config(char *arg)
4055 zpool_handle_t *zhp = NULL;
4056 nvlist_t *config, *nvroot;
4060 libzfs_handle_t *hdl;
4062 char poolname[ZPOOL_MAXNAMELEN];
4063 int pathlen = strlen(ZVOL_FULL_DEV_DIR);
4066 if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
4070 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
4071 "dump is not supported on device '%s'"), arg);
4073 if ((hdl = libzfs_init()) == NULL)
4075 libzfs_print_on_error(hdl, B_TRUE);
4077 volname = arg + pathlen;
4079 /* check the configuration of the pool */
4080 if ((p = strchr(volname, '/')) == NULL) {
4081 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4082 "malformed dataset name"));
4083 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
4085 } else if (p - volname >= ZFS_MAXNAMELEN) {
4086 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4087 "dataset name is too long"));
4088 (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
4091 (void) strncpy(poolname, volname, p - volname);
4092 poolname[p - volname] = '\0';
4095 if ((zhp = zpool_open(hdl, poolname)) == NULL) {
4096 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4097 "could not open pool '%s'"), poolname);
4098 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
4101 config = zpool_get_config(zhp, NULL);
4102 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
4104 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4105 "could not obtain vdev configuration for '%s'"), poolname);
4106 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
4110 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
4111 &top, &toplevels) == 0);
4113 if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {