 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright 2016 Nexenta Systems, Inc.
 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
 */
#include <sys/types.h>
#include <sys/zfs_ioctl.h>

#include "zfs_namecheck.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"
static int read_efi_label(nvlist_t *config, diskaddr_t *sb);

#define BACKUP_SLICE    "s2"

typedef struct prop_flags {
    int create:1;   /* Validate property on creation */
    int import:1;   /* Validate property on import */
} prop_flags_t;

/*
 * ====================================================================
 * zpool property functions
 * ====================================================================
 */
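
/*
 * Fetch all pool properties from the kernel via ZFS_IOC_POOL_GET_PROPS,
 * expanding the destination nvlist buffer on ENOMEM, and cache the
 * result in zhp->zpool_props.
 */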
zpool_get_all_props(zpool_handle_t *zhp)
    libzfs_handle_t *hdl = zhp->zpool_hdl;

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

    if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)

    while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
        if (errno == ENOMEM) {
            if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
                zcmd_free_nvlists(&zc);
            zcmd_free_nvlists(&zc);

    if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
        zcmd_free_nvlists(&zc);

    zcmd_free_nvlists(&zc);
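
/*
 * Replace the cached property nvlist with a freshly fetched copy,
 * freeing the old one on success.
 */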
zpool_props_refresh(zpool_handle_t *zhp)
    old_props = zhp->zpool_props;

    if (zpool_get_all_props(zhp) != 0)

    nvlist_free(old_props);
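
/*
 * Look up a string property in the cached property nvlist, falling back
 * to the property's default value (with source ZPROP_SRC_DEFAULT) when
 * it is not present.
 */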
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t source;

    nvl = zhp->zpool_props;
    if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
        verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
        verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
        source = ZPROP_SRC_DEFAULT;
        if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
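
/*
 * Numeric counterpart of zpool_get_prop_string(). If the property cache
 * cannot be populated (e.g. because the pool is faulted), the top-level
 * vdev guid can still be answered from the config nvlist.
 */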
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
    zprop_source_t source;

    if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
        /*
         * zpool_get_all_props() has most likely failed because
         * the pool is faulted, but if all we need is the top level
         * vdev's guid then get it from the zhp config nvlist.
         */
        if ((prop == ZPOOL_PROP_GUID) &&
            (nvlist_lookup_nvlist(zhp->zpool_config,
            ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
            (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
        return (zpool_prop_default_numeric(prop));

    nvl = zhp->zpool_props;
    if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
        verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
        verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
        source = ZPROP_SRC_DEFAULT;
        value = zpool_prop_default_numeric(prop);
/*
 * Map VDEV STATE to printed strings.
 */
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
    case VDEV_STATE_CLOSED:
    case VDEV_STATE_OFFLINE:
        return (gettext("OFFLINE"));
    case VDEV_STATE_REMOVED:
        return (gettext("REMOVED"));
    case VDEV_STATE_CANT_OPEN:
        if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
            return (gettext("FAULTED"));
        else if (aux == VDEV_AUX_SPLIT_POOL)
            return (gettext("SPLIT"));
            return (gettext("UNAVAIL"));
    case VDEV_STATE_FAULTED:
        return (gettext("FAULTED"));
    case VDEV_STATE_DEGRADED:
        return (gettext("DEGRADED"));
    case VDEV_STATE_HEALTHY:
        return (gettext("ONLINE"));

    return (gettext("UNKNOWN"));
/*
 * Map POOL STATE to printed strings.
 */
zpool_pool_state_to_name(pool_state_t state)
    case POOL_STATE_ACTIVE:
        return (gettext("ACTIVE"));
    case POOL_STATE_EXPORTED:
        return (gettext("EXPORTED"));
    case POOL_STATE_DESTROYED:
        return (gettext("DESTROYED"));
    case POOL_STATE_SPARE:
        return (gettext("SPARE"));
    case POOL_STATE_L2CACHE:
        return (gettext("L2CACHE"));
    case POOL_STATE_UNINITIALIZED:
        return (gettext("UNINITIALIZED"));
    case POOL_STATE_UNAVAIL:
        return (gettext("UNAVAIL"));
    case POOL_STATE_POTENTIALLY_ACTIVE:
        return (gettext("POTENTIALLY_ACTIVE"));

    return (gettext("UNKNOWN"));
/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype, boolean_t literal)
    zprop_source_t src = ZPROP_SRC_NONE;

    if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
        case ZPOOL_PROP_NAME:
            (void) strlcpy(buf, zpool_get_name(zhp), len);

        case ZPOOL_PROP_HEALTH:
                zpool_pool_state_to_name(POOL_STATE_UNAVAIL), len);

        case ZPOOL_PROP_GUID:
            intval = zpool_get_prop_int(zhp, prop, &src);
            (void) snprintf(buf, len, "%llu", intval);

        case ZPOOL_PROP_ALTROOT:
        case ZPOOL_PROP_CACHEFILE:
        case ZPOOL_PROP_COMMENT:
            if (zhp->zpool_props != NULL ||
                zpool_get_all_props(zhp) == 0) {
                    zpool_get_prop_string(zhp, prop, &src),
            (void) strlcpy(buf, "-", len);

    if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
        prop != ZPOOL_PROP_NAME)

    switch (zpool_prop_get_type(prop)) {
    case PROP_TYPE_STRING:
        (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),

    case PROP_TYPE_NUMBER:
        intval = zpool_get_prop_int(zhp, prop, &src);

        case ZPOOL_PROP_SIZE:
        case ZPOOL_PROP_ALLOCATED:
        case ZPOOL_PROP_FREE:
        case ZPOOL_PROP_FREEING:
        case ZPOOL_PROP_LEAKED:
            (void) snprintf(buf, len, "%llu",
                (u_longlong_t)intval);
            (void) zfs_nicenum(intval, buf, len);

        case ZPOOL_PROP_EXPANDSZ:
                (void) strlcpy(buf, "-", len);
            } else if (literal) {
                (void) snprintf(buf, len, "%llu",
                    (u_longlong_t)intval);
                (void) zfs_nicenum(intval, buf, len);

        case ZPOOL_PROP_CAPACITY:
            (void) snprintf(buf, len, "%llu",
                (u_longlong_t)intval);
            (void) snprintf(buf, len, "%llu%%",
                (u_longlong_t)intval);

        case ZPOOL_PROP_FRAGMENTATION:
            if (intval == UINT64_MAX) {
                (void) strlcpy(buf, "-", len);
                (void) snprintf(buf, len, "%llu%%",
                    (u_longlong_t)intval);

        case ZPOOL_PROP_DEDUPRATIO:
            (void) snprintf(buf, len, "%llu.%02llux",
                (u_longlong_t)(intval / 100),
                (u_longlong_t)(intval % 100));

        case ZPOOL_PROP_HEALTH:
            verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
                ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
            verify(nvlist_lookup_uint64_array(nvroot,
                ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
            (void) strlcpy(buf, zpool_state_to_name(intval,

        case ZPOOL_PROP_VERSION:
            if (intval >= SPA_VERSION_FEATURES) {
                (void) snprintf(buf, len, "-");
            (void) snprintf(buf, len, "%llu", intval);

    case PROP_TYPE_INDEX:
        intval = zpool_get_prop_int(zhp, prop, &src);
        if (zpool_prop_index_to_string(prop, intval, &strval)
        (void) strlcpy(buf, strval, len);
/*
 * Check that the bootfs name has the same pool name as the pool it is
 * being set on. Assumes bootfs is a valid dataset name.
 */
bootfs_name_valid(const char *pool, char *bootfs)
    int len = strlen(pool);

    if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))

    if (strncmp(pool, bootfs, len) == 0 &&
        (bootfs[len] == '/' || bootfs[len] == '\0'))
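
/*
 * Return B_TRUE if the pool has a bootfs property set, i.e. the pool
 * is bootable.
 */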
zpool_is_bootable(zpool_handle_t *zhp)
    char bootfs[ZFS_MAX_DATASET_NAME_LEN];

    return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
        sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-",
        sizeof (bootfs)) != 0);
/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc.) if they
 * are specified as strings.
 */
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
    struct stat64 statbuf;

    if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
        (void) no_memory(hdl);

    while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
        const char *propname = nvpair_name(elem);

        prop = zpool_name_to_prop(propname);
        if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
            char *fname = strchr(propname, '@') + 1;

            err = zfeature_lookup_name(fname, NULL);
                ASSERT3U(err, ==, ENOENT);
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "invalid feature '%s'"), fname);
                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);

            if (nvpair_type(elem) != DATA_TYPE_STRING) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "'%s' must be a string"), propname);
                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);

            (void) nvpair_value_string(elem, &strval);
            if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "property '%s' can only be set to "
                    "'enabled'"), propname);
                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);

            if (nvlist_add_uint64(retprops, propname, 0) != 0) {
                (void) no_memory(hdl);

        /*
         * Make sure this property is valid and applies to this type.
         */
        if (prop == ZPROP_INVAL) {
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "invalid property '%s'"), propname);
            (void) zfs_error(hdl, EZFS_BADPROP, errbuf);

        if (zpool_prop_readonly(prop)) {
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
                "is readonly"), propname);
            (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);

        if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
            &strval, &intval, errbuf) != 0)

        /*
         * Perform additional checking for specific properties.
         */
        case ZPOOL_PROP_VERSION:
            if (intval < version ||
                !SPA_VERSION_IS_SUPPORTED(intval)) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "property '%s' number %d is invalid."),
                (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);

        case ZPOOL_PROP_BOOTFS:
            if (flags.create || flags.import) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "property '%s' cannot be set at creation "
                    "or import time"), propname);
                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);

            if (version < SPA_VERSION_BOOTFS) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "pool must be upgraded to support "
                    "'%s' property"), propname);
                (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
            /*
             * The bootfs property value has to be a dataset name and
             * the dataset has to be in the same pool that the property
             * is being set on.
             */
            if (strval[0] != '\0' && !bootfs_name_valid(poolname,
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
                    "is an invalid name"), strval);
                (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
            if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "could not open pool '%s'"), poolname);
                (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);

        case ZPOOL_PROP_ALTROOT:
            if (!flags.create && !flags.import) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "property '%s' can only be set during pool "
                    "creation or import"), propname);
                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);

            if (strval[0] != '/') {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "bad alternate root '%s'"), strval);
                (void) zfs_error(hdl, EZFS_BADPATH, errbuf);

        case ZPOOL_PROP_CACHEFILE:
            if (strval[0] == '\0')

            if (strcmp(strval, "none") == 0)

            if (strval[0] != '/') {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "property '%s' must be empty, an "
                    "absolute path, or 'none'"), propname);
                (void) zfs_error(hdl, EZFS_BADPATH, errbuf);

            slash = strrchr(strval, '/');

            if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
                strcmp(slash, "/..") == 0) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "'%s' is not a valid file"), strval);
                (void) zfs_error(hdl, EZFS_BADPATH, errbuf);

            if (strval[0] != '\0' &&
                (stat64(strval, &statbuf) != 0 ||
                !S_ISDIR(statbuf.st_mode))) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "'%s' is not a valid directory"),
                (void) zfs_error(hdl, EZFS_BADPATH, errbuf);

        case ZPOOL_PROP_COMMENT:
            for (check = strval; *check != '\0'; check++) {
                if (!isprint(*check)) {
                    dgettext(TEXT_DOMAIN,
                    "comment may only have printable "
                    (void) zfs_error(hdl, EZFS_BADPROP,

            if (strlen(strval) > ZPROP_MAX_COMMENT) {
                zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                    "comment must not exceed %d characters"),
                (void) zfs_error(hdl, EZFS_BADPROP, errbuf);

        case ZPOOL_PROP_READONLY:
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "property '%s' can only be set at "
                "import time"), propname);
            (void) zfs_error(hdl, EZFS_BADPROP, errbuf);

        case ZPOOL_PROP_TNAME:
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "property '%s' can only be set at "
                "creation time"), propname);
            (void) zfs_error(hdl, EZFS_BADPROP, errbuf);

        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
            "property '%s'(%d) not defined"), propname, prop);

    nvlist_free(retprops);
/*
 * Set zpool property: propname=propval.
 */
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
    zfs_cmd_t zc = { 0 };
    nvlist_t *nvl = NULL;
    prop_flags_t flags = { 0 };

    (void) snprintf(errbuf, sizeof (errbuf),
        dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),

    if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
        return (no_memory(zhp->zpool_hdl));

    if (nvlist_add_string(nvl, propname, propval) != 0) {
        return (no_memory(zhp->zpool_hdl));

    version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
    if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
        zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {

    /*
     * Execute the corresponding ioctl() to set this property.
     */
    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

    if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {

    ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

    zcmd_free_nvlists(&zc);

    (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);

    (void) zpool_props_refresh(zhp);
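
/*
 * Expand the given property list to cover all pool properties, adding
 * one feature@ entry per known feature plus an unsupported@ entry for
 * each feature in the pool's config that this system does not know,
 * and widen each column to fit the current values.
 */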
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
    libzfs_handle_t *hdl = zhp->zpool_hdl;
    char buf[ZFS_MAXPROPLEN];
    nvlist_t *features = NULL;
    boolean_t firstexpand = (NULL == *plp);

    if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)

    while (*last != NULL)
        last = &(*last)->pl_next;

    features = zpool_get_features(zhp);

    if ((*plp)->pl_all && firstexpand) {
        for (int i = 0; i < SPA_FEATURES; i++) {
            zprop_list_t *entry = zfs_alloc(hdl,
                sizeof (zprop_list_t));
            entry->pl_prop = ZPROP_INVAL;
            entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
                spa_feature_table[i].fi_uname);
            entry->pl_width = strlen(entry->pl_user_prop);
            entry->pl_all = B_TRUE;

            last = &entry->pl_next;

    /* add any unsupported features */
    for (nvpair_t *nvp = nvlist_next_nvpair(features, NULL);
        nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {

        if (zfeature_is_supported(nvpair_name(nvp)))

        propname = zfs_asprintf(hdl, "unsupported@%s",

        /*
         * Before adding the property to the list, make sure that no
         * other pool already added the same property.
         */
        while (entry != NULL) {
            if (entry->pl_user_prop != NULL &&
                strcmp(propname, entry->pl_user_prop) == 0) {
            entry = entry->pl_next;

        entry = zfs_alloc(hdl, sizeof (zprop_list_t));
        entry->pl_prop = ZPROP_INVAL;
        entry->pl_user_prop = propname;
        entry->pl_width = strlen(entry->pl_user_prop);
        entry->pl_all = B_TRUE;

        last = &entry->pl_next;

    for (entry = *plp; entry != NULL; entry = entry->pl_next) {

        if (entry->pl_prop != ZPROP_INVAL &&
            zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
            NULL, B_FALSE) == 0) {
            if (strlen(buf) > entry->pl_width)
                entry->pl_width = strlen(buf);
/*
 * Get the state for the given feature on the given ZFS pool.
 */
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
    boolean_t found = B_FALSE;
    nvlist_t *features = zpool_get_features(zhp);
    const char *feature = strchr(propname, '@') + 1;

    supported = zpool_prop_feature(propname);
    ASSERT(supported || zpool_prop_unsupported(propname));

    /*
     * Convert from feature name to feature guid. This conversion is
     * unnecessary for unsupported@... properties because they already
     * contain the guid.
     */
    ret = zfeature_lookup_name(feature, &fid);
        (void) strlcpy(buf, "-", len);
    feature = spa_feature_table[fid].fi_guid;

    if (nvlist_lookup_uint64(features, feature, &refcount) == 0)

        (void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
        (void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
        (void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);

        (void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
        (void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);

    (void) strlcpy(buf, "-", len);

/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, so start there instead.
 */
#define NEW_START_BLOCK 256
/*
 * Validate the given pool name, optionally recording an extended error
 * message in 'hdl'.
 */
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
    ret = pool_namecheck(pool, &why, &what);

    /*
     * The rules for reserved pool names were extended at a later point.
     * But we need to support users with existing pools that may now be
     * invalid. So we only check for this expanded set of names during a
     * create (or import), and only in userland.
     */
    if (ret == 0 && !isopen &&
        (strncmp(pool, "mirror", 6) == 0 ||
        strncmp(pool, "raidz", 5) == 0 ||
        strncmp(pool, "spare", 5) == 0 ||
        strcmp(pool, "log") == 0)) {
            dgettext(TEXT_DOMAIN, "name is reserved"));

        case NAME_ERR_TOOLONG:
            dgettext(TEXT_DOMAIN, "name is too long"));

        case NAME_ERR_INVALCHAR:
            dgettext(TEXT_DOMAIN, "invalid character "
            "'%c' in pool name"), what);

        case NAME_ERR_NOLETTER:
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "name must begin with a letter"));

        case NAME_ERR_RESERVED:
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "name is reserved"));

        case NAME_ERR_DISKLIKE:
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "pool name is reserved"));

        case NAME_ERR_LEADING_SLASH:
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "leading slash in name"));

        case NAME_ERR_EMPTY_COMPONENT:
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "empty component in name"));

        case NAME_ERR_TRAILING_SLASH:
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "trailing slash in name"));

        case NAME_ERR_MULTIPLE_AT:
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "multiple '@' delimiters in name"));

        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
            "(%d) not defined"), why);
/*
 * Open a handle to the given pool, even if the pool is currently in the
 * FAULTED state.
 */
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
    /*
     * Make sure the pool name is valid.
     */
    if (!zpool_name_valid(hdl, B_TRUE, pool)) {
        (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
            dgettext(TEXT_DOMAIN, "cannot open '%s'"),

    if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)

    zhp->zpool_hdl = hdl;
    (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

    if (zpool_refresh_stats(zhp, &missing) != 0) {

        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
        (void) zfs_error_fmt(hdl, EZFS_NOENT,
            dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
/*
 * Like the above, but silent on error. Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
    zpool_handle_t *zhp;

    if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)

    zhp->zpool_hdl = hdl;
    (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

    if (zpool_refresh_stats(zhp, &missing) != 0) {
/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the
 * faulted state.
 */
zpool_open(libzfs_handle_t *hdl, const char *pool)
    zpool_handle_t *zhp;

    if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)

    if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
        (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
            dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
/*
 * Close the handle. Simply frees the memory associated with the handle.
 */
zpool_close(zpool_handle_t *zhp)
    nvlist_free(zhp->zpool_config);
    nvlist_free(zhp->zpool_old_config);
    nvlist_free(zhp->zpool_props);

/*
 * Return the name of the pool.
 */
zpool_get_name(zpool_handle_t *zhp)
    return (zhp->zpool_name);

/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE).
 */
zpool_get_state(zpool_handle_t *zhp)
    return (zhp->zpool_state);
/*
 * Create the named pool, using the provided vdev list. It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
    zfs_cmd_t zc = { 0 };
    nvlist_t *zc_fsprops = NULL;
    nvlist_t *zc_props = NULL;

    (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
        "cannot create '%s'"), pool);

    if (!zpool_name_valid(hdl, B_FALSE, pool))
        return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

    if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)

        prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

        if ((zc_props = zpool_valid_proplist(hdl, pool, props,
            SPA_VERSION_1, flags, msg)) == NULL) {

        zoned = ((nvlist_lookup_string(fsprops,
            zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
            strcmp(zonestr, "on") == 0);

        if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
            fsprops, zoned, NULL, NULL, msg)) == NULL) {
            (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
        if (nvlist_add_nvlist(zc_props,
            ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {

    if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)

    (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

    if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

        zcmd_free_nvlists(&zc);
        nvlist_free(zc_props);
        nvlist_free(zc_fsprops);

            /*
             * This can happen if the user has specified the same
             * device multiple times. We can't reliably detect this
             * until we try to add it and see we already have a
             * label.
             */
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "one or more vdevs refer to the same device"));
            return (zfs_error(hdl, EZFS_BADDEV, msg));

            /*
             * This happens if the record size is smaller or larger
             * than the allowed size range, or not a power of 2.
             *
             * NOTE: although zfs_valid_proplist is called earlier,
             * this case may have slipped through since the
             * pool does not exist yet and it is therefore
             * impossible to read properties, e.g. max blocksize.
             */
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "record size invalid"));
            return (zfs_error(hdl, EZFS_BADPROP, msg));

            /*
             * This occurs when one of the devices is below
             * SPA_MINDEVSIZE. Unfortunately, we can't detect which
             * device was the problem device since there's no
             * reliable way to determine device size from userland.
             */
            zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "one or more devices is less than the "
                "minimum size (%s)"), buf);
            return (zfs_error(hdl, EZFS_BADDEV, msg));

            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "one or more devices is out of space"));
            return (zfs_error(hdl, EZFS_BADDEV, msg));

            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "cache device must be a disk or disk slice"));
            return (zfs_error(hdl, EZFS_BADDEV, msg));

            return (zpool_standard_error(hdl, errno, msg));

    zcmd_free_nvlists(&zc);
    nvlist_free(zc_props);
    nvlist_free(zc_fsprops);
/*
 * Destroy the given pool. It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
zpool_destroy(zpool_handle_t *zhp, const char *log_str)
    zfs_cmd_t zc = { 0 };
    zfs_handle_t *zfp = NULL;
    libzfs_handle_t *hdl = zhp->zpool_hdl;

    if (zhp->zpool_state == POOL_STATE_ACTIVE &&
        (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    zc.zc_history = (uint64_t)(uintptr_t)log_str;

    if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
        (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
            "cannot destroy '%s'"), zhp->zpool_name);

        if (errno == EROFS) {
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "one or more devices is read only"));
            (void) zfs_error(hdl, EZFS_BADDEV, msg);
            (void) zpool_standard_error(hdl, errno, msg);

    remove_mountpoint(zfp);
/*
 * Add the given vdevs to the pool. The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
    zfs_cmd_t zc = { 0 };
    libzfs_handle_t *hdl = zhp->zpool_hdl;
    nvlist_t **spares, **l2cache;
    uint_t nspares, nl2cache;

    (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
        "cannot add to '%s'"), zhp->zpool_name);

    if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
        SPA_VERSION_SPARES &&
        nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
        &spares, &nspares) == 0) {
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
            "upgraded to add hot spares"));
        return (zfs_error(hdl, EZFS_BADVERSION, msg));

    if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
        SPA_VERSION_L2CACHE &&
        nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
        &l2cache, &nl2cache) == 0) {
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
            "upgraded to add cache devices"));
        return (zfs_error(hdl, EZFS_BADVERSION, msg));

    if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

    if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {

            /*
             * This can happen if the user has specified the same
             * device multiple times. We can't reliably detect this
             * until we try to add it and see we already have a
             * label.
             */
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "one or more vdevs refer to the same device"));
            (void) zfs_error(hdl, EZFS_BADDEV, msg);

            /*
             * This occurs when one of the devices is below
             * SPA_MINDEVSIZE. Unfortunately, we can't detect which
             * device was the problem device since there's no
             * reliable way to determine device size from userland.
             */
            zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "device is less than the minimum "
            (void) zfs_error(hdl, EZFS_BADDEV, msg);

            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "pool must be upgraded to add these vdevs"));
            (void) zfs_error(hdl, EZFS_BADVERSION, msg);

            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "root pool cannot have multiple vdevs"
                " or separate logs"));
            (void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);

            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "cache device must be a disk or disk slice"));
            (void) zfs_error(hdl, EZFS_BADDEV, msg);

            (void) zpool_standard_error(hdl, errno, msg);

    zcmd_free_nvlists(&zc);
/*
 * Exports the pool from the system. The caller must ensure that there are no
 * mounted datasets in the pool.
 */
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
    const char *log_str)
    zfs_cmd_t zc = { 0 };

    (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
        "cannot export '%s'"), zhp->zpool_name);

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    zc.zc_cookie = force;
    zc.zc_guid = hardforce;
    zc.zc_history = (uint64_t)(uintptr_t)log_str;

    if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
            zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
                "use '-f' to override the following errors:\n"
                "'%s' has an active shared spare which could be"
                " used by other pools once '%s' is exported."),
                zhp->zpool_name, zhp->zpool_name);
            return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,

        return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
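
/*
 * Convenience wrappers around zpool_export_common(): zpool_export()
 * passes the caller's force flag, while zpool_export_force() both
 * forces and hard-forces the export.
 */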
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
    return (zpool_export_common(zhp, force, B_FALSE, log_str));

zpool_export_force(zpool_handle_t *zhp, const char *log_str)
    return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
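
/*
 * Report the effect of a rewind import: the time the pool was (or, for
 * a dry run, would be) returned to, and roughly how many seconds or
 * minutes of transactions were (or would be) discarded.
 */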
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *nv = NULL;

    if (!hdl->libzfs_printerr || config == NULL)

    if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
        nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {

    if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
    (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

    if (localtime_r((time_t *)&rewindto, &t) != NULL &&
        strftime(timestr, 128, "%c", &t) != 0) {
            (void) printf(dgettext(TEXT_DOMAIN,
                "Would be able to return %s "
                "to its state as of %s.\n"),
            (void) printf(dgettext(TEXT_DOMAIN,
                "Pool %s returned to its state as of %s.\n"),

            (void) printf(dgettext(TEXT_DOMAIN,
                "%s approximately %lld "),
                dryrun ? "Would discard" : "Discarded",
            (void) printf(dgettext(TEXT_DOMAIN,
                "minutes of transactions.\n"));
        } else if (loss > 0) {
            (void) printf(dgettext(TEXT_DOMAIN,
                "%s approximately %lld "),
                dryrun ? "Would discard" : "Discarded", loss);
            (void) printf(dgettext(TEXT_DOMAIN,
                "seconds of transactions.\n"));
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
    nvlist_t *nv = NULL;
    uint64_t edata = UINT64_MAX;

    if (!hdl->libzfs_printerr)

        (void) printf(dgettext(TEXT_DOMAIN, "action: "));
        (void) printf(dgettext(TEXT_DOMAIN, "\t"));

    /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
    if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
        nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
        nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)

    (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
    (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,

    (void) printf(dgettext(TEXT_DOMAIN,
        "Recovery is possible, but will result in some data loss.\n"));

    if (localtime_r((time_t *)&rewindto, &t) != NULL &&
        strftime(timestr, 128, "%c", &t) != 0) {
        (void) printf(dgettext(TEXT_DOMAIN,
            "\tReturning the pool to its state as of %s\n"
            "\tshould correct the problem. "),
        (void) printf(dgettext(TEXT_DOMAIN,
            "\tReverting the pool to an earlier state "
            "should correct the problem.\n\t"));

        (void) printf(dgettext(TEXT_DOMAIN,
            "Approximately %lld minutes of data\n"
            "\tmust be discarded, irreversibly. "), (loss + 30) / 60);
    } else if (loss > 0) {
        (void) printf(dgettext(TEXT_DOMAIN,
            "Approximately %lld seconds of data\n"
            "\tmust be discarded, irreversibly. "), loss);

    if (edata != 0 && edata != UINT64_MAX) {
            (void) printf(dgettext(TEXT_DOMAIN,
                "After rewind, at least\n"
                "\tone persistent user-data error will remain. "));
            (void) printf(dgettext(TEXT_DOMAIN,
                "After rewind, several\n"
                "\tpersistent user-data errors will remain. "));

    (void) printf(dgettext(TEXT_DOMAIN,
        "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
        reason >= 0 ? "clear" : "import", name);

    (void) printf(dgettext(TEXT_DOMAIN,
        "A scrub of the pool\n"
        "\tis strongly recommended after recovery.\n"));

    (void) printf(dgettext(TEXT_DOMAIN,
        "Destroy and re-create the pool from\n\ta backup source.\n"));
/*
 * zpool_import() is a contracted interface. Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
 */
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props = NULL;

    if (altroot != NULL) {
        if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
            return (zfs_error_fmt(hdl, EZFS_NOMEM,
                dgettext(TEXT_DOMAIN, "cannot import '%s'"),

        if (nvlist_add_string(props,
            zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
            nvlist_add_string(props,
            zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
            return (zfs_error_fmt(hdl, EZFS_NOMEM,
                dgettext(TEXT_DOMAIN, "cannot import '%s'"),

    ret = zpool_import_props(hdl, config, newname, props,
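
/*
 * Recursively print an indented outline of the vdev tree rooted at
 * 'nv', marking log devices.
 */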
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
    uint64_t is_log = 0;

    (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,

    (void) printf("\t%*s%s%s\n", indent, "", name,
        is_log ? " [log]" : "");

    if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
        &child, &children) != 0)

    for (c = 0; c < children; c++) {
        vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE);
        print_vdev_tree(hdl, vname, child[c], indent + 2);
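
/*
 * Print the features (with descriptions, where available) recorded in
 * the config's ZPOOL_CONFIG_UNSUP_FEAT nvlist that are unsupported on
 * this system.
 */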
zpool_print_unsup_feat(nvlist_t *config)
    nvlist_t *nvinfo, *unsup_feat;

    verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
    verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,

    for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
        nvp = nvlist_next_nvpair(unsup_feat, nvp)) {

        verify(nvpair_type(nvp) == DATA_TYPE_STRING);
        verify(nvpair_value_string(nvp, &desc) == 0);

        if (strlen(desc) > 0)
            (void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
            (void) printf("\t%s\n", nvpair_name(nvp));
/*
 * Import the given pool using the known configuration and a list of
 * properties to be set. The configuration should have come from
 * zpool_find_import(). The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, int flags)
    zfs_cmd_t zc = { 0 };
    zpool_rewind_policy_t policy;
    nvlist_t *nv = NULL;
    nvlist_t *nvinfo = NULL;
    nvlist_t *missing = NULL;

    verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,

    (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
        "cannot import pool '%s'"), origname);

    if (newname != NULL) {
        if (!zpool_name_valid(hdl, B_FALSE, newname))
            return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
                dgettext(TEXT_DOMAIN, "cannot import '%s'"),
        thename = (char *)newname;

    if (props != NULL) {
        prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };

        verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,

        if ((props = zpool_valid_proplist(hdl, origname,
            props, version, flags, errbuf)) == NULL)
        if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {

    (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

    verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,

    if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
        zcmd_free_nvlists(&zc);

    if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
        zcmd_free_nvlists(&zc);

    zc.zc_cookie = flags;
    while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
        if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
            zcmd_free_nvlists(&zc);

        (void) zcmd_read_dst_nvlist(hdl, &zc, &nv);

    zcmd_free_nvlists(&zc);

    zpool_get_rewind_policy(config, &policy);

        /*
         * Dry-run failed, but we print out what success
         * looks like if we found a best txg.
         */
        if (policy.zrp_request & ZPOOL_TRY_REWIND) {
            zpool_rewind_exclaim(hdl, newname ? origname : thename,

        if (newname == NULL)
            (void) snprintf(desc, sizeof (desc),
                dgettext(TEXT_DOMAIN, "cannot import '%s'"),
            (void) snprintf(desc, sizeof (desc),
                dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),

        if (nv != NULL && nvlist_lookup_nvlist(nv,
            ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
            nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
            (void) printf(dgettext(TEXT_DOMAIN, "This "
                "pool uses the following feature(s) not "
                "supported by this system:\n"));
            zpool_print_unsup_feat(nv);
            if (nvlist_exists(nvinfo,
                ZPOOL_CONFIG_CAN_RDONLY)) {
                (void) printf(dgettext(TEXT_DOMAIN,
                    "All unsupported features are only "
                    "required for writing to the pool."
                    "\nThe pool can be imported using "
                    "'-o readonly=on'.\n"));

            /*
             * Unsupported version.
             */
            (void) zfs_error(hdl, EZFS_BADVERSION, desc);

            (void) zfs_error(hdl, EZFS_INVALCONFIG, desc);

            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "one or more devices is read only"));
            (void) zfs_error(hdl, EZFS_BADDEV, desc);

            if (nv && nvlist_lookup_nvlist(nv,
                ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
                nvlist_lookup_nvlist(nvinfo,
                ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
                (void) printf(dgettext(TEXT_DOMAIN,
                    "The devices below are missing, use "
                    "'-m' to import the pool anyway:\n"));
                print_vdev_tree(hdl, NULL, missing, 2);
                (void) printf("\n");
                (void) zpool_standard_error(hdl, error, desc);

            (void) zpool_standard_error(hdl, error, desc);

            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "new name of at least one dataset is longer than "
                "the maximum allowable length"));
            (void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);

            (void) zpool_standard_error(hdl, error, desc);
            zpool_explain_recover(hdl,
                newname ? origname : thename, -error, nv);

        zpool_handle_t *zhp;

        /*
         * This should never fail, but play it safe anyway.
         */
        if (zpool_open_silent(hdl, thename, &zhp) != 0)
        else if (zhp != NULL)
        if (policy.zrp_request &
            (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
            zpool_rewind_exclaim(hdl, newname ? origname : thename,
                ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
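
/*
 * Start or stop (POOL_SCAN_NONE) a scan on the given pool. An EBUSY
 * from the kernel is translated into a scrub-specific or resilver-
 * specific error based on the current scan stats.
 */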
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
    zfs_cmd_t zc = { 0 };
    libzfs_handle_t *hdl = zhp->zpool_hdl;

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    zc.zc_cookie = func;

    if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
        (errno == ENOENT && func != POOL_SCAN_NONE))

    if (func == POOL_SCAN_SCRUB) {
        (void) snprintf(msg, sizeof (msg),
            dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
    } else if (func == POOL_SCAN_NONE) {
        (void) snprintf(msg, sizeof (msg),
            dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
        assert(!"unexpected result");

    if (errno == EBUSY) {
        pool_scan_stat_t *ps = NULL;

        verify(nvlist_lookup_nvlist(zhp->zpool_config,
            ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
        (void) nvlist_lookup_uint64_array(nvroot,
            ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
        if (ps && ps->pss_func == POOL_SCAN_SCRUB)
            return (zfs_error(hdl, EZFS_SCRUBBING, msg));
            return (zfs_error(hdl, EZFS_RESILVERING, msg));
    } else if (errno == ENOENT) {
        return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
        return (zpool_standard_error(hdl, errno, msg));
/*
 * This provides a very minimal check whether a given string is likely a
 * c#t#d# style string. Users of this are expected to do their own
 * verification of the s# part.
 */
#define CTD_CHECK(str) (str && str[0] == 'c' && isdigit(str[1]))

/*
 * More elaborate version for ones which may start with "/dev/dsk/".
 */
ctd_check_path(char *str)
    /*
     * If it starts with a slash, check the last component.
     */
    if (str && str[0] == '/') {
        char *tmp = strrchr(str, '/');

        /*
         * If it ends in "/old", check the second-to-last
         * component of the string instead.
         */
        if (tmp != str && strcmp(tmp, "/old") == 0) {
            for (tmp--; *tmp != '/'; tmp--)

    return (CTD_CHECK(str));
/*
 * Find a vdev that matches the search criteria specified. We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare; but FALSE if it's an INUSE spare.
 */
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
    nvpair_t *pair = nvlist_next_nvpair(search, NULL);

    /* Nothing to look for */
    if (search == NULL || pair == NULL)

    /* Obtain the key we will use to search */
    srchkey = nvpair_name(pair);

    switch (nvpair_type(pair)) {
    case DATA_TYPE_UINT64:
        if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
            uint64_t srchval, theguid;

            verify(nvpair_value_uint64(pair, &srchval) == 0);
            verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
            if (theguid == srchval)

    case DATA_TYPE_STRING: {
        char *srchval, *val;

        verify(nvpair_value_string(pair, &srchval) == 0);
        if (nvlist_lookup_string(nv, srchkey, &val) != 0)

        /*
         * Search for the requested value. Special cases:
         *
         * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
         *   "s0" or "s0/old". The "s0" part is hidden from the user,
         *   but included in the string, so this matches around it.
         * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
         *
         * Otherwise, all other searches are simple string compares.
         */
        if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 &&
            ctd_check_path(val)) {
            uint64_t wholedisk = 0;

            (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
                int slen = strlen(srchval);
                int vlen = strlen(val);

                if (slen != vlen - 2)

                /*
                 * make_leaf_vdev() should only set
                 * wholedisk for ZPOOL_CONFIG_PATHs which
                 * will include "/dev/dsk/", giving plenty of
                 * room for the indices used next.
                 */

                /*
                 * strings identical except trailing "s0"
                 */
                if (strcmp(&val[vlen - 2], "s0") == 0 &&
                    strncmp(srchval, val, slen) == 0)

                /*
                 * strings identical except trailing "s0/old"
                 */
                if (strcmp(&val[vlen - 6], "s0/old") == 0 &&
                    strcmp(&srchval[slen - 4], "/old") == 0 &&
                    strncmp(srchval, val, slen - 4) == 0)
        } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
            char *type, *idx, *end, *p;
            uint64_t id, vdev_id;

            /*
             * Determine our vdev type, keeping in mind
             * that the srchval is composed of a type and
             * vdev id pair (i.e. mirror-4).
             */
            if ((type = strdup(srchval)) == NULL)

            if ((p = strrchr(type, '-')) == NULL) {

            /*
             * If the types don't match then keep looking.
             */
            if (strncmp(val, type, strlen(val)) != 0) {

            verify(strncmp(type, VDEV_TYPE_RAIDZ,
                strlen(VDEV_TYPE_RAIDZ)) == 0 ||
                strncmp(type, VDEV_TYPE_MIRROR,
                strlen(VDEV_TYPE_MIRROR)) == 0);
            verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,

            vdev_id = strtoull(idx, &end, 10);

            /*
             * Now verify that we have the correct vdev id.
             */

        if (strcmp(srchval, val) == 0)

    if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
        &child, &children) != 0)

    for (c = 0; c < children; c++) {
        if ((ret = vdev_to_nvlist_iter(child[c], search,
            avail_spare, l2cache, NULL)) != NULL) {
            /*
             * The 'is_log' value is only set for the toplevel
             * vdev, not the leaf vdevs. So we always look up the
             * log device from the root of the vdev tree (where
             * 'log' is non-NULL).
             */
            nvlist_lookup_uint64(child[c],
                ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&

    if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
        &child, &children) == 0) {
        for (c = 0; c < children; c++) {
            if ((ret = vdev_to_nvlist_iter(child[c], search,
                avail_spare, l2cache, NULL)) != NULL) {
                *avail_spare = B_TRUE;

    if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
        &child, &children) == 0) {
        for (c = 0; c < children; c++) {
            if ((ret = vdev_to_nvlist_iter(child[c], search,
                avail_spare, l2cache, NULL)) != NULL) {
/*
 * Given a physical path (minus the "/devices" prefix), find the
 * associated vdev.
 */
zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
    nvlist_t *search, *nvroot, *ret;

    verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
    verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);

    verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,

    *avail_spare = B_FALSE;

    ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
    nvlist_free(search);

/*
 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
 */
zpool_vdev_is_interior(const char *name)
    if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
        strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
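
/*
 * Find a vdev in the pool by GUID, by interior vdev name (e.g.
 * "mirror-1"), by bare device name, or by full path.
 */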
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
    char buf[MAXPATHLEN];
    nvlist_t *nvroot, *search, *ret;

    verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);

    guid = strtoull(path, &end, 10);
    if (guid != 0 && *end == '\0') {
        verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
    } else if (zpool_vdev_is_interior(path)) {
        verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
    } else if (path[0] != '/') {
        (void) snprintf(buf, sizeof (buf), "%s%s", _PATH_DEV, path);
        verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0);
        verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);

    verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,

    *avail_spare = B_FALSE;

    ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
    nvlist_free(search);
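
/*
 * Return non-zero if the vdev is neither offline, faulted, nor removed.
 */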
vdev_online(nvlist_t *nv)
    if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
        nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
        nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
/*
 * Helper function for zpool_get_physpaths().
 */
vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
    size_t *bytes_written)
    size_t bytes_left, pos, rsz;

    if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
        return (EZFS_NODEVICE);

    pos = *bytes_written;
    bytes_left = physpath_size - pos;
    format = (pos == 0) ? "%s" : " %s";

    rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
    *bytes_written += rsz;

    if (rsz >= bytes_left) {
        /* if physpath was not copied properly, clear it */
        if (bytes_left != 0) {
        return (EZFS_NOSPC);
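
/*
 * Recursively walk the vdev tree rooted at 'nv' and append the physical
 * path of every online leaf disk (only the active disk, for spares) to
 * 'physpath'.
 */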
vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
    size_t *rsz, boolean_t is_spare)
    if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
        return (EZFS_INVALCONFIG);

    if (strcmp(type, VDEV_TYPE_DISK) == 0) {
        /*
         * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
         * For a spare vdev, we only want to boot from the active
         * spare device.
         */
        (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
            return (EZFS_INVALCONFIG);

        if (vdev_online(nv)) {
            if ((ret = vdev_get_one_physpath(nv, physpath,
                phypath_size, rsz)) != 0)
    } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
        strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
        strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
        (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {

        if (nvlist_lookup_nvlist_array(nv,
            ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
            return (EZFS_INVALCONFIG);

        for (i = 0; i < count; i++) {
            ret = vdev_get_physpaths(child[i], physpath,
                phypath_size, rsz, is_spare);
            if (ret == EZFS_NOSPC)

    return (EZFS_POOL_INVALARG);
/*
 * Get phys_path for a root pool config.
 * Return 0 on success; non-zero on failure.
 */
zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
    nvlist_t *vdev_root;

    if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
        return (EZFS_INVALCONFIG);

    if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
        nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
        &child, &count) != 0)
        return (EZFS_INVALCONFIG);

    /*
     * A root pool can only have a single top-level vdev.
     */
    if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1)
        return (EZFS_POOL_INVALARG);

    (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,

    /* No online devices */
        return (EZFS_NODEVICE);
/*
 * Get phys_path for a root pool.
 * Return 0 on success; non-zero on failure.
 */
zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
    return (zpool_get_config_physpath(zhp->zpool_config, physpath,
/*
 * If the device has been dynamically expanded then we need to relabel
 * the disk to use the new unallocated space.
 */
zpool_relabel_disk(libzfs_handle_t *hdl, const char *name)
    char path[MAXPATHLEN];
    int (*_efi_use_whole_disk)(int);

    if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT,
        "efi_use_whole_disk")) == NULL)

    (void) snprintf(path, sizeof (path), "%s/%s", ZFS_RDISK_ROOT, name);

    if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
            "relabel '%s': unable to open device"), name);
        return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));

    /*
     * It's possible that we might encounter an error if the device
     * does not have any unallocated space left. If so, we simply
     * ignore that error and continue on.
     */
    error = _efi_use_whole_disk(fd);

    if (error && error != VT_ENOSPC) {
        zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
            "relabel '%s': unable to read disk capacity"), name);
        return (zfs_error(hdl, EZFS_NOCAP, errbuf));
#endif /* illumos */
/*
 * Bring the specified vdev online. The 'flags' parameter is a set of the
 * ZFS_ONLINE_* flags.
 */
zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
    vdev_state_t *newstate)
    zfs_cmd_t zc = { 0 };
    boolean_t avail_spare, l2cache, islog;
    libzfs_handle_t *hdl = zhp->zpool_hdl;

    if (flags & ZFS_ONLINE_EXPAND) {
        (void) snprintf(msg, sizeof (msg),
            dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
        (void) snprintf(msg, sizeof (msg),
            dgettext(TEXT_DOMAIN, "cannot online %s"), path);

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
        return (zfs_error(hdl, EZFS_NODEVICE, msg));

    verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

        return (zfs_error(hdl, EZFS_ISSPARE, msg));

    if (flags & ZFS_ONLINE_EXPAND ||
        zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
        char *pathname = NULL;
        uint64_t wholedisk = 0;

        (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
        verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,

        /*
         * XXX - L2ARC 1.0 devices can't support expansion.
         */
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
                "cannot expand cache devices"));
            return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));

            pathname += strlen(ZFS_DISK_ROOT) + 1;
            (void) zpool_relabel_disk(hdl, pathname);

    zc.zc_cookie = VDEV_STATE_ONLINE;

    if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
        if (errno == EINVAL) {
            zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
                "from this pool into a new one. Use '%s' "
                "instead"), "zpool detach");
            return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
        return (zpool_standard_error(hdl, errno, msg));

    *newstate = zc.zc_cookie;
/*
 * Take the specified vdev offline.
 */
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
    zfs_cmd_t zc = { 0 };
    boolean_t avail_spare, l2cache;
    libzfs_handle_t *hdl = zhp->zpool_hdl;

    (void) snprintf(msg, sizeof (msg),
        dgettext(TEXT_DOMAIN, "cannot offline %s"), path);

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
        return (zfs_error(hdl, EZFS_NODEVICE, msg));

    verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

        return (zfs_error(hdl, EZFS_ISSPARE, msg));

    zc.zc_cookie = VDEV_STATE_OFFLINE;
    zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;

    if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)

        /*
         * There are no other replicas of this device.
         */
        return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

        /*
         * The log device has unplayed logs.
         */
        return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));

        return (zpool_standard_error(hdl, errno, msg));
/*
 * Mark the given vdev faulted.
 */
zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
    zfs_cmd_t zc = { 0 };
    libzfs_handle_t *hdl = zhp->zpool_hdl;

    (void) snprintf(msg, sizeof (msg),
        dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    zc.zc_cookie = VDEV_STATE_FAULTED;

    if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)

        /*
         * There are no other replicas of this device.
         */
        return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

    return (zpool_standard_error(hdl, errno, msg));
/*
 * Mark the given vdev degraded.
 */
zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
    zfs_cmd_t zc = { 0 };
    libzfs_handle_t *hdl = zhp->zpool_hdl;

    (void) snprintf(msg, sizeof (msg),
        dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);

    (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
    zc.zc_cookie = VDEV_STATE_DEGRADED;

    if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)

    return (zpool_standard_error(hdl, errno, msg));
/*
 * Returns TRUE if the given nvlist is a vdev that was originally swapped in
 * as a hot spare.
 */
is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
    if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
        verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,

        if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
            children == 2 && child[which] == tgt)

        for (c = 0; c < children; c++)
            if (is_replacing_spare(child[c], tgt, which))
2612 * Attach new_disk (fully described by nvroot) to old_disk.
2613 * If 'replacing' is specified, the new disk will replace the old one.
2616 zpool_vdev_attach(zpool_handle_t *zhp,
2617 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
2619 zfs_cmd_t zc = { 0 };
2623 boolean_t avail_spare, l2cache, islog;
2628 nvlist_t *config_root;
2629 libzfs_handle_t *hdl = zhp->zpool_hdl;
2630 boolean_t rootpool = zpool_is_bootable(zhp);
2633 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2634 "cannot replace %s with %s"), old_disk, new_disk);
2636 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2637 "cannot attach %s to %s"), new_disk, old_disk);
2639 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2640 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
2642 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2645 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2648 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2650 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2651 zc.zc_cookie = replacing;
2653 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2654 &child, &children) != 0 || children != 1) {
2655 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2656 "new device must be a single disk"));
2657 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
2660 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2661 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
2663 if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
2667 * If the target is a hot spare that has been swapped in, we can only
2668 * replace it with another hot spare.
2671 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
2672 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
2673 NULL) == NULL || !avail_spare) &&
2674 is_replacing_spare(config_root, tgt, 1)) {
2675 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2676 "can only be replaced by another hot spare"));
2678 return (zfs_error(hdl, EZFS_BADTARGET, msg));
2683 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
2686 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
2688 zcmd_free_nvlists(&zc);
2693 * XXX need a better way to prevent user from
2694 * booting up a half-baked vdev.
2696 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
2697 "sure to wait until resilver is done "
2698 "before rebooting.\n"));
2699 (void) fprintf(stderr, "\n");
2700 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "If "
2701 "you boot from pool '%s', you may need to update\n"
2702 "boot code on newly attached disk '%s'.\n\n"
2703 "Assuming you use GPT partitioning and 'da0' is "
2704 "your new boot disk\n"
2705 "you may use the following command:\n\n"
2706 "\tgpart bootcode -b /boot/pmbr -p "
2707 "/boot/gptzfsboot -i 1 da0\n\n"),
2708 zhp->zpool_name, new_disk);
2716 * Can't attach to or replace this type of vdev.
2719 uint64_t version = zpool_get_prop_int(zhp,
2720 ZPOOL_PROP_VERSION, NULL);
2723 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2724 "cannot replace a log with a spare"));
2725 else if (version >= SPA_VERSION_MULTI_REPLACE)
2726 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2727 "already in replacing/spare config; wait "
2728 "for completion or use 'zpool detach'"));
2730 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2731 "cannot replace a replacing device"));
2733 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2734 "can only attach to mirrors and top-level "
2737 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2742 * The new device must be a single disk.
2744 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2745 "new device must be a single disk"));
2746 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
2750 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
2752 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2757 * The new device is too small.
2759 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2760 "device is too small"));
2761 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2766 * The new device has a different alignment requirement.
2768 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2769 "devices have different sector alignment"));
2770 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2775 * The resulting top-level vdev spec won't fit in the label.
2777 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
2781 (void) zpool_standard_error(hdl, errno, msg);
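/*
 * Example (sketch): build the minimal nvroot that zpool_vdev_attach()
 * accepts -- a root vdev with exactly one disk child, matching the
 * "children != 1" check above -- and use it to replace old_disk.  Device
 * paths are hypothetical, and the real zpool(1M) command additionally
 * labels the disk before attaching.
 */
static int
example_replace_disk(zpool_handle_t *zhp, const char *old_disk,
    const char *new_disk)
{
	nvlist_t *nvroot, *disk;
	int err;

	verify(nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) == 0);
	verify(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_DISK) == 0);
	verify(nvlist_add_string(disk, ZPOOL_CONFIG_PATH, new_disk) == 0);

	verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
	verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT) == 0);
	verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &disk, 1) == 0);

	/* replacing = 1: new_disk takes over once the resilver completes */
	err = zpool_vdev_attach(zhp, old_disk, new_disk, nvroot, 1);

	nvlist_free(disk);
	nvlist_free(nvroot);
	return (err);
}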
2788 * Detach the specified device.
2791 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2793 zfs_cmd_t zc = { 0 };
2796 boolean_t avail_spare, l2cache;
2797 libzfs_handle_t *hdl = zhp->zpool_hdl;
2799 (void) snprintf(msg, sizeof (msg),
2800 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2802 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2803 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2805 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2808 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2811 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2813 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2815 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
2822 * Can't detach from this type of vdev.
2824 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
2825 "applicable to mirror and replacing vdevs"));
2826 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2831 * There are no other replicas of this device.
2833 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
2837 (void) zpool_standard_error(hdl, errno, msg);
2844 * Find a mirror vdev in the source nvlist.
2846 * The mchild array contains a list of disks in one of the top-level mirrors
2847 * of the source pool. The schild array contains a list of disks that the
2848 * user specified on the command line. We loop over the mchild array to
2849 * see if any entry in the schild array matches.
2851 * If a disk in the mchild array is found in the schild array, we return
2852 * the index of that entry. Otherwise we return -1.
2855 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
2856 nvlist_t **schild, uint_t schildren)
2860 for (mc = 0; mc < mchildren; mc++) {
2862 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2863 mchild[mc], B_FALSE);
2865 for (sc = 0; sc < schildren; sc++) {
2866 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2867 schild[sc], B_FALSE);
2868 boolean_t result = (strcmp(mpath, spath) == 0);
2884 * Split a mirror pool. If newroot points to null, then a new nvlist
2885 * is generated and it is the responsibility of the caller to free it.
2888 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
2889 nvlist_t *props, splitflags_t flags)
2891 zfs_cmd_t zc = { 0 };
2893 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
2894 nvlist_t **varray = NULL, *zc_props = NULL;
2895 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
2896 libzfs_handle_t *hdl = zhp->zpool_hdl;
2898 boolean_t freelist = B_FALSE, memory_err = B_TRUE;
2901 (void) snprintf(msg, sizeof (msg),
2902 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
2904 if (!zpool_name_valid(hdl, B_FALSE, newname))
2905 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
2907 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
2908 (void) fprintf(stderr, gettext("Internal error: unable to "
2909 "retrieve pool configuration\n"));
2913 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
2915 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
2918 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
2919 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
2920 props, vers, flags, msg)) == NULL)
2924 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2926 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2927 "Source pool is missing vdev tree"));
2928 nvlist_free(zc_props);
2932 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
2935 if (*newroot == NULL ||
2936 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
2937 &newchild, &newchildren) != 0)
2940 for (c = 0; c < children; c++) {
2941 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
2943 nvlist_t **mchild, *vdev;
2948 * Unlike cache & spares, slogs are stored in the
2949 * ZPOOL_CONFIG_CHILDREN array. We filter them out here.
2951 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2953 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
2955 if (is_log || is_hole) {
2957 * Create a hole vdev and put it in the config.
2959 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
2961 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
2962 VDEV_TYPE_HOLE) != 0)
2964 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
2969 varray[vcount++] = vdev;
2973 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
2975 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
2976 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2977 "Source pool must be composed only of mirrors\n"));
2978 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2982 verify(nvlist_lookup_nvlist_array(child[c],
2983 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
2985 /* find or add an entry for this top-level vdev */
2986 if (newchildren > 0 &&
2987 (entry = find_vdev_entry(zhp, mchild, mchildren,
2988 newchild, newchildren)) >= 0) {
2989 /* We found a disk that the user specified. */
2990 vdev = mchild[entry];
2993 /* User didn't specify a disk for this vdev. */
2994 vdev = mchild[mchildren - 1];
2997 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
3001 /* did we find every disk the user specified? */
3002 if (found != newchildren) {
3003 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
3004 "include at most one disk from each mirror"));
3005 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
3009 /* Prepare the nvlist for populating. */
3010 if (*newroot == NULL) {
3011 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
3014 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
3015 VDEV_TYPE_ROOT) != 0)
3018 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
3021 /* Add all the children we found */
3022 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
3023 lastlog == 0 ? vcount : lastlog) != 0)
3027 * If we're just doing a dry run, exit now with success.
3030 memory_err = B_FALSE;
3035 /* now build up the config list & call the ioctl */
3036 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
3039 if (nvlist_add_nvlist(newconfig,
3040 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
3041 nvlist_add_string(newconfig,
3042 ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
3043 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
3047 * The new pool is automatically part of the namespace unless we
3048 * explicitly export it.
3051 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
3052 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3053 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
3054 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
3056 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
3059 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
3060 retval = zpool_standard_error(hdl, errno, msg);
3065 memory_err = B_FALSE;
3068 if (varray != NULL) {
3071 for (v = 0; v < vcount; v++)
3072 nvlist_free(varray[v]);
3075 zcmd_free_nvlists(&zc);
3076 nvlist_free(zc_props);
3077 nvlist_free(newconfig);
3079 nvlist_free(*newroot);
3087 return (no_memory(hdl));
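/*
 * Example (sketch): a dry-run split of the mirrored pool behind 'zhp'
 * into a new pool named "tankclone" (hypothetical), letting the code
 * above pick the last disk of each mirror.  With flags.dryrun set, the
 * pool is not modified and *newroot describes what would be split off.
 */
static int
example_split_dryrun(zpool_handle_t *zhp)
{
	char newname[] = "tankclone";
	splitflags_t flags = { 0 };
	nvlist_t *newroot = NULL;
	int err;

	flags.dryrun = 1;
	err = zpool_vdev_split(zhp, newname, &newroot, NULL, flags);
	nvlist_free(newroot);	/* the caller owns the generated tree */
	return (err);
}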
3093 * Remove the given device. Currently, this is supported only for hot spares
3094 * and level 2 cache devices.
3097 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
3099 zfs_cmd_t zc = { 0 };
3102 boolean_t avail_spare, l2cache, islog;
3103 libzfs_handle_t *hdl = zhp->zpool_hdl;
3106 (void) snprintf(msg, sizeof (msg),
3107 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3109 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3110 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3112 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3114 * XXX - this should just go away.
3116 if (!avail_spare && !l2cache && !islog) {
3117 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3118 "only inactive hot spares, cache, top-level, "
3119 "or log devices can be removed"));
3120 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3123 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
3124 if (islog && version < SPA_VERSION_HOLES) {
3125 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3126 "pool must be upgrade to support log removal"));
3127 return (zfs_error(hdl, EZFS_BADVERSION, msg));
3130 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3132 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3135 return (zpool_standard_error(hdl, errno, msg));
3139 * Clear the errors for the pool, or the particular device if specified.
3142 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
3144 zfs_cmd_t zc = { 0 };
3147 zpool_rewind_policy_t policy;
3148 boolean_t avail_spare, l2cache;
3149 libzfs_handle_t *hdl = zhp->zpool_hdl;
3150 nvlist_t *nvi = NULL;
3154 (void) snprintf(msg, sizeof (msg),
3155 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3158 (void) snprintf(msg, sizeof (msg),
3159 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3162 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3164 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
3165 &l2cache, NULL)) == NULL)
3166 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3169 * Don't allow error clearing for hot spares. Do allow
3170 * error clearing for l2cache devices.
3173 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3175 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
3179 zpool_get_rewind_policy(rewindnvl, &policy);
3180 zc.zc_cookie = policy.zrp_request;
3182 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
3185 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
3188 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
3190 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3191 zcmd_free_nvlists(&zc);
3196 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
3197 errno != EPERM && errno != EACCES)) {
3198 if (policy.zrp_request &
3199 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
3200 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
3201 zpool_rewind_exclaim(hdl, zc.zc_name,
3202 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
3206 zcmd_free_nvlists(&zc);
3210 zcmd_free_nvlists(&zc);
3211 return (zpool_standard_error(hdl, errno, msg));
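/*
 * Example (sketch): clear the error counts for every device in a pool
 * with the default policy.  An empty rewind nvlist is assumed to leave
 * zpool_get_rewind_policy() at its no-rewind default, which is how
 * 'zpool clear' behaves without -F/-n.
 */
static int
example_clear_pool(zpool_handle_t *zhp)
{
	nvlist_t *policy;
	int err;

	verify(nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) == 0);
	/* path == NULL means the whole pool rather than one vdev */
	err = zpool_clear(zhp, NULL, policy);
	nvlist_free(policy);
	return (err);
}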
3215 * Similar to zpool_clear(), but takes a GUID (used by fmd).
3218 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
3220 zfs_cmd_t zc = { 0 };
3222 libzfs_handle_t *hdl = zhp->zpool_hdl;
3224 (void) snprintf(msg, sizeof (msg),
3225 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
3228 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3230 zc.zc_cookie = ZPOOL_NO_REWIND;
3232 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
3235 return (zpool_standard_error(hdl, errno, msg));
3239 * Change the GUID for a pool.
3242 zpool_reguid(zpool_handle_t *zhp)
3245 libzfs_handle_t *hdl = zhp->zpool_hdl;
3246 zfs_cmd_t zc = { 0 };
3248 (void) snprintf(msg, sizeof (msg),
3249 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
3251 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3252 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
3255 return (zpool_standard_error(hdl, errno, msg));
3262 zpool_reopen(zpool_handle_t *zhp)
3264 zfs_cmd_t zc = { 0 };
3266 libzfs_handle_t *hdl = zhp->zpool_hdl;
3268 (void) snprintf(msg, sizeof (msg),
3269 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
3272 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3273 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
3275 return (zpool_standard_error(hdl, errno, msg));
3279 * Convert from a devid string to a path.
3282 devid_to_path(char *devid_str)
3287 devid_nmlist_t *list = NULL;
3290 if (devid_str_decode(devid_str, &devid, &minor) != 0)
3293 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
3295 devid_str_free(minor);
3302 * In case the strdup() fails, we will just return NULL below.
3304 path = strdup(list[0].devname);
3306 devid_free_nmlist(list);
3312 * Convert from a path to a devid string.
3315 path_to_devid(const char *path)
3322 if ((fd = open(path, O_RDONLY)) < 0)
3327 if (devid_get(fd, &devid) == 0) {
3328 if (devid_get_minor_name(fd, &minor) == 0)
3329 ret = devid_str_encode(devid, minor);
3331 devid_str_free(minor);
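/*
 * Example (sketch): the staleness check zpool_vdev_name() performs
 * further below, shown in isolation.  We translate the stored path to
 * its current devid; on mismatch the device has moved, and the devid
 * is mapped back to the new path.  Inputs are hypothetical label values.
 */
static char *
example_refresh_path(const char *stored_path, char *stored_devid)
{
	char *newdevid, *newpath = NULL;

	if ((newdevid = path_to_devid(stored_path)) == NULL ||
	    strcmp(stored_devid, newdevid) != 0)
		newpath = devid_to_path(stored_devid);	/* NULL if unknown */
	if (newdevid != NULL)
		devid_str_free(newdevid);
	return (newpath);	/* caller frees */
}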
3343 * Issue the necessary ioctl() to update the stored path value for the vdev. We
3344 * ignore any failure here, since a common case is for an unprivileged user to
3345 * type 'zpool status', and we'll display the correct information anyway.
3348 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
3350 zfs_cmd_t zc = { 0 };
3352 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3353 (void) strlcpy(zc.zc_value, path, sizeof (zc.zc_value));
3354 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3357 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
3361 * Given a vdev, return the name to display in iostat. If the vdev has a path,
3362 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
3363 * We also check if this is a whole disk, in which case we strip off the
3364 * trailing 's0' slice name.
3366 * This routine is also responsible for identifying when disks have been
3367 * reconfigured in a new location. The kernel will have opened the device by
3368 * devid, but the path will still refer to the old location. To catch this, we
3369 * first do a path -> devid translation (which is fast for the common case). If
3370 * the devid matches, we're done. If not, we do a reverse devid -> path
3371 * translation and issue the appropriate ioctl() to update the path of the vdev.
3372 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
3373 * of these checks.
3376 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
3387 have_stats = nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
3388 (uint64_t **)&vs, &vsc) == 0;
3389 have_path = nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0;
3392 * If the device is not currently present, assume it will not
3393 * come back at the same device path. Display the device by GUID.
3395 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
3396 (have_path && have_stats && vs->vs_state <= VDEV_STATE_CANT_OPEN)) {
3397 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3399 (void) snprintf(buf, sizeof (buf), "%llu",
3400 (u_longlong_t)value);
3402 } else if (have_path) {
3405 * If the device is dead (faulted, offline, etc) then don't
3406 * bother opening it. Otherwise we may be forcing the user to
3407 * open a misbehaving device, which can have undesirable
3410 if ((have_stats == 0 ||
3411 vs->vs_state >= VDEV_STATE_DEGRADED) &&
3413 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
3415 * Determine if the current path is correct.
3417 char *newdevid = path_to_devid(path);
3419 if (newdevid == NULL ||
3420 strcmp(devid, newdevid) != 0) {
3423 if ((newpath = devid_to_path(devid)) != NULL) {
3425 * Update the path appropriately.
3427 set_path(zhp, nv, newpath);
3428 if (nvlist_add_string(nv,
3429 ZPOOL_CONFIG_PATH, newpath) == 0)
3430 verify(nvlist_lookup_string(nv,
3438 devid_str_free(newdevid);
3442 if (strncmp(path, ZFS_DISK_ROOTD, strlen(ZFS_DISK_ROOTD)) == 0)
3443 path += strlen(ZFS_DISK_ROOTD);
3445 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
3446 &value) == 0 && value) {
3447 int pathlen = strlen(path);
3448 char *tmp = zfs_strdup(hdl, path);
3451 * If it starts with c#, and ends with "s0", chop
3452 * the "s0" off, or if it ends with "s0/old", remove
3453 * the "s0" from the middle.
3455 if (CTD_CHECK(tmp)) {
3456 if (strcmp(&tmp[pathlen - 2], "s0") == 0) {
3457 tmp[pathlen - 2] = '\0';
3458 } else if (pathlen > 6 &&
3459 strcmp(&tmp[pathlen - 6], "s0/old") == 0) {
3460 (void) strcpy(&tmp[pathlen - 6],
3466 #else /* !illumos */
3467 if (strncmp(path, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
3468 path += sizeof(_PATH_DEV) - 1;
3469 #endif /* illumos */
3471 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
3474 * If it's a raidz device, we need to stick in the parity level.
3476 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
3477 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3479 (void) snprintf(buf, sizeof (buf), "%s%llu", path,
3480 (u_longlong_t)value);
3485 * We identify each top-level vdev by using a <type-id>
3486 * naming convention.
3491 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
3493 (void) snprintf(buf, sizeof (buf), "%s-%llu", path,
3499 return (zfs_strdup(hdl, path));
3503 zbookmark_mem_compare(const void *a, const void *b)
3505 return (memcmp(a, b, sizeof (zbookmark_phys_t)));
3509 * Retrieve the persistent error log, uniquify the members, and return to the
3510 * caller.
3513 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
3515 zfs_cmd_t zc = { 0 };
3517 zbookmark_phys_t *zb = NULL;
3521 * Retrieve the raw error list from the kernel. If the number of errors
3522 * has increased, allocate more space and continue until we get the
3525 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
3529 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
3530 count * sizeof (zbookmark_phys_t))) == (uintptr_t)NULL)
3532 zc.zc_nvlist_dst_size = count;
3533 (void) strcpy(zc.zc_name, zhp->zpool_name);
3535 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
3537 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3538 if (errno == ENOMEM) {
3541 count = zc.zc_nvlist_dst_size;
3542 dst = zfs_alloc(zhp->zpool_hdl, count *
3543 sizeof (zbookmark_phys_t));
3546 zc.zc_nvlist_dst = (uintptr_t)dst;
3556 * Sort the resulting bookmarks. This is a little confusing due to the
3557 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
3558 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
3559 * _not_ copied as part of the process. So we point the start of our
3560 * array appropriately and decrement the total number of elements.
3562 zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
3563 zc.zc_nvlist_dst_size;
3564 count -= zc.zc_nvlist_dst_size;
3566 qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);
3568 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
3571 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
3573 for (i = 0; i < count; i++) {
3576 /* ignoring zb_blkid and zb_level for now */
3577 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
3578 zb[i-1].zb_object == zb[i].zb_object)
3581 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
3583 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
3584 zb[i].zb_objset) != 0) {
3588 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
3589 zb[i].zb_object) != 0) {
3593 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
3600 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3604 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3605 return (no_memory(zhp->zpool_hdl));
3609 * Upgrade a ZFS pool to the latest on-disk version.
3612 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
3614 zfs_cmd_t zc = { 0 };
3615 libzfs_handle_t *hdl = zhp->zpool_hdl;
3617 (void) strcpy(zc.zc_name, zhp->zpool_name);
3618 zc.zc_cookie = new_version;
3620 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
3621 return (zpool_standard_error_fmt(hdl, errno,
3622 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
3623 zhp->zpool_name));
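/*
 * Example (sketch): the core of 'zpool upgrade' -- bump a pool to the
 * newest supported on-disk version only when it is actually behind.
 */
static int
example_upgrade_if_needed(zpool_handle_t *zhp)
{
	uint64_t version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);

	if (version < SPA_VERSION)
		return (zpool_upgrade(zhp, SPA_VERSION));
	return (0);
}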
3628 zfs_save_arguments(int argc, char **argv, char *string, int len)
3630 (void) strlcpy(string, basename(argv[0]), len);
3631 for (int i = 1; i < argc; i++) {
3632 (void) strlcat(string, " ", len);
3633 (void) strlcat(string, argv[i], len);
3638 zpool_log_history(libzfs_handle_t *hdl, const char *message)
3640 zfs_cmd_t zc = { 0 };
3644 args = fnvlist_alloc();
3645 fnvlist_add_string(args, "message", message);
3646 err = zcmd_write_src_nvlist(hdl, &zc, args);
3648 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
3650 zcmd_free_nvlists(&zc);
3655 * Perform ioctl to get some command history of a pool.
3657 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
3658 * logical offset of the history buffer to start reading from.
3660 * Upon return, 'off' is the next logical offset to read from and
3661 * 'len' is the actual number of bytes read into 'buf'.
3664 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
3666 zfs_cmd_t zc = { 0 };
3667 libzfs_handle_t *hdl = zhp->zpool_hdl;
3669 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3671 zc.zc_history = (uint64_t)(uintptr_t)buf;
3672 zc.zc_history_len = *len;
3673 zc.zc_history_offset = *off;
3675 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
3678 return (zfs_error_fmt(hdl, EZFS_PERM,
3679 dgettext(TEXT_DOMAIN,
3680 "cannot show history for pool '%s'"),
3683 return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
3684 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3685 "'%s'"), zhp->zpool_name));
3687 return (zfs_error_fmt(hdl, EZFS_BADVERSION,
3688 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3689 "'%s', pool must be upgraded"), zhp->zpool_name));
3691 return (zpool_standard_error_fmt(hdl, errno,
3692 dgettext(TEXT_DOMAIN,
3693 "cannot get history for '%s'"), zhp->zpool_name));
3697 *len = zc.zc_history_len;
3698 *off = zc.zc_history_offset;
3704 * Process the buffer of nvlists, unpacking and storing each nvlist record
3705 * into 'records'. 'leftover' is set to the number of bytes that weren't
3706 * processed as there wasn't a complete record.
3709 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
3710 nvlist_t ***records, uint_t *numrecords)
3716 while (bytes_read > sizeof (reclen)) {
3718 /* get length of packed record (stored as little endian) */
3719 for (i = 0, reclen = 0; i < sizeof (reclen); i++)
3720 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
3722 if (bytes_read < sizeof (reclen) + reclen)
3726 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
3728 bytes_read -= sizeof (reclen) + reclen;
3729 buf += sizeof (reclen) + reclen;
3731 /* add record to nvlist array */
3733 if (ISP2(*numrecords + 1)) {
3734 *records = realloc(*records,
3735 *numrecords * 2 * sizeof (nvlist_t *));
3737 (*records)[*numrecords - 1] = nv;
3740 *leftover = bytes_read;
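/*
 * Worked example of the framing consumed above (values hypothetical):
 * each record is a packed nvlist preceded by an 8-byte little-endian
 * length.  Given leading bytes
 *
 *	2c 01 00 00 00 00 00 00
 *
 * the loop computes reclen = 0x2c + (0x01 << 8) = 300, so the packed
 * nvlist occupies the next 300 bytes and the following record starts
 * 308 bytes in.  A trailing partial record stays in the buffer and is
 * reported through 'leftover' so the caller can re-read it.
 */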
3744 /* from spa_history.c: spa_history_create_obj() */
3745 #define HIS_BUF_LEN_DEF (128 << 10)
3746 #define HIS_BUF_LEN_MAX (1 << 30)
3749 * Retrieve the command history of a pool.
3752 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
3755 uint64_t buflen = HIS_BUF_LEN_DEF;
3757 nvlist_t **records = NULL;
3758 uint_t numrecords = 0;
3761 buf = malloc(buflen);
3765 uint64_t bytes_read = buflen;
3768 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
3771 /* if nothing else was read in, we're at EOF, just return */
3772 if (bytes_read == 0)
3775 if ((err = zpool_history_unpack(buf, bytes_read,
3776 &leftover, &records, &numrecords)) != 0)
3779 if (leftover == bytes_read) {
3781 * no progress made, because buffer is not big enough
3782 * to hold this record; resize and retry.
3787 if ((buflen >= HIS_BUF_LEN_MAX) ||
3788 ((buf = malloc(buflen)) == NULL)) {
3800 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
3801 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
3802 records, numrecords) == 0);
3804 for (i = 0; i < numrecords; i++)
3805 nvlist_free(records[i]);
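/*
 * Example (sketch): fetch and print a pool's command history, roughly
 * what 'zpool history' does.  Internal events lack ZPOOL_HIST_CMD and
 * are skipped here.
 */
static void
example_print_history(zpool_handle_t *zhp)
{
	nvlist_t *nvhis, **records;
	uint_t nrecords, i;

	if (zpool_get_history(zhp, &nvhis) != 0)
		return;
	verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
	    &records, &nrecords) == 0);
	for (i = 0; i < nrecords; i++) {
		uint64_t tsec;
		char *cmd;

		if (nvlist_lookup_uint64(records[i], ZPOOL_HIST_TIME,
		    &tsec) == 0 &&
		    nvlist_lookup_string(records[i], ZPOOL_HIST_CMD,
		    &cmd) == 0)
			(void) printf("%llu %s\n", (u_longlong_t)tsec, cmd);
	}
	nvlist_free(nvhis);
}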
3812 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
3813 char *pathname, size_t len)
3815 zfs_cmd_t zc = { 0 };
3816 boolean_t mounted = B_FALSE;
3817 char *mntpnt = NULL;
3818 char dsname[ZFS_MAX_DATASET_NAME_LEN];
3821 /* special case for the MOS */
3822 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
3826 /* get the dataset's name */
3827 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3829 if (ioctl(zhp->zpool_hdl->libzfs_fd,
3830 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
3831 /* just write out a path of two object numbers */
3832 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
3836 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
3838 /* find out if the dataset is mounted */
3839 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
3841 /* get the corrupted object's path */
3842 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
3844 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
3847 (void) snprintf(pathname, len, "%s%s", mntpnt,
3850 (void) snprintf(pathname, len, "%s:%s",
3851 dsname, zc.zc_value);
3854 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj);
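/*
 * Example (sketch): resolve the persistent error log to file paths the
 * way 'zpool status -v' does, combining zpool_get_errlog() with
 * zpool_obj_to_path() above.
 */
static void
example_print_errlog(zpool_handle_t *zhp)
{
	nvlist_t *nverrlist = NULL;
	nvpair_t *elem = NULL;
	char pathname[MAXPATHLEN * 2];

	if (zpool_get_errlog(zhp, &nverrlist) != 0)
		return;
	while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
		nvlist_t *nv;
		uint64_t dsobj, obj;

		verify(nvpair_value_nvlist(elem, &nv) == 0);
		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
		    &dsobj) == 0);
		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
		    &obj) == 0);
		zpool_obj_to_path(zhp, dsobj, obj, pathname,
		    sizeof (pathname));
		(void) printf("%s\n", pathname);
	}
	nvlist_free(nverrlist);
}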
3861 * Read the EFI label from the config, if a label does not exist then
3862 * pass back the error to the caller. If the caller has passed a non-NULL
3863 * diskaddr argument then we set it to the starting address of the EFI
3864 * partition.
3867 read_efi_label(nvlist_t *config, diskaddr_t *sb)
3871 char diskname[MAXPATHLEN];
3874 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
3877 (void) snprintf(diskname, sizeof (diskname), "%s%s", ZFS_RDISK_ROOT,
3878 strrchr(path, '/'));
3879 if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
3880 struct dk_gpt *vtoc;
3882 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
3884 *sb = vtoc->efi_parts[0].p_start;
3893 * determine where a partition starts on a disk in the current
3894 * configuration.
3897 find_start_block(nvlist_t *config)
3901 diskaddr_t sb = MAXOFFSET_T;
3904 if (nvlist_lookup_nvlist_array(config,
3905 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
3906 if (nvlist_lookup_uint64(config,
3907 ZPOOL_CONFIG_WHOLE_DISK,
3908 &wholedisk) != 0 || !wholedisk) {
3909 return (MAXOFFSET_T);
3911 if (read_efi_label(config, &sb) < 0)
3916 for (c = 0; c < children; c++) {
3917 sb = find_start_block(child[c]);
3918 if (sb != MAXOFFSET_T) {
3922 return (MAXOFFSET_T);
3924 #endif /* illumos */
3927 * Label an individual disk. The name provided is the short name,
3928 * stripped of any leading /dev path.
3931 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, const char *name)
3934 char path[MAXPATHLEN];
3935 struct dk_gpt *vtoc;
3937 size_t resv = EFI_MIN_RESV_SIZE;
3938 uint64_t slice_size;
3939 diskaddr_t start_block;
3942 /* prepare an error message just in case */
3943 (void) snprintf(errbuf, sizeof (errbuf),
3944 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
3949 verify(nvlist_lookup_nvlist(zhp->zpool_config,
3950 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3952 if (zhp->zpool_start_block == 0)
3953 start_block = find_start_block(nvroot);
3955 start_block = zhp->zpool_start_block;
3956 zhp->zpool_start_block = start_block;
3959 start_block = NEW_START_BLOCK;
3962 (void) snprintf(path, sizeof (path), "%s/%s%s", ZFS_RDISK_ROOT, name,
3965 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
3967 * This shouldn't happen. We've long since verified that this
3968 * is a valid device.
3971 dgettext(TEXT_DOMAIN, "unable to open device"));
3972 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
3975 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
3977 * The only way this can fail is if we run out of memory, or we
3978 * were unable to read the disk's capacity
3980 if (errno == ENOMEM)
3981 (void) no_memory(hdl);
3984 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3985 "unable to read disk capacity"), name);
3987 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
3990 slice_size = vtoc->efi_last_u_lba + 1;
3991 slice_size -= EFI_MIN_RESV_SIZE;
3992 if (start_block == MAXOFFSET_T)
3993 start_block = NEW_START_BLOCK;
3994 slice_size -= start_block;
3996 vtoc->efi_parts[0].p_start = start_block;
3997 vtoc->efi_parts[0].p_size = slice_size;
4000 * Why we use V_USR: V_BACKUP confuses users, and is considered
4001 * disposable by some EFI utilities (since EFI doesn't have a backup
4002 * slice). V_UNASSIGNED is supposed to be used only for zero size
4003 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
4004 * etc. were all pretty specific. V_USR is as close to reality as we
4005 * can get, in the absence of V_OTHER.
4007 vtoc->efi_parts[0].p_tag = V_USR;
4008 (void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
4010 vtoc->efi_parts[8].p_start = slice_size + start_block;
4011 vtoc->efi_parts[8].p_size = resv;
4012 vtoc->efi_parts[8].p_tag = V_RESERVED;
4014 if (efi_write(fd, vtoc) != 0) {
4016 * Some block drivers (like pcata) may not support EFI
4017 * GPT labels.  Print out a helpful error message directing
4018 * the user to manually label the disk and give a specific
4019 * slice.
4024 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4025 "try using fdisk(1M) and then provide a specific slice"));
4026 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4031 #endif /* illumos */
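/*
 * Worked example of the layout computed in zpool_label_disk(), for a
 * hypothetical disk with efi_last_u_lba = 976773134 (512-byte sectors),
 * start_block = NEW_START_BLOCK (256) and resv = EFI_MIN_RESV_SIZE
 * (16384):
 *
 *	slice 0 (V_USR, "zfs"):   start 256,
 *	                          size 976773135 - 16384 - 256 = 976756495
 *	slice 8 (V_RESERVED):     start 976756495 + 256 = 976756751,
 *	                          size 16384
 *
 * so the data slice covers everything between the start block and the
 * reserved tail that EFI utilities expect at the end of the disk.
 */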
4036 supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
4042 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
4043 if (strcmp(type, VDEV_TYPE_FILE) == 0 ||
4044 strcmp(type, VDEV_TYPE_HOLE) == 0 ||
4045 strcmp(type, VDEV_TYPE_MISSING) == 0) {
4046 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4047 "vdev type '%s' is not supported"), type);
4048 (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
4051 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
4052 &child, &children) == 0) {
4053 for (c = 0; c < children; c++) {
4054 if (!supported_dump_vdev_type(hdl, child[c], errbuf))
4062 * Check if this zvol is allowable for use as a dump device; zero if
4063 * it is, > 0 if it isn't, < 0 if it isn't a zvol.
4065 * Allowable storage configurations include mirrors, all raidz variants, and
4066 * pools with log, cache, and spare devices. Pools which are backed by files or
4067 * have missing/hole vdevs are not suitable.
4070 zvol_check_dump_config(char *arg)
4072 zpool_handle_t *zhp = NULL;
4073 nvlist_t *config, *nvroot;
4077 libzfs_handle_t *hdl;
4079 char poolname[ZFS_MAX_DATASET_NAME_LEN];
4080 int pathlen = strlen(ZVOL_FULL_DEV_DIR);
4083 if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
4087 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
4088 "dump is not supported on device '%s'"), arg);
4090 if ((hdl = libzfs_init()) == NULL)
4092 libzfs_print_on_error(hdl, B_TRUE);
4094 volname = arg + pathlen;
4096 /* check the configuration of the pool */
4097 if ((p = strchr(volname, '/')) == NULL) {
4098 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4099 "malformed dataset name"));
4100 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
4102 } else if (p - volname >= ZFS_MAX_DATASET_NAME_LEN) {
4103 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4104 "dataset name is too long"));
4105 (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
4108 (void) strncpy(poolname, volname, p - volname);
4109 poolname[p - volname] = '\0';
4112 if ((zhp = zpool_open(hdl, poolname)) == NULL) {
4113 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4114 "could not open pool '%s'"), poolname);
4115 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
4118 config = zpool_get_config(zhp, NULL);
4119 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
4121 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4122 "could not obtain vdev configuration for '%s'"), poolname);
4123 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
4127 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
4128 &top, &toplevels) == 0);
4130 if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {
4143 zpool_nextboot(libzfs_handle_t *hdl, uint64_t pool_guid, uint64_t dev_guid,
4144 const char *command)
4146 zfs_cmd_t zc = { 0 };
4152 args = fnvlist_alloc();
4153 fnvlist_add_uint64(args, ZPOOL_CONFIG_POOL_GUID, pool_guid);
4154 fnvlist_add_uint64(args, ZPOOL_CONFIG_GUID, dev_guid);
4155 fnvlist_add_string(args, "command", command);
4156 error = zcmd_write_src_nvlist(hdl, &zc, args);
4158 error = ioctl(hdl->libzfs_fd, ZFS_IOC_NEXTBOOT, &zc);
4159 zcmd_free_nvlists(&zc);
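/*
 * Example (sketch): queue a one-shot boot command the way zfsbootcfg(8)
 * does on FreeBSD.  The GUIDs identify the pool and boot device, and the
 * command string (hypothetical here) is interpreted by the boot blocks,
 * not by this library.
 */
static int
example_nextboot(libzfs_handle_t *hdl, uint64_t pool_guid, uint64_t dev_guid)
{
	return (zpool_nextboot(hdl, pool_guid, dev_guid,
	    "zfs:tank/ROOT/newbe:"));
}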