4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
25 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
26 * Copyright 2016 Nexenta Systems, Inc.
27 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
28 * Copyright (c) 2017 Datto Inc.
31 #include <sys/types.h>
43 #include <sys/zfs_ioctl.h>
46 #include "zfs_namecheck.h"
48 #include "libzfs_impl.h"
49 #include "zfs_comutil.h"
50 #include "zfeature_common.h"
52 static int read_efi_label(nvlist_t *, diskaddr_t *, boolean_t *);
53 static boolean_t zpool_vdev_is_interior(const char *name);
55 #define BACKUP_SLICE "s2"
57 typedef struct prop_flags {
58 int create:1; /* Validate property on creation */
59 int import:1; /* Validate property on import */
63 * ====================================================================
64 * zpool property functions
65 * ====================================================================
69 zpool_get_all_props(zpool_handle_t *zhp)
72 libzfs_handle_t *hdl = zhp->zpool_hdl;
74 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
76 if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
79 while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
80 if (errno == ENOMEM) {
81 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
82 zcmd_free_nvlists(&zc);
86 zcmd_free_nvlists(&zc);
91 if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
92 zcmd_free_nvlists(&zc);
96 zcmd_free_nvlists(&zc);
102 zpool_props_refresh(zpool_handle_t *zhp)
106 old_props = zhp->zpool_props;
108 if (zpool_get_all_props(zhp) != 0)
111 nvlist_free(old_props);
116 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
122 zprop_source_t source;
124 nvl = zhp->zpool_props;
125 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
126 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
128 verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
130 source = ZPROP_SRC_DEFAULT;
131 if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
142 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
146 zprop_source_t source;
148 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
150 * zpool_get_all_props() has most likely failed because
151 * the pool is faulted, but if all we need is the top level
152 * vdev's guid then get it from the zhp config nvlist.
154 if ((prop == ZPOOL_PROP_GUID) &&
155 (nvlist_lookup_nvlist(zhp->zpool_config,
156 ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
157 (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
161 return (zpool_prop_default_numeric(prop));
164 nvl = zhp->zpool_props;
165 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
166 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
168 verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
170 source = ZPROP_SRC_DEFAULT;
171 value = zpool_prop_default_numeric(prop);
181 * Map VDEV STATE to printed strings.
184 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
187 case VDEV_STATE_CLOSED:
188 case VDEV_STATE_OFFLINE:
189 return (gettext("OFFLINE"));
190 case VDEV_STATE_REMOVED:
191 return (gettext("REMOVED"));
192 case VDEV_STATE_CANT_OPEN:
193 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
194 return (gettext("FAULTED"));
195 else if (aux == VDEV_AUX_SPLIT_POOL)
196 return (gettext("SPLIT"));
198 return (gettext("UNAVAIL"));
199 case VDEV_STATE_FAULTED:
200 return (gettext("FAULTED"));
201 case VDEV_STATE_DEGRADED:
202 return (gettext("DEGRADED"));
203 case VDEV_STATE_HEALTHY:
204 return (gettext("ONLINE"));
210 return (gettext("UNKNOWN"));
214 * Map POOL STATE to printed strings.
217 zpool_pool_state_to_name(pool_state_t state)
220 case POOL_STATE_ACTIVE:
221 return (gettext("ACTIVE"));
222 case POOL_STATE_EXPORTED:
223 return (gettext("EXPORTED"));
224 case POOL_STATE_DESTROYED:
225 return (gettext("DESTROYED"));
226 case POOL_STATE_SPARE:
227 return (gettext("SPARE"));
228 case POOL_STATE_L2CACHE:
229 return (gettext("L2CACHE"));
230 case POOL_STATE_UNINITIALIZED:
231 return (gettext("UNINITIALIZED"));
232 case POOL_STATE_UNAVAIL:
233 return (gettext("UNAVAIL"));
234 case POOL_STATE_POTENTIALLY_ACTIVE:
235 return (gettext("POTENTIALLY_ACTIVE"));
238 return (gettext("UNKNOWN"));
242 * Get a zpool property value for 'prop' and return the value in
243 * a pre-allocated buffer.
246 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
247 zprop_source_t *srctype, boolean_t literal)
251 zprop_source_t src = ZPROP_SRC_NONE;
256 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
258 case ZPOOL_PROP_NAME:
259 (void) strlcpy(buf, zpool_get_name(zhp), len);
262 case ZPOOL_PROP_HEALTH:
264 zpool_pool_state_to_name(POOL_STATE_UNAVAIL), len);
267 case ZPOOL_PROP_GUID:
268 intval = zpool_get_prop_int(zhp, prop, &src);
269 (void) snprintf(buf, len, "%llu", intval);
272 case ZPOOL_PROP_ALTROOT:
273 case ZPOOL_PROP_CACHEFILE:
274 case ZPOOL_PROP_COMMENT:
275 if (zhp->zpool_props != NULL ||
276 zpool_get_all_props(zhp) == 0) {
278 zpool_get_prop_string(zhp, prop, &src),
284 (void) strlcpy(buf, "-", len);
293 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
294 prop != ZPOOL_PROP_NAME)
297 switch (zpool_prop_get_type(prop)) {
298 case PROP_TYPE_STRING:
299 (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
303 case PROP_TYPE_NUMBER:
304 intval = zpool_get_prop_int(zhp, prop, &src);
307 case ZPOOL_PROP_SIZE:
308 case ZPOOL_PROP_ALLOCATED:
309 case ZPOOL_PROP_FREE:
310 case ZPOOL_PROP_FREEING:
311 case ZPOOL_PROP_LEAKED:
313 (void) snprintf(buf, len, "%llu",
314 (u_longlong_t)intval);
316 (void) zfs_nicenum(intval, buf, len);
319 case ZPOOL_PROP_BOOTSIZE:
320 case ZPOOL_PROP_EXPANDSZ:
321 case ZPOOL_PROP_CHECKPOINT:
323 (void) strlcpy(buf, "-", len);
324 } else if (literal) {
325 (void) snprintf(buf, len, "%llu",
326 (u_longlong_t)intval);
328 (void) zfs_nicenum(intval, buf, len);
331 case ZPOOL_PROP_CAPACITY:
333 (void) snprintf(buf, len, "%llu",
334 (u_longlong_t)intval);
336 (void) snprintf(buf, len, "%llu%%",
337 (u_longlong_t)intval);
340 case ZPOOL_PROP_FRAGMENTATION:
341 if (intval == UINT64_MAX) {
342 (void) strlcpy(buf, "-", len);
344 (void) snprintf(buf, len, "%llu%%",
345 (u_longlong_t)intval);
348 case ZPOOL_PROP_DEDUPRATIO:
349 (void) snprintf(buf, len, "%llu.%02llux",
350 (u_longlong_t)(intval / 100),
351 (u_longlong_t)(intval % 100));
353 case ZPOOL_PROP_HEALTH:
354 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
355 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
356 verify(nvlist_lookup_uint64_array(nvroot,
357 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
360 (void) strlcpy(buf, zpool_state_to_name(intval,
363 case ZPOOL_PROP_VERSION:
364 if (intval >= SPA_VERSION_FEATURES) {
365 (void) snprintf(buf, len, "-");
370 (void) snprintf(buf, len, "%llu", intval);
374 case PROP_TYPE_INDEX:
375 intval = zpool_get_prop_int(zhp, prop, &src);
376 if (zpool_prop_index_to_string(prop, intval, &strval)
379 (void) strlcpy(buf, strval, len);
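/*
 * Usage sketch (illustrative only, not part of libzfs): a consumer holding a
 * zpool_handle_t obtained from zpool_open() might read properties such as
 * health or capacity into a pre-allocated buffer.  Error handling is trimmed
 * for brevity.
 *
 *	char val[ZFS_MAXPROPLEN];
 *	zprop_source_t src;
 *
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, val, sizeof (val),
 *	    &src, B_FALSE) == 0)
 *		(void) printf("health: %s\n", val);
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_CAPACITY, val, sizeof (val),
 *	    NULL, B_FALSE) == 0)
 *		(void) printf("capacity: %s\n", val);
 */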
393 * Check that the bootfs name refers to a dataset within the pool on which it
394 * is being set. Assumes bootfs is a valid dataset name.
397 bootfs_name_valid(const char *pool, char *bootfs)
399 int len = strlen(pool);
401 if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
404 if (strncmp(pool, bootfs, len) == 0 &&
405 (bootfs[len] == '/' || bootfs[len] == '\0'))
412 zpool_is_bootable(zpool_handle_t *zhp)
414 char bootfs[ZFS_MAX_DATASET_NAME_LEN];
416 return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
417 sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-",
418 sizeof (bootfs)) != 0);
423 * Given an nvlist of zpool properties to be set, validate that they are
424 * correct, and parse any numeric properties (index, boolean, etc) if they are
425 * specified as strings.
428 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
429 nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
437 struct stat64 statbuf;
440 if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
441 (void) no_memory(hdl);
446 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
447 const char *propname = nvpair_name(elem);
449 prop = zpool_name_to_prop(propname);
450 if (prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname)) {
452 char *fname = strchr(propname, '@') + 1;
454 err = zfeature_lookup_name(fname, NULL);
456 ASSERT3U(err, ==, ENOENT);
457 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
458 "invalid feature '%s'"), fname);
459 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
463 if (nvpair_type(elem) != DATA_TYPE_STRING) {
464 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
465 "'%s' must be a string"), propname);
466 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
470 (void) nvpair_value_string(elem, &strval);
471 if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) {
472 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
473 "property '%s' can only be set to "
474 "'enabled'"), propname);
475 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
479 if (nvlist_add_uint64(retprops, propname, 0) != 0) {
480 (void) no_memory(hdl);
487 * Make sure this property is valid and applies to this type.
489 if (prop == ZPOOL_PROP_INVAL) {
490 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
491 "invalid property '%s'"), propname);
492 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
496 if (zpool_prop_readonly(prop)) {
497 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
498 "is readonly"), propname);
499 (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
503 if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
504 &strval, &intval, errbuf) != 0)
508 * Perform additional checking for specific properties.
511 case ZPOOL_PROP_VERSION:
512 if (intval < version ||
513 !SPA_VERSION_IS_SUPPORTED(intval)) {
514 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
515 "property '%s' number %d is invalid."),
517 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
522 case ZPOOL_PROP_BOOTSIZE:
524 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
525 "property '%s' can only be set during pool "
526 "creation"), propname);
527 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
532 case ZPOOL_PROP_BOOTFS:
533 if (flags.create || flags.import) {
534 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
535 "property '%s' cannot be set at creation "
536 "or import time"), propname);
537 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
541 if (version < SPA_VERSION_BOOTFS) {
542 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
543 "pool must be upgraded to support "
544 "'%s' property"), propname);
545 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
550 * The bootfs property value has to be a dataset name, and the
551 * dataset has to be in the same pool that the property is set on.
553 if (strval[0] != '\0' && !bootfs_name_valid(poolname,
555 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
556 "is an invalid name"), strval);
557 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
561 if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
562 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
563 "could not open pool '%s'"), poolname);
564 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
570 case ZPOOL_PROP_ALTROOT:
571 if (!flags.create && !flags.import) {
572 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
573 "property '%s' can only be set during pool "
574 "creation or import"), propname);
575 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
579 if (strval[0] != '/') {
580 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
581 "bad alternate root '%s'"), strval);
582 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
587 case ZPOOL_PROP_CACHEFILE:
588 if (strval[0] == '\0')
591 if (strcmp(strval, "none") == 0)
594 if (strval[0] != '/') {
595 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
596 "property '%s' must be empty, an "
597 "absolute path, or 'none'"), propname);
598 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
602 slash = strrchr(strval, '/');
604 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
605 strcmp(slash, "/..") == 0) {
606 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
607 "'%s' is not a valid file"), strval);
608 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
614 if (strval[0] != '\0' &&
615 (stat64(strval, &statbuf) != 0 ||
616 !S_ISDIR(statbuf.st_mode))) {
617 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
618 "'%s' is not a valid directory"),
620 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
627 case ZPOOL_PROP_COMMENT:
628 for (check = strval; *check != '\0'; check++) {
629 if (!isprint(*check)) {
631 dgettext(TEXT_DOMAIN,
632 "comment may only have printable "
634 (void) zfs_error(hdl, EZFS_BADPROP,
639 if (strlen(strval) > ZPROP_MAX_COMMENT) {
640 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
641 "comment must not exceed %d characters"),
643 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
647 case ZPOOL_PROP_READONLY:
649 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
650 "property '%s' can only be set at "
651 "import time"), propname);
652 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
658 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
659 "property '%s'(%d) not defined"), propname, prop);
666 nvlist_free(retprops);
671 * Set zpool property : propname=propval.
674 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
676 zfs_cmd_t zc = { 0 };
679 nvlist_t *nvl = NULL;
682 prop_flags_t flags = { 0 };
684 (void) snprintf(errbuf, sizeof (errbuf),
685 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
688 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
689 return (no_memory(zhp->zpool_hdl));
691 if (nvlist_add_string(nvl, propname, propval) != 0) {
693 return (no_memory(zhp->zpool_hdl));
696 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
697 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
698 zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
707 * Execute the corresponding ioctl() to set this property.
709 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
711 if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
716 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
718 zcmd_free_nvlists(&zc);
722 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
724 (void) zpool_props_refresh(zhp);
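/*
 * Usage sketch (illustrative only): setting a pool property from a consumer
 * is a single call; validation and the ioctl happen inside zpool_set_prop().
 * 'zhp' is assumed to come from zpool_open().
 *
 *	if (zpool_set_prop(zhp, "autoexpand", "on") != 0)
 *		(void) fprintf(stderr, "failed to set autoexpand\n");
 */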
730 zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
732 libzfs_handle_t *hdl = zhp->zpool_hdl;
734 char buf[ZFS_MAXPROPLEN];
735 nvlist_t *features = NULL;
737 boolean_t firstexpand = (NULL == *plp);
739 if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
743 while (*last != NULL)
744 last = &(*last)->pl_next;
747 features = zpool_get_features(zhp);
749 if ((*plp)->pl_all && firstexpand) {
750 for (int i = 0; i < SPA_FEATURES; i++) {
751 zprop_list_t *entry = zfs_alloc(hdl,
752 sizeof (zprop_list_t));
753 entry->pl_prop = ZPROP_INVAL;
754 entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
755 spa_feature_table[i].fi_uname);
756 entry->pl_width = strlen(entry->pl_user_prop);
757 entry->pl_all = B_TRUE;
760 last = &entry->pl_next;
764 /* add any unsupported features */
765 for (nvpair_t *nvp = nvlist_next_nvpair(features, NULL);
766 nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
771 if (zfeature_is_supported(nvpair_name(nvp)))
774 propname = zfs_asprintf(hdl, "unsupported@%s",
778 * Before adding the property to the list make sure that no
779 * other pool already added the same property.
783 while (entry != NULL) {
784 if (entry->pl_user_prop != NULL &&
785 strcmp(propname, entry->pl_user_prop) == 0) {
789 entry = entry->pl_next;
796 entry = zfs_alloc(hdl, sizeof (zprop_list_t));
797 entry->pl_prop = ZPROP_INVAL;
798 entry->pl_user_prop = propname;
799 entry->pl_width = strlen(entry->pl_user_prop);
800 entry->pl_all = B_TRUE;
803 last = &entry->pl_next;
806 for (entry = *plp; entry != NULL; entry = entry->pl_next) {
811 if (entry->pl_prop != ZPROP_INVAL &&
812 zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
813 NULL, B_FALSE) == 0) {
814 if (strlen(buf) > entry->pl_width)
815 entry->pl_width = strlen(buf);
823 * Get the state for the given feature on the given ZFS pool.
826 zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
830 boolean_t found = B_FALSE;
831 nvlist_t *features = zpool_get_features(zhp);
833 const char *feature = strchr(propname, '@') + 1;
835 supported = zpool_prop_feature(propname);
836 ASSERT(supported || zpool_prop_unsupported(propname));
839 * Convert from feature name to feature guid. This conversion is
840 * unnecessary for unsupported@... properties because they already
847 ret = zfeature_lookup_name(feature, &fid);
849 (void) strlcpy(buf, "-", len);
852 feature = spa_feature_table[fid].fi_guid;
855 if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
860 (void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
863 (void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
865 (void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
870 (void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
872 (void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
875 (void) strlcpy(buf, "-", len);
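/*
 * Usage sketch (illustrative only): querying the state of a single feature
 * flag.  The property name is the full "feature@..." form, and the buffer
 * receives one of the ZFS_FEATURE_* strings ("disabled", "enabled",
 * "active") or "-" when the state cannot be determined.
 *
 *	char state[64];
 *
 *	if (zpool_prop_get_feature(zhp, "feature@async_destroy", state,
 *	    sizeof (state)) == 0)
 *		(void) printf("async_destroy is %s\n", state);
 */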
884 * Don't start the slice at the default block of 34; many storage
885 * devices will use a stripe width of 128k, so start there instead.
887 #define NEW_START_BLOCK 256
890 * Validate the given pool name, optionally recording an extended error message.
894 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
900 ret = pool_namecheck(pool, &why, &what);
903 * The rules for reserved pool names were extended at a later point.
904 * But we need to support users with existing pools that may now be
905 * invalid. So we only check for this expanded set of names during a
906 * create (or import), and only in userland.
908 if (ret == 0 && !isopen &&
909 (strncmp(pool, "mirror", 6) == 0 ||
910 strncmp(pool, "raidz", 5) == 0 ||
911 strncmp(pool, "spare", 5) == 0 ||
912 strcmp(pool, "log") == 0)) {
915 dgettext(TEXT_DOMAIN, "name is reserved"));
923 case NAME_ERR_TOOLONG:
925 dgettext(TEXT_DOMAIN, "name is too long"));
928 case NAME_ERR_INVALCHAR:
930 dgettext(TEXT_DOMAIN, "invalid character "
931 "'%c' in pool name"), what);
934 case NAME_ERR_NOLETTER:
935 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
936 "name must begin with a letter"));
939 case NAME_ERR_RESERVED:
940 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
941 "name is reserved"));
944 case NAME_ERR_DISKLIKE:
945 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
946 "pool name is reserved"));
949 case NAME_ERR_LEADING_SLASH:
950 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
951 "leading slash in name"));
954 case NAME_ERR_EMPTY_COMPONENT:
955 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
956 "empty component in name"));
959 case NAME_ERR_TRAILING_SLASH:
960 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
961 "trailing slash in name"));
964 case NAME_ERR_MULTIPLE_DELIMITERS:
965 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
966 "multiple '@' and/or '#' delimiters in "
971 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
972 "(%d) not defined"), why);
983 * Open a handle to the given pool, even if the pool is currently in the FAULTED state.
987 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
993 * Make sure the pool name is valid.
995 if (!zpool_name_valid(hdl, B_TRUE, pool)) {
996 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
997 dgettext(TEXT_DOMAIN, "cannot open '%s'"),
1002 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
1005 zhp->zpool_hdl = hdl;
1006 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
1008 if (zpool_refresh_stats(zhp, &missing) != 0) {
1014 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
1015 (void) zfs_error_fmt(hdl, EZFS_NOENT,
1016 dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
1025 * Like the above, but silent on error. Used when iterating over pools (because
1026 * the configuration cache may be out of date).
1029 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
1031 zpool_handle_t *zhp;
1034 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
1037 zhp->zpool_hdl = hdl;
1038 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
1040 if (zpool_refresh_stats(zhp, &missing) != 0) {
1056 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted state.
1060 zpool_open(libzfs_handle_t *hdl, const char *pool)
1062 zpool_handle_t *zhp;
1064 if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
1067 if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
1068 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
1069 dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
1078 * Close the handle. Simply frees the memory associated with the handle.
1081 zpool_close(zpool_handle_t *zhp)
1083 nvlist_free(zhp->zpool_config);
1084 nvlist_free(zhp->zpool_old_config);
1085 nvlist_free(zhp->zpool_props);
1090 * Return the name of the pool.
1093 zpool_get_name(zpool_handle_t *zhp)
1095 return (zhp->zpool_name);
1100 * Return the state of the pool (ACTIVE or UNAVAILABLE)
1103 zpool_get_state(zpool_handle_t *zhp)
1105 return (zhp->zpool_state);
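/*
 * Usage sketch (illustrative only): the typical open/close life cycle for a
 * pool handle.  libzfs_init()/libzfs_fini() bracket all libzfs usage; the
 * pool name "tank" is just a placeholder.
 *
 *	libzfs_handle_t *hdl = libzfs_init();
 *	zpool_handle_t *zhp;
 *
 *	if (hdl != NULL && (zhp = zpool_open(hdl, "tank")) != NULL) {
 *		(void) printf("%s state: %d\n", zpool_get_name(zhp),
 *		    zpool_get_state(zhp));
 *		zpool_close(zhp);
 *	}
 *	if (hdl != NULL)
 *		libzfs_fini(hdl);
 */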
1109 * Create the named pool, using the provided vdev list. It is assumed
1110 * that the consumer has already validated the contents of the nvlist, so we
1111 * don't have to worry about error semantics.
1114 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
1115 nvlist_t *props, nvlist_t *fsprops)
1117 zfs_cmd_t zc = { 0 };
1118 nvlist_t *zc_fsprops = NULL;
1119 nvlist_t *zc_props = NULL;
1123 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1124 "cannot create '%s'"), pool);
1126 if (!zpool_name_valid(hdl, B_FALSE, pool))
1127 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
1129 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1133 prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };
1135 if ((zc_props = zpool_valid_proplist(hdl, pool, props,
1136 SPA_VERSION_1, flags, msg)) == NULL) {
1145 zoned = ((nvlist_lookup_string(fsprops,
1146 zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
1147 strcmp(zonestr, "on") == 0);
1149 if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
1150 fsprops, zoned, NULL, NULL, msg)) == NULL) {
1154 (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
1157 if (nvlist_add_nvlist(zc_props,
1158 ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
1163 if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
1166 (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
1168 if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
1170 zcmd_free_nvlists(&zc);
1171 nvlist_free(zc_props);
1172 nvlist_free(zc_fsprops);
1177 * This can happen if the user has specified the same
1178 * device multiple times. We can't reliably detect this
1179 * until we try to add it and see we already have a
1182 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1183 "one or more vdevs refer to the same device"));
1184 return (zfs_error(hdl, EZFS_BADDEV, msg));
1188 * This happens if the record size is smaller or larger
1189 * than the allowed size range, or not a power of 2.
1191 * NOTE: although zfs_valid_proplist is called earlier,
1192 * this case may have slipped through since the
1193 * pool does not exist yet and it is therefore
1194 * impossible to read properties e.g. max blocksize
1197 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1198 "record size invalid"));
1199 return (zfs_error(hdl, EZFS_BADPROP, msg));
1203 * This occurs when one of the devices is below
1204 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1205 * device was the problem device since there's no
1206 * reliable way to determine device size from userland.
1211 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
1213 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1214 "one or more devices is less than the "
1215 "minimum size (%s)"), buf);
1217 return (zfs_error(hdl, EZFS_BADDEV, msg));
1220 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1221 "one or more devices is out of space"));
1222 return (zfs_error(hdl, EZFS_BADDEV, msg));
1225 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1226 "cache device must be a disk or disk slice"));
1227 return (zfs_error(hdl, EZFS_BADDEV, msg));
1230 return (zpool_standard_error(hdl, errno, msg));
1235 zcmd_free_nvlists(&zc);
1236 nvlist_free(zc_props);
1237 nvlist_free(zc_fsprops);
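/*
 * Usage sketch (illustrative only): creating a pool with one property set.
 * Building 'nvroot' (the vdev tree) is non-trivial and is normally done by
 * the zpool(1M) command; here it is assumed to have been constructed
 * already, and 'hdl' to come from libzfs_init().
 *
 *	nvlist_t *props = NULL;
 *
 *	if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0 &&
 *	    nvlist_add_string(props, "autoexpand", "on") == 0 &&
 *	    zpool_create(hdl, "tank", nvroot, props, NULL) == 0)
 *		(void) printf("pool created\n");
 *	nvlist_free(props);
 */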
1242 * Destroy the given pool. It is up to the caller to ensure that there are no
1243 * datasets left in the pool.
1246 zpool_destroy(zpool_handle_t *zhp, const char *log_str)
1248 zfs_cmd_t zc = { 0 };
1249 zfs_handle_t *zfp = NULL;
1250 libzfs_handle_t *hdl = zhp->zpool_hdl;
1253 if (zhp->zpool_state == POOL_STATE_ACTIVE &&
1254 (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
1257 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1258 zc.zc_history = (uint64_t)(uintptr_t)log_str;
1260 if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
1261 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1262 "cannot destroy '%s'"), zhp->zpool_name);
1264 if (errno == EROFS) {
1265 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1266 "one or more devices is read only"));
1267 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1269 (void) zpool_standard_error(hdl, errno, msg);
1278 remove_mountpoint(zfp);
1286 * Create a checkpoint in the given pool.
1289 zpool_checkpoint(zpool_handle_t *zhp)
1291 libzfs_handle_t *hdl = zhp->zpool_hdl;
1295 error = lzc_pool_checkpoint(zhp->zpool_name);
1297 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1298 "cannot checkpoint '%s'"), zhp->zpool_name);
1299 (void) zpool_standard_error(hdl, error, msg);
1307 * Discard the checkpoint from the given pool.
1310 zpool_discard_checkpoint(zpool_handle_t *zhp)
1312 libzfs_handle_t *hdl = zhp->zpool_hdl;
1316 error = lzc_pool_checkpoint_discard(zhp->zpool_name);
1318 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1319 "cannot discard checkpoint in '%s'"), zhp->zpool_name);
1320 (void) zpool_standard_error(hdl, error, msg);
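/*
 * Usage sketch (illustrative only): taking a checkpoint before a risky
 * administrative change and discarding it once the change is known to be
 * good.  Both calls return 0 on success.
 *
 *	if (zpool_checkpoint(zhp) == 0) {
 *		... perform the administrative change ...
 *		(void) zpool_discard_checkpoint(zhp);
 *	}
 */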
1328 * Add the given vdevs to the pool. The caller must have already performed the
1329 * necessary verification to ensure that the vdev specification is well-formed.
1332 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
1334 zfs_cmd_t zc = { 0 };
1336 libzfs_handle_t *hdl = zhp->zpool_hdl;
1338 nvlist_t **spares, **l2cache;
1339 uint_t nspares, nl2cache;
1341 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1342 "cannot add to '%s'"), zhp->zpool_name);
1344 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1345 SPA_VERSION_SPARES &&
1346 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1347 &spares, &nspares) == 0) {
1348 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1349 "upgraded to add hot spares"));
1350 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1353 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1354 SPA_VERSION_L2CACHE &&
1355 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1356 &l2cache, &nl2cache) == 0) {
1357 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1358 "upgraded to add cache devices"));
1359 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1362 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1364 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1366 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
1370 * This can happen if the user has specified the same
1371 * device multiple times. We can't reliably detect this
1372 * until we try to add it and see we already have a
1375 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1376 "one or more vdevs refer to the same device"));
1377 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1381 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1382 "invalid config; a pool with removing/removed "
1383 "vdevs does not support adding raidz vdevs"));
1384 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1389 * This occurs when one of the devices is below
1390 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1391 * device was the problem device since there's no
1392 * reliable way to determine device size from userland.
1397 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
1399 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1400 "device is less than the minimum "
1403 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1407 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1408 "pool must be upgraded to add these vdevs"));
1409 (void) zfs_error(hdl, EZFS_BADVERSION, msg);
1413 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1414 "root pool can not have multiple vdevs"
1415 " or separate logs"));
1416 (void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
1420 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1421 "cache device must be a disk or disk slice"));
1422 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1426 (void) zpool_standard_error(hdl, errno, msg);
1434 zcmd_free_nvlists(&zc);
1440 * Exports the pool from the system. The caller must ensure that there are no
1441 * mounted datasets in the pool.
1444 zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
1445 const char *log_str)
1447 zfs_cmd_t zc = { 0 };
1450 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1451 "cannot export '%s'"), zhp->zpool_name);
1453 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1454 zc.zc_cookie = force;
1455 zc.zc_guid = hardforce;
1456 zc.zc_history = (uint64_t)(uintptr_t)log_str;
1458 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
1461 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
1462 "use '-f' to override the following errors:\n"
1463 "'%s' has an active shared spare which could be"
1464 " used by other pools once '%s' is exported."),
1465 zhp->zpool_name, zhp->zpool_name);
1466 return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
1469 return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
1478 zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
1480 return (zpool_export_common(zhp, force, B_FALSE, log_str));
1484 zpool_export_force(zpool_handle_t *zhp, const char *log_str)
1486 return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
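/*
 * Usage sketch (illustrative only): a normal export versus a hard-forced
 * export.  The log string is recorded in the pool history.
 *
 *	if (zpool_export(zhp, B_FALSE, "example export") != 0)
 *		(void) fprintf(stderr, "export failed\n");
 *
 *	(void) zpool_export_force(zhp, "example forced export");
 */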
1490 zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
1493 nvlist_t *nv = NULL;
1499 if (!hdl->libzfs_printerr || config == NULL)
1502 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1503 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
1507 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1509 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1511 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1512 strftime(timestr, 128, "%c", &t) != 0) {
1514 (void) printf(dgettext(TEXT_DOMAIN,
1515 "Would be able to return %s "
1516 "to its state as of %s.\n"),
1519 (void) printf(dgettext(TEXT_DOMAIN,
1520 "Pool %s returned to its state as of %s.\n"),
1524 (void) printf(dgettext(TEXT_DOMAIN,
1525 "%s approximately %lld "),
1526 dryrun ? "Would discard" : "Discarded",
1528 (void) printf(dgettext(TEXT_DOMAIN,
1529 "minutes of transactions.\n"));
1530 } else if (loss > 0) {
1531 (void) printf(dgettext(TEXT_DOMAIN,
1532 "%s approximately %lld "),
1533 dryrun ? "Would discard" : "Discarded", loss);
1534 (void) printf(dgettext(TEXT_DOMAIN,
1535 "seconds of transactions.\n"));
1541 zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
1544 nvlist_t *nv = NULL;
1546 uint64_t edata = UINT64_MAX;
1551 if (!hdl->libzfs_printerr)
1555 (void) printf(dgettext(TEXT_DOMAIN, "action: "));
1557 (void) printf(dgettext(TEXT_DOMAIN, "\t"));
1559 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
1560 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1561 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
1562 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1565 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1566 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
1569 (void) printf(dgettext(TEXT_DOMAIN,
1570 "Recovery is possible, but will result in some data loss.\n"));
1572 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1573 strftime(timestr, 128, "%c", &t) != 0) {
1574 (void) printf(dgettext(TEXT_DOMAIN,
1575 "\tReturning the pool to its state as of %s\n"
1576 "\tshould correct the problem. "),
1579 (void) printf(dgettext(TEXT_DOMAIN,
1580 "\tReverting the pool to an earlier state "
1581 "should correct the problem.\n\t"));
1585 (void) printf(dgettext(TEXT_DOMAIN,
1586 "Approximately %lld minutes of data\n"
1587 "\tmust be discarded, irreversibly. "), (loss + 30) / 60);
1588 } else if (loss > 0) {
1589 (void) printf(dgettext(TEXT_DOMAIN,
1590 "Approximately %lld seconds of data\n"
1591 "\tmust be discarded, irreversibly. "), loss);
1593 if (edata != 0 && edata != UINT64_MAX) {
1595 (void) printf(dgettext(TEXT_DOMAIN,
1596 "After rewind, at least\n"
1597 "\tone persistent user-data error will remain. "));
1599 (void) printf(dgettext(TEXT_DOMAIN,
1600 "After rewind, several\n"
1601 "\tpersistent user-data errors will remain. "));
1604 (void) printf(dgettext(TEXT_DOMAIN,
1605 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
1606 reason >= 0 ? "clear" : "import", name);
1608 (void) printf(dgettext(TEXT_DOMAIN,
1609 "A scrub of the pool\n"
1610 "\tis strongly recommended after recovery.\n"));
1614 (void) printf(dgettext(TEXT_DOMAIN,
1615 "Destroy and re-create the pool from\n\ta backup source.\n"));
1619 * zpool_import() is a contracted interface. Should be kept the same
1622 * Applications should use zpool_import_props() to import a pool with
1623 * new property values to be set.
1626 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1629 nvlist_t *props = NULL;
1632 if (altroot != NULL) {
1633 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
1634 return (zfs_error_fmt(hdl, EZFS_NOMEM,
1635 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1639 if (nvlist_add_string(props,
1640 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
1641 nvlist_add_string(props,
1642 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
1644 return (zfs_error_fmt(hdl, EZFS_NOMEM,
1645 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1650 ret = zpool_import_props(hdl, config, newname, props,
1657 print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
1663 uint64_t is_log = 0;
1665 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
1669 (void) printf("\t%*s%s%s\n", indent, "", name,
1670 is_log ? " [log]" : "");
1672 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1673 &child, &children) != 0)
1676 for (c = 0; c < children; c++) {
1677 vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE);
1678 print_vdev_tree(hdl, vname, child[c], indent + 2);
1684 zpool_print_unsup_feat(nvlist_t *config)
1686 nvlist_t *nvinfo, *unsup_feat;
1688 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
1690 verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
1693 for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
1694 nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
1697 verify(nvpair_type(nvp) == DATA_TYPE_STRING);
1698 verify(nvpair_value_string(nvp, &desc) == 0);
1700 if (strlen(desc) > 0)
1701 (void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
1703 (void) printf("\t%s\n", nvpair_name(nvp));
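/*
 * Usage sketch (illustrative only): importing every exported pool that
 * zpool_find_import() can discover on the default search paths.  The
 * zpool_find_import() prototype is assumed from libzfs.h; error handling
 * is omitted.
 *
 *	nvlist_t *pools = zpool_find_import(hdl, 0, NULL);
 *	nvpair_t *elem = NULL;
 *	nvlist_t *config;
 *
 *	while (pools != NULL &&
 *	    (elem = nvlist_next_nvpair(pools, elem)) != NULL) {
 *		verify(nvpair_value_nvlist(elem, &config) == 0);
 *		(void) zpool_import(hdl, config, NULL, NULL);
 *	}
 */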
1708 * Import the given pool using the known configuration and a list of
1709 * properties to be set. The configuration should have come from
1710 * zpool_find_import(). The 'newname' parameter controls whether the pool
1711 * is imported with a different name.
1714 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1715 nvlist_t *props, int flags)
1717 zfs_cmd_t zc = { 0 };
1718 zpool_rewind_policy_t policy;
1719 nvlist_t *nv = NULL;
1720 nvlist_t *nvinfo = NULL;
1721 nvlist_t *missing = NULL;
1728 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1731 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1732 "cannot import pool '%s'"), origname);
1734 if (newname != NULL) {
1735 if (!zpool_name_valid(hdl, B_FALSE, newname))
1736 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1737 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1739 thename = (char *)newname;
1744 if (props != NULL) {
1746 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
1748 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
1751 if ((props = zpool_valid_proplist(hdl, origname,
1752 props, version, flags, errbuf)) == NULL)
1754 if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
1761 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
1763 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1766 if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
1767 zcmd_free_nvlists(&zc);
1770 if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
1771 zcmd_free_nvlists(&zc);
1775 zc.zc_cookie = flags;
1776 while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
1778 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
1779 zcmd_free_nvlists(&zc);
1786 (void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
1788 zcmd_free_nvlists(&zc);
1790 zpool_get_rewind_policy(config, &policy);
1796 * Dry-run failed, but we print out what success
1797 * looks like if we found a best txg
1799 if (policy.zrp_request & ZPOOL_TRY_REWIND) {
1800 zpool_rewind_exclaim(hdl, newname ? origname : thename,
1806 if (newname == NULL)
1807 (void) snprintf(desc, sizeof (desc),
1808 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1811 (void) snprintf(desc, sizeof (desc),
1812 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
1817 if (nv != NULL && nvlist_lookup_nvlist(nv,
1818 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
1819 nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
1820 (void) printf(dgettext(TEXT_DOMAIN, "This "
1821 "pool uses the following feature(s) not "
1822 "supported by this system:\n"));
1823 zpool_print_unsup_feat(nv);
1824 if (nvlist_exists(nvinfo,
1825 ZPOOL_CONFIG_CAN_RDONLY)) {
1826 (void) printf(dgettext(TEXT_DOMAIN,
1827 "All unsupported features are only "
1828 "required for writing to the pool."
1829 "\nThe pool can be imported using "
1830 "'-o readonly=on'.\n"));
1834 * Unsupported version.
1836 (void) zfs_error(hdl, EZFS_BADVERSION, desc);
1840 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
1844 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1845 "one or more devices is read only"));
1846 (void) zfs_error(hdl, EZFS_BADDEV, desc);
1850 if (nv && nvlist_lookup_nvlist(nv,
1851 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
1852 nvlist_lookup_nvlist(nvinfo,
1853 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
1854 (void) printf(dgettext(TEXT_DOMAIN,
1855 "The devices below are missing or "
1856 "corrupted, use '-m' to import the pool "
1858 print_vdev_tree(hdl, NULL, missing, 2);
1859 (void) printf("\n");
1861 (void) zpool_standard_error(hdl, error, desc);
1865 (void) zpool_standard_error(hdl, error, desc);
1868 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1869 "new name of at least one dataset is longer than "
1870 "the maximum allowable length"));
1871 (void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
1874 (void) zpool_standard_error(hdl, error, desc);
1875 zpool_explain_recover(hdl,
1876 newname ? origname : thename, -error, nv);
1883 zpool_handle_t *zhp;
1886 * This should never fail, but play it safe anyway.
1888 if (zpool_open_silent(hdl, thename, &zhp) != 0)
1890 else if (zhp != NULL)
1892 if (policy.zrp_request &
1893 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
1894 zpool_rewind_exclaim(hdl, newname ? origname : thename,
1895 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
1908 zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd)
1910 zfs_cmd_t zc = { 0 };
1913 libzfs_handle_t *hdl = zhp->zpool_hdl;
1915 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1916 zc.zc_cookie = func;
1919 if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0)
1924 /* ECANCELED on a scrub means we resumed a paused scrub */
1925 if (err == ECANCELED && func == POOL_SCAN_SCRUB &&
1926 cmd == POOL_SCRUB_NORMAL)
1929 if (err == ENOENT && func != POOL_SCAN_NONE && cmd == POOL_SCRUB_NORMAL)
1932 if (func == POOL_SCAN_SCRUB) {
1933 if (cmd == POOL_SCRUB_PAUSE) {
1934 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1935 "cannot pause scrubbing %s"), zc.zc_name);
1937 assert(cmd == POOL_SCRUB_NORMAL);
1938 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1939 "cannot scrub %s"), zc.zc_name);
1941 } else if (func == POOL_SCAN_NONE) {
1942 (void) snprintf(msg, sizeof (msg),
1943 dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
1946 assert(!"unexpected result");
1951 pool_scan_stat_t *ps = NULL;
1954 verify(nvlist_lookup_nvlist(zhp->zpool_config,
1955 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1956 (void) nvlist_lookup_uint64_array(nvroot,
1957 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
1958 if (ps && ps->pss_func == POOL_SCAN_SCRUB) {
1959 if (cmd == POOL_SCRUB_PAUSE)
1960 return (zfs_error(hdl, EZFS_SCRUB_PAUSED, msg));
1962 return (zfs_error(hdl, EZFS_SCRUBBING, msg));
1964 return (zfs_error(hdl, EZFS_RESILVERING, msg));
1966 } else if (err == ENOENT) {
1967 return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
1969 return (zpool_standard_error(hdl, err, msg));
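/*
 * Usage sketch (illustrative only): starting a scrub, pausing it, and then
 * cancelling it altogether via zpool_scan().
 *
 *	(void) zpool_scan(zhp, POOL_SCAN_SCRUB, POOL_SCRUB_NORMAL);
 *	(void) zpool_scan(zhp, POOL_SCAN_SCRUB, POOL_SCRUB_PAUSE);
 *	(void) zpool_scan(zhp, POOL_SCAN_NONE, POOL_SCRUB_NORMAL);
 */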
1975 * This provides a very minimal check whether a given string is likely a
1976 * c#t#d# style string. Users of this are expected to do their own
1977 * verification of the s# part.
1979 #define CTD_CHECK(str) (str && str[0] == 'c' && isdigit(str[1]))
1982 * More elaborate version for ones which may start with "/dev/dsk/"
1986 ctd_check_path(char *str)
1989 * If it starts with a slash, check the last component.
1991 if (str && str[0] == '/') {
1992 char *tmp = strrchr(str, '/');
1995 * If it ends in "/old", check the second-to-last
1996 * component of the string instead.
1998 if (tmp != str && strcmp(tmp, "/old") == 0) {
1999 for (tmp--; *tmp != '/'; tmp--)
2004 return (CTD_CHECK(str));
2009 * Find a vdev that matches the search criteria specified. We use the
2010 * nvpair name to determine how we should look for the device.
2011 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
2012 * spare; it is FALSE if the guid refers to an INUSE spare.
2015 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
2016 boolean_t *l2cache, boolean_t *log)
2023 nvpair_t *pair = nvlist_next_nvpair(search, NULL);
2025 /* Nothing to look for */
2026 if (search == NULL || pair == NULL)
2029 /* Obtain the key we will use to search */
2030 srchkey = nvpair_name(pair);
2032 switch (nvpair_type(pair)) {
2033 case DATA_TYPE_UINT64:
2034 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
2035 uint64_t srchval, theguid;
2037 verify(nvpair_value_uint64(pair, &srchval) == 0);
2038 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2040 if (theguid == srchval)
2045 case DATA_TYPE_STRING: {
2046 char *srchval, *val;
2048 verify(nvpair_value_string(pair, &srchval) == 0);
2049 if (nvlist_lookup_string(nv, srchkey, &val) != 0)
2053 * Search for the requested value. Special cases:
2055 * - ZPOOL_CONFIG_PATH for whole disk entries. To support
2056 * UEFI boot, these end in "s0" or "s0/old" or "s1" or
2057 * "s1/old". The "s0" or "s1" part is hidden from the user,
2058 * but included in the string, so this matches around it.
2059 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
2061 * Otherwise, all other searches are simple string compares.
2064 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 &&
2065 ctd_check_path(val)) {
2066 uint64_t wholedisk = 0;
2068 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
2071 int slen = strlen(srchval);
2072 int vlen = strlen(val);
2074 if (slen != vlen - 2)
2078 * make_leaf_vdev() should only set
2079 * wholedisk for ZPOOL_CONFIG_PATHs which
2080 * will include "/dev/dsk/", giving plenty of
2081 * room for the indices used next.
2086 * strings identical except trailing "s0"
2088 if ((strcmp(&val[vlen - 2], "s0") == 0 ||
2089 strcmp(&val[vlen - 2], "s1") == 0) &&
2090 strncmp(srchval, val, slen) == 0)
2094 * strings identical except trailing "s0/old"
2096 if ((strcmp(&val[vlen - 6], "s0/old") == 0 ||
2097 strcmp(&val[vlen - 6], "s1/old") == 0) &&
2098 strcmp(&srchval[slen - 4], "/old") == 0 &&
2099 strncmp(srchval, val, slen - 4) == 0)
2104 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
2108 char *type, *idx, *end, *p;
2109 uint64_t id, vdev_id;
2112 * Determine our vdev type, keeping in mind
2113 * that the srchval is composed of a type and
2114 * vdev id pair (i.e. mirror-4).
2116 if ((type = strdup(srchval)) == NULL)
2119 if ((p = strrchr(type, '-')) == NULL) {
2127 * If the types don't match then keep looking.
2129 if (strncmp(val, type, strlen(val)) != 0) {
2134 verify(zpool_vdev_is_interior(type));
2135 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
2139 vdev_id = strtoull(idx, &end, 10);
2146 * Now verify that we have the correct vdev id.
2155 if (strcmp(srchval, val) == 0)
2164 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2165 &child, &children) != 0)
2168 for (c = 0; c < children; c++) {
2169 if ((ret = vdev_to_nvlist_iter(child[c], search,
2170 avail_spare, l2cache, NULL)) != NULL) {
2172 * The 'is_log' value is only set for the toplevel
2173 * vdev, not the leaf vdevs. So we always look up the
2174 * log device from the root of the vdev tree (where
2175 * 'log' is non-NULL).
2178 nvlist_lookup_uint64(child[c],
2179 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
2187 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
2188 &child, &children) == 0) {
2189 for (c = 0; c < children; c++) {
2190 if ((ret = vdev_to_nvlist_iter(child[c], search,
2191 avail_spare, l2cache, NULL)) != NULL) {
2192 *avail_spare = B_TRUE;
2198 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
2199 &child, &children) == 0) {
2200 for (c = 0; c < children; c++) {
2201 if ((ret = vdev_to_nvlist_iter(child[c], search,
2202 avail_spare, l2cache, NULL)) != NULL) {
2213 * Given a physical path (minus the "/devices" prefix), find the
2217 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
2218 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
2220 nvlist_t *search, *nvroot, *ret;
2222 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2223 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
2225 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2228 *avail_spare = B_FALSE;
2232 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2233 nvlist_free(search);
2239 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
2242 zpool_vdev_is_interior(const char *name)
2244 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2245 strncmp(name, VDEV_TYPE_SPARE, strlen(VDEV_TYPE_SPARE)) == 0 ||
2247 VDEV_TYPE_REPLACING, strlen(VDEV_TYPE_REPLACING)) == 0 ||
2248 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
2254 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
2255 boolean_t *l2cache, boolean_t *log)
2257 char buf[MAXPATHLEN];
2259 nvlist_t *nvroot, *search, *ret;
2262 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2264 guid = strtoull(path, &end, 10);
2265 if (guid != 0 && *end == '\0') {
2266 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
2267 } else if (zpool_vdev_is_interior(path)) {
2268 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
2269 } else if (path[0] != '/') {
2270 (void) snprintf(buf, sizeof (buf), "%s%s", _PATH_DEV, path);
2271 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0);
2273 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
2276 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2279 *avail_spare = B_FALSE;
2283 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2284 nvlist_free(search);
2290 vdev_online(nvlist_t *nv)
2294 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2295 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2296 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2303 * Helper function for zpool_get_physpaths().
2306 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
2307 size_t *bytes_written)
2309 size_t bytes_left, pos, rsz;
2313 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
2315 return (EZFS_NODEVICE);
2317 pos = *bytes_written;
2318 bytes_left = physpath_size - pos;
2319 format = (pos == 0) ? "%s" : " %s";
2321 rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
2322 *bytes_written += rsz;
2324 if (rsz >= bytes_left) {
2325 /* if physpath was not copied properly, clear it */
2326 if (bytes_left != 0) {
2329 return (EZFS_NOSPC);
2335 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
2336 size_t *rsz, boolean_t is_spare)
2341 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
2342 return (EZFS_INVALCONFIG);
2344 if (strcmp(type, VDEV_TYPE_DISK) == 0) {
2346 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
2347 * For a spare vdev, we only want to boot from the active
2352 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
2355 return (EZFS_INVALCONFIG);
2358 if (vdev_online(nv)) {
2359 if ((ret = vdev_get_one_physpath(nv, physpath,
2360 phypath_size, rsz)) != 0)
2363 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
2364 strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
2365 strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
2366 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
2371 if (nvlist_lookup_nvlist_array(nv,
2372 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
2373 return (EZFS_INVALCONFIG);
2375 for (i = 0; i < count; i++) {
2376 ret = vdev_get_physpaths(child[i], physpath,
2377 phypath_size, rsz, is_spare);
2378 if (ret == EZFS_NOSPC)
2383 return (EZFS_POOL_INVALARG);
2387 * Get phys_path for a root pool config.
2388 * Return 0 on success; non-zero on failure.
2391 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
2394 nvlist_t *vdev_root;
2401 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2403 return (EZFS_INVALCONFIG);
2405 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
2406 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
2407 &child, &count) != 0)
2408 return (EZFS_INVALCONFIG);
2411 * root pool can only have a single top-level vdev.
2413 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1)
2414 return (EZFS_POOL_INVALARG);
2416 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
2419 /* No online devices */
2421 return (EZFS_NODEVICE);
2427 * Get phys_path for a root pool.
2428 * Return 0 on success; non-zero on failure.
2431 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
2433 return (zpool_get_config_physpath(zhp->zpool_config, physpath,
2438 * If the device has been dynamically expanded then we need to relabel
2439 * the disk to use the new unallocated space.
2442 zpool_relabel_disk(libzfs_handle_t *hdl, const char *name)
2445 char path[MAXPATHLEN];
2448 int (*_efi_use_whole_disk)(int);
2450 if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT,
2451 "efi_use_whole_disk")) == NULL)
2454 (void) snprintf(path, sizeof (path), "%s/%s", ZFS_RDISK_ROOT, name);
2456 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
2457 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2458 "relabel '%s': unable to open device"), name);
2459 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
2463 * It's possible that we might encounter an error if the device
2464 * does not have any unallocated space left. If so, we simply
2465 * ignore that error and continue on.
2467 error = _efi_use_whole_disk(fd);
2469 if (error && error != VT_ENOSPC) {
2470 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2471 "relabel '%s': unable to read disk capacity"), name);
2472 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
2474 #endif /* illumos */
2479 * Bring the specified vdev online. The 'flags' parameter is a set of the
2480 * ZFS_ONLINE_* flags.
2483 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
2484 vdev_state_t *newstate)
2486 zfs_cmd_t zc = { 0 };
2490 boolean_t avail_spare, l2cache, islog;
2491 libzfs_handle_t *hdl = zhp->zpool_hdl;
2493 if (flags & ZFS_ONLINE_EXPAND) {
2494 (void) snprintf(msg, sizeof (msg),
2495 dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
2497 (void) snprintf(msg, sizeof (msg),
2498 dgettext(TEXT_DOMAIN, "cannot online %s"), path);
2501 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2502 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2504 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2506 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2509 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2511 if ((flags & ZFS_ONLINE_EXPAND ||
2512 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) &&
2513 nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &pathname) == 0) {
2514 uint64_t wholedisk = 0;
2516 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
2520 * XXX - L2ARC 1.0 devices can't support expansion.
2523 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2524 "cannot expand cache devices"));
2525 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
2529 pathname += strlen(ZFS_DISK_ROOT) + 1;
2530 (void) zpool_relabel_disk(hdl, pathname);
2534 zc.zc_cookie = VDEV_STATE_ONLINE;
2537 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
2538 if (errno == EINVAL) {
2539 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
2540 "from this pool into a new one. Use '%s' "
2541 "instead"), "zpool detach");
2542 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
2544 return (zpool_standard_error(hdl, errno, msg));
2547 *newstate = zc.zc_cookie;
2552 * Take the specified vdev offline
2555 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
2557 zfs_cmd_t zc = { 0 };
2560 boolean_t avail_spare, l2cache;
2561 libzfs_handle_t *hdl = zhp->zpool_hdl;
2563 (void) snprintf(msg, sizeof (msg),
2564 dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
2566 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2567 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2569 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2571 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2574 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2576 zc.zc_cookie = VDEV_STATE_OFFLINE;
2577 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
2579 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2586 * There are no other replicas of this device.
2588 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2592 * The log device has unplayed logs
2594 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
2597 return (zpool_standard_error(hdl, errno, msg));
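/*
 * Usage sketch (illustrative only): temporarily offlining a disk and then
 * bringing it back online.  The device name accepted is whatever
 * zpool_find_vdev() understands; "c1t0d0" is just a placeholder.
 *
 *	vdev_state_t newstate;
 *
 *	if (zpool_vdev_offline(zhp, "c1t0d0", B_TRUE) == 0 &&
 *	    zpool_vdev_online(zhp, "c1t0d0", 0, &newstate) == 0)
 *		(void) printf("vdev back online, state %d\n", (int)newstate);
 */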
2602 * Mark the given vdev faulted.
2605 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2607 zfs_cmd_t zc = { 0 };
2609 libzfs_handle_t *hdl = zhp->zpool_hdl;
2611 (void) snprintf(msg, sizeof (msg),
2612 dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);
2614 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2616 zc.zc_cookie = VDEV_STATE_FAULTED;
2619 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2626 * There are no other replicas of this device.
2628 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2631 return (zpool_standard_error(hdl, errno, msg));
2637 * Mark the given vdev degraded.
2640 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2642 zfs_cmd_t zc = { 0 };
2644 libzfs_handle_t *hdl = zhp->zpool_hdl;
2646 (void) snprintf(msg, sizeof (msg),
2647 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);
2649 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2651 zc.zc_cookie = VDEV_STATE_DEGRADED;
2654 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2657 return (zpool_standard_error(hdl, errno, msg));
2661 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as a hot spare.
2665 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
2671 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
2673 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
2676 if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
2677 children == 2 && child[which] == tgt)
2680 for (c = 0; c < children; c++)
2681 if (is_replacing_spare(child[c], tgt, which))
2689 * Attach new_disk (fully described by nvroot) to old_disk.
2690 * If 'replacing' is specified, the new disk will replace the old one.
2693 zpool_vdev_attach(zpool_handle_t *zhp,
2694 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
2696 zfs_cmd_t zc = { 0 };
2700 boolean_t avail_spare, l2cache, islog;
2705 nvlist_t *config_root;
2706 libzfs_handle_t *hdl = zhp->zpool_hdl;
2707 boolean_t rootpool = zpool_is_bootable(zhp);
2710 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2711 "cannot replace %s with %s"), old_disk, new_disk);
2713 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2714 "cannot attach %s to %s"), new_disk, old_disk);
2716 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2717 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
2719 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2722 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2725 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2727 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2728 zc.zc_cookie = replacing;
2730 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2731 &child, &children) != 0 || children != 1) {
2732 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2733 "new device must be a single disk"));
2734 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
2737 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2738 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
2740 if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
2744 * If the target is a hot spare that has been swapped in, we can only
2745 * replace it with another hot spare.
2748 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
2749 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
2750 NULL) == NULL || !avail_spare) &&
2751 is_replacing_spare(config_root, tgt, 1)) {
2752 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2753 "can only be replaced by another hot spare"));
2755 return (zfs_error(hdl, EZFS_BADTARGET, msg));
2760 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
2763 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
2765 zcmd_free_nvlists(&zc);
2770 * XXX need a better way to prevent user from
2771 * booting up a half-baked vdev.
2773 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
2774 "sure to wait until resilver is done "
2775 "before rebooting.\n"));
2776 (void) fprintf(stderr, "\n");
2777 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "If "
2778 "you boot from pool '%s', you may need to update\n"
2779 "boot code on newly attached disk '%s'.\n\n"
2780 "Assuming you use GPT partitioning and 'da0' is "
2781 "your new boot disk\n"
2782 "you may use the following command:\n\n"
2783 "\tgpart bootcode -b /boot/pmbr -p "
2784 "/boot/gptzfsboot -i 1 da0\n\n"),
2785 zhp->zpool_name, new_disk);
2793 * Can't attach to or replace this type of vdev.
2796 uint64_t version = zpool_get_prop_int(zhp,
2797 ZPOOL_PROP_VERSION, NULL);
2800 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2801 "cannot replace a log with a spare"));
2802 else if (version >= SPA_VERSION_MULTI_REPLACE)
2803 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2804 "already in replacing/spare config; wait "
2805 "for completion or use 'zpool detach'"));
2807 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2808 "cannot replace a replacing device"));
2810 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2811 "can only attach to mirrors and top-level "
2814 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2819 * The new device must be a single disk.
2821 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2822 "new device must be a single disk"));
2823 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
2827 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy, "
2828 "or pool has removing/removed vdevs"),
2830 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2835 * The new device is too small.
2837 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2838 "device is too small"));
2839 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2844 * The new device has a different alignment requirement.
2846 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2847 "devices have different sector alignment"));
2848 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2853 * The resulting top-level vdev spec won't fit in the label.
2855 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
2859 (void) zpool_standard_error(hdl, errno, msg);
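/*
 * Editor's sketch (not part of the library): 'nvroot' must be a root
 * vdev nvlist with exactly one child describing the new device; the
 * zpool(1M) command normally builds it from the command line.  A
 * hand-rolled minimal equivalent could look like the following, with
 * the device paths purely illustrative.
 *
 *	nvlist_t *root, *disk;
 *
 *	verify(nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_DISK) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
 *	    "/dev/dsk/c1t1d0s0") == 0);
 *	verify(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(root, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_ROOT) == 0);
 *	verify(nvlist_add_nvlist_array(root, ZPOOL_CONFIG_CHILDREN,
 *	    &disk, 1) == 0);
 *
 *	(void) zpool_vdev_attach(zhp, "c1t0d0s0", "c1t1d0s0", root, 1);
 *
 * where the final argument of 1 requests a replace rather than a
 * mirror attach.
 */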
2866 * Detach the specified device.
2869 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2871 zfs_cmd_t zc = { 0 };
2874 boolean_t avail_spare, l2cache;
2875 libzfs_handle_t *hdl = zhp->zpool_hdl;
2877 (void) snprintf(msg, sizeof (msg),
2878 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2880 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2881 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2883 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2886 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2889 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2891 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2893 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
2900 * Can't detach from this type of vdev.
2902 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
2903 "applicable to mirror and replacing vdevs"));
2904 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2909 * There are no other replicas of this device.
2911 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
2915 (void) zpool_standard_error(hdl, errno, msg);
2922 * Find a mirror vdev in the source nvlist.
2924 * The mchild array contains a list of disks in one of the top-level mirrors
2925 * of the source pool. The schild array contains a list of disks that the
2926 * user specified on the command line. We loop over the mchild array to
2927 * see if any entry in the schild array matches.
2929 * If a disk in the mchild array is found in the schild array, we return
2930 * the index of that entry. Otherwise we return -1.
2933 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
2934 nvlist_t **schild, uint_t schildren)
2938 for (mc = 0; mc < mchildren; mc++) {
2940 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2941 mchild[mc], B_FALSE);
2943 for (sc = 0; sc < schildren; sc++) {
2944 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2945 schild[sc], B_FALSE);
2946 boolean_t result = (strcmp(mpath, spath) == 0);
2962 * Split a mirror pool. If newroot points to null, then a new nvlist
2963 * is generated and it is the responsibility of the caller to free it.
2966 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
2967 nvlist_t *props, splitflags_t flags)
2969 zfs_cmd_t zc = { 0 };
2971 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
2972 nvlist_t **varray = NULL, *zc_props = NULL;
2973 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
2974 libzfs_handle_t *hdl = zhp->zpool_hdl;
2976 boolean_t freelist = B_FALSE, memory_err = B_TRUE;
2979 (void) snprintf(msg, sizeof (msg),
2980 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
2982 if (!zpool_name_valid(hdl, B_FALSE, newname))
2983 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
2985 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
2986 (void) fprintf(stderr, gettext("Internal error: unable to "
2987 "retrieve pool configuration\n"));
2991 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
2993 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
2996 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
2997 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
2998 props, vers, flags, msg)) == NULL)
3002 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
3004 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3005 "Source pool is missing vdev tree"));
3006 nvlist_free(zc_props);
3010 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
3013 if (*newroot == NULL ||
3014 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
3015 &newchild, &newchildren) != 0)
3018 for (c = 0; c < children; c++) {
3019 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
3021 nvlist_t **mchild, *vdev;
3026 * Unlike cache & spares, slogs are stored in the
3027 * ZPOOL_CONFIG_CHILDREN array. We filter them out here.
3029 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
3031 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
3033 if (is_log || is_hole) {
3035 * Create a hole vdev and put it in the config.
3037 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
3039 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
3040 VDEV_TYPE_HOLE) != 0)
3042 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
3047 varray[vcount++] = vdev;
3051 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
3053 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
3054 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3055 "Source pool must be composed only of mirrors\n"));
3056 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
3060 verify(nvlist_lookup_nvlist_array(child[c],
3061 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
3063 /* find or add an entry for this top-level vdev */
3064 if (newchildren > 0 &&
3065 (entry = find_vdev_entry(zhp, mchild, mchildren,
3066 newchild, newchildren)) >= 0) {
3067 /* We found a disk that the user specified. */
3068 vdev = mchild[entry];
3071 /* User didn't specify a disk for this vdev. */
3072 vdev = mchild[mchildren - 1];
3075 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
3079 /* did we find every disk the user specified? */
3080 if (found != newchildren) {
3081 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
3082 "include at most one disk from each mirror"));
3083 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
3087 /* Prepare the nvlist for populating. */
3088 if (*newroot == NULL) {
3089 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
3092 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
3093 VDEV_TYPE_ROOT) != 0)
3096 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
3099 /* Add all the children we found */
3100 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
3101 lastlog == 0 ? vcount : lastlog) != 0)
3105 * If we're just doing a dry run, exit now with success.
3108 memory_err = B_FALSE;
3113 /* now build up the config list & call the ioctl */
3114 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
3117 if (nvlist_add_nvlist(newconfig,
3118 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
3119 nvlist_add_string(newconfig,
3120 ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
3121 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
3125 * The new pool is automatically part of the namespace unless we
3126 * explicitly export it.
3129 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
3130 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3131 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
3132 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
3134 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
3137 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
3138 retval = zpool_standard_error(hdl, errno, msg);
3143 memory_err = B_FALSE;
3146 if (varray != NULL) {
3149 for (v = 0; v < vcount; v++)
3150 nvlist_free(varray[v]);
3153 zcmd_free_nvlists(&zc);
3154 nvlist_free(zc_props);
3155 nvlist_free(newconfig);
3157 nvlist_free(*newroot);
3165 return (no_memory(hdl));
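/*
 * Editor's usage sketch (not part of the library), assuming the
 * 'dryrun' bit in splitflags_t from libzfs.h: preview the config that
 * would be split off into a pool named "newpool" without committing,
 * then free the returned nvlist as the comment above requires.
 *
 *	splitflags_t flags = { 0 };
 *	nvlist_t *newroot = NULL;
 *
 *	flags.dryrun = 1;
 *	if (zpool_vdev_split(zhp, "newpool", &newroot, NULL, flags) == 0) {
 *		nvlist_print(stdout, newroot);
 *		nvlist_free(newroot);
 *	}
 */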
3171 * Remove the given device.
3174 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
3176 zfs_cmd_t zc = { 0 };
3179 boolean_t avail_spare, l2cache, islog;
3180 libzfs_handle_t *hdl = zhp->zpool_hdl;
3183 (void) snprintf(msg, sizeof (msg),
3184 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3186 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3187 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3189 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3191 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
3192 if (islog && version < SPA_VERSION_HOLES) {
3193 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3194 "pool must be upgraded to support log removal"));
3195 return (zfs_error(hdl, EZFS_BADVERSION, msg));
3198 if (!islog && !avail_spare && !l2cache && zpool_is_bootable(zhp)) {
3199 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3200 "root pool can not have removed devices, "
3201 "because GRUB does not understand them"));
3202 return (zfs_error(hdl, EINVAL, msg));
3205 zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
3207 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3213 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3214 "invalid config; all top-level vdevs must "
3215 "have the same sector size and not be raidz."));
3216 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
3220 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3221 "Pool busy; removal may already be in progress"));
3222 (void) zfs_error(hdl, EZFS_BUSY, msg);
3226 (void) zpool_standard_error(hdl, errno, msg);
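/*
 * Editor's usage sketch (not part of the library): kick off removal of
 * a top-level device (the name is illustrative); an in-progress
 * removal can later be aborted with zpool_vdev_remove_cancel() below.
 *
 *	if (zpool_vdev_remove(zhp, "c3t0d0") != 0)
 *		(void) fprintf(stderr, "remove failed\n");
 */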
3232 zpool_vdev_remove_cancel(zpool_handle_t *zhp)
3234 zfs_cmd_t zc = { 0 };
3236 libzfs_handle_t *hdl = zhp->zpool_hdl;
3238 (void) snprintf(msg, sizeof (msg),
3239 dgettext(TEXT_DOMAIN, "cannot cancel removal"));
3241 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3244 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3247 return (zpool_standard_error(hdl, errno, msg));
3251 zpool_vdev_indirect_size(zpool_handle_t *zhp, const char *path,
3256 boolean_t avail_spare, l2cache, islog;
3257 libzfs_handle_t *hdl = zhp->zpool_hdl;
3259 (void) snprintf(msg, sizeof (msg),
3260 dgettext(TEXT_DOMAIN, "cannot determine indirect size of %s"),
3263 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3265 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3267 if (avail_spare || l2cache || islog) {
3272 if (nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_INDIRECT_SIZE, sizep) != 0) {
3273 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3274 "indirect size not available"));
3275 return (zfs_error(hdl, EINVAL, msg));
3281 * Clear the errors for the pool, or the particular device if specified.
3284 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
3286 zfs_cmd_t zc = { 0 };
3289 zpool_rewind_policy_t policy;
3290 boolean_t avail_spare, l2cache;
3291 libzfs_handle_t *hdl = zhp->zpool_hdl;
3292 nvlist_t *nvi = NULL;
3296 (void) snprintf(msg, sizeof (msg),
3297 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3300 (void) snprintf(msg, sizeof (msg),
3301 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3304 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3306 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
3307 &l2cache, NULL)) == NULL)
3308 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3311 * Don't allow error clearing for hot spares. Do allow
3312 * error clearing for l2cache devices.
3315 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3317 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
3321 zpool_get_rewind_policy(rewindnvl, &policy);
3322 zc.zc_cookie = policy.zrp_request;
3324 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
3327 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
3330 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
3332 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3333 zcmd_free_nvlists(&zc);
3338 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
3339 errno != EPERM && errno != EACCES)) {
3340 if (policy.zrp_request &
3341 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
3342 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
3343 zpool_rewind_exclaim(hdl, zc.zc_name,
3344 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
3348 zcmd_free_nvlists(&zc);
3352 zcmd_free_nvlists(&zc);
3353 return (zpool_standard_error(hdl, errno, msg));
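/*
 * Editor's usage sketch (not part of the library): clear errors for the
 * entire pool without requesting a rewind, mirroring what 'zpool clear'
 * passes when no rewind options are given.
 *
 *	nvlist_t *policy = fnvlist_alloc();
 *
 *	fnvlist_add_uint32(policy, ZPOOL_REWIND_REQUEST, ZPOOL_NO_REWIND);
 *	(void) zpool_clear(zhp, NULL, policy);
 *	fnvlist_free(policy);
 */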
3357 * Similar to zpool_clear(), but takes a GUID (used by fmd).
3360 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
3362 zfs_cmd_t zc = { 0 };
3364 libzfs_handle_t *hdl = zhp->zpool_hdl;
3366 (void) snprintf(msg, sizeof (msg),
3367 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
3370 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3372 zc.zc_cookie = ZPOOL_NO_REWIND;
3374 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
3377 return (zpool_standard_error(hdl, errno, msg));
3381 * Change the GUID for a pool.
3384 zpool_reguid(zpool_handle_t *zhp)
3387 libzfs_handle_t *hdl = zhp->zpool_hdl;
3388 zfs_cmd_t zc = { 0 };
3390 (void) snprintf(msg, sizeof (msg),
3391 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
3393 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3394 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
3397 return (zpool_standard_error(hdl, errno, msg));
3404 zpool_reopen(zpool_handle_t *zhp)
3406 zfs_cmd_t zc = { 0 };
3408 libzfs_handle_t *hdl = zhp->zpool_hdl;
3410 (void) snprintf(msg, sizeof (msg),
3411 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
3414 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3415 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
3417 return (zpool_standard_error(hdl, errno, msg));
3421 * Convert from a devid string to a path.
3424 devid_to_path(char *devid_str)
3429 devid_nmlist_t *list = NULL;
3432 if (devid_str_decode(devid_str, &devid, &minor) != 0)
3435 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
3437 devid_str_free(minor);
3444 * If strdup() fails, we will just return NULL below.
3446 path = strdup(list[0].devname);
3448 devid_free_nmlist(list);
3454 * Convert from a path to a devid string.
3457 path_to_devid(const char *path)
3464 if ((fd = open(path, O_RDONLY)) < 0)
3469 if (devid_get(fd, &devid) == 0) {
3470 if (devid_get_minor_name(fd, &minor) == 0)
3471 ret = devid_str_encode(devid, minor);
3473 devid_str_free(minor);
3485 * Issue the necessary ioctl() to update the stored path value for the vdev. We
3486 * ignore any failure here, since a common case is for an unprivileged user to
3487 * type 'zpool status', and we'll display the correct information anyway.
3490 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
3492 zfs_cmd_t zc = { 0 };
3494 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3495 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
3496 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3499 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
3503 * Given a vdev, return the name to display in iostat. If the vdev has a path,
3504 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
3505 * We also check if this is a whole disk, in which case we strip off the
3506 * trailing 's0' slice name.
3508 * This routine is also responsible for identifying when disks have been
3509 * reconfigured in a new location. The kernel will have opened the device by
3510 * devid, but the path will still refer to the old location. To catch this, we
3511 * first do a path -> devid translation (which is fast for the common case). If
3512 * the devid matches, we're done. If not, we do a reverse devid -> path
3513 * translation and issue the appropriate ioctl() to update the path of the vdev.
3514 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any of these checks.
3518 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
3529 have_stats = nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
3530 (uint64_t **)&vs, &vsc) == 0;
3531 have_path = nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0;
3534 * If the device is not currently present, assume it will not
3535 * come back at the same device path. Display the device by GUID.
3537 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
3538 have_path && have_stats && vs->vs_state <= VDEV_STATE_CANT_OPEN) {
3539 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3541 (void) snprintf(buf, sizeof (buf), "%llu",
3542 (u_longlong_t)value);
3544 } else if (have_path) {
3547 * If the device is dead (faulted, offline, etc) then don't
3548 * bother opening it. Otherwise we may be forcing the user to
3549 * open a misbehaving device, which can have undesirable
3552 if ((have_stats == 0 ||
3553 vs->vs_state >= VDEV_STATE_DEGRADED) &&
3555 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
3557 * Determine if the current path is correct.
3559 char *newdevid = path_to_devid(path);
3561 if (newdevid == NULL ||
3562 strcmp(devid, newdevid) != 0) {
3565 if ((newpath = devid_to_path(devid)) != NULL) {
3567 * Update the path appropriately.
3569 set_path(zhp, nv, newpath);
3570 if (nvlist_add_string(nv,
3571 ZPOOL_CONFIG_PATH, newpath) == 0)
3572 verify(nvlist_lookup_string(nv,
3580 devid_str_free(newdevid);
3584 if (strncmp(path, ZFS_DISK_ROOTD, strlen(ZFS_DISK_ROOTD)) == 0)
3585 path += strlen(ZFS_DISK_ROOTD);
3587 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
3588 &value) == 0 && value) {
3589 int pathlen = strlen(path);
3590 char *tmp = zfs_strdup(hdl, path);
3593 * If it starts with c#, and ends with "s0" or "s1",
3594 * chop the slice off, or if it ends with "s0/old" or
3595 * "s1/old", remove the slice from the middle.
3597 if (CTD_CHECK(tmp)) {
3598 if (strcmp(&tmp[pathlen - 2], "s0") == 0 ||
3599 strcmp(&tmp[pathlen - 2], "s1") == 0) {
3600 tmp[pathlen - 2] = '\0';
3601 } else if (pathlen > 6 &&
3602 (strcmp(&tmp[pathlen - 6], "s0/old") == 0 ||
3603 strcmp(&tmp[pathlen - 6], "s1/old") == 0)) {
3604 (void) strcpy(&tmp[pathlen - 6],
3610 #else /* !illumos */
3611 if (strncmp(path, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
3612 path += sizeof(_PATH_DEV) - 1;
3613 #endif /* illumos */
3615 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
3618 * If it's a raidz device, we need to stick in the parity level.
3620 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
3621 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3623 (void) snprintf(buf, sizeof (buf), "%s%llu", path,
3624 (u_longlong_t)value);
3629 * We identify each top-level vdev by using a <type-id>
3630 * naming convention.
3635 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
3637 (void) snprintf(buf, sizeof (buf), "%s-%llu", path,
3643 return (zfs_strdup(hdl, path));
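/*
 * Editor's usage sketch (not part of the library): the name returned by
 * zpool_vdev_name() is allocated with zfs_strdup() and must be freed by
 * the caller.
 *
 *	char *name = zpool_vdev_name(hdl, zhp, nv, B_FALSE);
 *	(void) printf("%s\n", name);
 *	free(name);
 */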
3647 zbookmark_mem_compare(const void *a, const void *b)
3649 return (memcmp(a, b, sizeof (zbookmark_phys_t)));
3653 * Retrieve the persistent error log, uniquify the members, and return to the caller.
3657 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
3659 zfs_cmd_t zc = { 0 };
3661 zbookmark_phys_t *zb = NULL;
3665 * Retrieve the raw error list from the kernel. If the number of errors
3666 * has increased, allocate more space and continue until we get the entire list.
3669 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
3673 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
3674 count * sizeof (zbookmark_phys_t))) == (uintptr_t)NULL)
3676 zc.zc_nvlist_dst_size = count;
3677 (void) strcpy(zc.zc_name, zhp->zpool_name);
3679 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
3681 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3682 if (errno == ENOMEM) {
3685 count = zc.zc_nvlist_dst_size;
3686 dst = zfs_alloc(zhp->zpool_hdl, count *
3687 sizeof (zbookmark_phys_t));
3690 zc.zc_nvlist_dst = (uintptr_t)dst;
3700 * Sort the resulting bookmarks. This is a little confusing due to the
3701 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
3702 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
3703 * _not_ copied as part of the process. So we point the start of our
3704 * array appropriately and decrement the total number of elements.
3706 zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
3707 zc.zc_nvlist_dst_size;
3708 count -= zc.zc_nvlist_dst_size;
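/*
 * Editor's worked example: if the config reported count == 100 errors
 * but the kernel copied only 60 bookmarks, zc_nvlist_dst_size comes
 * back as 40; the copied bookmarks occupy elements 40..99 of the
 * buffer, so zb now points at element 40 and count becomes 60.
 */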
3710 qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);
3712 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
3715 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
3717 for (i = 0; i < count; i++) {
3720 /* ignoring zb_blkid and zb_level for now */
3721 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
3722 zb[i-1].zb_object == zb[i].zb_object)
3725 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
3727 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
3728 zb[i].zb_objset) != 0) {
3732 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
3733 zb[i].zb_object) != 0) {
3737 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
3744 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3748 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3749 return (no_memory(zhp->zpool_hdl));
3753 * Upgrade a ZFS pool to the latest on-disk version.
3756 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
3758 zfs_cmd_t zc = { 0 };
3759 libzfs_handle_t *hdl = zhp->zpool_hdl;
3761 (void) strcpy(zc.zc_name, zhp->zpool_name);
3762 zc.zc_cookie = new_version;
3764 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
3765 return (zpool_standard_error_fmt(hdl, errno,
3766 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
3772 zfs_save_arguments(int argc, char **argv, char *string, int len)
3774 (void) strlcpy(string, basename(argv[0]), len);
3775 for (int i = 1; i < argc; i++) {
3776 (void) strlcat(string, " ", len);
3777 (void) strlcat(string, argv[i], len);
3782 zpool_log_history(libzfs_handle_t *hdl, const char *message)
3784 zfs_cmd_t zc = { 0 };
3788 args = fnvlist_alloc();
3789 fnvlist_add_string(args, "message", message);
3790 err = zcmd_write_src_nvlist(hdl, &zc, args);
3792 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
3794 zcmd_free_nvlists(&zc);
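/*
 * Editor's usage sketch (not part of the library): this is how the
 * command-line utilities record their invocation in the pool history,
 * assuming HIS_MAX_RECORD_LEN from the spa headers.
 *
 *	char history_str[HIS_MAX_RECORD_LEN];
 *
 *	zfs_save_arguments(argc, argv, history_str, sizeof (history_str));
 *	(void) zpool_log_history(hdl, history_str);
 */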
3799 * Perform ioctl to get some command history of a pool.
3801 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
3802 * logical offset of the history buffer to start reading from.
3804 * Upon return, 'off' is the next logical offset to read from and
3805 * 'len' is the actual amount of bytes read into 'buf'.
3808 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
3810 zfs_cmd_t zc = { 0 };
3811 libzfs_handle_t *hdl = zhp->zpool_hdl;
3813 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3815 zc.zc_history = (uint64_t)(uintptr_t)buf;
3816 zc.zc_history_len = *len;
3817 zc.zc_history_offset = *off;
3819 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
3822 return (zfs_error_fmt(hdl, EZFS_PERM,
3823 dgettext(TEXT_DOMAIN,
3824 "cannot show history for pool '%s'"),
3827 return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
3828 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3829 "'%s'"), zhp->zpool_name));
3831 return (zfs_error_fmt(hdl, EZFS_BADVERSION,
3832 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3833 "'%s', pool must be upgraded"), zhp->zpool_name));
3835 return (zpool_standard_error_fmt(hdl, errno,
3836 dgettext(TEXT_DOMAIN,
3837 "cannot get history for '%s'"), zhp->zpool_name));
3841 *len = zc.zc_history_len;
3842 *off = zc.zc_history_offset;
3848 * Process the buffer of nvlists, unpacking and storing each nvlist record
3849 * into 'records'. 'leftover' is set to the number of bytes that weren't
3850 * processed as there wasn't a complete record.
3853 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
3854 nvlist_t ***records, uint_t *numrecords)
3860 while (bytes_read > sizeof (reclen)) {
3862 /* get length of packed record (stored as little endian) */
3863 for (i = 0, reclen = 0; i < sizeof (reclen); i++)
3864 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
3866 if (bytes_read < sizeof (reclen) + reclen)
3870 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
3872 bytes_read -= sizeof (reclen) + reclen;
3873 buf += sizeof (reclen) + reclen;
3875 /* add record to nvlist array */
3877 if (ISP2(*numrecords + 1)) {
3878 *records = realloc(*records,
3879 *numrecords * 2 * sizeof (nvlist_t *));
3881 (*records)[*numrecords - 1] = nv;
3884 *leftover = bytes_read;
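/*
 * Editor's note: each record in the buffer is a 64-bit little-endian
 * byte count followed by that many bytes of packed nvlist, so a buffer
 * holding a single 200-byte record looks like
 *
 *	| c8 00 00 00 00 00 00 00 | 200 bytes of packed nvlist |
 *
 * and zpool_history_unpack() above walks such records back to back
 * until only an incomplete record (the 'leftover' bytes) remains.
 */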
3888 /* from spa_history.c: spa_history_create_obj() */
3889 #define HIS_BUF_LEN_DEF (128 << 10)
3890 #define HIS_BUF_LEN_MAX (1 << 30)
3893 * Retrieve the command history of a pool.
3896 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
3899 uint64_t buflen = HIS_BUF_LEN_DEF;
3901 nvlist_t **records = NULL;
3902 uint_t numrecords = 0;
3905 buf = malloc(buflen);
3909 uint64_t bytes_read = buflen;
3912 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
3915 /* if nothing else was read in, we're at EOF, just return */
3916 if (bytes_read == 0)
3919 if ((err = zpool_history_unpack(buf, bytes_read,
3920 &leftover, &records, &numrecords)) != 0)
3923 if (leftover == bytes_read) {
3925 * no progress made, because buffer is not big enough
3926 * to hold this record; resize and retry.
3931 if ((buflen >= HIS_BUF_LEN_MAX) ||
3932 ((buf = malloc(buflen)) == NULL)) {
3944 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
3945 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
3946 records, numrecords) == 0);
3948 for (i = 0; i < numrecords; i++)
3949 nvlist_free(records[i]);
3956 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
3957 char *pathname, size_t len)
3959 zfs_cmd_t zc = { 0 };
3960 boolean_t mounted = B_FALSE;
3961 char *mntpnt = NULL;
3962 char dsname[ZFS_MAX_DATASET_NAME_LEN];
3965 /* special case for the MOS */
3966 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
3970 /* get the dataset's name */
3971 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3973 if (ioctl(zhp->zpool_hdl->libzfs_fd,
3974 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
3975 /* just write out a path of two object numbers */
3976 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
3980 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
3982 /* find out if the dataset is mounted */
3983 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
3985 /* get the corrupted object's path */
3986 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
3988 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
3991 (void) snprintf(pathname, len, "%s%s", mntpnt,
3994 (void) snprintf(pathname, len, "%s:%s",
3995 dsname, zc.zc_value);
3998 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj);
4005 * Read the EFI label from the config, if a label does not exist then
4006 * pass back the error to the caller. If the caller has passed a non-NULL
4007 * diskaddr argument then we set it to the starting address of the EFI
4008 * partition. If the caller has passed a non-NULL boolean argument, then
4009 * we set it to indicate whether the disk has an EFI system partition.
4012 read_efi_label(nvlist_t *config, diskaddr_t *sb, boolean_t *system)
4016 char diskname[MAXPATHLEN];
4017 boolean_t boot = B_FALSE;
4021 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
4024 (void) snprintf(diskname, sizeof (diskname), "%s%s", ZFS_RDISK_ROOT,
4025 strrchr(path, '/'));
4026 if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
4027 struct dk_gpt *vtoc;
4029 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
4030 for (slice = 0; slice < vtoc->efi_nparts; slice++) {
4031 if (vtoc->efi_parts[slice].p_tag == V_SYSTEM)
4033 if (vtoc->efi_parts[slice].p_tag == V_USR)
4036 if (sb != NULL && vtoc->efi_parts[slice].p_tag == V_USR)
4037 *sb = vtoc->efi_parts[slice].p_start;
4048 * determine where a partition starts on a disk in the current configuration.
4052 find_start_block(nvlist_t *config)
4056 diskaddr_t sb = MAXOFFSET_T;
4059 if (nvlist_lookup_nvlist_array(config,
4060 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
4061 if (nvlist_lookup_uint64(config,
4062 ZPOOL_CONFIG_WHOLE_DISK,
4063 &wholedisk) != 0 || !wholedisk) {
4064 return (MAXOFFSET_T);
4066 if (read_efi_label(config, &sb, NULL) < 0)
4071 for (c = 0; c < children; c++) {
4072 sb = find_start_block(child[c]);
4073 if (sb != MAXOFFSET_T) {
4077 return (MAXOFFSET_T);
4079 #endif /* illumos */
4082 * Label an individual disk. The name provided is the short name,
4083 * stripped of any leading /dev path.
4086 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, const char *name,
4087 zpool_boot_label_t boot_type, uint64_t boot_size, int *slice)
4090 char path[MAXPATHLEN];
4091 struct dk_gpt *vtoc;
4093 size_t resv = EFI_MIN_RESV_SIZE;
4094 uint64_t slice_size;
4095 diskaddr_t start_block;
4098 /* prepare an error message just in case */
4099 (void) snprintf(errbuf, sizeof (errbuf),
4100 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
4105 verify(nvlist_lookup_nvlist(zhp->zpool_config,
4106 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
4108 if (zhp->zpool_start_block == 0)
4109 start_block = find_start_block(nvroot);
4111 start_block = zhp->zpool_start_block;
4112 zhp->zpool_start_block = start_block;
4115 start_block = NEW_START_BLOCK;
4118 (void) snprintf(path, sizeof (path), "%s/%s%s", ZFS_RDISK_ROOT, name,
4121 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
4123 * This shouldn't happen. We've long since verified that this
4124 * is a valid device.
4127 dgettext(TEXT_DOMAIN, "unable to open device"));
4128 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
4131 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
4133 * The only way this can fail is if we run out of memory, or we
4134 * were unable to read the disk's capacity
4136 if (errno == ENOMEM)
4137 (void) no_memory(hdl);
4140 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4141 "unable to read disk capacity"), name);
4143 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
4147 * Why we use V_USR: V_BACKUP confuses users, and is considered
4148 * disposable by some EFI utilities (since EFI doesn't have a backup
4149 * slice). V_UNASSIGNED is supposed to be used only for zero size
4150 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
4151 * etc. were all pretty specific. V_USR is as close to reality as we
4152 * can get, in the absence of V_OTHER.
4154 /* first fix the partition start block */
4155 if (start_block == MAXOFFSET_T)
4156 start_block = NEW_START_BLOCK;
4159 * The EFI System partition uses slice 0.
4160 * ZFS is on slice 1 and slice 8 is reserved.
4161 * We assume a GPT partition table without a system
4162 * partition has zfs p_start == NEW_START_BLOCK.
4163 * If start_block != NEW_START_BLOCK, it means we have a
4164 * system partition. The correct solution would be to query/cache
4165 * the vtoc from an existing vdev member.
4167 if (boot_type == ZPOOL_CREATE_BOOT_LABEL) {
4168 if (boot_size % vtoc->efi_lbasize != 0) {
4169 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4170 "boot partition size must be a multiple of %d"),
4174 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4177 * System partition size checks.
4178 * Note the 1MB minimum is a fairly arbitrary value; since we
4179 * are creating a dedicated pool, it should be enough
4180 * to hold the FAT file system plus the EFI bootloader. It may
4181 * need to be adjusted if the bootloader grows.
4183 if (boot_size < 1024 * 1024) {
4185 zfs_nicenum(boot_size, buf, sizeof (buf));
4186 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4187 "Specified size %s for EFI System partition is too "
4188 "small, the minimum size is 1MB."), buf);
4191 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4193 /* 33MB is tested with mkfs -F pcfs */
4194 if (hdl->libzfs_printerr &&
4195 ((vtoc->efi_lbasize == 512 &&
4196 boot_size < 33 * 1024 * 1024) ||
4197 (vtoc->efi_lbasize == 4096 &&
4198 boot_size < 256 * 1024 * 1024))) {
4200 zfs_nicenum(boot_size, buf, sizeof (buf));
4201 (void) fprintf(stderr, dgettext(TEXT_DOMAIN,
4202 "Warning: EFI System partition size %s is "
4203 "not allowing to create FAT32 file\nsystem, which "
4204 "may result in unbootable system.\n"), buf);
4206 /* Adjust zfs partition start by size of system partition. */
4207 start_block += boot_size / vtoc->efi_lbasize;
4210 if (start_block == NEW_START_BLOCK) {
4212 * Use default layout.
4213 * ZFS is on slice 0 and slice 8 is reserved.
4215 slice_size = vtoc->efi_last_u_lba + 1;
4216 slice_size -= EFI_MIN_RESV_SIZE;
4217 slice_size -= start_block;
4221 vtoc->efi_parts[0].p_start = start_block;
4222 vtoc->efi_parts[0].p_size = slice_size;
4224 vtoc->efi_parts[0].p_tag = V_USR;
4225 (void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
4227 vtoc->efi_parts[8].p_start = slice_size + start_block;
4228 vtoc->efi_parts[8].p_size = resv;
4229 vtoc->efi_parts[8].p_tag = V_RESERVED;
4231 slice_size = start_block - NEW_START_BLOCK;
4232 vtoc->efi_parts[0].p_start = NEW_START_BLOCK;
4233 vtoc->efi_parts[0].p_size = slice_size;
4234 vtoc->efi_parts[0].p_tag = V_SYSTEM;
4235 (void) strcpy(vtoc->efi_parts[0].p_name, "loader");
4238 /* prepare slice 1 */
4239 slice_size = vtoc->efi_last_u_lba + 1 - slice_size;
4241 slice_size -= NEW_START_BLOCK;
4242 vtoc->efi_parts[1].p_start = start_block;
4243 vtoc->efi_parts[1].p_size = slice_size;
4244 vtoc->efi_parts[1].p_tag = V_USR;
4245 (void) strcpy(vtoc->efi_parts[1].p_name, "zfs");
4247 vtoc->efi_parts[8].p_start = slice_size + start_block;
4248 vtoc->efi_parts[8].p_size = resv;
4249 vtoc->efi_parts[8].p_tag = V_RESERVED;
4252 if (efi_write(fd, vtoc) != 0) {
4254 * Some block drivers (like pcata) may not support EFI
4255 * GPT labels. Print out a helpful error message directing
4256 * the user to manually label the disk and give
4262 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4263 "try using fdisk(1M) and then provide a specific slice"));
4264 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4269 #endif /* illumos */
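/*
 * Editor's usage sketch (not part of the library), assuming the
 * ZPOOL_NO_BOOT_LABEL enumerator of zpool_boot_label_t: write a plain
 * (non-boot) EFI label on an illustrative disk before adding it to a
 * pool.
 *
 *	int slice;
 *
 *	if (zpool_label_disk(hdl, zhp, "c1t2d0", ZPOOL_NO_BOOT_LABEL,
 *	    0, &slice) != 0)
 *		(void) fprintf(stderr, "labeling failed\n");
 */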
4274 supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
4280 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
4281 if (strcmp(type, VDEV_TYPE_FILE) == 0 ||
4282 strcmp(type, VDEV_TYPE_HOLE) == 0 ||
4283 strcmp(type, VDEV_TYPE_MISSING) == 0) {
4284 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4285 "vdev type '%s' is not supported"), type);
4286 (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
4289 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
4290 &child, &children) == 0) {
4291 for (c = 0; c < children; c++) {
4292 if (!supported_dump_vdev_type(hdl, child[c], errbuf))
4300 * Check if this zvol is allowable for use as a dump device; zero if
4301 * it is, > 0 if it isn't, < 0 if it isn't a zvol.
4303 * Allowable storage configurations include mirrors, all raidz variants, and
4304 * pools with log, cache, and spare devices. Pools which are backed by files or
4305 * have missing/hole vdevs are not suitable.
4308 zvol_check_dump_config(char *arg)
4310 zpool_handle_t *zhp = NULL;
4311 nvlist_t *config, *nvroot;
4315 libzfs_handle_t *hdl;
4317 char poolname[ZFS_MAX_DATASET_NAME_LEN];
4318 int pathlen = strlen(ZVOL_FULL_DEV_DIR);
4321 if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
4325 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
4326 "dump is not supported on device '%s'"), arg);
4328 if ((hdl = libzfs_init()) == NULL)
4330 libzfs_print_on_error(hdl, B_TRUE);
4332 volname = arg + pathlen;
4334 /* check the configuration of the pool */
4335 if ((p = strchr(volname, '/')) == NULL) {
4336 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4337 "malformed dataset name"));
4338 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
4340 } else if (p - volname >= ZFS_MAX_DATASET_NAME_LEN) {
4341 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4342 "dataset name is too long"));
4343 (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
4346 (void) strncpy(poolname, volname, p - volname);
4347 poolname[p - volname] = '\0';
4350 if ((zhp = zpool_open(hdl, poolname)) == NULL) {
4351 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4352 "could not open pool '%s'"), poolname);
4353 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
4356 config = zpool_get_config(zhp, NULL);
4357 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
4359 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4360 "could not obtain vdev configuration for '%s'"), poolname);
4361 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
4365 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
4366 &top, &toplevels) == 0);
4368 if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {
4381 zpool_nextboot(libzfs_handle_t *hdl, uint64_t pool_guid, uint64_t dev_guid,
4382 const char *command)
4384 zfs_cmd_t zc = { 0 };
4390 args = fnvlist_alloc();
4391 fnvlist_add_uint64(args, ZPOOL_CONFIG_POOL_GUID, pool_guid);
4392 fnvlist_add_uint64(args, ZPOOL_CONFIG_GUID, dev_guid);
4393 fnvlist_add_string(args, "command", command);
4394 error = zcmd_write_src_nvlist(hdl, &zc, args);
4396 error = ioctl(hdl->libzfs_fd, ZFS_IOC_NEXTBOOT, &zc);
4397 zcmd_free_nvlists(&zc);