1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21
22 /*
23  * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
24  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
25  * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
26  * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
27  * Copyright (c) 2018 Datto Inc.
28  * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
29  * Copyright (c) 2017, Intel Corporation.
30  * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>
31  */
32
33 #include <errno.h>
34 #include <libintl.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <strings.h>
38 #include <unistd.h>
39 #include <libgen.h>
40 #include <zone.h>
41 #include <sys/stat.h>
42 #include <sys/efi_partition.h>
43 #include <sys/systeminfo.h>
44 #include <sys/zfs_ioctl.h>
45 #include <sys/vdev_disk.h>
46 #include <dlfcn.h>
47 #include <libzutil.h>
48
49 #include "zfs_namecheck.h"
50 #include "zfs_prop.h"
51 #include "libzfs_impl.h"
52 #include "zfs_comutil.h"
53 #include "zfeature_common.h"
54
55 static boolean_t zpool_vdev_is_interior(const char *name);
56
57 typedef struct prop_flags {
58         int create:1;   /* Validate property on creation */
59         int import:1;   /* Validate property on import */
60 } prop_flags_t;
61
62 /*
63  * ====================================================================
64  *   zpool property functions
65  * ====================================================================
66  */
67
68 static int
69 zpool_get_all_props(zpool_handle_t *zhp)
70 {
71         zfs_cmd_t zc = {"\0"};
72         libzfs_handle_t *hdl = zhp->zpool_hdl;
73
74         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
75
76         if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
77                 return (-1);
78
79         while (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
80                 if (errno == ENOMEM) {
81                         if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
82                                 zcmd_free_nvlists(&zc);
83                                 return (-1);
84                         }
85                 } else {
86                         zcmd_free_nvlists(&zc);
87                         return (-1);
88                 }
89         }
90
91         if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
92                 zcmd_free_nvlists(&zc);
93                 return (-1);
94         }
95
96         zcmd_free_nvlists(&zc);
97
98         return (0);
99 }
100
101 int
102 zpool_props_refresh(zpool_handle_t *zhp)
103 {
104         nvlist_t *old_props;
105
106         old_props = zhp->zpool_props;
107
108         if (zpool_get_all_props(zhp) != 0)
109                 return (-1);
110
111         nvlist_free(old_props);
112         return (0);
113 }
114
115 static const char *
116 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
117     zprop_source_t *src)
118 {
119         nvlist_t *nv, *nvl;
120         uint64_t ival;
121         char *value;
122         zprop_source_t source;
123
124         nvl = zhp->zpool_props;
125         if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
126                 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
127                 source = ival;
128                 verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
129         } else {
130                 source = ZPROP_SRC_DEFAULT;
131                 if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
132                         value = "-";
133         }
134
135         if (src)
136                 *src = source;
137
138         return (value);
139 }
140
141 uint64_t
142 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
143 {
144         nvlist_t *nv, *nvl;
145         uint64_t value;
146         zprop_source_t source;
147
148         if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
149                 /*
150                  * zpool_get_all_props() has most likely failed because
151                  * the pool is faulted, but if all we need is the top level
152                  * vdev's guid then get it from the zhp config nvlist.
153                  */
154                 if ((prop == ZPOOL_PROP_GUID) &&
155                     (nvlist_lookup_nvlist(zhp->zpool_config,
156                     ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
157                     (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
158                     == 0)) {
159                         return (value);
160                 }
161                 return (zpool_prop_default_numeric(prop));
162         }
163
164         nvl = zhp->zpool_props;
165         if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
166                 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
167                 source = value;
168                 verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
169         } else {
170                 source = ZPROP_SRC_DEFAULT;
171                 value = zpool_prop_default_numeric(prop);
172         }
173
174         if (src)
175                 *src = source;
176
177         return (value);
178 }
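
/*
 * Illustrative usage sketch (hypothetical, not from the upstream code):
 * reading a numeric pool property and its source with zpool_get_prop_int().
 * The handle is assumed to come from zpool_open(); the property choice and
 * example function name are arbitrary.
 */
#if 0
static void
example_read_version(zpool_handle_t *zhp)
{
        zprop_source_t src;
        uint64_t version;

        /* Falls back to the default value if the props can't be fetched. */
        version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, &src);
        (void) printf("version=%llu (source %d)\n",
            (u_longlong_t)version, (int)src);
}
#endif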
179
180 /*
181  * Map VDEV STATE to printed strings.
182  */
183 const char *
184 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
185 {
186         switch (state) {
187         case VDEV_STATE_CLOSED:
188         case VDEV_STATE_OFFLINE:
189                 return (gettext("OFFLINE"));
190         case VDEV_STATE_REMOVED:
191                 return (gettext("REMOVED"));
192         case VDEV_STATE_CANT_OPEN:
193                 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
194                         return (gettext("FAULTED"));
195                 else if (aux == VDEV_AUX_SPLIT_POOL)
196                         return (gettext("SPLIT"));
197                 else
198                         return (gettext("UNAVAIL"));
199         case VDEV_STATE_FAULTED:
200                 return (gettext("FAULTED"));
201         case VDEV_STATE_DEGRADED:
202                 return (gettext("DEGRADED"));
203         case VDEV_STATE_HEALTHY:
204                 return (gettext("ONLINE"));
205
206         default:
207                 break;
208         }
209
210         return (gettext("UNKNOWN"));
211 }
212
213 /*
214  * Map POOL STATE to printed strings.
215  */
216 const char *
217 zpool_pool_state_to_name(pool_state_t state)
218 {
219         switch (state) {
220         default:
221                 break;
222         case POOL_STATE_ACTIVE:
223                 return (gettext("ACTIVE"));
224         case POOL_STATE_EXPORTED:
225                 return (gettext("EXPORTED"));
226         case POOL_STATE_DESTROYED:
227                 return (gettext("DESTROYED"));
228         case POOL_STATE_SPARE:
229                 return (gettext("SPARE"));
230         case POOL_STATE_L2CACHE:
231                 return (gettext("L2CACHE"));
232         case POOL_STATE_UNINITIALIZED:
233                 return (gettext("UNINITIALIZED"));
234         case POOL_STATE_UNAVAIL:
235                 return (gettext("UNAVAIL"));
236         case POOL_STATE_POTENTIALLY_ACTIVE:
237                 return (gettext("POTENTIALLY_ACTIVE"));
238         }
239
240         return (gettext("UNKNOWN"));
241 }
242
243 /*
244  * Given a pool handle, return the pool health string ("ONLINE", "DEGRADED",
245  * "SUSPENDED", etc).
246  */
247 const char *
248 zpool_get_state_str(zpool_handle_t *zhp)
249 {
250         zpool_errata_t errata;
251         zpool_status_t status;
252         nvlist_t *nvroot;
253         vdev_stat_t *vs;
254         uint_t vsc;
255         const char *str;
256
257         status = zpool_get_status(zhp, NULL, &errata);
258
259         if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
260                 str = gettext("FAULTED");
261         } else if (status == ZPOOL_STATUS_IO_FAILURE_WAIT ||
262             status == ZPOOL_STATUS_IO_FAILURE_MMP) {
263                 str = gettext("SUSPENDED");
264         } else {
265                 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
266                     ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
267                 verify(nvlist_lookup_uint64_array(nvroot,
268                     ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
269                     == 0);
270                 str = zpool_state_to_name(vs->vs_state, vs->vs_aux);
271         }
272         return (str);
273 }
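
/*
 * Illustrative usage sketch (hypothetical, not from the upstream code):
 * printing the overall pool health string returned by zpool_get_state_str().
 */
#if 0
static void
example_print_health(zpool_handle_t *zhp)
{
        (void) printf("%s: %s\n", zpool_get_name(zhp),
            zpool_get_state_str(zhp));
}
#endif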
274
275 /*
276  * Get a zpool property value for 'prop' and return the value in
277  * a pre-allocated buffer.
278  */
279 int
280 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
281     size_t len, zprop_source_t *srctype, boolean_t literal)
282 {
283         uint64_t intval;
284         const char *strval;
285         zprop_source_t src = ZPROP_SRC_NONE;
286
287         if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
288                 switch (prop) {
289                 case ZPOOL_PROP_NAME:
290                         (void) strlcpy(buf, zpool_get_name(zhp), len);
291                         break;
292
293                 case ZPOOL_PROP_HEALTH:
294                         (void) strlcpy(buf, zpool_get_state_str(zhp), len);
295                         break;
296
297                 case ZPOOL_PROP_GUID:
298                         intval = zpool_get_prop_int(zhp, prop, &src);
299                         (void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
300                         break;
301
302                 case ZPOOL_PROP_ALTROOT:
303                 case ZPOOL_PROP_CACHEFILE:
304                 case ZPOOL_PROP_COMMENT:
305                         if (zhp->zpool_props != NULL ||
306                             zpool_get_all_props(zhp) == 0) {
307                                 (void) strlcpy(buf,
308                                     zpool_get_prop_string(zhp, prop, &src),
309                                     len);
310                                 break;
311                         }
312                         /* FALLTHROUGH */
313                 default:
314                         (void) strlcpy(buf, "-", len);
315                         break;
316                 }
317
318                 if (srctype != NULL)
319                         *srctype = src;
320                 return (0);
321         }
322
323         if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
324             prop != ZPOOL_PROP_NAME)
325                 return (-1);
326
327         switch (zpool_prop_get_type(prop)) {
328         case PROP_TYPE_STRING:
329                 (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
330                     len);
331                 break;
332
333         case PROP_TYPE_NUMBER:
334                 intval = zpool_get_prop_int(zhp, prop, &src);
335
336                 switch (prop) {
337                 case ZPOOL_PROP_SIZE:
338                 case ZPOOL_PROP_ALLOCATED:
339                 case ZPOOL_PROP_FREE:
340                 case ZPOOL_PROP_FREEING:
341                 case ZPOOL_PROP_LEAKED:
342                 case ZPOOL_PROP_ASHIFT:
343                         if (literal)
344                                 (void) snprintf(buf, len, "%llu",
345                                     (u_longlong_t)intval);
346                         else
347                                 (void) zfs_nicenum(intval, buf, len);
348                         break;
349
350                 case ZPOOL_PROP_EXPANDSZ:
351                 case ZPOOL_PROP_CHECKPOINT:
352                         if (intval == 0) {
353                                 (void) strlcpy(buf, "-", len);
354                         } else if (literal) {
355                                 (void) snprintf(buf, len, "%llu",
356                                     (u_longlong_t)intval);
357                         } else {
358                                 (void) zfs_nicebytes(intval, buf, len);
359                         }
360                         break;
361
362                 case ZPOOL_PROP_CAPACITY:
363                         if (literal) {
364                                 (void) snprintf(buf, len, "%llu",
365                                     (u_longlong_t)intval);
366                         } else {
367                                 (void) snprintf(buf, len, "%llu%%",
368                                     (u_longlong_t)intval);
369                         }
370                         break;
371
372                 case ZPOOL_PROP_FRAGMENTATION:
373                         if (intval == UINT64_MAX) {
374                                 (void) strlcpy(buf, "-", len);
375                         } else if (literal) {
376                                 (void) snprintf(buf, len, "%llu",
377                                     (u_longlong_t)intval);
378                         } else {
379                                 (void) snprintf(buf, len, "%llu%%",
380                                     (u_longlong_t)intval);
381                         }
382                         break;
383
384                 case ZPOOL_PROP_DEDUPRATIO:
385                         if (literal)
386                                 (void) snprintf(buf, len, "%llu.%02llu",
387                                     (u_longlong_t)(intval / 100),
388                                     (u_longlong_t)(intval % 100));
389                         else
390                                 (void) snprintf(buf, len, "%llu.%02llux",
391                                     (u_longlong_t)(intval / 100),
392                                     (u_longlong_t)(intval % 100));
393                         break;
394
395                 case ZPOOL_PROP_HEALTH:
396                         (void) strlcpy(buf, zpool_get_state_str(zhp), len);
397                         break;
398                 case ZPOOL_PROP_VERSION:
399                         if (intval >= SPA_VERSION_FEATURES) {
400                                 (void) snprintf(buf, len, "-");
401                                 break;
402                         }
403                         /* FALLTHROUGH */
404                 default:
405                         (void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
406                 }
407                 break;
408
409         case PROP_TYPE_INDEX:
410                 intval = zpool_get_prop_int(zhp, prop, &src);
411                 if (zpool_prop_index_to_string(prop, intval, &strval)
412                     != 0)
413                         return (-1);
414                 (void) strlcpy(buf, strval, len);
415                 break;
416
417         default:
418                 abort();
419         }
420
421         if (srctype)
422                 *srctype = src;
423
424         return (0);
425 }
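
/*
 * Illustrative usage sketch (hypothetical, not from the upstream code):
 * fetching the same property in human-readable and literal form via
 * zpool_get_prop().  Buffer sizes and the property choice are arbitrary.
 */
#if 0
static void
example_get_capacity(zpool_handle_t *zhp)
{
        char nice[ZFS_MAXPROPLEN], raw[ZFS_MAXPROPLEN];
        zprop_source_t src;

        /* "42%"-style output */
        if (zpool_get_prop(zhp, ZPOOL_PROP_CAPACITY, nice, sizeof (nice),
            &src, B_FALSE) == 0)
                (void) printf("capacity: %s\n", nice);

        /* plain number, suitable for parsing */
        if (zpool_get_prop(zhp, ZPOOL_PROP_CAPACITY, raw, sizeof (raw),
            NULL, B_TRUE) == 0)
                (void) printf("capacity (literal): %s\n", raw);
}
#endif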
426
427 /*
428  * Check that the bootfs name lies within the pool it is being set on.
429  * Assumes bootfs is a valid dataset name.
430  */
431 static boolean_t
432 bootfs_name_valid(const char *pool, const char *bootfs)
433 {
434         int len = strlen(pool);
435         if (bootfs[0] == '\0')
436                 return (B_TRUE);
437
438         if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
439                 return (B_FALSE);
440
441         if (strncmp(pool, bootfs, len) == 0 &&
442             (bootfs[len] == '/' || bootfs[len] == '\0'))
443                 return (B_TRUE);
444
445         return (B_FALSE);
446 }
447
448 /*
449  * Given an nvlist of zpool properties to be set, validate that they are
450  * correct, and parse any numeric properties (index, boolean, etc) if they are
451  * specified as strings.
452  */
453 static nvlist_t *
454 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
455     nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
456 {
457         nvpair_t *elem;
458         nvlist_t *retprops;
459         zpool_prop_t prop;
460         char *strval;
461         uint64_t intval;
462         char *slash, *check;
463         struct stat64 statbuf;
464         zpool_handle_t *zhp;
465
466         if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
467                 (void) no_memory(hdl);
468                 return (NULL);
469         }
470
471         elem = NULL;
472         while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
473                 const char *propname = nvpair_name(elem);
474
475                 prop = zpool_name_to_prop(propname);
476                 if (prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname)) {
477                         int err;
478                         char *fname = strchr(propname, '@') + 1;
479
480                         err = zfeature_lookup_name(fname, NULL);
481                         if (err != 0) {
482                                 ASSERT3U(err, ==, ENOENT);
483                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
484                                     "invalid feature '%s'"), fname);
485                                 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
486                                 goto error;
487                         }
488
489                         if (nvpair_type(elem) != DATA_TYPE_STRING) {
490                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
491                                     "'%s' must be a string"), propname);
492                                 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
493                                 goto error;
494                         }
495
496                         (void) nvpair_value_string(elem, &strval);
497                         if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0 &&
498                             strcmp(strval, ZFS_FEATURE_DISABLED) != 0) {
499                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
500                                     "property '%s' can only be set to "
501                                     "'enabled' or 'disabled'"), propname);
502                                 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
503                                 goto error;
504                         }
505
506                         if (!flags.create &&
507                             strcmp(strval, ZFS_FEATURE_DISABLED) == 0) {
508                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
509                                     "property '%s' can only be set to "
510                                     "'disabled' at creation time"), propname);
511                                 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
512                                 goto error;
513                         }
514
515                         if (nvlist_add_uint64(retprops, propname, 0) != 0) {
516                                 (void) no_memory(hdl);
517                                 goto error;
518                         }
519                         continue;
520                 }
521
522                 /*
523                  * Make sure this property is valid and applies to this type.
524                  */
525                 if (prop == ZPOOL_PROP_INVAL) {
526                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
527                             "invalid property '%s'"), propname);
528                         (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
529                         goto error;
530                 }
531
532                 if (zpool_prop_readonly(prop)) {
533                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
534                             "is readonly"), propname);
535                         (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
536                         goto error;
537                 }
538
539                 if (!flags.create && zpool_prop_setonce(prop)) {
540                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
541                             "property '%s' can only be set at "
542                             "creation time"), propname);
543                         (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
544                         goto error;
545                 }
546
547                 if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
548                     &strval, &intval, errbuf) != 0)
549                         goto error;
550
551                 /*
552                  * Perform additional checking for specific properties.
553                  */
554                 switch (prop) {
555                 case ZPOOL_PROP_VERSION:
556                         if (intval < version ||
557                             !SPA_VERSION_IS_SUPPORTED(intval)) {
558                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
559                                     "property '%s' number %llu is invalid."),
560                                     propname, (u_longlong_t)intval);
561                                 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
562                                 goto error;
563                         }
564                         break;
565
566                 case ZPOOL_PROP_ASHIFT:
567                         if (intval != 0 &&
568                             (intval < ASHIFT_MIN || intval > ASHIFT_MAX)) {
569                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
570                                     "property '%s' number %llu is invalid, only "
571                                     "values between %" PRId32 " and "
572                                     "%" PRId32 " are allowed."), propname,
573                                     (u_longlong_t)intval, ASHIFT_MIN, ASHIFT_MAX);
574                                 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
575                                 goto error;
576                         }
577                         break;
578
579                 case ZPOOL_PROP_BOOTFS:
580                         if (flags.create || flags.import) {
581                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
582                                     "property '%s' cannot be set at creation "
583                                     "or import time"), propname);
584                                 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
585                                 goto error;
586                         }
587
588                         if (version < SPA_VERSION_BOOTFS) {
589                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
590                                     "pool must be upgraded to support "
591                                     "'%s' property"), propname);
592                                 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
593                                 goto error;
594                         }
595
596                         /*
597                          * The bootfs property value has to be a dataset name,
598                          * and the dataset must live in the pool being configured.
599                          */
600                         if (!bootfs_name_valid(poolname, strval)) {
601                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
602                                     "is an invalid name"), strval);
603                                 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
604                                 goto error;
605                         }
606
607                         if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
608                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
609                                     "could not open pool '%s'"), poolname);
610                                 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
611                                 goto error;
612                         }
613                         zpool_close(zhp);
614                         break;
615
616                 case ZPOOL_PROP_ALTROOT:
617                         if (!flags.create && !flags.import) {
618                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
619                                     "property '%s' can only be set during pool "
620                                     "creation or import"), propname);
621                                 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
622                                 goto error;
623                         }
624
625                         if (strval[0] != '/') {
626                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
627                                     "bad alternate root '%s'"), strval);
628                                 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
629                                 goto error;
630                         }
631                         break;
632
633                 case ZPOOL_PROP_CACHEFILE:
634                         if (strval[0] == '\0')
635                                 break;
636
637                         if (strcmp(strval, "none") == 0)
638                                 break;
639
640                         if (strval[0] != '/') {
641                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
642                                     "property '%s' must be empty, an "
643                                     "absolute path, or 'none'"), propname);
644                                 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
645                                 goto error;
646                         }
647
648                         slash = strrchr(strval, '/');
649
650                         if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
651                             strcmp(slash, "/..") == 0) {
652                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
653                                     "'%s' is not a valid file"), strval);
654                                 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
655                                 goto error;
656                         }
657
658                         *slash = '\0';
659
660                         if (strval[0] != '\0' &&
661                             (stat64(strval, &statbuf) != 0 ||
662                             !S_ISDIR(statbuf.st_mode))) {
663                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
664                                     "'%s' is not a valid directory"),
665                                     strval);
666                                 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
667                                 goto error;
668                         }
669
670                         *slash = '/';
671                         break;
672
673                 case ZPOOL_PROP_COMMENT:
674                         for (check = strval; *check != '\0'; check++) {
675                                 if (!isprint(*check)) {
676                                         zfs_error_aux(hdl,
677                                             dgettext(TEXT_DOMAIN,
678                                             "comment may only have printable "
679                                             "characters"));
680                                         (void) zfs_error(hdl, EZFS_BADPROP,
681                                             errbuf);
682                                         goto error;
683                                 }
684                         }
685                         if (strlen(strval) > ZPROP_MAX_COMMENT) {
686                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
687                                     "comment must not exceed %d characters"),
688                                     ZPROP_MAX_COMMENT);
689                                 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
690                                 goto error;
691                         }
692                         break;
693                 case ZPOOL_PROP_READONLY:
694                         if (!flags.import) {
695                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
696                                     "property '%s' can only be set at "
697                                     "import time"), propname);
698                                 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
699                                 goto error;
700                         }
701                         break;
702                 case ZPOOL_PROP_MULTIHOST:
703                         if (get_system_hostid() == 0) {
704                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
705                                     "requires a non-zero system hostid"));
706                                 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
707                                 goto error;
708                         }
709                         break;
710                 case ZPOOL_PROP_DEDUPDITTO:
711                         printf("Note: property '%s' no longer has "
712                             "any effect\n", propname);
713                         break;
714
715                 default:
716                         break;
717                 }
718         }
719
720         return (retprops);
721 error:
722         nvlist_free(retprops);
723         return (NULL);
724 }
725
726 /*
727  * Set zpool property: propname=propval.
728  */
729 int
730 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
731 {
732         zfs_cmd_t zc = {"\0"};
733         int ret = -1;
734         char errbuf[1024];
735         nvlist_t *nvl = NULL;
736         nvlist_t *realprops;
737         uint64_t version;
738         prop_flags_t flags = { 0 };
739
740         (void) snprintf(errbuf, sizeof (errbuf),
741             dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
742             zhp->zpool_name);
743
744         if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
745                 return (no_memory(zhp->zpool_hdl));
746
747         if (nvlist_add_string(nvl, propname, propval) != 0) {
748                 nvlist_free(nvl);
749                 return (no_memory(zhp->zpool_hdl));
750         }
751
752         version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
753         if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
754             zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
755                 nvlist_free(nvl);
756                 return (-1);
757         }
758
759         nvlist_free(nvl);
760         nvl = realprops;
761
762         /*
763          * Execute the corresponding ioctl() to set this property.
764          */
765         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
766
767         if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
768                 nvlist_free(nvl);
769                 return (-1);
770         }
771
772         ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
773
774         zcmd_free_nvlists(&zc);
775         nvlist_free(nvl);
776
777         if (ret)
778                 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
779         else
780                 (void) zpool_props_refresh(zhp);
781
782         return (ret);
783 }
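
/*
 * Illustrative usage sketch (hypothetical, not from the upstream code):
 * setting a pool property through zpool_set_prop().  The property and value
 * are arbitrary examples; on failure libzfs has already recorded an error.
 */
#if 0
static int
example_set_comment(zpool_handle_t *zhp)
{
        /* Validated by zpool_valid_proplist() before the ioctl is issued. */
        return (zpool_set_prop(zhp, "comment", "scratch pool, ok to destroy"));
}
#endif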
784
785 int
786 zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
787 {
788         libzfs_handle_t *hdl = zhp->zpool_hdl;
789         zprop_list_t *entry;
790         char buf[ZFS_MAXPROPLEN];
791         nvlist_t *features = NULL;
792         nvpair_t *nvp;
793         zprop_list_t **last;
794         boolean_t firstexpand = (NULL == *plp);
795         int i;
796
797         if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
798                 return (-1);
799
800         last = plp;
801         while (*last != NULL)
802                 last = &(*last)->pl_next;
803
804         if ((*plp)->pl_all)
805                 features = zpool_get_features(zhp);
806
807         if ((*plp)->pl_all && firstexpand) {
808                 for (i = 0; i < SPA_FEATURES; i++) {
809                         zprop_list_t *entry = zfs_alloc(hdl,
810                             sizeof (zprop_list_t));
811                         entry->pl_prop = ZPROP_INVAL;
812                         entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
813                             spa_feature_table[i].fi_uname);
814                         entry->pl_width = strlen(entry->pl_user_prop);
815                         entry->pl_all = B_TRUE;
816
817                         *last = entry;
818                         last = &entry->pl_next;
819                 }
820         }
821
822         /* add any unsupported features */
823         for (nvp = nvlist_next_nvpair(features, NULL);
824             nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
825                 char *propname;
826                 boolean_t found;
827                 zprop_list_t *entry;
828
829                 if (zfeature_is_supported(nvpair_name(nvp)))
830                         continue;
831
832                 propname = zfs_asprintf(hdl, "unsupported@%s",
833                     nvpair_name(nvp));
834
835                 /*
836                  * Before adding the property to the list, make sure that no
837                  * other pool has already added the same property.
838                  */
839                 found = B_FALSE;
840                 entry = *plp;
841                 while (entry != NULL) {
842                         if (entry->pl_user_prop != NULL &&
843                             strcmp(propname, entry->pl_user_prop) == 0) {
844                                 found = B_TRUE;
845                                 break;
846                         }
847                         entry = entry->pl_next;
848                 }
849                 if (found) {
850                         free(propname);
851                         continue;
852                 }
853
854                 entry = zfs_alloc(hdl, sizeof (zprop_list_t));
855                 entry->pl_prop = ZPROP_INVAL;
856                 entry->pl_user_prop = propname;
857                 entry->pl_width = strlen(entry->pl_user_prop);
858                 entry->pl_all = B_TRUE;
859
860                 *last = entry;
861                 last = &entry->pl_next;
862         }
863
864         for (entry = *plp; entry != NULL; entry = entry->pl_next) {
865
866                 if (entry->pl_fixed)
867                         continue;
868
869                 if (entry->pl_prop != ZPROP_INVAL &&
870                     zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
871                     NULL, B_FALSE) == 0) {
872                         if (strlen(buf) > entry->pl_width)
873                                 entry->pl_width = strlen(buf);
874                 }
875         }
876
877         return (0);
878 }
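
/*
 * Illustrative usage sketch (hypothetical, not from the upstream code):
 * expanding the default property list for a pool and walking the result.
 * Assumes zprop_free_list() for cleanup, as used elsewhere in libzfs.
 */
#if 0
static void
example_walk_proplist(zpool_handle_t *zhp)
{
        zprop_list_t *pl = NULL;

        if (zpool_expand_proplist(zhp, &pl) != 0)
                return;

        for (zprop_list_t *entry = pl; entry != NULL; entry = entry->pl_next) {
                if (entry->pl_prop != ZPROP_INVAL)
                        (void) printf("%s\n",
                            zpool_prop_to_name(entry->pl_prop));
                else if (entry->pl_user_prop != NULL)
                        (void) printf("%s\n", entry->pl_user_prop);
        }

        zprop_free_list(pl);
}
#endif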
879
880 /*
881  * Get the state for the given feature on the given ZFS pool.
882  */
883 int
884 zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
885     size_t len)
886 {
887         uint64_t refcount;
888         boolean_t found = B_FALSE;
889         nvlist_t *features = zpool_get_features(zhp);
890         boolean_t supported;
891         const char *feature = strchr(propname, '@') + 1;
892
893         supported = zpool_prop_feature(propname);
894         ASSERT(supported || zpool_prop_unsupported(propname));
895
896         /*
897          * Convert from feature name to feature guid. This conversion is
898          * unnecessary for unsupported@... properties because they already
899          * use guids.
900          */
901         if (supported) {
902                 int ret;
903                 spa_feature_t fid;
904
905                 ret = zfeature_lookup_name(feature, &fid);
906                 if (ret != 0) {
907                         (void) strlcpy(buf, "-", len);
908                         return (ENOTSUP);
909                 }
910                 feature = spa_feature_table[fid].fi_guid;
911         }
912
913         if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
914                 found = B_TRUE;
915
916         if (supported) {
917                 if (!found) {
918                         (void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
919                 } else  {
920                         if (refcount == 0)
921                                 (void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
922                         else
923                                 (void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
924                 }
925         } else {
926                 if (found) {
927                         if (refcount == 0) {
928                                 (void) strlcpy(buf, ZFS_UNSUPPORTED_INACTIVE, len);
929                         } else {
930                                 (void) strlcpy(buf, ZFS_UNSUPPORTED_READONLY, len);
931                         }
932                 } else {
933                         (void) strlcpy(buf, "-", len);
934                         return (ENOTSUP);
935                 }
936         }
937
938         return (0);
939 }
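
/*
 * Illustrative usage sketch (hypothetical, not from the upstream code):
 * querying a feature-state property.  "feature@async_destroy" is just one
 * example feature name.
 */
#if 0
static void
example_feature_state(zpool_handle_t *zhp)
{
        char state[ZFS_MAXPROPLEN];

        if (zpool_prop_get_feature(zhp, "feature@async_destroy", state,
            sizeof (state)) == 0)
                (void) printf("async_destroy: %s\n", state);
        else
                (void) printf("async_destroy: unsupported\n");
}
#endif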
940
941 /*
942  * Validate the given pool name, optionally recording an extended error
943  * message on the libzfs handle.
944  */
945 boolean_t
946 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
947 {
948         namecheck_err_t why;
949         char what;
950         int ret;
951
952         ret = pool_namecheck(pool, &why, &what);
953
954         /*
955          * The rules for reserved pool names were extended at a later point.
956          * But we need to support users with existing pools that may now be
957          * invalid.  So we only check for this expanded set of names during a
958          * create (or import), and only in userland.
959          */
960         if (ret == 0 && !isopen &&
961             (strncmp(pool, "mirror", 6) == 0 ||
962             strncmp(pool, "raidz", 5) == 0 ||
963             strncmp(pool, "spare", 5) == 0 ||
964             strcmp(pool, "log") == 0)) {
965                 if (hdl != NULL)
966                         zfs_error_aux(hdl,
967                             dgettext(TEXT_DOMAIN, "name is reserved"));
968                 return (B_FALSE);
969         }
970
971
972         if (ret != 0) {
973                 if (hdl != NULL) {
974                         switch (why) {
975                         case NAME_ERR_TOOLONG:
976                                 zfs_error_aux(hdl,
977                                     dgettext(TEXT_DOMAIN, "name is too long"));
978                                 break;
979
980                         case NAME_ERR_INVALCHAR:
981                                 zfs_error_aux(hdl,
982                                     dgettext(TEXT_DOMAIN, "invalid character "
983                                     "'%c' in pool name"), what);
984                                 break;
985
986                         case NAME_ERR_NOLETTER:
987                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
988                                     "name must begin with a letter"));
989                                 break;
990
991                         case NAME_ERR_RESERVED:
992                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
993                                     "name is reserved"));
994                                 break;
995
996                         case NAME_ERR_DISKLIKE:
997                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
998                                     "pool name is reserved"));
999                                 break;
1000
1001                         case NAME_ERR_LEADING_SLASH:
1002                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1003                                     "leading slash in name"));
1004                                 break;
1005
1006                         case NAME_ERR_EMPTY_COMPONENT:
1007                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1008                                     "empty component in name"));
1009                                 break;
1010
1011                         case NAME_ERR_TRAILING_SLASH:
1012                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1013                                     "trailing slash in name"));
1014                                 break;
1015
1016                         case NAME_ERR_MULTIPLE_DELIMITERS:
1017                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1018                                     "multiple '@' and/or '#' delimiters in "
1019                                     "name"));
1020                                 break;
1021
1022                         case NAME_ERR_NO_AT:
1023                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1024                                     "permission set is missing '@'"));
1025                                 break;
1026
1027                         default:
1028                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1029                                     "(%d) not defined"), why);
1030                                 break;
1031                         }
1032                 }
1033                 return (B_FALSE);
1034         }
1035
1036         return (B_TRUE);
1037 }
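
/*
 * Illustrative usage sketch (hypothetical, not from the upstream code):
 * pre-checking candidate pool names before attempting creation.  "mirror"
 * is rejected here because reserved names are enforced when isopen is
 * B_FALSE.
 */
#if 0
static void
example_check_names(libzfs_handle_t *hdl)
{
        (void) printf("tank: %svalid\n",
            zpool_name_valid(hdl, B_FALSE, "tank") ? "" : "in");
        (void) printf("mirror: %svalid\n",
            zpool_name_valid(hdl, B_FALSE, "mirror") ? "" : "in");
}
#endif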
1038
1039 /*
1040  * Open a handle to the given pool, even if the pool is currently in the FAULTED
1041  * state.
1042  */
1043 zpool_handle_t *
1044 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
1045 {
1046         zpool_handle_t *zhp;
1047         boolean_t missing;
1048
1049         /*
1050          * Make sure the pool name is valid.
1051          */
1052         if (!zpool_name_valid(hdl, B_TRUE, pool)) {
1053                 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1054                     dgettext(TEXT_DOMAIN, "cannot open '%s'"),
1055                     pool);
1056                 return (NULL);
1057         }
1058
1059         if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
1060                 return (NULL);
1061
1062         zhp->zpool_hdl = hdl;
1063         (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
1064
1065         if (zpool_refresh_stats(zhp, &missing) != 0) {
1066                 zpool_close(zhp);
1067                 return (NULL);
1068         }
1069
1070         if (missing) {
1071                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
1072                 (void) zfs_error_fmt(hdl, EZFS_NOENT,
1073                     dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
1074                 zpool_close(zhp);
1075                 return (NULL);
1076         }
1077
1078         return (zhp);
1079 }
1080
1081 /*
1082  * Like the above, but silent on error.  Used when iterating over pools (because
1083  * the configuration cache may be out of date).
1084  */
1085 int
1086 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
1087 {
1088         zpool_handle_t *zhp;
1089         boolean_t missing;
1090
1091         if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
1092                 return (-1);
1093
1094         zhp->zpool_hdl = hdl;
1095         (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
1096
1097         if (zpool_refresh_stats(zhp, &missing) != 0) {
1098                 zpool_close(zhp);
1099                 return (-1);
1100         }
1101
1102         if (missing) {
1103                 zpool_close(zhp);
1104                 *ret = NULL;
1105                 return (0);
1106         }
1107
1108         *ret = zhp;
1109         return (0);
1110 }
1111
1112 /*
1113  * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
1114  * state.
1115  */
1116 zpool_handle_t *
1117 zpool_open(libzfs_handle_t *hdl, const char *pool)
1118 {
1119         zpool_handle_t *zhp;
1120
1121         if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
1122                 return (NULL);
1123
1124         if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
1125                 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
1126                     dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
1127                 zpool_close(zhp);
1128                 return (NULL);
1129         }
1130
1131         return (zhp);
1132 }
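
/*
 * Illustrative usage sketch (hypothetical, not from the upstream code):
 * the typical open/use/close lifecycle.  zpool_open() refuses FAULTED
 * pools; use zpool_open_canfail() when a handle to a faulted pool is
 * still wanted.
 */
#if 0
static void
example_open_pool(libzfs_handle_t *hdl, const char *name)
{
        zpool_handle_t *zhp;

        if ((zhp = zpool_open(hdl, name)) == NULL)
                return;         /* libzfs has already recorded the error */

        (void) printf("%s is %s\n", zpool_get_name(zhp),
            zpool_get_state_str(zhp));
        zpool_close(zhp);
}
#endif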
1133
1134 /*
1135  * Close the handle.  Simply frees the memory associated with the handle.
1136  */
1137 void
1138 zpool_close(zpool_handle_t *zhp)
1139 {
1140         nvlist_free(zhp->zpool_config);
1141         nvlist_free(zhp->zpool_old_config);
1142         nvlist_free(zhp->zpool_props);
1143         free(zhp);
1144 }
1145
1146 /*
1147  * Return the name of the pool.
1148  */
1149 const char *
1150 zpool_get_name(zpool_handle_t *zhp)
1151 {
1152         return (zhp->zpool_name);
1153 }
1154
1155
1156 /*
1157  * Return the state of the pool (ACTIVE or UNAVAILABLE)
1158  */
1159 int
1160 zpool_get_state(zpool_handle_t *zhp)
1161 {
1162         return (zhp->zpool_state);
1163 }
1164
1165 /*
1166  * Check if vdev list contains a special vdev
1167  */
1168 static boolean_t
1169 zpool_has_special_vdev(nvlist_t *nvroot)
1170 {
1171         nvlist_t **child;
1172         uint_t children;
1173
1174         if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &child,
1175             &children) == 0) {
1176                 for (uint_t c = 0; c < children; c++) {
1177                         char *bias;
1178
1179                         if (nvlist_lookup_string(child[c],
1180                             ZPOOL_CONFIG_ALLOCATION_BIAS, &bias) == 0 &&
1181                             strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0) {
1182                                 return (B_TRUE);
1183                         }
1184                 }
1185         }
1186         return (B_FALSE);
1187 }
1188
1189 /*
1190  * Create the named pool, using the provided vdev list.  It is assumed
1191  * that the consumer has already validated the contents of the nvlist, so we
1192  * don't have to worry about error semantics.
1193  */
1194 int
1195 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
1196     nvlist_t *props, nvlist_t *fsprops)
1197 {
1198         zfs_cmd_t zc = {"\0"};
1199         nvlist_t *zc_fsprops = NULL;
1200         nvlist_t *zc_props = NULL;
1201         nvlist_t *hidden_args = NULL;
1202         uint8_t *wkeydata = NULL;
1203         uint_t wkeylen = 0;
1204         char msg[1024];
1205         int ret = -1;
1206
1207         (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1208             "cannot create '%s'"), pool);
1209
1210         if (!zpool_name_valid(hdl, B_FALSE, pool))
1211                 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
1212
1213         if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1214                 return (-1);
1215
1216         if (props) {
1217                 prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };
1218
1219                 if ((zc_props = zpool_valid_proplist(hdl, pool, props,
1220                     SPA_VERSION_1, flags, msg)) == NULL) {
1221                         goto create_failed;
1222                 }
1223         }
1224
1225         if (fsprops) {
1226                 uint64_t zoned;
1227                 char *zonestr;
1228
1229                 zoned = ((nvlist_lookup_string(fsprops,
1230                     zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
1231                     strcmp(zonestr, "on") == 0);
1232
1233                 if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
1234                     fsprops, zoned, NULL, NULL, B_TRUE, msg)) == NULL) {
1235                         goto create_failed;
1236                 }
1237
1238                 if (nvlist_exists(zc_fsprops,
1239                     zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS)) &&
1240                     !zpool_has_special_vdev(nvroot)) {
1241                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1242                             "%s property requires a special vdev"),
1243                             zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS));
1244                         (void) zfs_error(hdl, EZFS_BADPROP, msg);
1245                         goto create_failed;
1246                 }
1247
1248                 if (!zc_props &&
1249                     (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
1250                         goto create_failed;
1251                 }
1252                 if (zfs_crypto_create(hdl, NULL, zc_fsprops, props, B_TRUE,
1253                     &wkeydata, &wkeylen) != 0) {
1254                         zfs_error(hdl, EZFS_CRYPTOFAILED, msg);
1255                         goto create_failed;
1256                 }
1257                 if (nvlist_add_nvlist(zc_props,
1258                     ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
1259                         goto create_failed;
1260                 }
1261                 if (wkeydata != NULL) {
1262                         if (nvlist_alloc(&hidden_args, NV_UNIQUE_NAME, 0) != 0)
1263                                 goto create_failed;
1264
1265                         if (nvlist_add_uint8_array(hidden_args, "wkeydata",
1266                             wkeydata, wkeylen) != 0)
1267                                 goto create_failed;
1268
1269                         if (nvlist_add_nvlist(zc_props, ZPOOL_HIDDEN_ARGS,
1270                             hidden_args) != 0)
1271                                 goto create_failed;
1272                 }
1273         }
1274
1275         if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
1276                 goto create_failed;
1277
1278         (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
1279
1280         if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
1281
1282                 zcmd_free_nvlists(&zc);
1283                 nvlist_free(zc_props);
1284                 nvlist_free(zc_fsprops);
1285                 nvlist_free(hidden_args);
1286                 if (wkeydata != NULL)
1287                         free(wkeydata);
1288
1289                 switch (errno) {
1290                 case EBUSY:
1291                         /*
1292                          * This can happen if the user has specified the same
1293                          * device multiple times.  We can't reliably detect this
1294                          * until we try to add it and see we already have a
1295                          * label.  This can also happen if the device is
1296                          * part of an active md or lvm device.
1297                          */
1298                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1299                             "one or more vdevs refer to the same device, or "
1300                             "one of\nthe devices is part of an active md or "
1301                             "lvm device"));
1302                         return (zfs_error(hdl, EZFS_BADDEV, msg));
1303
1304                 case ERANGE:
1305                         /*
1306                          * This happens if the record size is outside the allowed
1307                          * size range, or is not a power of 2.
1308                          *
1309                          * NOTE: although zfs_valid_proplist is called earlier,
1310                          * this case may have slipped through since the
1311                          * pool does not exist yet and it is therefore
1312                          * impossible to read properties e.g. max blocksize
1313                          * from the pool.
1314                          */
1315                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1316                             "record size invalid"));
1317                         return (zfs_error(hdl, EZFS_BADPROP, msg));
1318
1319                 case EOVERFLOW:
1320                         /*
1321                          * This occurs when one of the devices is below
1322                          * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
1323                          * device was the problem device since there's no
1324                          * reliable way to determine device size from userland.
1325                          */
1326                         {
1327                                 char buf[64];
1328
1329                                 zfs_nicebytes(SPA_MINDEVSIZE, buf,
1330                                     sizeof (buf));
1331
1332                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1333                                     "one or more devices is less than the "
1334                                     "minimum size (%s)"), buf);
1335                         }
1336                         return (zfs_error(hdl, EZFS_BADDEV, msg));
1337
1338                 case ENOSPC:
1339                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1340                             "one or more devices is out of space"));
1341                         return (zfs_error(hdl, EZFS_BADDEV, msg));
1342
1343                 default:
1344                         return (zpool_standard_error(hdl, errno, msg));
1345                 }
1346         }
1347
1348 create_failed:
1349         zcmd_free_nvlists(&zc);
1350         nvlist_free(zc_props);
1351         nvlist_free(zc_fsprops);
1352         nvlist_free(hidden_args);
1353         if (wkeydata != NULL)
1354                 free(wkeydata);
1355         return (ret);
1356 }
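
/*
 * Illustrative usage sketch (hypothetical, not from the upstream code): a
 * grossly simplified single-vdev pool creation.  The real zpool(8) utility
 * builds the vdev tree with make_root_vdev(), which fills in additional
 * fields (ashift, whole_disk, log/alloc-bias markers, ...); this only shows
 * the general nvlist shape handed to zpool_create().  The pool name and
 * backing file path are made up.
 */
#if 0
static int
example_create_pool(libzfs_handle_t *hdl)
{
        nvlist_t *file_vdev = fnvlist_alloc();
        nvlist_t *nvroot = fnvlist_alloc();
        nvlist_t *children[1];
        int ret;

        fnvlist_add_string(file_vdev, ZPOOL_CONFIG_TYPE, VDEV_TYPE_FILE);
        fnvlist_add_string(file_vdev, ZPOOL_CONFIG_PATH, "/var/tmp/vdev0");

        children[0] = file_vdev;
        fnvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
        fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, children, 1);

        ret = zpool_create(hdl, "tank", nvroot, NULL, NULL);

        fnvlist_free(nvroot);
        fnvlist_free(file_vdev);
        return (ret);
}
#endif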
1357
1358 /*
1359  * Destroy the given pool.  It is up to the caller to ensure that there are no
1360  * datasets left in the pool.
1361  */
1362 int
1363 zpool_destroy(zpool_handle_t *zhp, const char *log_str)
1364 {
1365         zfs_cmd_t zc = {"\0"};
1366         zfs_handle_t *zfp = NULL;
1367         libzfs_handle_t *hdl = zhp->zpool_hdl;
1368         char msg[1024];
1369
1370         if (zhp->zpool_state == POOL_STATE_ACTIVE &&
1371             (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
1372                 return (-1);
1373
1374         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1375         zc.zc_history = (uint64_t)(uintptr_t)log_str;
1376
1377         if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
1378                 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1379                     "cannot destroy '%s'"), zhp->zpool_name);
1380
1381                 if (errno == EROFS) {
1382                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1383                             "one or more devices is read only"));
1384                         (void) zfs_error(hdl, EZFS_BADDEV, msg);
1385                 } else {
1386                         (void) zpool_standard_error(hdl, errno, msg);
1387                 }
1388
1389                 if (zfp)
1390                         zfs_close(zfp);
1391                 return (-1);
1392         }
1393
1394         if (zfp) {
1395                 remove_mountpoint(zfp);
1396                 zfs_close(zfp);
1397         }
1398
1399         return (0);
1400 }
1401
1402 /*
1403  * Create a checkpoint in the given pool.
1404  */
1405 int
1406 zpool_checkpoint(zpool_handle_t *zhp)
1407 {
1408         libzfs_handle_t *hdl = zhp->zpool_hdl;
1409         char msg[1024];
1410         int error;
1411
1412         error = lzc_pool_checkpoint(zhp->zpool_name);
1413         if (error != 0) {
1414                 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1415                     "cannot checkpoint '%s'"), zhp->zpool_name);
1416                 (void) zpool_standard_error(hdl, error, msg);
1417                 return (-1);
1418         }
1419
1420         return (0);
1421 }
1422
1423 /*
1424  * Discard the checkpoint from the given pool.
1425  */
1426 int
1427 zpool_discard_checkpoint(zpool_handle_t *zhp)
1428 {
1429         libzfs_handle_t *hdl = zhp->zpool_hdl;
1430         char msg[1024];
1431         int error;
1432
1433         error = lzc_pool_checkpoint_discard(zhp->zpool_name);
1434         if (error != 0) {
1435                 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1436                     "cannot discard checkpoint in '%s'"), zhp->zpool_name);
1437                 (void) zpool_standard_error(hdl, error, msg);
1438                 return (-1);
1439         }
1440
1441         return (0);
1442 }
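/*
 * Illustrative sketch (not part of the upstream source): taking and later
 * discarding a pool checkpoint.  Both functions above are thin wrappers
 * around the corresponding lzc_pool_checkpoint*() calls and return -1 with
 * the libzfs error state set on failure.  'zhp' is an assumed open handle.
 *
 *        if (zpool_checkpoint(zhp) != 0)
 *                return (-1);
 *        ...perform the risky change; either keep it and discard the
 *           checkpoint, or rewind to it at import time...
 *        if (zpool_discard_checkpoint(zhp) != 0)
 *                return (-1);
 */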
1443
1444 /*
1445  * Add the given vdevs to the pool.  The caller must have already performed the
1446  * necessary verification to ensure that the vdev specification is well-formed.
1447  */
1448 int
1449 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
1450 {
1451         zfs_cmd_t zc = {"\0"};
1452         int ret;
1453         libzfs_handle_t *hdl = zhp->zpool_hdl;
1454         char msg[1024];
1455         nvlist_t **spares, **l2cache;
1456         uint_t nspares, nl2cache;
1457
1458         (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1459             "cannot add to '%s'"), zhp->zpool_name);
1460
1461         if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1462             SPA_VERSION_SPARES &&
1463             nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1464             &spares, &nspares) == 0) {
1465                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1466                     "upgraded to add hot spares"));
1467                 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1468         }
1469
1470         if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1471             SPA_VERSION_L2CACHE &&
1472             nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1473             &l2cache, &nl2cache) == 0) {
1474                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1475                     "upgraded to add cache devices"));
1476                 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1477         }
1478
1479         if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1480                 return (-1);
1481         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1482
1483         if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
1484                 switch (errno) {
1485                 case EBUSY:
1486                         /*
1487                          * This can happen if the user has specified the same
1488                          * device multiple times.  We can't reliably detect this
1489                          * until we try to add it and see we already have a
1490                          * label.
1491                          */
1492                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1493                             "one or more vdevs refer to the same device"));
1494                         (void) zfs_error(hdl, EZFS_BADDEV, msg);
1495                         break;
1496
1497                 case EINVAL:
1498                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1499                             "invalid config; a pool with removing/removed "
1500                             "vdevs does not support adding raidz vdevs"));
1501                         (void) zfs_error(hdl, EZFS_BADDEV, msg);
1502                         break;
1503
1504                 case EOVERFLOW:
1505                         /*
1506                          * This occurs when one of the devices is below
1507                          * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
1508                          * device was the problem device since there's no
1509                          * reliable way to determine device size from userland.
1510                          */
1511                         {
1512                                 char buf[64];
1513
1514                                 zfs_nicebytes(SPA_MINDEVSIZE, buf,
1515                                     sizeof (buf));
1516
1517                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1518                                     "device is less than the minimum "
1519                                     "size (%s)"), buf);
1520                         }
1521                         (void) zfs_error(hdl, EZFS_BADDEV, msg);
1522                         break;
1523
1524                 case ENOTSUP:
1525                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1526                             "pool must be upgraded to add these vdevs"));
1527                         (void) zfs_error(hdl, EZFS_BADVERSION, msg);
1528                         break;
1529
1530                 default:
1531                         (void) zpool_standard_error(hdl, errno, msg);
1532                 }
1533
1534                 ret = -1;
1535         } else {
1536                 ret = 0;
1537         }
1538
1539         zcmd_free_nvlists(&zc);
1540
1541         return (ret);
1542 }
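/*
 * Illustrative sketch (not part of the upstream source): 'nvroot' is a vdev
 * tree encoded as an nvlist, normally built by the zpool(8) command's vdev
 * parsing code.  A hand-rolled tree for adding a single disk might look
 * roughly like the following; the device path is a placeholder and the
 * kernel may expect additional pairs (ashift, devid, ...) omitted here.
 *
 *        nvlist_t *disk = fnvlist_alloc();
 *        fnvlist_add_string(disk, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK);
 *        fnvlist_add_string(disk, ZPOOL_CONFIG_PATH, "/dev/sdb1");
 *        fnvlist_add_uint64(disk, ZPOOL_CONFIG_WHOLE_DISK, 0);
 *
 *        nvlist_t *nvroot = fnvlist_alloc();
 *        fnvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
 *        fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &disk, 1);
 *
 *        int err = zpool_add(zhp, nvroot);
 *        fnvlist_free(disk);
 *        fnvlist_free(nvroot);
 */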
1543
1544 /*
1545  * Exports the pool from the system.  The caller must ensure that there are no
1546  * mounted datasets in the pool.
1547  */
1548 static int
1549 zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
1550     const char *log_str)
1551 {
1552         zfs_cmd_t zc = {"\0"};
1553         char msg[1024];
1554
1555         (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1556             "cannot export '%s'"), zhp->zpool_name);
1557
1558         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1559         zc.zc_cookie = force;
1560         zc.zc_guid = hardforce;
1561         zc.zc_history = (uint64_t)(uintptr_t)log_str;
1562
1563         if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
1564                 switch (errno) {
1565                 case EXDEV:
1566                         zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
1567                             "use '-f' to override the following errors:\n"
1568                             "'%s' has an active shared spare which could be"
1569                             " used by other pools once '%s' is exported."),
1570                             zhp->zpool_name, zhp->zpool_name);
1571                         return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
1572                             msg));
1573                 default:
1574                         return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
1575                             msg));
1576                 }
1577         }
1578
1579         return (0);
1580 }
1581
1582 int
1583 zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
1584 {
1585         return (zpool_export_common(zhp, force, B_FALSE, log_str));
1586 }
1587
1588 int
1589 zpool_export_force(zpool_handle_t *zhp, const char *log_str)
1590 {
1591         return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
1592 }
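/*
 * Illustrative sketch (not part of the upstream source): exporting a pool
 * after unmounting its datasets, mirroring what the zpool(8) command does.
 * zpool_export() passes 'force' through zc_cookie; zpool_export_force()
 * additionally sets the hard-force flag carried in zc_guid.
 *
 *        if (zpool_disable_datasets(zhp, B_FALSE) != 0)
 *                return (-1);
 *        if (zpool_export(zhp, B_FALSE, "zpool export tank") != 0)
 *                return (-1);
 */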
1593
1594 static void
1595 zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
1596     nvlist_t *config)
1597 {
1598         nvlist_t *nv = NULL;
1599         uint64_t rewindto;
1600         int64_t loss = -1;
1601         struct tm t;
1602         char timestr[128];
1603
1604         if (!hdl->libzfs_printerr || config == NULL)
1605                 return;
1606
1607         if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1608             nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
1609                 return;
1610         }
1611
1612         if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1613                 return;
1614         (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1615
1616         if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1617             strftime(timestr, 128, "%c", &t) != 0) {
1618                 if (dryrun) {
1619                         (void) printf(dgettext(TEXT_DOMAIN,
1620                             "Would be able to return %s "
1621                             "to its state as of %s.\n"),
1622                             name, timestr);
1623                 } else {
1624                         (void) printf(dgettext(TEXT_DOMAIN,
1625                             "Pool %s returned to its state as of %s.\n"),
1626                             name, timestr);
1627                 }
1628                 if (loss > 120) {
1629                         (void) printf(dgettext(TEXT_DOMAIN,
1630                             "%s approximately %lld "),
1631                             dryrun ? "Would discard" : "Discarded",
1632                             ((longlong_t)loss + 30) / 60);
1633                         (void) printf(dgettext(TEXT_DOMAIN,
1634                             "minutes of transactions.\n"));
1635                 } else if (loss > 0) {
1636                         (void) printf(dgettext(TEXT_DOMAIN,
1637                             "%s approximately %lld "),
1638                             dryrun ? "Would discard" : "Discarded",
1639                             (longlong_t)loss);
1640                         (void) printf(dgettext(TEXT_DOMAIN,
1641                             "seconds of transactions.\n"));
1642                 }
1643         }
1644 }
1645
1646 void
1647 zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
1648     nvlist_t *config)
1649 {
1650         nvlist_t *nv = NULL;
1651         int64_t loss = -1;
1652         uint64_t edata = UINT64_MAX;
1653         uint64_t rewindto;
1654         struct tm t;
1655         char timestr[128];
1656
1657         if (!hdl->libzfs_printerr)
1658                 return;
1659
1660         if (reason >= 0)
1661                 (void) printf(dgettext(TEXT_DOMAIN, "action: "));
1662         else
1663                 (void) printf(dgettext(TEXT_DOMAIN, "\t"));
1664
1665         /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
1666         if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1667             nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
1668             nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1669                 goto no_info;
1670
1671         (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1672         (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
1673             &edata);
1674
1675         (void) printf(dgettext(TEXT_DOMAIN,
1676             "Recovery is possible, but will result in some data loss.\n"));
1677
1678         if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1679             strftime(timestr, 128, "%c", &t) != 0) {
1680                 (void) printf(dgettext(TEXT_DOMAIN,
1681                     "\tReturning the pool to its state as of %s\n"
1682                     "\tshould correct the problem.  "),
1683                     timestr);
1684         } else {
1685                 (void) printf(dgettext(TEXT_DOMAIN,
1686                     "\tReverting the pool to an earlier state "
1687                     "should correct the problem.\n\t"));
1688         }
1689
1690         if (loss > 120) {
1691                 (void) printf(dgettext(TEXT_DOMAIN,
1692                     "Approximately %lld minutes of data\n"
1693                     "\tmust be discarded, irreversibly.  "),
1694                     ((longlong_t)loss + 30) / 60);
1695         } else if (loss > 0) {
1696                 (void) printf(dgettext(TEXT_DOMAIN,
1697                     "Approximately %lld seconds of data\n"
1698                     "\tmust be discarded, irreversibly.  "),
1699                     (longlong_t)loss);
1700         }
1701         if (edata != 0 && edata != UINT64_MAX) {
1702                 if (edata == 1) {
1703                         (void) printf(dgettext(TEXT_DOMAIN,
1704                             "After rewind, at least\n"
1705                             "\tone persistent user-data error will remain.  "));
1706                 } else {
1707                         (void) printf(dgettext(TEXT_DOMAIN,
1708                             "After rewind, several\n"
1709                             "\tpersistent user-data errors will remain.  "));
1710                 }
1711         }
1712         (void) printf(dgettext(TEXT_DOMAIN,
1713             "Recovery can be attempted\n\tby executing 'zpool %s -F %s'.  "),
1714             reason >= 0 ? "clear" : "import", name);
1715
1716         (void) printf(dgettext(TEXT_DOMAIN,
1717             "A scrub of the pool\n"
1718             "\tis strongly recommended after recovery.\n"));
1719         return;
1720
1721 no_info:
1722         (void) printf(dgettext(TEXT_DOMAIN,
1723             "Destroy and re-create the pool from\n\ta backup source.\n"));
1724 }
1725
1726 /*
1727  * zpool_import() is a contracted interface; it should be kept the same
1728  * if possible.
1729  *
1730  * Applications should use zpool_import_props() to import a pool with
1731  * new property values to be set.
1732  */
1733 int
1734 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1735     char *altroot)
1736 {
1737         nvlist_t *props = NULL;
1738         int ret;
1739
1740         if (altroot != NULL) {
1741                 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
1742                         return (zfs_error_fmt(hdl, EZFS_NOMEM,
1743                             dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1744                             newname));
1745                 }
1746
1747                 if (nvlist_add_string(props,
1748                     zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
1749                     nvlist_add_string(props,
1750                     zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
1751                         nvlist_free(props);
1752                         return (zfs_error_fmt(hdl, EZFS_NOMEM,
1753                             dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1754                             newname));
1755                 }
1756         }
1757
1758         ret = zpool_import_props(hdl, config, newname, props,
1759             ZFS_IMPORT_NORMAL);
1760         nvlist_free(props);
1761         return (ret);
1762 }
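/*
 * Illustrative sketch (not part of the upstream source): importing a pool
 * from a configuration obtained from the import-search code (see
 * zpool_find_import()), keeping its original name and using a temporary
 * altroot.  The altroot is translated into ZPOOL_PROP_ALTROOT plus
 * cachefile=none exactly as the wrapper above does; the path is a
 * placeholder.
 *
 *        char altroot[] = "/mnt/recovery";
 *        int err = zpool_import(hdl, config, NULL, altroot);
 */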
1763
1764 static void
1765 print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
1766     int indent)
1767 {
1768         nvlist_t **child;
1769         uint_t c, children;
1770         char *vname;
1771         uint64_t is_log = 0;
1772
1773         (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
1774             &is_log);
1775
1776         if (name != NULL)
1777                 (void) printf("\t%*s%s%s\n", indent, "", name,
1778                     is_log ? " [log]" : "");
1779
1780         if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1781             &child, &children) != 0)
1782                 return;
1783
1784         for (c = 0; c < children; c++) {
1785                 vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);
1786                 print_vdev_tree(hdl, vname, child[c], indent + 2);
1787                 free(vname);
1788         }
1789 }
1790
1791 void
1792 zpool_print_unsup_feat(nvlist_t *config)
1793 {
1794         nvlist_t *nvinfo, *unsup_feat;
1795         nvpair_t *nvp;
1796
1797         verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
1798             0);
1799         verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
1800             &unsup_feat) == 0);
1801
1802         for (nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
1803             nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
1804                 char *desc;
1805
1806                 verify(nvpair_type(nvp) == DATA_TYPE_STRING);
1807                 verify(nvpair_value_string(nvp, &desc) == 0);
1808
1809                 if (strlen(desc) > 0)
1810                         (void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
1811                 else
1812                         (void) printf("\t%s\n", nvpair_name(nvp));
1813         }
1814 }
1815
1816 /*
1817  * Import the given pool using the known configuration and a list of
1818  * properties to be set. The configuration should have come from
1819  * zpool_find_import(). The 'newname' parameter controls whether the pool
1820  * is imported with a different name.
1821  */
1822 int
1823 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1824     nvlist_t *props, int flags)
1825 {
1826         zfs_cmd_t zc = {"\0"};
1827         zpool_load_policy_t policy;
1828         nvlist_t *nv = NULL;
1829         nvlist_t *nvinfo = NULL;
1830         nvlist_t *missing = NULL;
1831         char *thename;
1832         char *origname;
1833         int ret;
1834         int error = 0;
1835         char errbuf[1024];
1836
1837         verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1838             &origname) == 0);
1839
1840         (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1841             "cannot import pool '%s'"), origname);
1842
1843         if (newname != NULL) {
1844                 if (!zpool_name_valid(hdl, B_FALSE, newname))
1845                         return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1846                             dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1847                             newname));
1848                 thename = (char *)newname;
1849         } else {
1850                 thename = origname;
1851         }
1852
1853         if (props != NULL) {
1854                 uint64_t version;
1855                 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
1856
1857                 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
1858                     &version) == 0);
1859
1860                 if ((props = zpool_valid_proplist(hdl, origname,
1861                     props, version, flags, errbuf)) == NULL)
1862                         return (-1);
1863                 if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
1864                         nvlist_free(props);
1865                         return (-1);
1866                 }
1867                 nvlist_free(props);
1868         }
1869
1870         (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
1871
1872         verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1873             &zc.zc_guid) == 0);
1874
1875         if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
1876                 zcmd_free_nvlists(&zc);
1877                 return (-1);
1878         }
1879         if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
1880                 zcmd_free_nvlists(&zc);
1881                 return (-1);
1882         }
1883
1884         zc.zc_cookie = flags;
1885         while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
1886             errno == ENOMEM) {
1887                 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
1888                         zcmd_free_nvlists(&zc);
1889                         return (-1);
1890                 }
1891         }
1892         if (ret != 0)
1893                 error = errno;
1894
1895         (void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
1896
1897         zcmd_free_nvlists(&zc);
1898
1899         zpool_get_load_policy(config, &policy);
1900
1901         if (error) {
1902                 char desc[1024];
1903                 char aux[256];
1904
1905                 /*
1906                  * Dry-run failed, but we print out what success
1907                  * looks like if we found a best txg
1908                  */
1909                 if (policy.zlp_rewind & ZPOOL_TRY_REWIND) {
1910                         zpool_rewind_exclaim(hdl, newname ? origname : thename,
1911                             B_TRUE, nv);
1912                         nvlist_free(nv);
1913                         return (-1);
1914                 }
1915
1916                 if (newname == NULL)
1917                         (void) snprintf(desc, sizeof (desc),
1918                             dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1919                             thename);
1920                 else
1921                         (void) snprintf(desc, sizeof (desc),
1922                             dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
1923                             origname, thename);
1924
1925                 switch (error) {
1926                 case ENOTSUP:
1927                         if (nv != NULL && nvlist_lookup_nvlist(nv,
1928                             ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
1929                             nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
1930                                 (void) printf(dgettext(TEXT_DOMAIN, "This "
1931                                     "pool uses the following feature(s) not "
1932                                     "supported by this system:\n"));
1933                                 zpool_print_unsup_feat(nv);
1934                                 if (nvlist_exists(nvinfo,
1935                                     ZPOOL_CONFIG_CAN_RDONLY)) {
1936                                         (void) printf(dgettext(TEXT_DOMAIN,
1937                                             "All unsupported features are only "
1938                                             "required for writing to the pool."
1939                                             "\nThe pool can be imported using "
1940                                             "'-o readonly=on'.\n"));
1941                                 }
1942                         }
1943                         /*
1944                          * Unsupported version.
1945                          */
1946                         (void) zfs_error(hdl, EZFS_BADVERSION, desc);
1947                         break;
1948
1949                 case EREMOTEIO:
1950                         if (nv != NULL && nvlist_lookup_nvlist(nv,
1951                             ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0) {
1952                                 char *hostname = "<unknown>";
1953                                 uint64_t hostid = 0;
1954                                 mmp_state_t mmp_state;
1955
1956                                 mmp_state = fnvlist_lookup_uint64(nvinfo,
1957                                     ZPOOL_CONFIG_MMP_STATE);
1958
1959                                 if (nvlist_exists(nvinfo,
1960                                     ZPOOL_CONFIG_MMP_HOSTNAME))
1961                                         hostname = fnvlist_lookup_string(nvinfo,
1962                                             ZPOOL_CONFIG_MMP_HOSTNAME);
1963
1964                                 if (nvlist_exists(nvinfo,
1965                                     ZPOOL_CONFIG_MMP_HOSTID))
1966                                         hostid = fnvlist_lookup_uint64(nvinfo,
1967                                             ZPOOL_CONFIG_MMP_HOSTID);
1968
1969                                 if (mmp_state == MMP_STATE_ACTIVE) {
1970                                         (void) snprintf(aux, sizeof (aux),
1971                                             dgettext(TEXT_DOMAIN, "pool is imp"
1972                                             "orted on host '%s' (hostid=%lx).\n"
1973                                             "Export the pool on the other "
1974                                             "system, then run 'zpool import'."),
1975                                             hostname, (unsigned long) hostid);
1976                                 } else if (mmp_state == MMP_STATE_NO_HOSTID) {
1977                                         (void) snprintf(aux, sizeof (aux),
1978                                             dgettext(TEXT_DOMAIN, "pool has "
1979                                             "the multihost property on and "
1980                                             "the\nsystem's hostid is not set. "
1981                                             "Set a unique system hostid with "
1982                                             "the zgenhostid(8) command.\n"));
1983                                 }
1984
1985                                 (void) zfs_error_aux(hdl, aux);
1986                         }
1987                         (void) zfs_error(hdl, EZFS_ACTIVE_POOL, desc);
1988                         break;
1989
1990                 case EINVAL:
1991                         (void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
1992                         break;
1993
1994                 case EROFS:
1995                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1996                             "one or more devices is read only"));
1997                         (void) zfs_error(hdl, EZFS_BADDEV, desc);
1998                         break;
1999
2000                 case ENXIO:
2001                         if (nv && nvlist_lookup_nvlist(nv,
2002                             ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
2003                             nvlist_lookup_nvlist(nvinfo,
2004                             ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
2005                                 (void) printf(dgettext(TEXT_DOMAIN,
2006                                     "The devices below are missing or "
2007                                     "corrupted, use '-m' to import the pool "
2008                                     "anyway:\n"));
2009                                 print_vdev_tree(hdl, NULL, missing, 2);
2010                                 (void) printf("\n");
2011                         }
2012                         (void) zpool_standard_error(hdl, error, desc);
2013                         break;
2014
2015                 case EEXIST:
2016                         (void) zpool_standard_error(hdl, error, desc);
2017                         break;
2018
2019                 case EBUSY:
2020                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2021                             "one or more devices are already in use\n"));
2022                         (void) zfs_error(hdl, EZFS_BADDEV, desc);
2023                         break;
2024                 case ENAMETOOLONG:
2025                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2026                             "new name of at least one dataset is longer than "
2027                             "the maximum allowable length"));
2028                         (void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
2029                         break;
2030                 default:
2031                         (void) zpool_standard_error(hdl, error, desc);
2032                         zpool_explain_recover(hdl,
2033                             newname ? origname : thename, -error, nv);
2034                         break;
2035                 }
2036
2037                 nvlist_free(nv);
2038                 ret = -1;
2039         } else {
2040                 zpool_handle_t *zhp;
2041
2042                 /*
2043                  * This should never fail, but play it safe anyway.
2044                  */
2045                 if (zpool_open_silent(hdl, thename, &zhp) != 0)
2046                         ret = -1;
2047                 else if (zhp != NULL)
2048                         zpool_close(zhp);
2049                 if (policy.zlp_rewind &
2050                     (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
2051                         zpool_rewind_exclaim(hdl, newname ? origname : thename,
2052                             ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0), nv);
2053                 }
2054                 nvlist_free(nv);
2055                 return (0);
2056         }
2057
2058         return (ret);
2059 }
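/*
 * Illustrative sketch (not part of the upstream source): the property-aware
 * import path, used for example to rename a pool or import it read-only.
 * The property list is validated by zpool_valid_proplist() before the ioctl
 * is issued, as in the function above; the new pool name is a placeholder.
 *
 *        nvlist_t *props = fnvlist_alloc();
 *        fnvlist_add_string(props,
 *            zpool_prop_to_name(ZPOOL_PROP_READONLY), "on");
 *        int err = zpool_import_props(hdl, config, "tank_restored", props,
 *            ZFS_IMPORT_NORMAL);
 *        fnvlist_free(props);
 */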
2060
2061 /*
2062  * Translate vdev names to guids.  If a vdev_path is determined to be
2063  * unsuitable then a vd_errlist is allocated and the vdev path and errno
2064  * are added to it.
2065  */
2066 static int
2067 zpool_translate_vdev_guids(zpool_handle_t *zhp, nvlist_t *vds,
2068     nvlist_t *vdev_guids, nvlist_t *guids_to_paths, nvlist_t **vd_errlist)
2069 {
2070         nvlist_t *errlist = NULL;
2071         int error = 0;
2072
2073         for (nvpair_t *elem = nvlist_next_nvpair(vds, NULL); elem != NULL;
2074             elem = nvlist_next_nvpair(vds, elem)) {
2075                 boolean_t spare, cache;
2076
2077                 char *vd_path = nvpair_name(elem);
2078                 nvlist_t *tgt = zpool_find_vdev(zhp, vd_path, &spare, &cache,
2079                     NULL);
2080
2081                 if ((tgt == NULL) || cache || spare) {
2082                         if (errlist == NULL) {
2083                                 errlist = fnvlist_alloc();
2084                                 error = EINVAL;
2085                         }
2086
2087                         uint64_t err = (tgt == NULL) ? EZFS_NODEVICE :
2088                             (spare ? EZFS_ISSPARE : EZFS_ISL2CACHE);
2089                         fnvlist_add_int64(errlist, vd_path, err);
2090                         continue;
2091                 }
2092
2093                 uint64_t guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
2094                 fnvlist_add_uint64(vdev_guids, vd_path, guid);
2095
2096                 char msg[MAXNAMELEN];
2097                 (void) snprintf(msg, sizeof (msg), "%llu", (u_longlong_t)guid);
2098                 fnvlist_add_string(guids_to_paths, msg, vd_path);
2099         }
2100
2101         if (error != 0) {
2102                 verify(errlist != NULL);
2103                 if (vd_errlist != NULL)
2104                         *vd_errlist = errlist;
2105                 else
2106                         fnvlist_free(errlist);
2107         }
2108
2109         return (error);
2110 }
2111
2112 static int
2113 xlate_init_err(int err)
2114 {
2115         switch (err) {
2116         case ENODEV:
2117                 return (EZFS_NODEVICE);
2118         case EINVAL:
2119         case EROFS:
2120                 return (EZFS_BADDEV);
2121         case EBUSY:
2122                 return (EZFS_INITIALIZING);
2123         case ESRCH:
2124                 return (EZFS_NO_INITIALIZE);
2125         }
2126         return (err);
2127 }
2128
2129 /*
2130  * Begin, suspend, or cancel the initialization (initializing of all free
2131  * blocks) for the given vdevs in the given pool.
2132  */
2133 static int
2134 zpool_initialize_impl(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
2135     nvlist_t *vds, boolean_t wait)
2136 {
2137         int err;
2138
2139         nvlist_t *vdev_guids = fnvlist_alloc();
2140         nvlist_t *guids_to_paths = fnvlist_alloc();
2141         nvlist_t *vd_errlist = NULL;
2142         nvlist_t *errlist;
2143         nvpair_t *elem;
2144
2145         err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,
2146             guids_to_paths, &vd_errlist);
2147
2148         if (err != 0) {
2149                 verify(vd_errlist != NULL);
2150                 goto list_errors;
2151         }
2152
2153         err = lzc_initialize(zhp->zpool_name, cmd_type,
2154             vdev_guids, &errlist);
2155
2156         if (err != 0) {
2157                 if (errlist != NULL) {
2158                         vd_errlist = fnvlist_lookup_nvlist(errlist,
2159                             ZPOOL_INITIALIZE_VDEVS);
2160                         goto list_errors;
2161                 }
2162                 (void) zpool_standard_error(zhp->zpool_hdl, err,
2163                     dgettext(TEXT_DOMAIN, "operation failed"));
2164                 goto out;
2165         }
2166
2167         if (wait) {
2168                 for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;
2169                     elem = nvlist_next_nvpair(vdev_guids, elem)) {
2170
2171                         uint64_t guid = fnvpair_value_uint64(elem);
2172
2173                         err = lzc_wait_tag(zhp->zpool_name,
2174                             ZPOOL_WAIT_INITIALIZE, guid, NULL);
2175                         if (err != 0) {
2176                                 (void) zpool_standard_error_fmt(zhp->zpool_hdl,
2177                                     err, dgettext(TEXT_DOMAIN, "error "
2178                                     "waiting for '%s' to initialize"),
2179                                     nvpair_name(elem));
2180
2181                                 goto out;
2182                         }
2183                 }
2184         }
2185         goto out;
2186
2187 list_errors:
2188         for (elem = nvlist_next_nvpair(vd_errlist, NULL); elem != NULL;
2189             elem = nvlist_next_nvpair(vd_errlist, elem)) {
2190                 int64_t vd_error = xlate_init_err(fnvpair_value_int64(elem));
2191                 char *path;
2192
2193                 if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),
2194                     &path) != 0)
2195                         path = nvpair_name(elem);
2196
2197                 (void) zfs_error_fmt(zhp->zpool_hdl, vd_error,
2198                     "cannot initialize '%s'", path);
2199         }
2200
2201 out:
2202         fnvlist_free(vdev_guids);
2203         fnvlist_free(guids_to_paths);
2204
2205         if (vd_errlist != NULL)
2206                 fnvlist_free(vd_errlist);
2207
2208         return (err == 0 ? 0 : -1);
2209 }
2210
2211 int
2212 zpool_initialize(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
2213     nvlist_t *vds)
2214 {
2215         return (zpool_initialize_impl(zhp, cmd_type, vds, B_FALSE));
2216 }
2217
2218 int
2219 zpool_initialize_wait(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
2220     nvlist_t *vds)
2221 {
2222         return (zpool_initialize_impl(zhp, cmd_type, vds, B_TRUE));
2223 }
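/*
 * Illustrative sketch (not part of the upstream source): starting
 * initialization on two leaf vdevs and waiting for it to finish.  The 'vds'
 * nvlist uses vdev paths (or GUID strings) as boolean pair names, which is
 * what zpool_translate_vdev_guids() expects; the device paths are
 * placeholders.
 *
 *        nvlist_t *vds = fnvlist_alloc();
 *        fnvlist_add_boolean(vds, "/dev/sdb1");
 *        fnvlist_add_boolean(vds, "/dev/sdc1");
 *        int err = zpool_initialize_wait(zhp, POOL_INITIALIZE_START, vds);
 *        fnvlist_free(vds);
 */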
2224
2225 static int
2226 xlate_trim_err(int err)
2227 {
2228         switch (err) {
2229         case ENODEV:
2230                 return (EZFS_NODEVICE);
2231         case EINVAL:
2232         case EROFS:
2233                 return (EZFS_BADDEV);
2234         case EBUSY:
2235                 return (EZFS_TRIMMING);
2236         case ESRCH:
2237                 return (EZFS_NO_TRIM);
2238         case EOPNOTSUPP:
2239                 return (EZFS_TRIM_NOTSUP);
2240         }
2241         return (err);
2242 }
2243
2244 static int
2245 zpool_trim_wait(zpool_handle_t *zhp, nvlist_t *vdev_guids)
2246 {
2247         int err;
2248         nvpair_t *elem;
2249
2250         for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;
2251             elem = nvlist_next_nvpair(vdev_guids, elem)) {
2252
2253                 uint64_t guid = fnvpair_value_uint64(elem);
2254
2255                 err = lzc_wait_tag(zhp->zpool_name,
2256                     ZPOOL_WAIT_TRIM, guid, NULL);
2257                 if (err != 0) {
2258                         (void) zpool_standard_error_fmt(zhp->zpool_hdl,
2259                             err, dgettext(TEXT_DOMAIN, "error "
2260                             "waiting to trim '%s'"), nvpair_name(elem));
2261
2262                         return (err);
2263                 }
2264         }
2265         return (0);
2266 }
2267
2268 /*
2269  * Check errlist and report any errors, omitting ones which should be
2270  * suppressed. Returns B_TRUE if any errors were reported.
2271  */
2272 static boolean_t
2273 check_trim_errs(zpool_handle_t *zhp, trimflags_t *trim_flags,
2274     nvlist_t *guids_to_paths, nvlist_t *vds, nvlist_t *errlist)
2275 {
2276         nvpair_t *elem;
2277         boolean_t reported_errs = B_FALSE;
2278         int num_vds = 0;
2279         int num_suppressed_errs = 0;
2280
2281         for (elem = nvlist_next_nvpair(vds, NULL);
2282             elem != NULL; elem = nvlist_next_nvpair(vds, elem)) {
2283                 num_vds++;
2284         }
2285
2286         for (elem = nvlist_next_nvpair(errlist, NULL);
2287             elem != NULL; elem = nvlist_next_nvpair(errlist, elem)) {
2288                 int64_t vd_error = xlate_trim_err(fnvpair_value_int64(elem));
2289                 char *path;
2290
2291                 /*
2292                  * If only the pool was specified, and it was not a secure
2293                  * trim, then suppress warnings for individual vdevs which
2294                  * do not support trimming.
2295                  */
2296                 if (vd_error == EZFS_TRIM_NOTSUP &&
2297                     trim_flags->fullpool &&
2298                     !trim_flags->secure) {
2299                         num_suppressed_errs++;
2300                         continue;
2301                 }
2302
2303                 reported_errs = B_TRUE;
2304                 if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),
2305                     &path) != 0)
2306                         path = nvpair_name(elem);
2307
2308                 (void) zfs_error_fmt(zhp->zpool_hdl, vd_error,
2309                     "cannot trim '%s'", path);
2310         }
2311
2312         if (num_suppressed_errs == num_vds) {
2313                 (void) zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
2314                     "no devices in pool support trim operations"));
2315                 (void) (zfs_error(zhp->zpool_hdl, EZFS_TRIM_NOTSUP,
2316                     dgettext(TEXT_DOMAIN, "cannot trim")));
2317                 reported_errs = B_TRUE;
2318         }
2319
2320         return (reported_errs);
2321 }
2322
2323 /*
2324  * Begin, suspend, or cancel the TRIM (discarding of all free blocks) for
2325  * the given vdevs in the given pool.
2326  */
2327 int
2328 zpool_trim(zpool_handle_t *zhp, pool_trim_func_t cmd_type, nvlist_t *vds,
2329     trimflags_t *trim_flags)
2330 {
2331         int err;
2332         int retval = 0;
2333
2334         nvlist_t *vdev_guids = fnvlist_alloc();
2335         nvlist_t *guids_to_paths = fnvlist_alloc();
2336         nvlist_t *errlist = NULL;
2337
2338         err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,
2339             guids_to_paths, &errlist);
2340         if (err != 0) {
2341                 check_trim_errs(zhp, trim_flags, guids_to_paths, vds, errlist);
2342                 retval = -1;
2343                 goto out;
2344         }
2345
2346         err = lzc_trim(zhp->zpool_name, cmd_type, trim_flags->rate,
2347             trim_flags->secure, vdev_guids, &errlist);
2348         if (err != 0) {
2349                 nvlist_t *vd_errlist;
2350                 if (errlist != NULL && nvlist_lookup_nvlist(errlist,
2351                     ZPOOL_TRIM_VDEVS, &vd_errlist) == 0) {
2352                         if (check_trim_errs(zhp, trim_flags, guids_to_paths,
2353                             vds, vd_errlist)) {
2354                                 retval = -1;
2355                                 goto out;
2356                         }
2357                 } else {
2358                         char msg[1024];
2359
2360                         (void) snprintf(msg, sizeof (msg),
2361                             dgettext(TEXT_DOMAIN, "operation failed"));
2362                         zpool_standard_error(zhp->zpool_hdl, err, msg);
2363                         retval = -1;
2364                         goto out;
2365                 }
2366         }
2367
2368
2369         if (trim_flags->wait)
2370                 retval = zpool_trim_wait(zhp, vdev_guids);
2371
2372 out:
2373         if (errlist != NULL)
2374                 fnvlist_free(errlist);
2375         fnvlist_free(vdev_guids);
2376         fnvlist_free(guids_to_paths);
2377         return (retval);
2378 }
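/*
 * Illustrative sketch (not part of the upstream source): starting a manual
 * TRIM of one leaf vdev and waiting for completion.  When the whole pool is
 * trimmed, the zpool(8) command collects every leaf vdev into 'vds' and sets
 * trim_flags->fullpool so that unsupported-device warnings are suppressed by
 * check_trim_errs() above.  The device path is a placeholder.
 *
 *        trimflags_t flags = { 0 };
 *        flags.wait = B_TRUE;
 *        nvlist_t *vds = fnvlist_alloc();
 *        fnvlist_add_boolean(vds, "/dev/sdb1");
 *        int err = zpool_trim(zhp, POOL_TRIM_START, vds, &flags);
 *        fnvlist_free(vds);
 */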
2379
2380 /*
2381  * Scan the pool.
2382  */
2383 int
2384 zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd)
2385 {
2386         zfs_cmd_t zc = {"\0"};
2387         char msg[1024];
2388         int err;
2389         libzfs_handle_t *hdl = zhp->zpool_hdl;
2390
2391         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2392         zc.zc_cookie = func;
2393         zc.zc_flags = cmd;
2394
2395         if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0)
2396                 return (0);
2397
2398         err = errno;
2399
2400         /* ECANCELED on a scrub means we resumed a paused scrub */
2401         if (err == ECANCELED && func == POOL_SCAN_SCRUB &&
2402             cmd == POOL_SCRUB_NORMAL)
2403                 return (0);
2404
2405         if (err == ENOENT && func != POOL_SCAN_NONE && cmd == POOL_SCRUB_NORMAL)
2406                 return (0);
2407
2408         if (func == POOL_SCAN_SCRUB) {
2409                 if (cmd == POOL_SCRUB_PAUSE) {
2410                         (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2411                             "cannot pause scrubbing %s"), zc.zc_name);
2412                 } else {
2413                         assert(cmd == POOL_SCRUB_NORMAL);
2414                         (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2415                             "cannot scrub %s"), zc.zc_name);
2416                 }
2417         } else if (func == POOL_SCAN_RESILVER) {
2418                 assert(cmd == POOL_SCRUB_NORMAL);
2419                 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2420                     "cannot restart resilver on %s"), zc.zc_name);
2421         } else if (func == POOL_SCAN_NONE) {
2422                 (void) snprintf(msg, sizeof (msg),
2423                     dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
2424                     zc.zc_name);
2425         } else {
2426                 assert(!"unexpected result");
2427         }
2428
2429         if (err == EBUSY) {
2430                 nvlist_t *nvroot;
2431                 pool_scan_stat_t *ps = NULL;
2432                 uint_t psc;
2433
2434                 verify(nvlist_lookup_nvlist(zhp->zpool_config,
2435                     ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
2436                 (void) nvlist_lookup_uint64_array(nvroot,
2437                     ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
2438                 if (ps && ps->pss_func == POOL_SCAN_SCRUB &&
2439                     ps->pss_state == DSS_SCANNING) {
2440                         if (cmd == POOL_SCRUB_PAUSE)
2441                                 return (zfs_error(hdl, EZFS_SCRUB_PAUSED, msg));
2442                         else
2443                                 return (zfs_error(hdl, EZFS_SCRUBBING, msg));
2444                 } else {
2445                         return (zfs_error(hdl, EZFS_RESILVERING, msg));
2446                 }
2447         } else if (err == ENOENT) {
2448                 return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
2449         } else if (err == ENOTSUP && func == POOL_SCAN_RESILVER) {
2450                 return (zfs_error(hdl, EZFS_NO_RESILVER_DEFER, msg));
2451         } else {
2452                 return (zpool_standard_error(hdl, err, msg));
2453         }
2454 }
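/*
 * Illustrative sketch (not part of the upstream source): the scrub and
 * resilver entry points all funnel through zpool_scan().  Start (or resume)
 * a scrub, pause it, and cancel it with the (func, cmd) pairs handled above:
 *
 *        (void) zpool_scan(zhp, POOL_SCAN_SCRUB, POOL_SCRUB_NORMAL);
 *        (void) zpool_scan(zhp, POOL_SCAN_SCRUB, POOL_SCRUB_PAUSE);
 *        (void) zpool_scan(zhp, POOL_SCAN_NONE, POOL_SCRUB_NORMAL);
 */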
2455
2456 /*
2457  * Find a vdev that matches the search criteria specified. We use the
2458  * nvpair name to determine how we should look for the device.
2459  * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
2460  * spare; but FALSE if it's an INUSE spare.
2461  */
2462 static nvlist_t *
2463 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
2464     boolean_t *l2cache, boolean_t *log)
2465 {
2466         uint_t c, children;
2467         nvlist_t **child;
2468         nvlist_t *ret;
2469         uint64_t is_log;
2470         char *srchkey;
2471         nvpair_t *pair = nvlist_next_nvpair(search, NULL);
2472
2473         /* Nothing to look for */
2474         if (search == NULL || pair == NULL)
2475                 return (NULL);
2476
2477         /* Obtain the key we will use to search */
2478         srchkey = nvpair_name(pair);
2479
2480         switch (nvpair_type(pair)) {
2481         case DATA_TYPE_UINT64:
2482                 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
2483                         uint64_t srchval, theguid;
2484
2485                         verify(nvpair_value_uint64(pair, &srchval) == 0);
2486                         verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2487                             &theguid) == 0);
2488                         if (theguid == srchval)
2489                                 return (nv);
2490                 }
2491                 break;
2492
2493         case DATA_TYPE_STRING: {
2494                 char *srchval, *val;
2495
2496                 verify(nvpair_value_string(pair, &srchval) == 0);
2497                 if (nvlist_lookup_string(nv, srchkey, &val) != 0)
2498                         break;
2499
2500                 /*
2501                  * Search for the requested value. Special cases:
2502                  *
2503                  * - ZPOOL_CONFIG_PATH for whole disk entries.  These end in
2504                  *   "-part1", or "p1".  The suffix is hidden from the user,
2505                  *   but included in the string, so this matches around it.
2506                  * - ZPOOL_CONFIG_PATH for short names: zfs_strcmp_shortname()
2507                  *   is used to check all possible expanded paths.
2508                  * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
2509                  *
2510                  * Otherwise, all other searches are simple string compares.
2511                  */
2512                 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
2513                         uint64_t wholedisk = 0;
2514
2515                         (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
2516                             &wholedisk);
2517                         if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
2518                                 return (nv);
2519
2520                 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
2521                         char *type, *idx, *end, *p;
2522                         uint64_t id, vdev_id;
2523
2524                         /*
2525                          * Determine our vdev type, keeping in mind
2526                          * that the srchval is composed of a type and
2527                          * vdev id pair (i.e. mirror-4).
2528                          */
2529                         if ((type = strdup(srchval)) == NULL)
2530                                 return (NULL);
2531
2532                         if ((p = strrchr(type, '-')) == NULL) {
2533                                 free(type);
2534                                 break;
2535                         }
2536                         idx = p + 1;
2537                         *p = '\0';
2538
2539                         /*
2540                          * If the types don't match then keep looking.
2541                          */
2542                         if (strncmp(val, type, strlen(val)) != 0) {
2543                                 free(type);
2544                                 break;
2545                         }
2546
2547                         verify(zpool_vdev_is_interior(type));
2548                         verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
2549                             &id) == 0);
2550
2551                         errno = 0;
2552                         vdev_id = strtoull(idx, &end, 10);
2553
2554                         free(type);
2555                         if (errno != 0)
2556                                 return (NULL);
2557
2558                         /*
2559                          * Now verify that we have the correct vdev id.
2560                          */
2561                         if (vdev_id == id)
2562                                 return (nv);
2563                 }
2564
2565                 /*
2566                  * Common case
2567                  */
2568                 if (strcmp(srchval, val) == 0)
2569                         return (nv);
2570                 break;
2571         }
2572
2573         default:
2574                 break;
2575         }
2576
2577         if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2578             &child, &children) != 0)
2579                 return (NULL);
2580
2581         for (c = 0; c < children; c++) {
2582                 if ((ret = vdev_to_nvlist_iter(child[c], search,
2583                     avail_spare, l2cache, NULL)) != NULL) {
2584                         /*
2585                          * The 'is_log' value is only set for the toplevel
2586                          * vdev, not the leaf vdevs.  So we always lookup the
2587                          * log device from the root of the vdev tree (where
2588                          * 'log' is non-NULL).
2589                          */
2590                         if (log != NULL &&
2591                             nvlist_lookup_uint64(child[c],
2592                             ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
2593                             is_log) {
2594                                 *log = B_TRUE;
2595                         }
2596                         return (ret);
2597                 }
2598         }
2599
2600         if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
2601             &child, &children) == 0) {
2602                 for (c = 0; c < children; c++) {
2603                         if ((ret = vdev_to_nvlist_iter(child[c], search,
2604                             avail_spare, l2cache, NULL)) != NULL) {
2605                                 *avail_spare = B_TRUE;
2606                                 return (ret);
2607                         }
2608                 }
2609         }
2610
2611         if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
2612             &child, &children) == 0) {
2613                 for (c = 0; c < children; c++) {
2614                         if ((ret = vdev_to_nvlist_iter(child[c], search,
2615                             avail_spare, l2cache, NULL)) != NULL) {
2616                                 *l2cache = B_TRUE;
2617                                 return (ret);
2618                         }
2619                 }
2620         }
2621
2622         return (NULL);
2623 }
2624
2625 /*
2626  * Given a physical path or guid, find the associated vdev.
2627  */
2628 nvlist_t *
2629 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
2630     boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
2631 {
2632         nvlist_t *search, *nvroot, *ret;
2633         uint64_t guid;
2634         char *end;
2635
2636         verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2637
2638         guid = strtoull(ppath, &end, 0);
2639         if (guid != 0 && *end == '\0') {
2640                 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
2641         } else {
2642                 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH,
2643                     ppath) == 0);
2644         }
2645
2646         verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2647             &nvroot) == 0);
2648
2649         *avail_spare = B_FALSE;
2650         *l2cache = B_FALSE;
2651         if (log != NULL)
2652                 *log = B_FALSE;
2653         ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2654         nvlist_free(search);
2655
2656         return (ret);
2657 }
2658
2659 /*
2660  * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
2661  */
2662 static boolean_t
2663 zpool_vdev_is_interior(const char *name)
2664 {
2665         if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2666             strncmp(name, VDEV_TYPE_SPARE, strlen(VDEV_TYPE_SPARE)) == 0 ||
2667             strncmp(name,
2668             VDEV_TYPE_REPLACING, strlen(VDEV_TYPE_REPLACING)) == 0 ||
2669             strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
2670                 return (B_TRUE);
2671         return (B_FALSE);
2672 }
2673
2674 nvlist_t *
2675 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
2676     boolean_t *l2cache, boolean_t *log)
2677 {
2678         char *end;
2679         nvlist_t *nvroot, *search, *ret;
2680         uint64_t guid;
2681
2682         verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2683
2684         guid = strtoull(path, &end, 0);
2685         if (guid != 0 && *end == '\0') {
2686                 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
2687         } else if (zpool_vdev_is_interior(path)) {
2688                 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
2689         } else {
2690                 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
2691         }
2692
2693         verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2694             &nvroot) == 0);
2695
2696         *avail_spare = B_FALSE;
2697         *l2cache = B_FALSE;
2698         if (log != NULL)
2699                 *log = B_FALSE;
2700         ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2701         nvlist_free(search);
2702
2703         return (ret);
2704 }
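/*
 * Illustrative sketch (not part of the upstream source): looking up a leaf
 * vdev by path and reading its GUID.  The same search also accepts a GUID
 * rendered as a string or an interior vdev name such as "mirror-0", as
 * handled above; the device path is a placeholder.
 *
 *        boolean_t spare, l2cache, log;
 *        nvlist_t *tgt = zpool_find_vdev(zhp, "/dev/sdb1", &spare, &l2cache,
 *            &log);
 *        if (tgt != NULL) {
 *                uint64_t guid = fnvlist_lookup_uint64(tgt,
 *                    ZPOOL_CONFIG_GUID);
 *        }
 */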
2705
2706 static int
2707 vdev_is_online(nvlist_t *nv)
2708 {
2709         uint64_t ival;
2710
2711         if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2712             nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2713             nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2714                 return (0);
2715
2716         return (1);
2717 }
2718
2719 /*
2720  * Helper function for zpool_get_physpaths().
2721  */
2722 static int
2723 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
2724     size_t *bytes_written)
2725 {
2726         size_t bytes_left, pos, rsz;
2727         char *tmppath;
2728         const char *format;
2729
2730         if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
2731             &tmppath) != 0)
2732                 return (EZFS_NODEVICE);
2733
2734         pos = *bytes_written;
2735         bytes_left = physpath_size - pos;
2736         format = (pos == 0) ? "%s" : " %s";
2737
2738         rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
2739         *bytes_written += rsz;
2740
2741         if (rsz >= bytes_left) {
2742                 /* if physpath was not copied properly, clear it */
2743                 if (bytes_left != 0) {
2744                         physpath[pos] = 0;
2745                 }
2746                 return (EZFS_NOSPC);
2747         }
2748         return (0);
2749 }
2750
2751 static int
2752 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
2753     size_t *rsz, boolean_t is_spare)
2754 {
2755         char *type;
2756         int ret;
2757
2758         if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
2759                 return (EZFS_INVALCONFIG);
2760
2761         if (strcmp(type, VDEV_TYPE_DISK) == 0) {
2762                 /*
2763                  * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
2764                  * For a spare vdev, we only want to boot from the active
2765                  * spare device.
2766                  */
2767                 if (is_spare) {
2768                         uint64_t spare = 0;
2769                         (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
2770                             &spare);
2771                         if (!spare)
2772                                 return (EZFS_INVALCONFIG);
2773                 }
2774
2775                 if (vdev_is_online(nv)) {
2776                         if ((ret = vdev_get_one_physpath(nv, physpath,
2777                             phypath_size, rsz)) != 0)
2778                                 return (ret);
2779                 }
2780         } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
2781             strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
2782             strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
2783             (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
2784                 nvlist_t **child;
2785                 uint_t count;
2786                 int i, ret;
2787
2788                 if (nvlist_lookup_nvlist_array(nv,
2789                     ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
2790                         return (EZFS_INVALCONFIG);
2791
2792                 for (i = 0; i < count; i++) {
2793                         ret = vdev_get_physpaths(child[i], physpath,
2794                             phypath_size, rsz, is_spare);
2795                         if (ret == EZFS_NOSPC)
2796                                 return (ret);
2797                 }
2798         }
2799
2800         return (EZFS_POOL_INVALARG);
2801 }
2802
2803 /*
2804  * Get phys_path for a root pool config.
2805  * Return 0 on success; non-zero on failure.
2806  */
2807 static int
2808 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
2809 {
2810         size_t rsz;
2811         nvlist_t *vdev_root;
2812         nvlist_t **child;
2813         uint_t count;
2814         char *type;
2815
2816         rsz = 0;
2817
2818         if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2819             &vdev_root) != 0)
2820                 return (EZFS_INVALCONFIG);
2821
2822         if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
2823             nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
2824             &child, &count) != 0)
2825                 return (EZFS_INVALCONFIG);
2826
2827         /*
2828          * root pool can only have a single top-level vdev.
2829          */
2830         if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1)
2831                 return (EZFS_POOL_INVALARG);
2832
2833         (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
2834             B_FALSE);
2835
2836         /* No online devices */
2837         if (rsz == 0)
2838                 return (EZFS_NODEVICE);
2839
2840         return (0);
2841 }
2842
2843 /*
2844  * Get phys_path for a root pool
2845  * Return 0 on success; non-zero on failure.
2846  */
2847 int
2848 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
2849 {
2850         return (zpool_get_config_physpath(zhp->zpool_config, physpath,
2851             phypath_size));
2852 }
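
/*
 * Illustrative sketch (not part of libzfs): how a boot-related helper might
 * query the physical paths of a root pool using zpool_get_physpath() above.
 * The pool name "rpool" is hypothetical.
 */
static int
example_print_root_physpath(libzfs_handle_t *hdl)
{
        zpool_handle_t *zhp;
        char physpath[MAXPATHLEN];
        int err;

        if ((zhp = zpool_open(hdl, "rpool")) == NULL)
                return (-1);

        /* On success, physpath holds a space-separated list of phys_paths. */
        err = zpool_get_physpath(zhp, physpath, sizeof (physpath));
        if (err == 0)
                (void) printf("%s\n", physpath);

        zpool_close(zhp);
        return (err);
}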
2853
2854 /*
2855  * Convert a vdev path to a GUID.  Returns GUID or 0 on error.
2856  *
2857  * If is_spare, is_l2cache, or is_log is non-NULL, then store within it
2858  * if the VDEV is a spare, l2cache, or log device.  If they're NULL then
2859  * ignore them.
2860  */
2861 static uint64_t
2862 zpool_vdev_path_to_guid_impl(zpool_handle_t *zhp, const char *path,
2863     boolean_t *is_spare, boolean_t *is_l2cache, boolean_t *is_log)
2864 {
2865         uint64_t guid;
2866         boolean_t spare = B_FALSE, l2cache = B_FALSE, log = B_FALSE;
2867         nvlist_t *tgt;
2868
2869         if ((tgt = zpool_find_vdev(zhp, path, &spare, &l2cache,
2870             &log)) == NULL)
2871                 return (0);
2872
2873         verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &guid) == 0);
2874         if (is_spare != NULL)
2875                 *is_spare = spare;
2876         if (is_l2cache != NULL)
2877                 *is_l2cache = l2cache;
2878         if (is_log != NULL)
2879                 *is_log = log;
2880
2881         return (guid);
2882 }
2883
2884 /* Convert a vdev path to a GUID.  Returns GUID or 0 on error. */
2885 uint64_t
2886 zpool_vdev_path_to_guid(zpool_handle_t *zhp, const char *path)
2887 {
2888         return (zpool_vdev_path_to_guid_impl(zhp, path, NULL, NULL, NULL));
2889 }
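
/*
 * Illustrative sketch (not part of libzfs): resolving a device name to its
 * vdev GUID with zpool_vdev_path_to_guid() above.  The device name "sda" is
 * hypothetical; zpool_find_vdev() accepts short names as well as full paths.
 */
static uint64_t
example_lookup_vdev_guid(zpool_handle_t *zhp)
{
        uint64_t guid = zpool_vdev_path_to_guid(zhp, "sda");

        if (guid == 0)
                (void) fprintf(stderr, "vdev not found\n");

        return (guid);
}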
2890
2891 /*
2892  * Bring the specified vdev online.   The 'flags' parameter is a set of the
2893  * ZFS_ONLINE_* flags.
2894  */
2895 int
2896 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
2897     vdev_state_t *newstate)
2898 {
2899         zfs_cmd_t zc = {"\0"};
2900         char msg[1024];
2901         char *pathname;
2902         nvlist_t *tgt;
2903         boolean_t avail_spare, l2cache, islog;
2904         libzfs_handle_t *hdl = zhp->zpool_hdl;
2905         int error;
2906
2907         if (flags & ZFS_ONLINE_EXPAND) {
2908                 (void) snprintf(msg, sizeof (msg),
2909                     dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
2910         } else {
2911                 (void) snprintf(msg, sizeof (msg),
2912                     dgettext(TEXT_DOMAIN, "cannot online %s"), path);
2913         }
2914
2915         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2916         if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2917             &islog)) == NULL)
2918                 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2919
2920         verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2921
2922         if (avail_spare)
2923                 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2924
2925         if ((flags & ZFS_ONLINE_EXPAND ||
2926             zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) &&
2927             nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &pathname) == 0) {
2928                 uint64_t wholedisk = 0;
2929
2930                 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
2931                     &wholedisk);
2932
2933                 /*
2934                  * XXX - L2ARC 1.0 devices can't support expansion.
2935                  */
2936                 if (l2cache) {
2937                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2938                             "cannot expand cache devices"));
2939                         return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
2940                 }
2941
2942                 if (wholedisk) {
2943                         const char *fullpath = path;
2944                         char buf[MAXPATHLEN];
2945
2946                         if (path[0] != '/') {
2947                                 error = zfs_resolve_shortname(path, buf,
2948                                     sizeof (buf));
2949                                 if (error != 0)
2950                                         return (zfs_error(hdl, EZFS_NODEVICE,
2951                                             msg));
2952
2953                                 fullpath = buf;
2954                         }
2955
2956                         error = zpool_relabel_disk(hdl, fullpath, msg);
2957                         if (error != 0)
2958                                 return (error);
2959                 }
2960         }
2961
2962         zc.zc_cookie = VDEV_STATE_ONLINE;
2963         zc.zc_obj = flags;
2964
2965         if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
2966                 if (errno == EINVAL) {
2967                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
2968                             "from this pool into a new one.  Use '%s' "
2969                             "instead"), "zpool detach");
2970                         return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
2971                 }
2972                 return (zpool_standard_error(hdl, errno, msg));
2973         }
2974
2975         *newstate = zc.zc_cookie;
2976         return (0);
2977 }
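
/*
 * Illustrative sketch (not part of libzfs): onlining a device and requesting
 * expansion, roughly what 'zpool online -e' does.  The pool and device names
 * are hypothetical.
 */
static int
example_online_and_expand(libzfs_handle_t *hdl)
{
        zpool_handle_t *zhp;
        vdev_state_t newstate;
        int err;

        if ((zhp = zpool_open(hdl, "tank")) == NULL)
                return (-1);

        err = zpool_vdev_online(zhp, "sda", ZFS_ONLINE_EXPAND, &newstate);
        if (err == 0 && newstate != VDEV_STATE_HEALTHY)
                (void) printf("device onlined, but not in a healthy state\n");

        zpool_close(zhp);
        return (err);
}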
2978
2979 /*
2980  * Take the specified vdev offline
2981  */
2982 int
2983 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
2984 {
2985         zfs_cmd_t zc = {"\0"};
2986         char msg[1024];
2987         nvlist_t *tgt;
2988         boolean_t avail_spare, l2cache;
2989         libzfs_handle_t *hdl = zhp->zpool_hdl;
2990
2991         (void) snprintf(msg, sizeof (msg),
2992             dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
2993
2994         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2995         if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2996             NULL)) == NULL)
2997                 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2998
2999         verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3000
3001         if (avail_spare)
3002                 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3003
3004         zc.zc_cookie = VDEV_STATE_OFFLINE;
3005         zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
3006
3007         if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
3008                 return (0);
3009
3010         switch (errno) {
3011         case EBUSY:
3012
3013                 /*
3014                  * There are no other replicas of this device.
3015                  */
3016                 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
3017
3018         case EEXIST:
3019                 /*
3020                  * The log device has unplayed logs
3021                  */
3022                 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
3023
3024         default:
3025                 return (zpool_standard_error(hdl, errno, msg));
3026         }
3027 }
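
/*
 * Illustrative sketch (not part of libzfs): temporarily offlining a device,
 * as 'zpool offline -t' does.  A temporary offline does not persist across a
 * reboot or re-import.  The pool and device names are hypothetical.
 */
static int
example_offline_temporarily(libzfs_handle_t *hdl)
{
        zpool_handle_t *zhp;
        int err;

        if ((zhp = zpool_open(hdl, "tank")) == NULL)
                return (-1);

        err = zpool_vdev_offline(zhp, "sda", B_TRUE);

        zpool_close(zhp);
        return (err);
}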
3028
3029 /*
3030  * Mark the given vdev faulted.
3031  */
3032 int
3033 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
3034 {
3035         zfs_cmd_t zc = {"\0"};
3036         char msg[1024];
3037         libzfs_handle_t *hdl = zhp->zpool_hdl;
3038
3039         (void) snprintf(msg, sizeof (msg),
3040             dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
3041
3042         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3043         zc.zc_guid = guid;
3044         zc.zc_cookie = VDEV_STATE_FAULTED;
3045         zc.zc_obj = aux;
3046
3047         if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
3048                 return (0);
3049
3050         switch (errno) {
3051         case EBUSY:
3052
3053                 /*
3054                  * There are no other replicas of this device.
3055                  */
3056                 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
3057
3058         default:
3059                 return (zpool_standard_error(hdl, errno, msg));
3060         }
3061
3062 }
3063
3064 /*
3065  * Mark the given vdev degraded.
3066  */
3067 int
3068 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
3069 {
3070         zfs_cmd_t zc = {"\0"};
3071         char msg[1024];
3072         libzfs_handle_t *hdl = zhp->zpool_hdl;
3073
3074         (void) snprintf(msg, sizeof (msg),
3075             dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);
3076
3077         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3078         zc.zc_guid = guid;
3079         zc.zc_cookie = VDEV_STATE_DEGRADED;
3080         zc.zc_obj = aux;
3081
3082         if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
3083                 return (0);
3084
3085         return (zpool_standard_error(hdl, errno, msg));
3086 }
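
/*
 * Illustrative sketch (not part of libzfs): faulting or degrading a vdev by
 * GUID, the way a diagnosis agent might.  The path is first resolved with
 * zpool_vdev_path_to_guid() above; VDEV_AUX_ERR_EXCEEDED is the usual
 * "too many errors" reason code.  The path argument is caller-supplied.
 */
static int
example_fault_vdev(zpool_handle_t *zhp, const char *path, boolean_t degrade)
{
        uint64_t guid = zpool_vdev_path_to_guid(zhp, path);

        if (guid == 0)
                return (-1);

        if (degrade)
                return (zpool_vdev_degrade(zhp, guid, VDEV_AUX_ERR_EXCEEDED));

        return (zpool_vdev_fault(zhp, guid, VDEV_AUX_ERR_EXCEEDED));
}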
3087
3088 /*
3089  * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
3090  * a hot spare.
3091  */
3092 static boolean_t
3093 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
3094 {
3095         nvlist_t **child;
3096         uint_t c, children;
3097         char *type;
3098
3099         if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
3100             &children) == 0) {
3101                 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
3102                     &type) == 0);
3103
3104                 if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
3105                     children == 2 && child[which] == tgt)
3106                         return (B_TRUE);
3107
3108                 for (c = 0; c < children; c++)
3109                         if (is_replacing_spare(child[c], tgt, which))
3110                                 return (B_TRUE);
3111         }
3112
3113         return (B_FALSE);
3114 }
3115
3116 /*
3117  * Attach new_disk (fully described by nvroot) to old_disk.
3118  * If 'replacing' is specified, the new disk will replace the old one.
3119  */
3120 int
3121 zpool_vdev_attach(zpool_handle_t *zhp, const char *old_disk,
3122     const char *new_disk, nvlist_t *nvroot, int replacing, boolean_t rebuild)
3123 {
3124         zfs_cmd_t zc = {"\0"};
3125         char msg[1024];
3126         int ret;
3127         nvlist_t *tgt;
3128         boolean_t avail_spare, l2cache, islog;
3129         uint64_t val;
3130         char *newname;
3131         nvlist_t **child;
3132         uint_t children;
3133         nvlist_t *config_root;
3134         libzfs_handle_t *hdl = zhp->zpool_hdl;
3135
3136         if (replacing)
3137                 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
3138                     "cannot replace %s with %s"), old_disk, new_disk);
3139         else
3140                 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
3141                     "cannot attach %s to %s"), new_disk, old_disk);
3142
3143         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3144         if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
3145             &islog)) == NULL)
3146                 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3147
3148         if (avail_spare)
3149                 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3150
3151         if (l2cache)
3152                 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
3153
3154         verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3155         zc.zc_cookie = replacing;
3156         zc.zc_simple = rebuild;
3157
3158         if (rebuild &&
3159             zfeature_lookup_guid("org.openzfs:device_rebuild", NULL) != 0) {
3160                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3161                     "the loaded zfs module doesn't support device rebuilds"));
3162                 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
3163         }
3164
3165         if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
3166             &child, &children) != 0 || children != 1) {
3167                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3168                     "new device must be a single disk"));
3169                 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
3170         }
3171
3172         verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
3173             ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
3174
3175         if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL)
3176                 return (-1);
3177
3178         /*
3179          * If the target is a hot spare that has been swapped in, we can only
3180          * replace it with another hot spare.
3181          */
3182         if (replacing &&
3183             nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
3184             (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
3185             NULL) == NULL || !avail_spare) &&
3186             is_replacing_spare(config_root, tgt, 1)) {
3187                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3188                     "can only be replaced by another hot spare"));
3189                 free(newname);
3190                 return (zfs_error(hdl, EZFS_BADTARGET, msg));
3191         }
3192
3193         free(newname);
3194
3195         if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
3196                 return (-1);
3197
3198         ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
3199
3200         zcmd_free_nvlists(&zc);
3201
3202         if (ret == 0)
3203                 return (0);
3204
3205         switch (errno) {
3206         case ENOTSUP:
3207                 /*
3208                  * Can't attach to or replace this type of vdev.
3209                  */
3210                 if (replacing) {
3211                         uint64_t version = zpool_get_prop_int(zhp,
3212                             ZPOOL_PROP_VERSION, NULL);
3213
3214                         if (islog) {
3215                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3216                                     "cannot replace a log with a spare"));
3217                         } else if (rebuild) {
3218                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3219                                     "only mirror vdevs support sequential "
3220                                     "reconstruction"));
3221                         } else if (version >= SPA_VERSION_MULTI_REPLACE) {
3222                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3223                                     "already in replacing/spare config; wait "
3224                                     "for completion or use 'zpool detach'"));
3225                         } else {
3226                                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3227                                     "cannot replace a replacing device"));
3228                         }
3229                 } else {
3230                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3231                             "can only attach to mirrors and top-level "
3232                             "disks"));
3233                 }
3234                 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
3235                 break;
3236
3237         case EINVAL:
3238                 /*
3239                  * The new device must be a single disk.
3240                  */
3241                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3242                     "new device must be a single disk"));
3243                 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
3244                 break;
3245
3246         case EBUSY:
3247                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy, "
3248                     "or device removal is in progress"),
3249                     new_disk);
3250                 (void) zfs_error(hdl, EZFS_BADDEV, msg);
3251                 break;
3252
3253         case EOVERFLOW:
3254                 /*
3255                  * The new device is too small.
3256                  */
3257                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3258                     "device is too small"));
3259                 (void) zfs_error(hdl, EZFS_BADDEV, msg);
3260                 break;
3261
3262         case EDOM:
3263                 /*
3264                  * The new device has a different optimal sector size.
3265                  */
3266                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3267                     "new device has a different optimal sector size; use the "
3268                     "option '-o ashift=N' to override the optimal size"));
3269                 (void) zfs_error(hdl, EZFS_BADDEV, msg);
3270                 break;
3271
3272         case ENAMETOOLONG:
3273                 /*
3274                  * The resulting top-level vdev spec won't fit in the label.
3275                  */
3276                 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
3277                 break;
3278
3279         default:
3280                 (void) zpool_standard_error(hdl, errno, msg);
3281         }
3282
3283         return (-1);
3284 }
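
/*
 * Illustrative sketch (not part of libzfs): attaching a new disk to an
 * existing one to form or extend a mirror.  zpool(8) normally builds the
 * 'nvroot' argument via make_root_vdev(), which also labels the disk; the
 * hand-rolled nvlist below only shows the minimal shape this function
 * expects (a root vdev with exactly one disk child).  Device paths are
 * hypothetical.
 */
static int
example_attach_mirror(zpool_handle_t *zhp)
{
        nvlist_t *nvroot = fnvlist_alloc();
        nvlist_t *disk = fnvlist_alloc();
        int err;

        fnvlist_add_string(disk, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK);
        fnvlist_add_string(disk, ZPOOL_CONFIG_PATH, "/dev/sdb1");
        fnvlist_add_uint64(disk, ZPOOL_CONFIG_WHOLE_DISK, 0ULL);

        fnvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
        fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &disk, 1);

        /* replacing == B_FALSE: attach; rebuild == B_FALSE: use resilver */
        err = zpool_vdev_attach(zhp, "/dev/sda1", "/dev/sdb1", nvroot,
            B_FALSE, B_FALSE);

        fnvlist_free(disk);
        fnvlist_free(nvroot);
        return (err);
}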
3285
3286 /*
3287  * Detach the specified device.
3288  */
3289 int
3290 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
3291 {
3292         zfs_cmd_t zc = {"\0"};
3293         char msg[1024];
3294         nvlist_t *tgt;
3295         boolean_t avail_spare, l2cache;
3296         libzfs_handle_t *hdl = zhp->zpool_hdl;
3297
3298         (void) snprintf(msg, sizeof (msg),
3299             dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
3300
3301         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3302         if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3303             NULL)) == NULL)
3304                 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3305
3306         if (avail_spare)
3307                 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3308
3309         if (l2cache)
3310                 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
3311
3312         verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3313
3314         if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
3315                 return (0);
3316
3317         switch (errno) {
3318
3319         case ENOTSUP:
3320                 /*
3321                  * Can't detach from this type of vdev.
3322                  */
3323                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
3324                     "applicable to mirror and replacing vdevs"));
3325                 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
3326                 break;
3327
3328         case EBUSY:
3329                 /*
3330                  * There are no other replicas of this device.
3331                  */
3332                 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
3333                 break;
3334
3335         default:
3336                 (void) zpool_standard_error(hdl, errno, msg);
3337         }
3338
3339         return (-1);
3340 }
3341
3342 /*
3343  * Find a mirror vdev in the source nvlist.
3344  *
3345  * The mchild array contains a list of disks in one of the top-level mirrors
3346  * of the source pool.  The schild array contains a list of disks that the
3347  * user specified on the command line.  We loop over the mchild array to
3348  * see if any entry in the schild array matches.
3349  *
3350  * If a disk in the mchild array is found in the schild array, we return
3351  * the index of that entry.  Otherwise we return -1.
3352  */
3353 static int
3354 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
3355     nvlist_t **schild, uint_t schildren)
3356 {
3357         uint_t mc;
3358
3359         for (mc = 0; mc < mchildren; mc++) {
3360                 uint_t sc;
3361                 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
3362                     mchild[mc], 0);
3363
3364                 for (sc = 0; sc < schildren; sc++) {
3365                         char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
3366                             schild[sc], 0);
3367                         boolean_t result = (strcmp(mpath, spath) == 0);
3368
3369                         free(spath);
3370                         if (result) {
3371                                 free(mpath);
3372                                 return (mc);
3373                         }
3374                 }
3375
3376                 free(mpath);
3377         }
3378
3379         return (-1);
3380 }
3381
3382 /*
3383  * Split a mirror pool.  If newroot points to null, then a new nvlist
3384  * is generated and it is the responsibility of the caller to free it.
3385  */
3386 int
3387 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
3388     nvlist_t *props, splitflags_t flags)
3389 {
3390         zfs_cmd_t zc = {"\0"};
3391         char msg[1024];
3392         nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
3393         nvlist_t **varray = NULL, *zc_props = NULL;
3394         uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
3395         libzfs_handle_t *hdl = zhp->zpool_hdl;
3396         uint64_t vers, readonly = B_FALSE;
3397         boolean_t freelist = B_FALSE, memory_err = B_TRUE;
3398         int retval = 0;
3399
3400         (void) snprintf(msg, sizeof (msg),
3401             dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
3402
3403         if (!zpool_name_valid(hdl, B_FALSE, newname))
3404                 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
3405
3406         if ((config = zpool_get_config(zhp, NULL)) == NULL) {
3407                 (void) fprintf(stderr, gettext("Internal error: unable to "
3408                     "retrieve pool configuration\n"));
3409                 return (-1);
3410         }
3411
3412         verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
3413             == 0);
3414         verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
3415
3416         if (props) {
3417                 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
3418                 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
3419                     props, vers, flags, msg)) == NULL)
3420                         return (-1);
3421                 (void) nvlist_lookup_uint64(zc_props,
3422                     zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
3423                 if (readonly) {
3424                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3425                             "property %s can only be set at import time"),
3426                             zpool_prop_to_name(ZPOOL_PROP_READONLY));
3427                         return (-1);
3428                 }
3429         }
3430
3431         if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
3432             &children) != 0) {
3433                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3434                     "Source pool is missing vdev tree"));
3435                 nvlist_free(zc_props);
3436                 return (-1);
3437         }
3438
3439         varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
3440         vcount = 0;
3441
3442         if (*newroot == NULL ||
3443             nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
3444             &newchild, &newchildren) != 0)
3445                 newchildren = 0;
3446
3447         for (c = 0; c < children; c++) {
3448                 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
3449                 char *type;
3450                 nvlist_t **mchild, *vdev;
3451                 uint_t mchildren;
3452                 int entry;
3453
3454                 /*
3455                  * Unlike cache & spares, slogs are stored in the
3456                  * ZPOOL_CONFIG_CHILDREN array.  We filter them out here.
3457                  */
3458                 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
3459                     &is_log);
3460                 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
3461                     &is_hole);
3462                 if (is_log || is_hole) {
3463                         /*
3464                          * Create a hole vdev and put it in the config.
3465                          */
3466                         if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
3467                                 goto out;
3468                         if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
3469                             VDEV_TYPE_HOLE) != 0)
3470                                 goto out;
3471                         if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
3472                             1) != 0)
3473                                 goto out;
3474                         if (lastlog == 0)
3475                                 lastlog = vcount;
3476                         varray[vcount++] = vdev;
3477                         continue;
3478                 }
3479                 lastlog = 0;
3480                 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
3481                     == 0);
3482
3483                 if (strcmp(type, VDEV_TYPE_INDIRECT) == 0) {
3484                         vdev = child[c];
3485                         if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
3486                                 goto out;
3487                         continue;
3488                 } else if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
3489                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3490                             "Source pool must be composed only of mirrors\n"));
3491                         retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
3492                         goto out;
3493                 }
3494
3495                 verify(nvlist_lookup_nvlist_array(child[c],
3496                     ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
3497
3498                 /* find or add an entry for this top-level vdev */
3499                 if (newchildren > 0 &&
3500                     (entry = find_vdev_entry(zhp, mchild, mchildren,
3501                     newchild, newchildren)) >= 0) {
3502                         /* We found a disk that the user specified. */
3503                         vdev = mchild[entry];
3504                         ++found;
3505                 } else {
3506                         /* User didn't specify a disk for this vdev. */
3507                         vdev = mchild[mchildren - 1];
3508                 }
3509
3510                 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
3511                         goto out;
3512         }
3513
3514         /* did we find every disk the user specified? */
3515         if (found != newchildren) {
3516                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
3517                     "include at most one disk from each mirror"));
3518                 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
3519                 goto out;
3520         }
3521
3522         /* Prepare the nvlist for populating. */
3523         if (*newroot == NULL) {
3524                 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
3525                         goto out;
3526                 freelist = B_TRUE;
3527                 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
3528                     VDEV_TYPE_ROOT) != 0)
3529                         goto out;
3530         } else {
3531                 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
3532         }
3533
3534         /* Add all the children we found */
3535         if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
3536             lastlog == 0 ? vcount : lastlog) != 0)
3537                 goto out;
3538
3539         /*
3540          * If we're just doing a dry run, exit now with success.
3541          */
3542         if (flags.dryrun) {
3543                 memory_err = B_FALSE;
3544                 freelist = B_FALSE;
3545                 goto out;
3546         }
3547
3548         /* now build up the config list & call the ioctl */
3549         if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
3550                 goto out;
3551
3552         if (nvlist_add_nvlist(newconfig,
3553             ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
3554             nvlist_add_string(newconfig,
3555             ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
3556             nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
3557                 goto out;
3558
3559         /*
3560          * The new pool is automatically part of the namespace unless we
3561          * explicitly export it.
3562          */
3563         if (!flags.import)
3564                 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
3565         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3566         (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
3567         if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
3568                 goto out;
3569         if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
3570                 goto out;
3571
3572         if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
3573                 retval = zpool_standard_error(hdl, errno, msg);
3574                 goto out;
3575         }
3576
3577         freelist = B_FALSE;
3578         memory_err = B_FALSE;
3579
3580 out:
3581         if (varray != NULL) {
3582                 int v;
3583
3584                 for (v = 0; v < vcount; v++)
3585                         nvlist_free(varray[v]);
3586                 free(varray);
3587         }
3588         zcmd_free_nvlists(&zc);
3589         nvlist_free(zc_props);
3590         nvlist_free(newconfig);
3591         if (freelist) {
3592                 nvlist_free(*newroot);
3593                 *newroot = NULL;
3594         }
3595
3596         if (retval != 0)
3597                 return (retval);
3598
3599         if (memory_err)
3600                 return (no_memory(hdl));
3601
3602         return (0);
3603 }
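
/*
 * Illustrative sketch (not part of libzfs): a dry-run split of a mirrored
 * pool, similar to 'zpool split -n tank newtank'.  With dryrun set, only the
 * would-be configuration is computed and returned in 'newroot'; nothing is
 * sent to the kernel.  The pool names are hypothetical.
 */
static int
example_split_dryrun(libzfs_handle_t *hdl)
{
        zpool_handle_t *zhp;
        nvlist_t *newroot = NULL;
        splitflags_t flags = { 0 };
        int err;

        flags.dryrun = 1;

        if ((zhp = zpool_open(hdl, "tank")) == NULL)
                return (-1);

        err = zpool_vdev_split(zhp, "newtank", &newroot, NULL, flags);
        if (err == 0) {
                nvlist_print(stdout, newroot);
                nvlist_free(newroot);
        }

        zpool_close(zhp);
        return (err);
}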
3604
3605 /*
3606  * Remove the given device.
3607  */
3608 int
3609 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
3610 {
3611         zfs_cmd_t zc = {"\0"};
3612         char msg[1024];
3613         nvlist_t *tgt;
3614         boolean_t avail_spare, l2cache, islog;
3615         libzfs_handle_t *hdl = zhp->zpool_hdl;
3616         uint64_t version;
3617
3618         (void) snprintf(msg, sizeof (msg),
3619             dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3620
3621         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3622         if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3623             &islog)) == NULL)
3624                 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3625
3626         version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
3627         if (islog && version < SPA_VERSION_HOLES) {
3628                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3629                     "pool must be upgraded to support log removal"));
3630                 return (zfs_error(hdl, EZFS_BADVERSION, msg));
3631         }
3632
3633         zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
3634
3635         if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3636                 return (0);
3637
3638         switch (errno) {
3639
3640         case EINVAL:
3641                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3642                     "invalid config; all top-level vdevs must "
3643                     "have the same sector size and not be raidz."));
3644                 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
3645                 break;
3646
3647         case EBUSY:
3648                 if (islog) {
3649                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3650                             "Mount encrypted datasets to replay logs."));
3651                 } else {
3652                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3653                             "Pool busy; removal may already be in progress"));
3654                 }
3655                 (void) zfs_error(hdl, EZFS_BUSY, msg);
3656                 break;
3657
3658         case EACCES:
3659                 if (islog) {
3660                         zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3661                             "Mount encrypted datasets to replay logs."));
3662                         (void) zfs_error(hdl, EZFS_BUSY, msg);
3663                 } else {
3664                         (void) zpool_standard_error(hdl, errno, msg);
3665                 }
3666                 break;
3667
3668         default:
3669                 (void) zpool_standard_error(hdl, errno, msg);
3670         }
3671         return (-1);
3672 }
3673
3674 int
3675 zpool_vdev_remove_cancel(zpool_handle_t *zhp)
3676 {
3677         zfs_cmd_t zc;
3678         char msg[1024];
3679         libzfs_handle_t *hdl = zhp->zpool_hdl;
3680
3681         (void) snprintf(msg, sizeof (msg),
3682             dgettext(TEXT_DOMAIN, "cannot cancel removal"));
3683
3684         bzero(&zc, sizeof (zc));
3685         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3686         zc.zc_cookie = 1;
3687
3688         if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3689                 return (0);
3690
3691         return (zpool_standard_error(hdl, errno, msg));
3692 }
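
/*
 * Illustrative sketch (not part of libzfs): starting a device removal and,
 * if the caller changes its mind while data is still being copied off,
 * cancelling it with zpool_vdev_remove_cancel() above.  The device name is
 * hypothetical.
 */
static int
example_remove_vdev(zpool_handle_t *zhp, boolean_t cancel)
{
        if (cancel)
                return (zpool_vdev_remove_cancel(zhp));

        return (zpool_vdev_remove(zhp, "sda"));
}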
3693
3694 int
3695 zpool_vdev_indirect_size(zpool_handle_t *zhp, const char *path,
3696     uint64_t *sizep)
3697 {
3698         char msg[1024];
3699         nvlist_t *tgt;
3700         boolean_t avail_spare, l2cache, islog;
3701         libzfs_handle_t *hdl = zhp->zpool_hdl;
3702
3703         (void) snprintf(msg, sizeof (msg),
3704             dgettext(TEXT_DOMAIN, "cannot determine indirect size of %s"),
3705             path);
3706
3707         if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3708             &islog)) == NULL)
3709                 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3710
3711         if (avail_spare || l2cache || islog) {
3712                 *sizep = 0;
3713                 return (0);
3714         }
3715
3716         if (nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_INDIRECT_SIZE, sizep) != 0) {
3717                 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3718                     "indirect size not available"));
3719                 return (zfs_error(hdl, EINVAL, msg));
3720         }
3721         return (0);
3722 }
3723
3724 /*
3725  * Clear the errors for the pool, or the particular device if specified.
3726  */
3727 int
3728 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
3729 {
3730         zfs_cmd_t zc = {"\0"};
3731         char msg[1024];
3732         nvlist_t *tgt;
3733         zpool_load_policy_t policy;
3734         boolean_t avail_spare, l2cache;
3735         libzfs_handle_t *hdl = zhp->zpool_hdl;
3736         nvlist_t *nvi = NULL;
3737         int error;
3738
3739         if (path)
3740                 (void) snprintf(msg, sizeof (msg),
3741                     dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3742                     path);
3743         else
3744                 (void) snprintf(msg, sizeof (msg),
3745                     dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3746                     zhp->zpool_name);
3747
3748         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3749         if (path) {
3750                 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
3751                     &l2cache, NULL)) == NULL)
3752                         return (zfs_error(hdl, EZFS_NODEVICE, msg));
3753
3754                 /*
3755                  * Don't allow error clearing for hot spares.  Do allow
3756                  * error clearing for l2cache devices.
3757                  */
3758                 if (avail_spare)
3759                         return (zfs_error(hdl, EZFS_ISSPARE, msg));
3760
3761                 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
3762                     &zc.zc_guid) == 0);
3763         }
3764
3765         zpool_get_load_policy(rewindnvl, &policy);
3766         zc.zc_cookie = policy.zlp_rewind;
3767
3768         if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
3769                 return (-1);
3770
3771         if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
3772                 return (-1);
3773
3774         while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
3775             errno == ENOMEM) {
3776                 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3777                         zcmd_free_nvlists(&zc);
3778                         return (-1);
3779                 }
3780         }
3781
3782         if (!error || ((policy.zlp_rewind & ZPOOL_TRY_REWIND) &&
3783             errno != EPERM && errno != EACCES)) {
3784                 if (policy.zlp_rewind &
3785                     (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
3786                         (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
3787                         zpool_rewind_exclaim(hdl, zc.zc_name,
3788                             ((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0),
3789                             nvi);
3790                         nvlist_free(nvi);
3791                 }
3792                 zcmd_free_nvlists(&zc);
3793                 return (0);
3794         }
3795
3796         zcmd_free_nvlists(&zc);
3797         return (zpool_standard_error(hdl, errno, msg));
3798 }
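
/*
 * Illustrative sketch (not part of libzfs): clearing error counts on a single
 * device, roughly what 'zpool clear tank sda' does.  An empty policy nvlist
 * means "no rewind"; zpool(8) adds rewind-policy entries to it when -F or -n
 * is given.  The device argument is caller-supplied.
 */
static int
example_clear_device_errors(zpool_handle_t *zhp, const char *device)
{
        nvlist_t *policy = fnvlist_alloc();
        int err;

        /* Passing NULL instead of a device name clears the whole pool. */
        err = zpool_clear(zhp, device, policy);

        fnvlist_free(policy);
        return (err);
}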
3799
3800 /*
3801  * Similar to zpool_clear(), but takes a GUID (used by fmd).
3802  */
3803 int
3804 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
3805 {
3806         zfs_cmd_t zc = {"\0"};
3807         char msg[1024];
3808         libzfs_handle_t *hdl = zhp->zpool_hdl;
3809
3810         (void) snprintf(msg, sizeof (msg),
3811             dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
3812             (u_longlong_t)guid);
3813
3814         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3815         zc.zc_guid = guid;
3816         zc.zc_cookie = ZPOOL_NO_REWIND;
3817
3818         if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
3819                 return (0);
3820
3821         return (zpool_standard_error(hdl, errno, msg));
3822 }
3823
3824 /*
3825  * Change the GUID for a pool.
3826  */
3827 int
3828 zpool_reguid(zpool_handle_t *zhp)
3829 {
3830         char msg[1024];
3831         libzfs_handle_t *hdl = zhp->zpool_hdl;
3832         zfs_cmd_t zc = {"\0"};
3833
3834         (void) snprintf(msg, sizeof (msg),
3835             dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
3836
3837         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3838         if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
3839                 return (0);
3840
3841         return (zpool_standard_error(hdl, errno, msg));
3842 }
3843
3844 /*
3845  * Reopen the pool.
3846  */
3847 int
3848 zpool_reopen_one(zpool_handle_t *zhp, void *data)
3849 {
3850         libzfs_handle_t *hdl = zpool_get_handle(zhp);
3851         const char *pool_name = zpool_get_name(zhp);
3852         boolean_t *scrub_restart = data;
3853         int error;
3854
3855         error = lzc_reopen(pool_name, *scrub_restart);
3856         if (error) {
3857                 return (zpool_standard_error_fmt(hdl, error,
3858                     dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), pool_name));
3859         }
3860
3861         return (0);
3862 }
3863
3864 /* call into libzfs_core to execute the sync IOCTL per pool */
3865 int
3866 zpool_sync_one(zpool_handle_t *zhp, void *data)
3867 {
3868         int ret;
3869         libzfs_handle_t *hdl = zpool_get_handle(zhp);
3870         const char *pool_name = zpool_get_name(zhp);
3871         boolean_t *force = data;
3872         nvlist_t *innvl = fnvlist_alloc();
3873
3874         fnvlist_add_boolean_value(innvl, "force", *force);
3875         if ((ret = lzc_sync(pool_name, innvl, NULL)) != 0) {
3876                 nvlist_free(innvl);
3877                 return (zpool_standard_error_fmt(hdl, ret,
3878                     dgettext(TEXT_DOMAIN, "sync '%s' failed"), pool_name));
3879         }
3880         nvlist_free(innvl);
3881
3882         return (0);
3883 }
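
/*
 * Illustrative sketch (not part of libzfs): zpool_reopen_one() and
 * zpool_sync_one() above are written as zpool_iter() callbacks, so applying
 * them to every imported pool is a one-liner.  The 'force' setting shown is
 * hypothetical.
 */
static int
example_sync_all_pools(libzfs_handle_t *hdl)
{
        boolean_t force = B_FALSE;

        /* 'force' is handed to each callback through the 'data' argument. */
        return (zpool_iter(hdl, zpool_sync_one, &force));
}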
3884
3885 #define PATH_BUF_LEN    64
3886
3887 /*
3888  * Given a vdev, return the name to display in iostat.  If the vdev has a path,
3889  * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
3890  * We also check if this is a whole disk, in which case we strip off the
3891  * trailing 's0' slice name.
3892  *
3893  * This routine is also responsible for identifying when disks have been
3894  * reconfigured in a new location.  The kernel will have opened the device by
3895  * devid, but the path will still refer to the old location.  To catch this, we
3896  * first do a path -> devid translation (which is fast for the common case).  If
3897  * the devid matches, we're done.  If not, we do a reverse devid -> path
3898  * translation and issue the appropriate ioctl() to update the path of the vdev.
3899  * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
3900  * of these checks.
3901  */
3902 char *
3903 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
3904     int name_flags)
3905 {
3906         char *path, *type, *env;
3907         uint64_t value;
3908         char buf[PATH_BUF_LEN];
3909         char tmpbuf[PATH_BUF_LEN];
3910
3911         /*
3912          * vdev_name will be "root"/"root-0" for the root vdev, but it is the
3913          * zpool name that will be displayed to the user.
3914          */
3915         verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
3916         if (zhp != NULL && strcmp(type, "root") == 0)
3917                 return (zfs_strdup(hdl, zpool_get_name(zhp)));
3918
3919         env = getenv("ZPOOL_VDEV_NAME_PATH");
3920         if (env && (strtoul(env, NULL, 0) > 0 ||
3921             !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3922                 name_flags |= VDEV_NAME_PATH;
3923
3924         env = getenv("ZPOOL_VDEV_NAME_GUID");
3925         if (env && (strtoul(env, NULL, 0) > 0 ||
3926             !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3927                 name_flags |= VDEV_NAME_GUID;
3928
3929         env = getenv("ZPOOL_VDEV_NAME_FOLLOW_LINKS");
3930         if (env && (strtoul(env, NULL, 0) > 0 ||
3931             !strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
3932                 name_flags |= VDEV_NAME_FOLLOW_LINKS;
3933
3934         if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
3935             name_flags & VDEV_NAME_GUID) {
3936                 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value);
3937                 (void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value);
3938                 path = buf;
3939         } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
3940                 if (name_flags & VDEV_NAME_FOLLOW_LINKS) {
3941                         char *rp = realpath(path, NULL);
3942                         if (rp) {
3943                                 strlcpy(buf, rp, sizeof (buf));
3944                                 path = buf;
3945                                 free(rp);
3946                         }
3947                 }
3948
3949                 /*
3950                  * For a block device only use the name.
3951                  */
3952                 if ((strcmp(type, VDEV_TYPE_DISK) == 0) &&
3953                     !(name_flags & VDEV_NAME_PATH)) {
3954                         path = zfs_strip_path(path);
3955                 }
3956
3957                 /*
3958                  * Remove the partition from the path if this is a whole disk.
3959                  */
3960                 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value)
3961                     == 0 && value && !(name_flags & VDEV_NAME_PATH)) {
3962                         return (zfs_strip_partition(path));
3963                 }
3964         } else {
3965                 path = type;
3966
3967                 /*
3968                  * If it's a raidz device, we need to stick in the parity level.
3969                  */
3970                 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
3971                         verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3972                             &value) == 0);
3973                         (void) snprintf(buf, sizeof (buf), "%s%llu", path,
3974                             (u_longlong_t)value);
3975                         path = buf;
3976                 }
3977
3978                 /*
3979                  * We identify each top-level vdev by using a <type-id>
3980                  * naming convention.
3981                  */
3982                 if (name_flags & VDEV_NAME_TYPE_ID) {
3983                         uint64_t id;
3984                         verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
3985                             &id) == 0);
3986                         (void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
3987                             path, (u_longlong_t)id);
3988                         path = tmpbuf;
3989                 }
3990         }
3991
3992         return (zfs_strdup(hdl, path));
3993 }
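
/*
 * Illustrative sketch (not part of libzfs): printing the display names of a
 * pool's top-level vdevs, the way 'zpool status' and 'zpool iostat' label
 * them.  The strings returned by zpool_vdev_name() are allocated and must be
 * freed by the caller.
 */
static void
example_print_toplevel_vdevs(libzfs_handle_t *hdl, zpool_handle_t *zhp)
{
        nvlist_t *config, *nvroot, **child;
        uint_t c, children;

        config = zpool_get_config(zhp, NULL);
        if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
            &nvroot) != 0 ||
            nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
            &child, &children) != 0)
                return;

        for (c = 0; c < children; c++) {
                char *name = zpool_vdev_name(hdl, zhp, child[c],
                    VDEV_NAME_TYPE_ID);

                (void) printf("%s\n", name);
                free(name);
        }
}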
3994
3995 static int
3996 zbookmark_mem_compare(const void *a, const void *b)
3997 {
3998         return (memcmp(a, b, sizeof (zbookmark_phys_t)));
3999 }
4000
4001 /*
4002  * Retrieve the persistent error log, uniquify the members, and return to the
4003  * caller.
4004  */
4005 int
4006 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
4007 {
4008         zfs_cmd_t zc = {"\0"};
4009         libzfs_handle_t *hdl = zhp->zpool_hdl;
4010         uint64_t count;
4011         zbookmark_phys_t *zb = NULL;
4012         int i;
4013
4014         /*
4015          * Retrieve the raw error list from the kernel.  If the number of errors
4016          * has increased, allocate more space and continue until we get the
4017          * entire list.
4018          */
4019         verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
4020             &count) == 0);
4021         if (count == 0)
4022                 return (0);
4023         zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
4024             count * sizeof (zbookmark_phys_t));
4025         zc.zc_nvlist_dst_size = count;
4026         (void) strcpy(zc.zc_name, zhp->zpool_name);
4027         for (;;) {
4028                 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_ERROR_LOG,
4029                     &zc) != 0) {
4030                         free((void *)(uintptr_t)zc.zc_nvlist_dst);
4031                         if (errno == ENOMEM) {
4032                                 void *dst;
4033
4034                                 count = zc.zc_nvlist_dst_size;
4035                                 dst = zfs_alloc(zhp->zpool_hdl, count *
4036                                     sizeof (zbookmark_phys_t));
4037                                 zc.zc_nvlist_dst = (uintptr_t)dst;
4038                         } else {
4039                                 return (zpool_standard_error_fmt(hdl, errno,
4040                                     dgettext(TEXT_DOMAIN, "errors: List of "
4041                                     "errors unavailable")));
4042                         }
4043                 } else {
4044                         break;
4045                 }
4046         }
4047
4048         /*
4049          * Sort the resulting bookmarks.  This is a little confusing due to the
4050          * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
4051          * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
4052          * _not_ copied as part of the process.  So we point the start of our
4053          * array appropriate and decrement the total number of elements.
4054          * array appropriately and decrement the total number of elements.
4055         zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
4056             zc.zc_nvlist_dst_size;
4057         count -= zc.zc_nvlist_dst_size;
4058
4059         qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);
4060
4061         verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
4062
4063         /*
4064          * Fill in the nverrlistp with nvlist's of dataset and object numbers.
4065          */
4066         for (i = 0; i < count; i++) {
4067                 nvlist_t *nv;
4068
4069                 /* ignoring zb_blkid and zb_level for now */
4070                 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
4071                     zb[i-1].zb_object == zb[i].zb_object)
4072                         continue;
4073
4074                 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
4075                         goto nomem;
4076                 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
4077                     zb[i].zb_objset) != 0) {
4078                         nvlist_free(nv);
4079                         goto nomem;
4080                 }
4081                 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
4082                     zb[i].zb_object) != 0) {
4083                         nvlist_free(nv);
4084                         goto nomem;
4085                 }
4086                 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
4087                         nvlist_free(nv);
4088                         goto nomem;
4089                 }
4090                 nvlist_free(nv);
4091         }
4092
4093         free((void *)(uintptr_t)zc.zc_nvlist_dst);
4094         return (0);
4095
4096 nomem:
4097         free((void *)(uintptr_t)zc.zc_nvlist_dst);
4098         return (no_memory(zhp->zpool_hdl));
4099 }
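
/*
 * Illustrative sketch (not part of libzfs): walking the list produced by
 * zpool_get_errlog() above.  Each element is an nvlist holding the dataset
 * and object numbers of a block with a persistent error; zpool(8) converts
 * these to "dataset:object" or file names for display.
 */
static int
example_print_errlog(zpool_handle_t *zhp)
{
        nvlist_t *nverrlist = NULL;
        nvpair_t *elem = NULL;

        if (zpool_get_errlog(zhp, &nverrlist) != 0)
                return (-1);

        /* With no logged errors the list may be left unallocated. */
        if (nverrlist == NULL)
                return (0);

        while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
                nvlist_t *nv = fnvpair_value_nvlist(elem);

                (void) printf("%llu:%llu\n",
                    (u_longlong_t)fnvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET),
                    (u_longlong_t)fnvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT));
        }

        nvlist_free(nverrlist);
        return (0);
}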
4100
4101 /*
4102  * Upgrade a ZFS pool to the latest on-disk version.
4103  */
4104 int
4105 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
4106 {
4107         zfs_cmd_t zc = {"\0"};
4108         libzfs_handle_t *hdl = zhp->zpool_hdl;
4109
4110         (void) strcpy(zc.zc_name, zhp->zpool_name);
4111         zc.zc_cookie = new_version;
4112
4113         if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
4114                 return (zpool_standard_error_fmt(hdl, errno,
4115                     dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
4116                     zhp->zpool_name));
4117         return (0);
4118 }
4119
4120 void
4121 zfs_save_arguments(int argc, char **argv, char *string, int len)
4122 {
4123         int i;
4124
4125         (void) strlcpy(string, basename(argv[0]), len);
4126         for (i = 1; i < argc; i++) {
4127                 (void) strlcat(string, " ", len);
4128                 (void) strlcat(string, argv[i], len);
4129         }
4130 }
4131
4132 int
4133 zpool_log_history(libzfs_handle_t *hdl, const char *message)
4134 {
4135         zfs_cmd_t zc = {"\0"};
4136         nvlist_t *args;
4137         int err;
4138
4139         args = fnvlist_alloc();
4140         fnvlist_add_string(args, "message", message);
4141         err = zcmd_write_src_nvlist(hdl, &zc, args);
4142         if (err == 0)
4143                 err = zfs_ioctl(hdl, ZFS_IOC_LOG_HISTORY, &zc);
4144         nvlist_free(args);
4145         zcmd_free_nvlists(&zc);
4146         return (err);
4147 }
4148
4149 /*
4150  * Perform ioctl to get some command history of a pool.
4151  *
4152  * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
4153  * logical offset of the history buffer to start reading from.
4154  *
4155  * Upon return, 'off' is the next logical offset to read from and
4156  * 'len' is the actual amount of bytes read into 'buf'.
4157  */
4158 static int
4159 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
4160 {
4161         zfs_cmd_t zc = {"\0"};
4162         libzfs_handle_t *hdl = zhp->zpool_hdl;
4163
4164         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4165
4166         zc.zc_history = (uint64_t)(uintptr_t)buf;
4167         zc.zc_history_len = *len;
4168         zc.zc_history_offset = *off;
4169
4170         if (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
4171                 switch (errno) {
4172                 case EPERM:
4173                         return (zfs_error_fmt(hdl, EZFS_PERM,
4174                             dgettext(TEXT_DOMAIN,
4175                             "cannot show history for pool '%s'"),
4176                             zhp->zpool_name));
4177                 case ENOENT:
4178                         return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
4179                             dgettext(TEXT_DOMAIN, "cannot get history for pool "
4180                             "'%s'"), zhp->zpool_name));
4181                 case ENOTSUP:
4182                         return (zfs_error_fmt(hdl, EZFS_BADVERSION,
4183                             dgettext(TEXT_DOMAIN, "cannot get history for pool "
4184                             "'%s', pool must be upgraded"), zhp->zpool_name));
4185                 default:
4186                         return (zpool_standard_error_fmt(hdl, errno,
4187                             dgettext(TEXT_DOMAIN,
4188                             "cannot get history for '%s'"), zhp->zpool_name));
4189                 }
4190         }
4191
4192         *len = zc.zc_history_len;
4193         *off = zc.zc_history_offset;
4194
4195         return (0);
4196 }
4197
4198 /*
4199  * Retrieve the command history of a pool.
4200  */
4201 int
4202 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp, uint64_t *off,
4203     boolean_t *eof)
4204 {
4205         char *buf;
4206         int buflen = 128 * 1024;
4207         nvlist_t **records = NULL;
4208         uint_t numrecords = 0;
4209         int err, i;
4210         uint64_t start = *off;
4211
4212         buf = malloc(buflen);
4213         if (buf == NULL)
4214                 return (ENOMEM);
4215         /* process about 1MB at a time */
4216         while (*off - start < 1024 * 1024) {
4217                 uint64_t bytes_read = buflen;
4218                 uint64_t leftover;
4219
4220                 if ((err = get_history(zhp, buf, off, &bytes_read)) != 0)
4221                         break;
4222
4223                 /* if nothing else was read in, we're at EOF, just return */
4224                 if (!bytes_read) {
4225                         *eof = B_TRUE;
4226                         break;
4227                 }
4228
4229                 if ((err = zpool_history_unpack(buf, bytes_read,
4230                     &leftover, &records, &numrecords)) != 0)
4231                         break;
4232                 *off -= leftover;
4233                 if (leftover == bytes_read) {
4234                         /*
4235                          * no progress made, because buffer is not big enough
4236                          * to hold this record; resize and retry.
4237                          */
4238                         buflen *= 2;
4239                         free(buf);
4240                         buf = malloc(buflen);
4241                         if (buf == NULL)
4242                                 return (ENOMEM);
4243                 }
4244         }
4245
4246         free(buf);
4247
4248         if (!err) {
4249                 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
4250                 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
4251                     records, numrecords) == 0);
4252         }
4253         for (i = 0; i < numrecords; i++)
4254                 nvlist_free(records[i]);
4255         free(records);
4256
4257         return (err);
4258 }
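
/*
 * Illustrative sketch (not part of libzfs): reading a pool's full command
 * history by calling zpool_get_history() above until it reports EOF, as
 * 'zpool history' does, and printing each stored command string.  Internally
 * logged events carry no ZPOOL_HIST_CMD entry and are simply skipped here.
 */
static int
example_print_history(zpool_handle_t *zhp)
{
        uint64_t off = 0;
        boolean_t eof = B_FALSE;

        while (!eof) {
                nvlist_t *nvhis, **records;
                uint_t numrecords, i;
                int err;

                if ((err = zpool_get_history(zhp, &nvhis, &off, &eof)) != 0)
                        return (err);

                if (nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
                    &records, &numrecords) == 0) {
                        for (i = 0; i < numrecords; i++) {
                                char *cmd;

                                if (nvlist_lookup_string(records[i],
                                    ZPOOL_HIST_CMD, &cmd) == 0)
                                        (void) printf("%s\n", cmd);
                        }
                }
                nvlist_free(nvhis);
        }

        return (0);
}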
4259
4260 /*
4261  * Retrieve the next event given the passed 'zevent_fd' file descriptor.
4262  * If there is a new event available 'nvp' will contain a newly allocated
4263  * nvlist and 'dropped' will be set to the number of missed events since
4264  * the last call to this function.  When 'nvp' is set to NULL it indicates
4265  * no new events are available.  In either case the function returns 0 and
4266  * it is up to the caller to free 'nvp'.  In the case of a fatal error the
4267  * function will return a non-zero value.  When the function is called in
4268  * blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed),
4269  * it will not return until a new event is available.
4270  */
4271 int
4272 zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
4273     int *dropped, unsigned flags, int zevent_fd)
4274 {
4275         zfs_cmd_t zc = {"\0"};
4276         int error = 0;
4277
4278         *nvp = NULL;
4279         *dropped = 0;
4280         zc.zc_cleanup_fd = zevent_fd;
4281
4282         if (flags & ZEVENT_NONBLOCK)
4283                 zc.zc_guid = ZEVENT_NONBLOCK;
4284
4285         if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
4286                 return (-1);
4287
4288 retry:
4289         if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
4290                 switch (errno) {
4291                 case ESHUTDOWN:
4292                         error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
4293                             dgettext(TEXT_DOMAIN, "zfs shutdown"));
4294                         goto out;
4295                 case ENOENT:
                        /*
                         * ENOENT is only expected in non-blocking mode when
                         * no event is currently available; report it as an
                         * error otherwise.
                         */
4297                         if (!(flags & ZEVENT_NONBLOCK))
4298                                 error = zpool_standard_error_fmt(hdl, errno,
4299                                     dgettext(TEXT_DOMAIN, "cannot get event"));
4300
4301                         goto out;
4302                 case ENOMEM:
4303                         if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
4304                                 error = zfs_error_fmt(hdl, EZFS_NOMEM,
4305                                     dgettext(TEXT_DOMAIN, "cannot get event"));
4306                                 goto out;
4307                         } else {
4308                                 goto retry;
4309                         }
4310                 default:
4311                         error = zpool_standard_error_fmt(hdl, errno,
4312                             dgettext(TEXT_DOMAIN, "cannot get event"));
4313                         goto out;
4314                 }
4315         }
4316
4317         error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
4318         if (error != 0)
4319                 goto out;
4320
4321         *dropped = (int)zc.zc_cookie;
4322 out:
4323         zcmd_free_nvlists(&zc);
4324
4325         return (error);
4326 }
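
/*
 * Illustrative usage sketch (not compiled): drain pending events without
 * blocking.  'hdl' is assumed to be a libzfs handle and 'fd' a descriptor
 * the caller opened on ZFS_DEV:
 *
 *	nvlist_t *nv;
 *	int dropped;
 *
 *	while (zpool_events_next(hdl, &nv, &dropped, ZEVENT_NONBLOCK,
 *	    fd) == 0 && nv != NULL) {
 *		if (dropped != 0)
 *			(void) fprintf(stderr, "%d events dropped\n", dropped);
 *		... consume the event nvlist ...
 *		nvlist_free(nv);
 *	}
 */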
4327
4328 /*
4329  * Clear all events.
4330  */
4331 int
4332 zpool_events_clear(libzfs_handle_t *hdl, int *count)
4333 {
4334         zfs_cmd_t zc = {"\0"};
4335         char msg[1024];
4336
4337         (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
4338             "cannot clear events"));
4339
4340         if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
4341                 return (zpool_standard_error_fmt(hdl, errno, msg));
4342
4343         if (count != NULL)
4344                 *count = (int)zc.zc_cookie; /* # of events cleared */
4345
4346         return (0);
4347 }
4348
4349 /*
4350  * Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for
4351  * the passed zevent_fd file handle.  On success zero is returned,
4352  * otherwise -1 is returned and hdl->libzfs_error is set to the errno.
4353  */
4354 int
4355 zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)
4356 {
4357         zfs_cmd_t zc = {"\0"};
4358         int error = 0;
4359
4360         zc.zc_guid = eid;
4361         zc.zc_cleanup_fd = zevent_fd;
4362
4363         if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {
4364                 switch (errno) {
4365                 case ENOENT:
4366                         error = zfs_error_fmt(hdl, EZFS_NOENT,
4367                             dgettext(TEXT_DOMAIN, "cannot get event"));
4368                         break;
4369
4370                 case ENOMEM:
4371                         error = zfs_error_fmt(hdl, EZFS_NOMEM,
4372                             dgettext(TEXT_DOMAIN, "cannot get event"));
4373                         break;
4374
4375                 default:
4376                         error = zpool_standard_error_fmt(hdl, errno,
4377                             dgettext(TEXT_DOMAIN, "cannot get event"));
4378                         break;
4379                 }
4380         }
4381
4382         return (error);
4383 }
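
/*
 * Illustrative usage sketch (not compiled): rewind the event stream on the
 * caller-provided descriptor 'fd' so that zpool_events_next() replays
 * events from the beginning:
 *
 *	if (zpool_events_seek(hdl, ZEVENT_SEEK_START, fd) != 0)
 *		... handle the error reported on 'hdl' ...
 */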
4384
4385 static void
4386 zpool_obj_to_path_impl(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
4387     char *pathname, size_t len, boolean_t always_unmounted)
4388 {
4389         zfs_cmd_t zc = {"\0"};
4390         boolean_t mounted = B_FALSE;
4391         char *mntpnt = NULL;
4392         char dsname[ZFS_MAX_DATASET_NAME_LEN];
4393
4394         if (dsobj == 0) {
4395                 /* special case for the MOS */
4396                 (void) snprintf(pathname, len, "<metadata>:<0x%llx>",
4397                     (longlong_t)obj);
4398                 return;
4399         }
4400
4401         /* get the dataset's name */
4402         (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
4403         zc.zc_obj = dsobj;
4404         if (zfs_ioctl(zhp->zpool_hdl,
4405             ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
4406                 /* just write out a path of two object numbers */
4407                 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
4408                     (longlong_t)dsobj, (longlong_t)obj);
4409                 return;
4410         }
4411         (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
4412
4413         /* find out if the dataset is mounted */
4414         mounted = !always_unmounted && is_mounted(zhp->zpool_hdl, dsname,
4415             &mntpnt);
4416
4417         /* get the corrupted object's path */
4418         (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
4419         zc.zc_obj = obj;
4420         if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_OBJ_TO_PATH,
4421             &zc) == 0) {
4422                 if (mounted) {
4423                         (void) snprintf(pathname, len, "%s%s", mntpnt,
4424                             zc.zc_value);
4425                 } else {
4426                         (void) snprintf(pathname, len, "%s:%s",
4427                             dsname, zc.zc_value);
4428                 }
4429         } else {
4430                 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
4431                     (longlong_t)obj);
4432         }
4433         free(mntpnt);
4434 }
4435
4436 void
4437 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
4438     char *pathname, size_t len)
4439 {
4440         zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_FALSE);
4441 }
4442
4443 void
4444 zpool_obj_to_path_ds(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
4445     char *pathname, size_t len)
4446 {
4447         zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_TRUE);
4448 }
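
/*
 * Illustrative usage sketch (not compiled): the 'dsobj'/'obj' pairs passed
 * here typically come from the pool's persistent error log (see
 * zpool_get_errlog()):
 *
 *	char path[MAXPATHLEN];
 *
 *	zpool_obj_to_path(zhp, dsobj, obj, path, sizeof (path));
 *	(void) printf("%s\n", path);
 */
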
4449 /*
4450  * Wait while the specified activity is in progress in the pool.
4451  */
4452 int
4453 zpool_wait(zpool_handle_t *zhp, zpool_wait_activity_t activity)
4454 {
4455         boolean_t missing;
4456
4457         int error = zpool_wait_status(zhp, activity, &missing, NULL);
4458
4459         if (missing) {
4460                 (void) zpool_standard_error_fmt(zhp->zpool_hdl, ENOENT,
4461                     dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),
4462                     zhp->zpool_name);
4463                 return (ENOENT);
4464         } else {
4465                 return (error);
4466         }
4467 }
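
/*
 * Illustrative usage sketch (not compiled): block until any in-progress
 * scrub of the pool referenced by 'zhp' has completed:
 *
 *	if (zpool_wait(zhp, ZPOOL_WAIT_SCRUB) != 0)
 *		... handle the error ...
 */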
4468
4469 /*
4470  * Wait for the given activity and return the status of the wait (whether or not
4471  * any waiting was done) in the 'waited' parameter. Non-existent pools are
4472  * reported via the 'missing' parameter, rather than by printing an error
4473  * message. This is convenient when this function is called in a loop over a
4474  * long period of time (as it is, for example, by zpool's wait cmd). In that
4475  * scenario, a pool being exported or destroyed should be considered a normal
4476  * event, so we don't want to print an error when we find that the pool doesn't
4477  * exist.
4478  */
4479 int
4480 zpool_wait_status(zpool_handle_t *zhp, zpool_wait_activity_t activity,
4481     boolean_t *missing, boolean_t *waited)
4482 {
4483         int error = lzc_wait(zhp->zpool_name, activity, waited);
4484         *missing = (error == ENOENT);
4485         if (*missing)
4486                 return (0);
4487
4488         if (error != 0) {
4489                 (void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
4490                     dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),
4491                     zhp->zpool_name);
4492         }
4493
4494         return (error);
4495 }
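
/*
 * Illustrative usage sketch (not compiled): wait for a resilver without
 * treating a pool that disappears mid-wait as an error, roughly as the
 * 'zpool wait' command does:
 *
 *	boolean_t missing, waited;
 *
 *	if (zpool_wait_status(zhp, ZPOOL_WAIT_RESILVER, &missing,
 *	    &waited) == 0 && !missing && waited)
 *		(void) printf("resilver finished\n");
 */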
4496
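/*
 * Store the given boot environment string in the pool, from where the boot
 * loader (or zpool_get_bootenv()) can later read it back.
 */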
4497 int
4498 zpool_set_bootenv(zpool_handle_t *zhp, const char *envmap)
4499 {
4500         int error = lzc_set_bootenv(zhp->zpool_name, envmap);
4501         if (error != 0) {
4502                 (void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
4503                     dgettext(TEXT_DOMAIN,
4504                     "error setting bootenv in pool '%s'"), zhp->zpool_name);
4505         }
4506
4507         return (error);
4508 }
4509
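/*
 * Copy up to 'size' bytes of the pool's boot environment string, starting at
 * 'offset', into 'outbuf'.  Returns the length of the data copied, 0 if
 * 'offset' is beyond the end of the string, or -1 on error.
 */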
4510 int
4511 zpool_get_bootenv(zpool_handle_t *zhp, char *outbuf, size_t size, off_t offset)
4512 {
4513         nvlist_t *nvl = NULL;
4514         int error = lzc_get_bootenv(zhp->zpool_name, &nvl);
4515         if (error != 0) {
4516                 (void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
4517                     dgettext(TEXT_DOMAIN,
4518                     "error getting bootenv in pool '%s'"), zhp->zpool_name);
4519                 return (-1);
4520         }
4521         char *envmap = fnvlist_lookup_string(nvl, "envmap");
4522         if (offset >= strlen(envmap)) {
4523                 fnvlist_free(nvl);
4524                 return (0);
4525         }
4526
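        /*
         * Note: the copy is NUL-terminated only when the remaining envmap
         * data fits in 'size', so callers should use the returned length.
         */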
4527         strncpy(outbuf, envmap + offset, size);
4528         int bytes = MIN(strlen(envmap + offset), size);
4529         fnvlist_free(nvl);
4530         return (bytes);
4531 }
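
/*
 * Illustrative usage sketch (not compiled; the envmap value shown is
 * hypothetical): store a boot environment string and read it back:
 *
 *	char buf[256];
 *	int n;
 *
 *	if (zpool_set_bootenv(zhp, "currdev=zfs:rpool/ROOT/default:") == 0) {
 *		n = zpool_get_bootenv(zhp, buf, sizeof (buf) - 1, 0);
 *		if (n > 0) {
 *			buf[n] = '\0';
 *			(void) printf("%s\n", buf);
 *		}
 *	}
 */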