/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 * Copyright 2015 RackTop Systems.
 * Copyright 2016 Nexenta Systems, Inc.
 */

/*
 * Pool import support functions.
 *
 * To import a pool, we rely on reading the configuration information from the
 * ZFS label of each device.  If we successfully read the label, then we
 * organize the configuration information in the following hierarchy:
 *
 *      pool guid -> toplevel vdev guid -> label txg
 *
 * Duplicate entries matching this same tuple will be discarded.  Once we have
 * examined every device, we pick the best label txg config for each toplevel
 * vdev.  We then arrange these toplevel vdevs into a complete pool config, and
 * update any paths that have changed.  Finally, we attempt to import the pool
 * using our derived config, and record the results.
 */

#include <aio.h>
#include <ctype.h>
#include <devid.h>
#include <dirent.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <thread_pool.h>
#include <libgeom.h>

#include <sys/vdev_impl.h>

#include "libzfs.h"
#include "libzfs_impl.h"

/*
 * Intermediate structures used to gather configuration information.
 */
typedef struct config_entry {
        uint64_t                ce_txg;
        nvlist_t                *ce_config;
        struct config_entry     *ce_next;
} config_entry_t;

typedef struct vdev_entry {
        uint64_t                ve_guid;
        config_entry_t          *ve_configs;
        struct vdev_entry       *ve_next;
} vdev_entry_t;

typedef struct pool_entry {
        uint64_t                pe_guid;
        vdev_entry_t            *pe_vdevs;
        struct pool_entry       *pe_next;
} pool_entry_t;

typedef struct name_entry {
        char                    *ne_name;
        uint64_t                ne_guid;
        struct name_entry       *ne_next;
} name_entry_t;

typedef struct pool_list {
        pool_entry_t            *pools;
        name_entry_t            *names;
} pool_list_t;

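/*
 * Illustrative sketch (not part of the library): once a discovery pass has
 * populated a pool_list_t, the "pool guid -> toplevel vdev guid -> label txg"
 * hierarchy described above can be walked with three nested loops.  The
 * variable 'pl' below is a hypothetical, already-populated list:
 *
 *      pool_entry_t *pe;
 *      vdev_entry_t *ve;
 *      config_entry_t *ce;
 *
 *      for (pe = pl->pools; pe != NULL; pe = pe->pe_next)
 *              for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next)
 *                      for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next)
 *                              (void) printf("pool %ju vdev %ju txg %ju\n",
 *                                  (uintmax_t)pe->pe_guid,
 *                                  (uintmax_t)ve->ve_guid,
 *                                  (uintmax_t)ce->ce_txg);
 *
 * get_configs() below performs essentially this walk, keeping only the
 * config with the highest txg for each toplevel vdev.
 */
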
static char *
get_devid(const char *path)
{
#ifdef have_devid
        int fd;
        ddi_devid_t devid;
        char *minor, *ret;

        if ((fd = open(path, O_RDONLY)) < 0)
                return (NULL);

        minor = NULL;
        ret = NULL;
        if (devid_get(fd, &devid) == 0) {
                if (devid_get_minor_name(fd, &minor) == 0)
                        ret = devid_str_encode(devid, minor);
                if (minor != NULL)
                        devid_str_free(minor);
                devid_free(devid);
        }
        (void) close(fd);

        return (ret);
#else
        return (NULL);
#endif
}


/*
 * Go through and fix up any path and/or devid information for the given vdev
 * configuration.
 */
static int
fix_paths(nvlist_t *nv, name_entry_t *names)
{
        nvlist_t **child;
        uint_t c, children;
        uint64_t guid;
        name_entry_t *ne, *best;
        char *path, *devid;
        int matched;

        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
            &child, &children) == 0) {
                for (c = 0; c < children; c++)
                        if (fix_paths(child[c], names) != 0)
                                return (-1);
                return (0);
        }

        /*
         * This is a leaf (file or disk) vdev.  In either case, go through
         * the name list and see if we find a matching guid.  If so, replace
         * the path and see if we can calculate a new devid.
         *
         * There may be multiple names associated with a particular guid, in
         * which case we have overlapping slices or multiple paths to the same
         * disk.  If this is the case, then we want to pick the path that is
         * the most similar to the original, where "most similar" is the number
         * of matching characters starting from the end of the path.  This will
         * preserve slice numbers even if the disks have been reorganized, and
         * will also catch preferred disk names if multiple paths exist.
         */
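        /*
         * Worked example (hypothetical device names): if the label recorded
         * the path /dev/dsk/c0t1d0s3 and the name list contains
         * /dev/dsk/c2t1d0s3 and /dev/dsk/c0t1d0s5, the suffix comparison
         * below matches 6 characters ("t1d0s3") for the first candidate and
         * 0 for the second, so /dev/dsk/c2t1d0s3 wins and the slice number
         * s3 is preserved.
         */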
        verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
        if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
                path = NULL;

        matched = 0;
        best = NULL;
        for (ne = names; ne != NULL; ne = ne->ne_next) {
                if (ne->ne_guid == guid) {
                        const char *src, *dst;
                        int count;

                        if (path == NULL) {
                                best = ne;
                                break;
                        }

                        src = ne->ne_name + strlen(ne->ne_name) - 1;
                        dst = path + strlen(path) - 1;
                        for (count = 0; src >= ne->ne_name && dst >= path;
                            src--, dst--, count++)
                                if (*src != *dst)
                                        break;

                        /*
                         * At this point, 'count' is the number of characters
                         * matched from the end.
                         */
                        if (count > matched || best == NULL) {
                                best = ne;
                                matched = count;
                        }
                }
        }

        if (best == NULL)
                return (0);

        if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
                return (-1);

        if ((devid = get_devid(best->ne_name)) == NULL) {
                (void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
        } else {
                if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0) {
                        devid_str_free(devid);
                        return (-1);
                }
                devid_str_free(devid);
        }

        return (0);
}

/*
 * Add the given configuration to the list of known devices.
 */
static int
add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
    nvlist_t *config)
{
        uint64_t pool_guid, vdev_guid, top_guid, txg, state;
        pool_entry_t *pe;
        vdev_entry_t *ve;
        config_entry_t *ce;
        name_entry_t *ne;

        /*
         * If this is a hot spare not currently in use or level 2 cache
         * device, add it to the list of names to translate, but don't do
         * anything else.
         */
        if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
            &state) == 0 &&
            (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
                if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
                        return (-1);

                if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
                        free(ne);
                        return (-1);
                }

                ne->ne_guid = vdev_guid;
                ne->ne_next = pl->names;
                pl->names = ne;

                return (0);
        }

        /*
         * If we have a valid config but cannot read any of these fields, then
         * it means we have a half-initialized label.  In vdev_label_init()
         * we write a label with txg == 0 so that we can identify the device
         * in case the user refers to the same disk later on.  If we fail to
         * create the pool, we'll be left with a label in this state
         * which should not be considered part of a valid pool.
         */
        if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
            &pool_guid) != 0 ||
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
            &vdev_guid) != 0 ||
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
            &top_guid) != 0 ||
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
            &txg) != 0 || txg == 0) {
                return (0);
        }

        /*
         * First, see if we know about this pool.  If not, then add it to the
         * list of known pools.
         */
        for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
                if (pe->pe_guid == pool_guid)
                        break;
        }

        if (pe == NULL) {
                if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
                        return (-1);
                }
                pe->pe_guid = pool_guid;
                pe->pe_next = pl->pools;
                pl->pools = pe;
        }

        /*
         * Second, see if we know about this toplevel vdev.  Add it if it's
         * missing.
         */
        for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
                if (ve->ve_guid == top_guid)
                        break;
        }

        if (ve == NULL) {
                if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
                        return (-1);
                }
                ve->ve_guid = top_guid;
                ve->ve_next = pe->pe_vdevs;
                pe->pe_vdevs = ve;
        }

        /*
         * Third, see if we have a config with a matching transaction group.  If
         * so, then we do nothing.  Otherwise, add it to the list of known
         * configs.
         */
        for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
                if (ce->ce_txg == txg)
                        break;
        }

        if (ce == NULL) {
                if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) {
                        return (-1);
                }
                ce->ce_txg = txg;
                ce->ce_config = fnvlist_dup(config);
                ce->ce_next = ve->ve_configs;
                ve->ve_configs = ce;
        }

        /*
         * At this point we've successfully added our config to the list of
         * known configs.  The last thing to do is add the vdev guid -> path
         * mappings so that we can fix up the configuration as necessary before
         * doing the import.
         */
        if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
                return (-1);

        if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
                free(ne);
                return (-1);
        }

        ne->ne_guid = vdev_guid;
        ne->ne_next = pl->names;
        pl->names = ne;

        return (0);
}

/*
 * Determine whether an active (imported) pool with the given name exists and
 * matches the given GUID.  The result is returned in *isactive.
 */
static int
pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
    boolean_t *isactive)
{
        zpool_handle_t *zhp;
        uint64_t theguid;

        if (zpool_open_silent(hdl, name, &zhp) != 0)
                return (-1);

        if (zhp == NULL) {
                *isactive = B_FALSE;
                return (0);
        }

        verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
            &theguid) == 0);

        zpool_close(zhp);

        *isactive = (theguid == guid);
        return (0);
}

static nvlist_t *
refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
{
        nvlist_t *nvl;
        zfs_cmd_t zc = { 0 };
        int err, dstbuf_size;

        if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
                return (NULL);

        dstbuf_size = MAX(CONFIG_BUF_MINSIZE, zc.zc_nvlist_conf_size * 4);

        if (zcmd_alloc_dst_nvlist(hdl, &zc, dstbuf_size) != 0) {
                zcmd_free_nvlists(&zc);
                return (NULL);
        }

        while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
            &zc)) != 0 && errno == ENOMEM) {
                if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
                        zcmd_free_nvlists(&zc);
                        return (NULL);
                }
        }

        if (err) {
                zcmd_free_nvlists(&zc);
                return (NULL);
        }

        if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
                zcmd_free_nvlists(&zc);
                return (NULL);
        }

        zcmd_free_nvlists(&zc);
        return (nvl);
}

/*
 * Determine if the vdev id is a hole in the namespace.
 */
boolean_t
vdev_is_hole(uint64_t *hole_array, uint_t holes, uint_t id)
{
        for (int c = 0; c < holes; c++) {

                /* Top-level is a hole */
                if (hole_array[c] == id)
                        return (B_TRUE);
        }
        return (B_FALSE);
}

/*
 * Convert our list of pools into the definitive set of configurations.  We
 * start by picking the best config for each toplevel vdev.  Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool.  We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok,
    nvlist_t *policy)
{
        pool_entry_t *pe;
        vdev_entry_t *ve;
        config_entry_t *ce;
        nvlist_t *ret = NULL, *config = NULL, *tmp = NULL, *nvtop, *nvroot;
        nvlist_t **spares, **l2cache;
        uint_t i, nspares, nl2cache;
        boolean_t config_seen;
        uint64_t best_txg;
        char *name, *hostname = NULL;
        uint64_t guid;
        uint_t children = 0;
        nvlist_t **child = NULL;
        uint_t holes;
        uint64_t *hole_array, max_id;
        uint_t c;
        boolean_t isactive;
        uint64_t hostid;
        nvlist_t *nvl;
        boolean_t found_one = B_FALSE;
        boolean_t valid_top_config = B_FALSE;

        if (nvlist_alloc(&ret, 0, 0) != 0)
                goto nomem;

        for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
                uint64_t id, max_txg = 0;

                if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
                        goto nomem;
                config_seen = B_FALSE;

                /*
                 * Iterate over all toplevel vdevs.  Grab the pool configuration
                 * from the first one we find, and then go through the rest and
                 * add them as necessary to the 'vdevs' member of the config.
                 */
                for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {

                        /*
                         * Determine the best configuration for this vdev by
                         * selecting the config with the latest transaction
                         * group.
                         */
                        best_txg = 0;
                        for (ce = ve->ve_configs; ce != NULL;
                            ce = ce->ce_next) {

                                if (ce->ce_txg > best_txg) {
                                        tmp = ce->ce_config;
                                        best_txg = ce->ce_txg;
                                }
                        }

                        /*
                         * We rely on the fact that the max txg for the
                         * pool will contain the most up-to-date information
                         * about the valid top-levels in the vdev namespace.
                         */
                        if (best_txg > max_txg) {
                                (void) nvlist_remove(config,
                                    ZPOOL_CONFIG_VDEV_CHILDREN,
                                    DATA_TYPE_UINT64);
                                (void) nvlist_remove(config,
                                    ZPOOL_CONFIG_HOLE_ARRAY,
                                    DATA_TYPE_UINT64_ARRAY);

                                max_txg = best_txg;
                                hole_array = NULL;
                                holes = 0;
                                max_id = 0;
                                valid_top_config = B_FALSE;

                                if (nvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_VDEV_CHILDREN, &max_id) == 0) {
                                        verify(nvlist_add_uint64(config,
                                            ZPOOL_CONFIG_VDEV_CHILDREN,
                                            max_id) == 0);
                                        valid_top_config = B_TRUE;
                                }

                                if (nvlist_lookup_uint64_array(tmp,
                                    ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
                                    &holes) == 0) {
                                        verify(nvlist_add_uint64_array(config,
                                            ZPOOL_CONFIG_HOLE_ARRAY,
                                            hole_array, holes) == 0);
                                }
                        }

                        if (!config_seen) {
                                /*
                                 * Copy the relevant pieces of data to the pool
                                 * configuration:
                                 *
                                 *      version
                                 *      pool guid
                                 *      name
                                 *      comment (if available)
                                 *      pool state
                                 *      hostid (if available)
                                 *      hostname (if available)
                                 */
                                uint64_t state, version;
                                char *comment = NULL;

                                version = fnvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_VERSION);
                                fnvlist_add_uint64(config,
                                    ZPOOL_CONFIG_VERSION, version);
                                guid = fnvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_POOL_GUID);
                                fnvlist_add_uint64(config,
                                    ZPOOL_CONFIG_POOL_GUID, guid);
                                name = fnvlist_lookup_string(tmp,
                                    ZPOOL_CONFIG_POOL_NAME);
                                fnvlist_add_string(config,
                                    ZPOOL_CONFIG_POOL_NAME, name);

                                if (nvlist_lookup_string(tmp,
                                    ZPOOL_CONFIG_COMMENT, &comment) == 0)
                                        fnvlist_add_string(config,
                                            ZPOOL_CONFIG_COMMENT, comment);

                                state = fnvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_POOL_STATE);
                                fnvlist_add_uint64(config,
                                    ZPOOL_CONFIG_POOL_STATE, state);

                                hostid = 0;
                                if (nvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
                                        fnvlist_add_uint64(config,
                                            ZPOOL_CONFIG_HOSTID, hostid);
                                        hostname = fnvlist_lookup_string(tmp,
                                            ZPOOL_CONFIG_HOSTNAME);
                                        fnvlist_add_string(config,
                                            ZPOOL_CONFIG_HOSTNAME, hostname);
                                }

                                config_seen = B_TRUE;
                        }

                        /*
                         * Add this top-level vdev to the child array.
                         */
                        verify(nvlist_lookup_nvlist(tmp,
                            ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
                        verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
                            &id) == 0);

                        if (id >= children) {
                                nvlist_t **newchild;

                                newchild = zfs_alloc(hdl, (id + 1) *
                                    sizeof (nvlist_t *));
                                if (newchild == NULL)
                                        goto nomem;

                                for (c = 0; c < children; c++)
                                        newchild[c] = child[c];

                                free(child);
                                child = newchild;
                                children = id + 1;
                        }
                        if (nvlist_dup(nvtop, &child[id], 0) != 0)
                                goto nomem;

                }

                /*
                 * If we have information about all the top-levels then
                 * clean up the nvlist which we've constructed. This
                 * means removing any extraneous devices that are
                 * beyond the valid range or adding devices to the end
                 * of our array which appear to be missing.
                 */
                if (valid_top_config) {
                        if (max_id < children) {
                                for (c = max_id; c < children; c++)
                                        nvlist_free(child[c]);
                                children = max_id;
                        } else if (max_id > children) {
                                nvlist_t **newchild;

                                newchild = zfs_alloc(hdl, (max_id) *
                                    sizeof (nvlist_t *));
                                if (newchild == NULL)
                                        goto nomem;

                                for (c = 0; c < children; c++)
                                        newchild[c] = child[c];

                                free(child);
                                child = newchild;
                                children = max_id;
                        }
                }

                verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
                    &guid) == 0);

                /*
                 * The vdev namespace may contain holes as a result of
                 * device removal. We must add them back into the vdev
                 * tree before we process any missing devices.
                 */
                if (holes > 0) {
                        ASSERT(valid_top_config);

                        for (c = 0; c < children; c++) {
                                nvlist_t *holey;

                                if (child[c] != NULL ||
                                    !vdev_is_hole(hole_array, holes, c))
                                        continue;

                                if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
                                    0) != 0)
                                        goto nomem;

                                /*
                                 * Holes in the namespace are treated as
                                 * "hole" top-level vdevs and have a
                                 * special flag set on them.
                                 */
                                if (nvlist_add_string(holey,
                                    ZPOOL_CONFIG_TYPE,
                                    VDEV_TYPE_HOLE) != 0 ||
                                    nvlist_add_uint64(holey,
                                    ZPOOL_CONFIG_ID, c) != 0 ||
                                    nvlist_add_uint64(holey,
                                    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
                                        nvlist_free(holey);
                                        goto nomem;
                                }
                                child[c] = holey;
                        }
                }

                /*
                 * Look for any missing top-level vdevs.  If this is the case,
                 * create a faked up 'missing' vdev as a placeholder.  We cannot
                 * simply compress the child array, because the kernel performs
                 * certain checks to make sure the vdev IDs match their location
                 * in the configuration.
                 */
                for (c = 0; c < children; c++) {
                        if (child[c] == NULL) {
                                nvlist_t *missing;
                                if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
                                    0) != 0)
                                        goto nomem;
                                if (nvlist_add_string(missing,
                                    ZPOOL_CONFIG_TYPE,
                                    VDEV_TYPE_MISSING) != 0 ||
                                    nvlist_add_uint64(missing,
                                    ZPOOL_CONFIG_ID, c) != 0 ||
                                    nvlist_add_uint64(missing,
                                    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
                                        nvlist_free(missing);
                                        goto nomem;
                                }
                                child[c] = missing;
                        }
                }

                /*
                 * Put all of this pool's top-level vdevs into a root vdev.
                 */
                if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
                        goto nomem;
                if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
                    VDEV_TYPE_ROOT) != 0 ||
                    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
                    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
                    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
                    child, children) != 0) {
                        nvlist_free(nvroot);
                        goto nomem;
                }

                for (c = 0; c < children; c++)
                        nvlist_free(child[c]);
                free(child);
                children = 0;
                child = NULL;

                /*
                 * Go through and fix up any paths and/or devids based on our
                 * known list of vdev GUID -> path mappings.
                 */
                if (fix_paths(nvroot, pl->names) != 0) {
                        nvlist_free(nvroot);
                        goto nomem;
                }

                /*
                 * Add the root vdev to this pool's configuration.
                 */
                if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
                    nvroot) != 0) {
                        nvlist_free(nvroot);
                        goto nomem;
                }
                nvlist_free(nvroot);

                /*
                 * zdb uses this path to report on active pools that were
                 * imported or created using -R.
                 */
                if (active_ok)
                        goto add_pool;

                /*
                 * Determine if this pool is currently active, in which case we
                 * can't actually import it.
                 */
                verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
                    &name) == 0);
                verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
                    &guid) == 0);

                if (pool_active(hdl, name, guid, &isactive) != 0)
                        goto error;

                if (isactive) {
                        nvlist_free(config);
                        config = NULL;
                        continue;
                }

                if (policy != NULL) {
                        if (nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY,
                            policy) != 0)
                                goto nomem;
                }

                if ((nvl = refresh_config(hdl, config)) == NULL) {
                        nvlist_free(config);
                        config = NULL;
                        continue;
                }

                nvlist_free(config);
                config = nvl;

                /*
                 * Go through and update the paths for spares, now that we have
                 * them.
                 */
                verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
                    &nvroot) == 0);
                if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
                    &spares, &nspares) == 0) {
                        for (i = 0; i < nspares; i++) {
                                if (fix_paths(spares[i], pl->names) != 0)
                                        goto nomem;
                        }
                }

                /*
                 * Update the paths for l2cache devices.
                 */
                if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
                    &l2cache, &nl2cache) == 0) {
                        for (i = 0; i < nl2cache; i++) {
                                if (fix_paths(l2cache[i], pl->names) != 0)
                                        goto nomem;
                        }
                }

                /*
                 * Restore the original information read from the actual label.
                 */
                (void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
                    DATA_TYPE_UINT64);
                (void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
                    DATA_TYPE_STRING);
                if (hostid != 0) {
                        verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
                            hostid) == 0);
                        verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
                            hostname) == 0);
                }

add_pool:
                /*
                 * Add this pool to the list of configs.
                 */
                verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
                    &name) == 0);
                if (nvlist_add_nvlist(ret, name, config) != 0)
                        goto nomem;

                found_one = B_TRUE;
                nvlist_free(config);
                config = NULL;
        }

        if (!found_one) {
                nvlist_free(ret);
                ret = NULL;
        }

        return (ret);

nomem:
        (void) no_memory(hdl);
error:
        nvlist_free(config);
        nvlist_free(ret);
        for (c = 0; c < children; c++)
                nvlist_free(child[c]);
        free(child);

        return (NULL);
}

/*
 * Return the offset of the given label.
 */
static uint64_t
label_offset(uint64_t size, int l)
{
        ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
        return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
            0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}
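
/*
 * Worked example (assuming the usual 256KB vdev_label_t and VDEV_LABELS == 4):
 * on a device whose aligned size is 1073741824 bytes (1GB), the four labels
 * live at
 *
 *      label_offset(size, 0) == 0
 *      label_offset(size, 1) == 262144
 *      label_offset(size, 2) == size - 2 * 262144 == 1073217536
 *      label_offset(size, 3) == size - 1 * 262144 == 1073479680
 *
 * i.e. two labels at the front of the device and two at the very end, so a
 * valid label can survive damage to either end of the disk.
 */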

/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one.
 * Returns 0 on success, or -1 on failure.
 */
int
zpool_read_label(int fd, nvlist_t **config)
{
        struct stat64 statbuf;
        int l;
        vdev_label_t *label;
        uint64_t state, txg, size;

        *config = NULL;

        if (fstat64(fd, &statbuf) == -1)
                return (-1);
        size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

        if ((label = malloc(sizeof (vdev_label_t))) == NULL)
                return (-1);

        for (l = 0; l < VDEV_LABELS; l++) {
                if (pread64(fd, label, sizeof (vdev_label_t),
                    label_offset(size, l)) != sizeof (vdev_label_t))
                        continue;

                if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
                    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
                        continue;

                if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
                    &state) != 0 || state > POOL_STATE_L2CACHE) {
                        nvlist_free(*config);
                        continue;
                }

                if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
                    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
                    &txg) != 0 || txg == 0)) {
                        nvlist_free(*config);
                        continue;
                }

                free(label);
                return (0);
        }

        free(label);
        *config = NULL;
        errno = ENOENT;
        return (-1);
}
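
/*
 * Illustrative caller sketch (hypothetical device name; error handling
 * elided).  Reads the first valid label off a disk and prints the name of
 * the pool it belongs to:
 *
 *      int fd;
 *      nvlist_t *config;
 *      char *name;
 *
 *      if ((fd = open("/dev/da0", O_RDONLY)) < 0)
 *              return;
 *      if (zpool_read_label(fd, &config) == 0) {
 *              if (nvlist_lookup_string(config,
 *                  ZPOOL_CONFIG_POOL_NAME, &name) == 0)
 *                      (void) printf("%s\n", name);
 *              nvlist_free(config);
 *      }
 *      (void) close(fd);
 */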

/*
 * Given a file descriptor, read the label information from all four labels
 * on the device and return the number of valid labels found.  If a valid
 * label is found, its configuration is returned via 'config'; the caller is
 * responsible for freeing it.
 */
int
zpool_read_all_labels(int fd, nvlist_t **config)
{
        struct stat64 statbuf;
        struct aiocb aiocbs[VDEV_LABELS];
        struct aiocb *aiocbps[VDEV_LABELS];
        int l;
        vdev_phys_t *labels;
        uint64_t state, txg, size;
        int nlabels = 0;

        *config = NULL;

        if (fstat64(fd, &statbuf) == -1)
                return (0);
        size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

        if ((labels = calloc(VDEV_LABELS, sizeof (vdev_phys_t))) == NULL)
                return (0);

        memset(aiocbs, 0, sizeof(aiocbs));
        for (l = 0; l < VDEV_LABELS; l++) {
                aiocbs[l].aio_fildes = fd;
                aiocbs[l].aio_offset = label_offset(size, l) + VDEV_SKIP_SIZE;
                aiocbs[l].aio_buf = &labels[l];
                aiocbs[l].aio_nbytes = sizeof(vdev_phys_t);
                aiocbs[l].aio_lio_opcode = LIO_READ;
                aiocbps[l] = &aiocbs[l];
        }

        if (lio_listio(LIO_WAIT, aiocbps, VDEV_LABELS, NULL) != 0) {
                if (errno == EAGAIN || errno == EINTR || errno == EIO) {
                        for (l = 0; l < VDEV_LABELS; l++) {
                                errno = 0;
                                int r = aio_error(&aiocbs[l]);
                                if (r != EINVAL)
                                        (void)aio_return(&aiocbs[l]);
                        }
                }
                free(labels);
                return (0);
        }

        for (l = 0; l < VDEV_LABELS; l++) {
                nvlist_t *temp = NULL;

                if (aio_return(&aiocbs[l]) != sizeof(vdev_phys_t))
                        continue;

                if (nvlist_unpack(labels[l].vp_nvlist,
                    sizeof (labels[l].vp_nvlist), &temp, 0) != 0)
                        continue;

                if (nvlist_lookup_uint64(temp, ZPOOL_CONFIG_POOL_STATE,
                    &state) != 0 || state > POOL_STATE_L2CACHE) {
                        nvlist_free(temp);
                        temp = NULL;
                        continue;
                }

                if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
                    (nvlist_lookup_uint64(temp, ZPOOL_CONFIG_POOL_TXG,
                    &txg) != 0 || txg == 0)) {
                        nvlist_free(temp);
                        temp = NULL;
                        continue;
                }
                if (temp)
                        *config = temp;

                nlabels++;
        }

        free(labels);
        return (nlabels);
}
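
/*
 * Design note: rather than four sequential pread(2) calls, the function above
 * queues all four label reads at once with lio_listio(LIO_WAIT, ...), so a
 * slow or failing device costs only one round of I/O wait.  A minimal sketch
 * of the same pattern (hypothetical fd, buffer, and sizes):
 *
 *      struct aiocb cb[2], *cbp[2];
 *      static char buf[2][512];
 *
 *      memset(cb, 0, sizeof (cb));
 *      for (int i = 0; i < 2; i++) {
 *              cb[i].aio_fildes = fd;
 *              cb[i].aio_offset = (off_t)i * 512;
 *              cb[i].aio_buf = buf[i];
 *              cb[i].aio_nbytes = 512;
 *              cb[i].aio_lio_opcode = LIO_READ;
 *              cbp[i] = &cb[i];
 *      }
 *      if (lio_listio(LIO_WAIT, cbp, 2, NULL) == 0) {
 *              ... each aio_return(&cb[i]) yields that read's byte count ...
 *      }
 */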

typedef struct rdsk_node {
        char *rn_name;
        int rn_dfd;
        libzfs_handle_t *rn_hdl;
        nvlist_t *rn_config;
        avl_tree_t *rn_avl;
        avl_node_t rn_node;
        boolean_t rn_nozpool;
} rdsk_node_t;

static int
slice_cache_compare(const void *arg1, const void *arg2)
{
        const char  *nm1 = ((rdsk_node_t *)arg1)->rn_name;
        const char  *nm2 = ((rdsk_node_t *)arg2)->rn_name;
        char *nm1slice, *nm2slice;
        int rv;

        /*
         * slices zero and two are the most likely to provide results,
         * so put those first
         */
        nm1slice = strstr(nm1, "s0");
        nm2slice = strstr(nm2, "s0");
        if (nm1slice && !nm2slice) {
                return (-1);
        }
        if (!nm1slice && nm2slice) {
                return (1);
        }
        nm1slice = strstr(nm1, "s2");
        nm2slice = strstr(nm2, "s2");
        if (nm1slice && !nm2slice) {
                return (-1);
        }
        if (!nm1slice && nm2slice) {
                return (1);
        }

        rv = strcmp(nm1, nm2);
        if (rv == 0)
                return (0);
        return (rv > 0 ? 1 : -1);
}

#ifdef illumos
static void
check_one_slice(avl_tree_t *r, char *diskname, uint_t partno,
    diskaddr_t size, uint_t blksz)
{
        rdsk_node_t tmpnode;
        rdsk_node_t *node;
        char sname[MAXNAMELEN];

        tmpnode.rn_name = &sname[0];
        (void) snprintf(tmpnode.rn_name, MAXNAMELEN, "%s%u",
            diskname, partno);
        /*
         * protect against division by zero for disk labels that
         * contain a bogus sector size
         */
        if (blksz == 0)
                blksz = DEV_BSIZE;
        /* too small to contain a zpool? */
        if ((size < (SPA_MINDEVSIZE / blksz)) &&
            (node = avl_find(r, &tmpnode, NULL)))
                node->rn_nozpool = B_TRUE;
}
#endif  /* illumos */

static void
nozpool_all_slices(avl_tree_t *r, const char *sname)
{
#ifdef illumos
        char diskname[MAXNAMELEN];
        char *ptr;
        int i;

        (void) strncpy(diskname, sname, MAXNAMELEN);
        if (((ptr = strrchr(diskname, 's')) == NULL) &&
            ((ptr = strrchr(diskname, 'p')) == NULL))
                return;
        ptr[0] = 's';
        ptr[1] = '\0';
        for (i = 0; i < NDKMAP; i++)
                check_one_slice(r, diskname, i, 0, 1);
        ptr[0] = 'p';
        for (i = 0; i <= FD_NUMPART; i++)
                check_one_slice(r, diskname, i, 0, 1);
#endif  /* illumos */
}

#ifdef illumos
static void
check_slices(avl_tree_t *r, int fd, const char *sname)
{
        struct extvtoc vtoc;
        struct dk_gpt *gpt;
        char diskname[MAXNAMELEN];
        char *ptr;
        int i;

        (void) strncpy(diskname, sname, MAXNAMELEN);
        if ((ptr = strrchr(diskname, 's')) == NULL || !isdigit(ptr[1]))
                return;
        ptr[1] = '\0';

        if (read_extvtoc(fd, &vtoc) >= 0) {
                for (i = 0; i < NDKMAP; i++)
                        check_one_slice(r, diskname, i,
                            vtoc.v_part[i].p_size, vtoc.v_sectorsz);
        } else if (efi_alloc_and_read(fd, &gpt) >= 0) {
                /*
                 * on x86 we'll still have leftover links that point
                 * to slices s[9-15], so use NDKMAP instead
                 */
                for (i = 0; i < NDKMAP; i++)
                        check_one_slice(r, diskname, i,
                            gpt->efi_parts[i].p_size, gpt->efi_lbasize);
                /* nodes p[1-4] are never used with EFI labels */
                ptr[0] = 'p';
                for (i = 1; i <= FD_NUMPART; i++)
                        check_one_slice(r, diskname, i, 0, 1);
                efi_free(gpt);
        }
}
#endif  /* illumos */

static void
zpool_open_func(void *arg)
{
        rdsk_node_t *rn = arg;
        struct stat64 statbuf;
        nvlist_t *config;
        int fd;

        if (rn->rn_nozpool)
                return;
        if ((fd = openat64(rn->rn_dfd, rn->rn_name, O_RDONLY)) < 0) {
                /* symlink to a device that's no longer there */
                if (errno == ENOENT)
                        nozpool_all_slices(rn->rn_avl, rn->rn_name);
                return;
        }
        /*
         * Ignore failed stats.  We only want regular
         * files, character devs and block devs.
         */
        if (fstat64(fd, &statbuf) != 0 ||
            (!S_ISREG(statbuf.st_mode) &&
            !S_ISCHR(statbuf.st_mode) &&
            !S_ISBLK(statbuf.st_mode))) {
                (void) close(fd);
                return;
        }
        /* this file is too small to hold a zpool */
#ifdef illumos
        if (S_ISREG(statbuf.st_mode) &&
            statbuf.st_size < SPA_MINDEVSIZE) {
                (void) close(fd);
                return;
        } else if (!S_ISREG(statbuf.st_mode)) {
                /*
                 * Try to read the disk label first so we don't have to
                 * open a bunch of minor nodes that can't have a zpool.
                 */
                check_slices(rn->rn_avl, fd, rn->rn_name);
        }
#else   /* !illumos */
        if (statbuf.st_size < SPA_MINDEVSIZE) {
                (void) close(fd);
                return;
        }
#endif  /* illumos */

        if ((zpool_read_label(fd, &config)) != 0 && errno == ENOMEM) {
                (void) close(fd);
                (void) no_memory(rn->rn_hdl);
                return;
        }
        (void) close(fd);

        rn->rn_config = config;
}

/*
 * Given a file descriptor, clear (zero) the label information.
 */
int
zpool_clear_label(int fd)
{
        struct stat64 statbuf;
        int l;
        vdev_label_t *label;
        uint64_t size;

        if (fstat64(fd, &statbuf) == -1)
                return (0);
        size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

        if ((label = calloc(sizeof (vdev_label_t), 1)) == NULL)
                return (-1);

        for (l = 0; l < VDEV_LABELS; l++) {
                if (pwrite64(fd, label, sizeof (vdev_label_t),
                    label_offset(size, l)) != sizeof (vdev_label_t)) {
                        free(label);
                        return (-1);
                }
        }

        free(label);
        return (0);
}
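
/*
 * Illustrative use (hypothetical device name): zeroing all four labels of a
 * device that is being repurposed, so a later import scan does not pick up
 * its stale configuration:
 *
 *      int fd;
 *
 *      if ((fd = open("/dev/da1", O_RDWR)) >= 0) {
 *              if (zpool_clear_label(fd) != 0)
 *                      (void) fprintf(stderr, "failed to clear label\n");
 *              (void) close(fd);
 *      }
 */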

/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no args are
 * given (argc is 0), then the default directory (/dev) is searched.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
1229 static nvlist_t *
1230 zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
1231 {
1232         int i, dirs = iarg->paths;
1233         struct dirent64 *dp;
1234         char path[MAXPATHLEN];
1235         char *end, **dir = iarg->path;
1236         size_t pathleft;
1237         nvlist_t *ret = NULL;
1238         static char *default_dir = "/dev";
1239         pool_list_t pools = { 0 };
1240         pool_entry_t *pe, *penext;
1241         vdev_entry_t *ve, *venext;
1242         config_entry_t *ce, *cenext;
1243         name_entry_t *ne, *nenext;
1244         avl_tree_t slice_cache;
1245         rdsk_node_t *slice;
1246         void *cookie;
1247
1248         if (dirs == 0) {
1249                 dirs = 1;
1250                 dir = &default_dir;
1251         }
1252
1253         /*
1254          * Go through and read the label configuration information from every
1255          * possible device, organizing the information according to pool GUID
1256          * and toplevel GUID.
1257          */
1258         for (i = 0; i < dirs; i++) {
1259                 tpool_t *t;
1260                 char rdsk[MAXPATHLEN];
1261                 int dfd;
1262                 boolean_t config_failed = B_FALSE;
1263                 DIR *dirp;
1264
1265                 /* use realpath to normalize the path */
1266                 if (realpath(dir[i], path) == 0) {
1267                         (void) zfs_error_fmt(hdl, EZFS_BADPATH,
1268                             dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
1269                         goto error;
1270                 }
1271                 end = &path[strlen(path)];
1272                 *end++ = '/';
1273                 *end = 0;
1274                 pathleft = &path[sizeof (path)] - end;
1275
1276 #ifdef illumos
1277                 /*
1278                  * Using raw devices instead of block devices when we're
1279                  * reading the labels skips a bunch of slow operations during
1280                  * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
1281                  */
1282                 if (strcmp(path, ZFS_DISK_ROOTD) == 0)
1283                         (void) strlcpy(rdsk, ZFS_RDISK_ROOTD, sizeof (rdsk));
1284                 else
1285 #endif
1286                         (void) strlcpy(rdsk, path, sizeof (rdsk));
1287
1288                 if ((dfd = open64(rdsk, O_RDONLY)) < 0 ||
1289                     (dirp = fdopendir(dfd)) == NULL) {
1290                         if (dfd >= 0)
1291                                 (void) close(dfd);
1292                         zfs_error_aux(hdl, strerror(errno));
1293                         (void) zfs_error_fmt(hdl, EZFS_BADPATH,
1294                             dgettext(TEXT_DOMAIN, "cannot open '%s'"),
1295                             rdsk);
1296                         goto error;
1297                 }
1298
1299                 avl_create(&slice_cache, slice_cache_compare,
1300                     sizeof (rdsk_node_t), offsetof(rdsk_node_t, rn_node));
1301
1302                 if (strcmp(rdsk, "/dev/") == 0) {
1303                         struct gmesh mesh;
1304                         struct gclass *mp;
1305                         struct ggeom *gp;
1306                         struct gprovider *pp;
1307
1308                         errno = geom_gettree(&mesh);
1309                         if (errno != 0) {
1310                                 zfs_error_aux(hdl, strerror(errno));
1311                                 (void) zfs_error_fmt(hdl, EZFS_BADPATH,
1312                                     dgettext(TEXT_DOMAIN, "cannot get GEOM tree"));
1313                                 goto error;
1314                         }
1315
1316                         LIST_FOREACH(mp, &mesh.lg_class, lg_class) {
1317                                 LIST_FOREACH(gp, &mp->lg_geom, lg_geom) {
1318                                         LIST_FOREACH(pp, &gp->lg_provider, lg_provider) {
1319                                                 slice = zfs_alloc(hdl, sizeof (rdsk_node_t));
1320                                                 slice->rn_name = zfs_strdup(hdl, pp->lg_name);
1321                                                 slice->rn_avl = &slice_cache;
1322                                                 slice->rn_dfd = dfd;
1323                                                 slice->rn_hdl = hdl;
1324                                                 slice->rn_nozpool = B_FALSE;
1325                                                 avl_add(&slice_cache, slice);
1326                                         }
1327                                 }
1328                         }
1329
1330                         geom_deletetree(&mesh);
1331                         goto skipdir;
1332                 }
1333
1334                 /*
1335                  * This is not MT-safe, but we have no MT consumers of libzfs
1336                  */
1337                 while ((dp = readdir64(dirp)) != NULL) {
1338                         const char *name = dp->d_name;
1339                         if (name[0] == '.' &&
1340                             (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
1341                                 continue;
1342
1343                         slice = zfs_alloc(hdl, sizeof (rdsk_node_t));
1344                         slice->rn_name = zfs_strdup(hdl, name);
1345                         slice->rn_avl = &slice_cache;
1346                         slice->rn_dfd = dfd;
1347                         slice->rn_hdl = hdl;
1348                         slice->rn_nozpool = B_FALSE;
1349                         avl_add(&slice_cache, slice);
1350                 }
1351 skipdir:
1352                 /*
1353                  * create a thread pool to do all of this in parallel;
1354                  * rn_nozpool is not protected, so this is racy in that
1355                  * multiple tasks could decide that the same slice can
1356                  * not hold a zpool, which is benign.  Also choose
1357                  * double the number of processors; we hold a lot of
1358                  * locks in the kernel, so going beyond this doesn't
1359                  * buy us much.
1360                  */
1361                 t = tpool_create(1, 2 * sysconf(_SC_NPROCESSORS_ONLN),
1362                     0, NULL);
1363                 for (slice = avl_first(&slice_cache); slice;
1364                     (slice = avl_walk(&slice_cache, slice,
1365                     AVL_AFTER)))
1366                         (void) tpool_dispatch(t, zpool_open_func, slice);
1367                 tpool_wait(t);
1368                 tpool_destroy(t);
1369
1370                 cookie = NULL;
1371                 while ((slice = avl_destroy_nodes(&slice_cache,
1372                     &cookie)) != NULL) {
1373                         if (slice->rn_config != NULL && !config_failed) {
1374                                 nvlist_t *config = slice->rn_config;
1375                                 boolean_t matched = B_TRUE;
1376
1377                                 if (iarg->poolname != NULL) {
1378                                         char *pname;
1379
1380                                         matched = nvlist_lookup_string(config,
1381                                             ZPOOL_CONFIG_POOL_NAME,
1382                                             &pname) == 0 &&
1383                                             strcmp(iarg->poolname, pname) == 0;
1384                                 } else if (iarg->guid != 0) {
1385                                         uint64_t this_guid;
1386
1387                                         matched = nvlist_lookup_uint64(config,
1388                                             ZPOOL_CONFIG_POOL_GUID,
1389                                             &this_guid) == 0 &&
1390                                             iarg->guid == this_guid;
1391                                 }
1392                                 if (matched) {
1393                                         /*
1394                                          * use the non-raw path for the config
1395                                          */
1396                                         (void) strlcpy(end, slice->rn_name,
1397                                             pathleft);
1398                                         if (add_config(hdl, &pools, path,
1399                                             config) != 0)
1400                                                 config_failed = B_TRUE;
1401                                 }
1402                                 nvlist_free(config);
1403                         }
1404                         free(slice->rn_name);
1405                         free(slice);
1406                 }
1407                 avl_destroy(&slice_cache);
1408
1409                 (void) closedir(dirp);
1410
1411                 if (config_failed)
1412                         goto error;
1413         }
1414
1415         ret = get_configs(hdl, &pools, iarg->can_be_active, iarg->policy);
1416
1417 error:
1418         for (pe = pools.pools; pe != NULL; pe = penext) {
1419                 penext = pe->pe_next;
1420                 for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
1421                         venext = ve->ve_next;
1422                         for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
1423                                 cenext = ce->ce_next;
1424                                 nvlist_free(ce->ce_config);
1425                                 free(ce);
1426                         }
1427                         free(ve);
1428                 }
1429                 free(pe);
1430         }
1431
1432         for (ne = pools.names; ne != NULL; ne = nenext) {
1433                 nenext = ne->ne_next;
1434                 free(ne->ne_name);
1435                 free(ne);
1436         }
1437
1438         return (ret);
1439 }
1440
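/*
 * Given a list of directories to search, find all pools stored on disk.
 * This is a convenience wrapper around zpool_find_import_impl() that
 * searches only the supplied paths.
 */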
1441 nvlist_t *
1442 zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv)
1443 {
1444         importargs_t iarg = { 0 };
1445
1446         iarg.paths = argc;
1447         iarg.path = argv;
1448
1449         return (zpool_find_import_impl(hdl, &iarg));
1450 }
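
/*
 * Usage sketch (illustrative, not part of the original source): search a
 * single directory for devices with valid pool labels:
 *
 *      char *dirs[] = { "/dev" };
 *      nvlist_t *pools = zpool_find_import(hdl, 1, dirs);
 */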
1451
1452 /*
1453  * Given a cache file, return the contents as a list of importable pools.
1454  * Either poolname or guid (but not both) may be provided by the caller
1455  * when trying to import a specific pool.
1456  */
1457 nvlist_t *
1458 zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
1459     char *poolname, uint64_t guid)
1460 {
1461         char *buf;
1462         int fd;
1463         struct stat64 statbuf;
1464         nvlist_t *raw, *src, *dst;
1465         nvlist_t *pools;
1466         nvpair_t *elem;
1467         char *name;
1468         uint64_t this_guid;
1469         boolean_t active;
1470
1471         verify(poolname == NULL || guid == 0);
1472
1473         if ((fd = open(cachefile, O_RDONLY)) < 0) {
1474                 zfs_error_aux(hdl, "%s", strerror(errno));
1475                 (void) zfs_error(hdl, EZFS_BADCACHE,
1476                     dgettext(TEXT_DOMAIN, "failed to open cache file"));
1477                 return (NULL);
1478         }
1479
1480         if (fstat64(fd, &statbuf) != 0) {
1481                 zfs_error_aux(hdl, "%s", strerror(errno));
1482                 (void) close(fd);
1483                 (void) zfs_error(hdl, EZFS_BADCACHE,
1484                     dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
1485                 return (NULL);
1486         }
1487
1488         if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
1489                 (void) close(fd);
1490                 return (NULL);
1491         }
1492
1493         if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
1494                 (void) close(fd);
1495                 free(buf);
1496                 (void) zfs_error(hdl, EZFS_BADCACHE,
1497                     dgettext(TEXT_DOMAIN,
1498                     "failed to read cache file contents"));
1499                 return (NULL);
1500         }
1501
1502         (void) close(fd);
1503
1504         if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
1505                 free(buf);
1506                 (void) zfs_error(hdl, EZFS_BADCACHE,
1507                     dgettext(TEXT_DOMAIN,
1508                     "invalid or corrupt cache file contents"));
1509                 return (NULL);
1510         }
1511
1512         free(buf);
1513
1514         /*
1515          * Go through the list of pools in the cache file and refresh the
1516          * state of each.
1517          */
1518         if (nvlist_alloc(&pools, 0, 0) != 0) {
1519                 (void) no_memory(hdl);
1520                 nvlist_free(raw);
1521                 return (NULL);
1522         }
1523
1524         elem = NULL;
1525         while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
1526                 src = fnvpair_value_nvlist(elem);
1527
1528                 name = fnvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME);
1529                 if (poolname != NULL && strcmp(poolname, name) != 0)
1530                         continue;
1531
1532                 this_guid = fnvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID);
1533                 if (guid != 0 && guid != this_guid)
1534                         continue;
1535
1536                 if (pool_active(hdl, name, this_guid, &active) != 0) {
1537                         nvlist_free(raw);
1538                         nvlist_free(pools);
1539                         return (NULL);
1540                 }
1541
1542                 if (active)
1543                         continue;
1544
1545                 if (nvlist_add_string(src, ZPOOL_CONFIG_CACHEFILE,
1546                     cachefile) != 0) {
1547                         (void) no_memory(hdl);
1548                         nvlist_free(raw);
1549                         nvlist_free(pools);
1550                         return (NULL);
1551                 }
1552
1553                 if ((dst = refresh_config(hdl, src)) == NULL) {
1554                         nvlist_free(raw);
1555                         nvlist_free(pools);
1556                         return (NULL);
1557                 }
1558
1559                 if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
1560                         (void) no_memory(hdl);
1561                         nvlist_free(dst);
1562                         nvlist_free(raw);
1563                         nvlist_free(pools);
1564                         return (NULL);
1565                 }
1566                 nvlist_free(dst);
1567         }
1568
1569         nvlist_free(raw);
1570         return (pools);
1571 }
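
/*
 * Usage sketch (illustrative, not part of the original source): to list
 * every importable pool recorded in a cache file without filtering by
 * name or guid, a caller might do:
 *
 *      nvlist_t *pools = zpool_find_import_cached(hdl,
 *          "/boot/zfs/zpool.cache", NULL, 0);
 *
 * The cache file path above is only an example.
 */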
1572
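/*
 * zpool_iter() callback: returns 1 if the imported pool's name (when
 * import->poolname is set) or guid matches the search criteria in the
 * importargs_t passed via 'data'.  The pool handle is closed before
 * returning.
 */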
1573 static int
1574 name_or_guid_exists(zpool_handle_t *zhp, void *data)
1575 {
1576         importargs_t *import = data;
1577         int found = 0;
1578
1579         if (import->poolname != NULL) {
1580                 char *pool_name;
1581
1582                 verify(nvlist_lookup_string(zhp->zpool_config,
1583                     ZPOOL_CONFIG_POOL_NAME, &pool_name) == 0);
1584                 if (strcmp(pool_name, import->poolname) == 0)
1585                         found = 1;
1586         } else {
1587                 uint64_t pool_guid;
1588
1589                 verify(nvlist_lookup_uint64(zhp->zpool_config,
1590                     ZPOOL_CONFIG_POOL_GUID, &pool_guid) == 0);
1591                 if (pool_guid == import->guid)
1592                         found = 1;
1593         }
1594
1595         zpool_close(zhp);
1596         return (found);
1597 }
1598
1599 nvlist_t *
1600 zpool_search_import(libzfs_handle_t *hdl, importargs_t *import)
1601 {
1602         nvlist_t *pools = NULL;
1603
1604         verify(import->poolname == NULL || import->guid == 0);
1605
1606         if (import->unique)
1607                 import->exists = zpool_iter(hdl, name_or_guid_exists, import);
1608
1609         if (import->cachefile != NULL)
1610                 pools = zpool_find_import_cached(hdl, import->cachefile,
1611                     import->poolname, import->guid);
1612         else
1613                 pools = zpool_find_import_impl(hdl, import);
1614
1615         return (pools);
1616 }
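
/*
 * Usage sketch (illustrative, not part of the original source): look for
 * a specific pool by name, first checking whether a pool with that name
 * is already imported:
 *
 *      importargs_t args = { 0 };
 *      args.poolname = "tank";
 *      args.unique = B_TRUE;
 *      nvlist_t *pools = zpool_search_import(hdl, &args);
 *      if (args.exists)
 *              ... a pool named "tank" is already imported ...
 */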
1617
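/*
 * Returns B_TRUE if the given config matches the target, which is either
 * a pool guid (when the target parses as a nonzero number) or a pool name.
 */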
1618 static boolean_t
1619 pool_match(nvlist_t *cfg, char *tgt)
1620 {
1621         uint64_t v, guid = strtoull(tgt, NULL, 0);
1622         char *s;
1623
1624         if (guid != 0) {
1625                 if (nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_GUID, &v) == 0)
1626                         return (v == guid);
1627         } else {
1628                 if (nvlist_lookup_string(cfg, ZPOOL_CONFIG_POOL_NAME, &s) == 0)
1629                         return (strcmp(s, tgt) == 0);
1630         }
1631         return (B_FALSE);
1632 }
1633
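/*
 * Find the importable pool matching 'target', which is a pool name or
 * guid optionally followed by a dataset or snapshot component (anything
 * after '/' or '@' is ignored).  On a unique match, *configp is set to
 * the pool's config and 0 is returned; returns ENOENT if no pool matches
 * and EINVAL if more than one pool matches.
 */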
1634 int
1635 zpool_tryimport(libzfs_handle_t *hdl, char *target, nvlist_t **configp,
1636     importargs_t *args)
1637 {
1638         nvlist_t *pools;
1639         nvlist_t *match = NULL;
1640         nvlist_t *config = NULL;
1641         char *sepp = NULL;
1642         int count = 0;
1643         char *targetdup = strdup(target);
1644
1645         *configp = NULL;
1646
1647         if ((sepp = strpbrk(targetdup, "/@")) != NULL) {
1648                 *sepp = '\0';
1649         }
1650
1651         pools = zpool_search_import(hdl, args);
1652
1653         if (pools != NULL) {
1654                 nvpair_t *elem = NULL;
1655                 while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {
1656                         VERIFY0(nvpair_value_nvlist(elem, &config));
1657                         if (pool_match(config, targetdup)) {
1658                                 count++;
1659                                 if (match != NULL) {
1660                                         /* multiple matches found */
1661                                         continue;
1662                                 } else {
1663                                         match = config;
1664                                 }
1665                         }
1666                 }
1667         }
1668
1669         if (count == 0) {
1670                 free(targetdup);
1671                 return (ENOENT);
1672         }
1673
1674         if (count > 1) {
1675                 free(targetdup);
1676                 return (EINVAL);
1677         }
1678
1679         *configp = match;
1680         free(targetdup);
1681
1682         return (0);
1683 }
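
/*
 * Usage sketch (illustrative, not part of the original source): the
 * target may include a dataset or snapshot component, which is ignored
 * when matching the pool:
 *
 *      nvlist_t *config;
 *      importargs_t args = { 0 };
 *      char target[] = "tank/home@snap";
 *      int err = zpool_tryimport(hdl, target, &config, &args);
 *
 * Here "tank/home@snap" matches a pool named "tank"; err is ENOENT if no
 * such pool is found and EINVAL if the match is ambiguous.
 */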
1684
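/*
 * Recursively searches the vdev tree rooted at 'nv' for a vdev whose guid
 * matches; returns B_TRUE if found.
 */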
1685 boolean_t
1686 find_guid(nvlist_t *nv, uint64_t guid)
1687 {
1688         uint64_t tmp;
1689         nvlist_t **child;
1690         uint_t c, children;
1691
1692         verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
1693         if (tmp == guid)
1694                 return (B_TRUE);
1695
1696         if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1697             &child, &children) == 0) {
1698                 for (c = 0; c < children; c++)
1699                         if (find_guid(child[c], guid))
1700                                 return (B_TRUE);
1701         }
1702
1703         return (B_FALSE);
1704 }
1705
1706 typedef struct aux_cbdata {
1707         const char      *cb_type;
1708         uint64_t        cb_guid;
1709         zpool_handle_t  *cb_zhp;
1710 } aux_cbdata_t;
1711
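/*
 * zpool_iter() callback: scan the pool's spare or l2cache vdev array
 * (selected by cb_type) for a vdev whose guid matches cb_guid.  On a
 * match, the pool handle is retained in cb_zhp and iteration stops;
 * otherwise the handle is closed.
 */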
1712 static int
1713 find_aux(zpool_handle_t *zhp, void *data)
1714 {
1715         aux_cbdata_t *cbp = data;
1716         nvlist_t **list;
1717         uint_t i, count;
1718         uint64_t guid;
1719         nvlist_t *nvroot;
1720
1721         verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1722             &nvroot) == 0);
1723
1724         if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
1725             &list, &count) == 0) {
1726                 for (i = 0; i < count; i++) {
1727                         verify(nvlist_lookup_uint64(list[i],
1728                             ZPOOL_CONFIG_GUID, &guid) == 0);
1729                         if (guid == cbp->cb_guid) {
1730                                 cbp->cb_zhp = zhp;
1731                                 return (1);
1732                         }
1733                 }
1734         }
1735
1736         zpool_close(zhp);
1737         return (0);
1738 }
1739
1740 /*
1741  * Determines if the device at 'fd' is in use by a pool.  If so, *inuse is
1742  * set to B_TRUE and the pool's state and name are returned.  The name
1743  * string is allocated and must be freed by the caller.
1744  */
1745 int
1746 zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
1747     boolean_t *inuse)
1748 {
1749         nvlist_t *config;
1750         char *name;
1751         boolean_t ret;
1752         uint64_t guid, vdev_guid;
1753         zpool_handle_t *zhp;
1754         nvlist_t *pool_config;
1755         uint64_t stateval, isspare;
1756         aux_cbdata_t cb = { 0 };
1757         boolean_t isactive;
1758
1759         *inuse = B_FALSE;
1760
1761         if (zpool_read_label(fd, &config) != 0 && errno == ENOMEM) {
1762                 (void) no_memory(hdl);
1763                 return (-1);
1764         }
1765
1766         if (config == NULL)
1767                 return (0);
1768
1769         verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
1770             &stateval) == 0);
1771         verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
1772             &vdev_guid) == 0);
1773
1774         if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
1775                 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1776                     &name) == 0);
1777                 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1778                     &guid) == 0);
1779         }
1780
1781         switch (stateval) {
1782         case POOL_STATE_EXPORTED:
1783                 /*
1784                  * A pool with an exported state may in fact be imported
1785                  * read-only, so check the in-core state to see if it's
1786                  * active and imported read-only.  If it is, set
1787                  * its state to active.
1788                  */
1789                 if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
1790                     (zhp = zpool_open_canfail(hdl, name)) != NULL) {
1791                         if (zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
1792                                 stateval = POOL_STATE_ACTIVE;
1793
1794                         /*
1795                          * All we needed the zpool handle for is the
1796                          * readonly prop check.
1797                          */
1798                         zpool_close(zhp);
1799                 }
1800
1801                 ret = B_TRUE;
1802                 break;
1803
1804         case POOL_STATE_ACTIVE:
1805                 /*
1806                  * For an active pool, we have to determine if it's really part
1807                  * of a currently active pool (in which case the pool will exist
1808                  * and the guid will be the same), or whether it's part of an
1809                  * active pool that was disconnected without being explicitly
1810                  * exported.
1811                  */
1812                 if (pool_active(hdl, name, guid, &isactive) != 0) {
1813                         nvlist_free(config);
1814                         return (-1);
1815                 }
1816
1817                 if (isactive) {
1818                         /*
1819                          * Because the device may have been removed while
1820                          * offlined, we only report it as active if the vdev is
1821                          * still present in the config.  Otherwise, pretend like
1822                          * it's not in use.
1823                          */
1824                         if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
1825                             (pool_config = zpool_get_config(zhp, NULL))
1826                             != NULL) {
1827                                 nvlist_t *nvroot;
1828
1829                                 verify(nvlist_lookup_nvlist(pool_config,
1830                                     ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1831                                 ret = find_guid(nvroot, vdev_guid);
1832                         } else {
1833                                 ret = B_FALSE;
1834                         }
1835
1836                         /*
1837                          * If this is an active spare within another pool, we
1838                          * treat it like an unused hot spare.  This allows the
1839                          * user to create a pool with a hot spare that is
1840                          * currently in use within another pool.  Since we
1841                          * return B_TRUE, libdiskmgt will continue to
1842                          * prevent generic consumers from using the device.
1843                          */
1844                         if (ret && nvlist_lookup_uint64(config,
1845                             ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
1846                                 stateval = POOL_STATE_SPARE;
1847
1848                         if (zhp != NULL)
1849                                 zpool_close(zhp);
1850                 } else {
1851                         stateval = POOL_STATE_POTENTIALLY_ACTIVE;
1852                         ret = B_TRUE;
1853                 }
1854                 break;
1855
1856         case POOL_STATE_SPARE:
1857                 /*
1858                  * For a hot spare, it can be either definitively in use, or
1859                  * potentially active.  To determine if it's in use, we iterate
1860                  * over all pools in the system and search for one with a spare
1861                  * with a matching guid.
1862                  *
1863                  * Due to the shared nature of spares, we don't actually report
1864                  * the potentially active case as in use.  This means the user
1865                  * can freely create pools on the hot spares of exported pools,
1866                  * but to do otherwise makes the resulting code complicated, and
1867                  * we end up having to deal with this case anyway.
1868                  */
1869                 cb.cb_zhp = NULL;
1870                 cb.cb_guid = vdev_guid;
1871                 cb.cb_type = ZPOOL_CONFIG_SPARES;
1872                 if (zpool_iter(hdl, find_aux, &cb) == 1) {
1873                         name = (char *)zpool_get_name(cb.cb_zhp);
1874                         ret = B_TRUE;
1875                 } else {
1876                         ret = B_FALSE;
1877                 }
1878                 break;
1879
1880         case POOL_STATE_L2CACHE:
1881
1882                 /*
1883                  * Check if any pool is currently using this l2cache device.
1884                  */
1885                 cb.cb_zhp = NULL;
1886                 cb.cb_guid = vdev_guid;
1887                 cb.cb_type = ZPOOL_CONFIG_L2CACHE;
1888                 if (zpool_iter(hdl, find_aux, &cb) == 1) {
1889                         name = (char *)zpool_get_name(cb.cb_zhp);
1890                         ret = B_TRUE;
1891                 } else {
1892                         ret = B_FALSE;
1893                 }
1894                 break;
1895
1896         default:
1897                 ret = B_FALSE;
1898         }
1899
1901         if (ret) {
1902                 if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
1903                         if (cb.cb_zhp)
1904                                 zpool_close(cb.cb_zhp);
1905                         nvlist_free(config);
1906                         return (-1);
1907                 }
1908                 *state = (pool_state_t)stateval;
1909         }
1910
1911         if (cb.cb_zhp)
1912                 zpool_close(cb.cb_zhp);
1913
1914         nvlist_free(config);
1915         *inuse = ret;
1916         return (0);
1917 }
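
/*
 * Usage sketch (illustrative, not part of the original source): a caller
 * checking whether an open device is part of an existing pool might do:
 *
 *      pool_state_t state;
 *      char *name;
 *      boolean_t inuse;
 *
 *      if (zpool_in_use(hdl, fd, &state, &name, &inuse) == 0 && inuse) {
 *              (void) printf("in use by pool %s\n", name);
 *              free(name);
 *      }
 */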