/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
 * Copyright 2015 RackTop Systems.
 * Copyright 2016 Nexenta Systems, Inc.
 */

/*
 * Pool import support functions.
 *
 * To import a pool, we rely on reading the configuration information from the
 * ZFS label of each device.  If we successfully read the label, then we
 * organize the configuration information in the following hierarchy:
 *
 *      pool guid -> toplevel vdev guid -> label txg
 *
 * Duplicate entries matching this same tuple will be discarded.  Once we have
 * examined every device, we pick the best label txg config for each toplevel
 * vdev.  We then arrange these toplevel vdevs into a complete pool config, and
 * update any paths that have changed.  Finally, we attempt to import the pool
 * using our derived config, and record the results.
 */

#include <aio.h>
#include <ctype.h>
#include <devid.h>
#include <dirent.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <thread_pool.h>
#include <libgeom.h>

#include <sys/vdev_impl.h>

#include "libzfs.h"
#include "libzfs_impl.h"

/*
 * Intermediate structures used to gather configuration information.
 */
typedef struct config_entry {
        uint64_t                ce_txg;
        nvlist_t                *ce_config;
        struct config_entry     *ce_next;
} config_entry_t;

typedef struct vdev_entry {
        uint64_t                ve_guid;
        config_entry_t          *ve_configs;
        struct vdev_entry       *ve_next;
} vdev_entry_t;

typedef struct pool_entry {
        uint64_t                pe_guid;
        vdev_entry_t            *pe_vdevs;
        struct pool_entry       *pe_next;
} pool_entry_t;

typedef struct name_entry {
        char                    *ne_name;
        uint64_t                ne_guid;
        struct name_entry       *ne_next;
} name_entry_t;

typedef struct pool_list {
        pool_entry_t            *pools;
        name_entry_t            *names;
} pool_list_t;

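/*
 * Illustrative sketch (not part of the original source): for a hypothetical
 * two-disk pool whose labels were read from /dev/da0 and /dev/da1, the lists
 * above would nest roughly as follows:
 *
 *      pool_list_t
 *        pools -> pool_entry_t (pe_guid = pool guid)
 *                   pe_vdevs -> vdev_entry_t (ve_guid = toplevel vdev guid)
 *                                 ve_configs -> config_entry_t (one per txg)
 *        names -> name_entry_t ("/dev/da0") -> name_entry_t ("/dev/da1")
 */
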
static char *
get_devid(const char *path)
{
#ifdef have_devid
        int fd;
        ddi_devid_t devid;
        char *minor, *ret;

        if ((fd = open(path, O_RDONLY)) < 0)
                return (NULL);

        minor = NULL;
        ret = NULL;
        if (devid_get(fd, &devid) == 0) {
                if (devid_get_minor_name(fd, &minor) == 0)
                        ret = devid_str_encode(devid, minor);
                if (minor != NULL)
                        devid_str_free(minor);
                devid_free(devid);
        }
        (void) close(fd);

        return (ret);
#else
        return (NULL);
#endif
}

/*
 * Go through and fix up any path and/or devid information for the given vdev
 * configuration.
 */
static int
fix_paths(nvlist_t *nv, name_entry_t *names)
{
        nvlist_t **child;
        uint_t c, children;
        uint64_t guid;
        name_entry_t *ne, *best;
        char *path, *devid;
        int matched;

        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
            &child, &children) == 0) {
                for (c = 0; c < children; c++)
                        if (fix_paths(child[c], names) != 0)
                                return (-1);
                return (0);
        }

        /*
         * This is a leaf (file or disk) vdev.  In either case, go through
         * the name list and see if we find a matching guid.  If so, replace
         * the path and see if we can calculate a new devid.
         *
         * There may be multiple names associated with a particular guid, in
         * which case we have overlapping slices or multiple paths to the same
         * disk.  If this is the case, then we want to pick the path that is
         * the most similar to the original, where "most similar" is the number
         * of matching characters starting from the end of the path.  This will
         * preserve slice numbers even if the disks have been reorganized, and
         * will also catch preferred disk names if multiple paths exist.
         */
        verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
        if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
                path = NULL;

        matched = 0;
        best = NULL;
        for (ne = names; ne != NULL; ne = ne->ne_next) {
                if (ne->ne_guid == guid) {
                        const char *src, *dst;
                        int count;

                        if (path == NULL) {
                                best = ne;
                                break;
                        }

                        src = ne->ne_name + strlen(ne->ne_name) - 1;
                        dst = path + strlen(path) - 1;
                        for (count = 0; src >= ne->ne_name && dst >= path;
                            src--, dst--, count++)
                                if (*src != *dst)
                                        break;

                        /*
                         * At this point, 'count' is the number of characters
                         * matched from the end.
                         */
                        if (count > matched || best == NULL) {
                                best = ne;
                                matched = count;
                        }
                }
        }

        if (best == NULL)
                return (0);

        if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
                return (-1);

        if ((devid = get_devid(best->ne_name)) == NULL) {
                (void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
        } else {
                if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0) {
                        devid_str_free(devid);
                        return (-1);
                }
                devid_str_free(devid);
        }

        return (0);
}
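
/*
 * Worked example (illustrative): suppose the label records the path
 * "/dev/dsk/c0t0d0s4" and the name list offers "/dev/dsk/c0t2d0s4" and
 * "/dev/dsk/c1t0d0s4" for the same guid.  Matching from the end of the
 * string, the first candidate shares the suffix "d0s4" (count = 4) while
 * the second shares "t0d0s4" (count = 6), so fix_paths() picks the second,
 * preserving the original target and slice numbers.
 */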

/*
 * Add the given configuration to the list of known devices.
 */
static int
add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
    nvlist_t *config)
{
        uint64_t pool_guid, vdev_guid, top_guid, txg, state;
        pool_entry_t *pe;
        vdev_entry_t *ve;
        config_entry_t *ce;
        name_entry_t *ne;

        /*
         * If this is a hot spare not currently in use or level 2 cache
         * device, add it to the list of names to translate, but don't do
         * anything else.
         */
        if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
            &state) == 0 &&
            (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
                if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
                        return (-1);

                if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
                        free(ne);
                        return (-1);
                }
                ne->ne_guid = vdev_guid;
                ne->ne_next = pl->names;
                pl->names = ne;
                return (0);
        }

        /*
         * If we have a valid config but cannot read any of these fields, then
         * it means we have a half-initialized label.  In vdev_label_init()
         * we write a label with txg == 0 so that we can identify the device
         * in case the user refers to the same disk later on.  If we fail to
         * create the pool, we'll be left with a label in this state
         * which should not be considered part of a valid pool.
         */
        if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
            &pool_guid) != 0 ||
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
            &vdev_guid) != 0 ||
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
            &top_guid) != 0 ||
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
            &txg) != 0 || txg == 0) {
                nvlist_free(config);
                return (0);
        }

        /*
         * First, see if we know about this pool.  If not, then add it to the
         * list of known pools.
         */
        for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
                if (pe->pe_guid == pool_guid)
                        break;
        }

        if (pe == NULL) {
                if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
                        nvlist_free(config);
                        return (-1);
                }
                pe->pe_guid = pool_guid;
                pe->pe_next = pl->pools;
                pl->pools = pe;
        }

        /*
         * Second, see if we know about this toplevel vdev.  Add it if it's
         * missing.
         */
        for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
                if (ve->ve_guid == top_guid)
                        break;
        }

        if (ve == NULL) {
                if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
                        nvlist_free(config);
                        return (-1);
                }
                ve->ve_guid = top_guid;
                ve->ve_next = pe->pe_vdevs;
                pe->pe_vdevs = ve;
        }

        /*
         * Third, see if we have a config with a matching transaction group.  If
         * so, then we do nothing.  Otherwise, add it to the list of known
         * configs.
         */
        for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
                if (ce->ce_txg == txg)
                        break;
        }

        if (ce == NULL) {
                if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) {
                        nvlist_free(config);
                        return (-1);
                }
                ce->ce_txg = txg;
                ce->ce_config = config;
                ce->ce_next = ve->ve_configs;
                ve->ve_configs = ce;
        } else {
                nvlist_free(config);
        }

        /*
         * At this point we've successfully added our config to the list of
         * known configs.  The last thing to do is add the vdev guid -> path
         * mappings so that we can fix up the configuration as necessary before
         * doing the import.
         */
        if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
                return (-1);

        if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
                free(ne);
                return (-1);
        }

        ne->ne_guid = vdev_guid;
        ne->ne_next = pl->names;
        pl->names = ne;

        return (0);
}
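
/*
 * Note (illustrative): because configs are keyed by (pool guid, toplevel vdev
 * guid, txg), scanning the two sides of a mirror whose labels carry the same
 * txg stores only one config_entry_t for that toplevel vdev, but both device
 * paths still land on the name list for later path fixup.
 */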

/*
 * Determine whether the pool with the given name is currently active and,
 * if so, whether its GUID matches the given GUID.  The answer is returned
 * through 'isactive'; the return value is 0 on success and -1 on failure.
 */
static int
pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
    boolean_t *isactive)
{
        zpool_handle_t *zhp;
        uint64_t theguid;

        if (zpool_open_silent(hdl, name, &zhp) != 0)
                return (-1);

        if (zhp == NULL) {
                *isactive = B_FALSE;
                return (0);
        }

        verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
            &theguid) == 0);

        zpool_close(zhp);

        *isactive = (theguid == guid);
        return (0);
}

static nvlist_t *
refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
{
        nvlist_t *nvl;
        zfs_cmd_t zc = { 0 };
        int err, dstbuf_size;

        if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
                return (NULL);

        dstbuf_size = MAX(CONFIG_BUF_MINSIZE, zc.zc_nvlist_conf_size * 4);

        if (zcmd_alloc_dst_nvlist(hdl, &zc, dstbuf_size) != 0) {
                zcmd_free_nvlists(&zc);
                return (NULL);
        }

        while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
            &zc)) != 0 && errno == ENOMEM) {
                if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
                        zcmd_free_nvlists(&zc);
                        return (NULL);
                }
        }

        if (err) {
                zcmd_free_nvlists(&zc);
                return (NULL);
        }

        if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
                zcmd_free_nvlists(&zc);
                return (NULL);
        }

        zcmd_free_nvlists(&zc);
        return (nvl);
}

/*
 * Determine if the vdev id is a hole in the namespace.
 */
boolean_t
vdev_is_hole(uint64_t *hole_array, uint_t holes, uint_t id)
{
        for (int c = 0; c < holes; c++) {

                /* Top-level is a hole */
                if (hole_array[c] == id)
                        return (B_TRUE);
        }
        return (B_FALSE);
}
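
/*
 * Worked example (illustrative): with hole_array = { 1, 3 } and holes = 2,
 * vdev_is_hole(hole_array, 2, 1) and vdev_is_hole(hole_array, 2, 3) return
 * B_TRUE, while any other id returns B_FALSE.
 */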

/*
 * Convert our list of pools into the definitive set of configurations.  We
 * start by picking the best config for each toplevel vdev.  Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool.  We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
{
        pool_entry_t *pe;
        vdev_entry_t *ve;
        config_entry_t *ce;
        nvlist_t *ret = NULL, *config = NULL, *tmp = NULL, *nvtop, *nvroot;
        nvlist_t **spares, **l2cache;
        uint_t i, nspares, nl2cache;
        boolean_t config_seen;
        uint64_t best_txg;
        char *name, *hostname = NULL;
        uint64_t guid;
        uint_t children = 0;
        nvlist_t **child = NULL;
        uint_t holes;
        uint64_t *hole_array, max_id;
        uint_t c;
        boolean_t isactive;
        uint64_t hostid;
        nvlist_t *nvl;
        boolean_t found_one = B_FALSE;
        boolean_t valid_top_config = B_FALSE;

        if (nvlist_alloc(&ret, 0, 0) != 0)
                goto nomem;

        for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
                uint64_t id, max_txg = 0;

                if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
                        goto nomem;
                config_seen = B_FALSE;

                /*
                 * Iterate over all toplevel vdevs.  Grab the pool configuration
                 * from the first one we find, and then go through the rest and
                 * add them as necessary to the 'vdevs' member of the config.
                 */
                for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {

                        /*
                         * Determine the best configuration for this vdev by
                         * selecting the config with the latest transaction
                         * group.
                         */
                        best_txg = 0;
                        for (ce = ve->ve_configs; ce != NULL;
                            ce = ce->ce_next) {

                                if (ce->ce_txg > best_txg) {
                                        tmp = ce->ce_config;
                                        best_txg = ce->ce_txg;
                                }
                        }

                        /*
                         * We rely on the fact that the max txg for the
                         * pool will contain the most up-to-date information
                         * about the valid top-levels in the vdev namespace.
                         */
                        if (best_txg > max_txg) {
                                (void) nvlist_remove(config,
                                    ZPOOL_CONFIG_VDEV_CHILDREN,
                                    DATA_TYPE_UINT64);
                                (void) nvlist_remove(config,
                                    ZPOOL_CONFIG_HOLE_ARRAY,
                                    DATA_TYPE_UINT64_ARRAY);

                                max_txg = best_txg;
                                hole_array = NULL;
                                holes = 0;
                                max_id = 0;
                                valid_top_config = B_FALSE;

                                if (nvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_VDEV_CHILDREN, &max_id) == 0) {
                                        verify(nvlist_add_uint64(config,
                                            ZPOOL_CONFIG_VDEV_CHILDREN,
                                            max_id) == 0);
                                        valid_top_config = B_TRUE;
                                }

                                if (nvlist_lookup_uint64_array(tmp,
                                    ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
                                    &holes) == 0) {
                                        verify(nvlist_add_uint64_array(config,
                                            ZPOOL_CONFIG_HOLE_ARRAY,
                                            hole_array, holes) == 0);
                                }
                        }

                        if (!config_seen) {
                                /*
                                 * Copy the relevant pieces of data to the pool
                                 * configuration:
                                 *
                                 *      version
                                 *      pool guid
                                 *      name
                                 *      comment (if available)
                                 *      pool state
                                 *      hostid (if available)
                                 *      hostname (if available)
                                 */
                                uint64_t state, version;
                                char *comment = NULL;

                                version = fnvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_VERSION);
                                fnvlist_add_uint64(config,
                                    ZPOOL_CONFIG_VERSION, version);
                                guid = fnvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_POOL_GUID);
                                fnvlist_add_uint64(config,
                                    ZPOOL_CONFIG_POOL_GUID, guid);
                                name = fnvlist_lookup_string(tmp,
                                    ZPOOL_CONFIG_POOL_NAME);
                                fnvlist_add_string(config,
                                    ZPOOL_CONFIG_POOL_NAME, name);

                                if (nvlist_lookup_string(tmp,
                                    ZPOOL_CONFIG_COMMENT, &comment) == 0)
                                        fnvlist_add_string(config,
                                            ZPOOL_CONFIG_COMMENT, comment);

                                state = fnvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_POOL_STATE);
                                fnvlist_add_uint64(config,
                                    ZPOOL_CONFIG_POOL_STATE, state);

                                hostid = 0;
                                if (nvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
                                        fnvlist_add_uint64(config,
                                            ZPOOL_CONFIG_HOSTID, hostid);
                                        hostname = fnvlist_lookup_string(tmp,
                                            ZPOOL_CONFIG_HOSTNAME);
                                        fnvlist_add_string(config,
                                            ZPOOL_CONFIG_HOSTNAME, hostname);
                                }

                                config_seen = B_TRUE;
                        }

                        /*
                         * Add this top-level vdev to the child array.
                         */
                        verify(nvlist_lookup_nvlist(tmp,
                            ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
                        verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
                            &id) == 0);

                        if (id >= children) {
                                nvlist_t **newchild;

                                newchild = zfs_alloc(hdl, (id + 1) *
                                    sizeof (nvlist_t *));
                                if (newchild == NULL)
                                        goto nomem;

                                for (c = 0; c < children; c++)
                                        newchild[c] = child[c];

                                free(child);
                                child = newchild;
                                children = id + 1;
                        }
                        if (nvlist_dup(nvtop, &child[id], 0) != 0)
                                goto nomem;

                }

                /*
                 * If we have information about all the top-levels then
                 * clean up the nvlist which we've constructed. This
                 * means removing any extraneous devices that are
                 * beyond the valid range or adding devices to the end
                 * of our array which appear to be missing.
                 */
                if (valid_top_config) {
                        if (max_id < children) {
                                for (c = max_id; c < children; c++)
                                        nvlist_free(child[c]);
                                children = max_id;
                        } else if (max_id > children) {
                                nvlist_t **newchild;

                                newchild = zfs_alloc(hdl, (max_id) *
                                    sizeof (nvlist_t *));
                                if (newchild == NULL)
                                        goto nomem;

                                for (c = 0; c < children; c++)
                                        newchild[c] = child[c];

                                free(child);
                                child = newchild;
                                children = max_id;
                        }
                }

                verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
                    &guid) == 0);

                /*
                 * The vdev namespace may contain holes as a result of
                 * device removal. We must add them back into the vdev
                 * tree before we process any missing devices.
                 */
                if (holes > 0) {
                        ASSERT(valid_top_config);

                        for (c = 0; c < children; c++) {
                                nvlist_t *holey;

                                if (child[c] != NULL ||
                                    !vdev_is_hole(hole_array, holes, c))
                                        continue;

                                if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
                                    0) != 0)
                                        goto nomem;

                                /*
                                 * Holes in the namespace are treated as
                                 * "hole" top-level vdevs and have a
                                 * special flag set on them.
                                 */
                                if (nvlist_add_string(holey,
                                    ZPOOL_CONFIG_TYPE,
                                    VDEV_TYPE_HOLE) != 0 ||
                                    nvlist_add_uint64(holey,
                                    ZPOOL_CONFIG_ID, c) != 0 ||
                                    nvlist_add_uint64(holey,
                                    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
                                        nvlist_free(holey);
                                        goto nomem;
                                }
                                child[c] = holey;
                        }
                }

                /*
                 * Look for any missing top-level vdevs.  If this is the case,
                 * create a faked up 'missing' vdev as a placeholder.  We cannot
                 * simply compress the child array, because the kernel performs
                 * certain checks to make sure the vdev IDs match their location
                 * in the configuration.
                 */
                for (c = 0; c < children; c++) {
                        if (child[c] == NULL) {
                                nvlist_t *missing;
                                if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
                                    0) != 0)
                                        goto nomem;
                                if (nvlist_add_string(missing,
                                    ZPOOL_CONFIG_TYPE,
                                    VDEV_TYPE_MISSING) != 0 ||
                                    nvlist_add_uint64(missing,
                                    ZPOOL_CONFIG_ID, c) != 0 ||
                                    nvlist_add_uint64(missing,
                                    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
                                        nvlist_free(missing);
                                        goto nomem;
                                }
                                child[c] = missing;
                        }
                }

                /*
                 * Put all of this pool's top-level vdevs into a root vdev.
                 */
                if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
                        goto nomem;
                if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
                    VDEV_TYPE_ROOT) != 0 ||
                    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
                    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
                    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
                    child, children) != 0) {
                        nvlist_free(nvroot);
                        goto nomem;
                }

                for (c = 0; c < children; c++)
                        nvlist_free(child[c]);
                free(child);
                children = 0;
                child = NULL;

                /*
                 * Go through and fix up any paths and/or devids based on our
                 * known list of vdev GUID -> path mappings.
                 */
                if (fix_paths(nvroot, pl->names) != 0) {
                        nvlist_free(nvroot);
                        goto nomem;
                }

                /*
                 * Add the root vdev to this pool's configuration.
                 */
                if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
                    nvroot) != 0) {
                        nvlist_free(nvroot);
                        goto nomem;
                }
                nvlist_free(nvroot);

                /*
                 * zdb uses this path to report on active pools that were
                 * imported or created using -R.
                 */
                if (active_ok)
                        goto add_pool;

                /*
                 * Determine if this pool is currently active, in which case we
                 * can't actually import it.
                 */
                verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
                    &name) == 0);
                verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
                    &guid) == 0);

                if (pool_active(hdl, name, guid, &isactive) != 0)
                        goto error;

                if (isactive) {
                        nvlist_free(config);
                        config = NULL;
                        continue;
                }

                if ((nvl = refresh_config(hdl, config)) == NULL) {
                        nvlist_free(config);
                        config = NULL;
                        continue;
                }

                nvlist_free(config);
                config = nvl;

                /*
                 * Go through and update the paths for spares, now that we have
                 * them.
                 */
                verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
                    &nvroot) == 0);
                if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
                    &spares, &nspares) == 0) {
                        for (i = 0; i < nspares; i++) {
                                if (fix_paths(spares[i], pl->names) != 0)
                                        goto nomem;
                        }
                }

                /*
                 * Update the paths for l2cache devices.
                 */
                if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
                    &l2cache, &nl2cache) == 0) {
                        for (i = 0; i < nl2cache; i++) {
                                if (fix_paths(l2cache[i], pl->names) != 0)
                                        goto nomem;
                        }
                }

                /*
                 * Restore the original information read from the actual label.
                 */
                (void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
                    DATA_TYPE_UINT64);
                (void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
                    DATA_TYPE_STRING);
                if (hostid != 0) {
                        verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
                            hostid) == 0);
                        verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
                            hostname) == 0);
                }

add_pool:
                /*
                 * Add this pool to the list of configs.
                 */
                verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
                    &name) == 0);
                if (nvlist_add_nvlist(ret, name, config) != 0)
                        goto nomem;

                found_one = B_TRUE;
                nvlist_free(config);
                config = NULL;
        }

        if (!found_one) {
                nvlist_free(ret);
                ret = NULL;
        }

        return (ret);

nomem:
        (void) no_memory(hdl);
error:
        nvlist_free(config);
        nvlist_free(ret);
        for (c = 0; c < children; c++)
                nvlist_free(child[c]);
        free(child);

        return (NULL);
}

/*
 * Return the offset of the given label.
 */
static uint64_t
label_offset(uint64_t size, int l)
{
        ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
        return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
            0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}
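
/*
 * Worked example (illustrative): with four 256 KB labels (VDEV_LABELS == 4,
 * sizeof (vdev_label_t) == 256 KB) on a device whose aligned size is 'size',
 * labels 0 and 1 sit at offsets 0 and 256 KB from the front, while labels 2
 * and 3 sit at size - 512 KB and size - 256 KB, i.e. two copies at each end
 * of the device.
 */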

/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one.
 * Returns 0 on success, or -1 on failure.
 */
int
zpool_read_label(int fd, nvlist_t **config)
{
        struct stat64 statbuf;
        int l;
        vdev_label_t *label;
        uint64_t state, txg, size;

        *config = NULL;

        if (fstat64(fd, &statbuf) == -1)
                return (-1);
        size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

        if ((label = malloc(sizeof (vdev_label_t))) == NULL)
                return (-1);

        for (l = 0; l < VDEV_LABELS; l++) {
                if (pread64(fd, label, sizeof (vdev_label_t),
                    label_offset(size, l)) != sizeof (vdev_label_t))
                        continue;

                if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
                    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
                        continue;

                if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
                    &state) != 0 || state > POOL_STATE_L2CACHE) {
                        nvlist_free(*config);
                        continue;
                }

                if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
                    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
                    &txg) != 0 || txg == 0)) {
                        nvlist_free(*config);
                        continue;
                }

                free(label);
                return (0);
        }

        free(label);
        *config = NULL;
        return (-1);
}
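
/*
 * Usage sketch (illustrative, not part of the original source; the device
 * path is hypothetical):
 *
 *      nvlist_t *config;
 *      int fd = open("/dev/da0", O_RDONLY);
 *
 *      if (fd >= 0 && zpool_read_label(fd, &config) == 0) {
 *              dump_nvlist(config, 0);
 *              nvlist_free(config);
 *      }
 *      if (fd >= 0)
 *              (void) close(fd);
 */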

/*
 * Given a file descriptor, read the label information from all four vdev
 * labels.  Returns the number of valid labels found.  If a valid label is
 * found, its configuration is returned via 'config'; the caller is
 * responsible for freeing it.
 */
int
zpool_read_all_labels(int fd, nvlist_t **config)
{
        struct stat64 statbuf;
        struct aiocb aiocbs[VDEV_LABELS];
        struct aiocb *aiocbps[VDEV_LABELS];
        int l;
        vdev_phys_t *labels;
        uint64_t state, txg, size;
        int nlabels = 0;

        *config = NULL;

        if (fstat64(fd, &statbuf) == -1)
                return (0);
        size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

        if ((labels = calloc(VDEV_LABELS, sizeof (vdev_phys_t))) == NULL)
                return (0);

        memset(aiocbs, 0, sizeof (aiocbs));
        for (l = 0; l < VDEV_LABELS; l++) {
                aiocbs[l].aio_fildes = fd;
                aiocbs[l].aio_offset = label_offset(size, l) + VDEV_SKIP_SIZE;
                aiocbs[l].aio_buf = &labels[l];
                aiocbs[l].aio_nbytes = sizeof (vdev_phys_t);
                aiocbs[l].aio_lio_opcode = LIO_READ;
                aiocbps[l] = &aiocbs[l];
        }

        if (lio_listio(LIO_WAIT, aiocbps, VDEV_LABELS, NULL) != 0) {
                if (errno == EAGAIN || errno == EINTR || errno == EIO) {
                        for (l = 0; l < VDEV_LABELS; l++) {
                                errno = 0;
                                int r = aio_error(&aiocbs[l]);
                                if (r != EINVAL)
                                        (void) aio_return(&aiocbs[l]);
                        }
                }
                free(labels);
                return (0);
        }

        for (l = 0; l < VDEV_LABELS; l++) {
                nvlist_t *temp = NULL;

                if (aio_return(&aiocbs[l]) != sizeof (vdev_phys_t))
                        continue;

                if (nvlist_unpack(labels[l].vp_nvlist,
                    sizeof (labels[l].vp_nvlist), &temp, 0) != 0)
                        continue;

                if (nvlist_lookup_uint64(temp, ZPOOL_CONFIG_POOL_STATE,
                    &state) != 0 || state > POOL_STATE_L2CACHE) {
                        nvlist_free(temp);
                        temp = NULL;
                        continue;
                }

                if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
                    (nvlist_lookup_uint64(temp, ZPOOL_CONFIG_POOL_TXG,
                    &txg) != 0 || txg == 0)) {
                        nvlist_free(temp);
                        temp = NULL;
                        continue;
                }
                if (temp != NULL) {
                        /* Free any earlier label's config to avoid a leak. */
                        nvlist_free(*config);
                        *config = temp;
                }

                nlabels++;
        }

        free(labels);
        return (nlabels);
}
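
/*
 * Note (illustrative): a healthy device returns VDEV_LABELS (4) here; a
 * smaller count suggests that some labels failed to read, unpack, or
 * validate, which callers can treat as a hint of partial label damage.
 */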

typedef struct rdsk_node {
        char *rn_name;
        int rn_dfd;
        libzfs_handle_t *rn_hdl;
        nvlist_t *rn_config;
        avl_tree_t *rn_avl;
        avl_node_t rn_node;
        boolean_t rn_nozpool;
} rdsk_node_t;

static int
slice_cache_compare(const void *arg1, const void *arg2)
{
        const char *nm1 = ((rdsk_node_t *)arg1)->rn_name;
        const char *nm2 = ((rdsk_node_t *)arg2)->rn_name;
        char *nm1slice, *nm2slice;
        int rv;

        /*
         * Slices zero and two are the most likely to provide results,
         * so put those first.
         */
        nm1slice = strstr(nm1, "s0");
        nm2slice = strstr(nm2, "s0");
        if (nm1slice && !nm2slice) {
                return (-1);
        }
        if (!nm1slice && nm2slice) {
                return (1);
        }
        nm1slice = strstr(nm1, "s2");
        nm2slice = strstr(nm2, "s2");
        if (nm1slice && !nm2slice) {
                return (-1);
        }
        if (!nm1slice && nm2slice) {
                return (1);
        }

        rv = strcmp(nm1, nm2);
        if (rv == 0)
                return (0);
        return (rv > 0 ? 1 : -1);
}
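
/*
 * Ordering example (illustrative): given the names "c0d0p1", "c0d0s2" and
 * "c0d0s0", the comparator sorts them as c0d0s0, c0d0s2, c0d0p1, so the
 * slices most likely to hold a label are probed first.
 */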

#ifdef illumos
static void
check_one_slice(avl_tree_t *r, char *diskname, uint_t partno,
    diskaddr_t size, uint_t blksz)
{
        rdsk_node_t tmpnode;
        rdsk_node_t *node;
        char sname[MAXNAMELEN];

        tmpnode.rn_name = &sname[0];
        (void) snprintf(tmpnode.rn_name, MAXNAMELEN, "%s%u",
            diskname, partno);
        /*
         * protect against division by zero for disk labels that
         * contain a bogus sector size
         */
        if (blksz == 0)
                blksz = DEV_BSIZE;
        /* too small to contain a zpool? */
        if ((size < (SPA_MINDEVSIZE / blksz)) &&
            (node = avl_find(r, &tmpnode, NULL)))
                node->rn_nozpool = B_TRUE;
}
#endif  /* illumos */

static void
nozpool_all_slices(avl_tree_t *r, const char *sname)
{
#ifdef illumos
        char diskname[MAXNAMELEN];
        char *ptr;
        int i;

        (void) strncpy(diskname, sname, MAXNAMELEN);
        if (((ptr = strrchr(diskname, 's')) == NULL) &&
            ((ptr = strrchr(diskname, 'p')) == NULL))
                return;
        ptr[0] = 's';
        ptr[1] = '\0';
        for (i = 0; i < NDKMAP; i++)
                check_one_slice(r, diskname, i, 0, 1);
        ptr[0] = 'p';
        for (i = 0; i <= FD_NUMPART; i++)
                check_one_slice(r, diskname, i, 0, 1);
#endif  /* illumos */
}

#ifdef illumos
static void
check_slices(avl_tree_t *r, int fd, const char *sname)
{
        struct extvtoc vtoc;
        struct dk_gpt *gpt;
        char diskname[MAXNAMELEN];
        char *ptr;
        int i;

        (void) strncpy(diskname, sname, MAXNAMELEN);
        if ((ptr = strrchr(diskname, 's')) == NULL || !isdigit(ptr[1]))
                return;
        ptr[1] = '\0';

        if (read_extvtoc(fd, &vtoc) >= 0) {
                for (i = 0; i < NDKMAP; i++)
                        check_one_slice(r, diskname, i,
                            vtoc.v_part[i].p_size, vtoc.v_sectorsz);
        } else if (efi_alloc_and_read(fd, &gpt) >= 0) {
                /*
                 * on x86 we'll still have leftover links that point
                 * to slices s[9-15], so use NDKMAP instead
                 */
                for (i = 0; i < NDKMAP; i++)
                        check_one_slice(r, diskname, i,
                            gpt->efi_parts[i].p_size, gpt->efi_lbasize);
                /* nodes p[1-4] are never used with EFI labels */
                ptr[0] = 'p';
                for (i = 1; i <= FD_NUMPART; i++)
                        check_one_slice(r, diskname, i, 0, 1);
                efi_free(gpt);
        }
}
#endif  /* illumos */

static void
zpool_open_func(void *arg)
{
        rdsk_node_t *rn = arg;
        struct stat64 statbuf;
        nvlist_t *config;
        int fd;

        if (rn->rn_nozpool)
                return;
        if ((fd = openat64(rn->rn_dfd, rn->rn_name, O_RDONLY)) < 0) {
                /* symlink to a device that's no longer there */
                if (errno == ENOENT)
                        nozpool_all_slices(rn->rn_avl, rn->rn_name);
                return;
        }
        /*
         * Ignore failed stats.  We only want regular
         * files, character devs and block devs.
         */
        if (fstat64(fd, &statbuf) != 0 ||
            (!S_ISREG(statbuf.st_mode) &&
            !S_ISCHR(statbuf.st_mode) &&
            !S_ISBLK(statbuf.st_mode))) {
                (void) close(fd);
                return;
        }
        /* this file is too small to hold a zpool */
#ifdef illumos
        if (S_ISREG(statbuf.st_mode) &&
            statbuf.st_size < SPA_MINDEVSIZE) {
                (void) close(fd);
                return;
        } else if (!S_ISREG(statbuf.st_mode)) {
                /*
                 * Try to read the disk label first so we don't have to
                 * open a bunch of minor nodes that can't have a zpool.
                 */
                check_slices(rn->rn_avl, fd, rn->rn_name);
        }
#else   /* !illumos */
        if (statbuf.st_size < SPA_MINDEVSIZE) {
                (void) close(fd);
                return;
        }
#endif  /* illumos */

        if (zpool_read_label(fd, &config) != 0 && errno == ENOMEM) {
                (void) close(fd);
                (void) no_memory(rn->rn_hdl);
                return;
        }
        (void) close(fd);

        rn->rn_config = config;
}

/*
 * Given a file descriptor, clear (zero) the label information.
 */
int
zpool_clear_label(int fd)
{
        struct stat64 statbuf;
        int l;
        vdev_label_t *label;
        uint64_t size;

        if (fstat64(fd, &statbuf) == -1)
                return (0);
        size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

        if ((label = calloc(1, sizeof (vdev_label_t))) == NULL)
                return (-1);

        for (l = 0; l < VDEV_LABELS; l++) {
                if (pwrite64(fd, label, sizeof (vdev_label_t),
                    label_offset(size, l)) != sizeof (vdev_label_t)) {
                        free(label);
                        return (-1);
                }
        }

        free(label);
        return (0);
}
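
/*
 * Usage sketch (illustrative; the device path is hypothetical).  Clearing
 * labels is destructive, so only do it on a device you intend to reuse:
 *
 *      int fd = open("/dev/da0", O_RDWR);
 *
 *      if (fd >= 0) {
 *              if (zpool_clear_label(fd) != 0)
 *                      (void) fprintf(stderr, "failed to clear label\n");
 *              (void) close(fd);
 *      }
 */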

/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no
 * directories are given (iarg->paths is 0), then the default directory
 * (/dev) is searched.  poolname or guid (but not both) are provided by the
 * caller when trying to import a specific pool.
 */
static nvlist_t *
zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
{
        int i, dirs = iarg->paths;
        struct dirent64 *dp;
        char path[MAXPATHLEN];
        char *end, **dir = iarg->path;
        size_t pathleft;
        nvlist_t *ret = NULL;
        static char *default_dir = "/dev";
        pool_list_t pools = { 0 };
        pool_entry_t *pe, *penext;
        vdev_entry_t *ve, *venext;
        config_entry_t *ce, *cenext;
        name_entry_t *ne, *nenext;
        avl_tree_t slice_cache;
        rdsk_node_t *slice;
        void *cookie;

        if (dirs == 0) {
                dirs = 1;
                dir = &default_dir;
        }

        /*
         * Go through and read the label configuration information from every
         * possible device, organizing the information according to pool GUID
         * and toplevel GUID.
         */
        for (i = 0; i < dirs; i++) {
                tpool_t *t;
                char rdsk[MAXPATHLEN];
                int dfd;
                boolean_t config_failed = B_FALSE;
                DIR *dirp;

                /* use realpath to normalize the path */
                if (realpath(dir[i], path) == NULL) {
                        (void) zfs_error_fmt(hdl, EZFS_BADPATH,
                            dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
                        goto error;
                }
                end = &path[strlen(path)];
                *end++ = '/';
                *end = 0;
                pathleft = &path[sizeof (path)] - end;

#ifdef illumos
                /*
                 * Using raw devices instead of block devices when we're
                 * reading the labels skips a bunch of slow operations during
                 * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
                 */
                if (strcmp(path, ZFS_DISK_ROOTD) == 0)
                        (void) strlcpy(rdsk, ZFS_RDISK_ROOTD, sizeof (rdsk));
                else
#endif
                        (void) strlcpy(rdsk, path, sizeof (rdsk));

                if ((dfd = open64(rdsk, O_RDONLY)) < 0 ||
                    (dirp = fdopendir(dfd)) == NULL) {
                        if (dfd >= 0)
                                (void) close(dfd);
                        zfs_error_aux(hdl, strerror(errno));
                        (void) zfs_error_fmt(hdl, EZFS_BADPATH,
                            dgettext(TEXT_DOMAIN, "cannot open '%s'"),
                            rdsk);
                        goto error;
                }

                avl_create(&slice_cache, slice_cache_compare,
                    sizeof (rdsk_node_t), offsetof(rdsk_node_t, rn_node));

                if (strcmp(rdsk, "/dev/") == 0) {
                        struct gmesh mesh;
                        struct gclass *mp;
                        struct ggeom *gp;
                        struct gprovider *pp;

                        errno = geom_gettree(&mesh);
                        if (errno != 0) {
                                zfs_error_aux(hdl, strerror(errno));
                                (void) zfs_error_fmt(hdl, EZFS_BADPATH,
                                    dgettext(TEXT_DOMAIN, "cannot get GEOM tree"));
                                goto error;
                        }

                        LIST_FOREACH(mp, &mesh.lg_class, lg_class) {
                                LIST_FOREACH(gp, &mp->lg_geom, lg_geom) {
                                        LIST_FOREACH(pp, &gp->lg_provider, lg_provider) {
                                                slice = zfs_alloc(hdl, sizeof (rdsk_node_t));
                                                slice->rn_name = zfs_strdup(hdl, pp->lg_name);
                                                slice->rn_avl = &slice_cache;
                                                slice->rn_dfd = dfd;
                                                slice->rn_hdl = hdl;
                                                slice->rn_nozpool = B_FALSE;
                                                avl_add(&slice_cache, slice);
                                        }
                                }
                        }

                        geom_deletetree(&mesh);
                        goto skipdir;
                }

                /*
                 * This is not MT-safe, but we have no MT consumers of libzfs
                 */
                while ((dp = readdir64(dirp)) != NULL) {
                        const char *name = dp->d_name;
                        if (name[0] == '.' &&
                            (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
                                continue;

                        slice = zfs_alloc(hdl, sizeof (rdsk_node_t));
                        slice->rn_name = zfs_strdup(hdl, name);
                        slice->rn_avl = &slice_cache;
                        slice->rn_dfd = dfd;
                        slice->rn_hdl = hdl;
                        slice->rn_nozpool = B_FALSE;
                        avl_add(&slice_cache, slice);
                }
skipdir:
                /*
                 * Create a thread pool to do all of this in parallel;
                 * rn_nozpool is not protected, so this is racy in that
                 * multiple tasks could decide that the same slice cannot
                 * hold a zpool, which is benign.  Also choose double the
                 * number of processors; we hold a lot of locks in the
                 * kernel, so going beyond this doesn't buy us much.
                 */
                t = tpool_create(1, 2 * sysconf(_SC_NPROCESSORS_ONLN),
                    0, NULL);
                for (slice = avl_first(&slice_cache); slice;
                    (slice = avl_walk(&slice_cache, slice,
                    AVL_AFTER)))
                        (void) tpool_dispatch(t, zpool_open_func, slice);
                tpool_wait(t);
                tpool_destroy(t);

                cookie = NULL;
                while ((slice = avl_destroy_nodes(&slice_cache,
                    &cookie)) != NULL) {
                        if (slice->rn_config != NULL && !config_failed) {
                                nvlist_t *config = slice->rn_config;
                                boolean_t matched = B_TRUE;

                                if (iarg->poolname != NULL) {
                                        char *pname;

                                        matched = nvlist_lookup_string(config,
                                            ZPOOL_CONFIG_POOL_NAME,
                                            &pname) == 0 &&
                                            strcmp(iarg->poolname, pname) == 0;
                                } else if (iarg->guid != 0) {
                                        uint64_t this_guid;

                                        matched = nvlist_lookup_uint64(config,
                                            ZPOOL_CONFIG_POOL_GUID,
                                            &this_guid) == 0 &&
                                            iarg->guid == this_guid;
                                }
                                if (!matched) {
                                        nvlist_free(config);
                                } else {
                                        /*
                                         * use the non-raw path for the config
                                         */
                                        (void) strlcpy(end, slice->rn_name,
                                            pathleft);
                                        if (add_config(hdl, &pools, path,
                                            config) != 0)
                                                config_failed = B_TRUE;
                                }
                        }
                        free(slice->rn_name);
                        free(slice);
                }
                avl_destroy(&slice_cache);

                (void) closedir(dirp);

                if (config_failed)
                        goto error;
        }

        ret = get_configs(hdl, &pools, iarg->can_be_active);

error:
        for (pe = pools.pools; pe != NULL; pe = penext) {
                penext = pe->pe_next;
                for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
                        venext = ve->ve_next;
                        for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
                                cenext = ce->ce_next;
                                nvlist_free(ce->ce_config);
                                free(ce);
                        }
                        free(ve);
                }
                free(pe);
        }

        for (ne = pools.names; ne != NULL; ne = nenext) {
                nenext = ne->ne_next;
                free(ne->ne_name);
                free(ne);
        }

        return (ret);
}
1437
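/*
 * Convenience wrapper: scan the given list of directories or device paths
 * (argv) for ZFS labels and return the importable pool configurations found
 * there.
 */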
1438 nvlist_t *
1439 zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv)
1440 {
1441         importargs_t iarg = { 0 };
1442
1443         iarg.paths = argc;
1444         iarg.path = argv;
1445
1446         return (zpool_find_import_impl(hdl, &iarg));
1447 }
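
/*
 * Example usage (an illustrative sketch only; "g_zfs" stands for a
 * hypothetical handle obtained from libzfs_init(), and error handling is
 * elided):
 *
 *	char *dirs[] = { "/dev" };
 *	nvlist_t *pools = zpool_find_import(g_zfs, 1, dirs);
 *	nvpair_t *elem = NULL;
 *
 *	while ((elem = nvlist_next_nvpair(pools, elem)) != NULL)
 *		(void) printf("found pool: %s\n", nvpair_name(elem));
 *	nvlist_free(pools);
 */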
1448
1449 /*
1450  * Given a cache file, return its contents as a list of importable pools.
1451  * The caller may supply poolname or guid (but not both) to restrict the
1452  * search to a specific pool.
1453  */
1454 nvlist_t *
1455 zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
1456     char *poolname, uint64_t guid)
1457 {
1458         char *buf;
1459         int fd;
1460         struct stat64 statbuf;
1461         nvlist_t *raw, *src, *dst;
1462         nvlist_t *pools;
1463         nvpair_t *elem;
1464         char *name;
1465         uint64_t this_guid;
1466         boolean_t active;
1467
1468         verify(poolname == NULL || guid == 0);
1469
1470         if ((fd = open(cachefile, O_RDONLY)) < 0) {
1471                 zfs_error_aux(hdl, "%s", strerror(errno));
1472                 (void) zfs_error(hdl, EZFS_BADCACHE,
1473                     dgettext(TEXT_DOMAIN, "failed to open cache file"));
1474                 return (NULL);
1475         }
1476
1477         if (fstat64(fd, &statbuf) != 0) {
1478                 zfs_error_aux(hdl, "%s", strerror(errno));
1479                 (void) close(fd);
1480                 (void) zfs_error(hdl, EZFS_BADCACHE,
1481                     dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
1482                 return (NULL);
1483         }
1484
1485         if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
1486                 (void) close(fd);
1487                 return (NULL);
1488         }
1489
1490         if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
1491                 (void) close(fd);
1492                 free(buf);
1493                 (void) zfs_error(hdl, EZFS_BADCACHE,
1494                     dgettext(TEXT_DOMAIN,
1495                     "failed to read cache file contents"));
1496                 return (NULL);
1497         }
1498
1499         (void) close(fd);
1500
1501         if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
1502                 free(buf);
1503                 (void) zfs_error(hdl, EZFS_BADCACHE,
1504                     dgettext(TEXT_DOMAIN,
1505                     "invalid or corrupt cache file contents"));
1506                 return (NULL);
1507         }
1508
1509         free(buf);
1510
1511         /*
1512          * Walk the cached configs, checking each pool's current in-core
1513          * state and refreshing its configuration.
1514          */
1515         if (nvlist_alloc(&pools, 0, 0) != 0) {
1516                 (void) no_memory(hdl);
1517                 nvlist_free(raw);
1518                 return (NULL);
1519         }
1520
1521         elem = NULL;
1522         while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
1523                 src = fnvpair_value_nvlist(elem);
1524
1525                 name = fnvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME);
1526                 if (poolname != NULL && strcmp(poolname, name) != 0)
1527                         continue;
1528
1529                 this_guid = fnvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID);
1530                 if (guid != 0 && guid != this_guid)
1531                         continue;
1532
1533                 if (pool_active(hdl, name, this_guid, &active) != 0) {
1534                         nvlist_free(raw);
1535                         nvlist_free(pools);
1536                         return (NULL);
1537                 }
1538
1539                 if (active)
1540                         continue;
1541
1542                 if ((dst = refresh_config(hdl, src)) == NULL) {
1543                         nvlist_free(raw);
1544                         nvlist_free(pools);
1545                         return (NULL);
1546                 }
1547
1548                 if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
1549                         (void) no_memory(hdl);
1550                         nvlist_free(dst);
1551                         nvlist_free(raw);
1552                         nvlist_free(pools);
1553                         return (NULL);
1554                 }
1555                 nvlist_free(dst);
1556         }
1557
1558         nvlist_free(raw);
1559         return (pools);
1560 }
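
/*
 * Example usage (an illustrative sketch; "g_zfs" is a hypothetical handle
 * and the path shown is FreeBSD's customary cache file location):
 *
 *	nvlist_t *pools = zpool_find_import_cached(g_zfs,
 *	    "/boot/zfs/zpool.cache", NULL, 0);
 *
 * Passing a pool name (or a nonzero guid, but not both) restricts the
 * result to that single pool.
 */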
1561
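/*
 * zpool_iter() callback: report (as nonzero) whether the currently imported
 * pool "zhp" matches the name or guid recorded in the importargs_t passed
 * via "data".  The handle is closed either way.
 */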
1562 static int
1563 name_or_guid_exists(zpool_handle_t *zhp, void *data)
1564 {
1565         importargs_t *import = data;
1566         int found = 0;
1567
1568         if (import->poolname != NULL) {
1569                 char *pool_name;
1570
1571                 verify(nvlist_lookup_string(zhp->zpool_config,
1572                     ZPOOL_CONFIG_POOL_NAME, &pool_name) == 0);
1573                 if (strcmp(pool_name, import->poolname) == 0)
1574                         found = 1;
1575         } else {
1576                 uint64_t pool_guid;
1577
1578                 verify(nvlist_lookup_uint64(zhp->zpool_config,
1579                     ZPOOL_CONFIG_POOL_GUID, &pool_guid) == 0);
1580                 if (pool_guid == import->guid)
1581                         found = 1;
1582         }
1583
1584         zpool_close(zhp);
1585         return (found);
1586 }
1587
1588 nvlist_t *
1589 zpool_search_import(libzfs_handle_t *hdl, importargs_t *import)
1590 {
1591         verify(import->poolname == NULL || import->guid == 0);
1592
1593         if (import->unique)
1594                 import->exists = zpool_iter(hdl, name_or_guid_exists, import);
1595
1596         if (import->cachefile != NULL)
1597                 return (zpool_find_import_cached(hdl, import->cachefile,
1598                     import->poolname, import->guid));
1599
1600         return (zpool_find_import_impl(hdl, import));
1601 }
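
/*
 * Example usage (an illustrative sketch; unset fields are zeroed and
 * "g_zfs" is a hypothetical handle):
 *
 *	importargs_t args = { 0 };
 *	nvlist_t *pools;
 *
 *	args.poolname = "tank";
 *	args.unique = B_TRUE;
 *	pools = zpool_search_import(g_zfs, &args);
 *
 * On return, args.exists indicates whether a pool with that name is already
 * imported.
 */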
1602
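/*
 * Returns B_TRUE if the given guid matches the vdev "nv" itself or any vdev
 * below it in the tree.
 */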
1603 boolean_t
1604 find_guid(nvlist_t *nv, uint64_t guid)
1605 {
1606         uint64_t tmp;
1607         nvlist_t **child;
1608         uint_t c, children;
1609
1610         verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
1611         if (tmp == guid)
1612                 return (B_TRUE);
1613
1614         if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1615             &child, &children) == 0) {
1616                 for (c = 0; c < children; c++)
1617                         if (find_guid(child[c], guid))
1618                                 return (B_TRUE);
1619         }
1620
1621         return (B_FALSE);
1622 }
1623
1624 typedef struct aux_cbdata {
1625         const char      *cb_type;
1626         uint64_t        cb_guid;
1627         zpool_handle_t  *cb_zhp;
1628 } aux_cbdata_t;
1629
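/*
 * zpool_iter() callback: search the pool's auxiliary vdev list of type
 * cb_type (spares or l2cache) for a vdev whose guid matches cb_guid.  On a
 * match, the open handle is saved in cb_zhp and iteration stops; otherwise
 * the handle is closed.
 */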
1630 static int
1631 find_aux(zpool_handle_t *zhp, void *data)
1632 {
1633         aux_cbdata_t *cbp = data;
1634         nvlist_t **list;
1635         uint_t i, count;
1636         uint64_t guid;
1637         nvlist_t *nvroot;
1638
1639         verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1640             &nvroot) == 0);
1641
1642         if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
1643             &list, &count) == 0) {
1644                 for (i = 0; i < count; i++) {
1645                         verify(nvlist_lookup_uint64(list[i],
1646                             ZPOOL_CONFIG_GUID, &guid) == 0);
1647                         if (guid == cbp->cb_guid) {
1648                                 cbp->cb_zhp = zhp;
1649                                 return (1);
1650                         }
1651                 }
1652         }
1653
1654         zpool_close(zhp);
1655         return (0);
1656 }
1657
1658 /*
1659  * Determines if the device is in use by a pool.  If so, *inuse is set and
1660  * the state and name of the pool are returned; the name string is allocated
1661  * and must be freed by the caller.  Returns 0 on success, -1 on error.
1662  */
1663 int
1664 zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
1665     boolean_t *inuse)
1666 {
1667         nvlist_t *config;
1668         char *name;
1669         boolean_t ret;
1670         uint64_t guid, vdev_guid;
1671         zpool_handle_t *zhp;
1672         nvlist_t *pool_config;
1673         uint64_t stateval, isspare;
1674         aux_cbdata_t cb = { 0 };
1675         boolean_t isactive;
1676
1677         *inuse = B_FALSE;
1678
1679         if (zpool_read_label(fd, &config) != 0 && errno == ENOMEM) {
1680                 (void) no_memory(hdl);
1681                 return (-1);
1682         }
1683
1684         if (config == NULL)
1685                 return (0);
1686
1687         verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
1688             &stateval) == 0);
1689         verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
1690             &vdev_guid) == 0);
1691
1692         if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
1693                 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1694                     &name) == 0);
1695                 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1696                     &guid) == 0);
1697         }
1698
1699         switch (stateval) {
1700         case POOL_STATE_EXPORTED:
1701                 /*
1702                  * A pool with an exported state may in fact be imported
1703                  * read-only, so check the in-core state to see if it's
1704                  * active and imported read-only.  If it is, set
1705                  * its state to active.
1706                  */
1707                 if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
1708                     (zhp = zpool_open_canfail(hdl, name)) != NULL) {
1709                         if (zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
1710                                 stateval = POOL_STATE_ACTIVE;
1711
1712                         /*
1713                          * All we needed the zpool handle for is the
1714                          * readonly prop check.
1715                          */
1716                         zpool_close(zhp);
1717                 }
1718
1719                 ret = B_TRUE;
1720                 break;
1721
1722         case POOL_STATE_ACTIVE:
1723                 /*
1724                  * For an active pool, we have to determine if it's really part
1725                  * of a currently active pool (in which case the pool will exist
1726                  * and the guid will be the same), or whether it's part of an
1727                  * active pool that was disconnected without being explicitly
1728                  * exported.
1729                  */
1730                 if (pool_active(hdl, name, guid, &isactive) != 0) {
1731                         nvlist_free(config);
1732                         return (-1);
1733                 }
1734
1735                 if (isactive) {
1736                         /*
1737                          * Because the device may have been removed while
1738                          * offlined, we only report it as active if the vdev is
1739                          * still present in the config.  Otherwise, pretend like
1740                          * it's not in use.
1741                          */
1742                         if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
1743                             (pool_config = zpool_get_config(zhp, NULL))
1744                             != NULL) {
1745                                 nvlist_t *nvroot;
1746
1747                                 verify(nvlist_lookup_nvlist(pool_config,
1748                                     ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1749                                 ret = find_guid(nvroot, vdev_guid);
1750                         } else {
1751                                 ret = B_FALSE;
1752                         }
1753
1754                         /*
1755                          * If this is an active spare within another pool, we
1756                          * treat it like an unused hot spare.  This allows the
1757                          * user to create a pool with a hot spare that is
1758                          * currently in use within another pool.  Since we
1759                          * return B_TRUE, libdiskmgt will continue to prevent
1760                          * generic consumers from using the device.
1761                          */
1762                         if (ret && nvlist_lookup_uint64(config,
1763                             ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
1764                                 stateval = POOL_STATE_SPARE;
1765
1766                         if (zhp != NULL)
1767                                 zpool_close(zhp);
1768                 } else {
1769                         stateval = POOL_STATE_POTENTIALLY_ACTIVE;
1770                         ret = B_TRUE;
1771                 }
1772                 break;
1773
1774         case POOL_STATE_SPARE:
1775                 /*
1776                  * For a hot spare, it can be either definitively in use, or
1777                  * potentially active.  To determine if it's in use, we iterate
1778                  * over all pools in the system and search for one with a spare
1779                  * with a matching guid.
1780                  *
1781                  * Due to the shared nature of spares, we don't actually report
1782                  * the potentially active case as in use.  This means the user
1783                  * can freely create pools on the hot spares of exported pools;
1784                  * reporting those as in use would complicate the code, and we
1785                  * end up having to deal with this case anyway.
1786                  */
1787                 cb.cb_zhp = NULL;
1788                 cb.cb_guid = vdev_guid;
1789                 cb.cb_type = ZPOOL_CONFIG_SPARES;
1790                 if (zpool_iter(hdl, find_aux, &cb) == 1) {
1791                         name = (char *)zpool_get_name(cb.cb_zhp);
1792                         ret = B_TRUE;
1793                 } else {
1794                         ret = B_FALSE;
1795                 }
1796                 break;
1797
1798         case POOL_STATE_L2CACHE:
1799
1800                 /*
1801                  * Check if any pool is currently using this l2cache device.
1802                  */
1803                 cb.cb_zhp = NULL;
1804                 cb.cb_guid = vdev_guid;
1805                 cb.cb_type = ZPOOL_CONFIG_L2CACHE;
1806                 if (zpool_iter(hdl, find_aux, &cb) == 1) {
1807                         name = (char *)zpool_get_name(cb.cb_zhp);
1808                         ret = B_TRUE;
1809                 } else {
1810                         ret = B_FALSE;
1811                 }
1812                 break;
1813
1814         default:
1815                 ret = B_FALSE;
1816         }
1817
1819         if (ret) {
1820                 if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
1821                         if (cb.cb_zhp)
1822                                 zpool_close(cb.cb_zhp);
1823                         nvlist_free(config);
1824                         return (-1);
1825                 }
1826                 *state = (pool_state_t)stateval;
1827         }
1828
1829         if (cb.cb_zhp)
1830                 zpool_close(cb.cb_zhp);
1831
1832         nvlist_free(config);
1833         *inuse = ret;
1834         return (0);
1835 }
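
/*
 * Example usage (an illustrative sketch; the device path and "g_zfs" handle
 * are hypothetical, and error handling is elided):
 *
 *	pool_state_t state;
 *	char *name;
 *	boolean_t inuse;
 *	int fd = open("/dev/ada0p3", O_RDONLY);
 *
 *	if (zpool_in_use(g_zfs, fd, &state, &name, &inuse) == 0 && inuse) {
 *		(void) printf("device belongs to pool %s\n", name);
 *		free(name);
 *	}
 *	(void) close(fd);
 */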