/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
 */

/*
 * Pool import support functions.
 *
 * To import a pool, we rely on reading the configuration information from the
 * ZFS label of each device.  If we successfully read the label, then we
 * organize the configuration information in the following hierarchy:
 *
 *      pool guid -> toplevel vdev guid -> label txg
 *
 * Duplicate entries matching this same tuple will be discarded.  Once we have
 * examined every device, we pick the best label txg config for each toplevel
 * vdev.  We then arrange these toplevel vdevs into a complete pool config, and
 * update any paths that have changed.  Finally, we attempt to import the pool
 * using our derived config, and record the results.
 */

#include <ctype.h>
#include <devid.h>
#include <dirent.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <thread_pool.h>
#include <libgeom.h>

#include <sys/vdev_impl.h>

#include "libzfs.h"
#include "libzfs_impl.h"

/*
 * Intermediate structures used to gather configuration information.
 */
typedef struct config_entry {
        uint64_t                ce_txg;
        nvlist_t                *ce_config;
        struct config_entry     *ce_next;
} config_entry_t;

typedef struct vdev_entry {
        uint64_t                ve_guid;
        config_entry_t          *ve_configs;
        struct vdev_entry       *ve_next;
} vdev_entry_t;

typedef struct pool_entry {
        uint64_t                pe_guid;
        vdev_entry_t            *pe_vdevs;
        struct pool_entry       *pe_next;
} pool_entry_t;

typedef struct name_entry {
        char                    *ne_name;
        uint64_t                ne_guid;
        struct name_entry       *ne_next;
} name_entry_t;

typedef struct pool_list {
        pool_entry_t            *pools;
        name_entry_t            *names;
} pool_list_t;
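
/*
 * The lists above hang together as follows: a pool_list_t holds a chain of
 * pool_entry_t (one per pool guid seen), each of which holds a chain of
 * vdev_entry_t (one per toplevel vdev guid), each of which in turn holds a
 * chain of config_entry_t (one per distinct label txg).  Alongside this, a
 * flat name_entry_t list records every vdev guid -> device path mapping
 * observed, which fix_paths() later uses to repair stale paths.
 */
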
93
94 static char *
95 get_devid(const char *path)
96 {
97 #ifdef have_devid
98         int fd;
99         ddi_devid_t devid;
100         char *minor, *ret;
101
102         if ((fd = open(path, O_RDONLY)) < 0)
103                 return (NULL);
104
105         minor = NULL;
106         ret = NULL;
107         if (devid_get(fd, &devid) == 0) {
108                 if (devid_get_minor_name(fd, &minor) == 0)
109                         ret = devid_str_encode(devid, minor);
110                 if (minor != NULL)
111                         devid_str_free(minor);
112                 devid_free(devid);
113         }
114         (void) close(fd);
115
116         return (ret);
117 #else
118         return (NULL);
119 #endif
120 }
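
/*
 * Note: 'have_devid' is not defined in the FreeBSD build, so the devid
 * branch above is compiled out and get_devid() always returns NULL; the
 * fix_paths() caller below copes with that by simply dropping the devid
 * from the config.
 */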

/*
 * Go through and fix up any path and/or devid information for the given vdev
 * configuration.
 */
static int
fix_paths(nvlist_t *nv, name_entry_t *names)
{
        nvlist_t **child;
        uint_t c, children;
        uint64_t guid;
        name_entry_t *ne, *best;
        char *path, *devid;
        int matched;

        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
            &child, &children) == 0) {
                for (c = 0; c < children; c++)
                        if (fix_paths(child[c], names) != 0)
                                return (-1);
                return (0);
        }

        /*
         * This is a leaf (file or disk) vdev.  In either case, go through
         * the name list and see if we find a matching guid.  If so, replace
         * the path and see if we can calculate a new devid.
         *
         * There may be multiple names associated with a particular guid, in
         * which case we have overlapping slices or multiple paths to the same
         * disk.  If this is the case, then we want to pick the path that is
         * the most similar to the original, where "most similar" is the number
         * of matching characters starting from the end of the path.  This will
         * preserve slice numbers even if the disks have been reorganized, and
         * will also catch preferred disk names if multiple paths exist.
         */
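        /*
         * For example (hypothetical device names): comparing a recorded path
         * of /dev/da0s1 against a candidate /dev/ada0s1, the two strings
         * match for 5 characters from the end ('1', 's', '0', 'a', 'd')
         * before diverging, so that candidate scores count == 5.
         */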
        verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
        if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
                path = NULL;

        matched = 0;
        best = NULL;
        for (ne = names; ne != NULL; ne = ne->ne_next) {
                if (ne->ne_guid == guid) {
                        const char *src, *dst;
                        int count;

                        if (path == NULL) {
                                best = ne;
                                break;
                        }

                        src = ne->ne_name + strlen(ne->ne_name) - 1;
                        dst = path + strlen(path) - 1;
                        for (count = 0; src >= ne->ne_name && dst >= path;
                            src--, dst--, count++)
                                if (*src != *dst)
                                        break;

                        /*
                         * At this point, 'count' is the number of characters
                         * matched from the end.
                         */
                        if (count > matched || best == NULL) {
                                best = ne;
                                matched = count;
                        }
                }
        }

        if (best == NULL)
                return (0);

        if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
                return (-1);

        if ((devid = get_devid(best->ne_name)) == NULL) {
                (void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
        } else {
                if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0)
                        return (-1);
                devid_str_free(devid);
        }

        return (0);
}

/*
 * Add the given configuration to the list of known devices.
 */
static int
add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
    nvlist_t *config)
{
        uint64_t pool_guid, vdev_guid, top_guid, txg, state;
        pool_entry_t *pe;
        vdev_entry_t *ve;
        config_entry_t *ce;
        name_entry_t *ne;

        /*
         * If this is a hot spare not currently in use or level 2 cache
         * device, add it to the list of names to translate, but don't do
         * anything else.
         */
        if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
            &state) == 0 &&
            (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
                if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
                        return (-1);

                if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
                        free(ne);
                        return (-1);
                }
                ne->ne_guid = vdev_guid;
                ne->ne_next = pl->names;
                pl->names = ne;
                return (0);
        }

        /*
         * If we have a valid config but cannot read any of these fields, then
         * it means we have a half-initialized label.  In vdev_label_init()
         * we write a label with txg == 0 so that we can identify the device
         * in case the user refers to the same disk later on.  If we fail to
         * create the pool, we'll be left with a label in this state
         * which should not be considered part of a valid pool.
         */
        if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
            &pool_guid) != 0 ||
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
            &vdev_guid) != 0 ||
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
            &top_guid) != 0 ||
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
            &txg) != 0 || txg == 0) {
                nvlist_free(config);
                return (0);
        }

        /*
         * First, see if we know about this pool.  If not, then add it to the
         * list of known pools.
         */
        for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
                if (pe->pe_guid == pool_guid)
                        break;
        }

        if (pe == NULL) {
                if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
                        nvlist_free(config);
                        return (-1);
                }
                pe->pe_guid = pool_guid;
                pe->pe_next = pl->pools;
                pl->pools = pe;
        }

        /*
         * Second, see if we know about this toplevel vdev.  Add it if it's
         * missing.
         */
        for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
                if (ve->ve_guid == top_guid)
                        break;
        }

        if (ve == NULL) {
                if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
                        nvlist_free(config);
                        return (-1);
                }
                ve->ve_guid = top_guid;
                ve->ve_next = pe->pe_vdevs;
                pe->pe_vdevs = ve;
        }

        /*
         * Third, see if we have a config with a matching transaction group.  If
         * so, then we do nothing.  Otherwise, add it to the list of known
         * configs.
         */
        for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
                if (ce->ce_txg == txg)
                        break;
        }

        if (ce == NULL) {
                if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) {
                        nvlist_free(config);
                        return (-1);
                }
                ce->ce_txg = txg;
                ce->ce_config = config;
                ce->ce_next = ve->ve_configs;
                ve->ve_configs = ce;
        } else {
                nvlist_free(config);
        }

        /*
         * At this point we've successfully added our config to the list of
         * known configs.  The last thing to do is add the vdev guid -> path
         * mappings so that we can fix up the configuration as necessary before
         * doing the import.
         */
        if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
                return (-1);

        if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
                free(ne);
                return (-1);
        }

        ne->ne_guid = vdev_guid;
        ne->ne_next = pl->names;
        pl->names = ne;

        return (0);
}

/*
 * Determine whether the pool 'name' is currently imported (active) and, if
 * so, whether its GUID matches the given GUID; the answer is returned in
 * 'isactive'.  Returns 0 on success, or -1 if the pool could not be opened.
 */
static int
pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
    boolean_t *isactive)
{
        zpool_handle_t *zhp;
        uint64_t theguid;

        if (zpool_open_silent(hdl, name, &zhp) != 0)
                return (-1);

        if (zhp == NULL) {
                *isactive = B_FALSE;
                return (0);
        }

        verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
            &theguid) == 0);

        zpool_close(zhp);

        *isactive = (theguid == guid);
        return (0);
}

static nvlist_t *
refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
{
        nvlist_t *nvl;
        zfs_cmd_t zc = { 0 };
        int err;

        if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
                return (NULL);

        if (zcmd_alloc_dst_nvlist(hdl, &zc,
            zc.zc_nvlist_conf_size * 2) != 0) {
                zcmd_free_nvlists(&zc);
                return (NULL);
        }

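        /*
         * ZFS_IOC_POOL_TRYIMPORT asks the kernel to assemble (but not
         * actually import) a pool from this config; if the reply doesn't
         * fit in the destination buffer the ioctl fails with ENOMEM, so
         * keep growing the buffer and retrying until it fits.
         */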
        while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
            &zc)) != 0 && errno == ENOMEM) {
                if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
                        zcmd_free_nvlists(&zc);
                        return (NULL);
                }
        }

        if (err) {
                zcmd_free_nvlists(&zc);
                return (NULL);
        }

        if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
                zcmd_free_nvlists(&zc);
                return (NULL);
        }

        zcmd_free_nvlists(&zc);
        return (nvl);
}

/*
 * Determine if the vdev id is a hole in the namespace.
 */
boolean_t
vdev_is_hole(uint64_t *hole_array, uint_t holes, uint_t id)
{
        for (int c = 0; c < holes; c++) {

                /* Top-level is a hole */
                if (hole_array[c] == id)
                        return (B_TRUE);
        }
        return (B_FALSE);
}

/*
 * Convert our list of pools into the definitive set of configurations.  We
 * start by picking the best config for each toplevel vdev.  Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool.  We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
{
        pool_entry_t *pe;
        vdev_entry_t *ve;
        config_entry_t *ce;
        nvlist_t *ret = NULL, *config = NULL, *tmp, *nvtop, *nvroot;
        nvlist_t **spares, **l2cache;
        uint_t i, nspares, nl2cache;
        boolean_t config_seen;
        uint64_t best_txg;
        char *name, *hostname;
        uint64_t guid;
        uint_t children = 0;
        nvlist_t **child = NULL;
        uint_t holes;
        uint64_t *hole_array, max_id;
        uint_t c;
        boolean_t isactive;
        uint64_t hostid;
        nvlist_t *nvl;
        boolean_t found_one = B_FALSE;
        boolean_t valid_top_config = B_FALSE;

        if (nvlist_alloc(&ret, 0, 0) != 0)
                goto nomem;

        for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
                uint64_t id, max_txg = 0;

                if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
                        goto nomem;
                config_seen = B_FALSE;

                /*
                 * Iterate over all toplevel vdevs.  Grab the pool configuration
                 * from the first one we find, and then go through the rest and
                 * add them as necessary to the 'vdevs' member of the config.
                 */
                for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {

                        /*
                         * Determine the best configuration for this vdev by
                         * selecting the config with the latest transaction
                         * group.
                         */
                        best_txg = 0;
                        for (ce = ve->ve_configs; ce != NULL;
                            ce = ce->ce_next) {

                                if (ce->ce_txg > best_txg) {
                                        tmp = ce->ce_config;
                                        best_txg = ce->ce_txg;
                                }
                        }

                        /*
                         * We rely on the fact that the max txg for the
                         * pool will contain the most up-to-date information
                         * about the valid top-levels in the vdev namespace.
                         */
                        if (best_txg > max_txg) {
                                (void) nvlist_remove(config,
                                    ZPOOL_CONFIG_VDEV_CHILDREN,
                                    DATA_TYPE_UINT64);
                                (void) nvlist_remove(config,
                                    ZPOOL_CONFIG_HOLE_ARRAY,
                                    DATA_TYPE_UINT64_ARRAY);

                                max_txg = best_txg;
                                hole_array = NULL;
                                holes = 0;
                                max_id = 0;
                                valid_top_config = B_FALSE;

                                if (nvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_VDEV_CHILDREN, &max_id) == 0) {
                                        verify(nvlist_add_uint64(config,
                                            ZPOOL_CONFIG_VDEV_CHILDREN,
                                            max_id) == 0);
                                        valid_top_config = B_TRUE;
                                }

                                if (nvlist_lookup_uint64_array(tmp,
                                    ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
                                    &holes) == 0) {
                                        verify(nvlist_add_uint64_array(config,
                                            ZPOOL_CONFIG_HOLE_ARRAY,
                                            hole_array, holes) == 0);
                                }
                        }

                        if (!config_seen) {
                                /*
                                 * Copy the relevant pieces of data to the pool
                                 * configuration:
                                 *
                                 *      version
                                 *      pool guid
                                 *      name
                                 *      comment (if available)
                                 *      pool state
                                 *      hostid (if available)
                                 *      hostname (if available)
                                 */
                                uint64_t state, version;
                                char *comment = NULL;

                                version = fnvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_VERSION);
                                fnvlist_add_uint64(config,
                                    ZPOOL_CONFIG_VERSION, version);
                                guid = fnvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_POOL_GUID);
                                fnvlist_add_uint64(config,
                                    ZPOOL_CONFIG_POOL_GUID, guid);
                                name = fnvlist_lookup_string(tmp,
                                    ZPOOL_CONFIG_POOL_NAME);
                                fnvlist_add_string(config,
                                    ZPOOL_CONFIG_POOL_NAME, name);

                                if (nvlist_lookup_string(tmp,
                                    ZPOOL_CONFIG_COMMENT, &comment) == 0)
                                        fnvlist_add_string(config,
                                            ZPOOL_CONFIG_COMMENT, comment);

                                state = fnvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_POOL_STATE);
                                fnvlist_add_uint64(config,
                                    ZPOOL_CONFIG_POOL_STATE, state);

                                hostid = 0;
                                if (nvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
                                        fnvlist_add_uint64(config,
                                            ZPOOL_CONFIG_HOSTID, hostid);
                                        hostname = fnvlist_lookup_string(tmp,
                                            ZPOOL_CONFIG_HOSTNAME);
                                        fnvlist_add_string(config,
                                            ZPOOL_CONFIG_HOSTNAME, hostname);
                                }

                                config_seen = B_TRUE;
                        }

                        /*
                         * Add this top-level vdev to the child array.
                         */
                        verify(nvlist_lookup_nvlist(tmp,
                            ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
                        verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
                            &id) == 0);

                        if (id >= children) {
                                nvlist_t **newchild;

                                newchild = zfs_alloc(hdl, (id + 1) *
                                    sizeof (nvlist_t *));
                                if (newchild == NULL)
                                        goto nomem;

                                for (c = 0; c < children; c++)
                                        newchild[c] = child[c];

                                free(child);
                                child = newchild;
                                children = id + 1;
                        }
                        if (nvlist_dup(nvtop, &child[id], 0) != 0)
                                goto nomem;

                }

                /*
                 * If we have information about all the top-levels then
                 * clean up the nvlist which we've constructed. This
                 * means removing any extraneous devices that are
                 * beyond the valid range or adding devices to the end
                 * of our array which appear to be missing.
                 */
                if (valid_top_config) {
                        if (max_id < children) {
                                for (c = max_id; c < children; c++)
                                        nvlist_free(child[c]);
                                children = max_id;
                        } else if (max_id > children) {
                                nvlist_t **newchild;

                                newchild = zfs_alloc(hdl, (max_id) *
                                    sizeof (nvlist_t *));
                                if (newchild == NULL)
                                        goto nomem;

                                for (c = 0; c < children; c++)
                                        newchild[c] = child[c];

                                free(child);
                                child = newchild;
                                children = max_id;
                        }
                }

                verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
                    &guid) == 0);

                /*
                 * The vdev namespace may contain holes as a result of
                 * device removal. We must add them back into the vdev
                 * tree before we process any missing devices.
                 */
                if (holes > 0) {
                        ASSERT(valid_top_config);

                        for (c = 0; c < children; c++) {
                                nvlist_t *holey;

                                if (child[c] != NULL ||
                                    !vdev_is_hole(hole_array, holes, c))
                                        continue;

                                if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
                                    0) != 0)
                                        goto nomem;

                                /*
                                 * Holes in the namespace are treated as
                                 * "hole" top-level vdevs and have a
                                 * special flag set on them.
                                 */
                                if (nvlist_add_string(holey,
                                    ZPOOL_CONFIG_TYPE,
                                    VDEV_TYPE_HOLE) != 0 ||
                                    nvlist_add_uint64(holey,
                                    ZPOOL_CONFIG_ID, c) != 0 ||
                                    nvlist_add_uint64(holey,
                                    ZPOOL_CONFIG_GUID, 0ULL) != 0)
                                        goto nomem;
                                child[c] = holey;
                        }
                }

                /*
                 * Look for any missing top-level vdevs.  If this is the case,
                 * create a faked up 'missing' vdev as a placeholder.  We cannot
                 * simply compress the child array, because the kernel performs
                 * certain checks to make sure the vdev IDs match their location
                 * in the configuration.
                 */
                for (c = 0; c < children; c++) {
                        if (child[c] == NULL) {
                                nvlist_t *missing;
                                if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
                                    0) != 0)
                                        goto nomem;
                                if (nvlist_add_string(missing,
                                    ZPOOL_CONFIG_TYPE,
                                    VDEV_TYPE_MISSING) != 0 ||
                                    nvlist_add_uint64(missing,
                                    ZPOOL_CONFIG_ID, c) != 0 ||
                                    nvlist_add_uint64(missing,
                                    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
                                        nvlist_free(missing);
                                        goto nomem;
                                }
                                child[c] = missing;
                        }
                }

                /*
                 * Put all of this pool's top-level vdevs into a root vdev.
                 */
                if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
                        goto nomem;
                if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
                    VDEV_TYPE_ROOT) != 0 ||
                    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
                    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
                    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
                    child, children) != 0) {
                        nvlist_free(nvroot);
                        goto nomem;
                }

                for (c = 0; c < children; c++)
                        nvlist_free(child[c]);
                free(child);
                children = 0;
                child = NULL;

                /*
                 * Go through and fix up any paths and/or devids based on our
                 * known list of vdev GUID -> path mappings.
                 */
                if (fix_paths(nvroot, pl->names) != 0) {
                        nvlist_free(nvroot);
                        goto nomem;
                }

                /*
                 * Add the root vdev to this pool's configuration.
                 */
                if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
                    nvroot) != 0) {
                        nvlist_free(nvroot);
                        goto nomem;
                }
                nvlist_free(nvroot);

                /*
                 * zdb uses this path to report on active pools that were
                 * imported or created using -R.
                 */
                if (active_ok)
                        goto add_pool;

                /*
                 * Determine if this pool is currently active, in which case we
                 * can't actually import it.
                 */
                verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
                    &name) == 0);
                verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
                    &guid) == 0);

                if (pool_active(hdl, name, guid, &isactive) != 0)
                        goto error;

                if (isactive) {
                        nvlist_free(config);
                        config = NULL;
                        continue;
                }

                if ((nvl = refresh_config(hdl, config)) == NULL) {
                        nvlist_free(config);
                        config = NULL;
                        continue;
                }

                nvlist_free(config);
                config = nvl;

                /*
                 * Go through and update the paths for spares, now that we have
                 * them.
                 */
                verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
                    &nvroot) == 0);
                if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
                    &spares, &nspares) == 0) {
                        for (i = 0; i < nspares; i++) {
                                if (fix_paths(spares[i], pl->names) != 0)
                                        goto nomem;
                        }
                }

                /*
                 * Update the paths for l2cache devices.
                 */
                if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
                    &l2cache, &nl2cache) == 0) {
                        for (i = 0; i < nl2cache; i++) {
                                if (fix_paths(l2cache[i], pl->names) != 0)
                                        goto nomem;
                        }
                }

                /*
                 * Restore the original information read from the actual label.
                 */
                (void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
                    DATA_TYPE_UINT64);
                (void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
                    DATA_TYPE_STRING);
                if (hostid != 0) {
                        verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
                            hostid) == 0);
                        verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
                            hostname) == 0);
                }

add_pool:
                /*
                 * Add this pool to the list of configs.
                 */
                verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
                    &name) == 0);
                if (nvlist_add_nvlist(ret, name, config) != 0)
                        goto nomem;

                found_one = B_TRUE;
                nvlist_free(config);
                config = NULL;
        }

        if (!found_one) {
                nvlist_free(ret);
                ret = NULL;
        }

        return (ret);

nomem:
        (void) no_memory(hdl);
error:
        nvlist_free(config);
        nvlist_free(ret);
        for (c = 0; c < children; c++)
                nvlist_free(child[c]);
        free(child);

        return (NULL);
}

/*
 * Return the offset of the given label.
 */
static uint64_t
label_offset(uint64_t size, int l)
{
        ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
        return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
            0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}
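
/*
 * ZFS keeps VDEV_LABELS (four) copies of the label: labels 0 and 1 at the
 * front of the device and labels 2 and 3 at the end.  For example, assuming
 * the usual 256 KB vdev_label_t, label 3 on a device of aligned size 'size'
 * lives at 3 * 256 KB + (size - 4 * 256 KB) == size - 256 KB.
 */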

/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one.
 */
int
zpool_read_label(int fd, nvlist_t **config)
{
        struct stat64 statbuf;
        int l;
        vdev_label_t *label;
        uint64_t state, txg, size;

        *config = NULL;

        if (fstat64(fd, &statbuf) == -1)
                return (0);
        size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

        if ((label = malloc(sizeof (vdev_label_t))) == NULL)
                return (-1);

        for (l = 0; l < VDEV_LABELS; l++) {
                if (pread64(fd, label, sizeof (vdev_label_t),
                    label_offset(size, l)) != sizeof (vdev_label_t))
                        continue;

                if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
                    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
                        continue;

                if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
                    &state) != 0 || state > POOL_STATE_L2CACHE) {
                        nvlist_free(*config);
                        continue;
                }

                if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
                    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
                    &txg) != 0 || txg == 0)) {
                        nvlist_free(*config);
                        continue;
                }

                free(label);
                return (0);
        }

        free(label);
        *config = NULL;
        return (0);
}
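
/*
 * A minimal usage sketch (hypothetical caller): open the device read-only,
 * let zpool_read_label() probe all four labels, and free the result:
 *
 *      nvlist_t *config;
 *      int fd = open("/dev/da0", O_RDONLY);
 *      if (fd >= 0 && zpool_read_label(fd, &config) == 0 &&
 *          config != NULL) {
 *              ... inspect ZPOOL_CONFIG_POOL_NAME, etc. ...
 *              nvlist_free(config);
 *      }
 *
 * Note that a return of 0 with *config == NULL simply means no valid label
 * was found; -1 is returned only on allocation failure.
 */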

typedef struct rdsk_node {
        char *rn_name;
        int rn_dfd;
        libzfs_handle_t *rn_hdl;
        nvlist_t *rn_config;
        avl_tree_t *rn_avl;
        avl_node_t rn_node;
        boolean_t rn_nozpool;
} rdsk_node_t;
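
/*
 * One rdsk_node_t is allocated per candidate device and kept in an AVL tree
 * (rn_avl/rn_node) keyed by name.  zpool_open_func() runs once per node from
 * the thread pool below: it opens rn_name relative to the directory fd
 * rn_dfd, stores any label it finds in rn_config, and may set rn_nozpool on
 * sibling slices to short-circuit pointless probes.
 */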

static int
slice_cache_compare(const void *arg1, const void *arg2)
{
        const char  *nm1 = ((rdsk_node_t *)arg1)->rn_name;
        const char  *nm2 = ((rdsk_node_t *)arg2)->rn_name;
        char *nm1slice, *nm2slice;
        int rv;

        /*
         * slices zero and two are the most likely to provide results,
         * so put those first
         */
        nm1slice = strstr(nm1, "s0");
        nm2slice = strstr(nm2, "s0");
        if (nm1slice && !nm2slice) {
                return (-1);
        }
        if (!nm1slice && nm2slice) {
                return (1);
        }
        nm1slice = strstr(nm1, "s2");
        nm2slice = strstr(nm2, "s2");
        if (nm1slice && !nm2slice) {
                return (-1);
        }
        if (!nm1slice && nm2slice) {
                return (1);
        }

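        /*
         * avl_create() requires comparators to return exactly -1, 0, or 1,
         * so clamp strcmp()'s result rather than returning it directly.
         */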
        rv = strcmp(nm1, nm2);
        if (rv == 0)
                return (0);
        return (rv > 0 ? 1 : -1);
}

#ifdef sun
static void
check_one_slice(avl_tree_t *r, char *diskname, uint_t partno,
    diskaddr_t size, uint_t blksz)
{
        rdsk_node_t tmpnode;
        rdsk_node_t *node;
        char sname[MAXNAMELEN];

        tmpnode.rn_name = &sname[0];
        (void) snprintf(tmpnode.rn_name, MAXNAMELEN, "%s%u",
            diskname, partno);
        /*
         * protect against division by zero for disk labels that
         * contain a bogus sector size
         */
        if (blksz == 0)
                blksz = DEV_BSIZE;
        /* too small to contain a zpool? */
        if ((size < (SPA_MINDEVSIZE / blksz)) &&
            (node = avl_find(r, &tmpnode, NULL)))
                node->rn_nozpool = B_TRUE;
}
#endif  /* sun */

static void
nozpool_all_slices(avl_tree_t *r, const char *sname)
{
#ifdef sun
        char diskname[MAXNAMELEN];
        char *ptr;
        int i;

        (void) strncpy(diskname, sname, MAXNAMELEN);
        if (((ptr = strrchr(diskname, 's')) == NULL) &&
            ((ptr = strrchr(diskname, 'p')) == NULL))
                return;
        ptr[0] = 's';
        ptr[1] = '\0';
        for (i = 0; i < NDKMAP; i++)
                check_one_slice(r, diskname, i, 0, 1);
        ptr[0] = 'p';
        for (i = 0; i <= FD_NUMPART; i++)
                check_one_slice(r, diskname, i, 0, 1);
#endif  /* sun */
}

#ifdef sun
static void
check_slices(avl_tree_t *r, int fd, const char *sname)
{
        struct extvtoc vtoc;
        struct dk_gpt *gpt;
        char diskname[MAXNAMELEN];
        char *ptr;
        int i;

        (void) strncpy(diskname, sname, MAXNAMELEN);
        if ((ptr = strrchr(diskname, 's')) == NULL || !isdigit(ptr[1]))
                return;
        ptr[1] = '\0';

        if (read_extvtoc(fd, &vtoc) >= 0) {
                for (i = 0; i < NDKMAP; i++)
                        check_one_slice(r, diskname, i,
                            vtoc.v_part[i].p_size, vtoc.v_sectorsz);
        } else if (efi_alloc_and_read(fd, &gpt) >= 0) {
                /*
                 * on x86 we'll still have leftover links that point
                 * to slices s[9-15], so use NDKMAP instead
                 */
                for (i = 0; i < NDKMAP; i++)
                        check_one_slice(r, diskname, i,
                            gpt->efi_parts[i].p_size, gpt->efi_lbasize);
                /* nodes p[1-4] are never used with EFI labels */
                ptr[0] = 'p';
                for (i = 1; i <= FD_NUMPART; i++)
                        check_one_slice(r, diskname, i, 0, 1);
                efi_free(gpt);
        }
}
#endif  /* sun */

static void
zpool_open_func(void *arg)
{
        rdsk_node_t *rn = arg;
        struct stat64 statbuf;
        nvlist_t *config;
        int fd;

        if (rn->rn_nozpool)
                return;
        if ((fd = openat64(rn->rn_dfd, rn->rn_name, O_RDONLY)) < 0) {
                /* symlink to a device that's no longer there */
                if (errno == ENOENT)
                        nozpool_all_slices(rn->rn_avl, rn->rn_name);
                return;
        }
        /*
         * Ignore failed stats.  We only want regular
         * files, character devs and block devs.
         */
        if (fstat64(fd, &statbuf) != 0 ||
            (!S_ISREG(statbuf.st_mode) &&
            !S_ISCHR(statbuf.st_mode) &&
            !S_ISBLK(statbuf.st_mode))) {
                (void) close(fd);
                return;
        }
        /* this file is too small to hold a zpool */
#ifdef sun
        if (S_ISREG(statbuf.st_mode) &&
            statbuf.st_size < SPA_MINDEVSIZE) {
                (void) close(fd);
                return;
        } else if (!S_ISREG(statbuf.st_mode)) {
                /*
                 * Try to read the disk label first so we don't have to
                 * open a bunch of minor nodes that can't have a zpool.
                 */
                check_slices(rn->rn_avl, fd, rn->rn_name);
        }
#else   /* !sun */
        if (statbuf.st_size < SPA_MINDEVSIZE) {
                (void) close(fd);
                return;
        }
#endif  /* sun */

        if (zpool_read_label(fd, &config) != 0) {
                (void) close(fd);
                (void) no_memory(rn->rn_hdl);
                return;
        }
        (void) close(fd);

        rn->rn_config = config;
        if (config != NULL) {
                assert(rn->rn_nozpool == B_FALSE);
        }
}

/*
 * Given a file descriptor, clear (zero) the label information.  This function
 * is used in the appliance stack as part of the ZFS sysevent module and
 * to implement the "zpool labelclear" command.
 */
int
zpool_clear_label(int fd)
{
        struct stat64 statbuf;
        int l;
        vdev_label_t *label;
        uint64_t size;

        if (fstat64(fd, &statbuf) == -1)
                return (0);
        size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

        if ((label = calloc(sizeof (vdev_label_t), 1)) == NULL)
                return (-1);

        for (l = 0; l < VDEV_LABELS; l++) {
                if (pwrite64(fd, label, sizeof (vdev_label_t),
                    label_offset(size, l)) != sizeof (vdev_label_t)) {
                        free(label);
                        return (-1);
                }
        }

        free(label);
        return (0);
}
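
/*
 * Note for callers: zpool_clear_label() writes zeroed vdev_label_t buffers
 * over all four label locations but does not close fd; the caller is
 * responsible for opening the device read-write and closing it afterwards.
 */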

/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no args are
 * given (argc is 0), then the default directory (/dev) is searched.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
static nvlist_t *
zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
{
        int i, dirs = iarg->paths;
        DIR *dirp = NULL;
        struct dirent64 *dp;
        char path[MAXPATHLEN];
        char *end, **dir = iarg->path;
        size_t pathleft;
        nvlist_t *ret = NULL;
        static char *default_dir = "/dev";
        pool_list_t pools = { 0 };
        pool_entry_t *pe, *penext;
        vdev_entry_t *ve, *venext;
        config_entry_t *ce, *cenext;
        name_entry_t *ne, *nenext;
        avl_tree_t slice_cache;
        rdsk_node_t *slice;
        void *cookie;

        if (dirs == 0) {
                dirs = 1;
                dir = &default_dir;
        }

        /*
         * Go through and read the label configuration information from every
         * possible device, organizing the information according to pool GUID
         * and toplevel GUID.
         */
        for (i = 0; i < dirs; i++) {
                tpool_t *t;
                char *rdsk;
                int dfd;

                /* use realpath to normalize the path */
                if (realpath(dir[i], path) == 0) {
                        (void) zfs_error_fmt(hdl, EZFS_BADPATH,
                            dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
                        goto error;
                }
                end = &path[strlen(path)];
                *end++ = '/';
                *end = 0;
                pathleft = &path[sizeof (path)] - end;

                /*
                 * Using raw devices instead of block devices when we're
                 * reading the labels skips a bunch of slow operations during
                 * close(2) processing.  The Solaris default /dev/dsk is
                 * therefore mapped to the raw device directory, which on
                 * FreeBSD is simply /dev.
                 */
                if (strcmp(path, "/dev/dsk/") == 0)
                        rdsk = "/dev/";
                else
                        rdsk = path;

                if ((dfd = open64(rdsk, O_RDONLY)) < 0 ||
                    (dirp = fdopendir(dfd)) == NULL) {
                        zfs_error_aux(hdl, strerror(errno));
                        (void) zfs_error_fmt(hdl, EZFS_BADPATH,
                            dgettext(TEXT_DOMAIN, "cannot open '%s'"),
                            rdsk);
                        goto error;
                }

                avl_create(&slice_cache, slice_cache_compare,
                    sizeof (rdsk_node_t), offsetof(rdsk_node_t, rn_node));

                if (strcmp(rdsk, "/dev/") == 0) {
                        struct gmesh mesh;
                        struct gclass *mp;
                        struct ggeom *gp;
                        struct gprovider *pp;

                        errno = geom_gettree(&mesh);
                        if (errno != 0) {
                                zfs_error_aux(hdl, strerror(errno));
                                (void) zfs_error_fmt(hdl, EZFS_BADPATH,
                                    dgettext(TEXT_DOMAIN, "cannot get GEOM tree"));
                                goto error;
                        }

                        LIST_FOREACH(mp, &mesh.lg_class, lg_class) {
                                LIST_FOREACH(gp, &mp->lg_geom, lg_geom) {
                                        LIST_FOREACH(pp, &gp->lg_provider, lg_provider) {
                                                slice = zfs_alloc(hdl, sizeof (rdsk_node_t));
                                                slice->rn_name = zfs_strdup(hdl, pp->lg_name);
                                                slice->rn_avl = &slice_cache;
                                                slice->rn_dfd = dfd;
                                                slice->rn_hdl = hdl;
                                                slice->rn_nozpool = B_FALSE;
                                                avl_add(&slice_cache, slice);
                                        }
                                }
                        }

                        geom_deletetree(&mesh);
                        goto skipdir;
                }

                /*
                 * This is not MT-safe, but we have no MT consumers of libzfs
                 */
                while ((dp = readdir64(dirp)) != NULL) {
                        const char *name = dp->d_name;
                        if (name[0] == '.' &&
                            (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
                                continue;

                        slice = zfs_alloc(hdl, sizeof (rdsk_node_t));
                        slice->rn_name = zfs_strdup(hdl, name);
                        slice->rn_avl = &slice_cache;
                        slice->rn_dfd = dfd;
                        slice->rn_hdl = hdl;
                        slice->rn_nozpool = B_FALSE;
                        avl_add(&slice_cache, slice);
                }
skipdir:
                /*
                 * create a thread pool to do all of this in parallel;
                 * rn_nozpool is not protected, so this is racy in that
                 * multiple tasks could decide that the same slice can
                 * not hold a zpool, which is benign.  Also choose
                 * double the number of processors; we hold a lot of
                 * locks in the kernel, so going beyond this doesn't
                 * buy us much.
                 */
                t = tpool_create(1, 2 * sysconf(_SC_NPROCESSORS_ONLN),
                    0, NULL);
                for (slice = avl_first(&slice_cache); slice;
                    (slice = avl_walk(&slice_cache, slice,
                    AVL_AFTER)))
                        (void) tpool_dispatch(t, zpool_open_func, slice);
                tpool_wait(t);
                tpool_destroy(t);

                cookie = NULL;
                while ((slice = avl_destroy_nodes(&slice_cache,
                    &cookie)) != NULL) {
                        if (slice->rn_config != NULL) {
                                nvlist_t *config = slice->rn_config;
                                boolean_t matched = B_TRUE;

                                if (iarg->poolname != NULL) {
                                        char *pname;

                                        matched = nvlist_lookup_string(config,
                                            ZPOOL_CONFIG_POOL_NAME,
                                            &pname) == 0 &&
                                            strcmp(iarg->poolname, pname) == 0;
                                } else if (iarg->guid != 0) {
                                        uint64_t this_guid;

                                        matched = nvlist_lookup_uint64(config,
                                            ZPOOL_CONFIG_POOL_GUID,
                                            &this_guid) == 0 &&
                                            iarg->guid == this_guid;
                                }
                                if (!matched) {
                                        nvlist_free(config);
                                } else {
                                        /*
                                         * use the non-raw path for the config;
                                         * fall through (rather than continue)
                                         * so the slice node is still freed
                                         * below.
                                         */
                                        (void) strlcpy(end, slice->rn_name,
                                            pathleft);
                                        if (add_config(hdl, &pools,
                                            path, config) != 0)
                                                goto error;
                                }
                        }
                        free(slice->rn_name);
                        free(slice);
                }
                avl_destroy(&slice_cache);

                (void) closedir(dirp);
                dirp = NULL;
        }

        ret = get_configs(hdl, &pools, iarg->can_be_active);

error:
        for (pe = pools.pools; pe != NULL; pe = penext) {
                penext = pe->pe_next;
                for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
                        venext = ve->ve_next;
                        for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
                                cenext = ce->ce_next;
                                if (ce->ce_config)
                                        nvlist_free(ce->ce_config);
                                free(ce);
                        }
                        free(ve);
                }
                free(pe);
        }

        for (ne = pools.names; ne != NULL; ne = nenext) {
                nenext = ne->ne_next;
                if (ne->ne_name)
                        free(ne->ne_name);
                free(ne);
        }

        if (dirp)
                (void) closedir(dirp);

        return (ret);
}

nvlist_t *
zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv)
{
        importargs_t iarg = { 0 };

        iarg.paths = argc;
        iarg.path = argv;

        return (zpool_find_import_impl(hdl, &iarg));
}

/*
 * Given a cache file, return the contents as a list of importable pools.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
nvlist_t *
zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
    char *poolname, uint64_t guid)
{
        char *buf;
        int fd;
        struct stat64 statbuf;
        nvlist_t *raw, *src, *dst;
        nvlist_t *pools;
        nvpair_t *elem;
        char *name;
        uint64_t this_guid;
        boolean_t active;

        verify(poolname == NULL || guid == 0);

        if ((fd = open(cachefile, O_RDONLY)) < 0) {
                zfs_error_aux(hdl, "%s", strerror(errno));
                (void) zfs_error(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN, "failed to open cache file"));
                return (NULL);
        }

        if (fstat64(fd, &statbuf) != 0) {
                zfs_error_aux(hdl, "%s", strerror(errno));
                (void) close(fd);
                (void) zfs_error(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
                return (NULL);
        }

        if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
                (void) close(fd);
                return (NULL);
        }

        if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
                (void) close(fd);
                free(buf);
                (void) zfs_error(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN,
                    "failed to read cache file contents"));
                return (NULL);
        }

        (void) close(fd);

        if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
                free(buf);
                (void) zfs_error(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN,
                    "invalid or corrupt cache file contents"));
                return (NULL);
        }

        free(buf);

	/*
	 * Walk the list of pools in the cache file, skip any that are
	 * already active, and refresh the configuration of the rest from
	 * their devices.
	 */
	if (nvlist_alloc(&pools, 0, 0) != 0) {
		(void) no_memory(hdl);
		nvlist_free(raw);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
		verify(nvpair_value_nvlist(elem, &src) == 0);

		verify(nvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (poolname != NULL && strcmp(poolname, name) != 0)
			continue;

		verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID,
		    &this_guid) == 0);
		if (guid != 0 && guid != this_guid)
			continue;

		if (pool_active(hdl, name, this_guid, &active) != 0) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if (active)
			continue;

		if ((dst = refresh_config(hdl, src)) == NULL) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
			(void) no_memory(hdl);
			nvlist_free(dst);
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}
		nvlist_free(dst);
	}

	nvlist_free(raw);
	return (pools);
}
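
/*
 * Illustrative sketch (not part of this library): to list the pools
 * recorded in the system cache file, one might do
 *
 *	nvlist_t *pools = zpool_find_import_cached(hdl,
 *	    "/boot/zfs/zpool.cache", NULL, 0);	(path is an assumption)
 *
 * Passing a NULL poolname and a zero guid asks for every inactive pool
 * recorded in the file.
 */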

/*
 * zpool_iter() callback: check whether an already-imported pool matches
 * the name (or, failing that, the guid) requested by the import, closing
 * the pool handle before returning.
 */
static int
name_or_guid_exists(zpool_handle_t *zhp, void *data)
{
	importargs_t *import = data;
	int found = 0;

	if (import->poolname != NULL) {
		char *pool_name;

		verify(nvlist_lookup_string(zhp->zpool_config,
		    ZPOOL_CONFIG_POOL_NAME, &pool_name) == 0);
		if (strcmp(pool_name, import->poolname) == 0)
			found = 1;
	} else {
		uint64_t pool_guid;

		verify(nvlist_lookup_uint64(zhp->zpool_config,
		    ZPOOL_CONFIG_POOL_GUID, &pool_guid) == 0);
		if (pool_guid == import->guid)
			found = 1;
	}

	zpool_close(zhp);
	return (found);
}

/*
 * Entry point for pool search: optionally record whether the requested
 * name or guid is already in use (import->unique), then search either
 * the given cache file or the devices themselves.
 */
nvlist_t *
zpool_search_import(libzfs_handle_t *hdl, importargs_t *import)
{
	verify(import->poolname == NULL || import->guid == 0);

	if (import->unique)
		import->exists = zpool_iter(hdl, name_or_guid_exists, import);

	if (import->cachefile != NULL)
		return (zpool_find_import_cached(hdl, import->cachefile,
		    import->poolname, import->guid));

	return (zpool_find_import_impl(hdl, import));
}
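
/*
 * Illustrative sketch (not part of this library): searching for a single
 * pool by name via the device-scan path might look like
 *
 *	importargs_t args = { 0 };
 *	args.poolname = "tank";		(hypothetical pool name)
 *	args.unique = B_TRUE;
 *	nvlist_t *pools = zpool_search_import(hdl, &args);
 *
 * On return, args.exists reports whether an imported pool already uses
 * that name.
 */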

/*
 * Walk a vdev tree recursively, returning B_TRUE if any vdev (including
 * the root itself) carries the given guid.
 */
boolean_t
find_guid(nvlist_t *nv, uint64_t guid)
{
	uint64_t tmp;
	nvlist_t **child;
	uint_t c, children;

	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
	if (tmp == guid)
		return (B_TRUE);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_guid(child[c], guid))
				return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * State shared with the find_aux() callback below: the class of aux vdev
 * to search (spares or l2cache), the guid to look for, and the handle of
 * the pool found to be using it.
 */
typedef struct aux_cbdata {
	const char	*cb_type;
	uint64_t	cb_guid;
	zpool_handle_t	*cb_zhp;
} aux_cbdata_t;

/*
 * zpool_iter() callback: scan one pool's spare or l2cache list (as
 * selected by cb_type) for a device with a matching guid.  On a match,
 * keep the pool handle open in cb_zhp and stop the iteration.
 */
static int
find_aux(zpool_handle_t *zhp, void *data)
{
	aux_cbdata_t *cbp = data;
	nvlist_t **list;
	uint_t i, count;
	uint64_t guid;
	nvlist_t *nvroot;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
	    &list, &count) == 0) {
		for (i = 0; i < count; i++) {
			verify(nvlist_lookup_uint64(list[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);
			if (guid == cbp->cb_guid) {
				cbp->cb_zhp = zhp;
				return (1);
			}
		}
	}

	zpool_close(zhp);
	return (0);
}

/*
 * Determines if the pool is in use.  If so, it returns B_TRUE in *inuse,
 * along with the state of the pool and the name of the pool.  The name
 * string is allocated and must be freed by the caller.
 */
int
zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
    boolean_t *inuse)
{
	nvlist_t *config;
	char *name;
	boolean_t ret;
	uint64_t guid, vdev_guid;
	zpool_handle_t *zhp;
	nvlist_t *pool_config;
	uint64_t stateval, isspare;
	aux_cbdata_t cb = { 0 };
	boolean_t isactive;

	*inuse = B_FALSE;

	if (zpool_read_label(fd, &config) != 0) {
		(void) no_memory(hdl);
		return (-1);
	}

	if (config == NULL)
		return (0);

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) == 0);

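	/*
	 * Hot-spare and l2cache labels carry only the vdev guid and pool
	 * state, not a pool name or pool guid, so those fields are only
	 * looked up for other vdev types.
	 */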
	if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);
	}

	switch (stateval) {
	case POOL_STATE_EXPORTED:
		/*
		 * A pool with an exported state may in fact be imported
		 * read-only, so check the in-core state to see if it's
		 * active and imported read-only.  If it is, set
		 * its state to active.
		 */
		if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
		    (zhp = zpool_open_canfail(hdl, name)) != NULL) {
			if (zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
				stateval = POOL_STATE_ACTIVE;

			/*
			 * All we needed the zpool handle for is the
			 * readonly prop check.
			 */
			zpool_close(zhp);
		}

		ret = B_TRUE;
		break;

	case POOL_STATE_ACTIVE:
		/*
		 * For an active pool, we have to determine if it's really part
		 * of a currently active pool (in which case the pool will exist
		 * and the guid will be the same), or whether it's part of an
		 * active pool that was disconnected without being explicitly
		 * exported.
		 */
		if (pool_active(hdl, name, guid, &isactive) != 0) {
			nvlist_free(config);
			return (-1);
		}

		if (isactive) {
			/*
			 * Because the device may have been removed while
			 * offlined, we only report it as active if the vdev is
			 * still present in the config.  Otherwise, pretend like
			 * it's not in use.
			 */
			if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
			    (pool_config = zpool_get_config(zhp, NULL))
			    != NULL) {
				nvlist_t *nvroot;

				verify(nvlist_lookup_nvlist(pool_config,
				    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
				ret = find_guid(nvroot, vdev_guid);
			} else {
				ret = B_FALSE;
			}

			/*
			 * If this is an active spare within another pool, we
			 * treat it like an unused hot spare.  This allows the
			 * user to create a pool with a hot spare that is
			 * currently in use within another pool.  Since we
			 * return B_TRUE, libdiskmgt will continue to prevent
			 * generic consumers from using the device.
			 */
			if (ret && nvlist_lookup_uint64(config,
			    ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
				stateval = POOL_STATE_SPARE;

			if (zhp != NULL)
				zpool_close(zhp);
		} else {
			stateval = POOL_STATE_POTENTIALLY_ACTIVE;
			ret = B_TRUE;
		}
		break;

	case POOL_STATE_SPARE:
		/*
		 * For a hot spare, it can be either definitively in use, or
		 * potentially active.  To determine if it's in use, we iterate
		 * over all pools in the system and search for one with a spare
		 * with a matching guid.
		 *
		 * Due to the shared nature of spares, we don't actually report
		 * the potentially active case as in use.  This means the user
		 * can freely create pools on the hot spares of exported pools,
		 * but doing otherwise would complicate the code, and we end up
		 * having to deal with this case anyway.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_SPARES;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	case POOL_STATE_L2CACHE:
		/*
		 * Check if any pool is currently using this l2cache device.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_L2CACHE;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	default:
		ret = B_FALSE;
	}

	if (ret) {
		if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
			if (cb.cb_zhp)
				zpool_close(cb.cb_zhp);
			nvlist_free(config);
			return (-1);
		}
		*state = (pool_state_t)stateval;
	}

	if (cb.cb_zhp)
		zpool_close(cb.cb_zhp);

	nvlist_free(config);
	*inuse = ret;
	return (0);
}
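
/*
 * Illustrative sketch (not part of this library): a consumer deciding
 * whether a disk may be reused could do
 *
 *	pool_state_t state;
 *	char *name;
 *	boolean_t inuse;
 *
 *	if (zpool_in_use(hdl, fd, &state, &name, &inuse) == 0 && inuse) {
 *		(void) printf("device belongs to pool %s\n", name);
 *		free(name);
 *	}
 *
 * where fd is an open descriptor for the device being checked.
 */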