/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 * Copyright 2015 RackTop Systems.
 * Copyright 2016 Nexenta Systems, Inc.
 */

/*
 * Pool import support functions.
 *
 * To import a pool, we rely on reading the configuration information from the
 * ZFS label of each device.  If we successfully read the label, then we
 * organize the configuration information in the following hierarchy:
 *
 *      pool guid -> toplevel vdev guid -> label txg
 *
 * Duplicate entries matching this same tuple will be discarded.  Once we have
 * examined every device, we pick the best label txg config for each toplevel
 * vdev.  We then arrange these toplevel vdevs into a complete pool config, and
 * update any paths that have changed.  Finally, we attempt to import the pool
 * using our derived config, and record the results.
 */
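
/*
 * As an illustration (hypothetical GUIDs and txgs), scanning three
 * devices that belong to one two-vdev pool might yield:
 *
 *      pool guid 0x1234
 *          toplevel vdev guid 0xaaaa -> configs at txgs 100 and 102
 *          toplevel vdev guid 0xbbbb -> config at txg 102
 *
 * The txg-102 config would be chosen for each toplevel vdev before the
 * final pool config is assembled.
 */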

#include <ctype.h>
#include <devid.h>
#include <dirent.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <thread_pool.h>
#include <libgeom.h>

#include <sys/vdev_impl.h>

#include "libzfs.h"
#include "libzfs_impl.h"

/*
 * Intermediate structures used to gather configuration information.
 */
typedef struct config_entry {
        uint64_t                ce_txg;
        nvlist_t                *ce_config;
        struct config_entry     *ce_next;
} config_entry_t;

typedef struct vdev_entry {
        uint64_t                ve_guid;
        config_entry_t          *ve_configs;
        struct vdev_entry       *ve_next;
} vdev_entry_t;

typedef struct pool_entry {
        uint64_t                pe_guid;
        vdev_entry_t            *pe_vdevs;
        struct pool_entry       *pe_next;
} pool_entry_t;

typedef struct name_entry {
        char                    *ne_name;
        uint64_t                ne_guid;
        struct name_entry       *ne_next;
} name_entry_t;

typedef struct pool_list {
        pool_entry_t            *pools;
        name_entry_t            *names;
} pool_list_t;

static char *
get_devid(const char *path)
{
#ifdef have_devid
        int fd;
        ddi_devid_t devid;
        char *minor, *ret;

        if ((fd = open(path, O_RDONLY)) < 0)
                return (NULL);

        minor = NULL;
        ret = NULL;
        if (devid_get(fd, &devid) == 0) {
                if (devid_get_minor_name(fd, &minor) == 0)
                        ret = devid_str_encode(devid, minor);
                if (minor != NULL)
                        devid_str_free(minor);
                devid_free(devid);
        }
        (void) close(fd);

        return (ret);
#else
        return (NULL);
#endif
}
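
/*
 * Note: the have_devid guard above appears never to be defined in this
 * (FreeBSD) build, so get_devid() simply returns NULL here.  Where
 * devids are available (illumos), callers such as fix_paths() below
 * store the encoded string under ZPOOL_CONFIG_DEVID and release it
 * with devid_str_free().
 */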

/*
 * Go through and fix up any path and/or devid information for the given vdev
 * configuration.
 */
static int
fix_paths(nvlist_t *nv, name_entry_t *names)
{
        nvlist_t **child;
        uint_t c, children;
        uint64_t guid;
        name_entry_t *ne, *best;
        char *path, *devid;
        int matched;

        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
            &child, &children) == 0) {
                for (c = 0; c < children; c++)
                        if (fix_paths(child[c], names) != 0)
                                return (-1);
                return (0);
        }

        /*
         * This is a leaf (file or disk) vdev.  In either case, go through
         * the name list and see if we find a matching guid.  If so, replace
         * the path and see if we can calculate a new devid.
         *
         * There may be multiple names associated with a particular guid, in
         * which case we have overlapping slices or multiple paths to the same
         * disk.  If this is the case, then we want to pick the path that is
         * the most similar to the original, where "most similar" is the number
         * of matching characters starting from the end of the path.  This will
         * preserve slice numbers even if the disks have been reorganized, and
         * will also catch preferred disk names if multiple paths exist.
         */
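        /*
         * For example (hypothetical paths): if the label records
         * "/dev/da0s1" and the name list offers "/dev/ada0s1" and
         * "/dev/gpt/disk1", the former wins; comparing from the end,
         * it matches five characters ("da0s1") versus just one ("1").
         */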
        verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
        if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
                path = NULL;

        matched = 0;
        best = NULL;
        for (ne = names; ne != NULL; ne = ne->ne_next) {
                if (ne->ne_guid == guid) {
                        const char *src, *dst;
                        int count;

                        if (path == NULL) {
                                best = ne;
                                break;
                        }

                        src = ne->ne_name + strlen(ne->ne_name) - 1;
                        dst = path + strlen(path) - 1;
                        for (count = 0; src >= ne->ne_name && dst >= path;
                            src--, dst--, count++)
                                if (*src != *dst)
                                        break;

                        /*
                         * At this point, 'count' is the number of characters
                         * matched from the end.
                         */
                        if (count > matched || best == NULL) {
                                best = ne;
                                matched = count;
                        }
                }
        }

        if (best == NULL)
                return (0);

        if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
                return (-1);

        if ((devid = get_devid(best->ne_name)) == NULL) {
                (void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
        } else {
                if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0) {
                        devid_str_free(devid);
                        return (-1);
                }
                devid_str_free(devid);
        }

        return (0);
}

/*
 * Add the given configuration to the list of known devices.
 */
static int
add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
    nvlist_t *config)
{
        uint64_t pool_guid, vdev_guid, top_guid, txg, state;
        pool_entry_t *pe;
        vdev_entry_t *ve;
        config_entry_t *ce;
        name_entry_t *ne;

        /*
         * If this is a hot spare not currently in use or level 2 cache
         * device, add it to the list of names to translate, but don't do
         * anything else.
         */
        if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
            &state) == 0 &&
            (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
                if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
                        return (-1);

                if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
                        free(ne);
                        return (-1);
                }
                ne->ne_guid = vdev_guid;
                ne->ne_next = pl->names;
                pl->names = ne;
                return (0);
        }

        /*
         * If we have a valid config but cannot read any of these fields, then
         * it means we have a half-initialized label.  In vdev_label_init()
         * we write a label with txg == 0 so that we can identify the device
         * in case the user refers to the same disk later on.  If we fail to
         * create the pool, we'll be left with a label in this state
         * which should not be considered part of a valid pool.
         */
        if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
            &pool_guid) != 0 ||
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
            &vdev_guid) != 0 ||
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
            &top_guid) != 0 ||
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
            &txg) != 0 || txg == 0) {
                nvlist_free(config);
                return (0);
        }

        /*
         * First, see if we know about this pool.  If not, then add it to the
         * list of known pools.
         */
        for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
                if (pe->pe_guid == pool_guid)
                        break;
        }

        if (pe == NULL) {
                if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
                        nvlist_free(config);
                        return (-1);
                }
                pe->pe_guid = pool_guid;
                pe->pe_next = pl->pools;
                pl->pools = pe;
        }

        /*
         * Second, see if we know about this toplevel vdev.  Add it if it's
         * missing.
         */
        for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
                if (ve->ve_guid == top_guid)
                        break;
        }

        if (ve == NULL) {
                if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
                        nvlist_free(config);
                        return (-1);
                }
                ve->ve_guid = top_guid;
                ve->ve_next = pe->pe_vdevs;
                pe->pe_vdevs = ve;
        }

        /*
         * Third, see if we have a config with a matching transaction group.  If
         * so, then we do nothing.  Otherwise, add it to the list of known
         * configs.
         */
        for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
                if (ce->ce_txg == txg)
                        break;
        }

        if (ce == NULL) {
                if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) {
                        nvlist_free(config);
                        return (-1);
                }
                ce->ce_txg = txg;
                ce->ce_config = config;
                ce->ce_next = ve->ve_configs;
                ve->ve_configs = ce;
        } else {
                nvlist_free(config);
        }

        /*
         * At this point we've successfully added our config to the list of
         * known configs.  The last thing to do is add the vdev guid -> path
         * mappings so that we can fix up the configuration as necessary before
         * doing the import.
         */
        if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
                return (-1);

        if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
                free(ne);
                return (-1);
        }

        ne->ne_guid = vdev_guid;
        ne->ne_next = pl->names;
        pl->names = ne;

        return (0);
}
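
/*
 * Illustration (hypothetical state): after add_config() has seen two
 * label copies of one device at txgs 100 and 102, the matching
 * vdev_entry_t holds two config_entry_t nodes, and pl->names maps the
 * device's vdev guid to every path under which it was discovered.
 */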

/*
 * Determine whether a pool with the given name is currently active and
 * has the given GUID; the answer is returned in 'isactive'.  Returns 0
 * on success and -1 on error.
 */
static int
pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
    boolean_t *isactive)
{
        zpool_handle_t *zhp;
        uint64_t theguid;

        if (zpool_open_silent(hdl, name, &zhp) != 0)
                return (-1);

        if (zhp == NULL) {
                *isactive = B_FALSE;
                return (0);
        }

        verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
            &theguid) == 0);

        zpool_close(zhp);

        *isactive = (theguid == guid);
        return (0);
}

static nvlist_t *
refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
{
        nvlist_t *nvl;
        zfs_cmd_t zc = { 0 };
        int err;

        if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
                return (NULL);

        if (zcmd_alloc_dst_nvlist(hdl, &zc,
            zc.zc_nvlist_conf_size * 2) != 0) {
                zcmd_free_nvlists(&zc);
                return (NULL);
        }

        while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
            &zc)) != 0 && errno == ENOMEM) {
                if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
                        zcmd_free_nvlists(&zc);
                        return (NULL);
                }
        }

        if (err) {
                zcmd_free_nvlists(&zc);
                return (NULL);
        }

        if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
                zcmd_free_nvlists(&zc);
                return (NULL);
        }

        zcmd_free_nvlists(&zc);
        return (nvl);
}

/*
 * Determine if the vdev id is a hole in the namespace.
 */
boolean_t
vdev_is_hole(uint64_t *hole_array, uint_t holes, uint_t id)
{
        for (int c = 0; c < holes; c++) {

                /* Top-level is a hole */
                if (hole_array[c] == id)
                        return (B_TRUE);
        }
        return (B_FALSE);
}
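
/*
 * For instance, with hole_array = { 1, 3 } and holes = 2,
 * vdev_is_hole(hole_array, 2, 3) returns B_TRUE while an id of 2
 * returns B_FALSE.
 */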

/*
 * Convert our list of pools into the definitive set of configurations.  We
 * start by picking the best config for each toplevel vdev.  Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool.  We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
{
        pool_entry_t *pe;
        vdev_entry_t *ve;
        config_entry_t *ce;
        nvlist_t *ret = NULL, *config = NULL, *tmp = NULL, *nvtop, *nvroot;
        nvlist_t **spares, **l2cache;
        uint_t i, nspares, nl2cache;
        boolean_t config_seen;
        uint64_t best_txg;
        char *name, *hostname = NULL;
        uint64_t guid;
        uint_t children = 0;
        nvlist_t **child = NULL;
        uint_t holes;
        uint64_t *hole_array, max_id;
        uint_t c;
        boolean_t isactive;
        uint64_t hostid;
        nvlist_t *nvl;
        boolean_t found_one = B_FALSE;
        boolean_t valid_top_config = B_FALSE;

        if (nvlist_alloc(&ret, 0, 0) != 0)
                goto nomem;

        for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
                uint64_t id, max_txg = 0;

                if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
                        goto nomem;
                config_seen = B_FALSE;

                /*
                 * Iterate over all toplevel vdevs.  Grab the pool configuration
                 * from the first one we find, and then go through the rest and
                 * add them as necessary to the 'vdevs' member of the config.
                 */
                for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {

                        /*
                         * Determine the best configuration for this vdev by
                         * selecting the config with the latest transaction
                         * group.
                         */
                        best_txg = 0;
                        for (ce = ve->ve_configs; ce != NULL;
                            ce = ce->ce_next) {

                                if (ce->ce_txg > best_txg) {
                                        tmp = ce->ce_config;
                                        best_txg = ce->ce_txg;
                                }
                        }

                        /*
                         * We rely on the fact that the max txg for the
                         * pool will contain the most up-to-date information
                         * about the valid top-levels in the vdev namespace.
                         */
                        if (best_txg > max_txg) {
                                (void) nvlist_remove(config,
                                    ZPOOL_CONFIG_VDEV_CHILDREN,
                                    DATA_TYPE_UINT64);
                                (void) nvlist_remove(config,
                                    ZPOOL_CONFIG_HOLE_ARRAY,
                                    DATA_TYPE_UINT64_ARRAY);

                                max_txg = best_txg;
                                hole_array = NULL;
                                holes = 0;
                                max_id = 0;
                                valid_top_config = B_FALSE;

                                if (nvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_VDEV_CHILDREN, &max_id) == 0) {
                                        verify(nvlist_add_uint64(config,
                                            ZPOOL_CONFIG_VDEV_CHILDREN,
                                            max_id) == 0);
                                        valid_top_config = B_TRUE;
                                }

                                if (nvlist_lookup_uint64_array(tmp,
                                    ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
                                    &holes) == 0) {
                                        verify(nvlist_add_uint64_array(config,
                                            ZPOOL_CONFIG_HOLE_ARRAY,
                                            hole_array, holes) == 0);
                                }
                        }

                        if (!config_seen) {
                                /*
                                 * Copy the relevant pieces of data to the pool
                                 * configuration:
                                 *
                                 *      version
                                 *      pool guid
                                 *      name
                                 *      comment (if available)
                                 *      pool state
                                 *      hostid (if available)
                                 *      hostname (if available)
                                 */
                                uint64_t state, version;
                                char *comment = NULL;

                                version = fnvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_VERSION);
                                fnvlist_add_uint64(config,
                                    ZPOOL_CONFIG_VERSION, version);
                                guid = fnvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_POOL_GUID);
                                fnvlist_add_uint64(config,
                                    ZPOOL_CONFIG_POOL_GUID, guid);
                                name = fnvlist_lookup_string(tmp,
                                    ZPOOL_CONFIG_POOL_NAME);
                                fnvlist_add_string(config,
                                    ZPOOL_CONFIG_POOL_NAME, name);

                                if (nvlist_lookup_string(tmp,
                                    ZPOOL_CONFIG_COMMENT, &comment) == 0)
                                        fnvlist_add_string(config,
                                            ZPOOL_CONFIG_COMMENT, comment);

                                state = fnvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_POOL_STATE);
                                fnvlist_add_uint64(config,
                                    ZPOOL_CONFIG_POOL_STATE, state);

                                hostid = 0;
                                if (nvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
                                        fnvlist_add_uint64(config,
                                            ZPOOL_CONFIG_HOSTID, hostid);
                                        hostname = fnvlist_lookup_string(tmp,
                                            ZPOOL_CONFIG_HOSTNAME);
                                        fnvlist_add_string(config,
                                            ZPOOL_CONFIG_HOSTNAME, hostname);
                                }

                                config_seen = B_TRUE;
                        }

                        /*
                         * Add this top-level vdev to the child array.
                         */
                        verify(nvlist_lookup_nvlist(tmp,
                            ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
                        verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
                            &id) == 0);

                        if (id >= children) {
                                nvlist_t **newchild;

                                newchild = zfs_alloc(hdl, (id + 1) *
                                    sizeof (nvlist_t *));
                                if (newchild == NULL)
                                        goto nomem;

                                for (c = 0; c < children; c++)
                                        newchild[c] = child[c];

                                free(child);
                                child = newchild;
                                children = id + 1;
                        }
                        if (nvlist_dup(nvtop, &child[id], 0) != 0)
                                goto nomem;

                }

                /*
                 * If we have information about all the top-levels then
                 * clean up the nvlist which we've constructed. This
                 * means removing any extraneous devices that are
                 * beyond the valid range or adding devices to the end
                 * of our array which appear to be missing.
                 */
                if (valid_top_config) {
                        if (max_id < children) {
                                for (c = max_id; c < children; c++)
                                        nvlist_free(child[c]);
                                children = max_id;
                        } else if (max_id > children) {
                                nvlist_t **newchild;

                                newchild = zfs_alloc(hdl, (max_id) *
                                    sizeof (nvlist_t *));
                                if (newchild == NULL)
                                        goto nomem;

                                for (c = 0; c < children; c++)
                                        newchild[c] = child[c];

                                free(child);
                                child = newchild;
                                children = max_id;
                        }
                }

                verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
                    &guid) == 0);

                /*
                 * The vdev namespace may contain holes as a result of
                 * device removal. We must add them back into the vdev
                 * tree before we process any missing devices.
                 */
                if (holes > 0) {
                        ASSERT(valid_top_config);

                        for (c = 0; c < children; c++) {
                                nvlist_t *holey;

                                if (child[c] != NULL ||
                                    !vdev_is_hole(hole_array, holes, c))
                                        continue;

                                if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
                                    0) != 0)
                                        goto nomem;

                                /*
                                 * Holes in the namespace are treated as
                                 * "hole" top-level vdevs and have a
                                 * special flag set on them.
                                 */
                                if (nvlist_add_string(holey,
                                    ZPOOL_CONFIG_TYPE,
                                    VDEV_TYPE_HOLE) != 0 ||
                                    nvlist_add_uint64(holey,
                                    ZPOOL_CONFIG_ID, c) != 0 ||
                                    nvlist_add_uint64(holey,
                                    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
                                        nvlist_free(holey);
                                        goto nomem;
                                }
                                child[c] = holey;
                        }
                }

                /*
                 * Look for any missing top-level vdevs.  If this is the case,
                 * create a faked up 'missing' vdev as a placeholder.  We cannot
                 * simply compress the child array, because the kernel performs
                 * certain checks to make sure the vdev IDs match their location
                 * in the configuration.
                 */
                for (c = 0; c < children; c++) {
                        if (child[c] == NULL) {
                                nvlist_t *missing;
                                if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
                                    0) != 0)
                                        goto nomem;
                                if (nvlist_add_string(missing,
                                    ZPOOL_CONFIG_TYPE,
                                    VDEV_TYPE_MISSING) != 0 ||
                                    nvlist_add_uint64(missing,
                                    ZPOOL_CONFIG_ID, c) != 0 ||
                                    nvlist_add_uint64(missing,
                                    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
                                        nvlist_free(missing);
                                        goto nomem;
                                }
                                child[c] = missing;
                        }
                }

                /*
                 * Put all of this pool's top-level vdevs into a root vdev.
                 */
                if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
                        goto nomem;
                if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
                    VDEV_TYPE_ROOT) != 0 ||
                    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
                    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
                    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
                    child, children) != 0) {
                        nvlist_free(nvroot);
                        goto nomem;
                }

                for (c = 0; c < children; c++)
                        nvlist_free(child[c]);
                free(child);
                children = 0;
                child = NULL;

                /*
                 * Go through and fix up any paths and/or devids based on our
                 * known list of vdev GUID -> path mappings.
                 */
                if (fix_paths(nvroot, pl->names) != 0) {
                        nvlist_free(nvroot);
                        goto nomem;
                }

                /*
                 * Add the root vdev to this pool's configuration.
                 */
                if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
                    nvroot) != 0) {
                        nvlist_free(nvroot);
                        goto nomem;
                }
                nvlist_free(nvroot);

                /*
                 * zdb uses this path to report on active pools that were
                 * imported or created using -R.
                 */
                if (active_ok)
                        goto add_pool;

                /*
                 * Determine if this pool is currently active, in which case we
                 * can't actually import it.
                 */
                verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
                    &name) == 0);
                verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
                    &guid) == 0);

                if (pool_active(hdl, name, guid, &isactive) != 0)
                        goto error;

                if (isactive) {
                        nvlist_free(config);
                        config = NULL;
                        continue;
                }

                if ((nvl = refresh_config(hdl, config)) == NULL) {
                        nvlist_free(config);
                        config = NULL;
                        continue;
                }

                nvlist_free(config);
                config = nvl;

                /*
                 * Go through and update the paths for spares, now that we have
                 * them.
                 */
                verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
                    &nvroot) == 0);
                if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
                    &spares, &nspares) == 0) {
                        for (i = 0; i < nspares; i++) {
                                if (fix_paths(spares[i], pl->names) != 0)
                                        goto nomem;
                        }
                }

                /*
                 * Update the paths for l2cache devices.
                 */
                if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
                    &l2cache, &nl2cache) == 0) {
                        for (i = 0; i < nl2cache; i++) {
                                if (fix_paths(l2cache[i], pl->names) != 0)
                                        goto nomem;
                        }
                }

                /*
                 * Restore the original information read from the actual label.
                 */
                (void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
                    DATA_TYPE_UINT64);
                (void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
                    DATA_TYPE_STRING);
                if (hostid != 0) {
                        verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
                            hostid) == 0);
                        verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
                            hostname) == 0);
                }

add_pool:
                /*
                 * Add this pool to the list of configs.
                 */
                verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
                    &name) == 0);
                if (nvlist_add_nvlist(ret, name, config) != 0)
                        goto nomem;

                found_one = B_TRUE;
                nvlist_free(config);
                config = NULL;
        }

        if (!found_one) {
                nvlist_free(ret);
                ret = NULL;
        }

        return (ret);

nomem:
        (void) no_memory(hdl);
error:
        nvlist_free(config);
        nvlist_free(ret);
        for (c = 0; c < children; c++)
                nvlist_free(child[c]);
        free(child);

        return (NULL);
}

/*
 * Return the offset of the given label.
 */
static uint64_t
label_offset(uint64_t size, int l)
{
        ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
        return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
            0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}
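
/*
 * Worked example, assuming the usual layout of four labels with two at
 * each end of the device and sizeof (vdev_label_t) == 256KB: for an
 * aligned device size of 1GB, labels 0 and 1 sit at offsets 0 and
 * 256KB, while labels 2 and 3 sit at 1GB - 512KB and 1GB - 256KB.
 */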

/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one.
 * Return 0 on success, or -1 on failure
 */
int
zpool_read_label(int fd, nvlist_t **config)
{
        struct stat64 statbuf;
        int l;
        vdev_label_t *label;
        uint64_t state, txg, size;

        *config = NULL;

        if (fstat64(fd, &statbuf) == -1)
                return (-1);
        size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

        if ((label = malloc(sizeof (vdev_label_t))) == NULL)
                return (-1);

        for (l = 0; l < VDEV_LABELS; l++) {
                if (pread64(fd, label, sizeof (vdev_label_t),
                    label_offset(size, l)) != sizeof (vdev_label_t))
                        continue;

                if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
                    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
                        continue;

                if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
                    &state) != 0 || state > POOL_STATE_L2CACHE) {
                        nvlist_free(*config);
                        continue;
                }

                if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
                    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
                    &txg) != 0 || txg == 0)) {
                        nvlist_free(*config);
                        continue;
                }

                free(label);
                return (0);
        }

        free(label);
        *config = NULL;
        errno = ENOENT;
        return (-1);
}
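
/*
 * Minimal usage sketch (hypothetical device path, error handling
 * elided):
 *
 *      nvlist_t *config;
 *      int fd = open("/dev/da0", O_RDONLY);
 *
 *      if (fd >= 0 && zpool_read_label(fd, &config) == 0) {
 *              ... inspect ZPOOL_CONFIG_POOL_NAME, etc ...
 *              nvlist_free(config);
 *      }
 */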

typedef struct rdsk_node {
        char *rn_name;
        int rn_dfd;
        libzfs_handle_t *rn_hdl;
        nvlist_t *rn_config;
        avl_tree_t *rn_avl;
        avl_node_t rn_node;
        boolean_t rn_nozpool;
} rdsk_node_t;

static int
slice_cache_compare(const void *arg1, const void *arg2)
{
        const char  *nm1 = ((rdsk_node_t *)arg1)->rn_name;
        const char  *nm2 = ((rdsk_node_t *)arg2)->rn_name;
        char *nm1slice, *nm2slice;
        int rv;

        /*
         * slices zero and two are the most likely to provide results,
         * so put those first
         */
        nm1slice = strstr(nm1, "s0");
        nm2slice = strstr(nm2, "s0");
        if (nm1slice && !nm2slice) {
                return (-1);
        }
        if (!nm1slice && nm2slice) {
                return (1);
        }
        nm1slice = strstr(nm1, "s2");
        nm2slice = strstr(nm2, "s2");
        if (nm1slice && !nm2slice) {
                return (-1);
        }
        if (!nm1slice && nm2slice) {
                return (1);
        }

        rv = strcmp(nm1, nm2);
        if (rv == 0)
                return (0);
        return (rv > 0 ? 1 : -1);
}
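
/*
 * Example ordering (hypothetical names): "c0t0d0s0" sorts before
 * "c0t0d0s2", which in turn sorts before "c0t0d0s5", since names
 * containing "s0" come first, then those containing "s2", and all
 * remaining names fall back to plain strcmp() order.
 */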

#ifdef illumos
static void
check_one_slice(avl_tree_t *r, char *diskname, uint_t partno,
    diskaddr_t size, uint_t blksz)
{
        rdsk_node_t tmpnode;
        rdsk_node_t *node;
        char sname[MAXNAMELEN];

        tmpnode.rn_name = &sname[0];
        (void) snprintf(tmpnode.rn_name, MAXNAMELEN, "%s%u",
            diskname, partno);
        /*
         * protect against division by zero for disk labels that
         * contain a bogus sector size
         */
        if (blksz == 0)
                blksz = DEV_BSIZE;
        /* too small to contain a zpool? */
        if ((size < (SPA_MINDEVSIZE / blksz)) &&
            (node = avl_find(r, &tmpnode, NULL)))
                node->rn_nozpool = B_TRUE;
}
#endif  /* illumos */

static void
nozpool_all_slices(avl_tree_t *r, const char *sname)
{
#ifdef illumos
        char diskname[MAXNAMELEN];
        char *ptr;
        int i;

        (void) strncpy(diskname, sname, MAXNAMELEN);
        if (((ptr = strrchr(diskname, 's')) == NULL) &&
            ((ptr = strrchr(diskname, 'p')) == NULL))
                return;
        ptr[0] = 's';
        ptr[1] = '\0';
        for (i = 0; i < NDKMAP; i++)
                check_one_slice(r, diskname, i, 0, 1);
        ptr[0] = 'p';
        for (i = 0; i <= FD_NUMPART; i++)
                check_one_slice(r, diskname, i, 0, 1);
#endif  /* illumos */
}

#ifdef illumos
static void
check_slices(avl_tree_t *r, int fd, const char *sname)
{
        struct extvtoc vtoc;
        struct dk_gpt *gpt;
        char diskname[MAXNAMELEN];
        char *ptr;
        int i;

        (void) strncpy(diskname, sname, MAXNAMELEN);
        if ((ptr = strrchr(diskname, 's')) == NULL || !isdigit(ptr[1]))
                return;
        ptr[1] = '\0';

        if (read_extvtoc(fd, &vtoc) >= 0) {
                for (i = 0; i < NDKMAP; i++)
                        check_one_slice(r, diskname, i,
                            vtoc.v_part[i].p_size, vtoc.v_sectorsz);
        } else if (efi_alloc_and_read(fd, &gpt) >= 0) {
                /*
                 * on x86 we'll still have leftover links that point
                 * to slices s[9-15], so use NDKMAP instead
                 */
                for (i = 0; i < NDKMAP; i++)
                        check_one_slice(r, diskname, i,
                            gpt->efi_parts[i].p_size, gpt->efi_lbasize);
                /* nodes p[1-4] are never used with EFI labels */
                ptr[0] = 'p';
                for (i = 1; i <= FD_NUMPART; i++)
                        check_one_slice(r, diskname, i, 0, 1);
                efi_free(gpt);
        }
}
#endif  /* illumos */

static void
zpool_open_func(void *arg)
{
        rdsk_node_t *rn = arg;
        struct stat64 statbuf;
        nvlist_t *config;
        int fd;

        if (rn->rn_nozpool)
                return;
        if ((fd = openat64(rn->rn_dfd, rn->rn_name, O_RDONLY)) < 0) {
                /* symlink to a device that's no longer there */
                if (errno == ENOENT)
                        nozpool_all_slices(rn->rn_avl, rn->rn_name);
                return;
        }
        /*
         * Ignore failed stats.  We only want regular
         * files, character devs and block devs.
         */
        if (fstat64(fd, &statbuf) != 0 ||
            (!S_ISREG(statbuf.st_mode) &&
            !S_ISCHR(statbuf.st_mode) &&
            !S_ISBLK(statbuf.st_mode))) {
                (void) close(fd);
                return;
        }
        /* this file is too small to hold a zpool */
#ifdef illumos
        if (S_ISREG(statbuf.st_mode) &&
            statbuf.st_size < SPA_MINDEVSIZE) {
                (void) close(fd);
                return;
        } else if (!S_ISREG(statbuf.st_mode)) {
                /*
                 * Try to read the disk label first so we don't have to
                 * open a bunch of minor nodes that can't have a zpool.
                 */
                check_slices(rn->rn_avl, fd, rn->rn_name);
        }
#else   /* !illumos */
        if (statbuf.st_size < SPA_MINDEVSIZE) {
                (void) close(fd);
                return;
        }
#endif  /* illumos */

        if ((zpool_read_label(fd, &config)) != 0 && errno == ENOMEM) {
                (void) close(fd);
                (void) no_memory(rn->rn_hdl);
                return;
        }
        (void) close(fd);

        rn->rn_config = config;
}

/*
 * Given a file descriptor, clear (zero) the label information.
 */
int
zpool_clear_label(int fd)
{
        struct stat64 statbuf;
        int l;
        vdev_label_t *label;
        uint64_t size;

        if (fstat64(fd, &statbuf) == -1)
                return (0);
        size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

        if ((label = calloc(sizeof (vdev_label_t), 1)) == NULL)
                return (-1);

        for (l = 0; l < VDEV_LABELS; l++) {
                if (pwrite64(fd, label, sizeof (vdev_label_t),
                    label_offset(size, l)) != sizeof (vdev_label_t)) {
                        free(label);
                        return (-1);
                }
        }

        free(label);
        return (0);
}
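
/*
 * Usage sketch (hypothetical path; roughly what "zpool labelclear"
 * does):
 *
 *      int fd = open("/dev/da0", O_RDWR);
 *
 *      if (fd >= 0 && zpool_clear_label(fd) == 0)
 *              ... all VDEV_LABELS copies are now zeroed ...
 */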

/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no args are
 * given (argc is 0), then the default directory (/dev) is searched.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
1140 static nvlist_t *
1141 zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
1142 {
1143         int i, dirs = iarg->paths;
1144         struct dirent64 *dp;
1145         char path[MAXPATHLEN];
1146         char *end, **dir = iarg->path;
1147         size_t pathleft;
1148         nvlist_t *ret = NULL;
1149         static char *default_dir = "/dev";
1150         pool_list_t pools = { 0 };
1151         pool_entry_t *pe, *penext;
1152         vdev_entry_t *ve, *venext;
1153         config_entry_t *ce, *cenext;
1154         name_entry_t *ne, *nenext;
1155         avl_tree_t slice_cache;
1156         rdsk_node_t *slice;
1157         void *cookie;
1158
1159         if (dirs == 0) {
1160                 dirs = 1;
1161                 dir = &default_dir;
1162         }
1163
1164         /*
1165          * Go through and read the label configuration information from every
1166          * possible device, organizing the information according to pool GUID
1167          * and toplevel GUID.
1168          */
1169         for (i = 0; i < dirs; i++) {
1170                 tpool_t *t;
1171                 char rdsk[MAXPATHLEN];
1172                 int dfd;
1173                 boolean_t config_failed = B_FALSE;
1174                 DIR *dirp;
1175
1176                 /* use realpath to normalize the path */
1177                 if (realpath(dir[i], path) == 0) {
1178                         (void) zfs_error_fmt(hdl, EZFS_BADPATH,
1179                             dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
1180                         goto error;
1181                 }
1182                 end = &path[strlen(path)];
1183                 *end++ = '/';
1184                 *end = 0;
1185                 pathleft = &path[sizeof (path)] - end;
1186
1187 #ifdef illumos
1188                 /*
1189                  * Using raw devices instead of block devices when we're
1190                  * reading the labels skips a bunch of slow operations during
1191                  * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
1192                  */
1193                 if (strcmp(path, ZFS_DISK_ROOTD) == 0)
1194                         (void) strlcpy(rdsk, ZFS_RDISK_ROOTD, sizeof (rdsk));
1195                 else
1196 #endif
1197                         (void) strlcpy(rdsk, path, sizeof (rdsk));
1198
1199                 if ((dfd = open64(rdsk, O_RDONLY)) < 0 ||
1200                     (dirp = fdopendir(dfd)) == NULL) {
1201                         if (dfd >= 0)
1202                                 (void) close(dfd);
1203                         zfs_error_aux(hdl, strerror(errno));
1204                         (void) zfs_error_fmt(hdl, EZFS_BADPATH,
1205                             dgettext(TEXT_DOMAIN, "cannot open '%s'"),
1206                             rdsk);
1207                         goto error;
1208                 }
1209
1210                 avl_create(&slice_cache, slice_cache_compare,
1211                     sizeof (rdsk_node_t), offsetof(rdsk_node_t, rn_node));
1212
1213                 if (strcmp(rdsk, "/dev/") == 0) {
1214                         struct gmesh mesh;
1215                         struct gclass *mp;
1216                         struct ggeom *gp;
1217                         struct gprovider *pp;
1218
1219                         errno = geom_gettree(&mesh);
1220                         if (errno != 0) {
1221                                 zfs_error_aux(hdl, strerror(errno));
1222                                 (void) zfs_error_fmt(hdl, EZFS_BADPATH,
1223                                     dgettext(TEXT_DOMAIN, "cannot get GEOM tree"));
1224                                 goto error;
1225                         }
1226
1227                         LIST_FOREACH(mp, &mesh.lg_class, lg_class) {
1228                                 LIST_FOREACH(gp, &mp->lg_geom, lg_geom) {
1229                                         LIST_FOREACH(pp, &gp->lg_provider, lg_provider) {
1230                                                 slice = zfs_alloc(hdl, sizeof (rdsk_node_t));
1231                                                 slice->rn_name = zfs_strdup(hdl, pp->lg_name);
1232                                                 slice->rn_avl = &slice_cache;
1233                                                 slice->rn_dfd = dfd;
1234                                                 slice->rn_hdl = hdl;
1235                                                 slice->rn_nozpool = B_FALSE;
1236                                                 avl_add(&slice_cache, slice);
1237                                         }
1238                                 }
1239                         }
1240
1241                         geom_deletetree(&mesh);
1242                         goto skipdir;
1243                 }
1244
1245                 /*
1246                  * This is not MT-safe, but we have no MT consumers of libzfs
1247                  */
1248                 while ((dp = readdir64(dirp)) != NULL) {
1249                         const char *name = dp->d_name;
1250                         if (name[0] == '.' &&
1251                             (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
1252                                 continue;
1253
1254                         slice = zfs_alloc(hdl, sizeof (rdsk_node_t));
1255                         slice->rn_name = zfs_strdup(hdl, name);
1256                         slice->rn_avl = &slice_cache;
1257                         slice->rn_dfd = dfd;
1258                         slice->rn_hdl = hdl;
1259                         slice->rn_nozpool = B_FALSE;
1260                         avl_add(&slice_cache, slice);
1261                 }
1262 skipdir:
1263                 /*
1264                  * create a thread pool to do all of this in parallel;
1265                  * rn_nozpool is not protected, so this is racy in that
1266                  * multiple tasks could decide that the same slice can
1267                  * not hold a zpool, which is benign.  Also choose
1268                  * double the number of processors; we hold a lot of
1269                  * locks in the kernel, so going beyond this doesn't
1270                  * buy us much.
1271                  */
1272                 t = tpool_create(1, 2 * sysconf(_SC_NPROCESSORS_ONLN),
1273                     0, NULL);
1274                 for (slice = avl_first(&slice_cache); slice;
1275                     (slice = avl_walk(&slice_cache, slice,
1276                     AVL_AFTER)))
1277                         (void) tpool_dispatch(t, zpool_open_func, slice);
1278                 tpool_wait(t);
1279                 tpool_destroy(t);
1280
1281                 cookie = NULL;
1282                 while ((slice = avl_destroy_nodes(&slice_cache,
1283                     &cookie)) != NULL) {
1284                         if (slice->rn_config != NULL && !config_failed) {
1285                                 nvlist_t *config = slice->rn_config;
1286                                 boolean_t matched = B_TRUE;
1287
1288                                 if (iarg->poolname != NULL) {
1289                                         char *pname;
1290
1291                                         matched = nvlist_lookup_string(config,
1292                                             ZPOOL_CONFIG_POOL_NAME,
1293                                             &pname) == 0 &&
1294                                             strcmp(iarg->poolname, pname) == 0;
1295                                 } else if (iarg->guid != 0) {
1296                                         uint64_t this_guid;
1297
1298                                         matched = nvlist_lookup_uint64(config,
1299                                             ZPOOL_CONFIG_POOL_GUID,
1300                                             &this_guid) == 0 &&
1301                                             iarg->guid == this_guid;
1302                                 }
1303                                 if (!matched) {
1304                                         nvlist_free(config);
1305                                 } else {
1306                                         /*
1307                                          * use the non-raw path for the config
1308                                          */
1309                                         (void) strlcpy(end, slice->rn_name,
1310                                             pathleft);
1311                                         if (add_config(hdl, &pools, path,
1312                                             config) != 0)
1313                                                 config_failed = B_TRUE;
1314                                 }
1315                         }
1316                         free(slice->rn_name);
1317                         free(slice);
1318                 }
1319                 avl_destroy(&slice_cache);
1320
1321                 (void) closedir(dirp);
1322
1323                 if (config_failed)
1324                         goto error;
1325         }
1326
1327         ret = get_configs(hdl, &pools, iarg->can_be_active);
1328
1329 error:
1330         for (pe = pools.pools; pe != NULL; pe = penext) {
1331                 penext = pe->pe_next;
1332                 for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
1333                         venext = ve->ve_next;
1334                         for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
1335                                 cenext = ce->ce_next;
1336                                 nvlist_free(ce->ce_config);
1337                                 free(ce);
1338                         }
1339                         free(ve);
1340                 }
1341                 free(pe);
1342         }
1343
1344         for (ne = pools.names; ne != NULL; ne = nenext) {
1345                 nenext = ne->ne_next;
1346                 free(ne->ne_name);
1347                 free(ne);
1348         }
1349
1350         return (ret);
1351 }
1352
1353 nvlist_t *
1354 zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv)
1355 {
1356         importargs_t iarg = { 0 };
1357
1358         iarg.paths = argc;
1359         iarg.path = argv;
1360
1361         return (zpool_find_import_impl(hdl, &iarg));
1362 }
1363
1364 /*
1365  * Given a cache file, return the contents as a list of importable pools.
1366  * poolname or guid (but not both) are provided by the caller when trying
1367  * to import a specific pool.
1368  */
nvlist_t *
zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
    char *poolname, uint64_t guid)
{
        char *buf;
        int fd;
        struct stat64 statbuf;
        nvlist_t *raw, *src, *dst;
        nvlist_t *pools;
        nvpair_t *elem;
        char *name;
        uint64_t this_guid;
        boolean_t active;

        verify(poolname == NULL || guid == 0);

        if ((fd = open(cachefile, O_RDONLY)) < 0) {
                zfs_error_aux(hdl, "%s", strerror(errno));
                (void) zfs_error(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN, "failed to open cache file"));
                return (NULL);
        }

        if (fstat64(fd, &statbuf) != 0) {
                zfs_error_aux(hdl, "%s", strerror(errno));
                (void) close(fd);
                (void) zfs_error(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
                return (NULL);
        }

        if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
                (void) close(fd);
                return (NULL);
        }

        if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
                (void) close(fd);
                free(buf);
                (void) zfs_error(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN,
                    "failed to read cache file contents"));
                return (NULL);
        }

        (void) close(fd);

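        /*
         * The cache file is simply a packed nvlist of pool configurations,
         * keyed by pool name.
         */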
        if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
                free(buf);
                (void) zfs_error(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN,
                    "invalid or corrupt cache file contents"));
                return (NULL);
        }

        free(buf);

        /*
         * Walk the list of pools from the cache file, refresh the state of
         * each, and build up the list of importable pools.
         */
        if (nvlist_alloc(&pools, 0, 0) != 0) {
                (void) no_memory(hdl);
                nvlist_free(raw);
                return (NULL);
        }

        elem = NULL;
        while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
                src = fnvpair_value_nvlist(elem);

                name = fnvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME);
                if (poolname != NULL && strcmp(poolname, name) != 0)
                        continue;

                this_guid = fnvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID);
                if (guid != 0 && guid != this_guid)
                        continue;

                if (pool_active(hdl, name, this_guid, &active) != 0) {
                        nvlist_free(raw);
                        nvlist_free(pools);
                        return (NULL);
                }

                if (active)
                        continue;

                if ((dst = refresh_config(hdl, src)) == NULL) {
                        nvlist_free(raw);
                        nvlist_free(pools);
                        return (NULL);
                }

                if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
                        (void) no_memory(hdl);
                        nvlist_free(dst);
                        nvlist_free(raw);
                        nvlist_free(pools);
                        return (NULL);
                }
                nvlist_free(dst);
        }

        nvlist_free(raw);
        return (pools);
}

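/*
 * zpool_iter() callback that checks whether an imported pool matches the
 * name or guid given in the importargs_t.  Returns nonzero on a match;
 * the pool handle is closed either way.
 */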
static int
name_or_guid_exists(zpool_handle_t *zhp, void *data)
{
        importargs_t *import = data;
        int found = 0;

        if (import->poolname != NULL) {
                char *pool_name;

                verify(nvlist_lookup_string(zhp->zpool_config,
                    ZPOOL_CONFIG_POOL_NAME, &pool_name) == 0);
                if (strcmp(pool_name, import->poolname) == 0)
                        found = 1;
        } else {
                uint64_t pool_guid;

                verify(nvlist_lookup_uint64(zhp->zpool_config,
                    ZPOOL_CONFIG_POOL_GUID, &pool_guid) == 0);
                if (pool_guid == import->guid)
                        found = 1;
        }

        zpool_close(zhp);
        return (found);
}

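/*
 * Top-level entry point for discovering importable pools: if the caller
 * asked for uniqueness, first record in import->exists whether a pool with
 * the same name or guid is already imported; then search either the given
 * cache file or the devices themselves.
 *
 * Sketch of a hypothetical caller (not part of this file, error handling
 * omitted):
 *
 *      importargs_t args = { 0 };
 *      args.poolname = "tank";
 *      args.unique = B_TRUE;
 *      nvlist_t *pools = zpool_search_import(hdl, &args);
 *      ... iterate over 'pools' with nvlist_next_nvpair() ...
 *      nvlist_free(pools);
 */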
nvlist_t *
zpool_search_import(libzfs_handle_t *hdl, importargs_t *import)
{
        verify(import->poolname == NULL || import->guid == 0);

        if (import->unique)
                import->exists = zpool_iter(hdl, name_or_guid_exists, import);

        if (import->cachefile != NULL)
                return (zpool_find_import_cached(hdl, import->cachefile,
                    import->poolname, import->guid));

        return (zpool_find_import_impl(hdl, import));
}

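/*
 * Recursively search the vdev tree rooted at 'nv' for a vdev whose
 * ZPOOL_CONFIG_GUID matches 'guid'.
 */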
boolean_t
find_guid(nvlist_t *nv, uint64_t guid)
{
        uint64_t tmp;
        nvlist_t **child;
        uint_t c, children;

        verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
        if (tmp == guid)
                return (B_TRUE);

        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
            &child, &children) == 0) {
                for (c = 0; c < children; c++)
                        if (find_guid(child[c], guid))
                                return (B_TRUE);
        }

        return (B_FALSE);
}

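/*
 * State passed to find_aux(): the aux vdev class to search (cb_type is
 * either ZPOOL_CONFIG_SPARES or ZPOOL_CONFIG_L2CACHE), the guid to look
 * for, and the handle of the matching pool, if any.
 */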
typedef struct aux_cbdata {
        const char      *cb_type;
        uint64_t        cb_guid;
        zpool_handle_t  *cb_zhp;
} aux_cbdata_t;

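/*
 * zpool_iter() callback that searches one pool's aux vdev list for a
 * matching guid.  On a match the handle is kept in cb_zhp and iteration
 * stops; otherwise the handle is closed.
 */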
static int
find_aux(zpool_handle_t *zhp, void *data)
{
        aux_cbdata_t *cbp = data;
        nvlist_t **list;
        uint_t i, count;
        uint64_t guid;
        nvlist_t *nvroot;

        verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
            &nvroot) == 0);

        if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
            &list, &count) == 0) {
                for (i = 0; i < count; i++) {
                        verify(nvlist_lookup_uint64(list[i],
                            ZPOOL_CONFIG_GUID, &guid) == 0);
                        if (guid == cbp->cb_guid) {
                                cbp->cb_zhp = zhp;
                                return (1);
                        }
                }
        }

        zpool_close(zhp);
        return (0);
}

/*
 * Determines if the device behind 'fd' is in use by a pool.  If so, *inuse
 * is set to B_TRUE, and the state and name of the pool are returned in
 * *state and *namestr.  The name string is allocated and must be freed by
 * the caller.
 */
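/*
 * Sketch of a typical caller (hypothetical device path, error handling
 * omitted):
 *
 *      pool_state_t state;
 *      char *name;
 *      boolean_t inuse;
 *      int fd = open("/dev/da0", O_RDONLY);
 *      if (zpool_in_use(hdl, fd, &state, &name, &inuse) == 0 && inuse) {
 *              ... refuse to overwrite the device owned by pool 'name' ...
 *              free(name);
 *      }
 *      (void) close(fd);
 */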
int
zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
    boolean_t *inuse)
{
        nvlist_t *config;
        char *name;
        boolean_t ret;
        uint64_t guid, vdev_guid;
        zpool_handle_t *zhp;
        nvlist_t *pool_config;
        uint64_t stateval, isspare;
        aux_cbdata_t cb = { 0 };
        boolean_t isactive;

        *inuse = B_FALSE;

        if (zpool_read_label(fd, &config) != 0 && errno == ENOMEM) {
                (void) no_memory(hdl);
                return (-1);
        }

        if (config == NULL)
                return (0);

        verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
            &stateval) == 0);
        verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
            &vdev_guid) == 0);

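        /*
         * Spare and l2cache labels carry no pool name or guid, so only
         * look these up for the other states.
         */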
        if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
                verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
                    &name) == 0);
                verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
                    &guid) == 0);
        }

        switch (stateval) {
        case POOL_STATE_EXPORTED:
                /*
                 * A pool with an exported state may in fact be imported
                 * read-only, so check the in-core state to see if it's
                 * active and imported read-only.  If it is, set
                 * its state to active.
                 */
                if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
                    (zhp = zpool_open_canfail(hdl, name)) != NULL) {
                        if (zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
                                stateval = POOL_STATE_ACTIVE;

                        /*
                         * All we needed the zpool handle for is the
                         * readonly prop check.
                         */
                        zpool_close(zhp);
                }

                ret = B_TRUE;
                break;

        case POOL_STATE_ACTIVE:
                /*
                 * For an active pool, we have to determine if it's really part
                 * of a currently active pool (in which case the pool will exist
                 * and the guid will be the same), or whether it's part of an
                 * active pool that was disconnected without being explicitly
                 * exported.
                 */
                if (pool_active(hdl, name, guid, &isactive) != 0) {
                        nvlist_free(config);
                        return (-1);
                }

                if (isactive) {
                        /*
                         * Because the device may have been removed while
                         * offlined, we only report it as active if the vdev is
                         * still present in the config.  Otherwise, pretend like
                         * it's not in use.
                         */
                        if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
                            (pool_config = zpool_get_config(zhp, NULL))
                            != NULL) {
                                nvlist_t *nvroot;

                                verify(nvlist_lookup_nvlist(pool_config,
                                    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
                                ret = find_guid(nvroot, vdev_guid);
                        } else {
                                ret = B_FALSE;
                        }

                        /*
                         * If this is an active spare within another pool, we
                         * treat it like an unused hot spare.  This allows the
                         * user to create a pool with a hot spare that is
                         * currently in use within another pool.  Since we
                         * return B_TRUE, libdiskmgt will continue to prevent
                         * generic consumers from using the device.
                         */
                        if (ret && nvlist_lookup_uint64(config,
                            ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
                                stateval = POOL_STATE_SPARE;

                        if (zhp != NULL)
                                zpool_close(zhp);
                } else {
                        stateval = POOL_STATE_POTENTIALLY_ACTIVE;
                        ret = B_TRUE;
                }
                break;

        case POOL_STATE_SPARE:
                /*
                 * For a hot spare, it can be either definitively in use, or
                 * potentially active.  To determine if it's in use, we iterate
                 * over all pools in the system and search for one with a spare
                 * with a matching guid.
                 *
                 * Due to the shared nature of spares, we don't actually report
                 * the potentially active case as in use.  This means the user
                 * can freely create pools on the hot spares of exported pools,
                 * but to do otherwise makes the resulting code complicated, and
                 * we end up having to deal with this case anyway.
                 */
                cb.cb_zhp = NULL;
                cb.cb_guid = vdev_guid;
                cb.cb_type = ZPOOL_CONFIG_SPARES;
                if (zpool_iter(hdl, find_aux, &cb) == 1) {
                        name = (char *)zpool_get_name(cb.cb_zhp);
                        ret = B_TRUE;
                } else {
                        ret = B_FALSE;
                }
                break;

        case POOL_STATE_L2CACHE:

                /*
                 * Check if any pool is currently using this l2cache device.
                 */
                cb.cb_zhp = NULL;
                cb.cb_guid = vdev_guid;
                cb.cb_type = ZPOOL_CONFIG_L2CACHE;
                if (zpool_iter(hdl, find_aux, &cb) == 1) {
                        name = (char *)zpool_get_name(cb.cb_zhp);
                        ret = B_TRUE;
                } else {
                        ret = B_FALSE;
                }
                break;

        default:
                ret = B_FALSE;
        }

        if (ret) {
                if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
                        if (cb.cb_zhp)
                                zpool_close(cb.cb_zhp);
                        nvlist_free(config);
                        return (-1);
                }
                *state = (pool_state_t)stateval;
        }

        if (cb.cb_zhp)
                zpool_close(cb.cb_zhp);

        nvlist_free(config);
        *inuse = ret;
        return (0);
}