Illumos 5518 - Memory leaks in libzfs import implementation
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

/*
 * Pool import support functions.
 *
 * To import a pool, we rely on reading the configuration information from the
 * ZFS label of each device.  If we successfully read the label, then we
 * organize the configuration information in the following hierarchy:
 *
 *	pool guid -> toplevel vdev guid -> label txg
 *
 * Duplicate entries matching this same tuple will be discarded.  Once we have
 * examined every device, we pick the best label txg config for each toplevel
 * vdev.  We then arrange these toplevel vdevs into a complete pool config, and
 * update any paths that have changed.  Finally, we attempt to import the pool
 * using our derived config, and record the results.
 */

#include <ctype.h>
#include <devid.h>
#include <dirent.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/efi_partition.h>

#include <sys/vdev_impl.h>
#ifdef HAVE_LIBBLKID
#include <blkid/blkid.h>
#endif

#include "libzfs.h"
#include "libzfs_impl.h"

/*
 * Intermediate structures used to gather configuration information.
 */
typedef struct config_entry {
	uint64_t		ce_txg;
	nvlist_t		*ce_config;
	struct config_entry	*ce_next;
} config_entry_t;

typedef struct vdev_entry {
	uint64_t		ve_guid;
	config_entry_t		*ve_configs;
	struct vdev_entry	*ve_next;
} vdev_entry_t;

typedef struct pool_entry {
	uint64_t		pe_guid;
	vdev_entry_t		*pe_vdevs;
	struct pool_entry	*pe_next;
} pool_entry_t;

typedef struct name_entry {
	char			*ne_name;
	uint64_t		ne_guid;
	uint64_t		ne_order;
	uint64_t		ne_num_labels;
	struct name_entry	*ne_next;
} name_entry_t;

typedef struct pool_list {
	pool_entry_t		*pools;
	name_entry_t		*names;
} pool_list_t;

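/*
 * Illustrative sketch (made-up values): after scanning two devices from
 * the same pool, the gathered state might look like:
 *
 *	pl->pools: pe_guid 0x1
 *	    pe_vdevs: ve_guid 0xa
 *	        ve_configs: ce_txg 120 -> ce_txg 119
 *	pl->names: { "/dev/sda1", guid 0xb, order 1 }
 *	    -> { "/dev/sdb1", guid 0xc, order 1 }
 */

/*
 * Derive a devid string for the device at 'path' using libdevid.
 * Returns an allocated string which the caller must release with
 * devid_str_free(), or NULL if the device has no usable devid.
 */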
static char *
get_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}


/*
 * Go through and fix up any path and/or devid information for the given vdev
 * configuration.
 */
static int
fix_paths(nvlist_t *nv, name_entry_t *names)
{
	nvlist_t **child;
	uint_t c, children;
	uint64_t guid;
	name_entry_t *ne, *best;
	char *path, *devid;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (fix_paths(child[c], names) != 0)
				return (-1);
		return (0);
	}

	/*
	 * This is a leaf (file or disk) vdev.  In either case, go through
	 * the name list and see if we find a matching guid.  If so, replace
	 * the path and see if we can calculate a new devid.
	 *
	 * There may be multiple names associated with a particular guid, in
	 * which case we have overlapping partitions or multiple paths to the
	 * same disk.  In this case we prefer to use the path name which
	 * matches the ZPOOL_CONFIG_PATH.  If no matching entry is found we
	 * use the lowest order device which corresponds to the first match
	 * while traversing the ZPOOL_IMPORT_PATH search path.
	 */
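	/*
	 * For example (illustrative): if both /dev/sda1 and
	 * /dev/mapper/mpatha were recorded for this guid, the entry
	 * whose name equals the config's current path wins outright;
	 * otherwise the entry with more intact labels is preferred,
	 * and ties go to the path found earliest in the search order.
	 */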
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
		path = NULL;

	best = NULL;
	for (ne = names; ne != NULL; ne = ne->ne_next) {
		if (ne->ne_guid == guid) {

			if (path == NULL) {
				best = ne;
				break;
			}

			if ((strlen(path) == strlen(ne->ne_name)) &&
			    strncmp(path, ne->ne_name, strlen(path)) == 0) {
				best = ne;
				break;
			}

			if (best == NULL) {
				best = ne;
				continue;
			}

			/* Prefer paths with more vdev labels. */
			if (ne->ne_num_labels > best->ne_num_labels) {
				best = ne;
				continue;
			}

			/* Prefer paths earlier in the search order. */
			if (ne->ne_num_labels == best->ne_num_labels &&
			    ne->ne_order < best->ne_order) {
				best = ne;
				continue;
			}
		}
	}

	if (best == NULL)
		return (0);

	if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
		return (-1);

	if ((devid = get_devid(best->ne_name)) == NULL) {
		(void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
	} else {
		if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0) {
			devid_str_free(devid);
			return (-1);
		}
		devid_str_free(devid);
	}

	return (0);
}

/*
 * Add the given configuration to the list of known devices.
 */
static int
add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
    int order, int num_labels, nvlist_t *config)
{
	uint64_t pool_guid, vdev_guid, top_guid, txg, state;
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	name_entry_t *ne;

	/*
	 * If this is a hot spare not currently in use or level 2 cache
	 * device, add it to the list of names to translate, but don't do
	 * anything else.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &state) == 0 &&
	    (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
		if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
			return (-1);

		if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
			free(ne);
			return (-1);
		}
		ne->ne_guid = vdev_guid;
		ne->ne_order = order;
		ne->ne_num_labels = num_labels;
		ne->ne_next = pl->names;
		pl->names = ne;
		return (0);
	}

	/*
	 * If we have a valid config but cannot read any of these fields, then
	 * it means we have a half-initialized label.  In vdev_label_init()
	 * we write a label with txg == 0 so that we can identify the device
	 * in case the user refers to the same disk later on.  If we fail to
	 * create the pool, we'll be left with a label in this state
	 * which should not be considered part of a valid pool.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &pool_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
	    &top_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &txg) != 0 || txg == 0) {
		nvlist_free(config);
		return (0);
	}

	/*
	 * First, see if we know about this pool.  If not, then add it to the
	 * list of known pools.
	 */
	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		if (pe->pe_guid == pool_guid)
			break;
	}

	if (pe == NULL) {
		if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		pe->pe_guid = pool_guid;
		pe->pe_next = pl->pools;
		pl->pools = pe;
	}

	/*
	 * Second, see if we know about this toplevel vdev.  Add it if it's
	 * missing.
	 */
	for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
		if (ve->ve_guid == top_guid)
			break;
	}

	if (ve == NULL) {
		if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		ve->ve_guid = top_guid;
		ve->ve_next = pe->pe_vdevs;
		pe->pe_vdevs = ve;
	}

	/*
	 * Third, see if we have a config with a matching transaction group.  If
	 * so, then we do nothing.  Otherwise, add it to the list of known
	 * configs.
	 */
	for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
		if (ce->ce_txg == txg)
			break;
	}

	if (ce == NULL) {
		if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		ce->ce_txg = txg;
		ce->ce_config = config;
		ce->ce_next = ve->ve_configs;
		ve->ve_configs = ce;
	} else {
		nvlist_free(config);
	}

	/*
	 * At this point we've successfully added our config to the list of
	 * known configs.  The last thing to do is add the vdev guid -> path
	 * mappings so that we can fix up the configuration as necessary before
	 * doing the import.
	 */
	if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
		return (-1);

	if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
		free(ne);
		return (-1);
	}

	ne->ne_guid = vdev_guid;
	ne->ne_order = order;
	ne->ne_num_labels = num_labels;
	ne->ne_next = pl->names;
	pl->names = ne;

	return (0);
}

/*
 * Determine whether the named pool is active and matches the given
 * GUID; the answer is returned in *isactive.
 */
static int
pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
    boolean_t *isactive)
{
	zpool_handle_t *zhp;
	uint64_t theguid;

	if (zpool_open_silent(hdl, name, &zhp) != 0)
		return (-1);

	if (zhp == NULL) {
		*isactive = B_FALSE;
		return (0);
	}

	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
	    &theguid) == 0);

	zpool_close(zhp);

	*isactive = (theguid == guid);
	return (0);
}

static nvlist_t *
refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
{
	nvlist_t *nvl;
	zfs_cmd_t zc = {"\0"};
	int err;

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
		return (NULL);

	if (zcmd_alloc_dst_nvlist(hdl, &zc,
	    zc.zc_nvlist_conf_size * 2) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

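	/*
	 * Issue the tryimport ioctl, growing the destination buffer and
	 * retrying for as long as the kernel reports that the packed
	 * config did not fit (ENOMEM).
	 */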
	while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
	    &zc)) != 0 && errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (NULL);
		}
	}

	if (err) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	zcmd_free_nvlists(&zc);
	return (nvl);
}

/*
 * Determine if the vdev id is a hole in the namespace.
 */
boolean_t
vdev_is_hole(uint64_t *hole_array, uint_t holes, uint_t id)
{
	int c;

	for (c = 0; c < holes; c++) {

		/* Top-level is a hole */
		if (hole_array[c] == id)
			return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * Convert our list of pools into the definitive set of configurations.  We
 * start by picking the best config for each toplevel vdev.  Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool.  We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
{
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	nvlist_t *ret = NULL, *config = NULL, *tmp = NULL, *nvtop, *nvroot;
	nvlist_t **spares, **l2cache;
	uint_t i, nspares, nl2cache;
	boolean_t config_seen;
	uint64_t best_txg;
	char *name, *hostname = NULL;
	uint64_t guid;
	uint_t children = 0;
	nvlist_t **child = NULL;
	uint_t holes;
	uint64_t *hole_array, max_id;
	uint_t c;
	boolean_t isactive;
	uint64_t hostid;
	nvlist_t *nvl;
	boolean_t valid_top_config = B_FALSE;

	if (nvlist_alloc(&ret, 0, 0) != 0)
		goto nomem;

	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		uint64_t id, max_txg = 0;

		if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		config_seen = B_FALSE;

		/*
		 * Iterate over all toplevel vdevs.  Grab the pool configuration
		 * from the first one we find, and then go through the rest and
		 * add them as necessary to the 'vdevs' member of the config.
		 */
		for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {

			/*
			 * Determine the best configuration for this vdev by
			 * selecting the config with the latest transaction
			 * group.
			 */
			best_txg = 0;
			for (ce = ve->ve_configs; ce != NULL;
			    ce = ce->ce_next) {

				if (ce->ce_txg > best_txg) {
					tmp = ce->ce_config;
					best_txg = ce->ce_txg;
				}
			}

			/*
			 * We rely on the fact that the max txg for the
			 * pool will contain the most up-to-date information
			 * about the valid top-levels in the vdev namespace.
			 */
			if (best_txg > max_txg) {
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_VDEV_CHILDREN,
				    DATA_TYPE_UINT64);
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_HOLE_ARRAY,
				    DATA_TYPE_UINT64_ARRAY);

				max_txg = best_txg;
				hole_array = NULL;
				holes = 0;
				max_id = 0;
				valid_top_config = B_FALSE;

				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VDEV_CHILDREN, &max_id) == 0) {
					verify(nvlist_add_uint64(config,
					    ZPOOL_CONFIG_VDEV_CHILDREN,
					    max_id) == 0);
					valid_top_config = B_TRUE;
				}

				if (nvlist_lookup_uint64_array(tmp,
				    ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
				    &holes) == 0) {
					verify(nvlist_add_uint64_array(config,
					    ZPOOL_CONFIG_HOLE_ARRAY,
					    hole_array, holes) == 0);
				}
			}

			if (!config_seen) {
				/*
				 * Copy the relevant pieces of data to the pool
				 * configuration:
				 *
				 *	version
				 *	pool guid
				 *	name
				 *	comment (if available)
				 *	pool state
				 *	hostid (if available)
				 *	hostname (if available)
				 */
				uint64_t state, version;
				char *comment = NULL;

				version = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VERSION);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_VERSION, version);
				guid = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_GUID);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_GUID, guid);
				name = fnvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_POOL_NAME);
				fnvlist_add_string(config,
				    ZPOOL_CONFIG_POOL_NAME, name);

				if (nvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_COMMENT, &comment) == 0)
					fnvlist_add_string(config,
					    ZPOOL_CONFIG_COMMENT, comment);

				state = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_STATE);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_STATE, state);

				hostid = 0;
				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
					fnvlist_add_uint64(config,
					    ZPOOL_CONFIG_HOSTID, hostid);
					hostname = fnvlist_lookup_string(tmp,
					    ZPOOL_CONFIG_HOSTNAME);
					fnvlist_add_string(config,
					    ZPOOL_CONFIG_HOSTNAME, hostname);
				}

				config_seen = B_TRUE;
			}

			/*
			 * Add this top-level vdev to the child array.
			 */
			verify(nvlist_lookup_nvlist(tmp,
			    ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
			verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
			    &id) == 0);

			if (id >= children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (id + 1) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = id + 1;
			}
			if (nvlist_dup(nvtop, &child[id], 0) != 0)
				goto nomem;

		}

		/*
		 * If we have information about all the top-levels then
		 * clean up the nvlist which we've constructed. This
		 * means removing any extraneous devices that are
		 * beyond the valid range or adding devices to the end
		 * of our array which appear to be missing.
		 */
		if (valid_top_config) {
			if (max_id < children) {
				for (c = max_id; c < children; c++)
					nvlist_free(child[c]);
				children = max_id;
			} else if (max_id > children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (max_id) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = max_id;
			}
		}

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		/*
		 * The vdev namespace may contain holes as a result of
		 * device removal. We must add them back into the vdev
		 * tree before we process any missing devices.
		 */
		if (holes > 0) {
			ASSERT(valid_top_config);

			for (c = 0; c < children; c++) {
				nvlist_t *holey;

				if (child[c] != NULL ||
				    !vdev_is_hole(hole_array, holes, c))
					continue;

				if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;

				/*
				 * Holes in the namespace are treated as
				 * "hole" top-level vdevs and have a
				 * special flag set on them.
				 */
				if (nvlist_add_string(holey,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_HOLE) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(holey);
					goto nomem;
				}
				child[c] = holey;
			}
		}

		/*
		 * Look for any missing top-level vdevs.  If this is the case,
		 * create a faked up 'missing' vdev as a placeholder.  We cannot
		 * simply compress the child array, because the kernel performs
		 * certain checks to make sure the vdev IDs match their location
		 * in the configuration.
		 */
		for (c = 0; c < children; c++) {
			if (child[c] == NULL) {
				nvlist_t *missing;
				if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;
				if (nvlist_add_string(missing,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_MISSING) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(missing);
					goto nomem;
				}
				child[c] = missing;
			}
		}

		/*
		 * Put all of this pool's top-level vdevs into a root vdev.
		 */
		if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
		    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
		    child, children) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		for (c = 0; c < children; c++)
			nvlist_free(child[c]);
		free(child);
		children = 0;
		child = NULL;

		/*
		 * Go through and fix up any paths and/or devids based on our
		 * known list of vdev GUID -> path mappings.
		 */
		if (fix_paths(nvroot, pl->names) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		/*
		 * Add the root vdev to this pool's configuration.
		 */
		if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    nvroot) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}
		nvlist_free(nvroot);

		/*
		 * zdb uses this path to report on active pools that were
		 * imported or created using -R.
		 */
		if (active_ok)
			goto add_pool;

		/*
		 * Determine if this pool is currently active, in which case we
		 * can't actually import it.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		if (pool_active(hdl, name, guid, &isactive) != 0)
			goto error;

		if (isactive) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		if ((nvl = refresh_config(hdl, config)) == NULL) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		nvlist_free(config);
		config = nvl;

		/*
		 * Go through and update the paths for spares, now that we have
		 * them.
		 */
		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &spares, &nspares) == 0) {
			for (i = 0; i < nspares; i++) {
				if (fix_paths(spares[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Update the paths for l2cache devices.
		 */
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2cache, &nl2cache) == 0) {
			for (i = 0; i < nl2cache; i++) {
				if (fix_paths(l2cache[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Restore the original information read from the actual label.
		 */
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
		    DATA_TYPE_UINT64);
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
		    DATA_TYPE_STRING);
		if (hostid != 0) {
			verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
			    hostid) == 0);
			verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
			    hostname) == 0);
		}

add_pool:
		/*
		 * Add this pool to the list of configs.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (nvlist_add_nvlist(ret, name, config) != 0)
			goto nomem;

		nvlist_free(config);
		config = NULL;
	}

	return (ret);

nomem:
	(void) no_memory(hdl);
error:
	nvlist_free(config);
	nvlist_free(ret);
	for (c = 0; c < children; c++)
		nvlist_free(child[c]);
	free(child);

	return (NULL);
}

/*
 * Return the offset of the given label.
 */
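/*
 * Labels 0 and 1 are placed at the front of the device and labels 2
 * and 3 at the end.  For example (illustrative, with VDEV_LABELS == 4
 * and 256 KiB labels), label 1 starts at offset 256 KiB and label 2
 * starts at size - 512 KiB.
 */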
static uint64_t
label_offset(uint64_t size, int l)
{
	ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
	return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
	    0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}

/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one.  The number of valid
 * labels found will be returned in num_labels when non-NULL.
 */
int
zpool_read_label(int fd, nvlist_t **config, int *num_labels)
{
	struct stat64 statbuf;
	int l, count = 0;
	vdev_label_t *label;
	nvlist_t *expected_config = NULL;
	uint64_t expected_guid = 0, size;

	*config = NULL;

	if (fstat64_blk(fd, &statbuf) == -1)
		return (0);
	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	if ((label = malloc(sizeof (vdev_label_t))) == NULL)
		return (-1);

	for (l = 0; l < VDEV_LABELS; l++) {
		uint64_t state, guid, txg;

		if (pread64(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t))
			continue;

		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
			continue;

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_GUID,
		    &guid) != 0 || guid == 0) {
			nvlist_free(*config);
			continue;
		}

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state > POOL_STATE_L2CACHE) {
			nvlist_free(*config);
			continue;
		}

		if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
		    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0)) {
			nvlist_free(*config);
			continue;
		}

		if (expected_guid) {
			if (expected_guid == guid)
				count++;

			nvlist_free(*config);
		} else {
			expected_config = *config;
			expected_guid = guid;
			count++;
		}
	}

	if (num_labels != NULL)
		*num_labels = count;

	free(label);
	*config = expected_config;

	return (0);
}
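
/*
 * A minimal caller sketch (hypothetical device path, error handling
 * elided); any returned config must be freed by the caller:
 *
 *	nvlist_t *config;
 *	int num_labels, fd;
 *
 *	if ((fd = open("/dev/sda1", O_RDONLY)) >= 0) {
 *		if (zpool_read_label(fd, &config, &num_labels) == 0 &&
 *		    config != NULL)
 *			nvlist_free(config);
 *		(void) close(fd);
 *	}
 */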

typedef struct rdsk_node {
	char *rn_name;
	int rn_num_labels;
	int rn_dfd;
	libzfs_handle_t *rn_hdl;
	nvlist_t *rn_config;
	avl_tree_t *rn_avl;
	avl_node_t rn_node;
	boolean_t rn_nozpool;
} rdsk_node_t;

static int
slice_cache_compare(const void *arg1, const void *arg2)
{
	const char  *nm1 = ((rdsk_node_t *)arg1)->rn_name;
	const char  *nm2 = ((rdsk_node_t *)arg2)->rn_name;
	char *nm1slice, *nm2slice;
	int rv;

	/*
	 * partitions one and three (slices zero and two) are the most
	 * likely to provide results, so put those first
	 */
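	/*
	 * Example (illustrative names): "sda-part1" sorts before
	 * "sda-part3", which sorts before names containing neither
	 * substring; remaining ties fall back to strcmp() ordering.
	 */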
	nm1slice = strstr(nm1, "part1");
	nm2slice = strstr(nm2, "part1");
	if (nm1slice && !nm2slice) {
		return (-1);
	}
	if (!nm1slice && nm2slice) {
		return (1);
	}
	nm1slice = strstr(nm1, "part3");
	nm2slice = strstr(nm2, "part3");
	if (nm1slice && !nm2slice) {
		return (-1);
	}
	if (!nm1slice && nm2slice) {
		return (1);
	}

	rv = strcmp(nm1, nm2);
	if (rv == 0)
		return (0);
	return (rv > 0 ? 1 : -1);
}

#ifndef __linux__
static void
check_one_slice(avl_tree_t *r, char *diskname, uint_t partno,
    diskaddr_t size, uint_t blksz)
{
	rdsk_node_t tmpnode;
	rdsk_node_t *node;
	char sname[MAXNAMELEN];

	tmpnode.rn_name = &sname[0];
	(void) snprintf(tmpnode.rn_name, MAXNAMELEN, "%s%u",
	    diskname, partno);
	/* too small to contain a zpool? */
	if ((size < (SPA_MINDEVSIZE / blksz)) &&
	    (node = avl_find(r, &tmpnode, NULL)))
		node->rn_nozpool = B_TRUE;
}
#endif

static void
nozpool_all_slices(avl_tree_t *r, const char *sname)
{
#ifndef __linux__
	char diskname[MAXNAMELEN];
	char *ptr;
	int i;

	(void) strncpy(diskname, sname, MAXNAMELEN);
	if (((ptr = strrchr(diskname, 's')) == NULL) &&
	    ((ptr = strrchr(diskname, 'p')) == NULL))
		return;
	ptr[0] = 's';
	ptr[1] = '\0';
	for (i = 0; i < NDKMAP; i++)
		check_one_slice(r, diskname, i, 0, 1);
	ptr[0] = 'p';
	for (i = 0; i <= FD_NUMPART; i++)
		check_one_slice(r, diskname, i, 0, 1);
#endif
}

static void
check_slices(avl_tree_t *r, int fd, const char *sname)
{
#ifndef __linux__
	struct extvtoc vtoc;
	struct dk_gpt *gpt;
	char diskname[MAXNAMELEN];
	char *ptr;
	int i;

	(void) strncpy(diskname, sname, MAXNAMELEN);
	if ((ptr = strrchr(diskname, 's')) == NULL || !isdigit(ptr[1]))
		return;
	ptr[1] = '\0';

	if (read_extvtoc(fd, &vtoc) >= 0) {
		for (i = 0; i < NDKMAP; i++)
			check_one_slice(r, diskname, i,
			    vtoc.v_part[i].p_size, vtoc.v_sectorsz);
	} else if (efi_alloc_and_read(fd, &gpt) >= 0) {
		/*
		 * on x86 we'll still have leftover links that point
		 * to slices s[9-15], so use NDKMAP instead
		 */
		for (i = 0; i < NDKMAP; i++)
			check_one_slice(r, diskname, i,
			    gpt->efi_parts[i].p_size, gpt->efi_lbasize);
		/* nodes p[1-4] are never used with EFI labels */
		ptr[0] = 'p';
		for (i = 1; i <= FD_NUMPART; i++)
			check_one_slice(r, diskname, i, 0, 1);
		efi_free(gpt);
	}
#endif
}

static void
zpool_open_func(void *arg)
{
	rdsk_node_t *rn = arg;
	struct stat64 statbuf;
	nvlist_t *config;
	int num_labels;
	int fd;

	if (rn->rn_nozpool)
		return;
#ifdef __linux__
	/*
	 * Skip devices with well-known prefixes, since opening them can
	 * have side effects that must be avoided:
	 *
	 * core     - Symlink to /proc/kcore
	 * fd*      - Floppy interface.
	 * fuse     - Fuse control device.
	 * hpet     - High Precision Event Timer
	 * lp*      - Printer interface.
	 * parport* - Parallel port interface.
	 * ppp      - Generic PPP driver.
	 * random   - Random device
	 * rtc      - Real Time Clock
	 * tty*     - Generic serial interface.
	 * urandom  - Random device.
	 * usbmon*  - USB IO monitor.
	 * vcs*     - Virtual console memory.
	 * watchdog - Watchdog must be closed in a special way.
	 */
	if ((strncmp(rn->rn_name, "core", 4) == 0) ||
	    (strncmp(rn->rn_name, "fd", 2) == 0) ||
	    (strncmp(rn->rn_name, "fuse", 4) == 0) ||
	    (strncmp(rn->rn_name, "hpet", 4) == 0) ||
	    (strncmp(rn->rn_name, "lp", 2) == 0) ||
	    (strncmp(rn->rn_name, "parport", 7) == 0) ||
	    (strncmp(rn->rn_name, "ppp", 3) == 0) ||
	    (strncmp(rn->rn_name, "random", 6) == 0) ||
	    (strncmp(rn->rn_name, "rtc", 3) == 0) ||
	    (strncmp(rn->rn_name, "tty", 3) == 0) ||
	    (strncmp(rn->rn_name, "urandom", 7) == 0) ||
	    (strncmp(rn->rn_name, "usbmon", 6) == 0) ||
	    (strncmp(rn->rn_name, "vcs", 3) == 0) ||
	    (strncmp(rn->rn_name, "watchdog", 8) == 0))
		return;

	/*
	 * Ignore failed stats.  We only want regular files and block devices.
	 */
	if (fstatat64(rn->rn_dfd, rn->rn_name, &statbuf, 0) != 0 ||
	    (!S_ISREG(statbuf.st_mode) && !S_ISBLK(statbuf.st_mode)))
		return;

	if ((fd = openat64(rn->rn_dfd, rn->rn_name, O_RDONLY)) < 0) {
		/* symlink to a device that's no longer there */
		if (errno == ENOENT)
			nozpool_all_slices(rn->rn_avl, rn->rn_name);
		return;
	}
#else
	if ((fd = openat64(rn->rn_dfd, rn->rn_name, O_RDONLY)) < 0) {
		/* symlink to a device that's no longer there */
		if (errno == ENOENT)
			nozpool_all_slices(rn->rn_avl, rn->rn_name);
		return;
	}
	/*
	 * Ignore failed stats.  We only want regular
	 * files, character devs and block devs.
	 */
	if (fstat64(fd, &statbuf) != 0 ||
	    (!S_ISREG(statbuf.st_mode) &&
	    !S_ISCHR(statbuf.st_mode) &&
	    !S_ISBLK(statbuf.st_mode))) {
		(void) close(fd);
		return;
	}
#endif
	/* this file is too small to hold a zpool */
	if (S_ISREG(statbuf.st_mode) &&
	    statbuf.st_size < SPA_MINDEVSIZE) {
		(void) close(fd);
		return;
	} else if (!S_ISREG(statbuf.st_mode)) {
		/*
		 * Try to read the disk label first so we don't have to
		 * open a bunch of minor nodes that can't have a zpool.
		 */
		check_slices(rn->rn_avl, fd, rn->rn_name);
	}

	if ((zpool_read_label(fd, &config, &num_labels)) != 0) {
		(void) close(fd);
		(void) no_memory(rn->rn_hdl);
		return;
	}

	if (num_labels == 0) {
		(void) close(fd);
		nvlist_free(config);
		return;
	}

	(void) close(fd);

	rn->rn_config = config;
	rn->rn_num_labels = num_labels;
	if (config != NULL) {
		assert(rn->rn_nozpool == B_FALSE);
	}
}

/*
 * Given a file descriptor, clear (zero) the label information.  This function
 * is used in the appliance stack as part of the ZFS sysevent module and
 * to implement the "zpool labelclear" command.
 */
int
zpool_clear_label(int fd)
{
	struct stat64 statbuf;
	int l;
	vdev_label_t *label;
	uint64_t size;

	if (fstat64_blk(fd, &statbuf) == -1)
		return (0);
	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	if ((label = calloc(1, sizeof (vdev_label_t))) == NULL)
		return (-1);

	for (l = 0; l < VDEV_LABELS; l++) {
		if (pwrite64(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t)) {
			free(label);
			return (-1);
		}
	}

	free(label);
	return (0);
}
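
/*
 * A minimal caller sketch (hypothetical device path): the descriptor
 * must be writable, since all VDEV_LABELS labels are overwritten:
 *
 *	int fd = open("/dev/sdb1", O_RDWR);
 *
 *	if (fd >= 0) {
 *		(void) zpool_clear_label(fd);
 *		(void) close(fd);
 *	}
 */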

#ifdef HAVE_LIBBLKID
/*
 * Use libblkid to quickly search for zfs devices
 */
static int
zpool_find_import_blkid(libzfs_handle_t *hdl, pool_list_t *pools)
{
	blkid_cache cache;
	blkid_dev_iterate iter;
	blkid_dev dev;
	const char *devname;
	nvlist_t *config;
	int fd, err, num_labels;

	err = blkid_get_cache(&cache, NULL);
	if (err != 0) {
		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid_get_cache() %d"), err);
		goto err_blkid1;
	}

	err = blkid_probe_all(cache);
	if (err != 0) {
		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid_probe_all() %d"), err);
		goto err_blkid2;
	}

	iter = blkid_dev_iterate_begin(cache);
	if (iter == NULL) {
		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid_dev_iterate_begin()"));
		goto err_blkid2;
	}

	err = blkid_dev_set_search(iter, "TYPE", "zfs_member");
	if (err != 0) {
		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid_dev_set_search() %d"), err);
		goto err_blkid3;
	}

	while (blkid_dev_next(iter, &dev) == 0) {
		devname = blkid_dev_devname(dev);
		if ((fd = open64(devname, O_RDONLY)) < 0)
			continue;

		err = zpool_read_label(fd, &config, &num_labels);
		(void) close(fd);

		if (err != 0) {
			(void) no_memory(hdl);
			goto err_blkid3;
		}

		if (config != NULL) {
			err = add_config(hdl, pools, devname, 0,
			    num_labels, config);
			if (err != 0)
				goto err_blkid3;
		}
	}

err_blkid3:
	blkid_dev_iterate_end(iter);
err_blkid2:
	blkid_put_cache(cache);
err_blkid1:
	return (err);
}
#endif /* HAVE_LIBBLKID */

char *
zpool_default_import_path[DEFAULT_IMPORT_PATH_SIZE] = {
	"/dev/disk/by-vdev",	/* Custom rules, use first if they exist */
	"/dev/mapper",		/* Use multipath devices before components */
	"/dev/disk/by-uuid",	/* Single unique entry and persistent */
	"/dev/disk/by-id",	/* May be multiple entries and persistent */
	"/dev/disk/by-path",	/* Encodes physical location and persistent */
	"/dev/disk/by-label",	/* Custom persistent labels */
	"/dev"			/* UNSAFE device names will change */
};

/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no
 * directories are given (iarg->paths is 0), libblkid is used when available,
 * falling back to scanning the default import search paths.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
static nvlist_t *
zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
{
	int i, dirs = iarg->paths;
	struct dirent64 *dp;
	char path[MAXPATHLEN];
	char *end, **dir = iarg->path;
	size_t pathleft;
	nvlist_t *ret = NULL;
	pool_list_t pools = { 0 };
	pool_entry_t *pe, *penext;
	vdev_entry_t *ve, *venext;
	config_entry_t *ce, *cenext;
	name_entry_t *ne, *nenext;
	avl_tree_t slice_cache;
	rdsk_node_t *slice;
	void *cookie;

	verify(iarg->poolname == NULL || iarg->guid == 0);

	if (dirs == 0) {
#ifdef HAVE_LIBBLKID
		/* Use libblkid to scan all devices for their type */
		if (zpool_find_import_blkid(hdl, &pools) == 0)
			goto skip_scanning;

		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid failure falling back "
		    "to manual probing"));
#endif /* HAVE_LIBBLKID */

		dir = zpool_default_import_path;
		dirs = DEFAULT_IMPORT_PATH_SIZE;
	}

	/*
	 * Go through and read the label configuration information from every
	 * possible device, organizing the information according to pool GUID
	 * and toplevel GUID.
	 */
	for (i = 0; i < dirs; i++) {
		taskq_t *t;
		char *rdsk;
		int dfd;
		boolean_t config_failed = B_FALSE;
		DIR *dirp;

		/* use realpath to normalize the path */
		if (realpath(dir[i], path) == NULL) {
1347
1348                         /* it is safe to skip missing search paths */
1349                         if (errno == ENOENT)
1350                                 continue;
1351
1352                         zfs_error_aux(hdl, strerror(errno));
1353                         (void) zfs_error_fmt(hdl, EZFS_BADPATH,
1354                             dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
1355                         goto error;
1356                 }
1357                 end = &path[strlen(path)];
1358                 *end++ = '/';
1359                 *end = 0;
1360                 pathleft = &path[sizeof (path)] - end;
1361
1362                 /*
1363                  * Using raw devices instead of block devices when we're
1364                  * reading the labels skips a bunch of slow operations during
1365                  * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
1366                  */
1367                 if (strcmp(path, "/dev/dsk/") == 0)
1368                         rdsk = "/dev/rdsk/";
1369                 else
1370                         rdsk = path;
1371
1372                 if ((dfd = open64(rdsk, O_RDONLY)) < 0 ||
1373                     (dirp = fdopendir(dfd)) == NULL) {
1374                         if (dfd >= 0)
1375                                 (void) close(dfd);
1376                         zfs_error_aux(hdl, strerror(errno));
1377                         (void) zfs_error_fmt(hdl, EZFS_BADPATH,
1378                             dgettext(TEXT_DOMAIN, "cannot open '%s'"),
1379                             rdsk);
1380                         goto error;
1381                 }
1382
1383                 avl_create(&slice_cache, slice_cache_compare,
1384                     sizeof (rdsk_node_t), offsetof(rdsk_node_t, rn_node));
1385
1386                 /*
1387                  * This is not MT-safe, but we have no MT consumers of libzfs
1388                  */
1389                 while ((dp = readdir64(dirp)) != NULL) {
1390                         const char *name = dp->d_name;
1391                         if (name[0] == '.' &&
1392                             (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
1393                                 continue;
1394
1395                         slice = zfs_alloc(hdl, sizeof (rdsk_node_t));
1396                         slice->rn_name = zfs_strdup(hdl, name);
1397                         slice->rn_avl = &slice_cache;
1398                         slice->rn_dfd = dfd;
1399                         slice->rn_hdl = hdl;
1400                         slice->rn_nozpool = B_FALSE;
1401                         avl_add(&slice_cache, slice);
1402                 }
1403                 /*
1404                  * create a thread pool to do all of this in parallel;
1405                  * rn_nozpool is not protected, so this is racy in that
1406                  * multiple tasks could decide that the same slice can
1407                  * not hold a zpool, which is benign.  Also choose
1408                  * double the number of processors; we hold a lot of
1409                  * locks in the kernel, so going beyond this doesn't
1410                  * buy us much.
1411                  */
1412                 thread_init();
1413                 t = taskq_create("z_import", 2 * boot_ncpus, defclsyspri,
1414                     2 * boot_ncpus, INT_MAX, TASKQ_PREPOPULATE);
1415                 for (slice = avl_first(&slice_cache); slice;
1416                     (slice = avl_walk(&slice_cache, slice,
1417                     AVL_AFTER)))
1418                         (void) taskq_dispatch(t, zpool_open_func, slice,
1419                             TQ_SLEEP);
1420                 taskq_wait(t);
1421                 taskq_destroy(t);
1422                 thread_fini();
1423
1424                 cookie = NULL;
1425                 while ((slice = avl_destroy_nodes(&slice_cache,
1426                     &cookie)) != NULL) {
1427                         if (slice->rn_config != NULL && !config_failed) {
1428                                 nvlist_t *config = slice->rn_config;
1429                                 boolean_t matched = B_TRUE;
1430
1431                                 if (iarg->poolname != NULL) {
1432                                         char *pname;
1433
1434                                         matched = nvlist_lookup_string(config,
1435                                             ZPOOL_CONFIG_POOL_NAME,
1436                                             &pname) == 0 &&
1437                                             strcmp(iarg->poolname, pname) == 0;
1438                                 } else if (iarg->guid != 0) {
1439                                         uint64_t this_guid;
1440
1441                                         matched = nvlist_lookup_uint64(config,
1442                                             ZPOOL_CONFIG_POOL_GUID,
1443                                             &this_guid) == 0 &&
1444                                             iarg->guid == this_guid;
1445                                 }
1446                                 if (!matched) {
1447                                         nvlist_free(config);
1448                                 } else {
1449                                         /*
1450                                          * use the non-raw path for the config
1451                                          */
1452                                         (void) strlcpy(end, slice->rn_name,
1453                                             pathleft);
1454                                         if (add_config(hdl, &pools, path, i+1,
1455                                             slice->rn_num_labels, config) != 0)
1456                                                 config_failed = B_TRUE;
1457                                 }
1458                         }
1459                         free(slice->rn_name);
1460                         free(slice);
1461                 }
1462                 avl_destroy(&slice_cache);
1463
1464                 (void) closedir(dirp);
1465
1466                 if (config_failed)
1467                         goto error;
1468         }
1469
1470 #ifdef HAVE_LIBBLKID
1471 skip_scanning:
1472 #endif
1473         ret = get_configs(hdl, &pools, iarg->can_be_active);
1474
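        /*
         * Common teardown: the success path falls through to this label
         * and every failure path jumps here via goto, so the pool, vdev,
         * config, and name lists built during scanning are always freed.
         */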
1475 error:
1476         for (pe = pools.pools; pe != NULL; pe = penext) {
1477                 penext = pe->pe_next;
1478                 for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
1479                         venext = ve->ve_next;
1480                         for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
1481                                 cenext = ce->ce_next;
1482                                 if (ce->ce_config)
1483                                         nvlist_free(ce->ce_config);
1484                                 free(ce);
1485                         }
1486                         free(ve);
1487                 }
1488                 free(pe);
1489         }
1490
1491         for (ne = pools.names; ne != NULL; ne = nenext) {
1492                 nenext = ne->ne_next;
1493                 free(ne->ne_name);
1494                 free(ne);
1495         }
1496
1497         return (ret);
1498 }
1499
1500 nvlist_t *
1501 zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv)
1502 {
1503         importargs_t iarg = { 0 };
1504
1505         iarg.paths = argc;
1506         iarg.path = argv;
1507
1508         return (zpool_find_import_impl(hdl, &iarg));
1509 }
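
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * a caller would typically obtain a libzfs handle, search for importable
 * pools, and free the returned nvlist when done.  Passing argc == 0 and
 * argv == NULL is assumed here to fall back to the default search path.
 *
 *      libzfs_handle_t *hdl = libzfs_init();
 *      nvlist_t *pools = zpool_find_import(hdl, 0, NULL);
 *      nvpair_t *elem = NULL;
 *
 *      while (pools != NULL &&
 *          (elem = nvlist_next_nvpair(pools, elem)) != NULL) {
 *              nvlist_t *config = fnvpair_value_nvlist(elem);
 *              char *name = fnvlist_lookup_string(config,
 *                  ZPOOL_CONFIG_POOL_NAME);
 *              (void) printf("importable pool: %s\n", name);
 *      }
 *      nvlist_free(pools);
 *      libzfs_fini(hdl);
 */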
1510
1511 /*
1512  * Given a cache file, return the contents as a list of importable pools.
1513  * Either poolname or guid (but not both) is provided by the caller when
1514  * trying to import a specific pool.
1515  */
1516 nvlist_t *
1517 zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
1518     char *poolname, uint64_t guid)
1519 {
1520         char *buf;
1521         int fd;
1522         struct stat64 statbuf;
1523         nvlist_t *raw, *src, *dst;
1524         nvlist_t *pools;
1525         nvpair_t *elem;
1526         char *name;
1527         uint64_t this_guid;
1528         boolean_t active;
1529
1530         verify(poolname == NULL || guid == 0);
1531
1532         if ((fd = open(cachefile, O_RDONLY)) < 0) {
1533                 zfs_error_aux(hdl, "%s", strerror(errno));
1534                 (void) zfs_error(hdl, EZFS_BADCACHE,
1535                     dgettext(TEXT_DOMAIN, "failed to open cache file"));
1536                 return (NULL);
1537         }
1538
1539         if (fstat64(fd, &statbuf) != 0) {
1540                 zfs_error_aux(hdl, "%s", strerror(errno));
1541                 (void) close(fd);
1542                 (void) zfs_error(hdl, EZFS_BADCACHE,
1543                     dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
1544                 return (NULL);
1545         }
1546
1547         if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
1548                 (void) close(fd);
1549                 return (NULL);
1550         }
1551
1552         if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
1553                 (void) close(fd);
1554                 free(buf);
1555                 (void) zfs_error(hdl, EZFS_BADCACHE,
1556                     dgettext(TEXT_DOMAIN,
1557                     "failed to read cache file contents"));
1558                 return (NULL);
1559         }
1560
1561         (void) close(fd);
1562
1563         if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
1564                 free(buf);
1565                 (void) zfs_error(hdl, EZFS_BADCACHE,
1566                     dgettext(TEXT_DOMAIN,
1567                     "invalid or corrupt cache file contents"));
1568                 return (NULL);
1569         }
1570
1571         free(buf);
1572
1573         /*
1574          * Go through the pools in the cache file, refreshing the state of
1575          * each to reflect the current system configuration.
1576          */
1577         if (nvlist_alloc(&pools, 0, 0) != 0) {
1578                 (void) no_memory(hdl);
1579                 nvlist_free(raw);
1580                 return (NULL);
1581         }
1582
1583         elem = NULL;
1584         while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
1585                 src = fnvpair_value_nvlist(elem);
1586
1587                 name = fnvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME);
1588                 if (poolname != NULL && strcmp(poolname, name) != 0)
1589                         continue;
1590
1591                 this_guid = fnvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID);
1592                 if (guid != 0 && guid != this_guid)
1593                         continue;
1594
1595                 if (pool_active(hdl, name, this_guid, &active) != 0) {
1596                         nvlist_free(raw);
1597                         nvlist_free(pools);
1598                         return (NULL);
1599                 }
1600
1601                 if (active)
1602                         continue;
1603
1604                 if ((dst = refresh_config(hdl, src)) == NULL) {
1605                         nvlist_free(raw);
1606                         nvlist_free(pools);
1607                         return (NULL);
1608                 }
1609
1610                 if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
1611                         (void) no_memory(hdl);
1612                         nvlist_free(dst);
1613                         nvlist_free(raw);
1614                         nvlist_free(pools);
1615                         return (NULL);
1616                 }
1617                 nvlist_free(dst);
1618         }
1619
1620         nvlist_free(raw);
1621         return (pools);
1622 }
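
/*
 * Example usage (sketch): the cache file path below is the conventional
 * /etc/zfs/zpool.cache location and is an assumption, not something this
 * file mandates.  Passing NULL/0 for poolname/guid returns every inactive
 * pool recorded in the cache; the caller owns the result.
 *
 *      nvlist_t *pools = zpool_find_import_cached(hdl,
 *          "/etc/zfs/zpool.cache", NULL, 0);
 *
 *      if (pools != NULL) {
 *              ... iterate with nvlist_next_nvpair() ...
 *              nvlist_free(pools);
 *      }
 */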
1623
1624 static int
1625 name_or_guid_exists(zpool_handle_t *zhp, void *data)
1626 {
1627         importargs_t *import = data;
1628         int found = 0;
1629
1630         if (import->poolname != NULL) {
1631                 char *pool_name;
1632
1633                 verify(nvlist_lookup_string(zhp->zpool_config,
1634                     ZPOOL_CONFIG_POOL_NAME, &pool_name) == 0);
1635                 if (strcmp(pool_name, import->poolname) == 0)
1636                         found = 1;
1637         } else {
1638                 uint64_t pool_guid;
1639
1640                 verify(nvlist_lookup_uint64(zhp->zpool_config,
1641                     ZPOOL_CONFIG_POOL_GUID, &pool_guid) == 0);
1642                 if (pool_guid == import->guid)
1643                         found = 1;
1644         }
1645
1646         zpool_close(zhp);
1647         return (found);
1648 }
1649
1650 nvlist_t *
1651 zpool_search_import(libzfs_handle_t *hdl, importargs_t *import)
1652 {
1653         verify(import->poolname == NULL || import->guid == 0);
1654
1655         if (import->unique)
1656                 import->exists = zpool_iter(hdl, name_or_guid_exists, import);
1657
1658         if (import->cachefile != NULL)
1659                 return (zpool_find_import_cached(hdl, import->cachefile,
1660                     import->poolname, import->guid));
1661
1662         return (zpool_find_import_impl(hdl, import));
1663 }
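
/*
 * Example usage (sketch): callers fill in an importargs_t and let
 * zpool_search_import() choose between the cache-file and device-scan
 * paths.  The pool name "tank" is hypothetical; unset fields are left
 * zero-initialized.
 *
 *      importargs_t args = { 0 };
 *      nvlist_t *pools;
 *
 *      args.poolname = "tank";
 *      args.unique = B_TRUE;
 *      pools = zpool_search_import(hdl, &args);
 *      if (args.exists)
 *              ... a pool with that name is already imported ...
 */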
1664
1665 boolean_t
1666 find_guid(nvlist_t *nv, uint64_t guid)
1667 {
1668         uint64_t tmp;
1669         nvlist_t **child;
1670         uint_t c, children;
1671
1672         verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
1673         if (tmp == guid)
1674                 return (B_TRUE);
1675
1676         if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1677             &child, &children) == 0) {
1678                 for (c = 0; c < children; c++)
1679                         if (find_guid(child[c], guid))
1680                                 return (B_TRUE);
1681         }
1682
1683         return (B_FALSE);
1684 }
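
/*
 * Example usage (sketch): find_guid() recurses through a vdev tree, so
 * callers hand it the root of ZPOOL_CONFIG_VDEV_TREE, exactly as
 * zpool_in_use() does below:
 *
 *      nvlist_t *nvroot;
 *
 *      verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
 *          &nvroot) == 0);
 *      if (find_guid(nvroot, vdev_guid))
 *              ... vdev_guid appears somewhere in this pool ...
 */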
1685
1686 typedef struct aux_cbdata {
1687         const char      *cb_type;
1688         uint64_t        cb_guid;
1689         zpool_handle_t  *cb_zhp;
1690 } aux_cbdata_t;
1691
1692 static int
1693 find_aux(zpool_handle_t *zhp, void *data)
1694 {
1695         aux_cbdata_t *cbp = data;
1696         nvlist_t **list;
1697         uint_t i, count;
1698         uint64_t guid;
1699         nvlist_t *nvroot;
1700
1701         verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1702             &nvroot) == 0);
1703
1704         if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
1705             &list, &count) == 0) {
1706                 for (i = 0; i < count; i++) {
1707                         verify(nvlist_lookup_uint64(list[i],
1708                             ZPOOL_CONFIG_GUID, &guid) == 0);
1709                         if (guid == cbp->cb_guid) {
1710                                 cbp->cb_zhp = zhp;
1711                                 return (1);
1712                         }
1713                 }
1714         }
1715
1716         zpool_close(zhp);
1717         return (0);
1718 }
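
/*
 * Example usage (sketch): find_aux() is a zpool_iter() callback.  The
 * caller names which class of aux vdev to search (spares or l2cache) and
 * the guid of interest, mirroring the POOL_STATE_SPARE case below:
 *
 *      aux_cbdata_t cb = { 0 };
 *
 *      cb.cb_type = ZPOOL_CONFIG_SPARES;
 *      cb.cb_guid = vdev_guid;
 *      if (zpool_iter(hdl, find_aux, &cb) == 1)
 *              ... cb.cb_zhp is the pool using the device ...
 */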
1719
1720 /*
1721  * Determines if the device is in use by a pool.  If so, *inuse is set to
1722  * B_TRUE and the state and name of the pool are returned.  The name
1723  * string is allocated and must be freed by the caller.
1724  */
1725 int
1726 zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
1727     boolean_t *inuse)
1728 {
1729         nvlist_t *config;
1730         char *name;
1731         boolean_t ret;
1732         uint64_t guid, vdev_guid;
1733         zpool_handle_t *zhp;
1734         nvlist_t *pool_config;
1735         uint64_t stateval, isspare;
1736         aux_cbdata_t cb = { 0 };
1737         boolean_t isactive;
1738
1739         *inuse = B_FALSE;
1740
1741         if (zpool_read_label(fd, &config, NULL) != 0) {
1742                 (void) no_memory(hdl);
1743                 return (-1);
1744         }
1745
1746         if (config == NULL)
1747                 return (0);
1748
1749         verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
1750             &stateval) == 0);
1751         verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
1752             &vdev_guid) == 0);
1753
1754         if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
1755                 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1756                     &name) == 0);
1757                 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1758                     &guid) == 0);
1759         }
1760
1761         switch (stateval) {
1762         case POOL_STATE_EXPORTED:
1763                 /*
1764                  * A pool with an exported state may in fact be imported
1765                  * read-only, so check the in-core state to see if it's
1766                  * active and imported read-only.  If it is, set
1767                  * its state to active.
1768                  */
1769                 if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
1770                     (zhp = zpool_open_canfail(hdl, name)) != NULL) {
1771                         if (zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
1772                                 stateval = POOL_STATE_ACTIVE;
1773
1774                         /*
1775                          * All we needed the zpool handle for is the
1776                          * readonly prop check.
1777                          */
1778                         zpool_close(zhp);
1779                 }
1780
1781                 ret = B_TRUE;
1782                 break;
1783
1784         case POOL_STATE_ACTIVE:
1785                 /*
1786                  * For an active pool, we have to determine if it's really part
1787                  * of a currently active pool (in which case the pool will exist
1788                  * and the guid will be the same), or whether it's part of an
1789                  * active pool that was disconnected without being explicitly
1790                  * exported.
1791                  */
1792                 if (pool_active(hdl, name, guid, &isactive) != 0) {
1793                         nvlist_free(config);
1794                         return (-1);
1795                 }
1796
1797                 if (isactive) {
1798                         /*
1799                          * Because the device may have been removed while
1800                          * offlined, we only report it as active if the vdev is
1801                          * still present in the config.  Otherwise, pretend like
1802                          * it's not in use.
1803                          */
1804                         if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
1805                             (pool_config = zpool_get_config(zhp, NULL))
1806                             != NULL) {
1807                                 nvlist_t *nvroot;
1808
1809                                 verify(nvlist_lookup_nvlist(pool_config,
1810                                     ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1811                                 ret = find_guid(nvroot, vdev_guid);
1812                         } else {
1813                                 ret = B_FALSE;
1814                         }
1815
1816                         /*
1817                          * If this is an active spare within another pool, we
1818                          * treat it like an unused hot spare.  This allows the
1819                          * user to create a pool with a hot spare that is
1820                          * currently in use within another pool.  Since we
1821                          * return B_TRUE, libdiskmgt will continue to prevent
1822                          * generic consumers from using the device.
1823                          */
1824                         if (ret && nvlist_lookup_uint64(config,
1825                             ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
1826                                 stateval = POOL_STATE_SPARE;
1827
1828                         if (zhp != NULL)
1829                                 zpool_close(zhp);
1830                 } else {
1831                         stateval = POOL_STATE_POTENTIALLY_ACTIVE;
1832                         ret = B_TRUE;
1833                 }
1834                 break;
1835
1836         case POOL_STATE_SPARE:
1837                 /*
1838                  * For a hot spare, it can be either definitively in use, or
1839                  * potentially active.  To determine if it's in use, we iterate
1840                  * over all pools in the system and search for one with a spare
1841                  * with a matching guid.
1842                  *
1843                  * Due to the shared nature of spares, we don't actually report
1844                  * the potentially active case as in use.  This means the user
1845                  * can freely create pools on the hot spares of exported pools;
1846                  * doing otherwise would complicate the resulting code, and we
1847                  * end up having to deal with this case anyway.
1848                  */
1849                 cb.cb_zhp = NULL;
1850                 cb.cb_guid = vdev_guid;
1851                 cb.cb_type = ZPOOL_CONFIG_SPARES;
1852                 if (zpool_iter(hdl, find_aux, &cb) == 1) {
1853                         name = (char *)zpool_get_name(cb.cb_zhp);
1854                         ret = B_TRUE;
1855                 } else {
1856                         ret = B_FALSE;
1857                 }
1858                 break;
1859
1860         case POOL_STATE_L2CACHE:
1862                 /*
1863                  * Check if any pool is currently using this l2cache device.
1864                  */
1865                 cb.cb_zhp = NULL;
1866                 cb.cb_guid = vdev_guid;
1867                 cb.cb_type = ZPOOL_CONFIG_L2CACHE;
1868                 if (zpool_iter(hdl, find_aux, &cb) == 1) {
1869                         name = (char *)zpool_get_name(cb.cb_zhp);
1870                         ret = B_TRUE;
1871                 } else {
1872                         ret = B_FALSE;
1873                 }
1874                 break;
1875
1876         default:
1877                 ret = B_FALSE;
1878         }
1879
1881         if (ret) {
1882                 if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
1883                         if (cb.cb_zhp)
1884                                 zpool_close(cb.cb_zhp);
1885                         nvlist_free(config);
1886                         return (-1);
1887                 }
1888                 *state = (pool_state_t)stateval;
1889         }
1890
1891         if (cb.cb_zhp)
1892                 zpool_close(cb.cb_zhp);
1893
1894         nvlist_free(config);
1895         *inuse = ret;
1896         return (0);
1897 }
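
/*
 * Example usage (sketch): a utility deciding whether it is safe to write
 * to a device might check it this way.  The device path is hypothetical,
 * and the returned name string must be freed by the caller.
 *
 *      pool_state_t state;
 *      char *name = NULL;
 *      boolean_t inuse;
 *      int fd = open("/dev/rdsk/c0t0d0s0", O_RDONLY);
 *
 *      if (fd >= 0 &&
 *          zpool_in_use(hdl, fd, &state, &name, &inuse) == 0 && inuse) {
 *              (void) printf("in use by pool '%s'\n", name);
 *              free(name);
 *      }
 *      if (fd >= 0)
 *              (void) close(fd);
 */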