/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 * Copyright 2015 RackTop Systems.
 * Copyright 2016 Nexenta Systems, Inc.
 */

/*
 * Pool import support functions.
 *
 * To import a pool, we rely on reading the configuration information from the
 * ZFS label of each device.  If we successfully read the label, then we
 * organize the configuration information in the following hierarchy:
 *
 *	pool guid -> toplevel vdev guid -> label txg
 *
 * Duplicate entries matching this same tuple will be discarded.  Once we have
 * examined every device, we pick the best label txg config for each toplevel
 * vdev.  We then arrange these toplevel vdevs into a complete pool config, and
 * update any paths that have changed.  Finally, we attempt to import the pool
 * using our derived config, and record the results.
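 *
 * For example (illustrative): if two devices carry labels for the same
 * toplevel vdev guid, one at txg 95 and one at txg 100, the txg-100 config
 * is the one chosen for that toplevel vdev.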
 */

#include <aio.h>
#include <ctype.h>
#include <devid.h>
#include <dirent.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <thread_pool.h>
#include <libgeom.h>

#include <sys/vdev_impl.h>

#include "libzfs.h"
#include "libzfs_impl.h"

/*
 * Intermediate structures used to gather configuration information.
 */
typedef struct config_entry {
	uint64_t		ce_txg;
	nvlist_t		*ce_config;
	struct config_entry	*ce_next;
} config_entry_t;

typedef struct vdev_entry {
	uint64_t		ve_guid;
	config_entry_t		*ve_configs;
	struct vdev_entry	*ve_next;
} vdev_entry_t;

typedef struct pool_entry {
	uint64_t		pe_guid;
	vdev_entry_t		*pe_vdevs;
	struct pool_entry	*pe_next;
} pool_entry_t;

typedef struct name_entry {
	char			*ne_name;
	uint64_t		ne_guid;
	struct name_entry	*ne_next;
} name_entry_t;

typedef struct pool_list {
	pool_entry_t		*pools;
	name_entry_t		*names;
} pool_list_t;

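/*
 * Illustrative sketch of how the structures above nest, mirroring the
 * pool guid -> toplevel vdev guid -> label txg hierarchy:
 *
 *	pool_list_t
 *	  pools -> pool_entry_t (pe_guid)
 *	             pe_vdevs -> vdev_entry_t (ve_guid)
 *	                          ve_configs -> config_entry_t (ce_txg,
 *	                                                        ce_config)
 *	  names -> name_entry_t (ne_name, ne_guid), the vdev guid -> path map
 */

/*
 * Derive a devid string for the device at 'path', to be freed by the
 * caller with devid_str_free().  Returns NULL if a devid cannot be
 * obtained, or unconditionally when "have_devid" is not defined (the
 * normal case on FreeBSD).
 */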
static char *
get_devid(const char *path)
{
#ifdef have_devid
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
#else
	return (NULL);
#endif
}


/*
 * Go through and fix up any path and/or devid information for the given vdev
 * configuration.
 */
static int
fix_paths(nvlist_t *nv, name_entry_t *names)
{
	nvlist_t **child;
	uint_t c, children;
	uint64_t guid;
	name_entry_t *ne, *best;
	char *path, *devid;
	int matched;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (fix_paths(child[c], names) != 0)
				return (-1);
		return (0);
	}

	/*
	 * This is a leaf (file or disk) vdev.  In either case, go through
	 * the name list and see if we find a matching guid.  If so, replace
	 * the path and see if we can calculate a new devid.
	 *
	 * There may be multiple names associated with a particular guid, in
	 * which case we have overlapping slices or multiple paths to the same
	 * disk.  If this is the case, then we want to pick the path that is
	 * the most similar to the original, where "most similar" is the number
	 * of matching characters starting from the end of the path.  This will
	 * preserve slice numbers even if the disks have been reorganized, and
	 * will also catch preferred disk names if multiple paths exist.
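	 *
	 * For example (illustrative): if the original path was
	 * /dev/dsk/c0t0d0s0 and the candidate names are /dev/dsk/c0t1d0s0
	 * and /dev/dsk/c0t1d0s2, the first candidate wins because five
	 * trailing characters ("0d0s0") match, versus none for the second.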
	 */
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
		path = NULL;

	matched = 0;
	best = NULL;
	for (ne = names; ne != NULL; ne = ne->ne_next) {
		if (ne->ne_guid == guid) {
			const char *src, *dst;
			int count;

			if (path == NULL) {
				best = ne;
				break;
			}

			src = ne->ne_name + strlen(ne->ne_name) - 1;
			dst = path + strlen(path) - 1;
			for (count = 0; src >= ne->ne_name && dst >= path;
			    src--, dst--, count++)
				if (*src != *dst)
					break;

			/*
			 * At this point, 'count' is the number of characters
			 * matched from the end.
			 */
			if (count > matched || best == NULL) {
				best = ne;
				matched = count;
			}
		}
	}

	if (best == NULL)
		return (0);

	if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
		return (-1);

	if ((devid = get_devid(best->ne_name)) == NULL) {
		(void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
	} else {
		if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0) {
			devid_str_free(devid);
			return (-1);
		}
		devid_str_free(devid);
	}

	return (0);
}

/*
 * Add the given configuration to the list of known devices.
 */
static int
add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
    nvlist_t *config)
{
	uint64_t pool_guid, vdev_guid, top_guid, txg, state;
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	name_entry_t *ne;

	/*
	 * If this is a hot spare not currently in use or level 2 cache
	 * device, add it to the list of names to translate, but don't do
	 * anything else.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &state) == 0 &&
	    (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
		if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
			return (-1);

		if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
			free(ne);
			return (-1);
		}

		ne->ne_guid = vdev_guid;
		ne->ne_next = pl->names;
		pl->names = ne;

		nvlist_free(config);
		return (0);
	}

	/*
	 * If we have a valid config but cannot read any of these fields, then
	 * it means we have a half-initialized label.  In vdev_label_init()
	 * we write a label with txg == 0 so that we can identify the device
	 * in case the user refers to the same disk later on.  If we fail to
	 * create the pool, we'll be left with a label in this state
	 * which should not be considered part of a valid pool.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &pool_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
	    &top_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &txg) != 0 || txg == 0) {
		nvlist_free(config);
		return (0);
	}

	/*
	 * First, see if we know about this pool.  If not, then add it to the
	 * list of known pools.
	 */
	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		if (pe->pe_guid == pool_guid)
			break;
	}

	if (pe == NULL) {
		if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		pe->pe_guid = pool_guid;
		pe->pe_next = pl->pools;
		pl->pools = pe;
	}

	/*
	 * Second, see if we know about this toplevel vdev.  Add it if it's
	 * missing.
	 */
	for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
		if (ve->ve_guid == top_guid)
			break;
	}

	if (ve == NULL) {
		if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		ve->ve_guid = top_guid;
		ve->ve_next = pe->pe_vdevs;
		pe->pe_vdevs = ve;
	}

	/*
	 * Third, see if we have a config with a matching transaction group.  If
	 * so, then we do nothing.  Otherwise, add it to the list of known
	 * configs.
	 */
	for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
		if (ce->ce_txg == txg)
			break;
	}

	if (ce == NULL) {
		if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		ce->ce_txg = txg;
		ce->ce_config = config;
		ce->ce_next = ve->ve_configs;
		ve->ve_configs = ce;
	} else {
		nvlist_free(config);
	}

	/*
	 * At this point we've successfully added our config to the list of
	 * known configs.  The last thing to do is add the vdev guid -> path
	 * mappings so that we can fix up the configuration as necessary before
	 * doing the import.
	 */
	if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
		return (-1);

	if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
		free(ne);
		return (-1);
	}

	ne->ne_guid = vdev_guid;
	ne->ne_next = pl->names;
	pl->names = ne;

	return (0);
}

/*
 * Determine whether the pool with the given name is active and matches the
 * given GUID; the answer is returned in '*isactive'.  Returns 0 on success,
 * -1 on failure.
 */
static int
pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
    boolean_t *isactive)
{
	zpool_handle_t *zhp;
	uint64_t theguid;

	if (zpool_open_silent(hdl, name, &zhp) != 0)
		return (-1);

	if (zhp == NULL) {
		*isactive = B_FALSE;
		return (0);
	}

	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
	    &theguid) == 0);

	zpool_close(zhp);

	*isactive = (theguid == guid);
	return (0);
}

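/*
 * Hand the given config to the kernel (ZFS_IOC_POOL_TRYIMPORT) and return
 * the refreshed config it derives, retrying with a larger destination
 * buffer while the ioctl fails with ENOMEM.  Returns NULL on failure.
 */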
static nvlist_t *
refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
{
	nvlist_t *nvl;
	zfs_cmd_t zc = { 0 };
	int err, dstbuf_size;

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
		return (NULL);

	dstbuf_size = MAX(CONFIG_BUF_MINSIZE, zc.zc_nvlist_conf_size * 4);

	if (zcmd_alloc_dst_nvlist(hdl, &zc, dstbuf_size) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
	    &zc)) != 0 && errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (NULL);
		}
	}

	if (err) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	zcmd_free_nvlists(&zc);
	return (nvl);
}

/*
 * Determine if the vdev id is a hole in the namespace.
 */
boolean_t
vdev_is_hole(uint64_t *hole_array, uint_t holes, uint_t id)
{
	for (int c = 0; c < holes; c++) {

		/* Top-level is a hole */
		if (hole_array[c] == id)
			return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * Convert our list of pools into the definitive set of configurations.  We
 * start by picking the best config for each toplevel vdev.  Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool.  We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok,
    nvlist_t *policy)
{
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	nvlist_t *ret = NULL, *config = NULL, *tmp = NULL, *nvtop, *nvroot;
	nvlist_t **spares, **l2cache;
	uint_t i, nspares, nl2cache;
	boolean_t config_seen;
	uint64_t best_txg;
	char *name, *hostname = NULL;
	uint64_t guid;
	uint_t children = 0;
	nvlist_t **child = NULL;
	uint_t holes;
	uint64_t *hole_array, max_id;
	uint_t c;
	boolean_t isactive;
	uint64_t hostid;
	nvlist_t *nvl;
	boolean_t found_one = B_FALSE;
	boolean_t valid_top_config = B_FALSE;

	if (nvlist_alloc(&ret, 0, 0) != 0)
		goto nomem;

	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		uint64_t id, max_txg = 0;

		if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		config_seen = B_FALSE;

		/*
		 * Iterate over all toplevel vdevs.  Grab the pool configuration
		 * from the first one we find, and then go through the rest and
		 * add them as necessary to the 'vdevs' member of the config.
		 */
		for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {

			/*
			 * Determine the best configuration for this vdev by
			 * selecting the config with the latest transaction
			 * group.
			 */
			best_txg = 0;
			for (ce = ve->ve_configs; ce != NULL;
			    ce = ce->ce_next) {

				if (ce->ce_txg > best_txg) {
					tmp = ce->ce_config;
					best_txg = ce->ce_txg;
				}
			}

			/*
			 * We rely on the fact that the max txg for the
			 * pool will contain the most up-to-date information
			 * about the valid top-levels in the vdev namespace.
			 */
			if (best_txg > max_txg) {
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_VDEV_CHILDREN,
				    DATA_TYPE_UINT64);
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_HOLE_ARRAY,
				    DATA_TYPE_UINT64_ARRAY);

				max_txg = best_txg;
				hole_array = NULL;
				holes = 0;
				max_id = 0;
				valid_top_config = B_FALSE;

				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VDEV_CHILDREN, &max_id) == 0) {
					verify(nvlist_add_uint64(config,
					    ZPOOL_CONFIG_VDEV_CHILDREN,
					    max_id) == 0);
					valid_top_config = B_TRUE;
				}

				if (nvlist_lookup_uint64_array(tmp,
				    ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
				    &holes) == 0) {
					verify(nvlist_add_uint64_array(config,
					    ZPOOL_CONFIG_HOLE_ARRAY,
					    hole_array, holes) == 0);
				}
			}

			if (!config_seen) {
				/*
				 * Copy the relevant pieces of data to the pool
				 * configuration:
				 *
				 *	version
				 *	pool guid
				 *	name
				 *	comment (if available)
				 *	pool state
				 *	hostid (if available)
				 *	hostname (if available)
				 */
				uint64_t state, version;
				char *comment = NULL;

				version = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VERSION);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_VERSION, version);
				guid = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_GUID);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_GUID, guid);
				name = fnvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_POOL_NAME);
				fnvlist_add_string(config,
				    ZPOOL_CONFIG_POOL_NAME, name);

				if (nvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_COMMENT, &comment) == 0)
					fnvlist_add_string(config,
					    ZPOOL_CONFIG_COMMENT, comment);

				state = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_STATE);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_STATE, state);

				hostid = 0;
				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
					fnvlist_add_uint64(config,
					    ZPOOL_CONFIG_HOSTID, hostid);
					hostname = fnvlist_lookup_string(tmp,
					    ZPOOL_CONFIG_HOSTNAME);
					fnvlist_add_string(config,
					    ZPOOL_CONFIG_HOSTNAME, hostname);
				}

				config_seen = B_TRUE;
			}

			/*
			 * Add this top-level vdev to the child array.
			 */
			verify(nvlist_lookup_nvlist(tmp,
			    ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
			verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
			    &id) == 0);

			if (id >= children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (id + 1) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = id + 1;
			}
			if (nvlist_dup(nvtop, &child[id], 0) != 0)
				goto nomem;

		}

		/*
		 * If we have information about all the top-levels then
		 * clean up the nvlist which we've constructed. This
		 * means removing any extraneous devices that are
		 * beyond the valid range or adding devices to the end
		 * of our array which appear to be missing.
		 */
		if (valid_top_config) {
			if (max_id < children) {
				for (c = max_id; c < children; c++)
					nvlist_free(child[c]);
				children = max_id;
			} else if (max_id > children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (max_id) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = max_id;
			}
		}

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		/*
		 * The vdev namespace may contain holes as a result of
		 * device removal. We must add them back into the vdev
		 * tree before we process any missing devices.
		 */
		if (holes > 0) {
			ASSERT(valid_top_config);

			for (c = 0; c < children; c++) {
				nvlist_t *holey;

				if (child[c] != NULL ||
				    !vdev_is_hole(hole_array, holes, c))
					continue;

				if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;

				/*
				 * Holes in the namespace are treated as
				 * "hole" top-level vdevs and have a
				 * special flag set on them.
				 */
				if (nvlist_add_string(holey,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_HOLE) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(holey);
					goto nomem;
				}
				child[c] = holey;
			}
		}

		/*
		 * Look for any missing top-level vdevs.  If this is the case,
		 * create a faked up 'missing' vdev as a placeholder.  We cannot
		 * simply compress the child array, because the kernel performs
		 * certain checks to make sure the vdev IDs match their location
		 * in the configuration.
		 */
		for (c = 0; c < children; c++) {
			if (child[c] == NULL) {
				nvlist_t *missing;
				if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;
				if (nvlist_add_string(missing,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_MISSING) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(missing);
					goto nomem;
				}
				child[c] = missing;
			}
		}

		/*
		 * Put all of this pool's top-level vdevs into a root vdev.
		 */
		if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
		    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
		    child, children) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		for (c = 0; c < children; c++)
			nvlist_free(child[c]);
		free(child);
		children = 0;
		child = NULL;

		/*
		 * Go through and fix up any paths and/or devids based on our
		 * known list of vdev GUID -> path mappings.
		 */
		if (fix_paths(nvroot, pl->names) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		/*
		 * Add the root vdev to this pool's configuration.
		 */
		if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    nvroot) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}
		nvlist_free(nvroot);

		/*
		 * zdb uses this path to report on active pools that were
		 * imported or created using -R.
		 */
		if (active_ok)
			goto add_pool;

		/*
		 * Determine if this pool is currently active, in which case we
		 * can't actually import it.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		if (pool_active(hdl, name, guid, &isactive) != 0)
			goto error;

		if (isactive) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		if (policy != NULL) {
			if (nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY,
			    policy) != 0)
				goto nomem;
		}

		if ((nvl = refresh_config(hdl, config)) == NULL) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		nvlist_free(config);
		config = nvl;

		/*
		 * Go through and update the paths for spares, now that we have
		 * them.
		 */
		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &spares, &nspares) == 0) {
			for (i = 0; i < nspares; i++) {
				if (fix_paths(spares[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Update the paths for l2cache devices.
		 */
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2cache, &nl2cache) == 0) {
			for (i = 0; i < nl2cache; i++) {
				if (fix_paths(l2cache[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Restore the original information read from the actual label.
		 */
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
		    DATA_TYPE_UINT64);
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
		    DATA_TYPE_STRING);
		if (hostid != 0) {
			verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
			    hostid) == 0);
			verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
			    hostname) == 0);
		}

add_pool:
		/*
		 * Add this pool to the list of configs.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (nvlist_add_nvlist(ret, name, config) != 0)
			goto nomem;

		found_one = B_TRUE;
		nvlist_free(config);
		config = NULL;
	}

	if (!found_one) {
		nvlist_free(ret);
		ret = NULL;
	}

	return (ret);

nomem:
	(void) no_memory(hdl);
error:
	nvlist_free(config);
	nvlist_free(ret);
	for (c = 0; c < children; c++)
		nvlist_free(child[c]);
	free(child);

	return (NULL);
}

/*
 * Return the offset of the given label.
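 * Labels in the first half (l < VDEV_LABELS / 2) sit at the front of the
 * device; the rest sit at the aligned end, just before 'size'.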
 */
static uint64_t
label_offset(uint64_t size, int l)
{
	ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
	return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
	    0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}

/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one.
 * Returns 0 on success, or -1 on failure.
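 *
 * Illustrative usage (a sketch; the device path is hypothetical and error
 * handling is elided):
 *
 *	nvlist_t *config;
 *	int fd = open("/dev/da0", O_RDONLY);
 *
 *	if (fd >= 0 && zpool_read_label(fd, &config) == 0) {
 *		char *name;
 *
 *		if (nvlist_lookup_string(config,
 *		    ZPOOL_CONFIG_POOL_NAME, &name) == 0)
 *			(void) printf("pool: %s\n", name);
 *		nvlist_free(config);
 *	}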
 */
int
zpool_read_label(int fd, nvlist_t **config)
{
	struct stat64 statbuf;
	int l;
	vdev_label_t *label;
	uint64_t state, txg, size;

	*config = NULL;

	if (fstat64(fd, &statbuf) == -1)
		return (-1);
	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	if ((label = malloc(sizeof (vdev_label_t))) == NULL)
		return (-1);

	for (l = 0; l < VDEV_LABELS; l++) {
		if (pread64(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t))
			continue;

		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
			continue;

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state > POOL_STATE_L2CACHE) {
			nvlist_free(*config);
			continue;
		}

		if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
		    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0)) {
			nvlist_free(*config);
			continue;
		}

		free(label);
		return (0);
	}

	free(label);
	*config = NULL;
	errno = ENOENT;
	return (-1);
}

/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one.  All labels are read in
 * parallel with lio_listio(2).
 * Returns the number of valid labels found.  If a label is found, it is
 * returned via 'config'; the caller is responsible for freeing it.
 */
int
zpool_read_all_labels(int fd, nvlist_t **config)
{
	struct stat64 statbuf;
	struct aiocb aiocbs[VDEV_LABELS];
	struct aiocb *aiocbps[VDEV_LABELS];
	int l;
	vdev_phys_t *labels;
	uint64_t state, txg, size;
	int nlabels = 0;

	*config = NULL;

	if (fstat64(fd, &statbuf) == -1)
		return (0);
	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	if ((labels = calloc(VDEV_LABELS, sizeof (vdev_phys_t))) == NULL)
		return (0);

	memset(aiocbs, 0, sizeof (aiocbs));
	for (l = 0; l < VDEV_LABELS; l++) {
		aiocbs[l].aio_fildes = fd;
		aiocbs[l].aio_offset = label_offset(size, l) + VDEV_SKIP_SIZE;
		aiocbs[l].aio_buf = &labels[l];
		aiocbs[l].aio_nbytes = sizeof (vdev_phys_t);
		aiocbs[l].aio_lio_opcode = LIO_READ;
		aiocbps[l] = &aiocbs[l];
	}

	if (lio_listio(LIO_WAIT, aiocbps, VDEV_LABELS, NULL) != 0) {
		if (errno == EAGAIN || errno == EINTR || errno == EIO) {
			for (l = 0; l < VDEV_LABELS; l++) {
				errno = 0;
				int r = aio_error(&aiocbs[l]);
				if (r != EINVAL)
					(void) aio_return(&aiocbs[l]);
			}
		}
		free(labels);
		return (0);
	}

	for (l = 0; l < VDEV_LABELS; l++) {
		nvlist_t *temp = NULL;

		if (aio_return(&aiocbs[l]) != sizeof (vdev_phys_t))
			continue;

		if (nvlist_unpack(labels[l].vp_nvlist,
		    sizeof (labels[l].vp_nvlist), &temp, 0) != 0)
			continue;

		if (nvlist_lookup_uint64(temp, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state > POOL_STATE_L2CACHE) {
			nvlist_free(temp);
			temp = NULL;
			continue;
		}

		if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
		    (nvlist_lookup_uint64(temp, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0)) {
			nvlist_free(temp);
			temp = NULL;
			continue;
		}
		if (temp != NULL)
			*config = temp;

		nlabels++;
	}

	free(labels);
	return (nlabels);
}

typedef struct rdsk_node {
	char *rn_name;
	int rn_dfd;
	libzfs_handle_t *rn_hdl;
	nvlist_t *rn_config;
	avl_tree_t *rn_avl;
	avl_node_t rn_node;
	boolean_t rn_nozpool;
} rdsk_node_t;

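/*
 * AVL comparator for the slice cache.  Slices "s0" and "s2" are the most
 * likely to carry a label, so they sort (and are therefore probed) first;
 * everything else falls back to lexical order.
 */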
static int
slice_cache_compare(const void *arg1, const void *arg2)
{
	const char  *nm1 = ((rdsk_node_t *)arg1)->rn_name;
	const char  *nm2 = ((rdsk_node_t *)arg2)->rn_name;
	char *nm1slice, *nm2slice;
	int rv;

	/*
	 * slices zero and two are the most likely to provide results,
	 * so put those first
	 */
	nm1slice = strstr(nm1, "s0");
	nm2slice = strstr(nm2, "s0");
	if (nm1slice && !nm2slice) {
		return (-1);
	}
	if (!nm1slice && nm2slice) {
		return (1);
	}
	nm1slice = strstr(nm1, "s2");
	nm2slice = strstr(nm2, "s2");
	if (nm1slice && !nm2slice) {
		return (-1);
	}
	if (!nm1slice && nm2slice) {
		return (1);
	}

	rv = strcmp(nm1, nm2);
	if (rv == 0)
		return (0);
	return (rv > 0 ? 1 : -1);
}

#ifdef illumos
static void
check_one_slice(avl_tree_t *r, char *diskname, uint_t partno,
    diskaddr_t size, uint_t blksz)
{
	rdsk_node_t tmpnode;
	rdsk_node_t *node;
	char sname[MAXNAMELEN];

	tmpnode.rn_name = &sname[0];
	(void) snprintf(tmpnode.rn_name, MAXNAMELEN, "%s%u",
	    diskname, partno);
	/*
	 * protect against division by zero for disk labels that
	 * contain a bogus sector size
	 */
	if (blksz == 0)
		blksz = DEV_BSIZE;
	/* too small to contain a zpool? */
	if ((size < (SPA_MINDEVSIZE / blksz)) &&
	    (node = avl_find(r, &tmpnode, NULL)))
		node->rn_nozpool = B_TRUE;
}
#endif  /* illumos */

static void
nozpool_all_slices(avl_tree_t *r, const char *sname)
{
#ifdef illumos
	char diskname[MAXNAMELEN];
	char *ptr;
	int i;

	(void) strncpy(diskname, sname, MAXNAMELEN);
	if (((ptr = strrchr(diskname, 's')) == NULL) &&
	    ((ptr = strrchr(diskname, 'p')) == NULL))
		return;
	ptr[0] = 's';
	ptr[1] = '\0';
	for (i = 0; i < NDKMAP; i++)
		check_one_slice(r, diskname, i, 0, 1);
	ptr[0] = 'p';
	for (i = 0; i <= FD_NUMPART; i++)
		check_one_slice(r, diskname, i, 0, 1);
#endif  /* illumos */
}

#ifdef illumos
static void
check_slices(avl_tree_t *r, int fd, const char *sname)
{
	struct extvtoc vtoc;
	struct dk_gpt *gpt;
	char diskname[MAXNAMELEN];
	char *ptr;
	int i;

	(void) strncpy(diskname, sname, MAXNAMELEN);
	if ((ptr = strrchr(diskname, 's')) == NULL || !isdigit(ptr[1]))
		return;
	ptr[1] = '\0';

	if (read_extvtoc(fd, &vtoc) >= 0) {
		for (i = 0; i < NDKMAP; i++)
			check_one_slice(r, diskname, i,
			    vtoc.v_part[i].p_size, vtoc.v_sectorsz);
	} else if (efi_alloc_and_read(fd, &gpt) >= 0) {
		/*
		 * on x86 we'll still have leftover links that point
		 * to slices s[9-15], so use NDKMAP instead
		 */
		for (i = 0; i < NDKMAP; i++)
			check_one_slice(r, diskname, i,
			    gpt->efi_parts[i].p_size, gpt->efi_lbasize);
		/* nodes p[1-4] are never used with EFI labels */
		ptr[0] = 'p';
		for (i = 1; i <= FD_NUMPART; i++)
			check_one_slice(r, diskname, i, 0, 1);
		efi_free(gpt);
	}
}
#endif  /* illumos */

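/*
 * Thread-pool worker: open the device named by the rdsk_node_t argument
 * (relative to rn_dfd), screen out anything that cannot hold a pool, and
 * stash any label configuration that is found in rn_config.
 */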
static void
zpool_open_func(void *arg)
{
	rdsk_node_t *rn = arg;
	struct stat64 statbuf;
	nvlist_t *config;
	int fd;

	if (rn->rn_nozpool)
		return;
	if ((fd = openat64(rn->rn_dfd, rn->rn_name, O_RDONLY)) < 0) {
		/* symlink to a device that's no longer there */
		if (errno == ENOENT)
			nozpool_all_slices(rn->rn_avl, rn->rn_name);
		return;
	}
	/*
	 * Ignore failed stats.  We only want regular
	 * files, character devs and block devs.
	 */
	if (fstat64(fd, &statbuf) != 0 ||
	    (!S_ISREG(statbuf.st_mode) &&
	    !S_ISCHR(statbuf.st_mode) &&
	    !S_ISBLK(statbuf.st_mode))) {
		(void) close(fd);
		return;
	}
	/* this file is too small to hold a zpool */
#ifdef illumos
	if (S_ISREG(statbuf.st_mode) &&
	    statbuf.st_size < SPA_MINDEVSIZE) {
		(void) close(fd);
		return;
	} else if (!S_ISREG(statbuf.st_mode)) {
		/*
		 * Try to read the disk label first so we don't have to
		 * open a bunch of minor nodes that can't have a zpool.
		 */
		check_slices(rn->rn_avl, fd, rn->rn_name);
	}
#else   /* !illumos */
	if (statbuf.st_size < SPA_MINDEVSIZE) {
		(void) close(fd);
		return;
	}
#endif  /* illumos */

	if ((zpool_read_label(fd, &config)) != 0 && errno == ENOMEM) {
		(void) close(fd);
		(void) no_memory(rn->rn_hdl);
		return;
	}
	(void) close(fd);

	rn->rn_config = config;
}

/*
 * Given a file descriptor, clear (zero) the label information.
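 * Every label location, front and back, is overwritten with a zeroed
 * vdev_label_t buffer.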
 */
int
zpool_clear_label(int fd)
{
	struct stat64 statbuf;
	int l;
	vdev_label_t *label;
	uint64_t size;

	if (fstat64(fd, &statbuf) == -1)
		return (0);
	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	if ((label = calloc(sizeof (vdev_label_t), 1)) == NULL)
		return (-1);

	for (l = 0; l < VDEV_LABELS; l++) {
		if (pwrite64(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t)) {
			free(label);
			return (-1);
		}
	}

	free(label);
	return (0);
}

/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no
 * directories are given (iarg->paths is 0), then the default directory
 * (/dev) is searched.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
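/*
 * Illustrative call (a sketch; the pool name and directory are
 * hypothetical):
 *
 *	char *dir = "/dev";
 *	importargs_t iarg = { 0 };
 *
 *	iarg.paths = 1;
 *	iarg.path = &dir;
 *	iarg.poolname = "tank";		(or set iarg.guid instead)
 *	pools = zpool_find_import_impl(hdl, &iarg);
 */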
static nvlist_t *
zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
{
	int i, dirs = iarg->paths;
	struct dirent64 *dp;
	char path[MAXPATHLEN];
	char *end, **dir = iarg->path;
	size_t pathleft;
	nvlist_t *ret = NULL;
	static char *default_dir = "/dev";
	pool_list_t pools = { 0 };
	pool_entry_t *pe, *penext;
	vdev_entry_t *ve, *venext;
	config_entry_t *ce, *cenext;
	name_entry_t *ne, *nenext;
	avl_tree_t slice_cache;
	rdsk_node_t *slice;
	void *cookie;

	if (dirs == 0) {
		dirs = 1;
		dir = &default_dir;
	}

	/*
	 * Go through and read the label configuration information from every
	 * possible device, organizing the information according to pool GUID
	 * and toplevel GUID.
	 */
	for (i = 0; i < dirs; i++) {
		tpool_t *t;
		char rdsk[MAXPATHLEN];
		int dfd;
		boolean_t config_failed = B_FALSE;
		DIR *dirp;

		/* use realpath to normalize the path */
		if (realpath(dir[i], path) == NULL) {
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
			goto error;
		}
		end = &path[strlen(path)];
		*end++ = '/';
		*end = 0;
		pathleft = &path[sizeof (path)] - end;

#ifdef illumos
		/*
		 * Using raw devices instead of block devices when we're
		 * reading the labels skips a bunch of slow operations during
		 * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
		 */
		if (strcmp(path, ZFS_DISK_ROOTD) == 0)
			(void) strlcpy(rdsk, ZFS_RDISK_ROOTD, sizeof (rdsk));
		else
#endif
			(void) strlcpy(rdsk, path, sizeof (rdsk));

		if ((dfd = open64(rdsk, O_RDONLY)) < 0 ||
		    (dirp = fdopendir(dfd)) == NULL) {
			if (dfd >= 0)
				(void) close(dfd);
			zfs_error_aux(hdl, strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
			    rdsk);
			goto error;
		}

		avl_create(&slice_cache, slice_cache_compare,
		    sizeof (rdsk_node_t), offsetof(rdsk_node_t, rn_node));

		if (strcmp(rdsk, "/dev/") == 0) {
			struct gmesh mesh;
			struct gclass *mp;
			struct ggeom *gp;
			struct gprovider *pp;

			errno = geom_gettree(&mesh);
			if (errno != 0) {
				zfs_error_aux(hdl, strerror(errno));
				(void) zfs_error_fmt(hdl, EZFS_BADPATH,
				    dgettext(TEXT_DOMAIN, "cannot get GEOM tree"));
				goto error;
			}

			LIST_FOREACH(mp, &mesh.lg_class, lg_class) {
				LIST_FOREACH(gp, &mp->lg_geom, lg_geom) {
					LIST_FOREACH(pp, &gp->lg_provider, lg_provider) {
						slice = zfs_alloc(hdl, sizeof (rdsk_node_t));
						slice->rn_name = zfs_strdup(hdl, pp->lg_name);
						slice->rn_avl = &slice_cache;
						slice->rn_dfd = dfd;
						slice->rn_hdl = hdl;
						slice->rn_nozpool = B_FALSE;
						avl_add(&slice_cache, slice);
					}
				}
			}

			geom_deletetree(&mesh);
			goto skipdir;
		}

		/*
		 * This is not MT-safe, but we have no MT consumers of libzfs
		 */
		while ((dp = readdir64(dirp)) != NULL) {
			const char *name = dp->d_name;
			if (name[0] == '.' &&
			    (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
				continue;

			slice = zfs_alloc(hdl, sizeof (rdsk_node_t));
			slice->rn_name = zfs_strdup(hdl, name);
			slice->rn_avl = &slice_cache;
			slice->rn_dfd = dfd;
			slice->rn_hdl = hdl;
			slice->rn_nozpool = B_FALSE;
			avl_add(&slice_cache, slice);
		}
skipdir:
		/*
		 * create a thread pool to do all of this in parallel;
		 * rn_nozpool is not protected, so this is racy in that
		 * multiple tasks could decide that the same slice can
		 * not hold a zpool, which is benign.  Also choose
		 * double the number of processors; we hold a lot of
		 * locks in the kernel, so going beyond this doesn't
		 * buy us much.
		 */
		t = tpool_create(1, 2 * sysconf(_SC_NPROCESSORS_ONLN),
		    0, NULL);
		for (slice = avl_first(&slice_cache); slice;
		    (slice = avl_walk(&slice_cache, slice,
		    AVL_AFTER)))
			(void) tpool_dispatch(t, zpool_open_func, slice);
		tpool_wait(t);
		tpool_destroy(t);

		cookie = NULL;
		while ((slice = avl_destroy_nodes(&slice_cache,
		    &cookie)) != NULL) {
			if (slice->rn_config != NULL && !config_failed) {
				nvlist_t *config = slice->rn_config;
				boolean_t matched = B_TRUE;

				if (iarg->poolname != NULL) {
					char *pname;

					matched = nvlist_lookup_string(config,
					    ZPOOL_CONFIG_POOL_NAME,
					    &pname) == 0 &&
					    strcmp(iarg->poolname, pname) == 0;
				} else if (iarg->guid != 0) {
					uint64_t this_guid;

					matched = nvlist_lookup_uint64(config,
					    ZPOOL_CONFIG_POOL_GUID,
					    &this_guid) == 0 &&
					    iarg->guid == this_guid;
				}
				if (!matched) {
					nvlist_free(config);
				} else {
					/*
					 * use the non-raw path for the config
					 */
					(void) strlcpy(end, slice->rn_name,
					    pathleft);
					if (add_config(hdl, &pools, path,
					    config) != 0)
						config_failed = B_TRUE;
				}
			}
			free(slice->rn_name);
			free(slice);
		}
		avl_destroy(&slice_cache);

		(void) closedir(dirp);

		if (config_failed)
			goto error;
	}

	ret = get_configs(hdl, &pools, iarg->can_be_active, iarg->policy);

error:
	for (pe = pools.pools; pe != NULL; pe = penext) {
		penext = pe->pe_next;
		for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
			venext = ve->ve_next;
			for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
				cenext = ce->ce_next;
				nvlist_free(ce->ce_config);
				free(ce);
			}
			free(ve);
		}
		free(pe);
	}

	for (ne = pools.names; ne != NULL; ne = nenext) {
		nenext = ne->ne_next;
		free(ne->ne_name);
		free(ne);
	}

	return (ret);
}

1449 nvlist_t *
1450 zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv)
1451 {
1452         importargs_t iarg = { 0 };
1453
1454         iarg.paths = argc;
1455         iarg.path = argv;
1456
1457         return (zpool_find_import_impl(hdl, &iarg));
1458 }
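
/*
 * Editor's note: a minimal usage sketch for zpool_find_import(), not part
 * of the original source.  The device directory and the printf() reporting
 * are illustrative assumptions; a real consumer would add error handling.
 *
 *      libzfs_handle_t *hdl = libzfs_init();
 *      char *dirs[] = { "/dev" };              (assumed search directory)
 *      nvlist_t *pools = zpool_find_import(hdl, 1, dirs);
 *      nvpair_t *elem = NULL;
 *
 *      while (pools != NULL &&
 *          (elem = nvlist_next_nvpair(pools, elem)) != NULL)
 *              (void) printf("importable: %s\n", nvpair_name(elem));
 *      nvlist_free(pools);
 *      libzfs_fini(hdl);
 */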
1459
1460 /*
1461  * Given a cache file, return the contents as a list of importable pools.
1462  * The caller provides either poolname or guid (but not both) when trying
1463  * to import a specific pool.
1464  */
1465 nvlist_t *
1466 zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
1467     char *poolname, uint64_t guid)
1468 {
1469         char *buf;
1470         int fd;
1471         struct stat64 statbuf;
1472         nvlist_t *raw, *src, *dst;
1473         nvlist_t *pools;
1474         nvpair_t *elem;
1475         char *name;
1476         uint64_t this_guid;
1477         boolean_t active;
1478
1479         verify(poolname == NULL || guid == 0);
1480
1481         if ((fd = open(cachefile, O_RDONLY)) < 0) {
1482                 zfs_error_aux(hdl, "%s", strerror(errno));
1483                 (void) zfs_error(hdl, EZFS_BADCACHE,
1484                     dgettext(TEXT_DOMAIN, "failed to open cache file"));
1485                 return (NULL);
1486         }
1487
1488         if (fstat64(fd, &statbuf) != 0) {
1489                 zfs_error_aux(hdl, "%s", strerror(errno));
1490                 (void) close(fd);
1491                 (void) zfs_error(hdl, EZFS_BADCACHE,
1492                     dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
1493                 return (NULL);
1494         }
1495
1496         if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
1497                 (void) close(fd);
1498                 return (NULL);
1499         }
1500
1501         if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
1502                 (void) close(fd);
1503                 free(buf);
1504                 (void) zfs_error(hdl, EZFS_BADCACHE,
1505                     dgettext(TEXT_DOMAIN,
1506                     "failed to read cache file contents"));
1507                 return (NULL);
1508         }
1509
1510         (void) close(fd);
1511
1512         if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
1513                 free(buf);
1514                 (void) zfs_error(hdl, EZFS_BADCACHE,
1515                     dgettext(TEXT_DOMAIN,
1516                     "invalid or corrupt cache file contents"));
1517                 return (NULL);
1518         }
1519
1520         free(buf);
1521
1522         /*
1523          * Go through the cached configs, skip pools that are already
1524          * active, and refresh the state of the rest.
1525          */
1526         if (nvlist_alloc(&pools, 0, 0) != 0) {
1527                 (void) no_memory(hdl);
1528                 nvlist_free(raw);
1529                 return (NULL);
1530         }
1531
1532         elem = NULL;
1533         while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
1534                 src = fnvpair_value_nvlist(elem);
1535
1536                 name = fnvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME);
1537                 if (poolname != NULL && strcmp(poolname, name) != 0)
1538                         continue;
1539
1540                 this_guid = fnvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID);
1541                 if (guid != 0 && guid != this_guid)
1542                         continue;
1543
1544                 if (pool_active(hdl, name, this_guid, &active) != 0) {
1545                         nvlist_free(raw);
1546                         nvlist_free(pools);
1547                         return (NULL);
1548                 }
1549
1550                 if (active)
1551                         continue;
1552
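                /*
                 * Record the cache file this config came from so that the
                 * import path can preserve the pool's cachefile property.
                 */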
1553                 if (nvlist_add_string(src, ZPOOL_CONFIG_CACHEFILE,
1554                     cachefile) != 0) {
1555                         (void) no_memory(hdl);
1556                         nvlist_free(raw);
1557                         nvlist_free(pools);
1558                         return (NULL);
1559                 }
1560
1561                 if ((dst = refresh_config(hdl, src)) == NULL) {
1562                         nvlist_free(raw);
1563                         nvlist_free(pools);
1564                         return (NULL);
1565                 }
1566
1567                 if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
1568                         (void) no_memory(hdl);
1569                         nvlist_free(dst);
1570                         nvlist_free(raw);
1571                         nvlist_free(pools);
1572                         return (NULL);
1573                 }
1574                 nvlist_free(dst);
1575         }
1576
1577         nvlist_free(raw);
1578         return (pools);
1579 }
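
/*
 * Editor's note: a hedged sketch of a zpool_find_import_cached() call, not
 * part of the original source.  The cache file path is the FreeBSD default
 * and is an assumption here; passing NULL/0 requests every pool in the file.
 *
 *      nvlist_t *pools = zpool_find_import_cached(hdl,
 *          "/boot/zfs/zpool.cache", NULL, 0);
 *      if (pools != NULL) {
 *              ... walk with nvlist_next_nvpair() as above ...
 *              nvlist_free(pools);
 *      }
 */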
1580
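/*
 * zpool_iter() callback: returns nonzero if an active pool's name or guid
 * matches the one requested in the importargs_t.  The handle is closed
 * before returning, as the callback owns it.
 */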
1581 static int
1582 name_or_guid_exists(zpool_handle_t *zhp, void *data)
1583 {
1584         importargs_t *import = data;
1585         int found = 0;
1586
1587         if (import->poolname != NULL) {
1588                 char *pool_name;
1589
1590                 verify(nvlist_lookup_string(zhp->zpool_config,
1591                     ZPOOL_CONFIG_POOL_NAME, &pool_name) == 0);
1592                 if (strcmp(pool_name, import->poolname) == 0)
1593                         found = 1;
1594         } else {
1595                 uint64_t pool_guid;
1596
1597                 verify(nvlist_lookup_uint64(zhp->zpool_config,
1598                     ZPOOL_CONFIG_POOL_GUID, &pool_guid) == 0);
1599                 if (pool_guid == import->guid)
1600                         found = 1;
1601         }
1602
1603         zpool_close(zhp);
1604         return (found);
1605 }
1606
1607 nvlist_t *
1608 zpool_search_import(libzfs_handle_t *hdl, importargs_t *import)
1609 {
1610         verify(import->poolname == NULL || import->guid == 0);
1611
1612         if (import->unique)
1613                 import->exists = zpool_iter(hdl, name_or_guid_exists, import);
1614
1615         if (import->cachefile != NULL)
1616                 return (zpool_find_import_cached(hdl, import->cachefile,
1617                     import->poolname, import->guid));
1618
1619         return (zpool_find_import_impl(hdl, import));
1620 }
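
/*
 * Editor's note: sketch of a typical zpool_search_import() caller, not part
 * of the original source.  The pool name "tank" is an assumption; only one
 * of poolname/guid may be set, per the verify() above.
 *
 *      importargs_t iarg = { 0 };
 *      iarg.poolname = "tank";         (assumed pool name)
 *      iarg.unique = B_TRUE;           (sets iarg.exists via zpool_iter())
 *      nvlist_t *pools = zpool_search_import(hdl, &iarg);
 */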
1621
1622 boolean_t
1623 find_guid(nvlist_t *nv, uint64_t guid)
1624 {
1625         uint64_t tmp;
1626         nvlist_t **child;
1627         uint_t c, children;
1628
1629         verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
1630         if (tmp == guid)
1631                 return (B_TRUE);
1632
1633         if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1634             &child, &children) == 0) {
1635                 for (c = 0; c < children; c++)
1636                         if (find_guid(child[c], guid))
1637                                 return (B_TRUE);
1638         }
1639
1640         return (B_FALSE);
1641 }
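
/*
 * Editor's note (sketch, not original source): find_guid() only walks
 * ZPOOL_CONFIG_CHILDREN, i.e. the main vdev tree:
 *
 *      root vdev (guid)
 *          mirror (guid)
 *              disk (guid)
 *              disk (guid)
 *
 * Spare and l2cache devices live in separate config arrays and are
 * searched by find_aux() below instead.
 */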
1642
1643 typedef struct aux_cbdata {
1644         const char      *cb_type;
1645         uint64_t        cb_guid;
1646         zpool_handle_t  *cb_zhp;
1647 } aux_cbdata_t;
1648
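/*
 * zpool_iter() callback: search an active pool's spares or l2cache array
 * (selected by cb_type) for a vdev guid matching cb_guid.  On a match the
 * pool handle is kept in cb_zhp and iteration stops; otherwise the handle
 * is closed.
 */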
1649 static int
1650 find_aux(zpool_handle_t *zhp, void *data)
1651 {
1652         aux_cbdata_t *cbp = data;
1653         nvlist_t **list;
1654         uint_t i, count;
1655         uint64_t guid;
1656         nvlist_t *nvroot;
1657
1658         verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1659             &nvroot) == 0);
1660
1661         if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
1662             &list, &count) == 0) {
1663                 for (i = 0; i < count; i++) {
1664                         verify(nvlist_lookup_uint64(list[i],
1665                             ZPOOL_CONFIG_GUID, &guid) == 0);
1666                         if (guid == cbp->cb_guid) {
1667                                 cbp->cb_zhp = zhp;
1668                                 return (1);
1669                         }
1670                 }
1671         }
1672
1673         zpool_close(zhp);
1674         return (0);
1675 }
1676
1677 /*
1678  * Determines if the device is in use by a pool.  If so, *inuse is set to
1679  * B_TRUE and the pool's state and name are returned.  The name string is
1680  * allocated and must be freed by the caller.  Returns -1 on error.
1681  */
1682 int
1683 zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
1684     boolean_t *inuse)
1685 {
1686         nvlist_t *config;
1687         char *name;
1688         boolean_t ret;
1689         uint64_t guid, vdev_guid;
1690         zpool_handle_t *zhp;
1691         nvlist_t *pool_config;
1692         uint64_t stateval, isspare;
1693         aux_cbdata_t cb = { 0 };
1694         boolean_t isactive;
1695
1696         *inuse = B_FALSE;
1697
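        /*
         * Read the vdev label, if any, from the device.  ENOMEM is the
         * only error distinguished here; a device without a usable label
         * simply yields a NULL config and is reported as not in use.
         */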
1698         if (zpool_read_label(fd, &config) != 0 && errno == ENOMEM) {
1699                 (void) no_memory(hdl);
1700                 return (-1);
1701         }
1702
1703         if (config == NULL)
1704                 return (0);
1705
1706         verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
1707             &stateval) == 0);
1708         verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
1709             &vdev_guid) == 0);
1710
1711         if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
1712                 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1713                     &name) == 0);
1714                 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1715                     &guid) == 0);
1716         }
1717
1718         switch (stateval) {
1719         case POOL_STATE_EXPORTED:
1720                 /*
1721                  * A pool with an exported state may in fact be imported
1722                  * read-only, so check the in-core state to see if it's
1723                  * active and imported read-only.  If it is, set
1724                  * its state to active.
1725                  */
1726                 if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
1727                     (zhp = zpool_open_canfail(hdl, name)) != NULL) {
1728                         if (zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
1729                                 stateval = POOL_STATE_ACTIVE;
1730
1731                         /*
1732                          * All we needed the zpool handle for is the
1733                          * readonly prop check.
1734                          */
1735                         zpool_close(zhp);
1736                 }
1737
1738                 ret = B_TRUE;
1739                 break;
1740
1741         case POOL_STATE_ACTIVE:
1742                 /*
1743                  * For an active pool, we have to determine if it's really part
1744                  * of a currently active pool (in which case the pool will exist
1745                  * and the guid will be the same), or whether it's part of an
1746                  * active pool that was disconnected without being explicitly
1747                  * exported.
1748                  */
1749                 if (pool_active(hdl, name, guid, &isactive) != 0) {
1750                         nvlist_free(config);
1751                         return (-1);
1752                 }
1753
1754                 if (isactive) {
1755                         /*
1756                          * Because the device may have been removed while
1757                          * offlined, we only report it as active if the vdev is
1758                          * still present in the config.  Otherwise, pretend like
1759                          * it's not in use.
1760                          */
1761                         if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
1762                             (pool_config = zpool_get_config(zhp, NULL))
1763                             != NULL) {
1764                                 nvlist_t *nvroot;
1765
1766                                 verify(nvlist_lookup_nvlist(pool_config,
1767                                     ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1768                                 ret = find_guid(nvroot, vdev_guid);
1769                         } else {
1770                                 ret = B_FALSE;
1771                         }
1772
1773                         /*
1774                          * If this is an active spare within another pool, we
1775                          * treat it like an unused hot spare.  This allows the
1776                          * user to create a pool with a hot spare that is
1777                          * currently in use within another pool.  Since we
1778                          * return B_TRUE, libdiskmgt will continue to prevent
1779                          * generic consumers from using the device.
1780                          */
1781                         if (ret && nvlist_lookup_uint64(config,
1782                             ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
1783                                 stateval = POOL_STATE_SPARE;
1784
1785                         if (zhp != NULL)
1786                                 zpool_close(zhp);
1787                 } else {
1788                         stateval = POOL_STATE_POTENTIALLY_ACTIVE;
1789                         ret = B_TRUE;
1790                 }
1791                 break;
1792
1793         case POOL_STATE_SPARE:
1794                 /*
1795                  * For a hot spare, it can be either definitively in use, or
1796                  * potentially active.  To determine if it's in use, we iterate
1797                  * over all pools in the system and search for one with a spare
1798                  * with a matching guid.
1799                  *
1800                  * Due to the shared nature of spares, we don't actually report
1801                  * the potentially active case as in use.  This means the user
1802                  * can freely create pools on the hot spares of exported pools,
1803                  * but doing otherwise would complicate the resulting code, and
1804                  * we would end up having to handle this case anyway.
1805                  */
1806                 cb.cb_zhp = NULL;
1807                 cb.cb_guid = vdev_guid;
1808                 cb.cb_type = ZPOOL_CONFIG_SPARES;
1809                 if (zpool_iter(hdl, find_aux, &cb) == 1) {
1810                         name = (char *)zpool_get_name(cb.cb_zhp);
1811                         ret = B_TRUE;
1812                 } else {
1813                         ret = B_FALSE;
1814                 }
1815                 break;
1816
1817         case POOL_STATE_L2CACHE:
1818
1819                 /*
1820                  * Check if any pool is currently using this l2cache device.
1821                  */
1822                 cb.cb_zhp = NULL;
1823                 cb.cb_guid = vdev_guid;
1824                 cb.cb_type = ZPOOL_CONFIG_L2CACHE;
1825                 if (zpool_iter(hdl, find_aux, &cb) == 1) {
1826                         name = (char *)zpool_get_name(cb.cb_zhp);
1827                         ret = B_TRUE;
1828                 } else {
1829                         ret = B_FALSE;
1830                 }
1831                 break;
1832
1833         default:
1834                 ret = B_FALSE;
1835         }
1836
1837
1838         if (ret) {
1839                 if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
1840                         if (cb.cb_zhp)
1841                                 zpool_close(cb.cb_zhp);
1842                         nvlist_free(config);
1843                         return (-1);
1844                 }
1845                 *state = (pool_state_t)stateval;
1846         }
1847
1848         if (cb.cb_zhp)
1849                 zpool_close(cb.cb_zhp);
1850
1851         nvlist_free(config);
1852         *inuse = ret;
1853         return (0);
1854 }
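
/*
 * Editor's note: a minimal zpool_in_use() sketch, not part of the original
 * source.  The device path is an assumed example.
 *
 *      pool_state_t state;
 *      char *name = NULL;
 *      boolean_t inuse;
 *      int fd = open("/dev/da0", O_RDONLY);    (assumed device)
 *
 *      if (fd >= 0 &&
 *          zpool_in_use(hdl, fd, &state, &name, &inuse) == 0 && inuse) {
 *              (void) printf("in use by pool %s (state %d)\n", name,
 *                  (int)state);
 *              free(name);
 *      }
 *      if (fd >= 0)
 *              (void) close(fd);
 */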