/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident   "%Z%%M% %I%     %E% SMI"

/*
 * Pool import support functions.
 *
 * To import a pool, we rely on reading the configuration information from the
 * ZFS label of each device.  If we successfully read the label, then we
 * organize the configuration information in the following hierarchy:
 *
 *      pool guid -> toplevel vdev guid -> label txg
 *
 * Duplicate entries matching this same tuple will be discarded.  Once we have
 * examined every device, we pick the best label txg config for each toplevel
 * vdev.  We then arrange these toplevel vdevs into a complete pool config, and
 * update any paths that have changed.  Finally, we attempt to import the pool
 * using our derived config, and record the results.
 */

#include <devid.h>
#include <dirent.h>
#include <errno.h>
#include <libintl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <libgeom.h>

#include <sys/vdev_impl.h>

#include "libzfs.h"
#include "libzfs_impl.h"

/*
 * Intermediate structures used to gather configuration information.
 */
typedef struct config_entry {
        uint64_t                ce_txg;
        nvlist_t                *ce_config;
        struct config_entry     *ce_next;
} config_entry_t;

typedef struct vdev_entry {
        uint64_t                ve_guid;
        config_entry_t          *ve_configs;
        struct vdev_entry       *ve_next;
} vdev_entry_t;

typedef struct pool_entry {
        uint64_t                pe_guid;
        vdev_entry_t            *pe_vdevs;
        struct pool_entry       *pe_next;
} pool_entry_t;

typedef struct name_entry {
        char                    *ne_name;
        uint64_t                ne_guid;
        struct name_entry       *ne_next;
} name_entry_t;

typedef struct pool_list {
        pool_entry_t            *pools;
        name_entry_t            *names;
} pool_list_t;

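/*
 * Illustration (not part of the build): after every device has been scanned,
 * these lists encode a mapping along the lines of
 *
 *      pool_entry (pool guid)
 *          vdev_entry (toplevel vdev guid)
 *              config_entry (label txg -> nvlist config)
 *
 * together with a flat list of (vdev guid, device name) pairs in pl->names,
 * which fix_paths() later uses to repair stale path information.
 */

/*
 * Obtain the devid string for the device at 'path', or NULL if the device
 * has no devid.  The returned string must be freed with devid_str_free().
 */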
static char *
get_devid(const char *path)
{
        int fd;
        ddi_devid_t devid;
        char *minor, *ret;

        if ((fd = open(path, O_RDONLY)) < 0)
                return (NULL);

        minor = NULL;
        ret = NULL;
        if (devid_get(fd, &devid) == 0) {
                if (devid_get_minor_name(fd, &minor) == 0)
                        ret = devid_str_encode(devid, minor);
                if (minor != NULL)
                        devid_str_free(minor);
                devid_free(devid);
        }
        (void) close(fd);

        return (ret);
}

/*
 * Go through and fix up any path and/or devid information for the given vdev
 * configuration.
 */
static int
fix_paths(nvlist_t *nv, name_entry_t *names)
{
        nvlist_t **child;
        uint_t c, children;
        uint64_t guid;
        name_entry_t *ne, *best;
        char *path, *devid;
        int matched;

        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
            &child, &children) == 0) {
                for (c = 0; c < children; c++)
                        if (fix_paths(child[c], names) != 0)
                                return (-1);
                return (0);
        }

        /*
         * This is a leaf (file or disk) vdev.  In either case, go through
         * the name list and see if we find a matching guid.  If so, replace
         * the path and see if we can calculate a new devid.
         *
         * There may be multiple names associated with a particular guid, in
         * which case we have overlapping slices or multiple paths to the same
         * disk.  If this is the case, then we want to pick the path that is
         * the most similar to the original, where "most similar" is the number
         * of matching characters starting from the end of the path.  This will
         * preserve slice numbers even if the disks have been reorganized, and
         * will also catch preferred disk names if multiple paths exist.
         */
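        /*
         * For example (hypothetical device names): if the label recorded
         * "/dev/dsk/c0t0d0s0" and the candidates for the same guid are
         * "/dev/dsk/c1t0d0s0" and "/dev/dsk/c0t0d0s2", the first wins,
         * since it shares the longer suffix "t0d0s0" with the original.
         */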
        verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
        if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
                path = NULL;

        matched = 0;
        best = NULL;
        for (ne = names; ne != NULL; ne = ne->ne_next) {
                if (ne->ne_guid == guid) {
                        const char *src, *dst;
                        int count;

                        if (path == NULL) {
                                best = ne;
                                break;
                        }

                        src = ne->ne_name + strlen(ne->ne_name) - 1;
                        dst = path + strlen(path) - 1;
                        for (count = 0; src >= ne->ne_name && dst >= path;
                            src--, dst--, count++)
                                if (*src != *dst)
                                        break;

                        /*
                         * At this point, 'count' is the number of characters
                         * matched from the end.
                         */
                        if (count > matched || best == NULL) {
                                best = ne;
                                matched = count;
                        }
                }
        }

        if (best == NULL)
                return (0);

        if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
                return (-1);

        if ((devid = get_devid(best->ne_name)) == NULL) {
                (void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
        } else {
                if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0)
                        return (-1);
                devid_str_free(devid);
        }

        return (0);
}

/*
 * Add the given configuration to the list of known devices.
 */
static int
add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
    nvlist_t *config)
{
        uint64_t pool_guid, vdev_guid, top_guid, txg, state;
        pool_entry_t *pe;
        vdev_entry_t *ve;
        config_entry_t *ce;
        name_entry_t *ne;

        /*
         * If this is a hot spare not currently in use or level 2 cache
         * device, add it to the list of names to translate, but don't do
         * anything else.
         */
        if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
            &state) == 0 &&
            (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
                if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
                        return (-1);

                if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
                        free(ne);
                        return (-1);
                }
                ne->ne_guid = vdev_guid;
                ne->ne_next = pl->names;
                pl->names = ne;
                return (0);
        }

        /*
         * If we have a valid config but cannot read any of these fields, then
         * it means we have a half-initialized label.  In vdev_label_init()
         * we write a label with txg == 0 so that we can identify the device
         * in case the user refers to the same disk later on.  If we fail to
         * create the pool, we'll be left with a label in this state
         * which should not be considered part of a valid pool.
         */
        if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
            &pool_guid) != 0 ||
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
            &vdev_guid) != 0 ||
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
            &top_guid) != 0 ||
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
            &txg) != 0 || txg == 0) {
                nvlist_free(config);
                return (0);
        }

        /*
         * First, see if we know about this pool.  If not, then add it to the
         * list of known pools.
         */
        for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
                if (pe->pe_guid == pool_guid)
                        break;
        }

        if (pe == NULL) {
                if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
                        nvlist_free(config);
                        return (-1);
                }
                pe->pe_guid = pool_guid;
                pe->pe_next = pl->pools;
                pl->pools = pe;
        }

        /*
         * Second, see if we know about this toplevel vdev.  Add it if it's
         * missing.
         */
        for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
                if (ve->ve_guid == top_guid)
                        break;
        }

        if (ve == NULL) {
                if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
                        nvlist_free(config);
                        return (-1);
                }
                ve->ve_guid = top_guid;
                ve->ve_next = pe->pe_vdevs;
                pe->pe_vdevs = ve;
        }

        /*
         * Third, see if we have a config with a matching transaction group.  If
         * so, then we do nothing.  Otherwise, add it to the list of known
         * configs.
         */
        for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
                if (ce->ce_txg == txg)
                        break;
        }

        if (ce == NULL) {
                if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) {
                        nvlist_free(config);
                        return (-1);
                }
                ce->ce_txg = txg;
                ce->ce_config = config;
                ce->ce_next = ve->ve_configs;
                ve->ve_configs = ce;
        } else {
                nvlist_free(config);
        }

        /*
         * At this point we've successfully added our config to the list of
         * known configs.  The last thing to do is add the vdev guid -> path
         * mappings so that we can fix up the configuration as necessary before
         * doing the import.
         */
        if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
                return (-1);

        if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
                free(ne);
                return (-1);
        }

        ne->ne_guid = vdev_guid;
        ne->ne_next = pl->names;
        pl->names = ne;

        return (0);
}

/*
 * Determine whether a pool with the given name is currently imported, and
 * if so whether its GUID matches; the result is returned in *isactive.
 */
static int
pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
    boolean_t *isactive)
{
        zpool_handle_t *zhp;
        uint64_t theguid;

        if (zpool_open_silent(hdl, name, &zhp) != 0)
                return (-1);

        if (zhp == NULL) {
                *isactive = B_FALSE;
                return (0);
        }

        verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
            &theguid) == 0);

        zpool_close(zhp);

        *isactive = (theguid == guid);
        return (0);
}

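/*
 * Ask the kernel to try importing the given config (ZFS_IOC_POOL_TRYIMPORT)
 * and return the refreshed config it computes, or NULL on failure.
 */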
static nvlist_t *
refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
{
        nvlist_t *nvl;
        zfs_cmd_t zc = { 0 };
        int err;

        if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
                return (NULL);

        if (zcmd_alloc_dst_nvlist(hdl, &zc,
            zc.zc_nvlist_conf_size * 2) != 0) {
                zcmd_free_nvlists(&zc);
                return (NULL);
        }

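        /*
         * The kernel fills in the destination nvlist; if our buffer is too
         * small it fails with ENOMEM, in which case we grow the buffer and
         * retry.
         */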
        while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
            &zc)) != 0 && errno == ENOMEM) {
                if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
                        zcmd_free_nvlists(&zc);
                        return (NULL);
                }
        }

        if (err) {
                (void) zpool_standard_error(hdl, errno,
                    dgettext(TEXT_DOMAIN, "cannot discover pools"));
                zcmd_free_nvlists(&zc);
                return (NULL);
        }

        if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
                zcmd_free_nvlists(&zc);
                return (NULL);
        }

        zcmd_free_nvlists(&zc);
        return (nvl);
}

/*
 * Convert our list of pools into the definitive set of configurations.  We
 * start by picking the best config for each toplevel vdev.  Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool.  We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
{
        pool_entry_t *pe;
        vdev_entry_t *ve;
        config_entry_t *ce;
        nvlist_t *ret = NULL, *config = NULL, *tmp, *nvtop, *nvroot;
        nvlist_t **spares, **l2cache;
        uint_t i, nspares, nl2cache;
        boolean_t config_seen;
        uint64_t best_txg;
        char *name, *hostname;
        uint64_t version, guid;
        uint_t children = 0;
        nvlist_t **child = NULL;
        uint_t c;
        boolean_t isactive;
        uint64_t hostid;
        nvlist_t *nvl;
        boolean_t found_one = B_FALSE;

        if (nvlist_alloc(&ret, 0, 0) != 0)
                goto nomem;

        for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
                uint64_t id;

                if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
                        goto nomem;
                config_seen = B_FALSE;

                /*
                 * Iterate over all toplevel vdevs.  Grab the pool configuration
                 * from the first one we find, and then go through the rest and
                 * add them as necessary to the 'vdevs' member of the config.
                 */
                for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {

                        /*
                         * Determine the best configuration for this vdev by
                         * selecting the config with the latest transaction
                         * group.
                         */
                        best_txg = 0;
                        for (ce = ve->ve_configs; ce != NULL;
                            ce = ce->ce_next) {

                                if (ce->ce_txg > best_txg) {
                                        tmp = ce->ce_config;
                                        best_txg = ce->ce_txg;
                                }
                        }

                        if (!config_seen) {
                                /*
                                 * Copy the relevant pieces of data to the pool
                                 * configuration:
                                 *
                                 *      version
                                 *      pool guid
                                 *      name
                                 *      pool state
                                 *      hostid (if available)
                                 *      hostname (if available)
                                 */
                                uint64_t state;

                                verify(nvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_VERSION, &version) == 0);
                                if (nvlist_add_uint64(config,
                                    ZPOOL_CONFIG_VERSION, version) != 0)
                                        goto nomem;
                                verify(nvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
                                if (nvlist_add_uint64(config,
                                    ZPOOL_CONFIG_POOL_GUID, guid) != 0)
                                        goto nomem;
                                verify(nvlist_lookup_string(tmp,
                                    ZPOOL_CONFIG_POOL_NAME, &name) == 0);
                                if (nvlist_add_string(config,
                                    ZPOOL_CONFIG_POOL_NAME, name) != 0)
                                        goto nomem;
                                verify(nvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_POOL_STATE, &state) == 0);
                                if (nvlist_add_uint64(config,
                                    ZPOOL_CONFIG_POOL_STATE, state) != 0)
                                        goto nomem;
                                hostid = 0;
                                if (nvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
                                        if (nvlist_add_uint64(config,
                                            ZPOOL_CONFIG_HOSTID, hostid) != 0)
                                                goto nomem;
                                        verify(nvlist_lookup_string(tmp,
                                            ZPOOL_CONFIG_HOSTNAME,
                                            &hostname) == 0);
                                        if (nvlist_add_string(config,
                                            ZPOOL_CONFIG_HOSTNAME,
                                            hostname) != 0)
                                                goto nomem;
                                }

                                config_seen = B_TRUE;
                        }

                        /*
                         * Add this top-level vdev to the child array.
                         */
                        verify(nvlist_lookup_nvlist(tmp,
                            ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
                        verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
                            &id) == 0);
                        if (id >= children) {
                                nvlist_t **newchild;

                                newchild = zfs_alloc(hdl, (id + 1) *
                                    sizeof (nvlist_t *));
                                if (newchild == NULL)
                                        goto nomem;

                                for (c = 0; c < children; c++)
                                        newchild[c] = child[c];

                                free(child);
                                child = newchild;
                                children = id + 1;
                        }
                        if (nvlist_dup(nvtop, &child[id], 0) != 0)
                                goto nomem;

                }

                verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
                    &guid) == 0);

                /*
                 * Look for any missing top-level vdevs.  If this is the case,
                 * create a faked up 'missing' vdev as a placeholder.  We cannot
                 * simply compress the child array, because the kernel performs
                 * certain checks to make sure the vdev IDs match their location
                 * in the configuration.
                 */
                for (c = 0; c < children; c++)
                        if (child[c] == NULL) {
                                nvlist_t *missing;
                                if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
                                    0) != 0)
                                        goto nomem;
                                if (nvlist_add_string(missing,
                                    ZPOOL_CONFIG_TYPE,
                                    VDEV_TYPE_MISSING) != 0 ||
                                    nvlist_add_uint64(missing,
                                    ZPOOL_CONFIG_ID, c) != 0 ||
                                    nvlist_add_uint64(missing,
                                    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
                                        nvlist_free(missing);
                                        goto nomem;
                                }
                                child[c] = missing;
                        }

                /*
                 * Put all of this pool's top-level vdevs into a root vdev.
                 */
                if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
                        goto nomem;
                if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
                    VDEV_TYPE_ROOT) != 0 ||
                    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
                    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
                    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
                    child, children) != 0) {
                        nvlist_free(nvroot);
                        goto nomem;
                }

                for (c = 0; c < children; c++)
                        nvlist_free(child[c]);
                free(child);
                children = 0;
                child = NULL;

                /*
                 * Go through and fix up any paths and/or devids based on our
                 * known list of vdev GUID -> path mappings.
                 */
                if (fix_paths(nvroot, pl->names) != 0) {
                        nvlist_free(nvroot);
                        goto nomem;
                }

                /*
                 * Add the root vdev to this pool's configuration.
                 */
                if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
                    nvroot) != 0) {
                        nvlist_free(nvroot);
                        goto nomem;
                }
                nvlist_free(nvroot);

                /*
                 * zdb uses this path to report on active pools that were
                 * imported or created using -R.
                 */
                if (active_ok)
                        goto add_pool;

                /*
                 * Determine if this pool is currently active, in which case we
                 * can't actually import it.
                 */
                verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
                    &name) == 0);
                verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
                    &guid) == 0);

                if (pool_active(hdl, name, guid, &isactive) != 0)
                        goto error;

                if (isactive) {
                        nvlist_free(config);
                        config = NULL;
                        continue;
                }

                if ((nvl = refresh_config(hdl, config)) == NULL)
                        goto error;

                nvlist_free(config);
                config = nvl;

                /*
                 * Go through and update the paths for spares, now that we have
                 * them.
                 */
                verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
                    &nvroot) == 0);
                if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
                    &spares, &nspares) == 0) {
                        for (i = 0; i < nspares; i++) {
                                if (fix_paths(spares[i], pl->names) != 0)
                                        goto nomem;
                        }
                }

                /*
                 * Update the paths for l2cache devices.
                 */
                if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
                    &l2cache, &nl2cache) == 0) {
                        for (i = 0; i < nl2cache; i++) {
                                if (fix_paths(l2cache[i], pl->names) != 0)
                                        goto nomem;
                        }
                }

                /*
                 * Restore the original information read from the actual label.
                 */
                (void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
                    DATA_TYPE_UINT64);
                (void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
                    DATA_TYPE_STRING);
                if (hostid != 0) {
                        verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
                            hostid) == 0);
                        verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
                            hostname) == 0);
                }

add_pool:
                /*
                 * Add this pool to the list of configs.
                 */
                verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
                    &name) == 0);
                if (nvlist_add_nvlist(ret, name, config) != 0)
                        goto nomem;

                found_one = B_TRUE;
                nvlist_free(config);
                config = NULL;
        }

        if (!found_one) {
                nvlist_free(ret);
                ret = NULL;
        }

        return (ret);

nomem:
        (void) no_memory(hdl);
error:
        nvlist_free(config);
        nvlist_free(ret);
        for (c = 0; c < children; c++)
                nvlist_free(child[c]);
        free(child);

        return (NULL);
}

/*
 * Return the offset of the given label.
 */
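/*
 * Illustration (assuming the usual on-disk constants, four labels of 256KB
 * each): labels 0 and 1 sit at offsets 0 and 256KB from the start of the
 * device, while labels 2 and 3 sit at size - 512KB and size - 256KB.
 */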
static uint64_t
label_offset(uint64_t size, int l)
{
        ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
        return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
            0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}

/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one.
 */
int
zpool_read_label(int fd, nvlist_t **config)
{
        struct stat64 statbuf;
        int l;
        vdev_label_t *label;
        uint64_t state, txg, size;

        *config = NULL;

        if (fstat64(fd, &statbuf) == -1)
                return (0);
        size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

        if ((label = malloc(sizeof (vdev_label_t))) == NULL)
                return (-1);

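        /*
         * Try each label location in turn; the first one that unpacks into
         * a sane config wins.
         */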
        for (l = 0; l < VDEV_LABELS; l++) {
                if (pread64(fd, label, sizeof (vdev_label_t),
                    label_offset(size, l)) != sizeof (vdev_label_t))
                        continue;

                if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
                    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
                        continue;

                if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
                    &state) != 0 || state > POOL_STATE_L2CACHE) {
                        nvlist_free(*config);
                        continue;
                }

                if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
                    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
                    &txg) != 0 || txg == 0)) {
                        nvlist_free(*config);
                        continue;
                }

                free(label);
                return (0);
        }

        free(label);
        *config = NULL;
        return (0);
}

static int
geom_find_import(libzfs_handle_t *hdl, pool_list_t *pools)
{
        char path[MAXPATHLEN];
        struct gmesh mesh;
        struct gclass *mp;
        struct ggeom *gp;
        struct gprovider *pp;
        nvlist_t *config;
        int fd, ret = 0;

        /*
         * Go through and read the label configuration information from every
         * GEOM provider, organizing the information according to pool GUID
         * and toplevel GUID.
         */

        fd = geom_gettree(&mesh);
        assert(fd == 0);

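        /*
         * GEOM arranges providers under geoms, which in turn hang off
         * classes, so walk all three levels of the snapshot we just took.
         */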
        LIST_FOREACH(mp, &mesh.lg_class, lg_class) {
                LIST_FOREACH(gp, &mp->lg_geom, lg_geom) {
                        LIST_FOREACH(pp, &gp->lg_provider, lg_provider) {
                                if ((fd = g_open(pp->lg_name, 0)) < 0)
                                        continue;

                                (void) snprintf(path, sizeof (path), "%s%s",
                                    _PATH_DEV, pp->lg_name);

                                if ((zpool_read_label(fd, &config)) != 0) {
                                        (void) g_close(fd);
                                        (void) no_memory(hdl);
                                        goto error;
                                }

                                (void) g_close(fd);

                                if (config == NULL)
                                        continue;

                                if (add_config(hdl, pools, path, config) != 0) {
                                        ret = -1;
                                        goto error;
                                }
                        }
                }
        }
error:
        geom_deletetree(&mesh);
        return (ret);
}

/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no args are
 * given (argc is 0), then the default directory (/dev/dsk) is searched.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
static nvlist_t *
zpool_find_import_impl(libzfs_handle_t *hdl, int argc, char **argv,
    boolean_t active_ok, char *poolname, uint64_t guid)
{
        int i;
        DIR *dirp = NULL;
        struct dirent64 *dp;
        char path[MAXPATHLEN];
        char *end;
        size_t pathleft;
        struct stat64 statbuf;
        nvlist_t *ret = NULL, *config;
        static char *default_dir = "/dev/dsk";
        int fd;
        pool_list_t pools = { 0 };
        pool_entry_t *pe, *penext;
        vdev_entry_t *ve, *venext;
        config_entry_t *ce, *cenext;
        name_entry_t *ne, *nenext;

        verify(poolname == NULL || guid == 0);

        if (argc == 0) {
                argc = 1;
                argv = &default_dir;
        }

        /*
         * Go through and read the label configuration information from every
         * possible device, organizing the information according to pool GUID
         * and toplevel GUID.
         */
        for (i = 0; i < argc; i++) {
                char *rdsk;
                int dfd;

                /* use realpath to normalize the path */
                if (realpath(argv[i], path) == 0) {
                        (void) zfs_error_fmt(hdl, EZFS_BADPATH,
                            dgettext(TEXT_DOMAIN, "cannot open '%s'"),
                            argv[i]);
                        goto error;
                }
                end = &path[strlen(path)];
                *end++ = '/';
                *end = 0;
                pathleft = &path[sizeof (path)] - end;

                if (strcmp(argv[i], default_dir) == 0) {
                        geom_find_import(hdl, &pools);
                        continue;
                }

                /*
                 * Using raw devices instead of block devices when we're
                 * reading the labels skips a bunch of slow operations during
                 * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
                 */
                if (strcmp(path, "/dev/dsk/") == 0)
                        rdsk = "/dev/rdsk/";
                else
                        rdsk = path;

                if ((dirp = opendir(rdsk)) == NULL) {
                        zfs_error_aux(hdl, strerror(errno));
                        (void) zfs_error_fmt(hdl, EZFS_BADPATH,
                            dgettext(TEXT_DOMAIN, "cannot open '%s'"),
                            rdsk);
                        goto error;
                }

                /*
                 * This is not MT-safe, but we have no MT consumers of libzfs
                 */
                while ((dp = readdir64(dirp)) != NULL) {
                        const char *name = dp->d_name;
                        if (name[0] == '.' &&
                            (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
                                continue;

                        (void) snprintf(path, sizeof (path), "%s/%s", rdsk,
                            dp->d_name);

                        if ((fd = open64(path, O_RDONLY)) < 0)
                                continue;

                        /*
                         * Ignore failed stats.  We only want regular
                         * files, character devs and block devs.
                         */
                        if (fstat64(fd, &statbuf) != 0 ||
                            (!S_ISREG(statbuf.st_mode) &&
                            !S_ISCHR(statbuf.st_mode) &&
                            !S_ISBLK(statbuf.st_mode))) {
                                (void) close(fd);
                                continue;
                        }

                        if ((zpool_read_label(fd, &config)) != 0) {
                                (void) close(fd);
                                (void) no_memory(hdl);
                                goto error;
                        }

                        (void) close(fd);

                        if (config != NULL) {
                                boolean_t matched = B_TRUE;

                                if (poolname != NULL) {
                                        char *pname;

                                        matched = nvlist_lookup_string(config,
                                            ZPOOL_CONFIG_POOL_NAME,
                                            &pname) == 0 &&
                                            strcmp(poolname, pname) == 0;
                                } else if (guid != 0) {
                                        uint64_t this_guid;

                                        matched = nvlist_lookup_uint64(config,
                                            ZPOOL_CONFIG_POOL_GUID,
                                            &this_guid) == 0 &&
                                            guid == this_guid;
                                }
                                if (!matched) {
                                        nvlist_free(config);
                                        config = NULL;
                                        continue;
                                }
                                /* use the non-raw path for the config */
                                (void) strlcpy(end, name, pathleft);
                                if (add_config(hdl, &pools, path, config) != 0)
                                        goto error;
                        }
                }

                (void) closedir(dirp);
                dirp = NULL;
        }

        ret = get_configs(hdl, &pools, active_ok);

error:
        for (pe = pools.pools; pe != NULL; pe = penext) {
                penext = pe->pe_next;
                for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
                        venext = ve->ve_next;
                        for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
                                cenext = ce->ce_next;
                                if (ce->ce_config)
                                        nvlist_free(ce->ce_config);
                                free(ce);
                        }
                        free(ve);
                }
                free(pe);
        }

        for (ne = pools.names; ne != NULL; ne = nenext) {
                nenext = ne->ne_next;
                if (ne->ne_name)
                        free(ne->ne_name);
                free(ne);
        }

        if (dirp)
                (void) closedir(dirp);

        return (ret);
}

nvlist_t *
zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv)
{
        return (zpool_find_import_impl(hdl, argc, argv, B_FALSE, NULL, 0));
}

nvlist_t *
zpool_find_import_byname(libzfs_handle_t *hdl, int argc, char **argv,
    char *pool)
{
        return (zpool_find_import_impl(hdl, argc, argv, B_FALSE, pool, 0));
}

nvlist_t *
zpool_find_import_byguid(libzfs_handle_t *hdl, int argc, char **argv,
    uint64_t guid)
{
        return (zpool_find_import_impl(hdl, argc, argv, B_FALSE, NULL, guid));
}

nvlist_t *
zpool_find_import_activeok(libzfs_handle_t *hdl, int argc, char **argv)
{
        return (zpool_find_import_impl(hdl, argc, argv, B_TRUE, NULL, 0));
}

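/*
 * Usage sketch (illustrative only, not compiled here): a consumer holding a
 * libzfs_handle_t can discover importable pools and walk the name -> config
 * mapping these functions return:
 *
 *      nvlist_t *pools = zpool_find_import(hdl, 0, NULL);
 *      nvpair_t *elem = NULL;
 *
 *      while (pools != NULL &&
 *          (elem = nvlist_next_nvpair(pools, elem)) != NULL) {
 *              nvlist_t *config;
 *              verify(nvpair_value_nvlist(elem, &config) == 0);
 *              (inspect 'config', or hand it to an import entry
 *              point such as zpool_import())
 *      }
 */
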
/*
 * Given a cache file, return the contents as a list of importable pools.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
nvlist_t *
zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
    char *poolname, uint64_t guid)
{
        char *buf;
        int fd;
        struct stat64 statbuf;
        nvlist_t *raw, *src, *dst;
        nvlist_t *pools;
        nvpair_t *elem;
        char *name;
        uint64_t this_guid;
        boolean_t active;

        verify(poolname == NULL || guid == 0);

        if ((fd = open(cachefile, O_RDONLY)) < 0) {
                zfs_error_aux(hdl, "%s", strerror(errno));
                (void) zfs_error(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN, "failed to open cache file"));
                return (NULL);
        }

        if (fstat64(fd, &statbuf) != 0) {
                zfs_error_aux(hdl, "%s", strerror(errno));
                (void) close(fd);
                (void) zfs_error(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
                return (NULL);
        }

        if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
                (void) close(fd);
                return (NULL);
        }

        if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
                (void) close(fd);
                free(buf);
                (void) zfs_error(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN,
                    "failed to read cache file contents"));
                return (NULL);
        }

        (void) close(fd);

        if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
                free(buf);
                (void) zfs_error(hdl, EZFS_BADCACHE,
                    dgettext(TEXT_DOMAIN,
                    "invalid or corrupt cache file contents"));
                return (NULL);
        }

        free(buf);

        /*
         * Go through the cached configs, skip any pool that is currently
         * active, and refresh the state of the rest.
         */
        if (nvlist_alloc(&pools, 0, 0) != 0) {
                (void) no_memory(hdl);
                nvlist_free(raw);
                return (NULL);
        }

        elem = NULL;
        while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
                verify(nvpair_value_nvlist(elem, &src) == 0);

                verify(nvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME,
                    &name) == 0);
                if (poolname != NULL && strcmp(poolname, name) != 0)
                        continue;

                verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID,
                    &this_guid) == 0);
                if (guid != 0 && guid != this_guid)
                        continue;

                if (pool_active(hdl, name, this_guid, &active) != 0) {
                        nvlist_free(raw);
                        nvlist_free(pools);
                        return (NULL);
                }

                if (active)
                        continue;

                if ((dst = refresh_config(hdl, src)) == NULL) {
                        nvlist_free(raw);
                        nvlist_free(pools);
                        return (NULL);
                }

                if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
                        (void) no_memory(hdl);
                        nvlist_free(dst);
                        nvlist_free(raw);
                        nvlist_free(pools);
                        return (NULL);
                }
                nvlist_free(dst);
        }

        nvlist_free(raw);
        return (pools);
}

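/*
 * Recursively search the vdev tree rooted at 'nv' for a leaf or interior
 * vdev with the given guid.
 */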
boolean_t
find_guid(nvlist_t *nv, uint64_t guid)
{
        uint64_t tmp;
        nvlist_t **child;
        uint_t c, children;

        verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
        if (tmp == guid)
                return (B_TRUE);

        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
            &child, &children) == 0) {
                for (c = 0; c < children; c++)
                        if (find_guid(child[c], guid))
                                return (B_TRUE);
        }

        return (B_FALSE);
}

typedef struct aux_cbdata {
        const char      *cb_type;
        uint64_t        cb_guid;
        zpool_handle_t  *cb_zhp;
} aux_cbdata_t;

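/*
 * zpool_iter() callback: check whether the pool's aux vdev list named by
 * cb_type (spares or l2cache) contains a vdev with cb_guid.  On a match,
 * keep the handle open in cb_zhp and stop the iteration; otherwise close
 * the handle and keep going.
 */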
static int
find_aux(zpool_handle_t *zhp, void *data)
{
        aux_cbdata_t *cbp = data;
        nvlist_t **list;
        uint_t i, count;
        uint64_t guid;
        nvlist_t *nvroot;

        verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
            &nvroot) == 0);

        if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
            &list, &count) == 0) {
                for (i = 0; i < count; i++) {
                        verify(nvlist_lookup_uint64(list[i],
                            ZPOOL_CONFIG_GUID, &guid) == 0);
                        if (guid == cbp->cb_guid) {
                                cbp->cb_zhp = zhp;
                                return (1);
                        }
                }
        }

        zpool_close(zhp);
        return (0);
}

/*
 * Determines if the pool is in use.  If so, it returns true along with the
 * state of the pool and the name of the pool.  The name string is allocated
 * and must be freed by the caller.
 */
int
zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
    boolean_t *inuse)
{
        nvlist_t *config;
        char *name;
        boolean_t ret;
        uint64_t guid, vdev_guid;
        zpool_handle_t *zhp;
        nvlist_t *pool_config;
        uint64_t stateval, isspare;
        aux_cbdata_t cb = { 0 };
        boolean_t isactive;

        *inuse = B_FALSE;

        if (zpool_read_label(fd, &config) != 0) {
                (void) no_memory(hdl);
                return (-1);
        }

        if (config == NULL)
                return (0);

        verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
            &stateval) == 0);
        verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
            &vdev_guid) == 0);

        if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
                verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
                    &name) == 0);
                verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
                    &guid) == 0);
        }

        switch (stateval) {
        case POOL_STATE_EXPORTED:
                ret = B_TRUE;
                break;

        case POOL_STATE_ACTIVE:
                /*
                 * For an active pool, we have to determine if it's really part
                 * of a currently active pool (in which case the pool will exist
                 * and the guid will be the same), or whether it's part of an
                 * active pool that was disconnected without being explicitly
                 * exported.
                 */
                if (pool_active(hdl, name, guid, &isactive) != 0) {
                        nvlist_free(config);
                        return (-1);
                }

                if (isactive) {
                        /*
                         * Because the device may have been removed while
                         * offlined, we only report it as active if the vdev is
                         * still present in the config.  Otherwise, pretend like
                         * it's not in use.
                         */
                        if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
                            (pool_config = zpool_get_config(zhp, NULL))
                            != NULL) {
                                nvlist_t *nvroot;

                                verify(nvlist_lookup_nvlist(pool_config,
                                    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
                                ret = find_guid(nvroot, vdev_guid);
                        } else {
                                ret = B_FALSE;
                        }

                        /*
                         * If this is an active spare within another pool, we
                         * treat it like an unused hot spare.  This allows the
                         * user to create a pool with a hot spare that is
                         * currently in use within another pool.  Since we
                         * return B_TRUE, libdiskmgt will continue to prevent
                         * generic consumers from using the device.
                         */
                        if (ret && nvlist_lookup_uint64(config,
                            ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
                                stateval = POOL_STATE_SPARE;

                        if (zhp != NULL)
                                zpool_close(zhp);
                } else {
                        stateval = POOL_STATE_POTENTIALLY_ACTIVE;
                        ret = B_TRUE;
                }
                break;

        case POOL_STATE_SPARE:
                /*
                 * For a hot spare, it can be either definitively in use, or
                 * potentially active.  To determine if it's in use, we iterate
                 * over all pools in the system and search for one with a spare
                 * with a matching guid.
                 *
                 * Due to the shared nature of spares, we don't actually report
                 * the potentially active case as in use.  This means the user
                 * can freely create pools on the hot spares of exported pools,
                 * but to do otherwise makes the resulting code complicated, and
                 * we end up having to deal with this case anyway.
                 */
                cb.cb_zhp = NULL;
                cb.cb_guid = vdev_guid;
                cb.cb_type = ZPOOL_CONFIG_SPARES;
                if (zpool_iter(hdl, find_aux, &cb) == 1) {
                        name = (char *)zpool_get_name(cb.cb_zhp);
                        ret = B_TRUE;
                } else {
                        ret = B_FALSE;
                }
                break;

        case POOL_STATE_L2CACHE:

                /*
                 * Check if any pool is currently using this l2cache device.
                 */
                cb.cb_zhp = NULL;
                cb.cb_guid = vdev_guid;
                cb.cb_type = ZPOOL_CONFIG_L2CACHE;
                if (zpool_iter(hdl, find_aux, &cb) == 1) {
                        name = (char *)zpool_get_name(cb.cb_zhp);
                        ret = B_TRUE;
                } else {
                        ret = B_FALSE;
                }
                break;

        default:
                ret = B_FALSE;
        }

        if (ret) {
                if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
                        if (cb.cb_zhp)
                                zpool_close(cb.cb_zhp);
                        nvlist_free(config);
                        return (-1);
                }
                *state = (pool_state_t)stateval;
        }

        if (cb.cb_zhp)
                zpool_close(cb.cb_zhp);

        nvlist_free(config);
        *inuse = ret;
        return (0);
}