2 * Copyright (c) 2007 Doug Rabson
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
31 * Stand-alone ZFS file reader.
34 #include <sys/endian.h>
36 #include <sys/stdint.h>
38 #include <machine/_inttypes.h>
49 static struct zfsmount zfsmount __unused;
52 * The indirect_child_t represents the vdev that we will read from, when we
53 * need to read all copies of the data (e.g. for scrub or reconstruction).
54 * For plain (non-mirror) top-level vdevs (i.e. is_vdev is not a mirror),
55 * ic_vdev is the same as is_vdev. However, for mirror top-level vdevs,
56 * ic_vdev is a child of the mirror.
58 typedef struct indirect_child {
64 * The indirect_split_t represents one mapped segment of an i/o to the
65 * indirect vdev. For non-split (contiguously-mapped) blocks, there will be
66 * only one indirect_split_t, with is_split_offset==0 and is_size==io_size.
67 * For split blocks, there will be several of these.
69 typedef struct indirect_split {
70 list_node_t is_node; /* link on iv_splits */
73 * is_split_offset is the offset into the i/o.
74 * This is the sum of the previous splits' is_size's.
76 uint64_t is_split_offset;
78 vdev_t *is_vdev; /* top-level vdev */
79 uint64_t is_target_offset; /* offset on is_vdev */
81 int is_children; /* number of entries in is_child[] */
84 * is_good_child is the child that we are currently using to
85 * attempt reconstruction.
89 indirect_child_t is_child[1]; /* variable-length */
93 * The indirect_vsd_t is associated with each i/o to the indirect vdev.
94 * It is the "Vdev-Specific Data" in the zio_t's io_vsd.
96 typedef struct indirect_vsd {
97 boolean_t iv_split_block;
98 boolean_t iv_reconstruct;
100 list_t iv_splits; /* list of indirect_split_t's */
104 * List of all vdevs, chained through v_alllink.
106 static vdev_list_t zfs_vdevs;
109 * List of ZFS features supported for read
111 static const char *features_for_read[] = {
112 "org.illumos:lz4_compress",
113 "com.delphix:hole_birth",
114 "com.delphix:extensible_dataset",
115 "com.delphix:embedded_data",
116 "org.open-zfs:large_blocks",
117 "org.illumos:sha512",
119 "org.zfsonlinux:large_dnode",
120 "com.joyent:multi_vdev_crash_dump",
121 "com.delphix:spacemap_histogram",
122 "com.delphix:zpool_checkpoint",
123 "com.delphix:spacemap_v2",
124 "com.datto:encryption",
125 "org.zfsonlinux:allocation_classes",
126 "com.datto:resilver_defer",
127 "com.delphix:device_removal",
128 "com.delphix:obsolete_counts",
129 "com.intel:allocation_classes",
130 "org.freebsd:zstd_compress",
136 * List of all pools, chained through spa_link.
138 static spa_list_t zfs_pools;
140 static const dnode_phys_t *dnode_cache_obj;
141 static uint64_t dnode_cache_bn;
142 static char *dnode_cache_buf;
144 static int zio_read(const spa_t *spa, const blkptr_t *bp, void *buf);
145 static int zfs_get_root(const spa_t *spa, uint64_t *objid);
146 static int zfs_rlookup(const spa_t *spa, uint64_t objnum, char *result);
147 static int zap_lookup(const spa_t *spa, const dnode_phys_t *dnode,
148 const char *name, uint64_t integer_size, uint64_t num_integers,
150 static int objset_get_dnode(const spa_t *, const objset_phys_t *, uint64_t,
152 static int dnode_read(const spa_t *, const dnode_phys_t *, off_t, void *,
154 static int vdev_indirect_read(vdev_t *, const blkptr_t *, void *, off_t,
156 static int vdev_mirror_read(vdev_t *, const blkptr_t *, void *, off_t, size_t);
157 vdev_indirect_mapping_t *vdev_indirect_mapping_open(spa_t *, objset_phys_t *,
159 vdev_indirect_mapping_entry_phys_t *
160 vdev_indirect_mapping_duplicate_adjacent_entries(vdev_t *, uint64_t,
161 uint64_t, uint64_t *);
166 STAILQ_INIT(&zfs_vdevs);
167 STAILQ_INIT(&zfs_pools);
169 dnode_cache_buf = malloc(SPA_MAXBLOCKSIZE);
175 nvlist_check_features_for_read(nvlist_t *nvl)
177 nvlist_t *features = NULL;
180 nv_string_t *nvp_name;
183 rc = nvlist_find(nvl, ZPOOL_CONFIG_FEATURES_FOR_READ,
184 DATA_TYPE_NVLIST, NULL, &features, NULL);
188 data = (nvs_data_t *)features->nv_data;
189 nvp = &data->nvl_pair; /* first pair in nvlist */
191 while (nvp->encoded_size != 0 && nvp->decoded_size != 0) {
194 nvp_name = (nv_string_t *)((uintptr_t)nvp + sizeof(*nvp));
197 for (i = 0; features_for_read[i] != NULL; i++) {
198 if (memcmp(nvp_name->nv_data, features_for_read[i],
199 nvp_name->nv_size) == 0) {
206 printf("ZFS: unsupported feature: %.*s\n",
207 nvp_name->nv_size, nvp_name->nv_data);
210 nvp = (nvp_header_t *)((uint8_t *)nvp + nvp->encoded_size);
212 nvlist_destroy(features);
218 vdev_read_phys(vdev_t *vdev, const blkptr_t *bp, void *buf,
219 off_t offset, size_t size)
224 if (!vdev->v_phys_read)
228 psize = BP_GET_PSIZE(bp);
233 rc = vdev->v_phys_read(vdev, vdev->v_read_priv, offset, buf, psize);
236 rc = zio_checksum_verify(vdev->v_spa, bp, buf);
242 typedef struct remap_segment {
246 uint64_t rs_split_offset;
250 static remap_segment_t *
251 rs_alloc(vdev_t *vd, uint64_t offset, uint64_t asize, uint64_t split_offset)
253 remap_segment_t *rs = malloc(sizeof (remap_segment_t));
257 rs->rs_offset = offset;
258 rs->rs_asize = asize;
259 rs->rs_split_offset = split_offset;
265 vdev_indirect_mapping_t *
266 vdev_indirect_mapping_open(spa_t *spa, objset_phys_t *os,
267 uint64_t mapping_object)
269 vdev_indirect_mapping_t *vim;
270 vdev_indirect_mapping_phys_t *vim_phys;
273 vim = calloc(1, sizeof (*vim));
277 vim->vim_dn = calloc(1, sizeof (*vim->vim_dn));
278 if (vim->vim_dn == NULL) {
283 rc = objset_get_dnode(spa, os, mapping_object, vim->vim_dn);
291 vim->vim_phys = malloc(sizeof (*vim->vim_phys));
292 if (vim->vim_phys == NULL) {
298 vim_phys = (vdev_indirect_mapping_phys_t *)DN_BONUS(vim->vim_dn);
299 *vim->vim_phys = *vim_phys;
301 vim->vim_objset = os;
302 vim->vim_object = mapping_object;
303 vim->vim_entries = NULL;
305 vim->vim_havecounts =
306 (vim->vim_dn->dn_bonuslen > VDEV_INDIRECT_MAPPING_SIZE_V0);
312 * Compare an offset with an indirect mapping entry; there are three
313 * possible scenarios:
315 * 1. The offset is "less than" the mapping entry; meaning the
316 * offset is less than the source offset of the mapping entry. In
317 * this case, there is no overlap between the offset and the
318 * mapping entry and -1 will be returned.
320 * 2. The offset is "greater than" the mapping entry; meaning the
321 * offset is greater than the mapping entry's source offset plus
322 * the entry's size. In this case, there is no overlap between
323 * the offset and the mapping entry and 1 will be returned.
325 * NOTE: If the offset is actually equal to the entry's offset
326 * plus size, this is considered to be "greater" than the entry,
327 * and this case applies (i.e. 1 will be returned). Thus, the
328 * entry's "range" can be considered to be inclusive at its
329 * start, but exclusive at its end: e.g. [src, src + size).
331 * 3. The last case to consider is if the offset actually falls
332 * within the mapping entry's range. If this is the case, the
333 * offset is considered to be "equal to" the mapping entry and
334 * 0 will be returned.
336 * NOTE: If the offset is equal to the entry's source offset,
337 * this case applies and 0 will be returned. If the offset is
338 * equal to the entry's source plus its size, this case does
339 * *not* apply (see "NOTE" above for scenario 2), and 1 will be
343 dva_mapping_overlap_compare(const void *v_key, const void *v_array_elem)
345 const uint64_t *key = v_key;
346 const vdev_indirect_mapping_entry_phys_t *array_elem =
348 uint64_t src_offset = DVA_MAPPING_GET_SRC_OFFSET(array_elem);
350 if (*key < src_offset) {
352 } else if (*key < src_offset + DVA_GET_ASIZE(&array_elem->vimep_dst)) {
360 * Return array entry.
362 static vdev_indirect_mapping_entry_phys_t *
363 vdev_indirect_mapping_entry(vdev_indirect_mapping_t *vim, uint64_t index)
369 if (vim->vim_phys->vimp_num_entries == 0)
372 if (vim->vim_entries == NULL) {
375 bsize = vim->vim_dn->dn_datablkszsec << SPA_MINBLOCKSHIFT;
376 size = vim->vim_phys->vimp_num_entries *
377 sizeof (*vim->vim_entries);
379 size = bsize / sizeof (*vim->vim_entries);
380 size *= sizeof (*vim->vim_entries);
382 vim->vim_entries = malloc(size);
383 if (vim->vim_entries == NULL)
385 vim->vim_num_entries = size / sizeof (*vim->vim_entries);
386 offset = index * sizeof (*vim->vim_entries);
389 /* We have data in vim_entries */
391 if (index >= vim->vim_entry_offset &&
392 index <= vim->vim_entry_offset + vim->vim_num_entries) {
393 index -= vim->vim_entry_offset;
394 return (&vim->vim_entries[index]);
396 offset = index * sizeof (*vim->vim_entries);
399 vim->vim_entry_offset = index;
400 size = vim->vim_num_entries * sizeof (*vim->vim_entries);
401 rc = dnode_read(vim->vim_spa, vim->vim_dn, offset, vim->vim_entries,
404 /* Read error, invalidate vim_entries. */
405 free(vim->vim_entries);
406 vim->vim_entries = NULL;
409 index -= vim->vim_entry_offset;
410 return (&vim->vim_entries[index]);
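
/*
 * Illustrative sketch (not part of the reader): dva_mapping_overlap_compare()
 * above treats each mapping entry as the half-open range [src, src + size),
 * which is exactly the contract a bsearch()-style comparator needs. The
 * struct and helpers below are hypothetical simplifications and assume a
 * standard bsearch() implementation is available.
 */
struct example_extent {
	uint64_t ex_src;	/* source offset of the mapped extent */
	uint64_t ex_size;	/* allocated size of the extent */
};

static int
example_extent_compare(const void *v_key, const void *v_elem)
{
	const uint64_t *key = v_key;
	const struct example_extent *ex = v_elem;

	if (*key < ex->ex_src)
		return (-1);		/* before the extent */
	if (*key < ex->ex_src + ex->ex_size)
		return (0);		/* inside [src, src + size) */
	return (1);			/* at or past the end */
}

/* Find the extent covering 'offset' in an array sorted by ex_src. */
static const struct example_extent *
example_extent_find(const struct example_extent *table, size_t nelem,
    uint64_t offset)
{
	return (bsearch(&offset, table, nelem, sizeof (table[0]),
	    example_extent_compare));
}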
414 * Returns the mapping entry for the given offset.
416 * It's possible that the given offset will not be in the mapping table
417 * (i.e. no mapping entries contain this offset), in which case the
418 * return value depends on the "next_if_missing" parameter.
420 * If the offset is not found in the table and "next_if_missing" is
421 * B_FALSE, then NULL will always be returned. The behavior is intended
422 * to allow consumers to get the entry corresponding to the offset
423 * parameter, iff the offset overlaps with an entry in the table.
425 * If the offset is not found in the table and "next_if_missing" is
426 * B_TRUE, then the entry nearest to the given offset will be returned,
427 * such that the entry's source offset is greater than the offset
428 * passed in (i.e. the "next" mapping entry in the table is returned, if
429 * the offset is missing from the table). If there are no entries whose
430 * source offset is greater than the passed in offset, NULL is returned.
432 static vdev_indirect_mapping_entry_phys_t *
433 vdev_indirect_mapping_entry_for_offset(vdev_indirect_mapping_t *vim,
436 ASSERT(vim->vim_phys->vimp_num_entries > 0);
438 vdev_indirect_mapping_entry_phys_t *entry;
440 uint64_t last = vim->vim_phys->vimp_num_entries - 1;
444 * We don't define these inside the while loop because we use
445 * their values when the offset isn't in the mapping.
450 while (last >= base) {
451 mid = base + ((last - base) >> 1);
453 entry = vdev_indirect_mapping_entry(vim, mid);
456 result = dva_mapping_overlap_compare(&offset, entry);
460 } else if (result < 0) {
470 * Given an indirect vdev and an extent on that vdev, it duplicates the
471 * physical entries of the indirect mapping that correspond to the extent
472 * to a new array and returns a pointer to it. In addition, copied_entries
473 * is populated with the number of mapping entries that were duplicated.
475 * Finally, since we are doing an allocation, it is up to the caller to
476 * free the array allocated in this function.
478 vdev_indirect_mapping_entry_phys_t *
479 vdev_indirect_mapping_duplicate_adjacent_entries(vdev_t *vd, uint64_t offset,
480 uint64_t asize, uint64_t *copied_entries)
482 vdev_indirect_mapping_entry_phys_t *duplicate_mappings = NULL;
483 vdev_indirect_mapping_t *vim = vd->v_mapping;
484 uint64_t entries = 0;
486 vdev_indirect_mapping_entry_phys_t *first_mapping =
487 vdev_indirect_mapping_entry_for_offset(vim, offset);
488 ASSERT3P(first_mapping, !=, NULL);
490 vdev_indirect_mapping_entry_phys_t *m = first_mapping;
492 uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);
493 uint64_t inner_offset = offset - DVA_MAPPING_GET_SRC_OFFSET(m);
494 uint64_t inner_size = MIN(asize, size - inner_offset);
496 offset += inner_size;
502 size_t copy_length = entries * sizeof (*first_mapping);
503 duplicate_mappings = malloc(copy_length);
504 if (duplicate_mappings != NULL)
505 bcopy(first_mapping, duplicate_mappings, copy_length);
509 *copied_entries = entries;
511 return (duplicate_mappings);
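
/*
 * Illustrative usage sketch (hypothetical helper, not used by the reader):
 * as noted above, the returned array is owned by the caller and must be
 * free()d. Each entry describes one contiguous piece of the requested
 * extent.
 */
static void
example_walk_extent(vdev_t *vd, uint64_t offset, uint64_t asize)
{
	uint64_t num_entries = 0;
	vdev_indirect_mapping_entry_phys_t *mapping =
	    vdev_indirect_mapping_duplicate_adjacent_entries(vd, offset,
	    asize, &num_entries);

	if (mapping == NULL)
		return;
	for (uint64_t i = 0; i < num_entries; i++) {
		/* mapping[i] describes where this piece lives on disk */
	}
	free(mapping);
}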
515 vdev_lookup_top(spa_t *spa, uint64_t vdev)
520 vlist = &spa->spa_root_vdev->v_children;
521 STAILQ_FOREACH(rvd, vlist, v_childlink)
522 if (rvd->v_id == vdev)
529 * This is a callback for vdev_indirect_remap() which allocates an
530 * indirect_split_t for each split segment and adds it to iv_splits.
533 vdev_indirect_gather_splits(uint64_t split_offset, vdev_t *vd, uint64_t offset,
534 uint64_t size, void *arg)
538 indirect_vsd_t *iv = zio->io_vsd;
540 if (vd->v_read == vdev_indirect_read)
543 if (vd->v_read == vdev_mirror_read)
546 indirect_split_t *is =
547 malloc(offsetof(indirect_split_t, is_child[n]));
549 zio->io_error = ENOMEM;
552 bzero(is, offsetof(indirect_split_t, is_child[n]));
556 is->is_split_offset = split_offset;
557 is->is_target_offset = offset;
561 * Note that we only consider multiple copies of the data for
562 * *mirror* vdevs. We don't for "replacing" or "spare" vdevs, even
563 * though they use the same ops as mirror, because there's only one
564 * "good" copy under the replacing/spare.
566 if (vd->v_read == vdev_mirror_read) {
570 STAILQ_FOREACH(kid, &vd->v_children, v_childlink) {
571 is->is_child[i++].ic_vdev = kid;
574 is->is_child[0].ic_vdev = vd;
577 list_insert_tail(&iv->iv_splits, is);
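
/*
 * Illustrative sketch (hypothetical helper, not used by the reader):
 * because vdev_indirect_gather_splits() sets each segment's
 * is_split_offset to the running total of the earlier segments' sizes,
 * walking iv_splits and summing is_size recovers the full i/o size.
 */
static uint64_t
example_total_split_size(indirect_vsd_t *iv)
{
	uint64_t total = 0;

	for (indirect_split_t *is = list_head(&iv->iv_splits);
	    is != NULL; is = list_next(&iv->iv_splits, is))
		total += is->is_size;
	return (total);
}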
581 vdev_indirect_remap(vdev_t *vd, uint64_t offset, uint64_t asize, void *arg)
584 spa_t *spa = vd->v_spa;
588 list_create(&stack, sizeof (remap_segment_t),
589 offsetof(remap_segment_t, rs_node));
591 rs = rs_alloc(vd, offset, asize, 0);
593 printf("vdev_indirect_remap: out of memory.\n");
594 zio->io_error = ENOMEM;
596 for (; rs != NULL; rs = list_remove_head(&stack)) {
597 vdev_t *v = rs->rs_vd;
598 uint64_t num_entries = 0;
599 /* vdev_indirect_mapping_t *vim = v->v_mapping; */
600 vdev_indirect_mapping_entry_phys_t *mapping =
601 vdev_indirect_mapping_duplicate_adjacent_entries(v,
602 rs->rs_offset, rs->rs_asize, &num_entries);
604 if (num_entries == 0)
605 zio->io_error = ENOMEM;
607 for (uint64_t i = 0; i < num_entries; i++) {
608 vdev_indirect_mapping_entry_phys_t *m = &mapping[i];
609 uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);
610 uint64_t dst_offset = DVA_GET_OFFSET(&m->vimep_dst);
611 uint64_t dst_vdev = DVA_GET_VDEV(&m->vimep_dst);
612 uint64_t inner_offset = rs->rs_offset -
613 DVA_MAPPING_GET_SRC_OFFSET(m);
614 uint64_t inner_size =
615 MIN(rs->rs_asize, size - inner_offset);
616 vdev_t *dst_v = vdev_lookup_top(spa, dst_vdev);
618 if (dst_v->v_read == vdev_indirect_read) {
621 o = rs_alloc(dst_v, dst_offset + inner_offset,
622 inner_size, rs->rs_split_offset);
624 printf("vdev_indirect_remap: "
626 zio->io_error = ENOMEM;
630 list_insert_head(&stack, o);
632 vdev_indirect_gather_splits(rs->rs_split_offset, dst_v,
633 dst_offset + inner_offset,
637 * vdev_indirect_gather_splits can fail with a memory
638 * allocation error, and we cannot recover from it.
640 if (zio->io_error != 0)
642 rs->rs_offset += inner_size;
643 rs->rs_asize -= inner_size;
644 rs->rs_split_offset += inner_size;
649 if (zio->io_error != 0)
653 list_destroy(&stack);
657 vdev_indirect_map_free(zio_t *zio)
659 indirect_vsd_t *iv = zio->io_vsd;
660 indirect_split_t *is;
662 while ((is = list_head(&iv->iv_splits)) != NULL) {
663 for (int c = 0; c < is->is_children; c++) {
664 indirect_child_t *ic = &is->is_child[c];
667 list_remove(&iv->iv_splits, is);
674 vdev_indirect_read(vdev_t *vdev, const blkptr_t *bp, void *buf,
675 off_t offset, size_t bytes)
678 spa_t *spa = vdev->v_spa;
680 indirect_split_t *first;
683 iv = calloc(1, sizeof(*iv));
687 list_create(&iv->iv_splits,
688 sizeof (indirect_split_t), offsetof(indirect_split_t, is_node));
690 bzero(&zio, sizeof(zio));
692 zio.io_bp = (blkptr_t *)bp;
695 zio.io_offset = offset;
699 if (vdev->v_mapping == NULL) {
700 vdev_indirect_config_t *vic;
702 vic = &vdev->vdev_indirect_config;
703 vdev->v_mapping = vdev_indirect_mapping_open(spa,
704 spa->spa_mos, vic->vic_mapping_object);
707 vdev_indirect_remap(vdev, offset, bytes, &zio);
708 if (zio.io_error != 0)
709 return (zio.io_error);
711 first = list_head(&iv->iv_splits);
712 if (first->is_size == zio.io_size) {
714 * This is not a split block; we are pointing to the entire
715 * data, which will checksum the same as the original data.
716 * Pass the BP down so that the child i/o can verify the
717 * checksum, and try a different location if available
718 * (e.g. on a mirror).
720 * While this special case could be handled the same as the
721 * general (split block) case, doing it this way ensures
722 * that the vast majority of blocks on indirect vdevs
723 * (which are not split) are handled identically to blocks
724 * on non-indirect vdevs. This allows us to be less strict
725 * about performance in the general (but rare) case.
727 rc = first->is_vdev->v_read(first->is_vdev, zio.io_bp,
728 zio.io_data, first->is_target_offset, bytes);
730 iv->iv_split_block = B_TRUE;
732 * Read one copy of each split segment, from the
733 * top-level vdev. Since we don't know the
734 * checksum of each split individually, the child
735 * zio can't ensure that we get the right data.
736 * E.g. if it's a mirror, it will just read from a
737 * random (healthy) leaf vdev. We have to verify
738 * the checksum in vdev_indirect_io_done().
740 for (indirect_split_t *is = list_head(&iv->iv_splits);
741 is != NULL; is = list_next(&iv->iv_splits, is)) {
742 char *ptr = zio.io_data;
744 rc = is->is_vdev->v_read(is->is_vdev, zio.io_bp,
745 ptr + is->is_split_offset, is->is_target_offset,
748 if (zio_checksum_verify(spa, zio.io_bp, zio.io_data))
754 vdev_indirect_map_free(&zio);
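
/*
 * Read from a leaf (disk) vdev. DVA offsets are relative to the start of
 * the allocatable space, which begins after the two front labels and the
 * boot region, hence the VDEV_LABEL_START_SIZE adjustment below.
 */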
762 vdev_disk_read(vdev_t *vdev, const blkptr_t *bp, void *buf,
763 off_t offset, size_t bytes)
766 return (vdev_read_phys(vdev, bp, buf,
767 offset + VDEV_LABEL_START_SIZE, bytes));
771 vdev_missing_read(vdev_t *vdev __unused, const blkptr_t *bp __unused,
772 void *buf __unused, off_t offset __unused, size_t bytes __unused)
779 vdev_mirror_read(vdev_t *vdev, const blkptr_t *bp, void *buf,
780 off_t offset, size_t bytes)
786 STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) {
787 if (kid->v_state != VDEV_STATE_HEALTHY)
789 rc = kid->v_read(kid, bp, buf, offset, bytes);
798 vdev_replacing_read(vdev_t *vdev, const blkptr_t *bp, void *buf,
799 off_t offset, size_t bytes)
804 * Here we should have two kids:
805 * The first is the one we are replacing; it is the only one we can
806 * trust to have valid data, but it might not be present.
807 * The second is the one we are replacing with. It is most likely
808 * healthy, but we can't trust it has the data we need, so we won't use it.
810 kid = STAILQ_FIRST(&vdev->v_children);
813 if (kid->v_state != VDEV_STATE_HEALTHY)
815 return (kid->v_read(kid, bp, buf, offset, bytes));
819 vdev_find(uint64_t guid)
823 STAILQ_FOREACH(vdev, &zfs_vdevs, v_alllink)
824 if (vdev->v_guid == guid)
831 vdev_create(uint64_t guid, vdev_read_t *_read)
834 vdev_indirect_config_t *vic;
836 vdev = calloc(1, sizeof(vdev_t));
838 STAILQ_INIT(&vdev->v_children);
840 vdev->v_read = _read;
843 * The root vdev has no read function; we use this fact to
844 * skip setting up data we do not need for the root vdev.
845 * The root vdev is only referenced from the spa.
848 vic = &vdev->vdev_indirect_config;
849 vic->vic_prev_indirect_vdev = UINT64_MAX;
850 STAILQ_INSERT_TAIL(&zfs_vdevs, vdev, v_alllink);
858 vdev_set_initial_state(vdev_t *vdev, const nvlist_t *nvlist)
860 uint64_t is_offline, is_faulted, is_degraded, is_removed, isnt_present;
863 is_offline = is_removed = is_faulted = is_degraded = isnt_present = 0;
865 (void) nvlist_find(nvlist, ZPOOL_CONFIG_OFFLINE, DATA_TYPE_UINT64, NULL,
867 (void) nvlist_find(nvlist, ZPOOL_CONFIG_REMOVED, DATA_TYPE_UINT64, NULL,
869 (void) nvlist_find(nvlist, ZPOOL_CONFIG_FAULTED, DATA_TYPE_UINT64, NULL,
871 (void) nvlist_find(nvlist, ZPOOL_CONFIG_DEGRADED, DATA_TYPE_UINT64,
872 NULL, &is_degraded, NULL);
873 (void) nvlist_find(nvlist, ZPOOL_CONFIG_NOT_PRESENT, DATA_TYPE_UINT64,
874 NULL, &isnt_present, NULL);
875 (void) nvlist_find(nvlist, ZPOOL_CONFIG_IS_LOG, DATA_TYPE_UINT64, NULL,
879 vdev->v_state = VDEV_STATE_OFFLINE;
880 else if (is_removed != 0)
881 vdev->v_state = VDEV_STATE_REMOVED;
882 else if (is_faulted != 0)
883 vdev->v_state = VDEV_STATE_FAULTED;
884 else if (is_degraded != 0)
885 vdev->v_state = VDEV_STATE_DEGRADED;
886 else if (isnt_present != 0)
887 vdev->v_state = VDEV_STATE_CANT_OPEN;
889 vdev->v_islog = is_log != 0;
893 vdev_init(uint64_t guid, const nvlist_t *nvlist, vdev_t **vdevp)
895 uint64_t id, ashift, asize, nparity;
902 if (nvlist_find(nvlist, ZPOOL_CONFIG_ID, DATA_TYPE_UINT64, NULL, &id,
904 nvlist_find(nvlist, ZPOOL_CONFIG_TYPE, DATA_TYPE_STRING, NULL,
909 if (memcmp(type, VDEV_TYPE_MIRROR, len) != 0 &&
910 memcmp(type, VDEV_TYPE_DISK, len) != 0 &&
912 memcmp(type, VDEV_TYPE_FILE, len) != 0 &&
914 memcmp(type, VDEV_TYPE_RAIDZ, len) != 0 &&
915 memcmp(type, VDEV_TYPE_INDIRECT, len) != 0 &&
916 memcmp(type, VDEV_TYPE_REPLACING, len) != 0 &&
917 memcmp(type, VDEV_TYPE_HOLE, len) != 0) {
918 printf("ZFS: can only boot from disk, mirror, raidz1, "
919 "raidz2 and raidz3 vdevs, got: %.*s\n", len, type);
923 if (memcmp(type, VDEV_TYPE_MIRROR, len) == 0)
924 vdev = vdev_create(guid, vdev_mirror_read);
925 else if (memcmp(type, VDEV_TYPE_RAIDZ, len) == 0)
926 vdev = vdev_create(guid, vdev_raidz_read);
927 else if (memcmp(type, VDEV_TYPE_REPLACING, len) == 0)
928 vdev = vdev_create(guid, vdev_replacing_read);
929 else if (memcmp(type, VDEV_TYPE_INDIRECT, len) == 0) {
930 vdev_indirect_config_t *vic;
932 vdev = vdev_create(guid, vdev_indirect_read);
934 vdev->v_state = VDEV_STATE_HEALTHY;
935 vic = &vdev->vdev_indirect_config;
938 ZPOOL_CONFIG_INDIRECT_OBJECT,
940 NULL, &vic->vic_mapping_object, NULL);
942 ZPOOL_CONFIG_INDIRECT_BIRTHS,
944 NULL, &vic->vic_births_object, NULL);
946 ZPOOL_CONFIG_PREV_INDIRECT_VDEV,
948 NULL, &vic->vic_prev_indirect_vdev, NULL);
950 } else if (memcmp(type, VDEV_TYPE_HOLE, len) == 0) {
951 vdev = vdev_create(guid, vdev_missing_read);
953 vdev = vdev_create(guid, vdev_disk_read);
959 vdev_set_initial_state(vdev, nvlist);
961 if (nvlist_find(nvlist, ZPOOL_CONFIG_ASHIFT,
962 DATA_TYPE_UINT64, NULL, &ashift, NULL) == 0)
963 vdev->v_ashift = ashift;
965 if (nvlist_find(nvlist, ZPOOL_CONFIG_ASIZE,
966 DATA_TYPE_UINT64, NULL, &asize, NULL) == 0) {
967 vdev->v_psize = asize +
968 VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
971 if (nvlist_find(nvlist, ZPOOL_CONFIG_NPARITY,
972 DATA_TYPE_UINT64, NULL, &nparity, NULL) == 0)
973 vdev->v_nparity = nparity;
975 if (nvlist_find(nvlist, ZPOOL_CONFIG_PATH,
976 DATA_TYPE_STRING, NULL, &path, &pathlen) == 0) {
977 char prefix[] = "/dev/";
979 len = strlen(prefix);
980 if (len < pathlen && memcmp(path, prefix, len) == 0) {
984 name = malloc(pathlen + 1);
985 bcopy(path, name, pathlen);
986 name[pathlen] = '\0';
990 if (memcmp(type, VDEV_TYPE_RAIDZ, len) == 0) {
991 if (vdev->v_nparity < 1 ||
992 vdev->v_nparity > 3) {
993 printf("ZFS: invalid raidz parity: %d\n",
997 (void) asprintf(&name, "%.*s%d-%" PRIu64, len, type,
998 vdev->v_nparity, id);
1000 (void) asprintf(&name, "%.*s-%" PRIu64, len, type, id);
1002 vdev->v_name = name;
1009 * Find the slot for the vdev. We return either NULL, to signal that
1010 * STAILQ_INSERT_HEAD should be used, or the link element to be used
1011 * with STAILQ_INSERT_AFTER.
1014 vdev_find_previous(vdev_t *top_vdev, vdev_t *vdev)
1016 vdev_t *v, *previous;
1018 if (STAILQ_EMPTY(&top_vdev->v_children))
1022 STAILQ_FOREACH(v, &top_vdev->v_children, v_childlink) {
1023 if (v->v_id > vdev->v_id)
1026 if (v->v_id == vdev->v_id)
1029 if (v->v_id < vdev->v_id)
1036 vdev_child_count(vdev_t *vdev)
1042 STAILQ_FOREACH(v, &vdev->v_children, v_childlink) {
1049 * Insert vdev into top_vdev children list. List is ordered by v_id.
1052 vdev_insert(vdev_t *top_vdev, vdev_t *vdev)
1058 * Top-level vdevs can appear in random order, depending on how
1059 * the firmware presents the disk devices.
1060 * However, we insert each vdev so the list stays ordered by v_id,
1061 * so we can use either STAILQ_INSERT_HEAD or STAILQ_INSERT_AFTER,
1062 * as STAILQ does not have an insert-before operation.
1064 previous = vdev_find_previous(top_vdev, vdev);
1066 if (previous == NULL) {
1067 STAILQ_INSERT_HEAD(&top_vdev->v_children, vdev, v_childlink);
1068 } else if (previous->v_id == vdev->v_id) {
1070 * This vdev was configured from label config,
1071 * do not insert duplicate.
1075 STAILQ_INSERT_AFTER(&top_vdev->v_children, previous, vdev,
1079 count = vdev_child_count(top_vdev);
1080 if (top_vdev->v_nchildren < count)
1081 top_vdev->v_nchildren = count;
1085 vdev_from_nvlist(spa_t *spa, uint64_t top_guid, const nvlist_t *nvlist)
1087 vdev_t *top_vdev, *vdev;
1088 nvlist_t *kids = NULL;
1092 top_vdev = vdev_find(top_guid);
1093 if (top_vdev == NULL) {
1094 rc = vdev_init(top_guid, nvlist, &top_vdev);
1097 top_vdev->v_spa = spa;
1098 top_vdev->v_top = top_vdev;
1099 vdev_insert(spa->spa_root_vdev, top_vdev);
1102 /* Add children if there are any. */
1103 rc = nvlist_find(nvlist, ZPOOL_CONFIG_CHILDREN, DATA_TYPE_NVLIST_ARRAY,
1104 &nkids, &kids, NULL);
1106 for (int i = 0; i < nkids; i++) {
1109 rc = nvlist_find(kids, ZPOOL_CONFIG_GUID,
1110 DATA_TYPE_UINT64, NULL, &guid, NULL);
1112 nvlist_destroy(kids);
1115 rc = vdev_init(guid, kids, &vdev);
1117 nvlist_destroy(kids);
1122 vdev->v_top = top_vdev;
1123 vdev_insert(top_vdev, vdev);
1125 rc = nvlist_next(kids);
1127 nvlist_destroy(kids);
1133 * When there are no children, nvlist_find() returns an
1134 * error; reset it, because leaf devices have no children.
1138 nvlist_destroy(kids);
1144 vdev_init_from_label(spa_t *spa, const nvlist_t *nvlist)
1146 uint64_t pool_guid, top_guid;
1150 if (nvlist_find(nvlist, ZPOOL_CONFIG_POOL_GUID, DATA_TYPE_UINT64,
1151 NULL, &pool_guid, NULL) ||
1152 nvlist_find(nvlist, ZPOOL_CONFIG_TOP_GUID, DATA_TYPE_UINT64,
1153 NULL, &top_guid, NULL) ||
1154 nvlist_find(nvlist, ZPOOL_CONFIG_VDEV_TREE, DATA_TYPE_NVLIST,
1155 NULL, &vdevs, NULL)) {
1156 printf("ZFS: can't find vdev details\n");
1160 rc = vdev_from_nvlist(spa, top_guid, vdevs);
1161 nvlist_destroy(vdevs);
1166 vdev_set_state(vdev_t *vdev)
1172 STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) {
1173 vdev_set_state(kid);
1177 * A mirror or raidz is healthy if all of its kids are healthy. A
1178 * mirror is degraded if at least one of its kids is still healthy;
1179 * a raidz is degraded if no more than nparity kids are offline.
1181 if (STAILQ_FIRST(&vdev->v_children)) {
1184 STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) {
1185 if (kid->v_state == VDEV_STATE_HEALTHY)
1190 if (bad_kids == 0) {
1191 vdev->v_state = VDEV_STATE_HEALTHY;
1193 if (vdev->v_read == vdev_mirror_read) {
1195 vdev->v_state = VDEV_STATE_DEGRADED;
1197 vdev->v_state = VDEV_STATE_OFFLINE;
1199 } else if (vdev->v_read == vdev_raidz_read) {
1200 if (bad_kids > vdev->v_nparity) {
1201 vdev->v_state = VDEV_STATE_OFFLINE;
1203 vdev->v_state = VDEV_STATE_DEGRADED;
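
/*
 * Illustrative sketch of the rule above (hypothetical helper, not used by
 * the reader): decide the state of an interior vdev from its children.
 */
static vdev_state_t
example_interior_state(boolean_t is_mirror, int good_kids, int bad_kids,
    int nparity)
{
	if (bad_kids == 0)
		return (VDEV_STATE_HEALTHY);
	if (is_mirror)
		return (good_kids > 0 ? VDEV_STATE_DEGRADED :
		    VDEV_STATE_OFFLINE);
	/* raidz: readable while no more than nparity children are missing */
	return (bad_kids <= nparity ? VDEV_STATE_DEGRADED :
	    VDEV_STATE_OFFLINE);
}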
1211 vdev_update_from_nvlist(uint64_t top_guid, const nvlist_t *nvlist)
1214 nvlist_t *kids = NULL;
1217 /* Update top vdev. */
1218 vdev = vdev_find(top_guid);
1220 vdev_set_initial_state(vdev, nvlist);
1222 /* Update children if there are any. */
1223 rc = nvlist_find(nvlist, ZPOOL_CONFIG_CHILDREN, DATA_TYPE_NVLIST_ARRAY,
1224 &nkids, &kids, NULL);
1226 for (int i = 0; i < nkids; i++) {
1229 rc = nvlist_find(kids, ZPOOL_CONFIG_GUID,
1230 DATA_TYPE_UINT64, NULL, &guid, NULL);
1234 vdev = vdev_find(guid);
1236 vdev_set_initial_state(vdev, kids);
1238 rc = nvlist_next(kids);
1245 nvlist_destroy(kids);
1251 vdev_init_from_nvlist(spa_t *spa, const nvlist_t *nvlist)
1253 uint64_t pool_guid, vdev_children;
1254 nvlist_t *vdevs = NULL, *kids = NULL;
1257 if (nvlist_find(nvlist, ZPOOL_CONFIG_POOL_GUID, DATA_TYPE_UINT64,
1258 NULL, &pool_guid, NULL) ||
1259 nvlist_find(nvlist, ZPOOL_CONFIG_VDEV_CHILDREN, DATA_TYPE_UINT64,
1260 NULL, &vdev_children, NULL) ||
1261 nvlist_find(nvlist, ZPOOL_CONFIG_VDEV_TREE, DATA_TYPE_NVLIST,
1262 NULL, &vdevs, NULL)) {
1263 printf("ZFS: can't find vdev details\n");
1268 if (spa->spa_guid != pool_guid) {
1269 nvlist_destroy(vdevs);
1273 spa->spa_root_vdev->v_nchildren = vdev_children;
1275 rc = nvlist_find(vdevs, ZPOOL_CONFIG_CHILDREN, DATA_TYPE_NVLIST_ARRAY,
1276 &nkids, &kids, NULL);
1277 nvlist_destroy(vdevs);
1280 * The MOS config has at least one child for the root vdev.
1285 for (int i = 0; i < nkids; i++) {
1289 rc = nvlist_find(kids, ZPOOL_CONFIG_GUID, DATA_TYPE_UINT64,
1293 vdev = vdev_find(guid);
1295 * Top level vdev is missing, create it.
1298 rc = vdev_from_nvlist(spa, guid, kids);
1300 rc = vdev_update_from_nvlist(guid, kids);
1303 rc = nvlist_next(kids);
1307 nvlist_destroy(kids);
1310 * Re-evaluate top-level vdev state.
1312 vdev_set_state(spa->spa_root_vdev);
1318 spa_find_by_guid(uint64_t guid)
1322 STAILQ_FOREACH(spa, &zfs_pools, spa_link)
1323 if (spa->spa_guid == guid)
1330 spa_find_by_name(const char *name)
1334 STAILQ_FOREACH(spa, &zfs_pools, spa_link)
1335 if (strcmp(spa->spa_name, name) == 0)
1342 spa_create(uint64_t guid, const char *name)
1346 if ((spa = calloc(1, sizeof(spa_t))) == NULL)
1348 if ((spa->spa_name = strdup(name)) == NULL) {
1352 spa->spa_uberblock = &spa->spa_uberblock_master;
1353 spa->spa_mos = &spa->spa_mos_master;
1354 spa->spa_guid = guid;
1355 spa->spa_root_vdev = vdev_create(guid, NULL);
1356 if (spa->spa_root_vdev == NULL) {
1357 free(spa->spa_name);
1361 spa->spa_root_vdev->v_name = strdup("root");
1362 STAILQ_INSERT_TAIL(&zfs_pools, spa, spa_link);
1368 state_name(vdev_state_t state)
1370 static const char *names[] = {
1380 return (names[state]);
1385 #define pager_printf printf
1390 pager_printf(const char *fmt, ...)
1395 va_start(args, fmt);
1396 vsnprintf(line, sizeof(line), fmt, args);
1398 return (pager_output(line));
1403 #define STATUS_FORMAT " %s %s\n"
1406 print_state(int indent, const char *name, vdev_state_t state)
1412 for (i = 0; i < indent; i++)
1415 return (pager_printf(STATUS_FORMAT, buf, state_name(state)));
1419 vdev_status(vdev_t *vdev, int indent)
1424 if (vdev->v_islog) {
1425 (void) pager_output(" logs\n");
1429 ret = print_state(indent, vdev->v_name, vdev->v_state);
1433 STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) {
1434 ret = vdev_status(kid, indent + 1);
1442 spa_status(spa_t *spa)
1444 static char bootfs[ZFS_MAXNAMELEN];
1448 int good_kids, bad_kids, degraded_kids, ret;
1451 ret = pager_printf(" pool: %s\n", spa->spa_name);
1455 if (zfs_get_root(spa, &rootid) == 0 &&
1456 zfs_rlookup(spa, rootid, bootfs) == 0) {
1457 if (bootfs[0] == '\0')
1458 ret = pager_printf("bootfs: %s\n", spa->spa_name);
1460 ret = pager_printf("bootfs: %s/%s\n", spa->spa_name,
1465 ret = pager_printf("config:\n\n");
1468 ret = pager_printf(STATUS_FORMAT, "NAME", "STATE");
1475 vlist = &spa->spa_root_vdev->v_children;
1476 STAILQ_FOREACH(vdev, vlist, v_childlink) {
1477 if (vdev->v_state == VDEV_STATE_HEALTHY)
1479 else if (vdev->v_state == VDEV_STATE_DEGRADED)
1485 state = VDEV_STATE_CLOSED;
1486 if (good_kids > 0 && (degraded_kids + bad_kids) == 0)
1487 state = VDEV_STATE_HEALTHY;
1488 else if ((good_kids + degraded_kids) > 0)
1489 state = VDEV_STATE_DEGRADED;
1491 ret = print_state(0, spa->spa_name, state);
1495 STAILQ_FOREACH(vdev, vlist, v_childlink) {
1496 ret = vdev_status(vdev, 1);
1504 spa_all_status(void)
1507 int first = 1, ret = 0;
1509 STAILQ_FOREACH(spa, &zfs_pools, spa_link) {
1511 ret = pager_printf("\n");
1516 ret = spa_status(spa);
1524 vdev_label_offset(uint64_t psize, int l, uint64_t offset)
1526 uint64_t label_offset;
1528 if (l < VDEV_LABELS / 2)
1531 label_offset = psize - VDEV_LABELS * sizeof (vdev_label_t);
1533 return (offset + l * sizeof (vdev_label_t) + label_offset);
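
/*
 * Illustrative sketch (hypothetical helper, not used by the reader): ZFS
 * keeps four copies of the label, two at the front of the device and two
 * at the end. With 256 KiB labels, a device of size psize has label bases
 * at 0, 256K, psize - 512K and psize - 256K.
 */
static void
example_label_bases(uint64_t psize, uint64_t bases[VDEV_LABELS])
{
	for (int l = 0; l < VDEV_LABELS; l++)
		bases[l] = vdev_label_offset(psize, l, 0);
}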
1537 vdev_uberblock_compare(const uberblock_t *ub1, const uberblock_t *ub2)
1539 unsigned int seq1 = 0;
1540 unsigned int seq2 = 0;
1541 int cmp = AVL_CMP(ub1->ub_txg, ub2->ub_txg);
1546 cmp = AVL_CMP(ub1->ub_timestamp, ub2->ub_timestamp);
1550 if (MMP_VALID(ub1) && MMP_SEQ_VALID(ub1))
1551 seq1 = MMP_SEQ(ub1);
1553 if (MMP_VALID(ub2) && MMP_SEQ_VALID(ub2))
1554 seq2 = MMP_SEQ(ub2);
1556 return (AVL_CMP(seq1, seq2));
1560 uberblock_verify(uberblock_t *ub)
1562 if (ub->ub_magic == BSWAP_64((uint64_t)UBERBLOCK_MAGIC)) {
1563 byteswap_uint64_array(ub, sizeof (uberblock_t));
1566 if (ub->ub_magic != UBERBLOCK_MAGIC ||
1567 !SPA_VERSION_IS_SUPPORTED(ub->ub_version))
1574 vdev_label_read(vdev_t *vd, int l, void *buf, uint64_t offset,
1580 off = vdev_label_offset(vd->v_psize, l, offset);
1583 BP_SET_LSIZE(&bp, size);
1584 BP_SET_PSIZE(&bp, size);
1585 BP_SET_CHECKSUM(&bp, ZIO_CHECKSUM_LABEL);
1586 BP_SET_COMPRESS(&bp, ZIO_COMPRESS_OFF);
1587 DVA_SET_OFFSET(BP_IDENTITY(&bp), off);
1588 ZIO_SET_CHECKSUM(&bp.blk_cksum, off, 0, 0, 0);
1590 return (vdev_read_phys(vd, &bp, buf, off, size));
1594 vdev_get_label_asize(nvlist_t *nvl)
1603 if (nvlist_find(nvl, ZPOOL_CONFIG_VDEV_TREE, DATA_TYPE_NVLIST,
1604 NULL, &vdevs, NULL) != 0)
1608 * Get the vdev type. We will calculate asize for raidz, mirror and disk.
1609 * For raidz, the asize is the raw size of all children.
1611 if (nvlist_find(vdevs, ZPOOL_CONFIG_TYPE, DATA_TYPE_STRING,
1612 NULL, &type, &len) != 0)
1615 if (memcmp(type, VDEV_TYPE_MIRROR, len) != 0 &&
1616 memcmp(type, VDEV_TYPE_DISK, len) != 0 &&
1617 memcmp(type, VDEV_TYPE_RAIDZ, len) != 0)
1620 if (nvlist_find(vdevs, ZPOOL_CONFIG_ASIZE, DATA_TYPE_UINT64,
1621 NULL, &asize, NULL) != 0)
1624 if (memcmp(type, VDEV_TYPE_RAIDZ, len) == 0) {
1628 if (nvlist_find(vdevs, ZPOOL_CONFIG_CHILDREN,
1629 DATA_TYPE_NVLIST_ARRAY, &nkids, &kids, NULL) != 0) {
1635 nvlist_destroy(kids);
1638 asize += VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
1640 nvlist_destroy(vdevs);
1645 vdev_label_read_config(vdev_t *vd, uint64_t txg)
1648 uint64_t best_txg = 0;
1649 uint64_t label_txg = 0;
1651 nvlist_t *nvl = NULL, *tmp;
1654 label = malloc(sizeof (vdev_phys_t));
1658 for (int l = 0; l < VDEV_LABELS; l++) {
1659 const unsigned char *nvlist;
1661 if (vdev_label_read(vd, l, label,
1662 offsetof(vdev_label_t, vl_vdev_phys),
1663 sizeof (vdev_phys_t)))
1666 nvlist = (const unsigned char *) label->vp_nvlist;
1667 tmp = nvlist_import(nvlist + 4, nvlist[0], nvlist[1]);
1671 error = nvlist_find(tmp, ZPOOL_CONFIG_POOL_TXG,
1672 DATA_TYPE_UINT64, NULL, &label_txg, NULL);
1673 if (error != 0 || label_txg == 0) {
1674 nvlist_destroy(nvl);
1679 if (label_txg <= txg && label_txg > best_txg) {
1680 best_txg = label_txg;
1681 nvlist_destroy(nvl);
1686 * Use the asize from the pool config. We need this
1687 * because the BIOS can report a bad value.
1689 asize = vdev_get_label_asize(nvl);
1691 vd->v_psize = asize;
1694 nvlist_destroy(tmp);
1697 if (best_txg == 0) {
1698 nvlist_destroy(nvl);
1707 vdev_uberblock_load(vdev_t *vd, uberblock_t *ub)
1711 buf = malloc(VDEV_UBERBLOCK_SIZE(vd));
1715 for (int l = 0; l < VDEV_LABELS; l++) {
1716 for (int n = 0; n < VDEV_UBERBLOCK_COUNT(vd); n++) {
1717 if (vdev_label_read(vd, l, buf,
1718 VDEV_UBERBLOCK_OFFSET(vd, n),
1719 VDEV_UBERBLOCK_SIZE(vd)))
1721 if (uberblock_verify(buf) != 0)
1724 if (vdev_uberblock_compare(buf, ub) > 0)
1732 vdev_probe(vdev_phys_read_t *_read, void *read_priv, spa_t **spap)
1739 uint64_t guid, vdev_children;
1740 uint64_t pool_txg, pool_guid;
1741 const char *pool_name;
1745 * Load the vdev label and figure out which
1746 * uberblock is most current.
1748 memset(&vtmp, 0, sizeof(vtmp));
1749 vtmp.v_phys_read = _read;
1750 vtmp.v_read_priv = read_priv;
1751 vtmp.v_psize = P2ALIGN(ldi_get_size(read_priv),
1752 (uint64_t)sizeof (vdev_label_t));
1754 /* Test for minimum device size. */
1755 if (vtmp.v_psize < SPA_MINDEVSIZE)
1758 nvl = vdev_label_read_config(&vtmp, UINT64_MAX);
1762 if (nvlist_find(nvl, ZPOOL_CONFIG_VERSION, DATA_TYPE_UINT64,
1763 NULL, &val, NULL) != 0) {
1764 nvlist_destroy(nvl);
1768 if (!SPA_VERSION_IS_SUPPORTED(val)) {
1769 printf("ZFS: unsupported ZFS version %u (should be %u)\n",
1770 (unsigned)val, (unsigned)SPA_VERSION);
1771 nvlist_destroy(nvl);
1775 /* Check ZFS features for read */
1776 rc = nvlist_check_features_for_read(nvl);
1778 nvlist_destroy(nvl);
1782 if (nvlist_find(nvl, ZPOOL_CONFIG_POOL_STATE, DATA_TYPE_UINT64,
1783 NULL, &val, NULL) != 0) {
1784 nvlist_destroy(nvl);
1788 if (val == POOL_STATE_DESTROYED) {
1789 /* We don't boot from destroyed pools. */
1790 nvlist_destroy(nvl);
1794 if (nvlist_find(nvl, ZPOOL_CONFIG_POOL_TXG, DATA_TYPE_UINT64,
1795 NULL, &pool_txg, NULL) != 0 ||
1796 nvlist_find(nvl, ZPOOL_CONFIG_POOL_GUID, DATA_TYPE_UINT64,
1797 NULL, &pool_guid, NULL) != 0 ||
1798 nvlist_find(nvl, ZPOOL_CONFIG_POOL_NAME, DATA_TYPE_STRING,
1799 NULL, &pool_name, &namelen) != 0) {
1801 * Cache and spare devices end up here - just ignore
1804 nvlist_destroy(nvl);
1809 * Create the pool if this is the first time we've seen it.
1811 spa = spa_find_by_guid(pool_guid);
1815 nvlist_find(nvl, ZPOOL_CONFIG_VDEV_CHILDREN,
1816 DATA_TYPE_UINT64, NULL, &vdev_children, NULL);
1817 name = malloc(namelen + 1);
1819 nvlist_destroy(nvl);
1822 bcopy(pool_name, name, namelen);
1823 name[namelen] = '\0';
1824 spa = spa_create(pool_guid, name);
1827 nvlist_destroy(nvl);
1830 spa->spa_root_vdev->v_nchildren = vdev_children;
1832 if (pool_txg > spa->spa_txg)
1833 spa->spa_txg = pool_txg;
1836 * Get the vdev tree and create our in-core copy of it.
1837 * If we already have a vdev with this guid, this must
1838 * be some kind of alias (overlapping slices, dangerously dedicated
1841 if (nvlist_find(nvl, ZPOOL_CONFIG_GUID, DATA_TYPE_UINT64,
1842 NULL, &guid, NULL) != 0) {
1843 nvlist_destroy(nvl);
1846 vdev = vdev_find(guid);
1847 /* Has this vdev already been inited? */
1848 if (vdev && vdev->v_phys_read) {
1849 nvlist_destroy(nvl);
1853 rc = vdev_init_from_label(spa, nvl);
1854 nvlist_destroy(nvl);
1859 * We should already have created an incomplete vdev for this
1860 * vdev. Find it and initialise it with our read proc.
1862 vdev = vdev_find(guid);
1864 vdev->v_phys_read = _read;
1865 vdev->v_read_priv = read_priv;
1866 vdev->v_psize = vtmp.v_psize;
1868 * If no other state is set, mark vdev healthy.
1870 if (vdev->v_state == VDEV_STATE_UNKNOWN)
1871 vdev->v_state = VDEV_STATE_HEALTHY;
1873 printf("ZFS: inconsistent nvlist contents\n");
1878 spa->spa_with_log = vdev->v_islog;
1881 * Re-evaluate top-level vdev state.
1883 vdev_set_state(vdev->v_top);
1886 * OK, we are happy with the pool so far. Let's find
1887 * the best uberblock and then we can actually access
1888 * the contents of the pool.
1890 vdev_uberblock_load(vdev, spa->spa_uberblock);
1902 for (v = 0; v < 32; v++)
1909 zio_read_gang(const spa_t *spa, const blkptr_t *bp, void *buf)
1912 zio_gbh_phys_t zio_gb;
1916 /* Artificial BP for gang block header. */
1918 BP_SET_PSIZE(&gbh_bp, SPA_GANGBLOCKSIZE);
1919 BP_SET_LSIZE(&gbh_bp, SPA_GANGBLOCKSIZE);
1920 BP_SET_CHECKSUM(&gbh_bp, ZIO_CHECKSUM_GANG_HEADER);
1921 BP_SET_COMPRESS(&gbh_bp, ZIO_COMPRESS_OFF);
1922 for (i = 0; i < SPA_DVAS_PER_BP; i++)
1923 DVA_SET_GANG(&gbh_bp.blk_dva[i], 0);
1925 /* Read gang header block using the artificial BP. */
1926 if (zio_read(spa, &gbh_bp, &zio_gb))
1930 for (i = 0; i < SPA_GBH_NBLKPTRS; i++) {
1931 blkptr_t *gbp = &zio_gb.zg_blkptr[i];
1933 if (BP_IS_HOLE(gbp))
1935 if (zio_read(spa, gbp, pbuf))
1937 pbuf += BP_GET_PSIZE(gbp);
1940 if (zio_checksum_verify(spa, bp, buf))
1946 zio_read(const spa_t *spa, const blkptr_t *bp, void *buf)
1948 int cpfunc = BP_GET_COMPRESS(bp);
1949 uint64_t align, size;
1954 * Process data embedded in block pointer
1956 if (BP_IS_EMBEDDED(bp)) {
1957 ASSERT(BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA);
1959 size = BPE_GET_PSIZE(bp);
1960 ASSERT(size <= BPE_PAYLOAD_SIZE);
1962 if (cpfunc != ZIO_COMPRESS_OFF)
1963 pbuf = malloc(size);
1970 decode_embedded_bp_compressed(bp, pbuf);
1973 if (cpfunc != ZIO_COMPRESS_OFF) {
1974 error = zio_decompress_data(cpfunc, pbuf,
1975 size, buf, BP_GET_LSIZE(bp));
1979 printf("ZFS: i/o error - unable to decompress "
1980 "block pointer data, error %d\n", error);
1986 for (i = 0; i < SPA_DVAS_PER_BP; i++) {
1987 const dva_t *dva = &bp->blk_dva[i];
1993 if (!dva->dva_word[0] && !dva->dva_word[1])
1996 vdevid = DVA_GET_VDEV(dva);
1997 offset = DVA_GET_OFFSET(dva);
1998 vlist = &spa->spa_root_vdev->v_children;
1999 STAILQ_FOREACH(vdev, vlist, v_childlink) {
2000 if (vdev->v_id == vdevid)
2003 if (!vdev || !vdev->v_read)
2006 size = BP_GET_PSIZE(bp);
2007 if (vdev->v_read == vdev_raidz_read) {
2008 align = 1ULL << vdev->v_ashift;
2009 if (P2PHASE(size, align) != 0)
2010 size = P2ROUNDUP(size, align);
2012 if (size != BP_GET_PSIZE(bp) || cpfunc != ZIO_COMPRESS_OFF)
2013 pbuf = malloc(size);
2022 if (DVA_GET_GANG(dva))
2023 error = zio_read_gang(spa, bp, pbuf);
2025 error = vdev->v_read(vdev, bp, pbuf, offset, size);
2027 if (cpfunc != ZIO_COMPRESS_OFF)
2028 error = zio_decompress_data(cpfunc, pbuf,
2029 BP_GET_PSIZE(bp), buf, BP_GET_LSIZE(bp));
2030 else if (size != BP_GET_PSIZE(bp))
2031 bcopy(pbuf, buf, BP_GET_PSIZE(bp));
2033 printf("zio_read error: %d\n", error);
2041 printf("ZFS: i/o error - all block copies unavailable\n");
2047 dnode_read(const spa_t *spa, const dnode_phys_t *dnode, off_t offset,
2048 void *buf, size_t buflen)
2050 int ibshift = dnode->dn_indblkshift - SPA_BLKPTRSHIFT;
2051 int bsize = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
2052 int nlevels = dnode->dn_nlevels;
2055 if (bsize > SPA_MAXBLOCKSIZE) {
2056 printf("ZFS: I/O error - blocks larger than %llu are not "
2057 "supported\n", SPA_MAXBLOCKSIZE);
2062 * Note: bsize may not be a power of two here so we need to do an
2063 * actual divide rather than a bitshift.
2065 while (buflen > 0) {
2066 uint64_t bn = offset / bsize;
2067 int boff = offset % bsize;
2069 const blkptr_t *indbp;
2072 if (bn > dnode->dn_maxblkid)
2075 if (dnode == dnode_cache_obj && bn == dnode_cache_bn)
2078 indbp = dnode->dn_blkptr;
2079 for (i = 0; i < nlevels; i++) {
2081 * Copy the bp from the indirect array so that
2082 * we can re-use the scratch buffer for multi-level
2085 ibn = bn >> ((nlevels - i - 1) * ibshift);
2086 ibn &= ((1 << ibshift) - 1);
2088 if (BP_IS_HOLE(&bp)) {
2089 memset(dnode_cache_buf, 0, bsize);
2092 rc = zio_read(spa, &bp, dnode_cache_buf);
2095 indbp = (const blkptr_t *) dnode_cache_buf;
2097 dnode_cache_obj = dnode;
2098 dnode_cache_bn = bn;
2102 * The buffer contains our data block. Copy what we
2103 * need from it and loop.
2106 if (i > buflen) i = buflen;
2107 memcpy(buf, &dnode_cache_buf[boff], i);
2108 buf = ((char *)buf) + i;
2117 * Lookup a value in a microzap directory.
2120 mzap_lookup(const mzap_phys_t *mz, size_t size, const char *name,
2123 const mzap_ent_phys_t *mze;
2127 * Microzap objects use exactly one block. Read the whole
2130 chunks = size / MZAP_ENT_LEN - 1;
2131 for (i = 0; i < chunks; i++) {
2132 mze = &mz->mz_chunk[i];
2133 if (strcmp(mze->mze_name, name) == 0) {
2134 *value = mze->mze_value;
2143 * Compare a name with a zap leaf entry. Return non-zero if the name
2147 fzap_name_equal(const zap_leaf_t *zl, const zap_leaf_chunk_t *zc,
2151 const zap_leaf_chunk_t *nc;
2154 namelen = zc->l_entry.le_name_numints;
2156 nc = &ZAP_LEAF_CHUNK(zl, zc->l_entry.le_name_chunk);
2158 while (namelen > 0) {
2162 if (len > ZAP_LEAF_ARRAY_BYTES)
2163 len = ZAP_LEAF_ARRAY_BYTES;
2164 if (memcmp(p, nc->l_array.la_array, len))
2168 nc = &ZAP_LEAF_CHUNK(zl, nc->l_array.la_next);
2175 * Extract a uint64_t value from a zap leaf entry.
2178 fzap_leaf_value(const zap_leaf_t *zl, const zap_leaf_chunk_t *zc)
2180 const zap_leaf_chunk_t *vc;
2185 vc = &ZAP_LEAF_CHUNK(zl, zc->l_entry.le_value_chunk);
2186 for (i = 0, value = 0, p = vc->l_array.la_array; i < 8; i++) {
2187 value = (value << 8) | p[i];
2194 stv(int len, void *addr, uint64_t value)
2198 *(uint8_t *)addr = value;
2201 *(uint16_t *)addr = value;
2204 *(uint32_t *)addr = value;
2207 *(uint64_t *)addr = value;
2213 * Extract an array from a zap leaf entry.
2216 fzap_leaf_array(const zap_leaf_t *zl, const zap_leaf_chunk_t *zc,
2217 uint64_t integer_size, uint64_t num_integers, void *buf)
2219 uint64_t array_int_len = zc->l_entry.le_value_intlen;
2221 uint64_t *u64 = buf;
2223 int len = MIN(zc->l_entry.le_value_numints, num_integers);
2224 int chunk = zc->l_entry.le_value_chunk;
2227 if (integer_size == 8 && len == 1) {
2228 *u64 = fzap_leaf_value(zl, zc);
2233 struct zap_leaf_array *la = &ZAP_LEAF_CHUNK(zl, chunk).l_array;
2236 ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(zl));
2237 for (i = 0; i < ZAP_LEAF_ARRAY_BYTES && len > 0; i++) {
2238 value = (value << 8) | la->la_array[i];
2240 if (byten == array_int_len) {
2241 stv(integer_size, p, value);
2249 chunk = la->la_next;
2254 fzap_check_size(uint64_t integer_size, uint64_t num_integers)
2257 switch (integer_size) {
2267 if (integer_size * num_integers > ZAP_MAXVALUELEN)
2274 zap_leaf_free(zap_leaf_t *leaf)
2281 zap_get_leaf_byblk(fat_zap_t *zap, uint64_t blk, zap_leaf_t **lp)
2283 int bs = FZAP_BLOCK_SHIFT(zap);
2286 *lp = malloc(sizeof(**lp));
2291 (*lp)->l_phys = malloc(1 << bs);
2293 if ((*lp)->l_phys == NULL) {
2297 err = dnode_read(zap->zap_spa, zap->zap_dnode, blk << bs, (*lp)->l_phys,
2306 zap_table_load(fat_zap_t *zap, zap_table_phys_t *tbl, uint64_t idx,
2309 int bs = FZAP_BLOCK_SHIFT(zap);
2310 uint64_t blk = idx >> (bs - 3);
2311 uint64_t off = idx & ((1 << (bs - 3)) - 1);
2315 buf = malloc(1 << zap->zap_block_shift);
2318 rc = dnode_read(zap->zap_spa, zap->zap_dnode, (tbl->zt_blk + blk) << bs,
2319 buf, 1 << zap->zap_block_shift);
2327 zap_idx_to_blk(fat_zap_t *zap, uint64_t idx, uint64_t *valp)
2329 if (zap->zap_phys->zap_ptrtbl.zt_numblks == 0) {
2330 *valp = ZAP_EMBEDDED_PTRTBL_ENT(zap, idx);
2333 return (zap_table_load(zap, &zap->zap_phys->zap_ptrtbl,
2338 #define ZAP_HASH_IDX(hash, n) (((n) == 0) ? 0 : ((hash) >> (64 - (n))))
2340 zap_deref_leaf(fat_zap_t *zap, uint64_t h, zap_leaf_t **lp)
2345 idx = ZAP_HASH_IDX(h, zap->zap_phys->zap_ptrtbl.zt_shift);
2346 err = zap_idx_to_blk(zap, idx, &blk);
2349 return (zap_get_leaf_byblk(zap, blk, lp));
2352 #define CHAIN_END 0xffff /* end of the chunk chain */
2353 #define LEAF_HASH(l, h) \
2354 ((ZAP_LEAF_HASH_NUMENTRIES(l)-1) & \
2356 (64 - ZAP_LEAF_HASH_SHIFT(l) - (l)->l_phys->l_hdr.lh_prefix_len)))
2357 #define LEAF_HASH_ENTPTR(l, h) (&(l)->l_phys->l_hash[LEAF_HASH(l, h)])
2360 zap_leaf_lookup(zap_leaf_t *zl, uint64_t hash, const char *name,
2361 uint64_t integer_size, uint64_t num_integers, void *value)
2365 struct zap_leaf_entry *le;
2368 * Make sure this chunk matches our hash.
2370 if (zl->l_phys->l_hdr.lh_prefix_len > 0 &&
2371 zl->l_phys->l_hdr.lh_prefix !=
2372 hash >> (64 - zl->l_phys->l_hdr.lh_prefix_len))
2376 for (chunkp = LEAF_HASH_ENTPTR(zl, hash);
2377 *chunkp != CHAIN_END; chunkp = &le->le_next) {
2378 zap_leaf_chunk_t *zc;
2379 uint16_t chunk = *chunkp;
2381 le = ZAP_LEAF_ENTRY(zl, chunk);
2382 if (le->le_hash != hash)
2384 zc = &ZAP_LEAF_CHUNK(zl, chunk);
2385 if (fzap_name_equal(zl, zc, name)) {
2386 if (zc->l_entry.le_value_intlen > integer_size) {
2389 fzap_leaf_array(zl, zc, integer_size,
2390 num_integers, value);
2400 * Lookup a value in a fatzap directory.
2403 fzap_lookup(const spa_t *spa, const dnode_phys_t *dnode, zap_phys_t *zh,
2404 const char *name, uint64_t integer_size, uint64_t num_integers,
2407 int bsize = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
2413 if (zh->zap_magic != ZAP_MAGIC)
2416 if ((rc = fzap_check_size(integer_size, num_integers)) != 0) {
2420 z.zap_block_shift = ilog2(bsize);
2423 z.zap_dnode = dnode;
2425 hash = zap_hash(zh->zap_salt, name);
2426 rc = zap_deref_leaf(&z, hash, &zl);
2430 rc = zap_leaf_lookup(zl, hash, name, integer_size, num_integers, value);
2437 * Lookup a name in a zap object and return its value as a uint64_t.
2440 zap_lookup(const spa_t *spa, const dnode_phys_t *dnode, const char *name,
2441 uint64_t integer_size, uint64_t num_integers, void *value)
2445 size_t size = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
2451 rc = dnode_read(spa, dnode, 0, zap, size);
2455 switch (zap->zap_block_type) {
2457 rc = mzap_lookup((const mzap_phys_t *)zap, size, name, value);
2460 rc = fzap_lookup(spa, dnode, zap, name, integer_size,
2461 num_integers, value);
2464 printf("ZFS: invalid zap_type=%" PRIx64 "\n",
2465 zap->zap_block_type);
2474 * List a microzap directory.
2477 mzap_list(const mzap_phys_t *mz, size_t size,
2478 int (*callback)(const char *, uint64_t))
2480 const mzap_ent_phys_t *mze;
2484 * Microzap objects use exactly one block. Read the whole
2488 chunks = size / MZAP_ENT_LEN - 1;
2489 for (i = 0; i < chunks; i++) {
2490 mze = &mz->mz_chunk[i];
2491 if (mze->mze_name[0]) {
2492 rc = callback(mze->mze_name, mze->mze_value);
2502 * List a fatzap directory.
2505 fzap_list(const spa_t *spa, const dnode_phys_t *dnode, zap_phys_t *zh,
2506 int (*callback)(const char *, uint64_t))
2508 int bsize = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
2513 if (zh->zap_magic != ZAP_MAGIC)
2516 z.zap_block_shift = ilog2(bsize);
2520 * This assumes that the leaf blocks start at block 1. The
2521 * documentation isn't exactly clear on this.
2524 zl.l_bs = z.zap_block_shift;
2525 zl.l_phys = malloc(bsize);
2526 if (zl.l_phys == NULL)
2529 for (i = 0; i < zh->zap_num_leafs; i++) {
2530 off_t off = ((off_t)(i + 1)) << zl.l_bs;
2534 if (dnode_read(spa, dnode, off, zl.l_phys, bsize)) {
2539 for (j = 0; j < ZAP_LEAF_NUMCHUNKS(&zl); j++) {
2540 zap_leaf_chunk_t *zc, *nc;
2543 zc = &ZAP_LEAF_CHUNK(&zl, j);
2544 if (zc->l_entry.le_type != ZAP_CHUNK_ENTRY)
2546 namelen = zc->l_entry.le_name_numints;
2547 if (namelen > sizeof(name))
2548 namelen = sizeof(name);
2551 * Paste the name back together.
2553 nc = &ZAP_LEAF_CHUNK(&zl, zc->l_entry.le_name_chunk);
2555 while (namelen > 0) {
2558 if (len > ZAP_LEAF_ARRAY_BYTES)
2559 len = ZAP_LEAF_ARRAY_BYTES;
2560 memcpy(p, nc->l_array.la_array, len);
2563 nc = &ZAP_LEAF_CHUNK(&zl, nc->l_array.la_next);
2567 * Assume the first eight bytes of the value are
2570 value = fzap_leaf_value(&zl, zc);
2572 /* printf("%s 0x%jx\n", name, (uintmax_t)value); */
2573 rc = callback((const char *)name, value);
2585 static int zfs_printf(const char *name, uint64_t value __unused)
2588 printf("%s\n", name);
2594 * List a zap directory.
2597 zap_list(const spa_t *spa, const dnode_phys_t *dnode)
2600 size_t size = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
2607 rc = dnode_read(spa, dnode, 0, zap, size);
2609 if (zap->zap_block_type == ZBT_MICRO)
2610 rc = mzap_list((const mzap_phys_t *)zap, size,
2613 rc = fzap_list(spa, dnode, zap, zfs_printf);
2620 objset_get_dnode(const spa_t *spa, const objset_phys_t *os, uint64_t objnum,
2621 dnode_phys_t *dnode)
2625 offset = objnum * sizeof(dnode_phys_t);
2626 return dnode_read(spa, &os->os_meta_dnode, offset,
2627 dnode, sizeof(dnode_phys_t));
2631 * Look up the name for a given value in a microzap directory.
2634 mzap_rlookup(const mzap_phys_t *mz, size_t size, char *name, uint64_t value)
2636 const mzap_ent_phys_t *mze;
2640 * Microzap objects use exactly one block. Read the whole
2643 chunks = size / MZAP_ENT_LEN - 1;
2644 for (i = 0; i < chunks; i++) {
2645 mze = &mz->mz_chunk[i];
2646 if (value == mze->mze_value) {
2647 strcpy(name, mze->mze_name);
2656 fzap_name_copy(const zap_leaf_t *zl, const zap_leaf_chunk_t *zc, char *name)
2659 const zap_leaf_chunk_t *nc;
2662 namelen = zc->l_entry.le_name_numints;
2664 nc = &ZAP_LEAF_CHUNK(zl, zc->l_entry.le_name_chunk);
2666 while (namelen > 0) {
2669 if (len > ZAP_LEAF_ARRAY_BYTES)
2670 len = ZAP_LEAF_ARRAY_BYTES;
2671 memcpy(p, nc->l_array.la_array, len);
2674 nc = &ZAP_LEAF_CHUNK(zl, nc->l_array.la_next);
2681 fzap_rlookup(const spa_t *spa, const dnode_phys_t *dnode, zap_phys_t *zh,
2682 char *name, uint64_t value)
2684 int bsize = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
2689 if (zh->zap_magic != ZAP_MAGIC)
2692 z.zap_block_shift = ilog2(bsize);
2696 * This assumes that the leaf blocks start at block 1. The
2697 * documentation isn't exactly clear on this.
2700 zl.l_bs = z.zap_block_shift;
2701 zl.l_phys = malloc(bsize);
2702 if (zl.l_phys == NULL)
2705 for (i = 0; i < zh->zap_num_leafs; i++) {
2706 off_t off = ((off_t)(i + 1)) << zl.l_bs;
2708 rc = dnode_read(spa, dnode, off, zl.l_phys, bsize);
2712 for (j = 0; j < ZAP_LEAF_NUMCHUNKS(&zl); j++) {
2713 zap_leaf_chunk_t *zc;
2715 zc = &ZAP_LEAF_CHUNK(&zl, j);
2716 if (zc->l_entry.le_type != ZAP_CHUNK_ENTRY)
2718 if (zc->l_entry.le_value_intlen != 8 ||
2719 zc->l_entry.le_value_numints != 1)
2722 if (fzap_leaf_value(&zl, zc) == value) {
2723 fzap_name_copy(&zl, zc, name);
2736 zap_rlookup(const spa_t *spa, const dnode_phys_t *dnode, char *name,
2740 size_t size = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
2747 rc = dnode_read(spa, dnode, 0, zap, size);
2749 if (zap->zap_block_type == ZBT_MICRO)
2750 rc = mzap_rlookup((const mzap_phys_t *)zap, size,
2753 rc = fzap_rlookup(spa, dnode, zap, name, value);
2760 zfs_rlookup(const spa_t *spa, uint64_t objnum, char *result)
2763 char component[256];
2764 uint64_t dir_obj, parent_obj, child_dir_zapobj;
2765 dnode_phys_t child_dir_zap, dataset, dir, parent;
2767 dsl_dataset_phys_t *ds;
2771 p = &name[sizeof(name) - 1];
2774 if (objset_get_dnode(spa, spa->spa_mos, objnum, &dataset)) {
2775 printf("ZFS: can't find dataset %ju\n", (uintmax_t)objnum);
2778 ds = (dsl_dataset_phys_t *)&dataset.dn_bonus;
2779 dir_obj = ds->ds_dir_obj;
2782 if (objset_get_dnode(spa, spa->spa_mos, dir_obj, &dir) != 0)
2784 dd = (dsl_dir_phys_t *)&dir.dn_bonus;
2786 /* Actual loop condition. */
2787 parent_obj = dd->dd_parent_obj;
2788 if (parent_obj == 0)
2791 if (objset_get_dnode(spa, spa->spa_mos, parent_obj,
2794 dd = (dsl_dir_phys_t *)&parent.dn_bonus;
2795 child_dir_zapobj = dd->dd_child_dir_zapobj;
2796 if (objset_get_dnode(spa, spa->spa_mos, child_dir_zapobj,
2797 &child_dir_zap) != 0)
2799 if (zap_rlookup(spa, &child_dir_zap, component, dir_obj) != 0)
2802 len = strlen(component);
2804 memcpy(p, component, len);
2808 /* Actual loop iteration. */
2809 dir_obj = parent_obj;
2820 zfs_lookup_dataset(const spa_t *spa, const char *name, uint64_t *objnum)
2823 uint64_t dir_obj, child_dir_zapobj;
2824 dnode_phys_t child_dir_zap, dir;
2828 if (objset_get_dnode(spa, spa->spa_mos,
2829 DMU_POOL_DIRECTORY_OBJECT, &dir))
2831 if (zap_lookup(spa, &dir, DMU_POOL_ROOT_DATASET, sizeof (dir_obj),
2837 if (objset_get_dnode(spa, spa->spa_mos, dir_obj, &dir))
2839 dd = (dsl_dir_phys_t *)&dir.dn_bonus;
2843 /* Actual loop condition #1. */
2849 memcpy(element, p, q - p);
2850 element[q - p] = '\0';
2857 child_dir_zapobj = dd->dd_child_dir_zapobj;
2858 if (objset_get_dnode(spa, spa->spa_mos, child_dir_zapobj,
2859 &child_dir_zap) != 0)
2862 /* Actual loop condition #2. */
2863 if (zap_lookup(spa, &child_dir_zap, element, sizeof (dir_obj),
2868 *objnum = dd->dd_head_dataset_obj;
2874 zfs_list_dataset(const spa_t *spa, uint64_t objnum/*, int pos, char *entry*/)
2876 uint64_t dir_obj, child_dir_zapobj;
2877 dnode_phys_t child_dir_zap, dir, dataset;
2878 dsl_dataset_phys_t *ds;
2881 if (objset_get_dnode(spa, spa->spa_mos, objnum, &dataset)) {
2882 printf("ZFS: can't find dataset %ju\n", (uintmax_t)objnum);
2885 ds = (dsl_dataset_phys_t *)&dataset.dn_bonus;
2886 dir_obj = ds->ds_dir_obj;
2888 if (objset_get_dnode(spa, spa->spa_mos, dir_obj, &dir)) {
2889 printf("ZFS: can't find dirobj %ju\n", (uintmax_t)dir_obj);
2892 dd = (dsl_dir_phys_t *)&dir.dn_bonus;
2894 child_dir_zapobj = dd->dd_child_dir_zapobj;
2895 if (objset_get_dnode(spa, spa->spa_mos, child_dir_zapobj,
2896 &child_dir_zap) != 0) {
2897 printf("ZFS: can't find child zap %ju\n", (uintmax_t)dir_obj);
2901 return (zap_list(spa, &child_dir_zap) != 0);
2905 zfs_callback_dataset(const spa_t *spa, uint64_t objnum,
2906 int (*callback)(const char *, uint64_t))
2908 uint64_t dir_obj, child_dir_zapobj;
2909 dnode_phys_t child_dir_zap, dir, dataset;
2910 dsl_dataset_phys_t *ds;
2916 err = objset_get_dnode(spa, spa->spa_mos, objnum, &dataset);
2918 printf("ZFS: can't find dataset %ju\n", (uintmax_t)objnum);
2921 ds = (dsl_dataset_phys_t *)&dataset.dn_bonus;
2922 dir_obj = ds->ds_dir_obj;
2924 err = objset_get_dnode(spa, spa->spa_mos, dir_obj, &dir);
2926 printf("ZFS: can't find dirobj %ju\n", (uintmax_t)dir_obj);
2929 dd = (dsl_dir_phys_t *)&dir.dn_bonus;
2931 child_dir_zapobj = dd->dd_child_dir_zapobj;
2932 err = objset_get_dnode(spa, spa->spa_mos, child_dir_zapobj,
2935 printf("ZFS: can't find child zap %ju\n", (uintmax_t)dir_obj);
2939 size = child_dir_zap.dn_datablkszsec << SPA_MINBLOCKSHIFT;
2942 err = dnode_read(spa, &child_dir_zap, 0, zap, size);
2946 if (zap->zap_block_type == ZBT_MICRO)
2947 err = mzap_list((const mzap_phys_t *)zap, size,
2950 err = fzap_list(spa, &child_dir_zap, zap, callback);
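/*
 * zfs_callback_dataset() is the programmatic variant of zfs_list_dataset():
 * instead of printing, it reads the child-dir ZAP itself and hands every
 * (name, value) pair to the supplied callback, using the usual ZBT_MICRO
 * test to pick between the micro and fat ZAP walkers.
 *
 * Hedged example callback (not part of the original source):
 *
 *	static int
 *	print_child(const char *name, uint64_t value)
 *	{
 *		printf("%s -> %ju\n", name, (uintmax_t)value);
 *		return (0);
 *	}
 */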
2961 * Find the object set given the object number of its dataset object
2962 * and return its details in *objset
2965 zfs_mount_dataset(const spa_t *spa, uint64_t objnum, objset_phys_t *objset)
2967 dnode_phys_t dataset;
2968 dsl_dataset_phys_t *ds;
2970 if (objset_get_dnode(spa, spa->spa_mos, objnum, &dataset)) {
2971 printf("ZFS: can't find dataset %ju\n", (uintmax_t)objnum);
2975 ds = (dsl_dataset_phys_t *)&dataset.dn_bonus;
2976 if (zio_read(spa, &ds->ds_bp, objset)) {
2977 printf("ZFS: can't read object set for dataset %ju\n",
2986 * Find the dataset pointed to by the BOOTFS property, or the root
2987 * dataset if there is none, and return its object number in *objid
2990 zfs_get_root(const spa_t *spa, uint64_t *objid)
2992 dnode_phys_t dir, propdir;
2993 uint64_t props, bootfs, root;
2998 * Start with the MOS directory object.
3000 if (objset_get_dnode(spa, spa->spa_mos,
3001 DMU_POOL_DIRECTORY_OBJECT, &dir)) {
3002 printf("ZFS: can't read MOS object directory\n");
3007 * Look up the pool_props and see if we can find a bootfs.
3009 if (zap_lookup(spa, &dir, DMU_POOL_PROPS,
3010 sizeof(props), 1, &props) == 0 &&
3011 objset_get_dnode(spa, spa->spa_mos, props, &propdir) == 0 &&
3012 zap_lookup(spa, &propdir, "bootfs",
3013 sizeof(bootfs), 1, &bootfs) == 0 && bootfs != 0) {
3018 * Look up the root dataset directory
3020 if (zap_lookup(spa, &dir, DMU_POOL_ROOT_DATASET,
3021 sizeof(root), 1, &root) ||
3022 objset_get_dnode(spa, spa->spa_mos, root, &dir)) {
3023 printf("ZFS: can't find root dsl_dir\n");
3028 * Use the information from the dataset directory's bonus buffer
3029 * to find the dataset object and from that the object set itself.
3031 dsl_dir_phys_t *dd = (dsl_dir_phys_t *)&dir.dn_bonus;
3032 *objid = dd->dd_head_dataset_obj;
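/*
 * Resolution order for the boot filesystem: if the pool-properties ZAP
 * (DMU_POOL_PROPS) exists and carries a non-zero "bootfs" value, that
 * object number is used; otherwise we fall back to the head dataset of the
 * pool's root dsl_dir found through DMU_POOL_ROOT_DATASET.
 */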
3037 zfs_mount(const spa_t *spa, uint64_t rootobj, struct zfsmount *mount)
3043 * Find the root object set if not explicitly provided
3045 if (rootobj == 0 && zfs_get_root(spa, &rootobj)) {
3046 printf("ZFS: can't find root filesystem\n");
3050 if (zfs_mount_dataset(spa, rootobj, &mount->objset)) {
3051 printf("ZFS: can't open root filesystem\n");
3055 mount->rootobj = rootobj;
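/*
 * Hedged usage sketch (not part of the original source): mounting the
 * default boot filesystem and looking up a file in it could look like
 *
 *	struct zfsmount mnt;
 *	dnode_phys_t dn;
 *
 *	if (zfs_mount(spa, 0, &mnt) == 0)
 *		rc = zfs_lookup(&mnt, "/boot/loader.conf", &dn);
 *
 * Passing rootobj == 0 lets zfs_get_root() choose between bootfs and the
 * pool's root dataset; the path and variable names above are examples only.
 */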
3061 * Callback function for feature name checks.
3064 check_feature(const char *name, uint64_t value)
3070 if (name[0] == '\0')
3073 for (i = 0; features_for_read[i] != NULL; i++) {
3074 if (strcmp(name, features_for_read[i]) == 0)
3077 printf("ZFS: unsupported feature: %s\n", name);
3082 * Checks whether the MOS features that are active are supported.
3085 check_mos_features(const spa_t *spa)
3093 if ((rc = objset_get_dnode(spa, spa->spa_mos, DMU_OT_OBJECT_DIRECTORY,
3096 if ((rc = zap_lookup(spa, &dir, DMU_POOL_FEATURES_FOR_READ,
3097 sizeof (objnum), 1, &objnum)) != 0) {
3099 * It is an older pool without features. As we have already
3100 * tested the label, just return without raising an error.
3105 if ((rc = objset_get_dnode(spa, spa->spa_mos, objnum, &dir)) != 0)
3108 if (dir.dn_type != DMU_OTN_ZAP_METADATA)
3111 size = dir.dn_datablkszsec << SPA_MINBLOCKSHIFT;
3116 if (dnode_read(spa, &dir, 0, zap, size)) {
3121 if (zap->zap_block_type == ZBT_MICRO)
3122 rc = mzap_list((const mzap_phys_t *)zap, size, check_feature);
3124 rc = fzap_list(spa, &dir, zap, check_feature);
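/*
 * The feature gate works by enumeration: DMU_POOL_FEATURES_FOR_READ is a
 * ZAP whose entry names are feature GUIDs.  Each name is passed to
 * check_feature(), which accepts it only if it is empty or appears in
 * features_for_read[]; any other name makes the pool unsupported for this
 * reader.  Pools old enough to lack the ZAP are accepted as-is.
 */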
3131 load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
3139 if ((rc = objset_get_dnode(spa, spa->spa_mos, obj, &dir)) != 0)
3141 if (dir.dn_type != DMU_OT_PACKED_NVLIST &&
3142 dir.dn_bonustype != DMU_OT_PACKED_NVLIST_SIZE) {
3146 if (dir.dn_bonuslen != sizeof (uint64_t))
3149 size = *(uint64_t *)DN_BONUS(&dir);
3154 rc = dnode_read(spa, &dir, 0, nv, size);
3160 *value = nvlist_import(nv + 4, nv[0], nv[1]);
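/*
 * The config is stored as a packed nvlist object: the dnode's bonus buffer
 * holds the packed size as a single uint64_t and the object data is the
 * packed stream itself.  The stream starts with a four-byte header whose
 * first two bytes give the encoding method and endianness, which is why
 * nvlist_import() is handed nv + 4 along with nv[0] and nv[1].
 */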
3166 zfs_spa_init(spa_t *spa)
3168 struct uberblock checkpoint;
3170 uint64_t config_object;
3174 if (zio_read(spa, &spa->spa_uberblock->ub_rootbp, spa->spa_mos)) {
3175 printf("ZFS: can't read MOS of pool %s\n", spa->spa_name);
3178 if (spa->spa_mos->os_type != DMU_OST_META) {
3179 printf("ZFS: corrupted MOS of pool %s\n", spa->spa_name);
3183 if (objset_get_dnode(spa, &spa->spa_mos_master,
3184 DMU_POOL_DIRECTORY_OBJECT, &dir)) {
3185 printf("ZFS: failed to read pool %s directory object\n",
3189 /* this is allowed to fail, older pools do not have salt */
3190 rc = zap_lookup(spa, &dir, DMU_POOL_CHECKSUM_SALT, 1,
3191 sizeof (spa->spa_cksum_salt.zcs_bytes),
3192 spa->spa_cksum_salt.zcs_bytes);
3194 rc = check_mos_features(spa);
3196 printf("ZFS: pool %s is not supported\n", spa->spa_name);
3200 rc = zap_lookup(spa, &dir, DMU_POOL_CONFIG,
3201 sizeof (config_object), 1, &config_object);
3203 printf("ZFS: cannot read MOS %s\n", DMU_POOL_CONFIG);
3206 rc = load_nvlist(spa, config_object, &nvlist);
3210 rc = zap_lookup(spa, &dir, DMU_POOL_ZPOOL_CHECKPOINT,
3211 sizeof(uint64_t), sizeof(checkpoint) / sizeof(uint64_t),
3213 if (rc == 0 && checkpoint.ub_checkpoint_txg != 0) {
3214 memcpy(&spa->spa_uberblock_checkpoint, &checkpoint,
3215 sizeof(checkpoint));
3216 if (zio_read(spa, &spa->spa_uberblock_checkpoint.ub_rootbp,
3217 &spa->spa_mos_checkpoint)) {
3218 printf("ZFS: cannot read checkpoint data.\n");
3224 * Update vdevs from MOS config. Note, we do skip encoding bytes
3225 * here. See also vdev_label_read_config().
3227 rc = vdev_init_from_nvlist(spa, nvlist);
3228 nvlist_destroy(nvlist);
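/*
 * Pool initialization, in order: read the MOS from the active uberblock's
 * rootbp and sanity-check its type, read the optional checksum salt, verify
 * the read-side feature list, load the pool config nvlist from
 * DMU_POOL_CONFIG, pick up a checkpointed uberblock and its MOS when
 * DMU_POOL_ZPOOL_CHECKPOINT is present, and finally refresh the vdev tree
 * from the MOS copy of the config.
 */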
3233 zfs_dnode_stat(const spa_t *spa, dnode_phys_t *dn, struct stat *sb)
3236 if (dn->dn_bonustype != DMU_OT_SA) {
3237 znode_phys_t *zp = (znode_phys_t *)dn->dn_bonus;
3239 sb->st_mode = zp->zp_mode;
3240 sb->st_uid = zp->zp_uid;
3241 sb->st_gid = zp->zp_gid;
3242 sb->st_size = zp->zp_size;
3244 sa_hdr_phys_t *sahdrp;
3249 if (dn->dn_bonuslen != 0)
3250 sahdrp = (sa_hdr_phys_t *)DN_BONUS(dn);
3252 if ((dn->dn_flags & DNODE_FLAG_SPILL_BLKPTR) != 0) {
3253 blkptr_t *bp = DN_SPILL_BLKPTR(dn);
3256 size = BP_GET_LSIZE(bp);
3261 error = zio_read(spa, bp, buf);
3272 hdrsize = SA_HDR_SIZE(sahdrp);
3273 sb->st_mode = *(uint64_t *)((char *)sahdrp + hdrsize +
3275 sb->st_uid = *(uint64_t *)((char *)sahdrp + hdrsize +
3277 sb->st_gid = *(uint64_t *)((char *)sahdrp + hdrsize +
3279 sb->st_size = *(uint64_t *)((char *)sahdrp + hdrsize +
3288 zfs_dnode_readlink(const spa_t *spa, dnode_phys_t *dn, char *path, size_t psize)
3292 if (dn->dn_bonustype == DMU_OT_SA) {
3293 sa_hdr_phys_t *sahdrp = NULL;
3299 if (dn->dn_bonuslen != 0) {
3300 sahdrp = (sa_hdr_phys_t *)DN_BONUS(dn);
3304 if ((dn->dn_flags & DNODE_FLAG_SPILL_BLKPTR) == 0)
3306 bp = DN_SPILL_BLKPTR(dn);
3308 size = BP_GET_LSIZE(bp);
3313 rc = zio_read(spa, bp, buf);
3320 hdrsize = SA_HDR_SIZE(sahdrp);
3321 p = (char *)((uintptr_t)sahdrp + hdrsize + SA_SYMLINK_OFFSET);
3322 memcpy(path, p, psize);
3327 * The second test is purely to silence a bogus compiler
3328 * warning about accessing past the end of dn_bonus.
3330 if (psize + sizeof(znode_phys_t) <= dn->dn_bonuslen &&
3331 sizeof(znode_phys_t) <= sizeof(dn->dn_bonus)) {
3332 memcpy(path, &dn->dn_bonus[sizeof(znode_phys_t)], psize);
3334 rc = dnode_read(spa, dn, 0, path, psize);
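/*
 * Symlink targets live in one of three places, mirroring the stat path: for
 * SA dnodes the target sits SA_SYMLINK_OFFSET past the SA header, either in
 * the bonus buffer or in a spill block read with zio_read(); for legacy
 * znode_phys_t dnodes a short target is stored directly after the znode in
 * the bonus buffer, and a long one is read from the file contents with
 * dnode_read().
 */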
3341 STAILQ_ENTRY(obj_list) entry;
3345 * Look up a file and return its dnode.
3348 zfs_lookup(const struct zfsmount *mount, const char *upath, dnode_phys_t *dnode)
3357 int symlinks_followed = 0;
3359 struct obj_list *entry, *tentry;
3360 STAILQ_HEAD(, obj_list) on_cache = STAILQ_HEAD_INITIALIZER(on_cache);
3363 if (mount->objset.os_type != DMU_OST_ZFS) {
3364 printf("ZFS: unexpected object set type %ju\n",
3365 (uintmax_t)mount->objset.os_type);
3369 if ((entry = malloc(sizeof(struct obj_list))) == NULL)
3373 * Get the root directory dnode.
3375 rc = objset_get_dnode(spa, &mount->objset, MASTER_NODE_OBJ, &dn);
3381 rc = zap_lookup(spa, &dn, ZFS_ROOT_OBJ, sizeof(objnum), 1, &objnum);
3386 entry->objnum = objnum;
3387 STAILQ_INSERT_HEAD(&on_cache, entry, entry);
3389 rc = objset_get_dnode(spa, &mount->objset, objnum, &dn);
3395 rc = objset_get_dnode(spa, &mount->objset, objnum, &dn);
3404 while (*q != '\0' && *q != '/')
3408 if (p + 1 == q && p[0] == '.') {
3413 if (p + 2 == q && p[0] == '.' && p[1] == '.') {
3415 if (STAILQ_FIRST(&on_cache) ==
3416 STAILQ_LAST(&on_cache, obj_list, entry)) {
3420 entry = STAILQ_FIRST(&on_cache);
3421 STAILQ_REMOVE_HEAD(&on_cache, entry);
3423 objnum = (STAILQ_FIRST(&on_cache))->objnum;
3426 if (q - p + 1 > sizeof(element)) {
3430 memcpy(element, p, q - p);
3434 if ((rc = zfs_dnode_stat(spa, &dn, &sb)) != 0)
3436 if (!S_ISDIR(sb.st_mode)) {
3441 rc = zap_lookup(spa, &dn, element, sizeof (objnum), 1, &objnum);
3444 objnum = ZFS_DIRENT_OBJ(objnum);
3446 if ((entry = malloc(sizeof(struct obj_list))) == NULL) {
3450 entry->objnum = objnum;
3451 STAILQ_INSERT_HEAD(&on_cache, entry, entry);
3452 rc = objset_get_dnode(spa, &mount->objset, objnum, &dn);
3457 * Check for symlink.
3459 rc = zfs_dnode_stat(spa, &dn, &sb);
3462 if (S_ISLNK(sb.st_mode)) {
3463 if (symlinks_followed > 10) {
3467 symlinks_followed++;
3470 * Read the link value and copy the tail of our
3471 * current path onto the end.
3473 if (sb.st_size + strlen(p) + 1 > sizeof(path)) {
3477 strcpy(&path[sb.st_size], p);
3479 rc = zfs_dnode_readlink(spa, &dn, path, sb.st_size);
3484 * Restart with the new path, starting either at
3485 * the root or at the parent, depending on whether
3486 * or not the link is relative.
3490 while (STAILQ_FIRST(&on_cache) !=
3491 STAILQ_LAST(&on_cache, obj_list, entry)) {
3492 entry = STAILQ_FIRST(&on_cache);
3493 STAILQ_REMOVE_HEAD(&on_cache, entry);
3497 entry = STAILQ_FIRST(&on_cache);
3498 STAILQ_REMOVE_HEAD(&on_cache, entry);
3501 objnum = (STAILQ_FIRST(&on_cache))->objnum;
3507 STAILQ_FOREACH_SAFE(entry, &on_cache, entry, tentry)