2 * Copyright (c) 2007 Doug Rabson
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
31 * Stand-alone ZFS file reader.
34 #include <sys/endian.h>
36 #include <sys/stdint.h>
38 #include <machine/_inttypes.h>
49 static struct zfsmount zfsmount __unused;
52 * The indirect_child_t represents the vdev that we will read from when we
53 * need to read all copies of the data (e.g. for scrub or reconstruction).
54 * For plain (non-mirror) top-level vdevs (i.e. is_vdev is not a mirror),
55 * ic_vdev is the same as is_vdev. However, for mirror top-level vdevs,
56 * ic_vdev is a child of the mirror.
58 typedef struct indirect_child {
64 * The indirect_split_t represents one mapped segment of an i/o to the
65 * indirect vdev. For non-split (contiguously-mapped) blocks, there will be
66 * only one indirect_split_t, with is_split_offset==0 and is_size==io_size.
67 * For split blocks, there will be several of these.
69 typedef struct indirect_split {
70 list_node_t is_node; /* link on iv_splits */
73 * is_split_offset is the offset into the i/o.
74 * This is the sum of the previous splits' is_size's.
76 uint64_t is_split_offset;
78 vdev_t *is_vdev; /* top-level vdev */
79 uint64_t is_target_offset; /* offset on is_vdev */
81 int is_children; /* number of entries in is_child[] */
84 * is_good_child is the child that we are currently using to
85 * attempt reconstruction.
89 indirect_child_t is_child[1]; /* variable-length */
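/*
 * Illustrative example (values assumed, not from the original source):
 * suppose a 96K read from a removed vdev maps to two segments, the
 * first 64K now living on a two-way mirror and the remaining 32K on a
 * plain top-level vdev.  iv_splits would then hold:
 *
 *   split 0: is_split_offset == 0,   is_size == 64K, is_children == 2
 *            (one indirect_child_t per mirror leaf)
 *   split 1: is_split_offset == 64K, is_size == 32K, is_children == 1
 *            (ic_vdev == is_vdev, the plain top-level vdev)
 */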
93 * The indirect_vsd_t is associated with each i/o to the indirect vdev.
94 * It is the "Vdev-Specific Data" in the zio_t's io_vsd.
96 typedef struct indirect_vsd {
97 boolean_t iv_split_block;
98 boolean_t iv_reconstruct;
100 list_t iv_splits; /* list of indirect_split_t's */
104 * List of all vdevs, chained through v_alllink.
106 static vdev_list_t zfs_vdevs;
109 * List of ZFS features supported for read
111 static const char *features_for_read[] = {
112 "org.illumos:lz4_compress",
113 "com.delphix:hole_birth",
114 "com.delphix:extensible_dataset",
115 "com.delphix:embedded_data",
116 "org.open-zfs:large_blocks",
117 "org.illumos:sha512",
119 "org.zfsonlinux:large_dnode",
120 "com.joyent:multi_vdev_crash_dump",
121 "com.delphix:spacemap_histogram",
122 "com.delphix:zpool_checkpoint",
123 "com.delphix:spacemap_v2",
124 "com.datto:encryption",
125 "org.zfsonlinux:allocation_classes",
126 "com.datto:resilver_defer",
127 "com.delphix:device_removal",
128 "com.delphix:obsolete_counts",
129 "com.intel:allocation_classes",
134 * List of all pools, chained through spa_link.
136 static spa_list_t zfs_pools;
138 static const dnode_phys_t *dnode_cache_obj;
139 static uint64_t dnode_cache_bn;
140 static char *dnode_cache_buf;
142 static int zio_read(const spa_t *spa, const blkptr_t *bp, void *buf);
143 static int zfs_get_root(const spa_t *spa, uint64_t *objid);
144 static int zfs_rlookup(const spa_t *spa, uint64_t objnum, char *result);
145 static int zap_lookup(const spa_t *spa, const dnode_phys_t *dnode,
146 const char *name, uint64_t integer_size, uint64_t num_integers,
148 static int objset_get_dnode(const spa_t *, const objset_phys_t *, uint64_t,
150 static int dnode_read(const spa_t *, const dnode_phys_t *, off_t, void *,
152 static int vdev_indirect_read(vdev_t *, const blkptr_t *, void *, off_t,
154 static int vdev_mirror_read(vdev_t *, const blkptr_t *, void *, off_t, size_t);
155 vdev_indirect_mapping_t *vdev_indirect_mapping_open(spa_t *, objset_phys_t *,
157 vdev_indirect_mapping_entry_phys_t *
158 vdev_indirect_mapping_duplicate_adjacent_entries(vdev_t *, uint64_t,
159 uint64_t, uint64_t *);
164 STAILQ_INIT(&zfs_vdevs);
165 STAILQ_INIT(&zfs_pools);
167 dnode_cache_buf = malloc(SPA_MAXBLOCKSIZE);
173 nvlist_check_features_for_read(nvlist_t *nvl)
175 nvlist_t *features = NULL;
178 nv_string_t *nvp_name;
181 rc = nvlist_find(nvl, ZPOOL_CONFIG_FEATURES_FOR_READ,
182 DATA_TYPE_NVLIST, NULL, &features, NULL);
186 data = (nvs_data_t *)features->nv_data;
187 nvp = &data->nvl_pair; /* first pair in nvlist */
189 while (nvp->encoded_size != 0 && nvp->decoded_size != 0) {
192 nvp_name = (nv_string_t *)((uintptr_t)nvp + sizeof(*nvp));
195 for (i = 0; features_for_read[i] != NULL; i++) {
196 if (memcmp(nvp_name->nv_data, features_for_read[i],
197 nvp_name->nv_size) == 0) {
204 printf("ZFS: unsupported feature: %.*s\n",
205 nvp_name->nv_size, nvp_name->nv_data);
208 nvp = (nvp_header_t *)((uint8_t *)nvp + nvp->encoded_size);
210 nvlist_destroy(features);
216 vdev_read_phys(vdev_t *vdev, const blkptr_t *bp, void *buf,
217 off_t offset, size_t size)
222 if (!vdev->v_phys_read)
226 psize = BP_GET_PSIZE(bp);
231 rc = vdev->v_phys_read(vdev, vdev->v_read_priv, offset, buf, psize);
234 rc = zio_checksum_verify(vdev->v_spa, bp, buf);
240 typedef struct remap_segment {
244 uint64_t rs_split_offset;
248 static remap_segment_t *
249 rs_alloc(vdev_t *vd, uint64_t offset, uint64_t asize, uint64_t split_offset)
251 remap_segment_t *rs = malloc(sizeof (remap_segment_t));
255 rs->rs_offset = offset;
256 rs->rs_asize = asize;
257 rs->rs_split_offset = split_offset;
263 vdev_indirect_mapping_t *
264 vdev_indirect_mapping_open(spa_t *spa, objset_phys_t *os,
265 uint64_t mapping_object)
267 vdev_indirect_mapping_t *vim;
268 vdev_indirect_mapping_phys_t *vim_phys;
271 vim = calloc(1, sizeof (*vim));
275 vim->vim_dn = calloc(1, sizeof (*vim->vim_dn));
276 if (vim->vim_dn == NULL) {
281 rc = objset_get_dnode(spa, os, mapping_object, vim->vim_dn);
289 vim->vim_phys = malloc(sizeof (*vim->vim_phys));
290 if (vim->vim_phys == NULL) {
296 vim_phys = (vdev_indirect_mapping_phys_t *)DN_BONUS(vim->vim_dn);
297 *vim->vim_phys = *vim_phys;
299 vim->vim_objset = os;
300 vim->vim_object = mapping_object;
301 vim->vim_entries = NULL;
303 vim->vim_havecounts =
304 (vim->vim_dn->dn_bonuslen > VDEV_INDIRECT_MAPPING_SIZE_V0);
310 * Compare an offset with an indirect mapping entry; there are three
311 * possible scenarios:
313 * 1. The offset is "less than" the mapping entry; meaning the
314 * offset is less than the source offset of the mapping entry. In
315 * this case, there is no overlap between the offset and the
316 * mapping entry and -1 will be returned.
318 * 2. The offset is "greater than" the mapping entry; meaning the
319 * offset is greater than the mapping entry's source offset plus
320 * the entry's size. In this case, there is no overlap between
321 * the offset and the mapping entry and 1 will be returned.
323 * NOTE: If the offset is actually equal to the entry's offset
324 * plus size, this is considered to be "greater" than the entry,
325 * and this case applies (i.e. 1 will be returned). Thus, the
326 * entry's "range" can be considered to be inclusive at its
327 * start, but exclusive at its end: e.g. [src, src + size).
329 * 3. The last case to consider is if the offset actually falls
330 * within the mapping entry's range. If this is the case, the
331 * offset is considered to be "equal to" the mapping entry and
332 * 0 will be returned.
334 * NOTE: If the offset is equal to the entry's source offset,
335 * this case applies and 0 will be returned. If the offset is
336 * equal to the entry's source plus its size, this case does
337 * *not* apply (see "NOTE" above for scenario 2), and 1 will be returned.
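/*
 * Worked example (illustrative values): an entry with source offset
 * 0x1000 and size 0x200 covers [0x1000, 0x1200), so:
 *
 *   *key == 0x0fff -> -1 (before the entry)
 *   *key == 0x1000 ->  0 (start is inclusive)
 *   *key == 0x11ff ->  0 (inside the entry)
 *   *key == 0x1200 ->  1 (end is exclusive)
 */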
341 dva_mapping_overlap_compare(const void *v_key, const void *v_array_elem)
343 const uint64_t *key = v_key;
344 const vdev_indirect_mapping_entry_phys_t *array_elem =
346 uint64_t src_offset = DVA_MAPPING_GET_SRC_OFFSET(array_elem);
348 if (*key < src_offset) {
350 } else if (*key < src_offset + DVA_GET_ASIZE(&array_elem->vimep_dst)) {
358 * Return the mapping array entry at the given index.
360 static vdev_indirect_mapping_entry_phys_t *
361 vdev_indirect_mapping_entry(vdev_indirect_mapping_t *vim, uint64_t index)
367 if (vim->vim_phys->vimp_num_entries == 0)
370 if (vim->vim_entries == NULL) {
373 bsize = vim->vim_dn->dn_datablkszsec << SPA_MINBLOCKSHIFT;
374 size = vim->vim_phys->vimp_num_entries *
375 sizeof (*vim->vim_entries);
377 size = bsize / sizeof (*vim->vim_entries);
378 size *= sizeof (*vim->vim_entries);
380 vim->vim_entries = malloc(size);
381 if (vim->vim_entries == NULL)
383 vim->vim_num_entries = size / sizeof (*vim->vim_entries);
384 offset = index * sizeof (*vim->vim_entries);
387 /* We have data in vim_entries */
389 if (index >= vim->vim_entry_offset &&
390 index <= vim->vim_entry_offset + vim->vim_num_entries) {
391 index -= vim->vim_entry_offset;
392 return (&vim->vim_entries[index]);
394 offset = index * sizeof (*vim->vim_entries);
397 vim->vim_entry_offset = index;
398 size = vim->vim_num_entries * sizeof (*vim->vim_entries);
399 rc = dnode_read(vim->vim_spa, vim->vim_dn, offset, vim->vim_entries,
402 /* Read error, invalidate vim_entries. */
403 free(vim->vim_entries);
404 vim->vim_entries = NULL;
407 index -= vim->vim_entry_offset;
408 return (&vim->vim_entries[index]);
412 * Returns the mapping entry for the given offset.
414 * It's possible that the given offset will not be in the mapping table
415 * (i.e. no mapping entries contain this offset), in which case, the
416 * return value depends on the "next_if_missing" parameter.
418 * If the offset is not found in the table and "next_if_missing" is
419 * B_FALSE, then NULL will always be returned. The behavior is intended
420 * to allow consumers to get the entry corresponding to the offset
421 * parameter, iff the offset overlaps with an entry in the table.
423 * If the offset is not found in the table and "next_if_missing" is
424 * B_TRUE, then the entry nearest to the given offset will be returned,
425 * such that the entry's source offset is greater than the offset
426 * passed in (i.e. the "next" mapping entry in the table is returned, if
427 * the offset is missing from the table). If there are no entries whose
428 * source offset is greater than the passed in offset, NULL is returned.
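/*
 * Worked example (entries assumed for illustration): with mappings
 * covering [0, 100) and [200, 300), offset 150 overlaps nothing.  Per
 * the description above, "next_if_missing" == B_FALSE yields NULL,
 * while B_TRUE yields the [200, 300) entry; offset 350 yields NULL
 * either way, as no entry has a greater source offset.
 */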
430 static vdev_indirect_mapping_entry_phys_t *
431 vdev_indirect_mapping_entry_for_offset(vdev_indirect_mapping_t *vim,
434 ASSERT(vim->vim_phys->vimp_num_entries > 0);
436 vdev_indirect_mapping_entry_phys_t *entry;
438 uint64_t last = vim->vim_phys->vimp_num_entries - 1;
442 * We don't define these inside the while loop because we use
443 * their values when the offset isn't in the mapping.
448 while (last >= base) {
449 mid = base + ((last - base) >> 1);
451 entry = vdev_indirect_mapping_entry(vim, mid);
454 result = dva_mapping_overlap_compare(&offset, entry);
458 } else if (result < 0) {
468 * Given an indirect vdev and an extent on that vdev, this function duplicates the
469 * physical entries of the indirect mapping that correspond to the extent
470 * to a new array and returns a pointer to it. In addition, copied_entries
471 * is populated with the number of mapping entries that were duplicated.
473 * Finally, since we are doing an allocation, it is up to the caller to
474 * free the array allocated in this function.
476 vdev_indirect_mapping_entry_phys_t *
477 vdev_indirect_mapping_duplicate_adjacent_entries(vdev_t *vd, uint64_t offset,
478 uint64_t asize, uint64_t *copied_entries)
480 vdev_indirect_mapping_entry_phys_t *duplicate_mappings = NULL;
481 vdev_indirect_mapping_t *vim = vd->v_mapping;
482 uint64_t entries = 0;
484 vdev_indirect_mapping_entry_phys_t *first_mapping =
485 vdev_indirect_mapping_entry_for_offset(vim, offset);
486 ASSERT3P(first_mapping, !=, NULL);
488 vdev_indirect_mapping_entry_phys_t *m = first_mapping;
490 uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);
491 uint64_t inner_offset = offset - DVA_MAPPING_GET_SRC_OFFSET(m);
492 uint64_t inner_size = MIN(asize, size - inner_offset);
494 offset += inner_size;
500 size_t copy_length = entries * sizeof (*first_mapping);
501 duplicate_mappings = malloc(copy_length);
502 if (duplicate_mappings != NULL)
503 bcopy(first_mapping, duplicate_mappings, copy_length);
507 *copied_entries = entries;
509 return (duplicate_mappings);
513 vdev_lookup_top(spa_t *spa, uint64_t vdev)
518 vlist = &spa->spa_root_vdev->v_children;
519 STAILQ_FOREACH(rvd, vlist, v_childlink)
520 if (rvd->v_id == vdev)
527 * This is a callback for vdev_indirect_remap() which allocates an
528 * indirect_split_t for each split segment and adds it to iv_splits.
531 vdev_indirect_gather_splits(uint64_t split_offset, vdev_t *vd, uint64_t offset,
532 uint64_t size, void *arg)
536 indirect_vsd_t *iv = zio->io_vsd;
538 if (vd->v_read == vdev_indirect_read)
541 if (vd->v_read == vdev_mirror_read)
544 indirect_split_t *is =
545 malloc(offsetof(indirect_split_t, is_child[n]));
547 zio->io_error = ENOMEM;
550 bzero(is, offsetof(indirect_split_t, is_child[n]));
554 is->is_split_offset = split_offset;
555 is->is_target_offset = offset;
559 * Note that we only consider multiple copies of the data for
560 * *mirror* vdevs. We don't for "replacing" or "spare" vdevs, even
561 * though they use the same ops as mirror, because there's only one
562 * "good" copy under the replacing/spare.
564 if (vd->v_read == vdev_mirror_read) {
568 STAILQ_FOREACH(kid, &vd->v_children, v_childlink) {
569 is->is_child[i++].ic_vdev = kid;
572 is->is_child[0].ic_vdev = vd;
575 list_insert_tail(&iv->iv_splits, is);
579 vdev_indirect_remap(vdev_t *vd, uint64_t offset, uint64_t asize, void *arg)
582 spa_t *spa = vd->v_spa;
586 list_create(&stack, sizeof (remap_segment_t),
587 offsetof(remap_segment_t, rs_node));
589 rs = rs_alloc(vd, offset, asize, 0);
591 printf("vdev_indirect_remap: out of memory.\n");
592 zio->io_error = ENOMEM;
594 for (; rs != NULL; rs = list_remove_head(&stack)) {
595 vdev_t *v = rs->rs_vd;
596 uint64_t num_entries = 0;
597 /* vdev_indirect_mapping_t *vim = v->v_mapping; */
598 vdev_indirect_mapping_entry_phys_t *mapping =
599 vdev_indirect_mapping_duplicate_adjacent_entries(v,
600 rs->rs_offset, rs->rs_asize, &num_entries);
602 if (num_entries == 0)
603 zio->io_error = ENOMEM;
605 for (uint64_t i = 0; i < num_entries; i++) {
606 vdev_indirect_mapping_entry_phys_t *m = &mapping[i];
607 uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);
608 uint64_t dst_offset = DVA_GET_OFFSET(&m->vimep_dst);
609 uint64_t dst_vdev = DVA_GET_VDEV(&m->vimep_dst);
610 uint64_t inner_offset = rs->rs_offset -
611 DVA_MAPPING_GET_SRC_OFFSET(m);
612 uint64_t inner_size =
613 MIN(rs->rs_asize, size - inner_offset);
614 vdev_t *dst_v = vdev_lookup_top(spa, dst_vdev);
616 if (dst_v->v_read == vdev_indirect_read) {
619 o = rs_alloc(dst_v, dst_offset + inner_offset,
620 inner_size, rs->rs_split_offset);
622 printf("vdev_indirect_remap: "
624 zio->io_error = ENOMEM;
628 list_insert_head(&stack, o);
630 vdev_indirect_gather_splits(rs->rs_split_offset, dst_v,
631 dst_offset + inner_offset,
635 * vdev_indirect_gather_splits can fail with a memory
636 * allocation error; we cannot recover from it.
638 if (zio->io_error != 0)
640 rs->rs_offset += inner_size;
641 rs->rs_asize -= inner_size;
642 rs->rs_split_offset += inner_size;
647 if (zio->io_error != 0)
651 list_destroy(&stack);
655 vdev_indirect_map_free(zio_t *zio)
657 indirect_vsd_t *iv = zio->io_vsd;
658 indirect_split_t *is;
660 while ((is = list_head(&iv->iv_splits)) != NULL) {
661 for (int c = 0; c < is->is_children; c++) {
662 indirect_child_t *ic = &is->is_child[c];
665 list_remove(&iv->iv_splits, is);
672 vdev_indirect_read(vdev_t *vdev, const blkptr_t *bp, void *buf,
673 off_t offset, size_t bytes)
676 spa_t *spa = vdev->v_spa;
678 indirect_split_t *first;
681 iv = calloc(1, sizeof(*iv));
685 list_create(&iv->iv_splits,
686 sizeof (indirect_split_t), offsetof(indirect_split_t, is_node));
688 bzero(&zio, sizeof(zio));
690 zio.io_bp = (blkptr_t *)bp;
693 zio.io_offset = offset;
697 if (vdev->v_mapping == NULL) {
698 vdev_indirect_config_t *vic;
700 vic = &vdev->vdev_indirect_config;
701 vdev->v_mapping = vdev_indirect_mapping_open(spa,
702 &spa->spa_mos, vic->vic_mapping_object);
705 vdev_indirect_remap(vdev, offset, bytes, &zio);
706 if (zio.io_error != 0)
707 return (zio.io_error);
709 first = list_head(&iv->iv_splits);
710 if (first->is_size == zio.io_size) {
712 * This is not a split block; we are pointing to the entire
713 * data, which will checksum the same as the original data.
714 * Pass the BP down so that the child i/o can verify the
715 * checksum, and try a different location if available
716 * (e.g. on a mirror).
718 * While this special case could be handled the same as the
719 * general (split block) case, doing it this way ensures
720 * that the vast majority of blocks on indirect vdevs
721 * (which are not split) are handled identically to blocks
722 * on non-indirect vdevs. This allows us to be less strict
723 * about performance in the general (but rare) case.
725 rc = first->is_vdev->v_read(first->is_vdev, zio.io_bp,
726 zio.io_data, first->is_target_offset, bytes);
728 iv->iv_split_block = B_TRUE;
730 * Read one copy of each split segment, from the
731 * top-level vdev. Since we don't know the
732 * checksum of each split individually, the child
733 * zio can't ensure that we get the right data.
734 * E.g. if it's a mirror, it will just read from a
735 * random (healthy) leaf vdev. We have to verify
736 * the checksum in vdev_indirect_io_done().
738 for (indirect_split_t *is = list_head(&iv->iv_splits);
739 is != NULL; is = list_next(&iv->iv_splits, is)) {
740 char *ptr = zio.io_data;
742 rc = is->is_vdev->v_read(is->is_vdev, zio.io_bp,
743 ptr + is->is_split_offset, is->is_target_offset,
746 if (zio_checksum_verify(spa, zio.io_bp, zio.io_data))
752 vdev_indirect_map_free(&zio);
760 vdev_disk_read(vdev_t *vdev, const blkptr_t *bp, void *buf,
761 off_t offset, size_t bytes)
764 return (vdev_read_phys(vdev, bp, buf,
765 offset + VDEV_LABEL_START_SIZE, bytes));
769 vdev_missing_read(vdev_t *vdev __unused, const blkptr_t *bp __unused,
770 void *buf __unused, off_t offset __unused, size_t bytes __unused)
777 vdev_mirror_read(vdev_t *vdev, const blkptr_t *bp, void *buf,
778 off_t offset, size_t bytes)
784 STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) {
785 if (kid->v_state != VDEV_STATE_HEALTHY)
787 rc = kid->v_read(kid, bp, buf, offset, bytes);
796 vdev_replacing_read(vdev_t *vdev, const blkptr_t *bp, void *buf,
797 off_t offset, size_t bytes)
802 * Here we should have two kids:
803 * The first is the one being replaced; we can trust
804 * only this one to have valid data, but it might not be present.
805 * The second is the one we are replacing it with. It is most likely
806 * healthy, but we can't trust it to have the needed data, so we won't use it.
808 kid = STAILQ_FIRST(&vdev->v_children);
811 if (kid->v_state != VDEV_STATE_HEALTHY)
813 return (kid->v_read(kid, bp, buf, offset, bytes));
817 vdev_find(uint64_t guid)
821 STAILQ_FOREACH(vdev, &zfs_vdevs, v_alllink)
822 if (vdev->v_guid == guid)
829 vdev_create(uint64_t guid, vdev_read_t *_read)
832 vdev_indirect_config_t *vic;
834 vdev = calloc(1, sizeof(vdev_t));
836 STAILQ_INIT(&vdev->v_children);
838 vdev->v_read = _read;
841 * The root vdev has no read function; we use this fact to
842 * skip setting up data we do not need for the root vdev.
843 * The root vdev is only referenced from the spa.
846 vic = &vdev->vdev_indirect_config;
847 vic->vic_prev_indirect_vdev = UINT64_MAX;
848 STAILQ_INSERT_TAIL(&zfs_vdevs, vdev, v_alllink);
856 vdev_set_initial_state(vdev_t *vdev, const nvlist_t *nvlist)
858 uint64_t is_offline, is_faulted, is_degraded, is_removed, isnt_present;
861 is_offline = is_removed = is_faulted = is_degraded = isnt_present = 0;
863 (void) nvlist_find(nvlist, ZPOOL_CONFIG_OFFLINE, DATA_TYPE_UINT64, NULL,
865 (void) nvlist_find(nvlist, ZPOOL_CONFIG_REMOVED, DATA_TYPE_UINT64, NULL,
867 (void) nvlist_find(nvlist, ZPOOL_CONFIG_FAULTED, DATA_TYPE_UINT64, NULL,
869 (void) nvlist_find(nvlist, ZPOOL_CONFIG_DEGRADED, DATA_TYPE_UINT64,
870 NULL, &is_degraded, NULL);
871 (void) nvlist_find(nvlist, ZPOOL_CONFIG_NOT_PRESENT, DATA_TYPE_UINT64,
872 NULL, &isnt_present, NULL);
873 (void) nvlist_find(nvlist, ZPOOL_CONFIG_IS_LOG, DATA_TYPE_UINT64, NULL,
877 vdev->v_state = VDEV_STATE_OFFLINE;
878 else if (is_removed != 0)
879 vdev->v_state = VDEV_STATE_REMOVED;
880 else if (is_faulted != 0)
881 vdev->v_state = VDEV_STATE_FAULTED;
882 else if (is_degraded != 0)
883 vdev->v_state = VDEV_STATE_DEGRADED;
884 else if (isnt_present != 0)
885 vdev->v_state = VDEV_STATE_CANT_OPEN;
887 vdev->v_islog = is_log != 0;
891 vdev_init(uint64_t guid, const nvlist_t *nvlist, vdev_t **vdevp)
893 uint64_t id, ashift, asize, nparity;
900 if (nvlist_find(nvlist, ZPOOL_CONFIG_ID, DATA_TYPE_UINT64, NULL, &id,
902 nvlist_find(nvlist, ZPOOL_CONFIG_TYPE, DATA_TYPE_STRING, NULL,
907 if (memcmp(type, VDEV_TYPE_MIRROR, len) != 0 &&
908 memcmp(type, VDEV_TYPE_DISK, len) != 0 &&
910 memcmp(type, VDEV_TYPE_FILE, len) != 0 &&
912 memcmp(type, VDEV_TYPE_RAIDZ, len) != 0 &&
913 memcmp(type, VDEV_TYPE_INDIRECT, len) != 0 &&
914 memcmp(type, VDEV_TYPE_REPLACING, len) != 0 &&
915 memcmp(type, VDEV_TYPE_HOLE, len) != 0) {
916 printf("ZFS: can only boot from disk, mirror, raidz1, "
917 "raidz2 and raidz3 vdevs, got: %.*s\n", len, type);
921 if (memcmp(type, VDEV_TYPE_MIRROR, len) == 0)
922 vdev = vdev_create(guid, vdev_mirror_read);
923 else if (memcmp(type, VDEV_TYPE_RAIDZ, len) == 0)
924 vdev = vdev_create(guid, vdev_raidz_read);
925 else if (memcmp(type, VDEV_TYPE_REPLACING, len) == 0)
926 vdev = vdev_create(guid, vdev_replacing_read);
927 else if (memcmp(type, VDEV_TYPE_INDIRECT, len) == 0) {
928 vdev_indirect_config_t *vic;
930 vdev = vdev_create(guid, vdev_indirect_read);
932 vdev->v_state = VDEV_STATE_HEALTHY;
933 vic = &vdev->vdev_indirect_config;
936 ZPOOL_CONFIG_INDIRECT_OBJECT,
938 NULL, &vic->vic_mapping_object, NULL);
940 ZPOOL_CONFIG_INDIRECT_BIRTHS,
942 NULL, &vic->vic_births_object, NULL);
944 ZPOOL_CONFIG_PREV_INDIRECT_VDEV,
946 NULL, &vic->vic_prev_indirect_vdev, NULL);
948 } else if (memcmp(type, VDEV_TYPE_HOLE, len) == 0) {
949 vdev = vdev_create(guid, vdev_missing_read);
951 vdev = vdev_create(guid, vdev_disk_read);
957 vdev_set_initial_state(vdev, nvlist);
959 if (nvlist_find(nvlist, ZPOOL_CONFIG_ASHIFT,
960 DATA_TYPE_UINT64, NULL, &ashift, NULL) == 0)
961 vdev->v_ashift = ashift;
963 if (nvlist_find(nvlist, ZPOOL_CONFIG_ASIZE,
964 DATA_TYPE_UINT64, NULL, &asize, NULL) == 0) {
965 vdev->v_psize = asize +
966 VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
969 if (nvlist_find(nvlist, ZPOOL_CONFIG_NPARITY,
970 DATA_TYPE_UINT64, NULL, &nparity, NULL) == 0)
971 vdev->v_nparity = nparity;
973 if (nvlist_find(nvlist, ZPOOL_CONFIG_PATH,
974 DATA_TYPE_STRING, NULL, &path, &pathlen) == 0) {
975 char prefix[] = "/dev/";
977 len = strlen(prefix);
978 if (len < pathlen && memcmp(path, prefix, len) == 0) {
982 name = malloc(pathlen + 1);
983 bcopy(path, name, pathlen);
984 name[pathlen] = '\0';
988 if (memcmp(type, VDEV_TYPE_RAIDZ, len) == 0) {
989 if (vdev->v_nparity < 1 ||
990 vdev->v_nparity > 3) {
991 printf("ZFS: invalid raidz parity: %d\n",
995 (void) asprintf(&name, "%.*s%d-%" PRIu64, len, type,
996 vdev->v_nparity, id);
998 (void) asprintf(&name, "%.*s-%" PRIu64, len, type, id);
1000 vdev->v_name = name;
1007 * Find the slot for the vdev. We return either NULL to signal that
1008 * STAILQ_INSERT_HEAD should be used, or the link element to be used
1009 * with STAILQ_INSERT_AFTER.
1012 vdev_find_previous(vdev_t *top_vdev, vdev_t *vdev)
1014 vdev_t *v, *previous;
1016 if (STAILQ_EMPTY(&top_vdev->v_children))
1020 STAILQ_FOREACH(v, &top_vdev->v_children, v_childlink) {
1021 if (v->v_id > vdev->v_id)
1024 if (v->v_id == vdev->v_id)
1027 if (v->v_id < vdev->v_id)
1034 vdev_child_count(vdev_t *vdev)
1040 STAILQ_FOREACH(v, &vdev->v_children, v_childlink) {
1047 * Insert vdev into top_vdev children list. List is ordered by v_id.
1050 vdev_insert(vdev_t *top_vdev, vdev_t *vdev)
1056 * Top-level vdevs can appear in random order, depending on how
1057 * the firmware presents the disk devices.
1058 * However, we insert vdevs so the list stays ordered by v_id,
1059 * so we can use either STAILQ_INSERT_HEAD or STAILQ_INSERT_AFTER,
1060 * as STAILQ does not have an insert-before operation.
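	/*
	 * Illustrative example: with children ordered by v_id as (0, 1, 3),
	 * vdev_find_previous() for v_id 2 returns the child with v_id 1, so
	 * STAILQ_INSERT_AFTER places it correctly; for a duplicate v_id 1 it
	 * returns that child, which we skip below; for an empty list it
	 * returns NULL, signalling STAILQ_INSERT_HEAD.
	 */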
1062 previous = vdev_find_previous(top_vdev, vdev);
1064 if (previous == NULL) {
1065 STAILQ_INSERT_HEAD(&top_vdev->v_children, vdev, v_childlink);
1066 } else if (previous->v_id == vdev->v_id) {
1068 * This vdev was already configured from the label config;
1069 * do not insert a duplicate.
1073 STAILQ_INSERT_AFTER(&top_vdev->v_children, previous, vdev,
1077 count = vdev_child_count(top_vdev);
1078 if (top_vdev->v_nchildren < count)
1079 top_vdev->v_nchildren = count;
1083 vdev_from_nvlist(spa_t *spa, uint64_t top_guid, const nvlist_t *nvlist)
1085 vdev_t *top_vdev, *vdev;
1086 nvlist_t *kids = NULL;
1090 top_vdev = vdev_find(top_guid);
1091 if (top_vdev == NULL) {
1092 rc = vdev_init(top_guid, nvlist, &top_vdev);
1095 top_vdev->v_spa = spa;
1096 top_vdev->v_top = top_vdev;
1097 vdev_insert(spa->spa_root_vdev, top_vdev);
1100 /* Add children if there are any. */
1101 rc = nvlist_find(nvlist, ZPOOL_CONFIG_CHILDREN, DATA_TYPE_NVLIST_ARRAY,
1102 &nkids, &kids, NULL);
1104 for (int i = 0; i < nkids; i++) {
1107 rc = nvlist_find(kids, ZPOOL_CONFIG_GUID,
1108 DATA_TYPE_UINT64, NULL, &guid, NULL);
1110 nvlist_destroy(kids);
1113 rc = vdev_init(guid, kids, &vdev);
1118 vdev->v_top = top_vdev;
1119 vdev_insert(top_vdev, vdev);
1121 rc = nvlist_next(kids);
1125 * When there are no children, nvlist_find() returns an
1126 * error; reset it, because leaf devices have no children.
1130 nvlist_destroy(kids);
1136 vdev_init_from_label(spa_t *spa, const nvlist_t *nvlist)
1138 uint64_t pool_guid, top_guid;
1142 if (nvlist_find(nvlist, ZPOOL_CONFIG_POOL_GUID, DATA_TYPE_UINT64,
1143 NULL, &pool_guid, NULL) ||
1144 nvlist_find(nvlist, ZPOOL_CONFIG_TOP_GUID, DATA_TYPE_UINT64,
1145 NULL, &top_guid, NULL) ||
1146 nvlist_find(nvlist, ZPOOL_CONFIG_VDEV_TREE, DATA_TYPE_NVLIST,
1147 NULL, &vdevs, NULL)) {
1148 printf("ZFS: can't find vdev details\n");
1152 rc = vdev_from_nvlist(spa, top_guid, vdevs);
1153 nvlist_destroy(vdevs);
1158 vdev_set_state(vdev_t *vdev)
1164 STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) {
1165 vdev_set_state(kid);
1169 * A mirror or raidz is healthy if all its kids are healthy. A
1170 * mirror is degraded if at least one (but not all) of its kids is
1171 * healthy; a raidz is degraded if at most nparity kids are offline.
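	/*
	 * Illustrative example: a two-way mirror with one healthy and one
	 * offline kid is DEGRADED, and OFFLINE once both kids are bad; a
	 * raidz2 (nparity == 2) is DEGRADED with one or two bad kids and
	 * OFFLINE with three.
	 */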
1173 if (STAILQ_FIRST(&vdev->v_children)) {
1176 STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) {
1177 if (kid->v_state == VDEV_STATE_HEALTHY)
1182 if (bad_kids == 0) {
1183 vdev->v_state = VDEV_STATE_HEALTHY;
1185 if (vdev->v_read == vdev_mirror_read) {
1187 vdev->v_state = VDEV_STATE_DEGRADED;
1189 vdev->v_state = VDEV_STATE_OFFLINE;
1191 } else if (vdev->v_read == vdev_raidz_read) {
1192 if (bad_kids > vdev->v_nparity) {
1193 vdev->v_state = VDEV_STATE_OFFLINE;
1195 vdev->v_state = VDEV_STATE_DEGRADED;
1203 vdev_update_from_nvlist(uint64_t top_guid, const nvlist_t *nvlist)
1206 nvlist_t *kids = NULL;
1209 /* Update top vdev. */
1210 vdev = vdev_find(top_guid);
1212 vdev_set_initial_state(vdev, nvlist);
1214 /* Update children if there are any. */
1215 rc = nvlist_find(nvlist, ZPOOL_CONFIG_CHILDREN, DATA_TYPE_NVLIST_ARRAY,
1216 &nkids, &kids, NULL);
1218 for (int i = 0; i < nkids; i++) {
1221 rc = nvlist_find(kids, ZPOOL_CONFIG_GUID,
1222 DATA_TYPE_UINT64, NULL, &guid, NULL);
1226 vdev = vdev_find(guid);
1228 vdev_set_initial_state(vdev, kids);
1230 rc = nvlist_next(kids);
1235 nvlist_destroy(kids);
1241 vdev_init_from_nvlist(spa_t *spa, const nvlist_t *nvlist)
1243 uint64_t pool_guid, vdev_children;
1244 nvlist_t *vdevs = NULL, *kids = NULL;
1247 if (nvlist_find(nvlist, ZPOOL_CONFIG_POOL_GUID, DATA_TYPE_UINT64,
1248 NULL, &pool_guid, NULL) ||
1249 nvlist_find(nvlist, ZPOOL_CONFIG_VDEV_CHILDREN, DATA_TYPE_UINT64,
1250 NULL, &vdev_children, NULL) ||
1251 nvlist_find(nvlist, ZPOOL_CONFIG_VDEV_TREE, DATA_TYPE_NVLIST,
1252 NULL, &vdevs, NULL)) {
1253 printf("ZFS: can't find vdev details\n");
1258 if (spa->spa_guid != pool_guid) {
1259 nvlist_destroy(vdevs);
1263 spa->spa_root_vdev->v_nchildren = vdev_children;
1265 rc = nvlist_find(vdevs, ZPOOL_CONFIG_CHILDREN, DATA_TYPE_NVLIST_ARRAY,
1266 &nkids, &kids, NULL);
1267 nvlist_destroy(vdevs);
1270 * The MOS config has at least one child for the root vdev.
1275 for (int i = 0; i < nkids; i++) {
1279 rc = nvlist_find(kids, ZPOOL_CONFIG_GUID, DATA_TYPE_UINT64,
1283 vdev = vdev_find(guid);
1285 * Top level vdev is missing, create it.
1288 rc = vdev_from_nvlist(spa, guid, kids);
1290 rc = vdev_update_from_nvlist(guid, kids);
1295 nvlist_destroy(kids);
1298 * Re-evaluate top-level vdev state.
1300 vdev_set_state(spa->spa_root_vdev);
1306 spa_find_by_guid(uint64_t guid)
1310 STAILQ_FOREACH(spa, &zfs_pools, spa_link)
1311 if (spa->spa_guid == guid)
1318 spa_find_by_name(const char *name)
1322 STAILQ_FOREACH(spa, &zfs_pools, spa_link)
1323 if (strcmp(spa->spa_name, name) == 0)
1331 spa_get_primary(void)
1334 return (STAILQ_FIRST(&zfs_pools));
1338 spa_get_primary_vdev(const spa_t *spa)
1344 spa = spa_get_primary();
1347 vdev = spa->spa_root_vdev;
1350 for (kid = STAILQ_FIRST(&vdev->v_children); kid != NULL;
1351 kid = STAILQ_FIRST(&vdev->v_children))
1358 spa_create(uint64_t guid, const char *name)
1362 if ((spa = calloc(1, sizeof(spa_t))) == NULL)
1364 if ((spa->spa_name = strdup(name)) == NULL) {
1368 spa->spa_guid = guid;
1369 spa->spa_root_vdev = vdev_create(guid, NULL);
1370 if (spa->spa_root_vdev == NULL) {
1371 free(spa->spa_name);
1375 spa->spa_root_vdev->v_name = strdup("root");
1376 STAILQ_INSERT_TAIL(&zfs_pools, spa, spa_link);
1382 state_name(vdev_state_t state)
1384 static const char *names[] = {
1394 return (names[state]);
1399 #define pager_printf printf
1404 pager_printf(const char *fmt, ...)
1409 va_start(args, fmt);
1410 vsnprintf(line, sizeof(line), fmt, args);
1412 return (pager_output(line));
1417 #define STATUS_FORMAT " %s %s\n"
1420 print_state(int indent, const char *name, vdev_state_t state)
1426 for (i = 0; i < indent; i++)
1429 return (pager_printf(STATUS_FORMAT, buf, state_name(state)));
1433 vdev_status(vdev_t *vdev, int indent)
1438 if (vdev->v_islog) {
1439 (void) pager_output(" logs\n");
1443 ret = print_state(indent, vdev->v_name, vdev->v_state);
1447 STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) {
1448 ret = vdev_status(kid, indent + 1);
1456 spa_status(spa_t *spa)
1458 static char bootfs[ZFS_MAXNAMELEN];
1462 int good_kids, bad_kids, degraded_kids, ret;
1465 ret = pager_printf(" pool: %s\n", spa->spa_name);
1469 if (zfs_get_root(spa, &rootid) == 0 &&
1470 zfs_rlookup(spa, rootid, bootfs) == 0) {
1471 if (bootfs[0] == '\0')
1472 ret = pager_printf("bootfs: %s\n", spa->spa_name);
1474 ret = pager_printf("bootfs: %s/%s\n", spa->spa_name,
1479 ret = pager_printf("config:\n\n");
1482 ret = pager_printf(STATUS_FORMAT, "NAME", "STATE");
1489 vlist = &spa->spa_root_vdev->v_children;
1490 STAILQ_FOREACH(vdev, vlist, v_childlink) {
1491 if (vdev->v_state == VDEV_STATE_HEALTHY)
1493 else if (vdev->v_state == VDEV_STATE_DEGRADED)
1499 state = VDEV_STATE_CLOSED;
1500 if (good_kids > 0 && (degraded_kids + bad_kids) == 0)
1501 state = VDEV_STATE_HEALTHY;
1502 else if ((good_kids + degraded_kids) > 0)
1503 state = VDEV_STATE_DEGRADED;
1505 ret = print_state(0, spa->spa_name, state);
1509 STAILQ_FOREACH(vdev, vlist, v_childlink) {
1510 ret = vdev_status(vdev, 1);
1518 spa_all_status(void)
1521 int first = 1, ret = 0;
1523 STAILQ_FOREACH(spa, &zfs_pools, spa_link) {
1525 ret = pager_printf("\n");
1530 ret = spa_status(spa);
1538 vdev_label_offset(uint64_t psize, int l, uint64_t offset)
1540 uint64_t label_offset;
1542 if (l < VDEV_LABELS / 2)
1545 label_offset = psize - VDEV_LABELS * sizeof (vdev_label_t);
1547 return (offset + l * sizeof (vdev_label_t) + label_offset);
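/*
 * Worked example (illustrative): sizeof (vdev_label_t) is 256K and
 * VDEV_LABELS is 4, with labels 0 and 1 at the front of the device and
 * labels 2 and 3 at the end.  For a device of physical size psize this
 * function yields:
 *
 *   l == 0: offset
 *   l == 1: offset + 256K
 *   l == 2: offset + psize - 512K
 *   l == 3: offset + psize - 256K
 */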
1551 vdev_uberblock_compare(const uberblock_t *ub1, const uberblock_t *ub2)
1553 unsigned int seq1 = 0;
1554 unsigned int seq2 = 0;
1555 int cmp = AVL_CMP(ub1->ub_txg, ub2->ub_txg);
1560 cmp = AVL_CMP(ub1->ub_timestamp, ub2->ub_timestamp);
1564 if (MMP_VALID(ub1) && MMP_SEQ_VALID(ub1))
1565 seq1 = MMP_SEQ(ub1);
1567 if (MMP_VALID(ub2) && MMP_SEQ_VALID(ub2))
1568 seq2 = MMP_SEQ(ub2);
1570 return (AVL_CMP(seq1, seq2));
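/*
 * Note: the comparison above is lexicographic - a higher txg always
 * wins; the timestamp only breaks ties between equal txgs, and the MMP
 * sequence number only breaks ties between equal timestamps.  E.g.
 * (txg 10, timestamp 5) beats (txg 9, timestamp 100).
 */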
1574 uberblock_verify(uberblock_t *ub)
1576 if (ub->ub_magic == BSWAP_64((uint64_t)UBERBLOCK_MAGIC)) {
1577 byteswap_uint64_array(ub, sizeof (uberblock_t));
1580 if (ub->ub_magic != UBERBLOCK_MAGIC ||
1581 !SPA_VERSION_IS_SUPPORTED(ub->ub_version))
1588 vdev_label_read(vdev_t *vd, int l, void *buf, uint64_t offset,
1594 off = vdev_label_offset(vd->v_psize, l, offset);
1597 BP_SET_LSIZE(&bp, size);
1598 BP_SET_PSIZE(&bp, size);
1599 BP_SET_CHECKSUM(&bp, ZIO_CHECKSUM_LABEL);
1600 BP_SET_COMPRESS(&bp, ZIO_COMPRESS_OFF);
1601 DVA_SET_OFFSET(BP_IDENTITY(&bp), off);
1602 ZIO_SET_CHECKSUM(&bp.blk_cksum, off, 0, 0, 0);
1604 return (vdev_read_phys(vd, &bp, buf, off, size));
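/*
 * Note: the checksum seeded above with ZIO_SET_CHECKSUM() is
 * "embedded" and salted with the on-disk byte offset, so a label that
 * has been copied to a different location fails verification.
 */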
1608 vdev_label_read_config(vdev_t *vd, uint64_t txg)
1611 uint64_t best_txg = 0;
1612 uint64_t label_txg = 0;
1614 nvlist_t *nvl = NULL, *tmp;
1617 label = malloc(sizeof (vdev_phys_t));
1621 for (int l = 0; l < VDEV_LABELS; l++) {
1622 const unsigned char *nvlist;
1624 if (vdev_label_read(vd, l, label,
1625 offsetof(vdev_label_t, vl_vdev_phys),
1626 sizeof (vdev_phys_t)))
1629 nvlist = (const unsigned char *) label->vp_nvlist;
1630 tmp = nvlist_import(nvlist + 4, nvlist[0], nvlist[1]);
1634 error = nvlist_find(tmp, ZPOOL_CONFIG_POOL_TXG,
1635 DATA_TYPE_UINT64, NULL, &label_txg, NULL);
1636 if (error != 0 || label_txg == 0) {
1637 nvlist_destroy(nvl);
1642 if (label_txg <= txg && label_txg > best_txg) {
1643 best_txg = label_txg;
1644 nvlist_destroy(nvl);
1649 * Use asize from the pool config. We need this
1650 * because the BIOS may report a bad value.
1652 if (nvlist_find(nvl, ZPOOL_CONFIG_ASIZE,
1653 DATA_TYPE_UINT64, NULL, &asize, NULL) == 0) {
1654 vd->v_psize = asize +
1655 VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
1658 nvlist_destroy(tmp);
1661 if (best_txg == 0) {
1662 nvlist_destroy(nvl);
1671 vdev_uberblock_load(vdev_t *vd, uberblock_t *ub)
1675 buf = malloc(VDEV_UBERBLOCK_SIZE(vd));
1679 for (int l = 0; l < VDEV_LABELS; l++) {
1680 for (int n = 0; n < VDEV_UBERBLOCK_COUNT(vd); n++) {
1681 if (vdev_label_read(vd, l, buf,
1682 VDEV_UBERBLOCK_OFFSET(vd, n),
1683 VDEV_UBERBLOCK_SIZE(vd)))
1685 if (uberblock_verify(buf) != 0)
1688 if (vdev_uberblock_compare(buf, ub) > 0)
1696 vdev_probe(vdev_phys_read_t *_read, void *read_priv, spa_t **spap)
1703 uint64_t guid, vdev_children;
1704 uint64_t pool_txg, pool_guid;
1705 const char *pool_name;
1709 * Load the vdev label and figure out which
1710 * uberblock is most current.
1712 memset(&vtmp, 0, sizeof(vtmp));
1713 vtmp.v_phys_read = _read;
1714 vtmp.v_read_priv = read_priv;
1715 vtmp.v_psize = P2ALIGN(ldi_get_size(read_priv),
1716 (uint64_t)sizeof (vdev_label_t));
1718 /* Test for minimum device size. */
1719 if (vtmp.v_psize < SPA_MINDEVSIZE)
1722 nvl = vdev_label_read_config(&vtmp, UINT64_MAX);
1726 if (nvlist_find(nvl, ZPOOL_CONFIG_VERSION, DATA_TYPE_UINT64,
1727 NULL, &val, NULL) != 0) {
1728 nvlist_destroy(nvl);
1732 if (!SPA_VERSION_IS_SUPPORTED(val)) {
1733 printf("ZFS: unsupported ZFS version %u (should be %u)\n",
1734 (unsigned)val, (unsigned)SPA_VERSION);
1735 nvlist_destroy(nvl);
1739 /* Check ZFS features for read */
1740 rc = nvlist_check_features_for_read(nvl);
1742 nvlist_destroy(nvl);
1746 if (nvlist_find(nvl, ZPOOL_CONFIG_POOL_STATE, DATA_TYPE_UINT64,
1747 NULL, &val, NULL) != 0) {
1748 nvlist_destroy(nvl);
1752 if (val == POOL_STATE_DESTROYED) {
1753 /* We don't boot from destroyed pools. */
1754 nvlist_destroy(nvl);
1758 if (nvlist_find(nvl, ZPOOL_CONFIG_POOL_TXG, DATA_TYPE_UINT64,
1759 NULL, &pool_txg, NULL) != 0 ||
1760 nvlist_find(nvl, ZPOOL_CONFIG_POOL_GUID, DATA_TYPE_UINT64,
1761 NULL, &pool_guid, NULL) != 0 ||
1762 nvlist_find(nvl, ZPOOL_CONFIG_POOL_NAME, DATA_TYPE_STRING,
1763 NULL, &pool_name, &namelen) != 0) {
1765 * Cache and spare devices end up here - just ignore them.
1768 nvlist_destroy(nvl);
1773 * Create the pool if this is the first time we've seen it.
1775 spa = spa_find_by_guid(pool_guid);
1779 nvlist_find(nvl, ZPOOL_CONFIG_VDEV_CHILDREN,
1780 DATA_TYPE_UINT64, NULL, &vdev_children, NULL);
1781 name = malloc(namelen + 1);
1783 nvlist_destroy(nvl);
1786 bcopy(pool_name, name, namelen);
1787 name[namelen] = '\0';
1788 spa = spa_create(pool_guid, name);
1791 nvlist_destroy(nvl);
1794 spa->spa_root_vdev->v_nchildren = vdev_children;
1796 if (pool_txg > spa->spa_txg)
1797 spa->spa_txg = pool_txg;
1800 * Get the vdev tree and create our in-core copy of it.
1801 * If we already have a vdev with this guid, this must
1802 * be some kind of alias (overlapping slices, dangerously dedicated disks etc).
1805 if (nvlist_find(nvl, ZPOOL_CONFIG_GUID, DATA_TYPE_UINT64,
1806 NULL, &guid, NULL) != 0) {
1807 nvlist_destroy(nvl);
1810 vdev = vdev_find(guid);
1811 /* Has this vdev already been inited? */
1812 if (vdev && vdev->v_phys_read) {
1813 nvlist_destroy(nvl);
1817 rc = vdev_init_from_label(spa, nvl);
1818 nvlist_destroy(nvl);
1823 * We should already have created an incomplete vdev for this
1824 * vdev. Find it and initialise it with our read proc.
1826 vdev = vdev_find(guid);
1828 vdev->v_phys_read = _read;
1829 vdev->v_read_priv = read_priv;
1830 vdev->v_psize = vtmp.v_psize;
1832 * If no other state is set, mark vdev healthy.
1834 if (vdev->v_state == VDEV_STATE_UNKNOWN)
1835 vdev->v_state = VDEV_STATE_HEALTHY;
1837 printf("ZFS: inconsistent nvlist contents\n");
1842 spa->spa_with_log = vdev->v_islog;
1845 * Re-evaluate top-level vdev state.
1847 vdev_set_state(vdev->v_top);
1850 * OK, we are happy with the pool so far. Let's find
1851 * the best uberblock and then we can actually access
1852 * the contents of the pool.
1854 vdev_uberblock_load(vdev, &spa->spa_uberblock);
1866 for (v = 0; v < 32; v++)
1873 zio_read_gang(const spa_t *spa, const blkptr_t *bp, void *buf)
1876 zio_gbh_phys_t zio_gb;
1880 /* Artificial BP for gang block header. */
1882 BP_SET_PSIZE(&gbh_bp, SPA_GANGBLOCKSIZE);
1883 BP_SET_LSIZE(&gbh_bp, SPA_GANGBLOCKSIZE);
1884 BP_SET_CHECKSUM(&gbh_bp, ZIO_CHECKSUM_GANG_HEADER);
1885 BP_SET_COMPRESS(&gbh_bp, ZIO_COMPRESS_OFF);
1886 for (i = 0; i < SPA_DVAS_PER_BP; i++)
1887 DVA_SET_GANG(&gbh_bp.blk_dva[i], 0);
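	/*
	 * Note: a gang header is one SPA_GANGBLOCKSIZE (512-byte) block
	 * holding up to SPA_GBH_NBLKPTRS child block pointers; the logical
	 * block is reassembled below by reading each non-hole child
	 * back-to-back into buf.
	 */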
1889 /* Read gang header block using the artificial BP. */
1890 if (zio_read(spa, &gbh_bp, &zio_gb))
1894 for (i = 0; i < SPA_GBH_NBLKPTRS; i++) {
1895 blkptr_t *gbp = &zio_gb.zg_blkptr[i];
1897 if (BP_IS_HOLE(gbp))
1899 if (zio_read(spa, gbp, pbuf))
1901 pbuf += BP_GET_PSIZE(gbp);
1904 if (zio_checksum_verify(spa, bp, buf))
1910 zio_read(const spa_t *spa, const blkptr_t *bp, void *buf)
1912 int cpfunc = BP_GET_COMPRESS(bp);
1913 uint64_t align, size;
1918 * Process data embedded in block pointer
1920 if (BP_IS_EMBEDDED(bp)) {
1921 ASSERT(BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA);
1923 size = BPE_GET_PSIZE(bp);
1924 ASSERT(size <= BPE_PAYLOAD_SIZE);
1926 if (cpfunc != ZIO_COMPRESS_OFF)
1927 pbuf = malloc(size);
1934 decode_embedded_bp_compressed(bp, pbuf);
1937 if (cpfunc != ZIO_COMPRESS_OFF) {
1938 error = zio_decompress_data(cpfunc, pbuf,
1939 size, buf, BP_GET_LSIZE(bp));
1943 printf("ZFS: i/o error - unable to decompress "
1944 "block pointer data, error %d\n", error);
1950 for (i = 0; i < SPA_DVAS_PER_BP; i++) {
1951 const dva_t *dva = &bp->blk_dva[i];
1957 if (!dva->dva_word[0] && !dva->dva_word[1])
1960 vdevid = DVA_GET_VDEV(dva);
1961 offset = DVA_GET_OFFSET(dva);
1962 vlist = &spa->spa_root_vdev->v_children;
1963 STAILQ_FOREACH(vdev, vlist, v_childlink) {
1964 if (vdev->v_id == vdevid)
1967 if (!vdev || !vdev->v_read)
1970 size = BP_GET_PSIZE(bp);
1971 if (vdev->v_read == vdev_raidz_read) {
1972 align = 1ULL << vdev->v_ashift;
1973 if (P2PHASE(size, align) != 0)
1974 size = P2ROUNDUP(size, align);
1976 if (size != BP_GET_PSIZE(bp) || cpfunc != ZIO_COMPRESS_OFF)
1977 pbuf = malloc(size);
1986 if (DVA_GET_GANG(dva))
1987 error = zio_read_gang(spa, bp, pbuf);
1989 error = vdev->v_read(vdev, bp, pbuf, offset, size);
1991 if (cpfunc != ZIO_COMPRESS_OFF)
1992 error = zio_decompress_data(cpfunc, pbuf,
1993 BP_GET_PSIZE(bp), buf, BP_GET_LSIZE(bp));
1994 else if (size != BP_GET_PSIZE(bp))
1995 bcopy(pbuf, buf, BP_GET_PSIZE(bp));
1997 printf("zio_read error: %d\n", error);
2005 printf("ZFS: i/o error - all block copies unavailable\n");
2011 dnode_read(const spa_t *spa, const dnode_phys_t *dnode, off_t offset,
2012 void *buf, size_t buflen)
2014 int ibshift = dnode->dn_indblkshift - SPA_BLKPTRSHIFT;
2015 int bsize = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
2016 int nlevels = dnode->dn_nlevels;
2019 if (bsize > SPA_MAXBLOCKSIZE) {
2020 printf("ZFS: I/O error - blocks larger than %llu are not "
2021 "supported\n", SPA_MAXBLOCKSIZE);
2026 * Note: bsize may not be a power of two here so we need to do an
2027 * actual divide rather than a bitshift.
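	/*
	 * Worked example (illustrative): with bsize == 1536 (three 512-byte
	 * sectors) and offset == 4000, bn == 4000 / 1536 == 2 and
	 * boff == 928; a shift would only work for power-of-two bsize.
	 */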
2029 while (buflen > 0) {
2030 uint64_t bn = offset / bsize;
2031 int boff = offset % bsize;
2033 const blkptr_t *indbp;
2036 if (bn > dnode->dn_maxblkid)
2039 if (dnode == dnode_cache_obj && bn == dnode_cache_bn)
2042 indbp = dnode->dn_blkptr;
2043 for (i = 0; i < nlevels; i++) {
2045 * Copy the bp from the indirect array so that
2046 * we can re-use the scratch buffer for multi-level
2049 ibn = bn >> ((nlevels - i - 1) * ibshift);
2050 ibn &= ((1 << ibshift) - 1);
2052 if (BP_IS_HOLE(&bp)) {
2053 memset(dnode_cache_buf, 0, bsize);
2056 rc = zio_read(spa, &bp, dnode_cache_buf);
2059 indbp = (const blkptr_t *) dnode_cache_buf;
2061 dnode_cache_obj = dnode;
2062 dnode_cache_bn = bn;
2066 * The buffer contains our data block. Copy what we
2067 * need from it and loop.
2070 if (i > buflen) i = buflen;
2071 memcpy(buf, &dnode_cache_buf[boff], i);
2072 buf = ((char *)buf) + i;
2081 * Lookup a value in a microzap directory.
2084 mzap_lookup(const mzap_phys_t *mz, size_t size, const char *name,
2087 const mzap_ent_phys_t *mze;
2091 * Microzap objects use exactly one block. Read the whole thing.
2094 chunks = size / MZAP_ENT_LEN - 1;
2095 for (i = 0; i < chunks; i++) {
2096 mze = &mz->mz_chunk[i];
2097 if (strcmp(mze->mze_name, name) == 0) {
2098 *value = mze->mze_value;
2107 * Compare a name with a zap leaf entry. Return non-zero if the name matches.
2111 fzap_name_equal(const zap_leaf_t *zl, const zap_leaf_chunk_t *zc,
2115 const zap_leaf_chunk_t *nc;
2118 namelen = zc->l_entry.le_name_numints;
2120 nc = &ZAP_LEAF_CHUNK(zl, zc->l_entry.le_name_chunk);
2122 while (namelen > 0) {
2126 if (len > ZAP_LEAF_ARRAY_BYTES)
2127 len = ZAP_LEAF_ARRAY_BYTES;
2128 if (memcmp(p, nc->l_array.la_array, len))
2132 nc = &ZAP_LEAF_CHUNK(zl, nc->l_array.la_next);
2139 * Extract a uint64_t value from a zap leaf entry.
2142 fzap_leaf_value(const zap_leaf_t *zl, const zap_leaf_chunk_t *zc)
2144 const zap_leaf_chunk_t *vc;
2149 vc = &ZAP_LEAF_CHUNK(zl, zc->l_entry.le_value_chunk);
2150 for (i = 0, value = 0, p = vc->l_array.la_array; i < 8; i++) {
2151 value = (value << 8) | p[i];
2158 stv(int len, void *addr, uint64_t value)
2162 *(uint8_t *)addr = value;
2165 *(uint16_t *)addr = value;
2168 *(uint32_t *)addr = value;
2171 *(uint64_t *)addr = value;
2177 * Extract an array from a zap leaf entry.
2180 fzap_leaf_array(const zap_leaf_t *zl, const zap_leaf_chunk_t *zc,
2181 uint64_t integer_size, uint64_t num_integers, void *buf)
2183 uint64_t array_int_len = zc->l_entry.le_value_intlen;
2185 uint64_t *u64 = buf;
2187 int len = MIN(zc->l_entry.le_value_numints, num_integers);
2188 int chunk = zc->l_entry.le_value_chunk;
2191 if (integer_size == 8 && len == 1) {
2192 *u64 = fzap_leaf_value(zl, zc);
2197 struct zap_leaf_array *la = &ZAP_LEAF_CHUNK(zl, chunk).l_array;
2200 ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(zl));
2201 for (i = 0; i < ZAP_LEAF_ARRAY_BYTES && len > 0; i++) {
2202 value = (value << 8) | la->la_array[i];
2204 if (byten == array_int_len) {
2205 stv(integer_size, p, value);
2213 chunk = la->la_next;
2218 fzap_check_size(uint64_t integer_size, uint64_t num_integers)
2221 switch (integer_size) {
2231 if (integer_size * num_integers > ZAP_MAXVALUELEN)
2238 zap_leaf_free(zap_leaf_t *leaf)
2245 zap_get_leaf_byblk(fat_zap_t *zap, uint64_t blk, zap_leaf_t **lp)
2247 int bs = FZAP_BLOCK_SHIFT(zap);
2250 *lp = malloc(sizeof(**lp));
2255 (*lp)->l_phys = malloc(1 << bs);
2257 if ((*lp)->l_phys == NULL) {
2261 err = dnode_read(zap->zap_spa, zap->zap_dnode, blk << bs, (*lp)->l_phys,
2270 zap_table_load(fat_zap_t *zap, zap_table_phys_t *tbl, uint64_t idx,
2273 int bs = FZAP_BLOCK_SHIFT(zap);
2274 uint64_t blk = idx >> (bs - 3);
2275 uint64_t off = idx & ((1 << (bs - 3)) - 1);
2279 buf = malloc(1 << zap->zap_block_shift);
2282 rc = dnode_read(zap->zap_spa, zap->zap_dnode, (tbl->zt_blk + blk) << bs,
2283 buf, 1 << zap->zap_block_shift);
2291 zap_idx_to_blk(fat_zap_t *zap, uint64_t idx, uint64_t *valp)
2293 if (zap->zap_phys->zap_ptrtbl.zt_numblks == 0) {
2294 *valp = ZAP_EMBEDDED_PTRTBL_ENT(zap, idx);
2297 return (zap_table_load(zap, &zap->zap_phys->zap_ptrtbl,
2302 #define ZAP_HASH_IDX(hash, n) (((n) == 0) ? 0 : ((hash) >> (64 - (n))))
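/*
 * Illustrative example: ZAP_HASH_IDX(h, n) keeps the top n bits of the
 * 64-bit hash, e.g. h == 0xC000000000000000 with n == 2 yields index 3
 * of a 4-entry pointer table.
 */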
2304 zap_deref_leaf(fat_zap_t *zap, uint64_t h, zap_leaf_t **lp)
2309 idx = ZAP_HASH_IDX(h, zap->zap_phys->zap_ptrtbl.zt_shift);
2310 err = zap_idx_to_blk(zap, idx, &blk);
2313 return (zap_get_leaf_byblk(zap, blk, lp));
2316 #define CHAIN_END 0xffff /* end of the chunk chain */
2317 #define LEAF_HASH(l, h) \
2318 ((ZAP_LEAF_HASH_NUMENTRIES(l)-1) & \
2320 (64 - ZAP_LEAF_HASH_SHIFT(l) - (l)->l_phys->l_hdr.lh_prefix_len)))
2321 #define LEAF_HASH_ENTPTR(l, h) (&(l)->l_phys->l_hash[LEAF_HASH(l, h)])
2324 zap_leaf_lookup(zap_leaf_t *zl, uint64_t hash, const char *name,
2325 uint64_t integer_size, uint64_t num_integers, void *value)
2329 struct zap_leaf_entry *le;
2332 * Make sure this chunk matches our hash.
2334 if (zl->l_phys->l_hdr.lh_prefix_len > 0 &&
2335 zl->l_phys->l_hdr.lh_prefix !=
2336 hash >> (64 - zl->l_phys->l_hdr.lh_prefix_len))
2340 for (chunkp = LEAF_HASH_ENTPTR(zl, hash);
2341 *chunkp != CHAIN_END; chunkp = &le->le_next) {
2342 zap_leaf_chunk_t *zc;
2343 uint16_t chunk = *chunkp;
2345 le = ZAP_LEAF_ENTRY(zl, chunk);
2346 if (le->le_hash != hash)
2348 zc = &ZAP_LEAF_CHUNK(zl, chunk);
2349 if (fzap_name_equal(zl, zc, name)) {
2350 if (zc->l_entry.le_value_intlen > integer_size) {
2353 fzap_leaf_array(zl, zc, integer_size,
2354 num_integers, value);
2364 * Lookup a value in a fatzap directory.
2367 fzap_lookup(const spa_t *spa, const dnode_phys_t *dnode, zap_phys_t *zh,
2368 const char *name, uint64_t integer_size, uint64_t num_integers,
2371 int bsize = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
2377 if (zh->zap_magic != ZAP_MAGIC)
2380 if ((rc = fzap_check_size(integer_size, num_integers)) != 0)
2383 z.zap_block_shift = ilog2(bsize);
2386 z.zap_dnode = dnode;
2388 hash = zap_hash(zh->zap_salt, name);
2389 rc = zap_deref_leaf(&z, hash, &zl);
2393 rc = zap_leaf_lookup(zl, hash, name, integer_size, num_integers, value);
2400 * Lookup a name in a zap object and return its value as a uint64_t.
2403 zap_lookup(const spa_t *spa, const dnode_phys_t *dnode, const char *name,
2404 uint64_t integer_size, uint64_t num_integers, void *value)
2408 size_t size = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
2414 rc = dnode_read(spa, dnode, 0, zap, size);
2418 switch (zap->zap_block_type) {
2420 rc = mzap_lookup((const mzap_phys_t *)zap, size, name, value);
2423 rc = fzap_lookup(spa, dnode, zap, name, integer_size,
2424 num_integers, value);
2427 printf("ZFS: invalid zap_type=%" PRIx64 "\n",
2428 zap->zap_block_type);
2437 * List a microzap directory.
2440 mzap_list(const mzap_phys_t *mz, size_t size,
2441 int (*callback)(const char *, uint64_t))
2443 const mzap_ent_phys_t *mze;
2447 * Microzap objects use exactly one block. Read the whole thing.
2451 chunks = size / MZAP_ENT_LEN - 1;
2452 for (i = 0; i < chunks; i++) {
2453 mze = &mz->mz_chunk[i];
2454 if (mze->mze_name[0]) {
2455 rc = callback(mze->mze_name, mze->mze_value);
2465 * List a fatzap directory.
2468 fzap_list(const spa_t *spa, const dnode_phys_t *dnode, zap_phys_t *zh,
2469 int (*callback)(const char *, uint64_t))
2471 int bsize = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
2476 if (zh->zap_magic != ZAP_MAGIC)
2479 z.zap_block_shift = ilog2(bsize);
2483 * This assumes that the leaf blocks start at block 1. The
2484 * documentation isn't exactly clear on this.
2487 zl.l_bs = z.zap_block_shift;
2488 zl.l_phys = malloc(bsize);
2489 if (zl.l_phys == NULL)
2492 for (i = 0; i < zh->zap_num_leafs; i++) {
2493 off_t off = ((off_t)(i + 1)) << zl.l_bs;
2497 if (dnode_read(spa, dnode, off, zl.l_phys, bsize)) {
2502 for (j = 0; j < ZAP_LEAF_NUMCHUNKS(&zl); j++) {
2503 zap_leaf_chunk_t *zc, *nc;
2506 zc = &ZAP_LEAF_CHUNK(&zl, j);
2507 if (zc->l_entry.le_type != ZAP_CHUNK_ENTRY)
2509 namelen = zc->l_entry.le_name_numints;
2510 if (namelen > sizeof(name))
2511 namelen = sizeof(name);
2514 * Paste the name back together.
2516 nc = &ZAP_LEAF_CHUNK(&zl, zc->l_entry.le_name_chunk);
2518 while (namelen > 0) {
2521 if (len > ZAP_LEAF_ARRAY_BYTES)
2522 len = ZAP_LEAF_ARRAY_BYTES;
2523 memcpy(p, nc->l_array.la_array, len);
2526 nc = &ZAP_LEAF_CHUNK(&zl, nc->l_array.la_next);
2530 * Assume the first eight bytes of the value are a uint64_t.
2533 value = fzap_leaf_value(&zl, zc);
2535 /* printf("%s 0x%jx\n", name, (uintmax_t)value); */
2536 rc = callback((const char *)name, value);
2548 static int zfs_printf(const char *name, uint64_t value __unused)
2551 printf("%s\n", name);
2557 * List a zap directory.
2560 zap_list(const spa_t *spa, const dnode_phys_t *dnode)
2563 size_t size = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
2570 rc = dnode_read(spa, dnode, 0, zap, size);
2572 if (zap->zap_block_type == ZBT_MICRO)
2573 rc = mzap_list((const mzap_phys_t *)zap, size,
2576 rc = fzap_list(spa, dnode, zap, zfs_printf);
2583 objset_get_dnode(const spa_t *spa, const objset_phys_t *os, uint64_t objnum,
2584 dnode_phys_t *dnode)
2588 offset = objnum * sizeof(dnode_phys_t);
2589 return dnode_read(spa, &os->os_meta_dnode, offset,
2590 dnode, sizeof(dnode_phys_t));
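/*
 * Worked example (illustrative): dnodes are sizeof(dnode_phys_t) == 512
 * bytes each, so object number 34 lives at byte offset 34 * 512 ==
 * 17408 within the meta-dnode, which dnode_read() resolves through the
 * objset's block tree.
 */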
2594 * Lookup a name in a microzap directory.
2597 mzap_rlookup(const mzap_phys_t *mz, size_t size, char *name, uint64_t value)
2599 const mzap_ent_phys_t *mze;
2603 * Microzap objects use exactly one block. Read the whole thing.
2606 chunks = size / MZAP_ENT_LEN - 1;
2607 for (i = 0; i < chunks; i++) {
2608 mze = &mz->mz_chunk[i];
2609 if (value == mze->mze_value) {
2610 strcpy(name, mze->mze_name);
2619 fzap_name_copy(const zap_leaf_t *zl, const zap_leaf_chunk_t *zc, char *name)
2622 const zap_leaf_chunk_t *nc;
2625 namelen = zc->l_entry.le_name_numints;
2627 nc = &ZAP_LEAF_CHUNK(zl, zc->l_entry.le_name_chunk);
2629 while (namelen > 0) {
2632 if (len > ZAP_LEAF_ARRAY_BYTES)
2633 len = ZAP_LEAF_ARRAY_BYTES;
2634 memcpy(p, nc->l_array.la_array, len);
2637 nc = &ZAP_LEAF_CHUNK(zl, nc->l_array.la_next);
2644 fzap_rlookup(const spa_t *spa, const dnode_phys_t *dnode, zap_phys_t *zh,
2645 char *name, uint64_t value)
2647 int bsize = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
2652 if (zh->zap_magic != ZAP_MAGIC)
2655 z.zap_block_shift = ilog2(bsize);
2659 * This assumes that the leaf blocks start at block 1. The
2660 * documentation isn't exactly clear on this.
2663 zl.l_bs = z.zap_block_shift;
2664 zl.l_phys = malloc(bsize);
2665 if (zl.l_phys == NULL)
2668 for (i = 0; i < zh->zap_num_leafs; i++) {
2669 off_t off = ((off_t)(i + 1)) << zl.l_bs;
2671 rc = dnode_read(spa, dnode, off, zl.l_phys, bsize);
2675 for (j = 0; j < ZAP_LEAF_NUMCHUNKS(&zl); j++) {
2676 zap_leaf_chunk_t *zc;
2678 zc = &ZAP_LEAF_CHUNK(&zl, j);
2679 if (zc->l_entry.le_type != ZAP_CHUNK_ENTRY)
2681 if (zc->l_entry.le_value_intlen != 8 ||
2682 zc->l_entry.le_value_numints != 1)
2685 if (fzap_leaf_value(&zl, zc) == value) {
2686 fzap_name_copy(&zl, zc, name);
2699 zap_rlookup(const spa_t *spa, const dnode_phys_t *dnode, char *name,
2703 size_t size = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
2710 rc = dnode_read(spa, dnode, 0, zap, size);
2712 if (zap->zap_block_type == ZBT_MICRO)
2713 rc = mzap_rlookup((const mzap_phys_t *)zap, size,
2716 rc = fzap_rlookup(spa, dnode, zap, name, value);
2723 zfs_rlookup(const spa_t *spa, uint64_t objnum, char *result)
2726 char component[256];
2727 uint64_t dir_obj, parent_obj, child_dir_zapobj;
2728 dnode_phys_t child_dir_zap, dataset, dir, parent;
2730 dsl_dataset_phys_t *ds;
2734 p = &name[sizeof(name) - 1];
2737 if (objset_get_dnode(spa, &spa->spa_mos, objnum, &dataset)) {
2738 printf("ZFS: can't find dataset %ju\n", (uintmax_t)objnum);
2741 ds = (dsl_dataset_phys_t *)&dataset.dn_bonus;
2742 dir_obj = ds->ds_dir_obj;
2745 if (objset_get_dnode(spa, &spa->spa_mos, dir_obj, &dir) != 0)
2747 dd = (dsl_dir_phys_t *)&dir.dn_bonus;
2749 /* Actual loop condition. */
2750 parent_obj = dd->dd_parent_obj;
2751 if (parent_obj == 0)
2754 if (objset_get_dnode(spa, &spa->spa_mos, parent_obj,
2757 dd = (dsl_dir_phys_t *)&parent.dn_bonus;
2758 child_dir_zapobj = dd->dd_child_dir_zapobj;
2759 if (objset_get_dnode(spa, &spa->spa_mos, child_dir_zapobj,
2760 &child_dir_zap) != 0)
2762 if (zap_rlookup(spa, &child_dir_zap, component, dir_obj) != 0)
2765 len = strlen(component);
2767 memcpy(p, component, len);
2771 /* Actual loop iteration. */
2772 dir_obj = parent_obj;
2783 zfs_lookup_dataset(const spa_t *spa, const char *name, uint64_t *objnum)
2786 uint64_t dir_obj, child_dir_zapobj;
2787 dnode_phys_t child_dir_zap, dir;
2791 if (objset_get_dnode(spa, &spa->spa_mos,
2792 DMU_POOL_DIRECTORY_OBJECT, &dir))
2794 if (zap_lookup(spa, &dir, DMU_POOL_ROOT_DATASET, sizeof (dir_obj),
2800 if (objset_get_dnode(spa, &spa->spa_mos, dir_obj, &dir))
2802 dd = (dsl_dir_phys_t *)&dir.dn_bonus;
2806 /* Actual loop condition #1. */
2812 memcpy(element, p, q - p);
2813 element[q - p] = '\0';
2820 child_dir_zapobj = dd->dd_child_dir_zapobj;
2821 if (objset_get_dnode(spa, &spa->spa_mos, child_dir_zapobj,
2822 &child_dir_zap) != 0)
2825 /* Actual loop condition #2. */
2826 if (zap_lookup(spa, &child_dir_zap, element, sizeof (dir_obj),
2831 *objnum = dd->dd_head_dataset_obj;
2837 zfs_list_dataset(const spa_t *spa, uint64_t objnum/*, int pos, char *entry*/)
2839 uint64_t dir_obj, child_dir_zapobj;
2840 dnode_phys_t child_dir_zap, dir, dataset;
2841 dsl_dataset_phys_t *ds;
2844 if (objset_get_dnode(spa, &spa->spa_mos, objnum, &dataset)) {
2845 printf("ZFS: can't find dataset %ju\n", (uintmax_t)objnum);
2848 ds = (dsl_dataset_phys_t *)&dataset.dn_bonus;
2849 dir_obj = ds->ds_dir_obj;
2851 if (objset_get_dnode(spa, &spa->spa_mos, dir_obj, &dir)) {
2852 printf("ZFS: can't find dirobj %ju\n", (uintmax_t)dir_obj);
2855 dd = (dsl_dir_phys_t *)&dir.dn_bonus;
2857 child_dir_zapobj = dd->dd_child_dir_zapobj;
2858 if (objset_get_dnode(spa, &spa->spa_mos, child_dir_zapobj,
2859 &child_dir_zap) != 0) {
2860 printf("ZFS: can't find child zap %ju\n", (uintmax_t)dir_obj);
2864 return (zap_list(spa, &child_dir_zap) != 0);
2868 zfs_callback_dataset(const spa_t *spa, uint64_t objnum,
2869 int (*callback)(const char *, uint64_t))
2871 uint64_t dir_obj, child_dir_zapobj;
2872 dnode_phys_t child_dir_zap, dir, dataset;
2873 dsl_dataset_phys_t *ds;
2879 err = objset_get_dnode(spa, &spa->spa_mos, objnum, &dataset);
2881 printf("ZFS: can't find dataset %ju\n", (uintmax_t)objnum);
2884 ds = (dsl_dataset_phys_t *)&dataset.dn_bonus;
2885 dir_obj = ds->ds_dir_obj;
2887 err = objset_get_dnode(spa, &spa->spa_mos, dir_obj, &dir);
2889 printf("ZFS: can't find dirobj %ju\n", (uintmax_t)dir_obj);
2892 dd = (dsl_dir_phys_t *)&dir.dn_bonus;
2894 child_dir_zapobj = dd->dd_child_dir_zapobj;
2895 err = objset_get_dnode(spa, &spa->spa_mos, child_dir_zapobj,
2898 printf("ZFS: can't find child zap %ju\n", (uintmax_t)dir_obj);
2902 size = child_dir_zap.dn_datablkszsec << SPA_MINBLOCKSHIFT;
2905 err = dnode_read(spa, &child_dir_zap, 0, zap, size);
2909 if (zap->zap_block_type == ZBT_MICRO)
2910 err = mzap_list((const mzap_phys_t *)zap, size,
2913 err = fzap_list(spa, &child_dir_zap, zap, callback);
/*
 * Find the object set given the object number of its dataset object
 * and return its details in *objset.
 */
static int
zfs_mount_dataset(const spa_t *spa, uint64_t objnum, objset_phys_t *objset)
{
	dnode_phys_t dataset;
	dsl_dataset_phys_t *ds;

	if (objset_get_dnode(spa, &spa->spa_mos, objnum, &dataset)) {
		printf("ZFS: can't find dataset %ju\n", (uintmax_t)objnum);
		return (EIO);
	}

	ds = (dsl_dataset_phys_t *)&dataset.dn_bonus;
	if (zio_read(spa, &ds->ds_bp, objset)) {
		printf("ZFS: can't read object set for dataset %ju\n",
		    (uintmax_t)objnum);
		return (EIO);
	}

	return (0);
}
/*
 * Find the dataset pointed to by the BOOTFS property, or the root
 * dataset if there is none, and return its object number in *objid.
 */
static int
zfs_get_root(const spa_t *spa, uint64_t *objid)
{
	dnode_phys_t dir, propdir;
	uint64_t props, bootfs, root;

	*objid = 0;

	/*
	 * Start with the MOS directory object.
	 */
	if (objset_get_dnode(spa, &spa->spa_mos,
	    DMU_POOL_DIRECTORY_OBJECT, &dir)) {
		printf("ZFS: can't read MOS object directory\n");
		return (EIO);
	}

	/*
	 * Lookup the pool_props and see if we can find a bootfs.
	 */
	if (zap_lookup(spa, &dir, DMU_POOL_PROPS,
	    sizeof(props), 1, &props) == 0 &&
	    objset_get_dnode(spa, &spa->spa_mos, props, &propdir) == 0 &&
	    zap_lookup(spa, &propdir, "bootfs",
	    sizeof(bootfs), 1, &bootfs) == 0 && bootfs != 0) {
		*objid = bootfs;
		return (0);
	}

	/*
	 * Lookup the root dataset directory.
	 */
	if (zap_lookup(spa, &dir, DMU_POOL_ROOT_DATASET,
	    sizeof(root), 1, &root) ||
	    objset_get_dnode(spa, &spa->spa_mos, root, &dir)) {
		printf("ZFS: can't find root dsl_dir\n");
		return (EIO);
	}

	/*
	 * Use the information from the dataset directory's bonus buffer
	 * to find the dataset object and from that the object set itself.
	 */
	dsl_dir_phys_t *dd = (dsl_dir_phys_t *)&dir.dn_bonus;
	*objid = dd->dd_head_dataset_obj;
	return (0);
}
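/*
 * Mount the root (or explicitly given) dataset of the pool: locate
 * its object set and record it, along with the dataset object number,
 * in *mount.
 */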
static int
zfs_mount(const spa_t *spa, uint64_t rootobj, struct zfsmount *mount)
{

	mount->spa = spa;

	/*
	 * Find the root object set if not explicitly provided.
	 */
	if (rootobj == 0 && zfs_get_root(spa, &rootobj)) {
		printf("ZFS: can't find root filesystem\n");
		return (EIO);
	}

	if (zfs_mount_dataset(spa, rootobj, &mount->objset)) {
		printf("ZFS: can't open root filesystem\n");
		return (EIO);
	}

	mount->rootobj = rootobj;

	return (0);
}
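/*
 * Illustrative call sequence (hypothetical, not part of this file):
 * once a pool has been probed and initialized, a caller could open
 * its boot dataset and look up a file roughly as follows:
 *
 *	struct zfsmount mnt;
 *	dnode_phys_t dn;
 *
 *	if (zfs_spa_init(spa) == 0 &&
 *	    zfs_mount(spa, 0, &mnt) == 0 &&
 *	    zfs_lookup(&mnt, "/boot/loader.conf", &dn) == 0)
 *		... the file contents can then be read with dnode_read().
 */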
/*
 * Callback function for feature name checks.
 */
static int
check_feature(const char *name, uint64_t value)
{
	int i;

	if (value == 0)
		return (0);
	if (name[0] == '\0')
		return (0);

	for (i = 0; features_for_read[i] != NULL; i++) {
		if (strcmp(name, features_for_read[i]) == 0)
			return (0);
	}
	printf("ZFS: unsupported feature: %s\n", name);
	return (EIO);
}
/*
 * Checks whether the MOS features that are active are supported.
 */
static int
check_mos_features(const spa_t *spa)
{
	dnode_phys_t dir;
	zap_phys_t *zap;
	uint64_t objnum;
	size_t size;
	int rc;

	if ((rc = objset_get_dnode(spa, &spa->spa_mos, DMU_OT_OBJECT_DIRECTORY,
	    &dir)) != 0)
		return (rc);
	if ((rc = zap_lookup(spa, &dir, DMU_POOL_FEATURES_FOR_READ,
	    sizeof (objnum), 1, &objnum)) != 0) {
		/*
		 * It is an older pool without features.  As we have
		 * already tested the label, just return without raising
		 * an error.
		 */
		return (0);
	}

	if ((rc = objset_get_dnode(spa, &spa->spa_mos, objnum, &dir)) != 0)
		return (rc);

	if (dir.dn_type != DMU_OTN_ZAP_METADATA)
		return (EIO);

	size = dir.dn_datablkszsec << SPA_MINBLOCKSHIFT;
	zap = malloc(size);
	if (zap == NULL)
		return (ENOMEM);

	if (dnode_read(spa, &dir, 0, zap, size)) {
		free(zap);
		return (EIO);
	}

	if (zap->zap_block_type == ZBT_MICRO)
		rc = mzap_list((const mzap_phys_t *)zap, size, check_feature);
	else
		rc = fzap_list(spa, &dir, zap, check_feature);
	free(zap);

	return (rc);
}
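/*
 * Read a packed nvlist object from the MOS and import it.  The first
 * four bytes of the buffer form the nvlist stream header: encoding
 * method, endianness and two reserved bytes; the payload follows.
 */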
static int
load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
{
	dnode_phys_t dir;
	size_t size;
	int rc;
	unsigned char *nv;

	*value = NULL;
	if ((rc = objset_get_dnode(spa, &spa->spa_mos, obj, &dir)) != 0)
		return (rc);
	if (dir.dn_type != DMU_OT_PACKED_NVLIST &&
	    dir.dn_bonustype != DMU_OT_PACKED_NVLIST_SIZE) {
		return (EIO);
	}

	if (dir.dn_bonuslen != sizeof (uint64_t))
		return (EIO);

	size = *(uint64_t *)DN_BONUS(&dir);
	nv = malloc(size);
	if (nv == NULL)
		return (ENOMEM);

	rc = dnode_read(spa, &dir, 0, nv, size);
	if (rc != 0) {
		free(nv);
		return (rc);
	}
	*value = nvlist_import(nv + 4, nv[0], nv[1]);
	free(nv);
	return (rc);
}
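/*
 * Initialize the spa from its MOS: verify the object set type, fetch
 * the optional checksum salt, check the read-compatible feature flags
 * and update the vdev tree from the pool config nvlist.
 */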
static int
zfs_spa_init(spa_t *spa)
{
	dnode_phys_t dir;
	uint64_t config_object;
	nvlist_t *nvlist;
	int rc;

	if (zio_read(spa, &spa->spa_uberblock.ub_rootbp, &spa->spa_mos)) {
		printf("ZFS: can't read MOS of pool %s\n", spa->spa_name);
		return (EIO);
	}
	if (spa->spa_mos.os_type != DMU_OST_META) {
		printf("ZFS: corrupted MOS of pool %s\n", spa->spa_name);
		return (EIO);
	}

	if (objset_get_dnode(spa, &spa->spa_mos, DMU_POOL_DIRECTORY_OBJECT,
	    &dir)) {
		printf("ZFS: failed to read pool %s directory object\n",
		    spa->spa_name);
		return (EIO);
	}

	/* This is allowed to fail, older pools do not have salt. */
	rc = zap_lookup(spa, &dir, DMU_POOL_CHECKSUM_SALT, 1,
	    sizeof (spa->spa_cksum_salt.zcs_bytes),
	    spa->spa_cksum_salt.zcs_bytes);

	rc = check_mos_features(spa);
	if (rc != 0) {
		printf("ZFS: pool %s is not supported\n", spa->spa_name);
		return (rc);
	}

	rc = zap_lookup(spa, &dir, DMU_POOL_CONFIG,
	    sizeof (config_object), 1, &config_object);
	if (rc != 0) {
		printf("ZFS: cannot read MOS %s\n", DMU_POOL_CONFIG);
		return (rc);
	}
	rc = load_nvlist(spa, config_object, &nvlist);
	if (rc != 0)
		return (rc);

	/*
	 * Update vdevs from MOS config.  Note, we do skip encoding
	 * bytes here; see also vdev_label_read_config().
	 */
	rc = vdev_init_from_nvlist(spa, nvlist);
	nvlist_destroy(nvlist);
	return (rc);
}
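/*
 * Fill in stat information for a dnode.  Pre-SA pools keep a
 * znode_phys_t in the bonus buffer; newer pools store system
 * attributes there instead, possibly spilled into a separate block.
 */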
static int
zfs_dnode_stat(const spa_t *spa, dnode_phys_t *dn, struct stat *sb)
{

	if (dn->dn_bonustype != DMU_OT_SA) {
		znode_phys_t *zp = (znode_phys_t *)dn->dn_bonus;

		sb->st_mode = zp->zp_mode;
		sb->st_uid = zp->zp_uid;
		sb->st_gid = zp->zp_gid;
		sb->st_size = zp->zp_size;
	} else {
		sa_hdr_phys_t *sahdrp;
		int hdrsize;
		size_t size = 0;
		void *buf = NULL;

		if (dn->dn_bonuslen != 0)
			sahdrp = (sa_hdr_phys_t *)DN_BONUS(dn);
		else {
			if ((dn->dn_flags & DNODE_FLAG_SPILL_BLKPTR) != 0) {
				blkptr_t *bp = DN_SPILL_BLKPTR(dn);
				int error;

				size = BP_GET_LSIZE(bp);
				buf = malloc(size);
				if (buf == NULL)
					error = ENOMEM;
				else
					error = zio_read(spa, bp, buf);

				if (error != 0) {
					free(buf);
					return (error);
				}
				sahdrp = buf;
			} else {
				return (EIO);
			}
		}
		hdrsize = SA_HDR_SIZE(sahdrp);
		sb->st_mode = *(uint64_t *)((char *)sahdrp + hdrsize +
		    SA_MODE_OFFSET);
		sb->st_uid = *(uint64_t *)((char *)sahdrp + hdrsize +
		    SA_UID_OFFSET);
		sb->st_gid = *(uint64_t *)((char *)sahdrp + hdrsize +
		    SA_GID_OFFSET);
		sb->st_size = *(uint64_t *)((char *)sahdrp + hdrsize +
		    SA_SIZE_OFFSET);
		free(buf);
	}

	return (0);
}
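/*
 * Read the target of a symbolic link into path.  The link value is
 * stored after the znode (or among the system attributes) in the
 * bonus buffer when it fits, otherwise in the dnode's data blocks or
 * an SA spill block.
 */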
static int
zfs_dnode_readlink(const spa_t *spa, dnode_phys_t *dn, char *path, size_t psize)
{
	int rc = 0;

	if (dn->dn_bonustype == DMU_OT_SA) {
		sa_hdr_phys_t *sahdrp = NULL;
		size_t size = 0;
		void *buf = NULL;
		int hdrsize;
		char *p;

		if (dn->dn_bonuslen != 0) {
			sahdrp = (sa_hdr_phys_t *)DN_BONUS(dn);
		} else {
			blkptr_t *bp;

			if ((dn->dn_flags & DNODE_FLAG_SPILL_BLKPTR) == 0)
				return (EIO);
			bp = DN_SPILL_BLKPTR(dn);

			size = BP_GET_LSIZE(bp);
			buf = malloc(size);
			if (buf == NULL)
				rc = ENOMEM;
			else
				rc = zio_read(spa, bp, buf);
			if (rc != 0) {
				free(buf);
				return (rc);
			}
			sahdrp = buf;
		}
		hdrsize = SA_HDR_SIZE(sahdrp);
		p = (char *)((uintptr_t)sahdrp + hdrsize + SA_SYMLINK_OFFSET);
		memcpy(path, p, psize);
		free(buf);
		return (0);
	}

	/*
	 * The second test is purely to silence a bogus compiler
	 * warning about accessing past the end of dn_bonus.
	 */
	if (psize + sizeof(znode_phys_t) <= dn->dn_bonuslen &&
	    sizeof(znode_phys_t) <= sizeof(dn->dn_bonus)) {
		memcpy(path, &dn->dn_bonus[sizeof(znode_phys_t)], psize);
	} else {
		rc = dnode_read(spa, dn, 0, path, psize);
	}
	return (rc);
}
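/*
 * Stack of directory object numbers visited during a lookup, used to
 * step back to the parent directory when a ".." component is seen.
 */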
struct obj_list {
	uint64_t		objnum;
	STAILQ_ENTRY(obj_list)	entry;
};
/*
 * Lookup a file and return its dnode.
 */
static int
zfs_lookup(const struct zfsmount *mount, const char *upath, dnode_phys_t *dnode)
{
	int rc;
	uint64_t objnum;
	const spa_t *spa;
	dnode_phys_t dn;
	const char *p, *q;
	char element[256];
	char path[1024];
	int symlinks_followed = 0;
	struct stat sb;
	struct obj_list *entry, *tentry;
	STAILQ_HEAD(, obj_list) on_cache = STAILQ_HEAD_INITIALIZER(on_cache);

	spa = mount->spa;
	if (mount->objset.os_type != DMU_OST_ZFS) {
		printf("ZFS: unexpected object set type %ju\n",
		    (uintmax_t)mount->objset.os_type);
		return (EIO);
	}

	if ((entry = malloc(sizeof(struct obj_list))) == NULL)
		return (ENOMEM);

	/*
	 * Get the root directory dnode.
	 */
	rc = objset_get_dnode(spa, &mount->objset, MASTER_NODE_OBJ, &dn);
	if (rc) {
		free(entry);
		return (rc);
	}

	rc = zap_lookup(spa, &dn, ZFS_ROOT_OBJ, sizeof(objnum), 1, &objnum);
	if (rc) {
		free(entry);
		return (rc);
	}

	entry->objnum = objnum;
	STAILQ_INSERT_HEAD(&on_cache, entry, entry);
	rc = objset_get_dnode(spa, &mount->objset, objnum, &dn);
	if (rc != 0)
		goto done;

	p = upath;
	while (p && *p) {
		rc = objset_get_dnode(spa, &mount->objset, objnum, &dn);
		if (rc != 0)
			goto done;

		while (*p == '/')
			p++;
		if (*p == '\0')
			break;
		q = p;
		while (*q != '\0' && *q != '/')
			q++;

		/* Skip "." components. */
		if (p + 1 == q && p[0] == '.') {
			p++;
			continue;
		}
		/* Handle ".." by popping the cached parent directory. */
		if (p + 2 == q && p[0] == '.' && p[1] == '.') {
			p += 2;
			if (STAILQ_FIRST(&on_cache) ==
			    STAILQ_LAST(&on_cache, obj_list, entry)) {
				rc = ENOENT;
				goto done;
			}
			entry = STAILQ_FIRST(&on_cache);
			STAILQ_REMOVE_HEAD(&on_cache, entry);
			free(entry);
			objnum = (STAILQ_FIRST(&on_cache))->objnum;
			continue;
		}
		if (q - p + 1 > sizeof(element)) {
			rc = ENAMETOOLONG;
			goto done;
		}
		memcpy(element, p, q - p);
		element[q - p] = '\0';
		p = q;

		if ((rc = zfs_dnode_stat(spa, &dn, &sb)) != 0)
			goto done;
		if (!S_ISDIR(sb.st_mode)) {
			rc = ENOTDIR;
			goto done;
		}

		rc = zap_lookup(spa, &dn, element, sizeof (objnum), 1, &objnum);
		if (rc)
			goto done;
		objnum = ZFS_DIRENT_OBJ(objnum);

		if ((entry = malloc(sizeof(struct obj_list))) == NULL) {
			rc = ENOMEM;
			goto done;
		}
		entry->objnum = objnum;
		STAILQ_INSERT_HEAD(&on_cache, entry, entry);
		rc = objset_get_dnode(spa, &mount->objset, objnum, &dn);
		if (rc)
			goto done;
		/*
		 * Check for symlink.
		 */
		rc = zfs_dnode_stat(spa, &dn, &sb);
		if (rc)
			goto done;
		if (S_ISLNK(sb.st_mode)) {
			if (symlinks_followed > 10) {
				rc = EMLINK;
				goto done;
			}
			symlinks_followed++;

			/*
			 * Read the link value and copy the tail of our
			 * current path onto the end.
			 */
			if (sb.st_size + strlen(p) + 1 > sizeof(path)) {
				rc = ENAMETOOLONG;
				goto done;
			}
			strcpy(&path[sb.st_size], p);

			rc = zfs_dnode_readlink(spa, &dn, path, sb.st_size);
			if (rc != 0)
				goto done;

			/*
			 * Restart with the new path, starting either at
			 * the root or at the parent depending whether or
			 * not the link is relative.
			 */
			p = path;
			if (*p == '/') {
				while (STAILQ_FIRST(&on_cache) !=
				    STAILQ_LAST(&on_cache, obj_list, entry)) {
					entry = STAILQ_FIRST(&on_cache);
					STAILQ_REMOVE_HEAD(&on_cache, entry);
					free(entry);
				}
			} else {
				entry = STAILQ_FIRST(&on_cache);
				STAILQ_REMOVE_HEAD(&on_cache, entry);
				free(entry);
			}
			objnum = (STAILQ_FIRST(&on_cache))->objnum;
		}
	}

	*dnode = dn;
done:
	STAILQ_FOREACH_SAFE(entry, &on_cache, entry, tentry)
		free(entry);
	return (rc);
}