/*
 * Copyright (c) 2007 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 *	Stand-alone ZFS file reader.
 */

#include <sys/endian.h>
#include <sys/stdint.h>

#include <machine/_inttypes.h>
static struct zfsmount zfsmount __unused;

/*
 * The indirect_child_t represents the vdev that we will read from, when we
 * need to read all copies of the data (e.g. for scrub or reconstruction).
 * For plain (non-mirror) top-level vdevs (i.e. is_vdev is not a mirror),
 * ic_vdev is the same as is_vdev. However, for mirror top-level vdevs,
 * ic_vdev is a child of the mirror.
 */
typedef struct indirect_child {
    void *ic_data;
    vdev_t *ic_vdev;
} indirect_child_t;

/*
 * The indirect_split_t represents one mapped segment of an i/o to the
 * indirect vdev. For non-split (contiguously-mapped) blocks, there will be
 * only one indirect_split_t, with is_split_offset==0 and is_size==io_size.
 * For split blocks, there will be several of these.
 */
typedef struct indirect_split {
    list_node_t is_node;            /* link on iv_splits */

    /*
     * is_split_offset is the offset into the i/o.
     * This is the sum of the previous splits' is_size's.
     */
    uint64_t is_split_offset;

    vdev_t *is_vdev;                /* top-level vdev */
    uint64_t is_target_offset;      /* offset on is_vdev */
    uint64_t is_size;
    int is_children;                /* number of entries in is_child[] */

    /*
     * is_good_child is the child that we are currently using to
     * attempt reconstruction.
     */
    int is_good_child;

    indirect_child_t is_child[1];   /* variable-length */
} indirect_split_t;

/*
 * The indirect_vsd_t is associated with each i/o to the indirect vdev.
 * It is the "Vdev-Specific Data" in the zio_t's io_vsd.
 */
typedef struct indirect_vsd {
    boolean_t iv_split_block;
    boolean_t iv_reconstruct;

    list_t iv_splits;               /* list of indirect_split_t's */
} indirect_vsd_t;
/*
 * List of all vdevs, chained through v_alllink.
 */
static vdev_list_t zfs_vdevs;

/*
 * List of ZFS features supported for read.
 */
static const char *features_for_read[] = {
    "org.illumos:lz4_compress",
    "com.delphix:hole_birth",
    "com.delphix:extensible_dataset",
    "com.delphix:embedded_data",
    "org.open-zfs:large_blocks",
    "org.illumos:sha512",
    "org.zfsonlinux:large_dnode",
    "com.joyent:multi_vdev_crash_dump",
    "com.delphix:spacemap_histogram",
    "com.delphix:zpool_checkpoint",
    "com.delphix:spacemap_v2",
    "com.datto:encryption",
    "org.zfsonlinux:allocation_classes",
    "com.datto:resilver_defer",
    "com.delphix:device_removal",
    "com.delphix:obsolete_counts",
    "com.intel:allocation_classes",
    NULL
};
/*
 * List of all pools, chained through spa_link.
 */
static spa_list_t zfs_pools;

static const dnode_phys_t *dnode_cache_obj;
static uint64_t dnode_cache_bn;
static char *dnode_cache_buf;

static char *zfs_temp_buf, *zfs_temp_end, *zfs_temp_ptr;

#define TEMP_SIZE       (1024 * 1024)

static int zio_read(const spa_t *spa, const blkptr_t *bp, void *buf);
static int zfs_get_root(const spa_t *spa, uint64_t *objid);
static int zfs_rlookup(const spa_t *spa, uint64_t objnum, char *result);
static int zap_lookup(const spa_t *spa, const dnode_phys_t *dnode,
    const char *name, uint64_t integer_size, uint64_t num_integers,
    void *value);
static int objset_get_dnode(const spa_t *, const objset_phys_t *, uint64_t,
    dnode_phys_t *);
static int dnode_read(const spa_t *, const dnode_phys_t *, off_t, void *,
    size_t);
static int vdev_indirect_read(vdev_t *, const blkptr_t *, void *, off_t,
    size_t);
static int vdev_mirror_read(vdev_t *, const blkptr_t *, void *, off_t, size_t);
vdev_indirect_mapping_t *vdev_indirect_mapping_open(spa_t *, objset_phys_t *,
    uint64_t);
vdev_indirect_mapping_entry_phys_t *
    vdev_indirect_mapping_duplicate_adjacent_entries(vdev_t *, uint64_t,
    uint64_t, uint64_t *);

static void
zfs_init(void)
{
    STAILQ_INIT(&zfs_vdevs);
    STAILQ_INIT(&zfs_pools);

    zfs_temp_buf = malloc(TEMP_SIZE);
    zfs_temp_end = zfs_temp_buf + TEMP_SIZE;
    zfs_temp_ptr = zfs_temp_buf;
    dnode_cache_buf = malloc(SPA_MAXBLOCKSIZE);
}
static char *
zfs_alloc(size_t size)
{
    char *ptr;

    if (zfs_temp_ptr + size > zfs_temp_end) {
        panic("ZFS: out of temporary buffer space");
    }
    ptr = zfs_temp_ptr;
    zfs_temp_ptr += size;

    return (ptr);
}

static void
zfs_free(void *ptr, size_t size)
{

    zfs_temp_ptr -= size;
    if (zfs_temp_ptr != ptr) {
        panic("ZFS: zfs_alloc()/zfs_free() mismatch");
    }
}
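/*
 * Note: zfs_alloc() and zfs_free() form a simple LIFO bump allocator
 * over zfs_temp_buf, as the pointer check above implies: frees must
 * happen in reverse allocation order.  Illustrative sketch (sizes are
 * hypothetical):
 *
 *      void *a = zfs_alloc(512);
 *      void *b = zfs_alloc(1024);
 *      zfs_free(b, 1024);
 *      zfs_free(a, 512);
 */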
static void
xdr_int(const unsigned char **xdr, int *ip)
{
    *ip = (int)be32dec(*xdr);
    (*xdr) += 4;
}

static void
xdr_u_int(const unsigned char **xdr, u_int *ip)
{
    *ip = be32dec(*xdr);
    (*xdr) += 4;
}

static void
xdr_uint64_t(const unsigned char **xdr, uint64_t *lp)
{
    u_int hi, lo;

    xdr_u_int(xdr, &hi);
    xdr_u_int(xdr, &lo);
    *lp = (((uint64_t)hi) << 32) | lo;
}
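/*
 * XDR-encoded nvlist layout, as walked by nvlist_find() below (a sketch
 * reconstructed from the parsing code): each nvpair starts with two
 * 4-byte big-endian ints, encoded_size and decoded_size (both zero on
 * the terminating pair), followed by the name length, the name itself
 * padded to a 4-byte boundary, the pair type, the element count, and
 * finally the value.
 */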
static int
nvlist_find(const unsigned char *nvlist, const char *name, int type,
    int *elementsp, void *valuep)
{
    const unsigned char *p, *pair;
    int junk;
    int encoded_size, decoded_size;

    p = nvlist;
    xdr_int(&p, &junk);
    xdr_int(&p, &junk);

    pair = p;
    xdr_int(&p, &encoded_size);
    xdr_int(&p, &decoded_size);
    while (encoded_size && decoded_size) {
        int namelen, pairtype, elements;
        const char *pairname;

        xdr_int(&p, &namelen);
        pairname = (const char *)p;
        p += roundup(namelen, 4);
        xdr_int(&p, &pairtype);

        if (memcmp(name, pairname, namelen) == 0 && type == pairtype) {
            xdr_int(&p, &elements);
            if (elementsp != NULL)
                *elementsp = elements;
            if (type == DATA_TYPE_UINT64) {
                xdr_uint64_t(&p, (uint64_t *)valuep);
                return (0);
            } else if (type == DATA_TYPE_STRING) {
                int len;

                xdr_int(&p, &len);
                (*(const char **)valuep) = (const char *)p;
                return (0);
            } else if (type == DATA_TYPE_NVLIST ||
                type == DATA_TYPE_NVLIST_ARRAY) {
                (*(const unsigned char **)valuep) =
                    (const unsigned char *)p;
                return (0);
            }
        } else {
            /*
             * Not the pair we are looking for, skip to the
             * next one.
             */
            p = pair + encoded_size;
        }

        pair = p;
        xdr_int(&p, &encoded_size);
        xdr_int(&p, &decoded_size);
    }

    return (EIO);
}
static int
nvlist_check_features_for_read(const unsigned char *nvlist)
{
    const unsigned char *p, *pair;
    int junk;
    int encoded_size, decoded_size;
    int rc;

    rc = 0;

    p = nvlist;
    xdr_int(&p, &junk);
    xdr_int(&p, &junk);

    pair = p;
    xdr_int(&p, &encoded_size);
    xdr_int(&p, &decoded_size);
    while (encoded_size && decoded_size) {
        int namelen, pairtype;
        const char *pairname;
        int i, found;

        found = 0;

        xdr_int(&p, &namelen);
        pairname = (const char *)p;
        p += roundup(namelen, 4);
        xdr_int(&p, &pairtype);

        for (i = 0; features_for_read[i] != NULL; i++) {
            if (memcmp(pairname, features_for_read[i],
                namelen) == 0) {
                found = 1;
                break;
            }
        }

        if (!found) {
            printf("ZFS: unsupported feature: %s\n", pairname);
            rc = EIO;
        }

        p = pair + encoded_size;

        pair = p;
        xdr_int(&p, &encoded_size);
        xdr_int(&p, &decoded_size);
    }

    return (rc);
}
/*
 * Return the next nvlist in an nvlist array.
 */
static const unsigned char *
nvlist_next(const unsigned char *nvlist)
{
    const unsigned char *p, *pair;
    int junk;
    int encoded_size, decoded_size;

    p = nvlist;
    xdr_int(&p, &junk);
    xdr_int(&p, &junk);

    pair = p;
    xdr_int(&p, &encoded_size);
    xdr_int(&p, &decoded_size);
    while (encoded_size && decoded_size) {
        p = pair + encoded_size;

        pair = p;
        xdr_int(&p, &encoded_size);
        xdr_int(&p, &decoded_size);
    }

    return (p);
}
static const unsigned char *
nvlist_print(const unsigned char *nvlist, unsigned int indent)
{
    static const char *typenames[] = {
        "DATA_TYPE_UNKNOWN",
        "DATA_TYPE_BOOLEAN",
        "DATA_TYPE_BYTE",
        "DATA_TYPE_INT16",
        "DATA_TYPE_UINT16",
        "DATA_TYPE_INT32",
        "DATA_TYPE_UINT32",
        "DATA_TYPE_INT64",
        "DATA_TYPE_UINT64",
        "DATA_TYPE_STRING",
        "DATA_TYPE_BYTE_ARRAY",
        "DATA_TYPE_INT16_ARRAY",
        "DATA_TYPE_UINT16_ARRAY",
        "DATA_TYPE_INT32_ARRAY",
        "DATA_TYPE_UINT32_ARRAY",
        "DATA_TYPE_INT64_ARRAY",
        "DATA_TYPE_UINT64_ARRAY",
        "DATA_TYPE_STRING_ARRAY",
        "DATA_TYPE_HRTIME",
        "DATA_TYPE_NVLIST",
        "DATA_TYPE_NVLIST_ARRAY",
        "DATA_TYPE_BOOLEAN_VALUE",
        "DATA_TYPE_INT8",
        "DATA_TYPE_UINT8",
        "DATA_TYPE_BOOLEAN_ARRAY",
        "DATA_TYPE_INT8_ARRAY",
        "DATA_TYPE_UINT8_ARRAY"
    };

    unsigned int i;
    int j;
    const unsigned char *p, *pair;
    int junk;
    int encoded_size, decoded_size;

    p = nvlist;
    xdr_int(&p, &junk);
    xdr_int(&p, &junk);

    pair = p;
    xdr_int(&p, &encoded_size);
    xdr_int(&p, &decoded_size);
    while (encoded_size && decoded_size) {
        int namelen, pairtype, elements;
        const char *pairname;

        xdr_int(&p, &namelen);
        pairname = (const char *)p;
        p += roundup(namelen, 4);
        xdr_int(&p, &pairtype);

        for (i = 0; i < indent; i++)
            printf(" ");
        printf("%s %s", typenames[pairtype], pairname);

        xdr_int(&p, &elements);
        switch (pairtype) {
        case DATA_TYPE_UINT64: {
            uint64_t val;

            xdr_uint64_t(&p, &val);
            printf(" = 0x%jx\n", (uintmax_t)val);
            break;
        }

        case DATA_TYPE_STRING: {
            int len;

            xdr_int(&p, &len);
            printf(" = \"%s\"\n", p);
            break;
        }

        case DATA_TYPE_NVLIST:
            printf("\n");
            nvlist_print(p, indent + 1);
            break;

        case DATA_TYPE_NVLIST_ARRAY:
            for (j = 0; j < elements; j++) {
                p = nvlist_print(p, indent + 1);
                if (j != elements - 1) {
                    for (i = 0; i < indent; i++)
                        printf(" ");
                    printf("%s %s", typenames[pairtype],
                        pairname);
                }
            }
            break;

        default:
            printf("\n");
        }

        p = pair + encoded_size;

        pair = p;
        xdr_int(&p, &encoded_size);
        xdr_int(&p, &decoded_size);
    }

    return (p);
}
static int
vdev_read_phys(vdev_t *vdev, const blkptr_t *bp, void *buf,
    off_t offset, size_t size)
{
    size_t psize;
    int rc;

    if (!vdev->v_phys_read)
        return (EIO);

    if (bp) {
        psize = BP_GET_PSIZE(bp);
    } else {
        psize = size;
    }

    rc = vdev->v_phys_read(vdev, vdev->v_read_priv, offset, buf, psize);
    if (rc == 0) {
        if (bp != NULL)
            rc = zio_checksum_verify(vdev->v_spa, bp, buf);
    }

    return (rc);
}
typedef struct remap_segment {
    vdev_t *rs_vd;
    uint64_t rs_offset;
    uint64_t rs_asize;
    uint64_t rs_split_offset;
    list_node_t rs_node;
} remap_segment_t;

static remap_segment_t *
rs_alloc(vdev_t *vd, uint64_t offset, uint64_t asize, uint64_t split_offset)
{
    remap_segment_t *rs = malloc(sizeof (remap_segment_t));

    if (rs != NULL) {
        rs->rs_vd = vd;
        rs->rs_offset = offset;
        rs->rs_asize = asize;
        rs->rs_split_offset = split_offset;
    }

    return (rs);
}
vdev_indirect_mapping_t *
vdev_indirect_mapping_open(spa_t *spa, objset_phys_t *os,
    uint64_t mapping_object)
{
    vdev_indirect_mapping_t *vim;
    vdev_indirect_mapping_phys_t *vim_phys;
    int rc;

    vim = calloc(1, sizeof (*vim));
    if (vim == NULL)
        return (NULL);

    vim->vim_dn = calloc(1, sizeof (*vim->vim_dn));
    if (vim->vim_dn == NULL) {
        free(vim);
        return (NULL);
    }

    rc = objset_get_dnode(spa, os, mapping_object, vim->vim_dn);
    if (rc != 0) {
        free(vim->vim_dn);
        free(vim);
        return (NULL);
    }

    vim->vim_spa = spa;
    vim->vim_phys = malloc(sizeof (*vim->vim_phys));
    if (vim->vim_phys == NULL) {
        free(vim->vim_dn);
        free(vim);
        return (NULL);
    }

    vim_phys = (vdev_indirect_mapping_phys_t *)DN_BONUS(vim->vim_dn);
    *vim->vim_phys = *vim_phys;

    vim->vim_objset = os;
    vim->vim_object = mapping_object;
    vim->vim_entries = NULL;

    vim->vim_havecounts =
        (vim->vim_dn->dn_bonuslen > VDEV_INDIRECT_MAPPING_SIZE_V0);

    return (vim);
}
/*
 * Compare an offset with an indirect mapping entry; there are three
 * possible scenarios:
 *
 *     1. The offset is "less than" the mapping entry; meaning the
 *     offset is less than the source offset of the mapping entry. In
 *     this case, there is no overlap between the offset and the
 *     mapping entry and -1 will be returned.
 *
 *     2. The offset is "greater than" the mapping entry; meaning the
 *     offset is greater than the mapping entry's source offset plus
 *     the entry's size. In this case, there is no overlap between
 *     the offset and the mapping entry and 1 will be returned.
 *
 *     NOTE: If the offset is actually equal to the entry's offset
 *     plus size, this is considered to be "greater" than the entry,
 *     and this case applies (i.e. 1 will be returned). Thus, the
 *     entry's "range" can be considered to be inclusive at its
 *     start, but exclusive at its end: e.g. [src, src + size).
 *
 *     3. The last case to consider is if the offset actually falls
 *     within the mapping entry's range. If this is the case, the
 *     offset is considered to be "equal to" the mapping entry and
 *     0 will be returned.
 *
 *     NOTE: If the offset is equal to the entry's source offset,
 *     this case applies and 0 will be returned. If the offset is
 *     equal to the entry's source plus its size, this case does
 *     *not* apply (see "NOTE" above for scenario 2), and 1 will be
 *     returned.
 */
static int
dva_mapping_overlap_compare(const void *v_key, const void *v_array_elem)
{
    const uint64_t *key = v_key;
    const vdev_indirect_mapping_entry_phys_t *array_elem =
        v_array_elem;
    uint64_t src_offset = DVA_MAPPING_GET_SRC_OFFSET(array_elem);

    if (*key < src_offset) {
        return (-1);
    } else if (*key < src_offset + DVA_GET_ASIZE(&array_elem->vimep_dst)) {
        return (0);
    } else {
        return (1);
    }
}
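/*
 * Illustrative example (hypothetical numbers): for a mapping entry
 * with source offset 0x1000 and asize 0x2000, its range is
 * [0x1000, 0x3000).  A key of 0xfff compares "less" (-1), keys
 * 0x1000 and 0x2fff compare "equal" (0), and 0x3000 compares
 * "greater" (1), matching the three scenarios above.
 */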
/*
 * Return array entry.
 */
static vdev_indirect_mapping_entry_phys_t *
vdev_indirect_mapping_entry(vdev_indirect_mapping_t *vim, uint64_t index)
{
    uint64_t size;
    off_t offset = 0;
    int rc;

    if (vim->vim_phys->vimp_num_entries == 0)
        return (NULL);

    if (vim->vim_entries == NULL) {
        uint64_t bsize;

        bsize = vim->vim_dn->dn_datablkszsec << SPA_MINBLOCKSHIFT;
        size = vim->vim_phys->vimp_num_entries *
            sizeof (*vim->vim_entries);
        if (size > bsize) {
            size = bsize / sizeof (*vim->vim_entries);
            size *= sizeof (*vim->vim_entries);
        }
        vim->vim_entries = malloc(size);
        if (vim->vim_entries == NULL)
            return (NULL);
        vim->vim_num_entries = size / sizeof (*vim->vim_entries);
        offset = index * sizeof (*vim->vim_entries);
    } else {
        /* We have data in vim_entries */
        if (index >= vim->vim_entry_offset &&
            index <= vim->vim_entry_offset + vim->vim_num_entries) {
            index -= vim->vim_entry_offset;
            return (&vim->vim_entries[index]);
        }
        offset = index * sizeof (*vim->vim_entries);
    }

    vim->vim_entry_offset = index;
    size = vim->vim_num_entries * sizeof (*vim->vim_entries);
    rc = dnode_read(vim->vim_spa, vim->vim_dn, offset, vim->vim_entries,
        size);
    if (rc != 0) {
        /* Read error, invalidate vim_entries. */
        free(vim->vim_entries);
        vim->vim_entries = NULL;
        return (NULL);
    }
    index -= vim->vim_entry_offset;

    return (&vim->vim_entries[index]);
}
/*
 * Returns the mapping entry for the given offset.
 *
 * It's possible that the given offset will not be in the mapping table
 * (i.e. no mapping entries contain this offset), in which case, the
 * return value depends on the "next_if_missing" parameter.
 *
 * If the offset is not found in the table and "next_if_missing" is
 * B_FALSE, then NULL will always be returned. The behavior is intended
 * to allow consumers to get the entry corresponding to the offset
 * parameter, iff the offset overlaps with an entry in the table.
 *
 * If the offset is not found in the table and "next_if_missing" is
 * B_TRUE, then the entry nearest to the given offset will be returned,
 * such that the entry's source offset is greater than the offset
 * passed in (i.e. the "next" mapping entry in the table is returned, if
 * the offset is missing from the table). If there are no entries whose
 * source offset is greater than the passed in offset, NULL is returned.
 */
static vdev_indirect_mapping_entry_phys_t *
vdev_indirect_mapping_entry_for_offset(vdev_indirect_mapping_t *vim,
    uint64_t offset)
{
    ASSERT(vim->vim_phys->vimp_num_entries > 0);

    vdev_indirect_mapping_entry_phys_t *entry;

    uint64_t last = vim->vim_phys->vimp_num_entries - 1;
    uint64_t base = 0;

    /*
     * We don't define these inside of the while loop because we use
     * their value in the case that offset isn't in the mapping.
     */
    uint64_t mid;
    int result = 1;

    while (last >= base) {
        mid = base + ((last - base) >> 1);

        entry = vdev_indirect_mapping_entry(vim, mid);
        if (entry == NULL)
            break;
        result = dva_mapping_overlap_compare(&offset, entry);

        if (result == 0) {
            break;
        } else if (result < 0) {
            last = mid - 1;
        } else {
            base = mid + 1;
        }
    }

    return (result == 0 ? entry : NULL);
}
/*
 * Given an indirect vdev and an extent on that vdev, it duplicates the
 * physical entries of the indirect mapping that correspond to the extent
 * to a new array and returns a pointer to it. In addition, copied_entries
 * is populated with the number of mapping entries that were duplicated.
 *
 * Finally, since we are doing an allocation, it is up to the caller to
 * free the array allocated in this function.
 */
vdev_indirect_mapping_entry_phys_t *
vdev_indirect_mapping_duplicate_adjacent_entries(vdev_t *vd, uint64_t offset,
    uint64_t asize, uint64_t *copied_entries)
{
    vdev_indirect_mapping_entry_phys_t *duplicate_mappings = NULL;
    vdev_indirect_mapping_t *vim = vd->v_mapping;
    uint64_t entries = 0;

    vdev_indirect_mapping_entry_phys_t *first_mapping =
        vdev_indirect_mapping_entry_for_offset(vim, offset);
    ASSERT3P(first_mapping, !=, NULL);

    vdev_indirect_mapping_entry_phys_t *m = first_mapping;
    while (asize > 0) {
        uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);
        uint64_t inner_offset = offset - DVA_MAPPING_GET_SRC_OFFSET(m);
        uint64_t inner_size = MIN(asize, size - inner_offset);

        offset += inner_size;
        asize -= inner_size;
        entries++;
        m++;
    }

    size_t copy_length = entries * sizeof (*first_mapping);
    duplicate_mappings = malloc(copy_length);
    if (duplicate_mappings != NULL)
        bcopy(first_mapping, duplicate_mappings, copy_length);
    else
        entries = 0;

    *copied_entries = entries;

    return (duplicate_mappings);
}
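/*
 * For example (hypothetical extent): a read that starts in the middle
 * of one mapping entry and ends in the middle of the next one yields
 * *copied_entries == 2, with the first and last pieces trimmed by the
 * inner_offset and inner_size computed above.
 */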
static vdev_t *
vdev_lookup_top(spa_t *spa, uint64_t vdev)
{
    vdev_t *rvd;
    vdev_list_t *vlist;

    vlist = &spa->spa_root_vdev->v_children;
    STAILQ_FOREACH(rvd, vlist, v_childlink)
        if (rvd->v_id == vdev)
            break;

    return (rvd);
}

/*
 * This is a callback for vdev_indirect_remap() which allocates an
 * indirect_split_t for each split segment and adds it to iv_splits.
 */
static void
vdev_indirect_gather_splits(uint64_t split_offset, vdev_t *vd, uint64_t offset,
    uint64_t size, void *arg)
{
    zio_t *zio = arg;
    indirect_vsd_t *iv = zio->io_vsd;
    int n = 1;

    if (vd->v_read == vdev_indirect_read)
        return;

    if (vd->v_read == vdev_mirror_read)
        n = vd->v_nchildren;

    indirect_split_t *is =
        malloc(offsetof(indirect_split_t, is_child[n]));
    if (is == NULL) {
        zio->io_error = ENOMEM;
        return;
    }
    bzero(is, offsetof(indirect_split_t, is_child[n]));

    is->is_children = n;
    is->is_size = size;
    is->is_split_offset = split_offset;
    is->is_target_offset = offset;
    is->is_vdev = vd;

    /*
     * Note that we only consider multiple copies of the data for
     * *mirror* vdevs. We don't for "replacing" or "spare" vdevs, even
     * though they use the same ops as mirror, because there's only one
     * "good" copy under the replacing/spare.
     */
    if (vd->v_read == vdev_mirror_read) {
        vdev_t *kid;
        int i = 0;

        STAILQ_FOREACH(kid, &vd->v_children, v_childlink) {
            is->is_child[i++].ic_vdev = kid;
        }
    } else {
        is->is_child[0].ic_vdev = vd;
    }

    list_insert_tail(&iv->iv_splits, is);
}
static void
vdev_indirect_remap(vdev_t *vd, uint64_t offset, uint64_t asize, void *arg)
{
    list_t stack;
    spa_t *spa = vd->v_spa;
    zio_t *zio = arg;
    remap_segment_t *rs;

    list_create(&stack, sizeof (remap_segment_t),
        offsetof(remap_segment_t, rs_node));

    rs = rs_alloc(vd, offset, asize, 0);
    if (rs == NULL) {
        printf("vdev_indirect_remap: out of memory.\n");
        zio->io_error = ENOMEM;
    }
    for (; rs != NULL; rs = list_remove_head(&stack)) {
        vdev_t *v = rs->rs_vd;
        uint64_t num_entries = 0;
        /* vdev_indirect_mapping_t *vim = v->v_mapping; */
        vdev_indirect_mapping_entry_phys_t *mapping =
            vdev_indirect_mapping_duplicate_adjacent_entries(v,
            rs->rs_offset, rs->rs_asize, &num_entries);

        if (num_entries == 0)
            zio->io_error = ENOMEM;

        for (uint64_t i = 0; i < num_entries; i++) {
            vdev_indirect_mapping_entry_phys_t *m = &mapping[i];
            uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);
            uint64_t dst_offset = DVA_GET_OFFSET(&m->vimep_dst);
            uint64_t dst_vdev = DVA_GET_VDEV(&m->vimep_dst);
            uint64_t inner_offset = rs->rs_offset -
                DVA_MAPPING_GET_SRC_OFFSET(m);
            uint64_t inner_size =
                MIN(rs->rs_asize, size - inner_offset);
            vdev_t *dst_v = vdev_lookup_top(spa, dst_vdev);

            if (dst_v->v_read == vdev_indirect_read) {
                remap_segment_t *o;

                o = rs_alloc(dst_v, dst_offset + inner_offset,
                    inner_size, rs->rs_split_offset);
                if (o == NULL) {
                    printf("vdev_indirect_remap: "
                        "out of memory.\n");
                    zio->io_error = ENOMEM;
                    break;
                }
                list_insert_head(&stack, o);
            } else {
                vdev_indirect_gather_splits(rs->rs_split_offset, dst_v,
                    dst_offset + inner_offset,
                    inner_size, arg);
            }
            /*
             * vdev_indirect_gather_splits can fail with a memory
             * allocation error; we cannot recover from that.
             */
            if (zio->io_error != 0)
                break;
            rs->rs_offset += inner_size;
            rs->rs_asize -= inner_size;
            rs->rs_split_offset += inner_size;
        }

        free(mapping);
        free(rs);
        if (zio->io_error != 0)
            break;
    }

    list_destroy(&stack);
}
static void
vdev_indirect_map_free(zio_t *zio)
{
    indirect_vsd_t *iv = zio->io_vsd;
    indirect_split_t *is;

    while ((is = list_head(&iv->iv_splits)) != NULL) {
        for (int c = 0; c < is->is_children; c++) {
            indirect_child_t *ic = &is->is_child[c];
            free(ic->ic_data);
        }
        list_remove(&iv->iv_splits, is);
        free(is);
    }
    free(iv);
}
static int
vdev_indirect_read(vdev_t *vdev, const blkptr_t *bp, void *buf,
    off_t offset, size_t bytes)
{
    zio_t zio;
    spa_t *spa = vdev->v_spa;
    indirect_vsd_t *iv;
    indirect_split_t *first;
    int rc = EIO;

    iv = calloc(1, sizeof(*iv));
    if (iv == NULL)
        return (ENOMEM);

    list_create(&iv->iv_splits,
        sizeof (indirect_split_t), offsetof(indirect_split_t, is_node));

    bzero(&zio, sizeof(zio));
    zio.io_spa = spa;
    zio.io_bp = (blkptr_t *)bp;
    zio.io_data = buf;
    zio.io_size = bytes;
    zio.io_offset = offset;
    zio.io_vsd = iv;

    if (vdev->v_mapping == NULL) {
        vdev_indirect_config_t *vic;

        vic = &vdev->vdev_indirect_config;
        vdev->v_mapping = vdev_indirect_mapping_open(spa,
            &spa->spa_mos, vic->vic_mapping_object);
    }

    vdev_indirect_remap(vdev, offset, bytes, &zio);
    if (zio.io_error != 0)
        return (zio.io_error);

    first = list_head(&iv->iv_splits);
    if (first->is_size == zio.io_size) {
        /*
         * This is not a split block; we are pointing to the entire
         * data, which will checksum the same as the original data.
         * Pass the BP down so that the child i/o can verify the
         * checksum, and try a different location if available
         * (e.g. on a mirror).
         *
         * While this special case could be handled the same as the
         * general (split block) case, doing it this way ensures
         * that the vast majority of blocks on indirect vdevs
         * (which are not split) are handled identically to blocks
         * on non-indirect vdevs. This allows us to be less strict
         * about performance in the general (but rare) case.
         */
        rc = first->is_vdev->v_read(first->is_vdev, zio.io_bp,
            zio.io_data, first->is_target_offset, bytes);
    } else {
        iv->iv_split_block = B_TRUE;
        /*
         * Read one copy of each split segment, from the
         * top-level vdev. Since we don't know the
         * checksum of each split individually, the child
         * zio can't ensure that we get the right data.
         * E.g. if it's a mirror, it will just read from a
         * random (healthy) leaf vdev. We have to verify
         * the checksum in vdev_indirect_io_done().
         */
        for (indirect_split_t *is = list_head(&iv->iv_splits);
            is != NULL; is = list_next(&iv->iv_splits, is)) {
            char *ptr = zio.io_data;

            rc = is->is_vdev->v_read(is->is_vdev, zio.io_bp,
                ptr + is->is_split_offset, is->is_target_offset,
                is->is_size);
        }
        if (zio_checksum_verify(spa, zio.io_bp, zio.io_data))
            rc = ECKSUM;
        else
            rc = 0;
    }

    vdev_indirect_map_free(&zio);
    if (rc == 0)
        rc = zio.io_error;

    return (rc);
}
static int
vdev_disk_read(vdev_t *vdev, const blkptr_t *bp, void *buf,
    off_t offset, size_t bytes)
{

    return (vdev_read_phys(vdev, bp, buf,
        offset + VDEV_LABEL_START_SIZE, bytes));
}

static int
vdev_mirror_read(vdev_t *vdev, const blkptr_t *bp, void *buf,
    off_t offset, size_t bytes)
{
    vdev_t *kid;
    int rc;

    rc = EIO;
    STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) {
        if (kid->v_state != VDEV_STATE_HEALTHY)
            continue;
        rc = kid->v_read(kid, bp, buf, offset, bytes);
        if (!rc)
            break;
    }

    return (rc);
}

static int
vdev_replacing_read(vdev_t *vdev, const blkptr_t *bp, void *buf,
    off_t offset, size_t bytes)
{
    vdev_t *kid;

    /*
     * Here we should have two kids:
     * The first is the vdev we are replacing; it is the only one we
     * can trust to have valid data, but it might not be present.
     * The second is the vdev we are replacing it with. It is most
     * likely healthy, but we can't trust it has the data we need,
     * so we won't use it.
     */
    kid = STAILQ_FIRST(&vdev->v_children);
    if (kid == NULL)
        return (EIO);
    if (kid->v_state != VDEV_STATE_HEALTHY)
        return (EIO);
    return (kid->v_read(kid, bp, buf, offset, bytes));
}
static vdev_t *
vdev_find(uint64_t guid)
{
    vdev_t *vdev;

    STAILQ_FOREACH(vdev, &zfs_vdevs, v_alllink)
        if (vdev->v_guid == guid)
            return (vdev);

    return (NULL);
}

static vdev_t *
vdev_create(uint64_t guid, vdev_read_t *_read)
{
    vdev_t *vdev;
    vdev_indirect_config_t *vic;

    vdev = calloc(1, sizeof(vdev_t));
    if (vdev != NULL) {
        STAILQ_INIT(&vdev->v_children);
        vdev->v_guid = guid;
        vdev->v_read = _read;

        /*
         * The root vdev has no read function; we use this fact to
         * skip setting up data we do not need for the root vdev.
         * We only point to the root vdev from the spa.
         */
        if (_read != NULL) {
            vic = &vdev->vdev_indirect_config;
            vic->vic_prev_indirect_vdev = UINT64_MAX;
            STAILQ_INSERT_TAIL(&zfs_vdevs, vdev, v_alllink);
        }
    }

    return (vdev);
}

static void
vdev_set_initial_state(vdev_t *vdev, const unsigned char *nvlist)
{
    uint64_t is_offline, is_faulted, is_degraded, is_removed, isnt_present;
    uint64_t is_log;

    is_offline = is_removed = is_faulted = is_degraded = isnt_present = 0;
    is_log = 0;

    (void) nvlist_find(nvlist, ZPOOL_CONFIG_OFFLINE, DATA_TYPE_UINT64, NULL,
        &is_offline);
    (void) nvlist_find(nvlist, ZPOOL_CONFIG_REMOVED, DATA_TYPE_UINT64, NULL,
        &is_removed);
    (void) nvlist_find(nvlist, ZPOOL_CONFIG_FAULTED, DATA_TYPE_UINT64, NULL,
        &is_faulted);
    (void) nvlist_find(nvlist, ZPOOL_CONFIG_DEGRADED, DATA_TYPE_UINT64,
        NULL, &is_degraded);
    (void) nvlist_find(nvlist, ZPOOL_CONFIG_NOT_PRESENT, DATA_TYPE_UINT64,
        NULL, &isnt_present);
    (void) nvlist_find(nvlist, ZPOOL_CONFIG_IS_LOG, DATA_TYPE_UINT64, NULL,
        &is_log);

    if (is_offline != 0)
        vdev->v_state = VDEV_STATE_OFFLINE;
    else if (is_removed != 0)
        vdev->v_state = VDEV_STATE_REMOVED;
    else if (is_faulted != 0)
        vdev->v_state = VDEV_STATE_FAULTED;
    else if (is_degraded != 0)
        vdev->v_state = VDEV_STATE_DEGRADED;
    else if (isnt_present != 0)
        vdev->v_state = VDEV_STATE_CANT_OPEN;

    vdev->v_islog = is_log != 0;
}
static int
vdev_init(uint64_t guid, const unsigned char *nvlist, vdev_t **vdevp)
{
    uint64_t id, ashift, asize, nparity;
    const char *path;
    const char *type;
    vdev_t *vdev;

    if (nvlist_find(nvlist, ZPOOL_CONFIG_ID, DATA_TYPE_UINT64, NULL, &id) ||
        nvlist_find(nvlist, ZPOOL_CONFIG_TYPE, DATA_TYPE_STRING,
        NULL, &type)) {
        return (ENOENT);
    }

    if (strcmp(type, VDEV_TYPE_MIRROR) != 0 &&
        strcmp(type, VDEV_TYPE_DISK) != 0 &&
        strcmp(type, VDEV_TYPE_FILE) != 0 &&
        strcmp(type, VDEV_TYPE_RAIDZ) != 0 &&
        strcmp(type, VDEV_TYPE_INDIRECT) != 0 &&
        strcmp(type, VDEV_TYPE_REPLACING) != 0) {
        printf("ZFS: can only boot from disk, mirror, raidz1, "
            "raidz2 and raidz3 vdevs\n");
        return (EIO);
    }

    if (strcmp(type, VDEV_TYPE_MIRROR) == 0)
        vdev = vdev_create(guid, vdev_mirror_read);
    else if (strcmp(type, VDEV_TYPE_RAIDZ) == 0)
        vdev = vdev_create(guid, vdev_raidz_read);
    else if (strcmp(type, VDEV_TYPE_REPLACING) == 0)
        vdev = vdev_create(guid, vdev_replacing_read);
    else if (strcmp(type, VDEV_TYPE_INDIRECT) == 0) {
        vdev_indirect_config_t *vic;

        vdev = vdev_create(guid, vdev_indirect_read);
        if (vdev != NULL) {
            vdev->v_state = VDEV_STATE_HEALTHY;
            vic = &vdev->vdev_indirect_config;

            nvlist_find(nvlist,
                ZPOOL_CONFIG_INDIRECT_OBJECT,
                DATA_TYPE_UINT64,
                NULL, &vic->vic_mapping_object);
            nvlist_find(nvlist,
                ZPOOL_CONFIG_INDIRECT_BIRTHS,
                DATA_TYPE_UINT64,
                NULL, &vic->vic_births_object);
            nvlist_find(nvlist,
                ZPOOL_CONFIG_PREV_INDIRECT_VDEV,
                DATA_TYPE_UINT64,
                NULL, &vic->vic_prev_indirect_vdev);
        }
    } else {
        vdev = vdev_create(guid, vdev_disk_read);
    }

    if (vdev == NULL)
        return (ENOMEM);

    vdev_set_initial_state(vdev, nvlist);
    vdev->v_id = id;
    if (nvlist_find(nvlist, ZPOOL_CONFIG_ASHIFT,
        DATA_TYPE_UINT64, NULL, &ashift) == 0)
        vdev->v_ashift = ashift;

    if (nvlist_find(nvlist, ZPOOL_CONFIG_ASIZE,
        DATA_TYPE_UINT64, NULL, &asize) == 0) {
        vdev->v_psize = asize +
            VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
    }

    if (nvlist_find(nvlist, ZPOOL_CONFIG_NPARITY,
        DATA_TYPE_UINT64, NULL, &nparity) == 0)
        vdev->v_nparity = nparity;

    if (nvlist_find(nvlist, ZPOOL_CONFIG_PATH,
        DATA_TYPE_STRING, NULL, &path) == 0) {
        if (strncmp(path, "/dev/", 5) == 0)
            path += 5;
        vdev->v_name = strdup(path);
    } else {
        char *name;

        name = NULL;
        if (strcmp(type, "raidz") == 0) {
            if (vdev->v_nparity < 1 ||
                vdev->v_nparity > 3) {
                printf("ZFS: invalid raidz parity: %d\n",
                    vdev->v_nparity);
                return (EIO);
            }
            (void) asprintf(&name, "%s%d-%" PRIu64, type,
                vdev->v_nparity, id);
        } else {
            (void) asprintf(&name, "%s-%" PRIu64, type, id);
        }
        vdev->v_name = name;
    }
    *vdevp = vdev;

    return (0);
}
/*
 * Find a slot for the vdev. We return either NULL to signal to use
 * STAILQ_INSERT_HEAD, or we return the link element to be used with
 * STAILQ_INSERT_AFTER.
 */
static vdev_t *
vdev_find_previous(vdev_t *top_vdev, vdev_t *vdev)
{
    vdev_t *v, *previous;

    if (STAILQ_EMPTY(&top_vdev->v_children))
        return (NULL);

    previous = NULL;
    STAILQ_FOREACH(v, &top_vdev->v_children, v_childlink) {
        if (v->v_id > vdev->v_id)
            return (previous);

        if (v->v_id == vdev->v_id)
            return (v);

        if (v->v_id < vdev->v_id)
            previous = v;
    }
    return (previous);
}

static size_t
vdev_child_count(vdev_t *vdev)
{
    vdev_t *v;
    size_t count;

    count = 0;
    STAILQ_FOREACH(v, &vdev->v_children, v_childlink) {
        count++;
    }
    return (count);
}

/*
 * Insert vdev into top_vdev children list. List is ordered by v_id.
 */
static void
vdev_insert(vdev_t *top_vdev, vdev_t *vdev)
{
    vdev_t *previous;
    size_t count;

    /*
     * Top-level vdevs can appear in random order, depending on how the
     * firmware presents the disk devices.  However, we insert each vdev
     * so the list stays ordered by v_id, so we can use either
     * STAILQ_INSERT_HEAD or STAILQ_INSERT_AFTER, as STAILQ does not
     * have insert-before.
     */
    previous = vdev_find_previous(top_vdev, vdev);

    if (previous == NULL) {
        STAILQ_INSERT_HEAD(&top_vdev->v_children, vdev, v_childlink);
    } else if (previous->v_id == vdev->v_id) {
        /*
         * This vdev was configured from label config,
         * do not insert duplicate.
         */
        return;
    } else {
        STAILQ_INSERT_AFTER(&top_vdev->v_children, previous, vdev,
            v_childlink);
    }

    count = vdev_child_count(top_vdev);
    if (top_vdev->v_nchildren < count)
        top_vdev->v_nchildren = count;
}
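/*
 * For example (hypothetical probe order): if children with v_id 2, 0
 * and 1 are inserted in that order, vdev_find_previous() keeps the
 * list sorted as 0, 1, 2, and a second insert of an already-known
 * v_id is dropped as a duplicate.
 */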
static int
vdev_from_nvlist(spa_t *spa, uint64_t top_guid, const unsigned char *nvlist)
{
    vdev_t *top_vdev, *vdev;
    const unsigned char *kids;
    int rc, nkids;

    /* Get top vdev. */
    top_vdev = vdev_find(top_guid);
    if (top_vdev == NULL) {
        rc = vdev_init(top_guid, nvlist, &top_vdev);
        if (rc != 0)
            return (rc);
        top_vdev->v_spa = spa;
        top_vdev->v_top = top_vdev;
        vdev_insert(spa->spa_root_vdev, top_vdev);
    }

    /* Add children if there are any. */
    rc = nvlist_find(nvlist, ZPOOL_CONFIG_CHILDREN, DATA_TYPE_NVLIST_ARRAY,
        &nkids, &kids);
    if (rc == 0) {
        for (int i = 0; i < nkids; i++) {
            uint64_t guid;

            rc = nvlist_find(kids, ZPOOL_CONFIG_GUID,
                DATA_TYPE_UINT64, NULL, &guid);
            if (rc != 0)
                return (rc);
            rc = vdev_init(guid, kids, &vdev);
            if (rc != 0)
                return (rc);

            vdev->v_spa = spa;
            vdev->v_top = top_vdev;
            vdev_insert(top_vdev, vdev);

            kids = nvlist_next(kids);
        }
    } else {
        /*
         * When there are no children, nvlist_find() returns an
         * error; reset it, since leaf vdevs have no children.
         */
        rc = 0;
    }

    return (rc);
}

static int
vdev_init_from_label(spa_t *spa, const unsigned char *nvlist)
{
    uint64_t pool_guid, top_guid;
    const unsigned char *vdevs;

    if (nvlist_find(nvlist, ZPOOL_CONFIG_POOL_GUID, DATA_TYPE_UINT64,
        NULL, &pool_guid) ||
        nvlist_find(nvlist, ZPOOL_CONFIG_TOP_GUID, DATA_TYPE_UINT64,
        NULL, &top_guid) ||
        nvlist_find(nvlist, ZPOOL_CONFIG_VDEV_TREE, DATA_TYPE_NVLIST,
        NULL, &vdevs)) {
        printf("ZFS: can't find vdev details\n");
        return (ENOENT);
    }

    return (vdev_from_nvlist(spa, top_guid, vdevs));
}
static void
vdev_set_state(vdev_t *vdev)
{
    vdev_t *kid;
    int good_kids;
    int bad_kids;

    STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) {
        vdev_set_state(kid);
    }

    /*
     * A mirror or raidz is healthy if all its kids are healthy. A
     * mirror is degraded if any of its kids is healthy; a raidz
     * is degraded if at most nparity kids are offline.
     */
    if (STAILQ_FIRST(&vdev->v_children)) {
        good_kids = 0;
        bad_kids = 0;
        STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) {
            if (kid->v_state == VDEV_STATE_HEALTHY)
                good_kids++;
            else
                bad_kids++;
        }
        if (bad_kids == 0) {
            vdev->v_state = VDEV_STATE_HEALTHY;
        } else {
            if (vdev->v_read == vdev_mirror_read) {
                if (good_kids) {
                    vdev->v_state = VDEV_STATE_DEGRADED;
                } else {
                    vdev->v_state = VDEV_STATE_OFFLINE;
                }
            } else if (vdev->v_read == vdev_raidz_read) {
                if (bad_kids > vdev->v_nparity) {
                    vdev->v_state = VDEV_STATE_OFFLINE;
                } else {
                    vdev->v_state = VDEV_STATE_DEGRADED;
                }
            }
        }
    }
}

static int
vdev_update_from_nvlist(uint64_t top_guid, const unsigned char *nvlist)
{
    vdev_t *vdev;
    const unsigned char *kids;
    int rc, nkids;

    /* Update top vdev. */
    vdev = vdev_find(top_guid);
    if (vdev != NULL)
        vdev_set_initial_state(vdev, nvlist);

    /* Update children if there are any. */
    rc = nvlist_find(nvlist, ZPOOL_CONFIG_CHILDREN, DATA_TYPE_NVLIST_ARRAY,
        &nkids, &kids);
    if (rc == 0) {
        for (int i = 0; i < nkids; i++) {
            uint64_t guid;

            rc = nvlist_find(kids, ZPOOL_CONFIG_GUID,
                DATA_TYPE_UINT64, NULL, &guid);
            if (rc != 0)
                break;

            vdev = vdev_find(guid);
            if (vdev != NULL)
                vdev_set_initial_state(vdev, kids);

            kids = nvlist_next(kids);
        }
    } else {
        rc = 0;
    }

    return (rc);
}
static int
vdev_init_from_nvlist(spa_t *spa, const unsigned char *nvlist)
{
    uint64_t pool_guid, vdev_children;
    const unsigned char *vdevs, *kids;
    int rc, nkids;

    if (nvlist_find(nvlist, ZPOOL_CONFIG_POOL_GUID, DATA_TYPE_UINT64,
        NULL, &pool_guid) ||
        nvlist_find(nvlist, ZPOOL_CONFIG_VDEV_CHILDREN, DATA_TYPE_UINT64,
        NULL, &vdev_children) ||
        nvlist_find(nvlist, ZPOOL_CONFIG_VDEV_TREE, DATA_TYPE_NVLIST,
        NULL, &vdevs)) {
        printf("ZFS: can't find vdev details\n");
        return (ENOENT);
    }

    if (spa->spa_guid != pool_guid)
        return (EIO);

    spa->spa_root_vdev->v_nchildren = vdev_children;

    rc = nvlist_find(vdevs, ZPOOL_CONFIG_CHILDREN, DATA_TYPE_NVLIST_ARRAY,
        &nkids, &kids);

    /*
     * MOS config has at least one child for root vdev.
     */
    if (rc != 0)
        return (EIO);

    for (int i = 0; i < nkids; i++) {
        uint64_t guid;
        vdev_t *vdev;

        rc = nvlist_find(kids, ZPOOL_CONFIG_GUID, DATA_TYPE_UINT64,
            NULL, &guid);
        if (rc != 0)
            break;
        vdev = vdev_find(guid);
        /*
         * Top level vdev is missing, create it.
         */
        if (vdev == NULL)
            rc = vdev_from_nvlist(spa, guid, kids);
        else
            rc = vdev_update_from_nvlist(guid, kids);
        if (rc != 0)
            break;
        kids = nvlist_next(kids);
    }

    /*
     * Re-evaluate top-level vdev state.
     */
    vdev_set_state(spa->spa_root_vdev);

    return (rc);
}
static spa_t *
spa_find_by_guid(uint64_t guid)
{
    spa_t *spa;

    STAILQ_FOREACH(spa, &zfs_pools, spa_link)
        if (spa->spa_guid == guid)
            return (spa);

    return (NULL);
}

static spa_t *
spa_find_by_name(const char *name)
{
    spa_t *spa;

    STAILQ_FOREACH(spa, &zfs_pools, spa_link)
        if (strcmp(spa->spa_name, name) == 0)
            return (spa);

    return (NULL);
}

static spa_t *
spa_get_primary(void)
{

    return (STAILQ_FIRST(&zfs_pools));
}

static vdev_t *
spa_get_primary_vdev(const spa_t *spa)
{
    vdev_t *vdev;
    vdev_t *kid;

    if (spa == NULL)
        spa = spa_get_primary();
    if (spa == NULL)
        return (NULL);
    vdev = spa->spa_root_vdev;
    if (vdev == NULL)
        return (NULL);
    for (kid = STAILQ_FIRST(&vdev->v_children); kid != NULL;
        kid = STAILQ_FIRST(&vdev->v_children))
        vdev = kid;
    return (vdev);
}

static spa_t *
spa_create(uint64_t guid, const char *name)
{
    spa_t *spa;

    if ((spa = calloc(1, sizeof(spa_t))) == NULL)
        return (NULL);
    if ((spa->spa_name = strdup(name)) == NULL) {
        free(spa);
        return (NULL);
    }
    spa->spa_guid = guid;
    spa->spa_root_vdev = vdev_create(guid, NULL);
    if (spa->spa_root_vdev == NULL) {
        free(spa->spa_name);
        free(spa);
        return (NULL);
    }
    spa->spa_root_vdev->v_name = strdup("root");
    STAILQ_INSERT_TAIL(&zfs_pools, spa, spa_link);

    return (spa);
}

static const char *
state_name(vdev_state_t state)
{
    static const char *names[] = {
        "UNKNOWN",
        "CLOSED",
        "OFFLINE",
        "REMOVED",
        "CANT_OPEN",
        "FAULTED",
        "DEGRADED",
        "ONLINE"
    };
    return (names[state]);
}
#ifdef BOOT2

#define pager_printf printf

#else

static int
pager_printf(const char *fmt, ...)
{
    char line[80];
    va_list args;

    va_start(args, fmt);
    vsnprintf(line, sizeof(line), fmt, args);
    va_end(args);
    return (pager_output(line));
}

#endif

#define STATUS_FORMAT   " %s %s\n"

static int
print_state(int indent, const char *name, vdev_state_t state)
{
    int i;
    char buf[512];

    buf[0] = 0;
    for (i = 0; i < indent; i++)
        strcat(buf, "  ");
    strcat(buf, name);
    return (pager_printf(STATUS_FORMAT, buf, state_name(state)));
}

static int
vdev_status(vdev_t *vdev, int indent)
{
    vdev_t *kid;
    int ret;

    if (vdev->v_islog) {
        (void) pager_output(" logs\n");
        indent++;
    }

    ret = print_state(indent, vdev->v_name, vdev->v_state);
    if (ret != 0)
        return (ret);

    STAILQ_FOREACH(kid, &vdev->v_children, v_childlink) {
        ret = vdev_status(kid, indent + 1);
        if (ret != 0)
            return (ret);
    }
    return (ret);
}

static int
spa_status(spa_t *spa)
{
    static char bootfs[ZFS_MAXNAMELEN];
    uint64_t rootid;
    vdev_list_t *vlist;
    vdev_t *vdev;
    int good_kids, bad_kids, degraded_kids, ret;
    vdev_state_t state;

    ret = pager_printf(" pool: %s\n", spa->spa_name);
    if (ret != 0)
        return (ret);

    if (zfs_get_root(spa, &rootid) == 0 &&
        zfs_rlookup(spa, rootid, bootfs) == 0) {
        if (bootfs[0] == '\0')
            ret = pager_printf("bootfs: %s\n", spa->spa_name);
        else
            ret = pager_printf("bootfs: %s/%s\n", spa->spa_name,
                bootfs);
        if (ret != 0)
            return (ret);
    }
    ret = pager_printf("config:\n\n");
    if (ret != 0)
        return (ret);
    ret = pager_printf(STATUS_FORMAT, "NAME", "STATE");
    if (ret != 0)
        return (ret);

    good_kids = 0;
    degraded_kids = 0;
    bad_kids = 0;
    vlist = &spa->spa_root_vdev->v_children;
    STAILQ_FOREACH(vdev, vlist, v_childlink) {
        if (vdev->v_state == VDEV_STATE_HEALTHY)
            good_kids++;
        else if (vdev->v_state == VDEV_STATE_DEGRADED)
            degraded_kids++;
        else
            bad_kids++;
    }

    state = VDEV_STATE_CLOSED;
    if (good_kids > 0 && (degraded_kids + bad_kids) == 0)
        state = VDEV_STATE_HEALTHY;
    else if ((good_kids + degraded_kids) > 0)
        state = VDEV_STATE_DEGRADED;

    ret = print_state(0, spa->spa_name, state);
    if (ret != 0)
        return (ret);

    STAILQ_FOREACH(vdev, vlist, v_childlink) {
        ret = vdev_status(vdev, 1);
        if (ret != 0)
            return (ret);
    }
    return (ret);
}

static int
spa_all_status(void)
{
    spa_t *spa;
    int first = 1, ret = 0;

    STAILQ_FOREACH(spa, &zfs_pools, spa_link) {
        if (!first) {
            ret = pager_printf("\n");
            if (ret != 0)
                return (ret);
        }
        first = 0;
        ret = spa_status(spa);
        if (ret != 0)
            return (ret);
    }
    return (ret);
}
static uint64_t
vdev_label_offset(uint64_t psize, int l, uint64_t offset)
{
    uint64_t label_offset;

    if (l < VDEV_LABELS / 2)
        label_offset = 0;
    else
        label_offset = psize - VDEV_LABELS * sizeof (vdev_label_t);

    return (offset + l * sizeof (vdev_label_t) + label_offset);
}
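/*
 * Example: with VDEV_LABELS == 4 and sizeof (vdev_label_t) == 256KB,
 * labels 0 and 1 sit at the front of the device (offsets 0 and 256KB)
 * and labels 2 and 3 sit at the end (psize - 512KB and psize - 256KB),
 * so a pool can still be recognized if either end of the device is
 * damaged.
 */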
static int
vdev_uberblock_compare(const uberblock_t *ub1, const uberblock_t *ub2)
{
    unsigned int seq1 = 0;
    unsigned int seq2 = 0;
    int cmp = AVL_CMP(ub1->ub_txg, ub2->ub_txg);

    if (cmp != 0)
        return (cmp);

    cmp = AVL_CMP(ub1->ub_timestamp, ub2->ub_timestamp);
    if (cmp != 0)
        return (cmp);

    if (MMP_VALID(ub1) && MMP_SEQ_VALID(ub1))
        seq1 = MMP_SEQ(ub1);

    if (MMP_VALID(ub2) && MMP_SEQ_VALID(ub2))
        seq2 = MMP_SEQ(ub2);

    return (AVL_CMP(seq1, seq2));
}
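/*
 * In other words, uberblocks are ordered by txg first, then by
 * timestamp, and finally (when multihost/MMP info is valid) by MMP
 * sequence number, so a higher txg wins regardless of timestamp.
 */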
static int
uberblock_verify(uberblock_t *ub)
{
    if (ub->ub_magic == BSWAP_64((uint64_t)UBERBLOCK_MAGIC)) {
        byteswap_uint64_array(ub, sizeof (uberblock_t));
    }

    if (ub->ub_magic != UBERBLOCK_MAGIC ||
        !SPA_VERSION_IS_SUPPORTED(ub->ub_version))
        return (EIO);

    return (0);
}

static int
vdev_label_read(vdev_t *vd, int l, void *buf, uint64_t offset,
    size_t size)
{
    blkptr_t bp;
    off_t off;

    off = vdev_label_offset(vd->v_psize, l, offset);

    BP_ZERO(&bp);
    BP_SET_LSIZE(&bp, size);
    BP_SET_PSIZE(&bp, size);
    BP_SET_CHECKSUM(&bp, ZIO_CHECKSUM_LABEL);
    BP_SET_COMPRESS(&bp, ZIO_COMPRESS_OFF);
    DVA_SET_OFFSET(BP_IDENTITY(&bp), off);
    ZIO_SET_CHECKSUM(&bp.blk_cksum, off, 0, 0, 0);

    return (vdev_read_phys(vd, &bp, buf, off, size));
}
static unsigned char *
vdev_label_read_config(vdev_t *vd, uint64_t txg)
{
    vdev_phys_t *label;
    uint64_t best_txg = 0;
    uint64_t label_txg = 0;
    uint64_t asize;
    unsigned char *nvl;
    size_t nvl_size;
    int error;

    label = malloc(sizeof (vdev_phys_t));
    if (label == NULL)
        return (NULL);

    nvl_size = VDEV_PHYS_SIZE - sizeof (zio_eck_t) - 4;
    nvl = malloc(nvl_size);
    if (nvl == NULL)
        goto done;

    for (int l = 0; l < VDEV_LABELS; l++) {
        const unsigned char *nvlist;

        if (vdev_label_read(vd, l, label,
            offsetof(vdev_label_t, vl_vdev_phys),
            sizeof (vdev_phys_t)))
            continue;

        if (label->vp_nvlist[0] != NV_ENCODE_XDR)
            continue;

        nvlist = (const unsigned char *) label->vp_nvlist + 4;
        error = nvlist_find(nvlist, ZPOOL_CONFIG_POOL_TXG,
            DATA_TYPE_UINT64, NULL, &label_txg);
        if (error != 0 || label_txg == 0) {
            memcpy(nvl, nvlist, nvl_size);
            goto done;
        }

        if (label_txg <= txg && label_txg > best_txg) {
            best_txg = label_txg;
            memcpy(nvl, nvlist, nvl_size);

            /*
             * Use asize from pool config. We need this
             * because the BIOS can report a bad value.
             */
            if (nvlist_find(nvlist, ZPOOL_CONFIG_ASIZE,
                DATA_TYPE_UINT64, NULL, &asize) == 0) {
                vd->v_psize = asize +
                    VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
            }
        }
    }

    if (best_txg == 0) {
        free(nvl);
        nvl = NULL;
    }
done:
    free(label);
    return (nvl);
}
static void
vdev_uberblock_load(vdev_t *vd, uberblock_t *ub)
{
    uberblock_t *buf;

    buf = malloc(VDEV_UBERBLOCK_SIZE(vd));
    if (buf == NULL)
        return;

    for (int l = 0; l < VDEV_LABELS; l++) {
        for (int n = 0; n < VDEV_UBERBLOCK_COUNT(vd); n++) {
            if (vdev_label_read(vd, l, buf,
                VDEV_UBERBLOCK_OFFSET(vd, n),
                VDEV_UBERBLOCK_SIZE(vd)))
                continue;
            if (uberblock_verify(buf) != 0)
                continue;

            if (vdev_uberblock_compare(buf, ub) > 0)
                *ub = *buf;
        }
    }
    free(buf);
}
static int
vdev_probe(vdev_phys_read_t *_read, void *read_priv, spa_t **spap)
{
    vdev_t vtmp;
    spa_t *spa;
    vdev_t *vdev;
    unsigned char *nvlist;
    uint64_t val;
    uint64_t guid, vdev_children;
    uint64_t pool_txg, pool_guid;
    const char *pool_name;
    const unsigned char *features;
    int rc;

    /*
     * Load the vdev label and figure out which
     * uberblock is most current.
     */
    memset(&vtmp, 0, sizeof(vtmp));
    vtmp.v_phys_read = _read;
    vtmp.v_read_priv = read_priv;
    vtmp.v_psize = P2ALIGN(ldi_get_size(read_priv),
        (uint64_t)sizeof (vdev_label_t));

    /* Test for minimum device size. */
    if (vtmp.v_psize < SPA_MINDEVSIZE)
        return (EIO);

    nvlist = vdev_label_read_config(&vtmp, UINT64_MAX);
    if (nvlist == NULL)
        return (EIO);

    if (nvlist_find(nvlist, ZPOOL_CONFIG_VERSION, DATA_TYPE_UINT64,
        NULL, &val) != 0) {
        free(nvlist);
        return (EIO);
    }

    if (!SPA_VERSION_IS_SUPPORTED(val)) {
        printf("ZFS: unsupported ZFS version %u (should be %u)\n",
            (unsigned)val, (unsigned)SPA_VERSION);
        free(nvlist);
        return (EIO);
    }

    /* Check ZFS features for read */
    if (nvlist_find(nvlist, ZPOOL_CONFIG_FEATURES_FOR_READ,
        DATA_TYPE_NVLIST, NULL, &features) == 0 &&
        nvlist_check_features_for_read(features) != 0) {
        free(nvlist);
        return (EIO);
    }

    if (nvlist_find(nvlist, ZPOOL_CONFIG_POOL_STATE, DATA_TYPE_UINT64,
        NULL, &val) != 0) {
        free(nvlist);
        return (EIO);
    }

    if (val == POOL_STATE_DESTROYED) {
        /* We don't boot from destroyed pools. */
        free(nvlist);
        return (EIO);
    }

    if (nvlist_find(nvlist, ZPOOL_CONFIG_POOL_TXG, DATA_TYPE_UINT64,
        NULL, &pool_txg) != 0 ||
        nvlist_find(nvlist, ZPOOL_CONFIG_POOL_GUID, DATA_TYPE_UINT64,
        NULL, &pool_guid) != 0 ||
        nvlist_find(nvlist, ZPOOL_CONFIG_POOL_NAME, DATA_TYPE_STRING,
        NULL, &pool_name) != 0) {
        /*
         * Cache and spare devices end up here - just ignore
         * them.
         */
        free(nvlist);
        return (EIO);
    }

    /*
     * Create the pool if this is the first time we've seen it.
     */
    spa = spa_find_by_guid(pool_guid);
    if (spa == NULL) {
        nvlist_find(nvlist, ZPOOL_CONFIG_VDEV_CHILDREN,
            DATA_TYPE_UINT64, NULL, &vdev_children);
        spa = spa_create(pool_guid, pool_name);
        if (spa == NULL) {
            free(nvlist);
            return (ENOMEM);
        }
        spa->spa_root_vdev->v_nchildren = vdev_children;
    }
    if (pool_txg > spa->spa_txg)
        spa->spa_txg = pool_txg;

    /*
     * Get the vdev tree and create our in-core copy of it.
     * If we already have a vdev with this guid, this must
     * be some kind of alias (overlapping slices, dangerously dedicated
     * disks etc).
     */
    if (nvlist_find(nvlist, ZPOOL_CONFIG_GUID, DATA_TYPE_UINT64,
        NULL, &guid) != 0) {
        free(nvlist);
        return (EIO);
    }
    vdev = vdev_find(guid);
    /* Has this vdev already been inited? */
    if (vdev && vdev->v_phys_read) {
        free(nvlist);
        return (EIO);
    }

    rc = vdev_init_from_label(spa, nvlist);
    free(nvlist);
    if (rc != 0)
        return (rc);

    /*
     * We should already have created an incomplete vdev for this
     * vdev. Find it and initialise it with our read proc.
     */
    vdev = vdev_find(guid);
    if (vdev != NULL) {
        vdev->v_phys_read = _read;
        vdev->v_read_priv = read_priv;
        vdev->v_psize = vtmp.v_psize;
        /*
         * If no other state is set, mark vdev healthy.
         */
        if (vdev->v_state == VDEV_STATE_UNKNOWN)
            vdev->v_state = VDEV_STATE_HEALTHY;
    } else {
        printf("ZFS: inconsistent nvlist contents\n");
        return (EIO);
    }

    if (vdev->v_islog)
        spa->spa_with_log = vdev->v_islog;

    /*
     * Re-evaluate top-level vdev state.
     */
    vdev_set_state(vdev->v_top);

    /*
     * Ok, we are happy with the pool so far. Let's find
     * the best uberblock and then we can actually access
     * the contents of the pool.
     */
    vdev_uberblock_load(vdev, &spa->spa_uberblock);

    if (spap != NULL)
        *spap = spa;

    return (0);
}
static int
ilog2(int n)
{
    int v;

    for (v = 0; v < 32; v++)
        if (n == (1 << v))
            return (v);
    return (-1);
}

static int
zio_read_gang(const spa_t *spa, const blkptr_t *bp, void *buf)
{
    blkptr_t gbh_bp;
    zio_gbh_phys_t zio_gb;
    char *pbuf;
    int i;

    /* Artificial BP for gang block header. */
    gbh_bp = *bp;
    BP_SET_PSIZE(&gbh_bp, SPA_GANGBLOCKSIZE);
    BP_SET_LSIZE(&gbh_bp, SPA_GANGBLOCKSIZE);
    BP_SET_CHECKSUM(&gbh_bp, ZIO_CHECKSUM_GANG_HEADER);
    BP_SET_COMPRESS(&gbh_bp, ZIO_COMPRESS_OFF);
    for (i = 0; i < SPA_DVAS_PER_BP; i++)
        DVA_SET_GANG(&gbh_bp.blk_dva[i], 0);

    /* Read gang header block using the artificial BP. */
    if (zio_read(spa, &gbh_bp, &zio_gb))
        return (EIO);

    pbuf = buf;
    for (i = 0; i < SPA_GBH_NBLKPTRS; i++) {
        blkptr_t *gbp = &zio_gb.zg_blkptr[i];

        if (BP_IS_HOLE(gbp))
            continue;
        if (zio_read(spa, gbp, pbuf))
            return (EIO);
        pbuf += BP_GET_PSIZE(gbp);
    }

    if (zio_checksum_verify(spa, bp, buf))
        return (EIO);
    return (0);
}
static int
zio_read(const spa_t *spa, const blkptr_t *bp, void *buf)
{
    int cpfunc = BP_GET_COMPRESS(bp);
    uint64_t align, size;
    void *pbuf;
    int i, error;

    /*
     * Process data embedded in block pointer
     */
    if (BP_IS_EMBEDDED(bp)) {
        ASSERT(BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA);

        size = BPE_GET_PSIZE(bp);
        ASSERT(size <= BPE_PAYLOAD_SIZE);

        if (cpfunc != ZIO_COMPRESS_OFF)
            pbuf = zfs_alloc(size);
        else
            pbuf = buf;

        decode_embedded_bp_compressed(bp, pbuf);
        error = 0;

        if (cpfunc != ZIO_COMPRESS_OFF) {
            error = zio_decompress_data(cpfunc, pbuf,
                size, buf, BP_GET_LSIZE(bp));
            zfs_free(pbuf, size);
        }
        if (error != 0)
            printf("ZFS: i/o error - unable to decompress "
                "block pointer data, error %d\n", error);
        return (error);
    }

    error = EIO;

    for (i = 0; i < SPA_DVAS_PER_BP; i++) {
        const dva_t *dva = &bp->blk_dva[i];
        vdev_t *vdev;
        vdev_list_t *vlist;
        uint64_t vdevid;
        off_t offset;

        if (!dva->dva_word[0] && !dva->dva_word[1])
            continue;

        vdevid = DVA_GET_VDEV(dva);
        offset = DVA_GET_OFFSET(dva);
        vlist = &spa->spa_root_vdev->v_children;
        STAILQ_FOREACH(vdev, vlist, v_childlink) {
            if (vdev->v_id == vdevid)
                break;
        }
        if (!vdev || !vdev->v_read)
            continue;

        size = BP_GET_PSIZE(bp);
        if (vdev->v_read == vdev_raidz_read) {
            align = 1ULL << vdev->v_ashift;
            if (P2PHASE(size, align) != 0)
                size = P2ROUNDUP(size, align);
        }
        if (size != BP_GET_PSIZE(bp) || cpfunc != ZIO_COMPRESS_OFF)
            pbuf = zfs_alloc(size);
        else
            pbuf = buf;

        if (DVA_GET_GANG(dva))
            error = zio_read_gang(spa, bp, pbuf);
        else
            error = vdev->v_read(vdev, bp, pbuf, offset, size);
        if (error == 0) {
            if (cpfunc != ZIO_COMPRESS_OFF)
                error = zio_decompress_data(cpfunc, pbuf,
                    BP_GET_PSIZE(bp), buf, BP_GET_LSIZE(bp));
            else if (size != BP_GET_PSIZE(bp))
                bcopy(pbuf, buf, BP_GET_PSIZE(bp));
        }
        if (buf != pbuf)
            zfs_free(pbuf, size);
        if (error == 0)
            break;
    }
    if (error != 0)
        printf("ZFS: i/o error - all block copies unavailable\n");

    return (error);
}
static int
dnode_read(const spa_t *spa, const dnode_phys_t *dnode, off_t offset,
    void *buf, size_t buflen)
{
    int ibshift = dnode->dn_indblkshift - SPA_BLKPTRSHIFT;
    int bsize = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
    int nlevels = dnode->dn_nlevels;
    int i, rc;

    if (bsize > SPA_MAXBLOCKSIZE) {
        printf("ZFS: I/O error - blocks larger than %llu are not "
            "supported\n", SPA_MAXBLOCKSIZE);
        return (EIO);
    }

    /*
     * Note: bsize may not be a power of two here so we need to do an
     * actual divide rather than a bitshift.
     */
    while (buflen > 0) {
        uint64_t bn = offset / bsize;
        int boff = offset % bsize;
        int ibn;
        const blkptr_t *indbp;
        blkptr_t bp;

        if (bn > dnode->dn_maxblkid)
            return (EIO);

        if (dnode == dnode_cache_obj && bn == dnode_cache_bn)
            goto cached;

        indbp = dnode->dn_blkptr;
        for (i = 0; i < nlevels; i++) {
            /*
             * Copy the bp from the indirect array so that
             * we can re-use the scratch buffer for multi-level
             * objects.
             */
            ibn = bn >> ((nlevels - i - 1) * ibshift);
            ibn &= ((1 << ibshift) - 1);
            bp = indbp[ibn];
            if (BP_IS_HOLE(&bp)) {
                memset(dnode_cache_buf, 0, bsize);
                break;
            }
            rc = zio_read(spa, &bp, dnode_cache_buf);
            if (rc)
                return (rc);
            indbp = (const blkptr_t *) dnode_cache_buf;
        }
        dnode_cache_obj = dnode;
        dnode_cache_bn = bn;
    cached:

        /*
         * The buffer contains our data block. Copy what we
         * need from it and loop.
         */
        i = bsize - boff;
        if (i > buflen)
            i = buflen;
        memcpy(buf, &dnode_cache_buf[boff], i);
        buf = ((char *)buf) + i;
        offset += i;
        buflen -= i;
    }

    return (0);
}
/*
 * Lookup a value in a microzap directory.
 */
static int
mzap_lookup(const mzap_phys_t *mz, size_t size, const char *name,
    uint64_t *value)
{
    const mzap_ent_phys_t *mze;
    int chunks, i;

    /*
     * Microzap objects use exactly one block. Read the whole
     * block.
     */
    chunks = size / MZAP_ENT_LEN - 1;
    for (i = 0; i < chunks; i++) {
        mze = &mz->mz_chunk[i];
        if (strcmp(mze->mze_name, name) == 0) {
            *value = mze->mze_value;
            return (0);
        }
    }

    return (ENOENT);
}
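/*
 * The "- 1" above accounts for the microzap header occupying the first
 * chunk.  For example, with MZAP_ENT_LEN == 64 a 16KB microzap block
 * holds 16384 / 64 - 1 = 255 name/value entries.
 */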
/*
 * Compare a name with a zap leaf entry. Return non-zero if the name
 * matches.
 */
static int
fzap_name_equal(const zap_leaf_t *zl, const zap_leaf_chunk_t *zc,
    const char *name)
{
    size_t namelen;
    const zap_leaf_chunk_t *nc;
    const char *p;

    namelen = zc->l_entry.le_name_numints;

    nc = &ZAP_LEAF_CHUNK(zl, zc->l_entry.le_name_chunk);
    p = name;
    while (namelen > 0) {
        size_t len;

        len = namelen;
        if (len > ZAP_LEAF_ARRAY_BYTES)
            len = ZAP_LEAF_ARRAY_BYTES;
        if (memcmp(p, nc->l_array.la_array, len))
            return (0);
        p += len;
        namelen -= len;
        nc = &ZAP_LEAF_CHUNK(zl, nc->l_array.la_next);
    }

    return (1);
}

/*
 * Extract a uint64_t value from a zap leaf entry.
 */
static uint64_t
fzap_leaf_value(const zap_leaf_t *zl, const zap_leaf_chunk_t *zc)
{
    const zap_leaf_chunk_t *vc;
    int i;
    uint64_t value;
    const uint8_t *p;

    vc = &ZAP_LEAF_CHUNK(zl, zc->l_entry.le_value_chunk);
    for (i = 0, value = 0, p = vc->l_array.la_array; i < 8; i++) {
        value = (value << 8) | p[i];
    }

    return (value);
}
static void
stv(int len, void *addr, uint64_t value)
{
    switch (len) {
    case 1:
        *(uint8_t *)addr = value;
        return;
    case 2:
        *(uint16_t *)addr = value;
        return;
    case 4:
        *(uint32_t *)addr = value;
        return;
    case 8:
        *(uint64_t *)addr = value;
        return;
    }
}

/*
 * Extract an array from a zap leaf entry.
 */
static void
fzap_leaf_array(const zap_leaf_t *zl, const zap_leaf_chunk_t *zc,
    uint64_t integer_size, uint64_t num_integers, void *buf)
{
    uint64_t array_int_len = zc->l_entry.le_value_intlen;
    uint64_t value = 0;
    uint64_t *u64 = buf;
    char *p = buf;
    int len = MIN(zc->l_entry.le_value_numints, num_integers);
    int chunk = zc->l_entry.le_value_chunk;
    int byten = 0;

    if (integer_size == 8 && len == 1) {
        *u64 = fzap_leaf_value(zl, zc);
        return;
    }

    while (len > 0) {
        struct zap_leaf_array *la = &ZAP_LEAF_CHUNK(zl, chunk).l_array;
        int i;

        ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(zl));
        for (i = 0; i < ZAP_LEAF_ARRAY_BYTES && len > 0; i++) {
            value = (value << 8) | la->la_array[i];
            byten++;
            if (byten == array_int_len) {
                stv(integer_size, p, value);
                byten = 0;
                len--;
                if (len == 0)
                    return;
                p += integer_size;
            }
        }
        chunk = la->la_next;
    }
}

static int
fzap_check_size(uint64_t integer_size, uint64_t num_integers)
{

    switch (integer_size) {
    case 1:
    case 2:
    case 4:
    case 8:
        break;
    default:
        return (EINVAL);
    }

    if (integer_size * num_integers > ZAP_MAXVALUELEN)
        return (E2BIG);

    return (0);
}

static void
zap_leaf_free(zap_leaf_t *leaf)
{
    free(leaf->l_phys);
    free(leaf);
}
static int
zap_get_leaf_byblk(fat_zap_t *zap, uint64_t blk, zap_leaf_t **lp)
{
    int bs = FZAP_BLOCK_SHIFT(zap);
    int err;

    *lp = malloc(sizeof(**lp));
    if (*lp == NULL)
        return (ENOMEM);

    (*lp)->l_bs = bs;
    (*lp)->l_phys = malloc(1 << bs);

    if ((*lp)->l_phys == NULL) {
        free(*lp);
        return (ENOMEM);
    }
    err = dnode_read(zap->zap_spa, zap->zap_dnode, blk << bs, (*lp)->l_phys,
        1 << bs);
    if (err != 0) {
        zap_leaf_free(*lp);
    }
    return (err);
}

static int
zap_table_load(fat_zap_t *zap, zap_table_phys_t *tbl, uint64_t idx,
    uint64_t *valp)
{
    int bs = FZAP_BLOCK_SHIFT(zap);
    uint64_t blk = idx >> (bs - 3);
    uint64_t off = idx & ((1 << (bs - 3)) - 1);
    uint64_t *buf;
    int rc;

    buf = malloc(1 << zap->zap_block_shift);
    if (buf == NULL)
        return (ENOMEM);
    rc = dnode_read(zap->zap_spa, zap->zap_dnode, (tbl->zt_blk + blk) << bs,
        buf, 1 << zap->zap_block_shift);
    if (rc == 0)
        *valp = buf[off];
    free(buf);
    return (rc);
}

static int
zap_idx_to_blk(fat_zap_t *zap, uint64_t idx, uint64_t *valp)
{
    if (zap->zap_phys->zap_ptrtbl.zt_numblks == 0) {
        *valp = ZAP_EMBEDDED_PTRTBL_ENT(zap, idx);
        return (0);
    } else {
        return (zap_table_load(zap, &zap->zap_phys->zap_ptrtbl,
            idx, valp));
    }
}

#define ZAP_HASH_IDX(hash, n)   (((n) == 0) ? 0 : ((hash) >> (64 - (n))))
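/*
 * ZAP_HASH_IDX() takes the top n bits of the 64-bit hash as the index
 * into the pointer table; e.g. for n == 10 a hash of
 * 0xC000000000000000 maps to index 0x300.
 */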
static int
zap_deref_leaf(fat_zap_t *zap, uint64_t h, zap_leaf_t **lp)
{
    uint64_t idx, blk;
    int err;

    idx = ZAP_HASH_IDX(h, zap->zap_phys->zap_ptrtbl.zt_shift);
    err = zap_idx_to_blk(zap, idx, &blk);
    if (err != 0)
        return (err);
    return (zap_get_leaf_byblk(zap, blk, lp));
}

#define CHAIN_END       0xffff  /* end of the chunk chain */
#define LEAF_HASH(l, h) \
    ((ZAP_LEAF_HASH_NUMENTRIES(l)-1) & \
    ((h) >> \
    (64 - ZAP_LEAF_HASH_SHIFT(l) - (l)->l_phys->l_hdr.lh_prefix_len)))
#define LEAF_HASH_ENTPTR(l, h)  (&(l)->l_phys->l_hash[LEAF_HASH(l, h)])
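/*
 * LEAF_HASH() indexes the leaf's hash table with the hash bits that
 * follow the leaf's prefix: the top lh_prefix_len bits selected this
 * leaf via the pointer table, so the next ZAP_LEAF_HASH_SHIFT(l) bits
 * pick a slot within the leaf's l_hash[] array.
 */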
static int
zap_leaf_lookup(zap_leaf_t *zl, uint64_t hash, const char *name,
    uint64_t integer_size, uint64_t num_integers, void *value)
{
    int rc;
    uint16_t *chunkp;
    struct zap_leaf_entry *le;

    /*
     * Make sure this chunk matches our hash.
     */
    if (zl->l_phys->l_hdr.lh_prefix_len > 0 &&
        zl->l_phys->l_hdr.lh_prefix !=
        hash >> (64 - zl->l_phys->l_hdr.lh_prefix_len))
        return (EIO);

    rc = ENOENT;
    for (chunkp = LEAF_HASH_ENTPTR(zl, hash);
        *chunkp != CHAIN_END; chunkp = &le->le_next) {
        zap_leaf_chunk_t *zc;
        uint16_t chunk = *chunkp;

        le = ZAP_LEAF_ENTRY(zl, chunk);
        if (le->le_hash != hash)
            continue;
        zc = &ZAP_LEAF_CHUNK(zl, chunk);
        if (fzap_name_equal(zl, zc, name)) {
            if (zc->l_entry.le_value_intlen > integer_size) {
                rc = EINVAL;
            } else {
                fzap_leaf_array(zl, zc, integer_size,
                    num_integers, value);
                rc = 0;
            }
            break;
        }
    }
    return (rc);
}
/*
 * Lookup a value in a fatzap directory.
 */
static int
fzap_lookup(const spa_t *spa, const dnode_phys_t *dnode, zap_phys_t *zh,
    const char *name, uint64_t integer_size, uint64_t num_integers,
    void *value)
{
    int bsize = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
    fat_zap_t z;
    zap_leaf_t *zl;
    uint64_t hash;
    int rc;

    if (zh->zap_magic != ZAP_MAGIC)
        return (EIO);

    if ((rc = fzap_check_size(integer_size, num_integers)) != 0)
        return (rc);

    z.zap_block_shift = ilog2(bsize);
    z.zap_phys = zh;
    z.zap_spa = spa;
    z.zap_dnode = dnode;

    hash = zap_hash(zh->zap_salt, name);
    rc = zap_deref_leaf(&z, hash, &zl);
    if (rc != 0)
        return (rc);

    rc = zap_leaf_lookup(zl, hash, name, integer_size, num_integers, value);

    zap_leaf_free(zl);
    return (rc);
}

/*
 * Lookup a name in a zap object and return its value as a uint64_t.
 */
static int
zap_lookup(const spa_t *spa, const dnode_phys_t *dnode, const char *name,
    uint64_t integer_size, uint64_t num_integers, void *value)
{
    int rc;
    zap_phys_t *zap;
    size_t size = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;

    zap = malloc(size);
    if (zap == NULL)
        return (ENOMEM);

    rc = dnode_read(spa, dnode, 0, zap, size);
    if (rc == 0) {
        switch (zap->zap_block_type) {
        case ZBT_MICRO:
            rc = mzap_lookup((const mzap_phys_t *)zap, size, name,
                value);
            break;
        case ZBT_HEADER:
            rc = fzap_lookup(spa, dnode, zap, name, integer_size,
                num_integers, value);
            break;
        default:
            printf("ZFS: invalid zap_type=%" PRIx64 "\n",
                zap->zap_block_type);
            rc = EIO;
        }
    }
    free(zap);
    return (rc);
}
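/*
 * Typical use, as in zfs_lookup_dataset() below: looking up a
 * uint64_t object number by name, e.g.
 *
 *      zap_lookup(spa, &dir, DMU_POOL_ROOT_DATASET, sizeof (dir_obj),
 *          1, &dir_obj);
 */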
/*
 * List a microzap directory.
 */
static int
mzap_list(const mzap_phys_t *mz, size_t size,
    int (*callback)(const char *, uint64_t))
{
    const mzap_ent_phys_t *mze;
    int chunks, i, rc;

    /*
     * Microzap objects use exactly one block. Read the whole
     * block.
     */
    rc = 0;
    chunks = size / MZAP_ENT_LEN - 1;
    for (i = 0; i < chunks; i++) {
        mze = &mz->mz_chunk[i];
        if (mze->mze_name[0]) {
            rc = callback(mze->mze_name, mze->mze_value);
            if (rc != 0)
                break;
        }
    }

    return (rc);
}

/*
 * List a fatzap directory.
 */
static int
fzap_list(const spa_t *spa, const dnode_phys_t *dnode, zap_phys_t *zh,
    int (*callback)(const char *, uint64_t))
{
    int bsize = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
    fat_zap_t z;
    uint64_t i;
    int j, rc;

    if (zh->zap_magic != ZAP_MAGIC)
        return (EIO);

    z.zap_block_shift = ilog2(bsize);
    z.zap_phys = zh;

    /*
     * This assumes that the leaf blocks start at block 1. The
     * documentation isn't exactly clear on this.
     */
    zap_leaf_t zl;
    zl.l_bs = z.zap_block_shift;
    zl.l_phys = malloc(bsize);
    if (zl.l_phys == NULL)
        return (ENOMEM);

    for (i = 0; i < zh->zap_num_leafs; i++) {
        off_t off = ((off_t)(i + 1)) << zl.l_bs;
        char name[256], *p;
        uint64_t value;

        if (dnode_read(spa, dnode, off, zl.l_phys, bsize)) {
            free(zl.l_phys);
            return (EIO);
        }

        for (j = 0; j < ZAP_LEAF_NUMCHUNKS(&zl); j++) {
            zap_leaf_chunk_t *zc, *nc;
            int namelen;

            zc = &ZAP_LEAF_CHUNK(&zl, j);
            if (zc->l_entry.le_type != ZAP_CHUNK_ENTRY)
                continue;
            namelen = zc->l_entry.le_name_numints;
            if (namelen > sizeof(name))
                namelen = sizeof(name);

            /*
             * Paste the name back together.
             */
            nc = &ZAP_LEAF_CHUNK(&zl, zc->l_entry.le_name_chunk);
            p = name;
            while (namelen > 0) {
                int len;

                len = namelen;
                if (len > ZAP_LEAF_ARRAY_BYTES)
                    len = ZAP_LEAF_ARRAY_BYTES;
                memcpy(p, nc->l_array.la_array, len);
                p += len;
                namelen -= len;
                nc = &ZAP_LEAF_CHUNK(&zl, nc->l_array.la_next);
            }

            /*
             * Assume the first eight bytes of the value are
             * a uint64_t.
             */
            value = fzap_leaf_value(&zl, zc);

            /* printf("%s 0x%jx\n", name, (uintmax_t)value); */
            rc = callback((const char *)name, value);
            if (rc != 0) {
                free(zl.l_phys);
                return (rc);
            }
        }
    }

    free(zl.l_phys);
    return (0);
}

static int zfs_printf(const char *name, uint64_t value __unused)
{

    printf("%s\n", name);

    return (0);
}

/*
 * List a zap directory.
 */
static int
zap_list(const spa_t *spa, const dnode_phys_t *dnode)
{
    zap_phys_t *zap;
    size_t size = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
    int rc;

    zap = malloc(size);
    if (zap == NULL)
        return (ENOMEM);

    rc = dnode_read(spa, dnode, 0, zap, size);
    if (rc == 0) {
        if (zap->zap_block_type == ZBT_MICRO)
            rc = mzap_list((const mzap_phys_t *)zap, size,
                zfs_printf);
        else
            rc = fzap_list(spa, dnode, zap, zfs_printf);
    }
    free(zap);
    return (rc);
}

static int
objset_get_dnode(const spa_t *spa, const objset_phys_t *os, uint64_t objnum,
    dnode_phys_t *dnode)
{
    off_t offset;

    offset = objnum * sizeof(dnode_phys_t);
    return dnode_read(spa, &os->os_meta_dnode, offset,
        dnode, sizeof(dnode_phys_t));
}
/*
 * Lookup a name in a microzap directory by its value.
 */
static int
mzap_rlookup(const mzap_phys_t *mz, size_t size, char *name, uint64_t value)
{
    const mzap_ent_phys_t *mze;
    int chunks, i;

    /*
     * Microzap objects use exactly one block. Read the whole
     * block.
     */
    chunks = size / MZAP_ENT_LEN - 1;
    for (i = 0; i < chunks; i++) {
        mze = &mz->mz_chunk[i];
        if (value == mze->mze_value) {
            strcpy(name, mze->mze_name);
            return (0);
        }
    }

    return (ENOENT);
}

static void
fzap_name_copy(const zap_leaf_t *zl, const zap_leaf_chunk_t *zc, char *name)
{
    size_t namelen;
    const zap_leaf_chunk_t *nc;
    char *p;

    namelen = zc->l_entry.le_name_numints;

    nc = &ZAP_LEAF_CHUNK(zl, zc->l_entry.le_name_chunk);
    p = name;
    while (namelen > 0) {
        size_t len;

        len = namelen;
        if (len > ZAP_LEAF_ARRAY_BYTES)
            len = ZAP_LEAF_ARRAY_BYTES;
        memcpy(p, nc->l_array.la_array, len);
        p += len;
        namelen -= len;
        nc = &ZAP_LEAF_CHUNK(zl, nc->l_array.la_next);
    }

    *p = '\0';
}

static int
fzap_rlookup(const spa_t *spa, const dnode_phys_t *dnode, zap_phys_t *zh,
    char *name, uint64_t value)
{
    int bsize = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
    fat_zap_t z;
    uint64_t i;
    int j, rc;

    if (zh->zap_magic != ZAP_MAGIC)
        return (EIO);

    z.zap_block_shift = ilog2(bsize);
    z.zap_phys = zh;

    /*
     * This assumes that the leaf blocks start at block 1. The
     * documentation isn't exactly clear on this.
     */
    zap_leaf_t zl;
    zl.l_bs = z.zap_block_shift;
    zl.l_phys = malloc(bsize);
    if (zl.l_phys == NULL)
        return (ENOMEM);

    for (i = 0; i < zh->zap_num_leafs; i++) {
        off_t off = ((off_t)(i + 1)) << zl.l_bs;

        rc = dnode_read(spa, dnode, off, zl.l_phys, bsize);
        if (rc != 0)
            goto done;

        for (j = 0; j < ZAP_LEAF_NUMCHUNKS(&zl); j++) {
            zap_leaf_chunk_t *zc;

            zc = &ZAP_LEAF_CHUNK(&zl, j);
            if (zc->l_entry.le_type != ZAP_CHUNK_ENTRY)
                continue;
            if (zc->l_entry.le_value_intlen != 8 ||
                zc->l_entry.le_value_numints != 1)
                continue;

            if (fzap_leaf_value(&zl, zc) == value) {
                fzap_name_copy(&zl, zc, name);
                rc = 0;
                goto done;
            }
        }
    }

    rc = ENOENT;
done:
    free(zl.l_phys);
    return (rc);
}

static int
zap_rlookup(const spa_t *spa, const dnode_phys_t *dnode, char *name,
    uint64_t value)
{
    zap_phys_t *zap;
    size_t size = dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
    int rc;

    zap = malloc(size);
    if (zap == NULL)
        return (ENOMEM);

    rc = dnode_read(spa, dnode, 0, zap, size);
    if (rc == 0) {
        if (zap->zap_block_type == ZBT_MICRO)
            rc = mzap_rlookup((const mzap_phys_t *)zap, size,
                name, value);
        else
            rc = fzap_rlookup(spa, dnode, zap, name, value);
    }
    free(zap);
    return (rc);
}
static int
zfs_rlookup(const spa_t *spa, uint64_t objnum, char *result)
{
    char name[256];
    char component[256];
    uint64_t dir_obj, parent_obj, child_dir_zapobj;
    dnode_phys_t child_dir_zap, dataset, dir, parent;
    dsl_dir_phys_t *dd;
    dsl_dataset_phys_t *ds;
    char *p;
    int len;

    p = &name[sizeof(name) - 1];
    *p = '\0';

    if (objset_get_dnode(spa, &spa->spa_mos, objnum, &dataset)) {
        printf("ZFS: can't find dataset %ju\n", (uintmax_t)objnum);
        return (EIO);
    }
    ds = (dsl_dataset_phys_t *)&dataset.dn_bonus;
    dir_obj = ds->ds_dir_obj;

    for (;;) {
        if (objset_get_dnode(spa, &spa->spa_mos, dir_obj, &dir) != 0)
            return (EIO);
        dd = (dsl_dir_phys_t *)&dir.dn_bonus;

        /* Actual loop condition. */
        parent_obj = dd->dd_parent_obj;
        if (parent_obj == 0)
            break;

        if (objset_get_dnode(spa, &spa->spa_mos, parent_obj,
            &parent) != 0)
            return (EIO);
        dd = (dsl_dir_phys_t *)&parent.dn_bonus;
        child_dir_zapobj = dd->dd_child_dir_zapobj;
        if (objset_get_dnode(spa, &spa->spa_mos, child_dir_zapobj,
            &child_dir_zap) != 0)
            return (EIO);
        if (zap_rlookup(spa, &child_dir_zap, component, dir_obj) != 0)
            return (EIO);

        len = strlen(component);
        p -= len;
        memcpy(p, component, len);
        --p;
        *p = '/';

        /* Actual loop iteration. */
        dir_obj = parent_obj;
    }

    if (*p != '\0')
        ++p;
    strcpy(result, p);

    return (0);
}
static int
zfs_lookup_dataset(const spa_t *spa, const char *name, uint64_t *objnum)
{
    char element[256];
    uint64_t dir_obj, child_dir_zapobj;
    dnode_phys_t child_dir_zap, dir;
    dsl_dir_phys_t *dd;
    const char *p, *q;

    if (objset_get_dnode(spa, &spa->spa_mos,
        DMU_POOL_DIRECTORY_OBJECT, &dir))
        return (EIO);
    if (zap_lookup(spa, &dir, DMU_POOL_ROOT_DATASET, sizeof (dir_obj),
        1, &dir_obj))
        return (EIO);

    p = name;
    for (;;) {
        if (objset_get_dnode(spa, &spa->spa_mos, dir_obj, &dir))
            return (EIO);
        dd = (dsl_dir_phys_t *)&dir.dn_bonus;

        while (*p == '/')
            p++;
        /* Actual loop condition #1. */
        if (*p == '\0')
            break;

        q = strchr(p, '/');
        if (q) {
            memcpy(element, p, q - p);
            element[q - p] = '\0';
            p = q + 1;
        } else {
            strcpy(element, p);
            p += strlen(p);
        }

        child_dir_zapobj = dd->dd_child_dir_zapobj;
        if (objset_get_dnode(spa, &spa->spa_mos, child_dir_zapobj,
            &child_dir_zap) != 0)
            return (EIO);

        /* Actual loop condition #2. */
        if (zap_lookup(spa, &child_dir_zap, element, sizeof (dir_obj),
            1, &dir_obj) != 0)
            return (ENOENT);
    }

    *objnum = dd->dd_head_dataset_obj;
    return (0);
}
3053 zfs_list_dataset(const spa_t *spa, uint64_t objnum/*, int pos, char *entry*/)
3055 uint64_t dir_obj, child_dir_zapobj;
3056 dnode_phys_t child_dir_zap, dir, dataset;
3057 dsl_dataset_phys_t *ds;
3060 if (objset_get_dnode(spa, &spa->spa_mos, objnum, &dataset)) {
3061 printf("ZFS: can't find dataset %ju\n", (uintmax_t)objnum);
3064 ds = (dsl_dataset_phys_t *)&dataset.dn_bonus;
3065 dir_obj = ds->ds_dir_obj;
3067 if (objset_get_dnode(spa, &spa->spa_mos, dir_obj, &dir)) {
3068 printf("ZFS: can't find dirobj %ju\n", (uintmax_t)dir_obj);
3071 dd = (dsl_dir_phys_t *)&dir.dn_bonus;
3073 child_dir_zapobj = dd->dd_child_dir_zapobj;
3074 if (objset_get_dnode(spa, &spa->spa_mos, child_dir_zapobj,
3075 &child_dir_zap) != 0) {
3076 printf("ZFS: can't find child zap %ju\n", (uintmax_t)dir_obj);
3080 return (zap_list(spa, &child_dir_zap) != 0);
3084 zfs_callback_dataset(const spa_t *spa, uint64_t objnum,
3085 int (*callback)(const char *, uint64_t))
3087 uint64_t dir_obj, child_dir_zapobj;
3088 dnode_phys_t child_dir_zap, dir, dataset;
3089 dsl_dataset_phys_t *ds;
3095 err = objset_get_dnode(spa, &spa->spa_mos, objnum, &dataset);
3097 printf("ZFS: can't find dataset %ju\n", (uintmax_t)objnum);
3100 ds = (dsl_dataset_phys_t *)&dataset.dn_bonus;
3101 dir_obj = ds->ds_dir_obj;
3103 err = objset_get_dnode(spa, &spa->spa_mos, dir_obj, &dir);
3105 printf("ZFS: can't find dirobj %ju\n", (uintmax_t)dir_obj);
3108 dd = (dsl_dir_phys_t *)&dir.dn_bonus;
3110 child_dir_zapobj = dd->dd_child_dir_zapobj;
3111 err = objset_get_dnode(spa, &spa->spa_mos, child_dir_zapobj,
3114 printf("ZFS: can't find child zap %ju\n", (uintmax_t)dir_obj);
3118 size = child_dir_zap.dn_datablkszsec << SPA_MINBLOCKSHIFT;
3121 err = dnode_read(spa, &child_dir_zap, 0, zap, size);
3125 if (zap->zap_block_type == ZBT_MICRO)
3126 err = mzap_list((const mzap_phys_t *)zap, size,
3129 err = fzap_list(spa, &child_dir_zap, zap, callback);
3140 * Find the object set given the object number of its dataset object
3141 * and return its details in *objset
3144 zfs_mount_dataset(const spa_t *spa, uint64_t objnum, objset_phys_t *objset)
3146 dnode_phys_t dataset;
3147 dsl_dataset_phys_t *ds;
3149 if (objset_get_dnode(spa, &spa->spa_mos, objnum, &dataset)) {
3150 printf("ZFS: can't find dataset %ju\n", (uintmax_t)objnum);
3154 ds = (dsl_dataset_phys_t *)&dataset.dn_bonus;
3155 if (zio_read(spa, &ds->ds_bp, objset)) {
3156 printf("ZFS: can't read object set for dataset %ju\n",
3165 * Find the object set pointed to by the BOOTFS property or the root
3166 * dataset if there is none and return its details in *objset
3169 zfs_get_root(const spa_t *spa, uint64_t *objid)
3171 dnode_phys_t dir, propdir;
3172 uint64_t props, bootfs, root;
3177 * Start with the MOS directory object.
3179 if (objset_get_dnode(spa, &spa->spa_mos,
3180 DMU_POOL_DIRECTORY_OBJECT, &dir)) {
3181 printf("ZFS: can't read MOS object directory\n");
3186 * Lookup the pool_props and see if we can find a bootfs.
3188 if (zap_lookup(spa, &dir, DMU_POOL_PROPS,
3189 sizeof(props), 1, &props) == 0 &&
3190 objset_get_dnode(spa, &spa->spa_mos, props, &propdir) == 0 &&
3191 zap_lookup(spa, &propdir, "bootfs",
3192 sizeof(bootfs), 1, &bootfs) == 0 && bootfs != 0) {
3197 * Lookup the root dataset directory
3199 if (zap_lookup(spa, &dir, DMU_POOL_ROOT_DATASET,
3200 sizeof(root), 1, &root) ||
3201 objset_get_dnode(spa, &spa->spa_mos, root, &dir)) {
3202 printf("ZFS: can't find root dsl_dir\n");
3207 * Use the information from the dataset directory's bonus buffer
3208 * to find the dataset object and from that the object set itself.
3210 dsl_dir_phys_t *dd = (dsl_dir_phys_t *)&dir.dn_bonus;
3211 *objid = dd->dd_head_dataset_obj;
3216 zfs_mount(const spa_t *spa, uint64_t rootobj, struct zfsmount *mount)
3222 * Find the root object set if not explicitly provided
3224 if (rootobj == 0 && zfs_get_root(spa, &rootobj)) {
3225 printf("ZFS: can't find root filesystem\n");
3229 if (zfs_mount_dataset(spa, rootobj, &mount->objset)) {
3230 printf("ZFS: can't open root filesystem\n");
3234 mount->rootobj = rootobj;
3240 * callback function for feature name checks.
3243 check_feature(const char *name, uint64_t value)
3249 if (name[0] == '\0')
3252 for (i = 0; features_for_read[i] != NULL; i++) {
3253 if (strcmp(name, features_for_read[i]) == 0)
3256 printf("ZFS: unsupported feature: %s\n", name);
3261 * Checks whether the MOS features that are active are supported.
3264 check_mos_features(const spa_t *spa)
3272 if ((rc = objset_get_dnode(spa, &spa->spa_mos, DMU_OT_OBJECT_DIRECTORY,
3275 if ((rc = zap_lookup(spa, &dir, DMU_POOL_FEATURES_FOR_READ,
3276 sizeof (objnum), 1, &objnum)) != 0) {
3278 * It is older pool without features. As we have already
3279 * tested the label, just return without raising the error.
3284 if ((rc = objset_get_dnode(spa, &spa->spa_mos, objnum, &dir)) != 0)
3287 if (dir.dn_type != DMU_OTN_ZAP_METADATA)
3290 size = dir.dn_datablkszsec << SPA_MINBLOCKSHIFT;
3295 if (dnode_read(spa, &dir, 0, zap, size)) {
3300 if (zap->zap_block_type == ZBT_MICRO)
3301 rc = mzap_list((const mzap_phys_t *)zap, size, check_feature);
3303 rc = fzap_list(spa, &dir, zap, check_feature);
3310 load_nvlist(spa_t *spa, uint64_t obj, unsigned char **value)
3318 if ((rc = objset_get_dnode(spa, &spa->spa_mos, obj, &dir)) != 0)
3320 if (dir.dn_type != DMU_OT_PACKED_NVLIST &&
3321 dir.dn_bonustype != DMU_OT_PACKED_NVLIST_SIZE) {
3325 if (dir.dn_bonuslen != sizeof (uint64_t))
3328 size = *(uint64_t *)DN_BONUS(&dir);
3333 rc = dnode_read(spa, &dir, 0, nv, size);
3344 zfs_spa_init(spa_t *spa)
3347 uint64_t config_object;
3348 unsigned char *nvlist;
3351 if (zio_read(spa, &spa->spa_uberblock.ub_rootbp, &spa->spa_mos)) {
3352 printf("ZFS: can't read MOS of pool %s\n", spa->spa_name);
3355 if (spa->spa_mos.os_type != DMU_OST_META) {
3356 printf("ZFS: corrupted MOS of pool %s\n", spa->spa_name);
3360 if (objset_get_dnode(spa, &spa->spa_mos, DMU_POOL_DIRECTORY_OBJECT,
3362 printf("ZFS: failed to read pool %s directory object\n",
3366 /* this is allowed to fail, older pools do not have salt */
3367 rc = zap_lookup(spa, &dir, DMU_POOL_CHECKSUM_SALT, 1,
3368 sizeof (spa->spa_cksum_salt.zcs_bytes),
3369 spa->spa_cksum_salt.zcs_bytes);
3371 rc = check_mos_features(spa);
3373 printf("ZFS: pool %s is not supported\n", spa->spa_name);
3377 rc = zap_lookup(spa, &dir, DMU_POOL_CONFIG,
3378 sizeof (config_object), 1, &config_object);
3380 printf("ZFS: can not read MOS %s\n", DMU_POOL_CONFIG);
3383 rc = load_nvlist(spa, config_object, &nvlist);
3388 * Update vdevs from MOS config. Note, we do skip encoding bytes
3389 * here. See also vdev_label_read_config().
3391 rc = vdev_init_from_nvlist(spa, nvlist + 4);
3397 zfs_dnode_stat(const spa_t *spa, dnode_phys_t *dn, struct stat *sb)
3400 if (dn->dn_bonustype != DMU_OT_SA) {
3401 znode_phys_t *zp = (znode_phys_t *)dn->dn_bonus;
3403 sb->st_mode = zp->zp_mode;
3404 sb->st_uid = zp->zp_uid;
3405 sb->st_gid = zp->zp_gid;
3406 sb->st_size = zp->zp_size;
3408 sa_hdr_phys_t *sahdrp;
3413 if (dn->dn_bonuslen != 0)
3414 sahdrp = (sa_hdr_phys_t *)DN_BONUS(dn);
3416 if ((dn->dn_flags & DNODE_FLAG_SPILL_BLKPTR) != 0) {
3417 blkptr_t *bp = DN_SPILL_BLKPTR(dn);
3420 size = BP_GET_LSIZE(bp);
3421 buf = zfs_alloc(size);
3422 error = zio_read(spa, bp, buf);
3424 zfs_free(buf, size);
3432 hdrsize = SA_HDR_SIZE(sahdrp);
3433 sb->st_mode = *(uint64_t *)((char *)sahdrp + hdrsize +
3435 sb->st_uid = *(uint64_t *)((char *)sahdrp + hdrsize +
3437 sb->st_gid = *(uint64_t *)((char *)sahdrp + hdrsize +
3439 sb->st_size = *(uint64_t *)((char *)sahdrp + hdrsize +
3442 zfs_free(buf, size);
3449 zfs_dnode_readlink(const spa_t *spa, dnode_phys_t *dn, char *path, size_t psize)
3453 if (dn->dn_bonustype == DMU_OT_SA) {
3454 sa_hdr_phys_t *sahdrp = NULL;
3460 if (dn->dn_bonuslen != 0)
3461 sahdrp = (sa_hdr_phys_t *)DN_BONUS(dn);
3465 if ((dn->dn_flags & DNODE_FLAG_SPILL_BLKPTR) == 0)
3467 bp = DN_SPILL_BLKPTR(dn);
3469 size = BP_GET_LSIZE(bp);
3470 buf = zfs_alloc(size);
3471 rc = zio_read(spa, bp, buf);
3473 zfs_free(buf, size);
3478 hdrsize = SA_HDR_SIZE(sahdrp);
3479 p = (char *)((uintptr_t)sahdrp + hdrsize + SA_SYMLINK_OFFSET);
3480 memcpy(path, p, psize);
3482 zfs_free(buf, size);
3486 * Second test is purely to silence bogus compiler
3487 * warning about accessing past the end of dn_bonus.
3489 if (psize + sizeof(znode_phys_t) <= dn->dn_bonuslen &&
3490 sizeof(znode_phys_t) <= sizeof(dn->dn_bonus)) {
3491 memcpy(path, &dn->dn_bonus[sizeof(znode_phys_t)], psize);
3493 rc = dnode_read(spa, dn, 0, path, psize);
3500 STAILQ_ENTRY(obj_list) entry;
3504 * Lookup a file and return its dnode.
3507 zfs_lookup(const struct zfsmount *mount, const char *upath, dnode_phys_t *dnode)
3516 int symlinks_followed = 0;
3518 struct obj_list *entry, *tentry;
3519 STAILQ_HEAD(, obj_list) on_cache = STAILQ_HEAD_INITIALIZER(on_cache);
3522 if (mount->objset.os_type != DMU_OST_ZFS) {
3523 printf("ZFS: unexpected object set type %ju\n",
3524 (uintmax_t)mount->objset.os_type);
3528 if ((entry = malloc(sizeof(struct obj_list))) == NULL)
3532 * Get the root directory dnode.
3534 rc = objset_get_dnode(spa, &mount->objset, MASTER_NODE_OBJ, &dn);
3540 rc = zap_lookup(spa, &dn, ZFS_ROOT_OBJ, sizeof(objnum), 1, &objnum);
3545 entry->objnum = objnum;
3546 STAILQ_INSERT_HEAD(&on_cache, entry, entry);
3548 rc = objset_get_dnode(spa, &mount->objset, objnum, &dn);
3554 rc = objset_get_dnode(spa, &mount->objset, objnum, &dn);
3563 while (*q != '\0' && *q != '/')
3567 if (p + 1 == q && p[0] == '.') {
3572 if (p + 2 == q && p[0] == '.' && p[1] == '.') {
3574 if (STAILQ_FIRST(&on_cache) ==
3575 STAILQ_LAST(&on_cache, obj_list, entry)) {
3579 entry = STAILQ_FIRST(&on_cache);
3580 STAILQ_REMOVE_HEAD(&on_cache, entry);
3582 objnum = (STAILQ_FIRST(&on_cache))->objnum;
3585 if (q - p + 1 > sizeof(element)) {
3589 memcpy(element, p, q - p);
3593 if ((rc = zfs_dnode_stat(spa, &dn, &sb)) != 0)
3595 if (!S_ISDIR(sb.st_mode)) {
3600 rc = zap_lookup(spa, &dn, element, sizeof (objnum), 1, &objnum);
3603 objnum = ZFS_DIRENT_OBJ(objnum);
3605 if ((entry = malloc(sizeof(struct obj_list))) == NULL) {
3609 entry->objnum = objnum;
3610 STAILQ_INSERT_HEAD(&on_cache, entry, entry);
3611 rc = objset_get_dnode(spa, &mount->objset, objnum, &dn);
3616 * Check for symlink.
3618 rc = zfs_dnode_stat(spa, &dn, &sb);
3621 if (S_ISLNK(sb.st_mode)) {
3622 if (symlinks_followed > 10) {
3626 symlinks_followed++;
3629 * Read the link value and copy the tail of our
3630 * current path onto the end.
3632 if (sb.st_size + strlen(p) + 1 > sizeof(path)) {
3636 strcpy(&path[sb.st_size], p);
3638 rc = zfs_dnode_readlink(spa, &dn, path, sb.st_size);
3643 * Restart with the new path, starting either at
3644 * the root or at the parent depending whether or
3645 * not the link is relative.
3649 while (STAILQ_FIRST(&on_cache) !=
3650 STAILQ_LAST(&on_cache, obj_list, entry)) {
3651 entry = STAILQ_FIRST(&on_cache);
3652 STAILQ_REMOVE_HEAD(&on_cache, entry);
3656 entry = STAILQ_FIRST(&on_cache);
3657 STAILQ_REMOVE_HEAD(&on_cache, entry);
3660 objnum = (STAILQ_FIRST(&on_cache))->objnum;
3666 STAILQ_FOREACH_SAFE(entry, &on_cache, entry, tentry)